Dataset columns:

  language     stringclasses   1 value
  repo         stringclasses   346 values
  path         stringlengths   6 to 201
  class_span   dict
  source       stringlengths   21 to 2.38M
  target       stringlengths   1 to 96
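How the columns fit together is easiest to see in code. A minimal sketch, assuming each row behaves like a plain dict with these six columns and that `class_span` holds character offsets of the class definition in the original file (an assumption, not stated above); the values are abbreviated from the first example below:

# Hypothetical row, abbreviated from the Netflix__metaflow example below (assumed dict layout).
row = {
    "language": "python",
    "repo": "Netflix__metaflow",
    "path": "metaflow/_vendor/click/_winconsole.py",
    "class_span": {"start": 4260, "end": 5226},  # assumed: character offsets in the original file
    "source": "class ____(_WindowsConsoleRawIOBase): ...",  # class body with its name masked as ____
    "target": "_WindowsConsoleWriter",  # the masked class name to predict
}

# Substituting the target back into the source recovers the original class header.
reconstructed = row["source"].replace("____", row["target"], 1)
assert reconstructed.startswith("class _WindowsConsoleWriter(")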
python
Netflix__metaflow
metaflow/_vendor/click/_winconsole.py
{ "start": 4260, "end": 5226 }
class ____(_WindowsConsoleRawIOBase):
    def writable(self):
        return True

    @staticmethod
    def _get_error_message(errno):
        if errno == ERROR_SUCCESS:
            return "ERROR_SUCCESS"
        elif errno == ERROR_NOT_ENOUGH_MEMORY:
            return "ERROR_NOT_ENOUGH_MEMORY"
        return "Windows error {}".format(errno)

    def write(self, b):
        bytes_to_be_written = len(b)
        buf = get_buffer(b)
        code_units_to_be_written = min(bytes_to_be_written, MAX_BYTES_WRITTEN) // 2
        code_units_written = c_ulong()
        WriteConsoleW(
            HANDLE(self.handle),
            buf,
            code_units_to_be_written,
            byref(code_units_written),
            None,
        )
        bytes_written = 2 * code_units_written.value

        if bytes_written == 0 and bytes_to_be_written > 0:
            raise OSError(self._get_error_message(GetLastError()))
        return bytes_written
_WindowsConsoleWriter
python
scipy__scipy
scipy/optimize/_shgo_lib/_vertex.py
{ "start": 5006, "end": 5715 }
class ____: """Base class for a vertex cache for a simplicial complex.""" def __init__(self): self.cache = collections.OrderedDict() self.nfev = 0 # Feasible points self.index = -1 def __iter__(self): for v in self.cache: yield self.cache[v] return def size(self): """Returns the size of the vertex cache.""" return self.index + 1 def print_out(self): headlen = len(f"Vertex cache of size: {len(self.cache)}:") print('=' * headlen) print(f"Vertex cache of size: {len(self.cache)}:") print('=' * headlen) for v in self.cache: self.cache[v].print_out()
VertexCacheBase
python
pdm-project__pdm
src/pdm/models/setup.py
{ "start": 356, "end": 1171 }
class ____: """ Abstraction of a Python project setup file. """ name: str | None = None version: str | None = None install_requires: list[str] = field(default_factory=list) extras_require: dict[str, list[str]] = field(default_factory=dict) python_requires: str | None = None summary: str | None = None def update(self, other: Setup) -> None: for f in fields(self): other_field = getattr(other, f.name) if other_field: setattr(self, f.name, other_field) def as_dict(self) -> dict[str, Any]: return asdict(self) @classmethod def from_directory(cls, dir: Path) -> Setup: return _SetupReader.read_from_directory(dir) def as_dist(self) -> Distribution: return SetupDistribution(self)
Setup
python
pyqtgraph__pyqtgraph
pyqtgraph/flowchart/library/Data.py
{ "start": 14101, "end": 14970 }
class ____(CtrlNode):
    """Select a slice from an array axis.
    """
    nodeName = 'Slice'
    uiTemplate = [
        ('axis', 'intSpin', {'value': 0, 'min': 0, 'max': 1000000}),
        ('start', 'intSpin', {'value': 0, 'min': -1000000, 'max': 1000000}),
        ('stop', 'intSpin', {'value': -1, 'min': -1000000, 'max': 1000000}),
        ('step', 'intSpin', {'value': 1, 'min': -1000000, 'max': 1000000}),
    ]

    def processData(self, data):
        s = self.stateGroup.state()
        ax = s['axis']
        start = s['start']
        stop = s['stop']
        step = s['step']
        if ax == 0:
            # allow support for non-ndarray sequence types
            return data[start:stop:step]
        else:
            sl = [slice(None) for i in range(data.ndim)]
            sl[ax] = slice(start, stop, step)
            return data[sl]
Slice
python
PrefectHQ__prefect
tests/test_task_engine.py
{ "start": 56276, "end": 58967 }
class ____:
    @pytest.fixture
    async def flow_run_context(self, prefect_client: PrefectClient):
        @flow
        def f():
            pass

        test_task_runner = ThreadPoolTaskRunner()
        flow_run = await prefect_client.create_flow_run(f)
        await propose_state(prefect_client, Running(), flow_run_id=flow_run.id)

        flow_run = await prefect_client.read_flow_run(flow_run.id)
        assert flow_run.run_count == 1

        result_store = await ResultStore().update_for_flow(f)
        return EngineContext(
            flow=f,
            flow_run=flow_run,
            client=prefect_client,
            task_runner=test_task_runner,
            result_store=result_store,
            parameters={"x": "y"},
        )

    def test_sync_task_run_counts(
        self, flow_run_context: EngineContext, sync_prefect_client, events_pipeline
    ):
        ID = None
        proof_that_i_ran = uuid4()

        @task
        def foo():
            task_run = TaskRunContext.get().task_run

            nonlocal ID
            ID = task_run.id

            assert task_run
            assert task_run.state
            assert task_run.state.type == StateType.RUNNING

            assert task_run.run_count == 1
            assert task_run.flow_run_run_count == flow_run_context.flow_run.run_count

            return proof_that_i_ran

        with flow_run_context:
            assert run_task_sync(foo) == proof_that_i_ran

        events_pipeline.process_events(_sync=True)

        task_run = sync_prefect_client.read_task_run(ID)
        assert task_run
        assert task_run.run_count == 1
        assert task_run.flow_run_run_count == flow_run_context.flow_run.run_count

    async def test_async_task_run_counts(
        self, flow_run_context: EngineContext, prefect_client, events_pipeline
    ):
        ID = None
        proof_that_i_ran = uuid4()

        @task
        async def foo():
            task_run = TaskRunContext.get().task_run

            nonlocal ID
            ID = task_run.id

            assert task_run
            assert task_run.state
            assert task_run.state.type == StateType.RUNNING

            assert task_run.run_count == 1
            assert task_run.flow_run_run_count == flow_run_context.flow_run.run_count

            return proof_that_i_ran

        with flow_run_context:
            assert await run_task_async(foo) == proof_that_i_ran

        await events_pipeline.process_events()

        task_run = await prefect_client.read_task_run(ID)
        assert task_run
        assert task_run.run_count == 1
        assert task_run.flow_run_run_count == flow_run_context.flow_run.run_count
TestRunCountTracking
python
ray-project__ray
python/ray/data/_internal/datasource/parquet_datasink.py
{ "start": 4004, "end": 11563 }
class ____(_FileDatasink): def __init__( self, path: str, *, partition_cols: Optional[List[str]] = None, arrow_parquet_args_fn: Optional[Callable[[], Dict[str, Any]]] = None, arrow_parquet_args: Optional[Dict[str, Any]] = None, min_rows_per_file: Optional[int] = None, max_rows_per_file: Optional[int] = None, filesystem: Optional["pyarrow.fs.FileSystem"] = None, try_create_dir: bool = True, open_stream_args: Optional[Dict[str, Any]] = None, filename_provider: Optional[FilenameProvider] = None, dataset_uuid: Optional[str] = None, mode: SaveMode = SaveMode.APPEND, ): if arrow_parquet_args_fn is None: arrow_parquet_args_fn = lambda: {} # noqa: E731 if arrow_parquet_args is None: arrow_parquet_args = {} self.arrow_parquet_args_fn = arrow_parquet_args_fn self.arrow_parquet_args = arrow_parquet_args self.min_rows_per_file = min_rows_per_file self.max_rows_per_file = max_rows_per_file self.partition_cols = partition_cols if self.min_rows_per_file is not None and self.max_rows_per_file is not None: assert ( self.min_rows_per_file <= self.max_rows_per_file ), "min_rows_per_file must be less than or equal to max_rows_per_file" if open_stream_args is not None: intersecting_keys = UNSUPPORTED_OPEN_STREAM_ARGS.intersection( set(open_stream_args.keys()) ) if intersecting_keys: logger.warning( "open_stream_args contains unsupported arguments: %s. These arguments " "are not supported by ParquetDatasink. They will be ignored.", intersecting_keys, ) if "compression" in open_stream_args: self.arrow_parquet_args["compression"] = open_stream_args["compression"] super().__init__( path, filesystem=filesystem, try_create_dir=try_create_dir, open_stream_args=open_stream_args, filename_provider=filename_provider, dataset_uuid=dataset_uuid, file_format=FILE_FORMAT, mode=mode, ) def write( self, blocks: Iterable[Block], ctx: TaskContext, ) -> None: import pyarrow as pa blocks = list(blocks) if all(BlockAccessor.for_block(block).num_rows() == 0 for block in blocks): return blocks = [ block for block in blocks if BlockAccessor.for_block(block).num_rows() > 0 ] filename = self.filename_provider.get_filename_for_block( blocks[0], ctx.kwargs[WRITE_UUID_KWARG_NAME], ctx.task_idx, 0 ) write_kwargs = _resolve_kwargs( self.arrow_parquet_args_fn, **self.arrow_parquet_args ) user_schema = write_kwargs.pop("schema", None) def write_blocks_to_path(): tables = [BlockAccessor.for_block(block).to_arrow() for block in blocks] if user_schema is None: output_schema = pa.unify_schemas([table.schema for table in tables]) else: output_schema = user_schema self._write_parquet_files( tables, filename, output_schema, ctx.kwargs[WRITE_UUID_KWARG_NAME], write_kwargs, ) logger.debug(f"Writing {filename} file to {self.path}.") call_with_retry( write_blocks_to_path, description=f"write '{filename}' to '{self.path}'", match=self._data_context.retried_io_errors, max_attempts=WRITE_FILE_MAX_ATTEMPTS, max_backoff_s=WRITE_FILE_RETRY_MAX_BACKOFF_SECONDS, ) def _get_basename_template(self, filename: str, write_uuid: str) -> str: # Check if write_uuid is present in filename, add if missing if write_uuid not in filename and self.mode == SaveMode.APPEND: raise ValueError( f"Write UUID '{write_uuid}' missing from filename template '{filename}'. This could result in files being overwritten." f"Modify your FileNameProvider implementation to include the `write_uuid` into the filename template or change your write mode to SaveMode.OVERWRITE. 
" ) # Check if filename is already templatized if "{i}" in filename: # Filename is already templatized, but may need file extension if FILE_FORMAT not in filename: # Add file extension to templatized filename basename_template = f"{filename}.{FILE_FORMAT}" else: # Already has extension, use as-is basename_template = filename elif FILE_FORMAT not in filename: # No extension and not templatized, add extension and template basename_template = f"{filename}-{{i}}.{FILE_FORMAT}" else: # TODO(@goutamvenkat-anyscale): Add a warning if you pass in a custom # filename provider and it isn't templatized. # Use pathlib.Path to properly handle filenames with dots filename_path = Path(filename) stem = filename_path.stem # filename without extension assert "." not in stem, "Filename should not contain a dot" suffix = filename_path.suffix # extension including the dot basename_template = f"{stem}-{{i}}{suffix}" return basename_template def _write_parquet_files( self, tables: List["pyarrow.Table"], filename: str, output_schema: "pyarrow.Schema", write_uuid: str, write_kwargs: Dict[str, Any], ) -> None: import pyarrow.dataset as ds # Make every incoming batch conform to the final schema *before* writing for idx, table in enumerate(tables): if output_schema and not table.schema.equals(output_schema): table = table.cast(output_schema) tables[idx] = table row_group_size = write_kwargs.pop("row_group_size", None) existing_data_behavior = EXISTING_DATA_BEHAVIOR_MAP.get( self.mode, "overwrite_or_ignore" ) ( min_rows_per_group, max_rows_per_group, max_rows_per_file, ) = choose_row_group_limits( row_group_size, min_rows_per_file=self.min_rows_per_file, max_rows_per_file=self.max_rows_per_file, ) basename_template = self._get_basename_template(filename, write_uuid) ds.write_dataset( data=tables, base_dir=self.path, schema=output_schema, basename_template=basename_template, filesystem=self.filesystem, partitioning=self.partition_cols, format=FILE_FORMAT, existing_data_behavior=existing_data_behavior, partitioning_flavor="hive", use_threads=True, min_rows_per_group=min_rows_per_group, max_rows_per_group=max_rows_per_group, max_rows_per_file=max_rows_per_file, file_options=ds.ParquetFileFormat().make_write_options(**write_kwargs), ) @property def min_rows_per_write(self) -> Optional[int]: return self.min_rows_per_file
ParquetDatasink
python
getsentry__sentry
src/sentry/hybridcloud/rpc/caching/service.py
{ "start": 12470, "end": 12911 }
class ____(RpcService):
    key = "control_caching"
    local_mode = SiloMode.CONTROL

    @classmethod
    def get_local_implementation(cls) -> RpcService:
        from .impl import LocalControlCachingService

        return LocalControlCachingService()

    @rpc_method
    @abc.abstractmethod
    def clear_key(self, *, key: str) -> int:
        pass


control_caching_service = ControlCachingService.create_delegation()
ControlCachingService
python
python-openxml__python-docx
src/docx/image/exceptions.py
{ "start": 55, "end": 162 }
class ____(Exception):
    """The recognized image stream appears to be corrupted."""
InvalidImageStreamError
python
great-expectations__great_expectations
great_expectations/expectations/regex_based_column_map_expectation.py
{ "start": 1577, "end": 3719 }
class ____(ColumnMapMetricProvider):
    """Base class for all RegexColumnMapMetrics.

    RegexColumnMapMetric classes inheriting from RegexColumnMapMetricProvider are ephemeral,
    defined by their `regex` attribute, and registered during the execution of their
    associated RegexColumnMapExpectation.

    Metric Registration Example:

    ```python
    map_metric = RegexBasedColumnMapExpectation.register_metric(
        regex_camel_name='Vowel',
        regex_='^[aeiouyAEIOUY]*$',
    )
    ```

    In some cases, subclasses of MetricProvider, such as RegexColumnMapMetricProvider, will
    already have correct values that may simply be inherited by Metric classes.

    Args:
        regex (str): A valid regex pattern.
        metric_name (str): The name of the registered metric. Must be globally unique in a
            great_expectations installation. Constructed by the `register_metric(...)` function
            during Expectation execution.
        domain_keys (tuple): A tuple of the keys used to determine the domain of the metric.
        condition_value_keys (tuple): A tuple of the keys used to determine the value of the metric.

    ---Documentation---
        - https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_regex_based_column_map_expectations
    """  # noqa: E501 # FIXME CoP

    condition_value_keys = ()

    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        return column.astype(str).str.contains(cls.regex)

    @column_condition_partial(engine=SqlAlchemyExecutionEngine)
    def _sqlalchemy(cls, column, _dialect, **kwargs):
        regex_expression = get_dialect_regex_expression(column, cls.regex, _dialect)

        if regex_expression is None:
            msg = f"Regex is not supported for dialect {_dialect.dialect.name!s}"
            logger.warning(msg)
            raise NotImplementedError(msg)

        return regex_expression

    @column_condition_partial(engine=SparkDFExecutionEngine)
    def _spark(cls, column, **kwargs):
        return column.rlike(cls.regex)
RegexColumnMapMetricProvider
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 539273, "end": 539774 }
class ____(sgqlc.types.Type):
    """Autogenerated return type of CreateMigrationSource"""

    __schema__ = github_schema
    __field_names__ = ("client_mutation_id", "migration_source")
    client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
    """A unique identifier for the client performing the mutation."""

    migration_source = sgqlc.types.Field("MigrationSource", graphql_name="migrationSource")
    """The created migration source."""
CreateMigrationSourcePayload
python
getsentry__sentry
src/sentry/core/endpoints/project_environments.py
{ "start": 965, "end": 3378 }
class ____(ProjectEndpoint):
    publish_status = {
        "GET": ApiPublishStatus.PUBLIC,
    }

    @extend_schema(
        operation_id="List a Project's Environments",
        parameters=[
            GlobalParams.ORG_ID_OR_SLUG,
            GlobalParams.PROJECT_ID_OR_SLUG,
            EnvironmentParams.VISIBILITY,
        ],
        responses={
            200: inline_sentry_response_serializer(
                "ListProjectEnvironments", list[EnvironmentProjectSerializerResponse]
            ),
            400: OpenApiResponse(description="Invalid value for 'visibility'."),
            401: RESPONSE_UNAUTHORIZED,
            403: RESPONSE_FORBIDDEN,
            404: RESPONSE_NOT_FOUND,
        },
        examples=EnvironmentExamples.GET_PROJECT_ENVIRONMENTS,
    )
    def get(self, request: Request, project) -> Response:
        """
        Lists a project's environments.
        """
        queryset = (
            EnvironmentProject.objects.filter(
                project=project,
                # Including the organization_id is necessary for postgres to use indexes
                # efficiently.
                environment__organization_id=project.organization_id,
            )
            .exclude(
                # HACK(mattrobenolt): We don't want to surface the
                # "No Environment" environment to the UI since it
                # doesn't really exist. This might very likely change
                # with new tagstore backend in the future, but until
                # then, we're hiding it since it causes more problems
                # than it's worth.
                environment__name=""
            )
            .select_related("environment")
            .order_by("environment__name")
        )

        visibility = request.GET.get("visibility", "visible")
        if visibility not in environment_visibility_filter_options:
            return Response(
                {
                    "detail": "Invalid value for 'visibility', valid values are: {!r}".format(
                        sorted(environment_visibility_filter_options.keys())
                    )
                },
                status=400,
            )
        add_visibility_filters = environment_visibility_filter_options[visibility]
        queryset = add_visibility_filters(queryset)
        return Response(serialize(list(queryset), request.user))
ProjectEnvironmentsEndpoint
python
getsentry__sentry
src/sentry/codecov/endpoints/sync_repos/serializers.py
{ "start": 113, "end": 1358 }
class ____(serializers.Serializer):
    """
    Serializer for a sync repository response
    """

    isSyncing = serializers.BooleanField()

    def to_representation(self, graphql_response):
        """
        Transform the GraphQL response to the serialized format
        """
        try:
            http_method = self.context.get("http_method") or "UNKNOWN"
            if http_method == "POST":
                data = graphql_response["data"]["syncRepos"]
            else:
                data = graphql_response["data"]["me"]

            response_data = {
                "isSyncing": data["isSyncing"],
            }
            return super().to_representation(response_data)
        except (KeyError, TypeError) as e:
            sentry_sdk.capture_exception(e)
            logger.exception(
                "Error parsing GraphQL response",
                extra={
                    "error": str(e),
                    "endpoint": "sync-repos",
                    "response_keys": (
                        list(graphql_response.keys())
                        if isinstance(graphql_response, dict)
                        else None
                    ),
                },
            )
            raise
SyncReposSerializer
python
django__django
tests/sitemaps_tests/urls/https.py
{ "start": 106, "end": 482 }
class ____(SimpleSitemap):
    protocol = "https"


secure_sitemaps = {
    "simple": HTTPSSitemap,
}

urlpatterns = [
    path("secure/index.xml", views.index, {"sitemaps": secure_sitemaps}),
    path(
        "secure/sitemap-<section>.xml",
        views.sitemap,
        {"sitemaps": secure_sitemaps},
        name="django.contrib.sitemaps.views.sitemap",
    ),
]
HTTPSSitemap
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/call17.py
{ "start": 342, "end": 420 }
class ____(Generic[T_co]):
    def or_else(self, op: object) -> Ok[T_co]: ...
Ok
python
getsentry__sentry
src/sentry/notifications/types.py
{ "start": 8362, "end": 8490 }
class ____:
    is_disabled: bool
    is_active: bool
    has_only_inactive_subscriptions: bool


@dataclass
GroupSubscriptionStatus
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/operators/sagemaker.py
{ "start": 49212, "end": 58948 }
class ____(SageMakerBaseOperator): """ Starts a model training job. After training completes, Amazon SageMaker saves the resulting model artifacts to an Amazon S3 location that you specify. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:SageMakerTrainingOperator` :param config: The configuration necessary to start a training job (templated). For details of the configuration parameter see :py:meth:`SageMaker.Client.create_training_job` :param aws_conn_id: The Airflow connection used for AWS credentials. If this is ``None`` or empty then the default boto3 behaviour is used. If running Airflow in a distributed manner and aws_conn_id is None or empty, then default boto3 configuration would be used (and must be maintained on each worker node). :param region_name: AWS region_name. If not specified then the default boto3 behaviour is used. :param verify: Whether or not to verify SSL certificates. See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html :param wait_for_completion: If wait is set to True, the time interval, in seconds, that the operation waits to check the status of the training job. :param print_log: if the operator should print the cloudwatch log during training :param check_interval: if wait is set to be true, this is the time interval in seconds which the operator will check the status of the training job :param max_attempts: Number of times to poll for query state before returning the current state, defaults to None. :param max_ingestion_time: If wait is set to True, the operation fails if the training job doesn't finish within max_ingestion_time seconds. If you set this parameter to None, the operation does not timeout. :param check_if_job_exists: If set to true, then the operator will check whether a training job already exists for the name in the config. :param action_if_job_exists: Behaviour if the job name already exists. Possible options are "timestamp" (default) and "fail". This is only relevant if check_if_job_exists is True. :param deferrable: Run operator in the deferrable mode. This is only effective if wait_for_completion is set to True. :return Dict: Returns The ARN of the training job created in Amazon SageMaker. """ def __init__( self, *, config: dict, wait_for_completion: bool = True, print_log: bool = True, check_interval: int = CHECK_INTERVAL_SECOND, max_attempts: int | None = None, max_ingestion_time: int | None = None, check_if_job_exists: bool = True, action_if_job_exists: str = "timestamp", deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False), **kwargs, ): super().__init__(config=config, **kwargs) self.wait_for_completion = wait_for_completion self.print_log = print_log self.check_interval = check_interval self.max_attempts = max_attempts or 60 self.max_ingestion_time = max_ingestion_time self.check_if_job_exists = check_if_job_exists if action_if_job_exists in {"timestamp", "fail"}: self.action_if_job_exists = action_if_job_exists else: raise AirflowException( f"Argument action_if_job_exists accepts only 'timestamp' and 'fail'. \ Provided value: '{action_if_job_exists}'." 
) self.deferrable = deferrable self.serialized_training_data: dict def expand_role(self) -> None: """Expand an IAM role name into an ARN.""" if "RoleArn" in self.config: hook = AwsBaseHook(self.aws_conn_id, client_type="iam") self.config["RoleArn"] = hook.expand_role(self.config["RoleArn"]) def _create_integer_fields(self) -> None: """Set fields which should be cast to integers.""" self.integer_fields: list[list[str]] = [ ["ResourceConfig", "InstanceCount"], ["ResourceConfig", "VolumeSizeInGB"], ["StoppingCondition", "MaxRuntimeInSeconds"], ] def execute(self, context: Context) -> dict: self.preprocess_config() if self.check_if_job_exists: self.config["TrainingJobName"] = self._get_unique_job_name( self.config["TrainingJobName"], self.action_if_job_exists == "fail", self.hook.describe_training_job, ) self.log.info("Creating SageMaker training job %s.", self.config["TrainingJobName"]) if self.deferrable and not self.wait_for_completion: self.log.warning( "Setting deferrable to True does not have effect when wait_for_completion is set to False." ) wait_for_completion = self.wait_for_completion if self.deferrable and self.wait_for_completion: # Set wait_for_completion to False so that it waits for the status in the deferred task. wait_for_completion = False response = self.hook.create_training_job( self.config, wait_for_completion=wait_for_completion, print_log=self.print_log, check_interval=self.check_interval, max_ingestion_time=self.max_ingestion_time, ) if response["ResponseMetadata"]["HTTPStatusCode"] != 200: raise AirflowException(f"Sagemaker Training Job creation failed: {response}") if self.deferrable and self.wait_for_completion: description = self.hook.describe_training_job(self.config["TrainingJobName"]) status = description["TrainingJobStatus"] if self.print_log: instance_count = description["ResourceConfig"]["InstanceCount"] last_describe_job_call = time.monotonic() job_already_completed = status not in self.hook.non_terminal_states _, description, last_describe_job_call = self.hook.describe_training_job_with_log( self.config["TrainingJobName"], {}, [], instance_count, LogState.COMPLETE if job_already_completed else LogState.TAILING, description, last_describe_job_call, ) self.log.info(secondary_training_status_message(description, None)) if status in self.hook.failed_states: reason = description.get("FailureReason", "(No reason provided)") raise AirflowException(f"SageMaker job failed because {reason}") if status == "Completed": log_message = f"{self.task_id} completed successfully." 
if self.print_log: billable_seconds = SageMakerHook.count_billable_seconds( training_start_time=description["TrainingStartTime"], training_end_time=description["TrainingEndTime"], instance_count=instance_count, ) log_message = f"Billable seconds: {billable_seconds}\n{log_message}" self.log.info(log_message) return {"Training": serialize(description)} timeout = self.execution_timeout if self.max_ingestion_time: timeout = datetime.timedelta(seconds=self.max_ingestion_time) self.defer( timeout=timeout, trigger=SageMakerTrigger( job_name=self.config["TrainingJobName"], job_type="Training", poke_interval=self.check_interval, max_attempts=self.max_attempts, aws_conn_id=self.aws_conn_id, ), method_name="execute_complete", ) return self.serialize_result(self.config["TrainingJobName"]) def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> dict[str, dict]: validated_event = validate_execute_complete_event(event) if validated_event["status"] != "success": raise AirflowException(f"Error while running job: {validated_event}") self.log.info(validated_event["message"]) return self.serialize_result(validated_event["job_name"]) def serialize_result(self, job_name: str) -> dict[str, dict]: self.serialized_training_data = serialize(self.hook.describe_training_job(job_name)) return {"Training": self.serialized_training_data} def get_openlineage_facets_on_complete(self, task_instance) -> OperatorLineage: """Return OpenLineage data gathered from SageMaker's API response saved by training job.""" from airflow.providers.openlineage.extractors import OperatorLineage inputs = [] outputs = [] try: for input_data in self.serialized_training_data["InputDataConfig"]: inputs.append(self.path_to_s3_dataset(input_data["DataSource"]["S3DataSource"]["S3Uri"])) except KeyError: self.log.exception("Issues extracting inputs.") try: outputs.append( self.path_to_s3_dataset(self.serialized_training_data["ModelArtifacts"]["S3ModelArtifacts"]) ) except KeyError: self.log.exception("Issues extracting inputs.") return OperatorLineage(inputs=inputs, outputs=outputs)
SageMakerTrainingOperator
python
fluentpython__example-code-2e
13-protocol-abc/lotto.py
{ "start": 70, "end": 634 }
class ____(Tombola):

    def __init__(self, iterable):
        self._balls = list(iterable)  # <1>

    def load(self, iterable):
        self._balls.extend(iterable)

    def pick(self):
        try:
            position = random.randrange(len(self._balls))  # <2>
        except ValueError:
            raise LookupError('pick from empty LottoBlower')
        return self._balls.pop(position)  # <3>

    def loaded(self):  # <4>
        return bool(self._balls)

    def inspect(self):  # <5>
        return tuple(self._balls)

# end::LOTTERY_BLOWER[]
LottoBlower
python
pytorch__pytorch
test/distributed/fsdp/test_fsdp_traversal.py
{ "start": 817, "end": 2131 }
class ____(FSDPTest):
    @property
    def world_size(self):
        if torch.torch.accelerator.is_available():
            gpu_cnt = torch.accelerator.device_count()
            if gpu_cnt < 2:
                return gpu_cnt
        return 2

    @skip_if_lt_x_gpu(2)
    def test_fsdp_modules(self):
        nested_wrapped_module = NestedWrappedModule.init(
            self.process_group,
            FSDPInitMode.RECURSIVE,
            DEVICEInitMode.DEVICE_BEFORE,
        )
        modules = FSDP.fsdp_modules(nested_wrapped_module)
        self.assertEqual(
            modules,
            [
                nested_wrapped_module.module.get_submodule("1"),
                nested_wrapped_module.module.get_submodule("1").get_submodule("0"),
                nested_wrapped_module.module.get_submodule("2"),
            ],
        )
        modules = FSDP.fsdp_modules(nested_wrapped_module, root_only=True)
        self.assertEqual(
            modules,
            [
                nested_wrapped_module.module.get_submodule("1"),
                nested_wrapped_module.module.get_submodule("2"),
            ],
        )


devices = ("cuda", "hpu", "xpu")
instantiate_device_type_tests(
    TestTraversal, globals(), only_for=devices, allow_xpu=True
)
if __name__ == "__main__":
    run_tests()
TestTraversal
python
tensorflow__tensorflow
tensorflow/python/feature_column/feature_column_v2_test.py
{ "start": 185702, "end": 216989 }
class ____(test.TestCase, parameterized.TestCase): def test_defaults(self): categorical_column = fc.categorical_column_with_identity( key='aaa', num_buckets=3) embedding_dimension = 2 embedding_column = fc.embedding_column( categorical_column, dimension=embedding_dimension) self.assertIs(categorical_column, embedding_column.categorical_column) self.assertEqual(embedding_dimension, embedding_column.dimension) self.assertEqual('mean', embedding_column.combiner) self.assertIsNone(embedding_column.ckpt_to_load_from) self.assertIsNone(embedding_column.tensor_name_in_ckpt) self.assertIsNone(embedding_column.max_norm) self.assertTrue(embedding_column.trainable) self.assertEqual('aaa_embedding', embedding_column.name) self.assertEqual((embedding_dimension,), embedding_column.variable_shape) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column.parse_example_spec) self.assertTrue(embedding_column._is_v2_column) def test_is_v2_column(self): categorical_column = fc_old._categorical_column_with_identity( key='aaa', num_buckets=3) embedding_dimension = 2 embedding_column = fc.embedding_column( categorical_column, dimension=embedding_dimension) self.assertFalse(embedding_column._is_v2_column) def test_all_constructor_args(self): categorical_column = fc.categorical_column_with_identity( key='aaa', num_buckets=3) embedding_dimension = 2 embedding_column = fc.embedding_column( categorical_column, dimension=embedding_dimension, combiner='my_combiner', initializer=lambda: 'my_initializer', ckpt_to_load_from='my_ckpt', tensor_name_in_ckpt='my_ckpt_tensor', max_norm=42., trainable=False) self.assertIs(categorical_column, embedding_column.categorical_column) self.assertEqual(embedding_dimension, embedding_column.dimension) self.assertEqual('my_combiner', embedding_column.combiner) self.assertEqual('my_ckpt', embedding_column.ckpt_to_load_from) self.assertEqual('my_ckpt_tensor', embedding_column.tensor_name_in_ckpt) self.assertEqual(42., embedding_column.max_norm) self.assertFalse(embedding_column.trainable) self.assertEqual('aaa_embedding', embedding_column.name) self.assertEqual((embedding_dimension,), embedding_column.variable_shape) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column.parse_example_spec) def test_deep_copy(self): categorical_column = fc.categorical_column_with_identity( key='aaa', num_buckets=3) embedding_dimension = 2 original = fc.embedding_column( categorical_column, dimension=embedding_dimension, combiner='my_combiner', initializer=lambda: 'my_initializer', ckpt_to_load_from='my_ckpt', tensor_name_in_ckpt='my_ckpt_tensor', max_norm=42., trainable=False) for embedding_column in (original, copy.deepcopy(original)): self.assertEqual('aaa', embedding_column.categorical_column.name) self.assertEqual(3, embedding_column.categorical_column.num_buckets) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column.categorical_column.parse_example_spec) self.assertEqual(embedding_dimension, embedding_column.dimension) self.assertEqual('my_combiner', embedding_column.combiner) self.assertEqual('my_ckpt', embedding_column.ckpt_to_load_from) self.assertEqual('my_ckpt_tensor', embedding_column.tensor_name_in_ckpt) self.assertEqual(42., embedding_column.max_norm) self.assertFalse(embedding_column.trainable) self.assertEqual('aaa_embedding', embedding_column.name) self.assertEqual((embedding_dimension,), embedding_column.variable_shape) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int64) }, 
embedding_column.parse_example_spec) def test_invalid_initializer(self): categorical_column = fc.categorical_column_with_identity( key='aaa', num_buckets=3) with self.assertRaisesRegex(ValueError, 'initializer must be callable'): fc.embedding_column(categorical_column, dimension=2, initializer='not_fn') def test_parse_example(self): a = fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=('omar', 'stringer', 'marlo')) a_embedded = fc.embedding_column(a, dimension=2) data = example_pb2.Example( features=feature_pb2.Features( feature={ 'aaa': feature_pb2.Feature( bytes_list=feature_pb2.BytesList( value=[b'omar', b'stringer'])) })) features = parsing_ops.parse_example( serialized=[data.SerializeToString()], features=fc.make_parse_example_spec_v2([a_embedded])) self.assertIn('aaa', features) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=[[0, 0], [0, 1]], values=np.array([b'omar', b'stringer'], dtype=np.object_), dense_shape=[1, 2]), self.evaluate(features['aaa'])) def test_transform_feature(self): a = fc.categorical_column_with_identity(key='aaa', num_buckets=3) a_embedded = fc.embedding_column(a, dimension=2) features = { 'aaa': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(0, 1, 0), dense_shape=(2, 2)) } outputs = fc._transform_features_v2(features, [a, a_embedded], None) output_a = outputs[a] output_embedded = outputs[a_embedded] self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value(self, self.evaluate(output_a), self.evaluate(output_embedded)) def test_get_dense_tensor(self): # Inputs. vocabulary_size = 3 sparse_input = sparse_tensor.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] # example 2, ids [] # example 3, ids [1] indices=((0, 0), (1, 0), (1, 4), (3, 0)), values=(2, 0, 1, 1), dense_shape=(4, 5)) # Embedding variable. embedding_dimension = 2 embedding_values = ( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) # id 2 ) def _initializer(shape, dtype, partition_info=None): self.assertAllEqual((vocabulary_size, embedding_dimension), shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return embedding_values # Expected lookup result, using combiner='mean'. expected_lookups = ( # example 0, ids [2], embedding = [7, 11] (7., 11.), # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5] (2., 3.5), # example 2, ids [], embedding = [0, 0] (0., 0.), # example 3, ids [1], embedding = [3, 5] (3., 5.), ) # Build columns. categorical_column = fc.categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) embedding_column = fc.embedding_column( categorical_column, dimension=embedding_dimension, initializer=_initializer) state_manager = _TestStateManager() embedding_column.create_state(state_manager) # Provide sparse input and get dense result. embedding_lookup = embedding_column.get_dense_tensor( fc.FeatureTransformationCache({ 'aaa': sparse_input }), state_manager) # Assert expected embedding variable and lookups. 
if not context.executing_eagerly(): global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) self.assertCountEqual(('embedding_weights:0',), tuple([v.name for v in global_vars])) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllEqual(embedding_values, self.evaluate(global_vars[0])) self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup)) def test_get_dense_tensor_old_categorical(self): # Inputs. vocabulary_size = 3 sparse_input = sparse_tensor.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] # example 2, ids [] # example 3, ids [1] indices=((0, 0), (1, 0), (1, 4), (3, 0)), values=(2, 0, 1, 1), dense_shape=(4, 5)) # Embedding variable. embedding_dimension = 2 embedding_values = ( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) # id 2 ) def _initializer(shape, dtype, partition_info=None): self.assertAllEqual((vocabulary_size, embedding_dimension), shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return embedding_values # Expected lookup result, using combiner='mean'. expected_lookups = ( # example 0, ids [2], embedding = [7, 11] (7., 11.), # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5] (2., 3.5), # example 2, ids [], embedding = [0, 0] (0., 0.), # example 3, ids [1], embedding = [3, 5] (3., 5.), ) # Build columns. categorical_column = fc_old._categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) embedding_column = fc.embedding_column( categorical_column, dimension=embedding_dimension, initializer=_initializer) # Provide sparse input and get dense result. embedding_lookup = embedding_column._get_dense_tensor( fc_old._LazyBuilder({ 'aaa': sparse_input })) # Assert expected embedding variable and lookups. if not context.executing_eagerly(): global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) self.assertCountEqual(('embedding_weights:0',), tuple([v.name for v in global_vars])) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllEqual(embedding_values, self.evaluate(global_vars[0])) self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup)) def test_get_dense_tensor_3d(self): # Inputs. vocabulary_size = 4 sparse_input = sparse_tensor.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] # example 2, ids [] # example 3, ids [1] indices=((0, 0, 0), (1, 1, 0), (1, 1, 4), (3, 0, 0), (3, 1, 2)), values=(2, 0, 1, 1, 2), dense_shape=(4, 2, 5)) # Embedding variable. embedding_dimension = 3 embedding_values = ( (1., 2., 4.), # id 0 (3., 5., 1.), # id 1 (7., 11., 2.), # id 2 (2., 7., 12.) # id 3 ) def _initializer(shape, dtype, partition_info=None): self.assertAllEqual((vocabulary_size, embedding_dimension), shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return embedding_values # Expected lookup result, using combiner='mean'. expected_lookups = ( # example 0, ids [[2], []], embedding = [[7, 11, 2], [0, 0, 0]] ((7., 11., 2.), (0., 0., 0.)), # example 1, ids [[], [0, 1]], embedding # = mean([[], [1, 2, 4] + [3, 5, 1]]) = [[0, 0, 0], [2, 3.5, 2.5]] ((0., 0., 0.), (2., 3.5, 2.5)), # example 2, ids [[], []], embedding = [[0, 0, 0], [0, 0, 0]] ((0., 0., 0.), (0., 0., 0.)), # example 3, ids [[1], [2]], embedding = [[3, 5, 1], [7, 11, 2]] ((3., 5., 1.), (7., 11., 2.)), ) # Build columns. 
categorical_column = fc.categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) embedding_column = fc.embedding_column( categorical_column, dimension=embedding_dimension, initializer=_initializer) state_manager = _TestStateManager() embedding_column.create_state(state_manager) # Provide sparse input and get dense result. embedding_lookup = embedding_column.get_dense_tensor( fc.FeatureTransformationCache({ 'aaa': sparse_input }), state_manager) # Assert expected embedding variable and lookups. if not context.executing_eagerly(): global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) self.assertCountEqual(('embedding_weights:0',), tuple([v.name for v in global_vars])) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllEqual(embedding_values, self.evaluate(global_vars[0])) self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup)) def test_get_dense_tensor_placeholder_inputs(self): # This test explicitly checks graph placeholders, so we need to # be in a graph with ops.Graph().as_default(): # Inputs. vocabulary_size = 3 sparse_input = sparse_tensor.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] # example 2, ids [] # example 3, ids [1] indices=((0, 0), (1, 0), (1, 4), (3, 0)), values=(2, 0, 1, 1), dense_shape=(4, 5)) # Embedding variable. embedding_dimension = 2 embedding_values = ( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) # id 2 ) def _initializer(shape, dtype, partition_info=None): self.assertAllEqual((vocabulary_size, embedding_dimension), shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return embedding_values # Expected lookup result, using combiner='mean'. expected_lookups = ( # example 0, ids [2], embedding = [7, 11] (7., 11.), # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5] (2., 3.5), # example 2, ids [], embedding = [0, 0] (0., 0.), # example 3, ids [1], embedding = [3, 5] (3., 5.), ) # Build columns. categorical_column = fc.categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) embedding_column = fc.embedding_column( categorical_column, dimension=embedding_dimension, initializer=_initializer) state_manager = _TestStateManager() embedding_column.create_state(state_manager) # Provide sparse input and get dense result. input_indices = array_ops.placeholder(dtype=dtypes.int64) input_values = array_ops.placeholder(dtype=dtypes.int64) input_shape = array_ops.placeholder(dtype=dtypes.int64) embedding_lookup = embedding_column.get_dense_tensor( fc.FeatureTransformationCache({ 'aaa': sparse_tensor.SparseTensorValue( indices=input_indices, values=input_values, dense_shape=input_shape) }), state_manager) # Assert expected embedding variable and lookups. global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) self.assertCountEqual(('embedding_weights:0',), tuple([v.name for v in global_vars])) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) with _initialized_session(): self.assertAllEqual(embedding_values, self.evaluate(global_vars[0])) self.assertAllEqual( expected_lookups, embedding_lookup.eval( feed_dict={ input_indices: sparse_input.indices, input_values: sparse_input.values, input_shape: sparse_input.dense_shape, })) def test_get_dense_tensor_restore_from_ckpt(self): # Inputs. 
vocabulary_size = 3 sparse_input = sparse_tensor.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] # example 2, ids [] # example 3, ids [1] indices=((0, 0), (1, 0), (1, 4), (3, 0)), values=(2, 0, 1, 1), dense_shape=(4, 5)) # Embedding variable. The checkpoint file contains _embedding_values. embedding_dimension = 2 embedding_values = ( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) # id 2 ) ckpt_path = test.test_src_dir_path( 'python/feature_column/testdata/embedding.ckpt') ckpt_tensor = 'my_embedding' # Expected lookup result, using combiner='mean'. expected_lookups = ( # example 0, ids [2], embedding = [7, 11] (7., 11.), # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5] (2., 3.5), # example 2, ids [], embedding = [0, 0] (0., 0.), # example 3, ids [1], embedding = [3, 5] (3., 5.), ) # Build columns. categorical_column = fc.categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) embedding_column = fc.embedding_column( categorical_column, dimension=embedding_dimension, ckpt_to_load_from=ckpt_path, tensor_name_in_ckpt=ckpt_tensor) state_manager = _TestStateManager() embedding_column.create_state(state_manager) # Provide sparse input and get dense result. embedding_lookup = embedding_column.get_dense_tensor( fc.FeatureTransformationCache({ 'aaa': sparse_input }), state_manager) # Assert expected embedding variable and lookups. if not context.executing_eagerly(): global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) self.assertCountEqual(('embedding_weights:0',), tuple([v.name for v in global_vars])) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllEqual(embedding_values, self.evaluate(global_vars[0])) self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup)) def test_input_layer(self): # Inputs. vocabulary_size = 3 sparse_input = sparse_tensor.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] # example 2, ids [] # example 3, ids [1] indices=((0, 0), (1, 0), (1, 4), (3, 0)), values=(2, 0, 1, 1), dense_shape=(4, 5)) # Embedding variable. embedding_dimension = 2 embedding_values = ( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) # id 2 ) def _initializer(shape, dtype, partition_info=None): self.assertAllEqual((vocabulary_size, embedding_dimension), shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return embedding_values # Expected lookup result, using combiner='mean'. expected_lookups = ( # example 0, ids [2], embedding = [7, 11] (7., 11.), # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5] (2., 3.5), # example 2, ids [], embedding = [0, 0] (0., 0.), # example 3, ids [1], embedding = [3, 5] (3., 5.), ) # Build columns. categorical_column = fc.categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) embedding_column = fc.embedding_column( categorical_column, dimension=embedding_dimension, initializer=_initializer) # Provide sparse input and get dense result. feature_layer = fc_old.input_layer({ 'aaa': sparse_input }, (embedding_column,)) if not context.executing_eagerly(): # Assert expected embedding variable and lookups. 
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) self.assertCountEqual(('input_layer/aaa_embedding/embedding_weights:0',), tuple([v.name for v in global_vars])) trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES) self.assertCountEqual(('input_layer/aaa_embedding/embedding_weights:0',), tuple([v.name for v in trainable_vars])) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllEqual(embedding_values, self.evaluate(trainable_vars[0])) self.assertAllEqual(expected_lookups, self.evaluate(feature_layer)) def test_old_linear_model(self): # Inputs. batch_size = 4 vocabulary_size = 3 sparse_input = sparse_tensor.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] # example 2, ids [] # example 3, ids [1] indices=((0, 0), (1, 0), (1, 4), (3, 0)), values=(2, 0, 1, 1), dense_shape=(batch_size, 5)) # Embedding variable. embedding_dimension = 2 embedding_shape = (vocabulary_size, embedding_dimension) zeros_embedding_values = np.zeros(embedding_shape) def _initializer(shape, dtype, partition_info=None): self.assertAllEqual(embedding_shape, shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return zeros_embedding_values # Build columns. categorical_column = fc.categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) embedding_column = fc.embedding_column( categorical_column, dimension=embedding_dimension, initializer=_initializer) with ops.Graph().as_default(): predictions = fc_old.linear_model({ categorical_column.name: sparse_input }, (embedding_column,)) expected_var_names = ( 'linear_model/bias_weights:0', 'linear_model/aaa_embedding/weights:0', 'linear_model/aaa_embedding/embedding_weights:0', ) self.assertCountEqual( expected_var_names, [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)]) trainable_vars = { v.name: v for v in ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES) } self.assertCountEqual(expected_var_names, trainable_vars.keys()) bias = trainable_vars['linear_model/bias_weights:0'] embedding_weights = trainable_vars[ 'linear_model/aaa_embedding/embedding_weights:0'] linear_weights = trainable_vars['linear_model/aaa_embedding/weights:0'] self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) # Predictions with all zero weights. self.assertAllClose(np.zeros((1,)), self.evaluate(bias)) self.assertAllClose(zeros_embedding_values, self.evaluate(embedding_weights)) self.assertAllClose( np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights)) self.assertAllClose(np.zeros((batch_size, 1)), self.evaluate(predictions)) # Predictions with all non-zero weights. self.evaluate( embedding_weights.assign(( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) # id 2 ))) self.evaluate(linear_weights.assign(((4.,), (6.,)))) # example 0, ids [2], embedding[0] = [7, 11] # example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5] # example 2, ids [], embedding[2] = [0, 0] # example 3, ids [1], embedding[3] = [3, 5] # sum(embeddings * linear_weights) # = [4*7 + 6*11, 4*2 + 6*3.5, 4*0 + 6*0, 4*3 + 6*5] = [94, 29, 0, 42] self.assertAllClose(((94.,), (29.,), (0.,), (42.,)), self.evaluate(predictions)) def test_old_linear_model_old_categorical(self): # Inputs. 
batch_size = 4 vocabulary_size = 3 sparse_input = sparse_tensor.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] # example 2, ids [] # example 3, ids [1] indices=((0, 0), (1, 0), (1, 4), (3, 0)), values=(2, 0, 1, 1), dense_shape=(batch_size, 5)) # Embedding variable. embedding_dimension = 2 embedding_shape = (vocabulary_size, embedding_dimension) zeros_embedding_values = np.zeros(embedding_shape) def _initializer(shape, dtype, partition_info=None): self.assertAllEqual(embedding_shape, shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return zeros_embedding_values # Build columns. categorical_column = fc_old._categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) embedding_column = fc.embedding_column( categorical_column, dimension=embedding_dimension, initializer=_initializer) with ops.Graph().as_default(): predictions = fc_old.linear_model({ categorical_column.name: sparse_input }, (embedding_column,)) expected_var_names = ( 'linear_model/bias_weights:0', 'linear_model/aaa_embedding/weights:0', 'linear_model/aaa_embedding/embedding_weights:0', ) self.assertCountEqual( expected_var_names, [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)]) trainable_vars = { v.name: v for v in ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES) } self.assertCountEqual(expected_var_names, trainable_vars.keys()) bias = trainable_vars['linear_model/bias_weights:0'] embedding_weights = trainable_vars[ 'linear_model/aaa_embedding/embedding_weights:0'] linear_weights = trainable_vars['linear_model/aaa_embedding/weights:0'] self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) # Predictions with all zero weights. self.assertAllClose(np.zeros((1,)), self.evaluate(bias)) self.assertAllClose(zeros_embedding_values, self.evaluate(embedding_weights)) self.assertAllClose( np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights)) self.assertAllClose(np.zeros((batch_size, 1)), self.evaluate(predictions)) # Predictions with all non-zero weights. self.evaluate( embedding_weights.assign(( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) # id 2 ))) self.evaluate(linear_weights.assign(((4.,), (6.,)))) # example 0, ids [2], embedding[0] = [7, 11] # example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5] # example 2, ids [], embedding[2] = [0, 0] # example 3, ids [1], embedding[3] = [3, 5] # sum(embeddings * linear_weights) # = [4*7 + 6*11, 4*2 + 6*3.5, 4*0 + 6*0, 4*3 + 6*5] = [94, 29, 0, 42] self.assertAllClose(((94.,), (29.,), (0.,), (42.,)), self.evaluate(predictions)) def test_serialization_with_default_initializer(self): # Build columns. 
categorical_column = fc.categorical_column_with_identity( key='aaa', num_buckets=3) embedding_column = fc.embedding_column(categorical_column, dimension=2) self.assertEqual([categorical_column], embedding_column.parents) config = embedding_column.get_config() self.assertEqual( { 'categorical_column': { 'class_name': 'IdentityCategoricalColumn', 'config': { 'number_buckets': 3, 'key': 'aaa', 'default_value': None } }, 'ckpt_to_load_from': None, 'combiner': 'mean', 'dimension': 2, 'initializer': { 'class_name': 'TruncatedNormal', 'config': { 'dtype': 'float32', 'stddev': 0.7071067811865475, 'seed': None, 'mean': 0.0 } }, 'max_norm': None, 'tensor_name_in_ckpt': None, 'trainable': True, 'use_safe_embedding_lookup': True }, config) new_embedding_column = fc.EmbeddingColumn.from_config( config, custom_objects=None) self.assertEqual(embedding_column.get_config(), new_embedding_column.get_config()) self.assertIsNot(categorical_column, new_embedding_column.categorical_column) new_embedding_column = fc.EmbeddingColumn.from_config( config, custom_objects=None, columns_by_name={ serialization._column_name_with_class_name(categorical_column): categorical_column }) self.assertEqual(embedding_column.get_config(), new_embedding_column.get_config()) self.assertIs(categorical_column, new_embedding_column.categorical_column) def test_serialization_with_custom_initializer(self): def _initializer(shape, dtype, partition_info=None): del shape, dtype, partition_info return ValueError('Not expected to be called') # Build columns. categorical_column = fc.categorical_column_with_identity( key='aaa', num_buckets=3) embedding_column = fc.embedding_column( categorical_column, dimension=2, initializer=_initializer) self.assertEqual([categorical_column], embedding_column.parents) config = embedding_column.get_config() self.assertEqual( { 'categorical_column': { 'class_name': 'IdentityCategoricalColumn', 'config': { 'number_buckets': 3, 'key': 'aaa', 'default_value': None } }, 'ckpt_to_load_from': None, 'combiner': 'mean', 'dimension': 2, 'initializer': '_initializer', 'max_norm': None, 'tensor_name_in_ckpt': None, 'trainable': True, 'use_safe_embedding_lookup': True }, config) custom_objects = { '_initializer': _initializer, } # use_safe_embedding_lookup might not be populated for legacy reasons. del config['use_safe_embedding_lookup'] new_embedding_column = fc.EmbeddingColumn.from_config( config, custom_objects=custom_objects) self.assertEqual(embedding_column, new_embedding_column) self.assertIsNot(categorical_column, new_embedding_column.categorical_column) new_embedding_column = fc.EmbeddingColumn.from_config( config, custom_objects=custom_objects, columns_by_name={ serialization._column_name_with_class_name(categorical_column): categorical_column }) self.assertEqual(embedding_column, new_embedding_column) self.assertIs(categorical_column, new_embedding_column.categorical_column)
EmbeddingColumnTest
python
davidhalter__jedi
test/completion/pep0526_variables.py
{ "start": 1873, "end": 1921 }
class ____:
    name: int = 1

#? int()
DC().name
DC
python
pytorch__pytorch
torch/utils/_pytree.py
{ "start": 17129, "end": 23497 }
class ____: value: Any def _is_constant_holder(spec: "TreeSpec") -> bool: """Checks if the spec is from a pytree registered with register_constant""" return isinstance(spec._context, ConstantNode) def _retrieve_constant(spec: "TreeSpec") -> Any: """Given a spec from a pytree registered with register_constant, retrieves the constant""" if not _is_constant_holder(spec): raise AssertionError("spec does not correspond to a registered constant pytree") return tree_unflatten([], spec) def _register_namedtuple( cls: type[Any], *, serialized_type_name: str, ) -> None: """ Registers a namedtuple as a valid pytree node. By default namedtuples are valid pytree nodes, but they are not serializable. This API provides the argument `serialized_type_name` which allows these namedtuples to be serialized. Args: cls: the dataclass type to register serialized_type_name: The serialized name for the dataclass. This is required if you want to serialize the pytree TreeSpec containing this namedtuple. """ _private_register_pytree_node( cls, _namedtuple_flatten, _namedtuple_unflatten, serialized_type_name=serialized_type_name, to_dumpable_context=_namedtuple_serialize, from_dumpable_context=_namedtuple_deserialize, flatten_with_keys_fn=_namedtuple_flatten_with_keys, ) @deprecated( "`torch.utils._pytree._register_pytree_node` is deprecated. " "Please use `torch.utils._pytree.register_pytree_node` instead.", category=FutureWarning, ) def _register_pytree_node( cls: type[Any], flatten_fn: FlattenFunc, unflatten_fn: UnflattenFunc, to_str_fn: ToStrFunc | None = None, # deprecated maybe_from_str_fn: MaybeFromStrFunc | None = None, # deprecated *, serialized_type_name: str | None = None, to_dumpable_context: ToDumpableContextFn | None = None, from_dumpable_context: FromDumpableContextFn | None = None, flatten_with_keys_fn: FlattenWithKeysFunc | None = None, ) -> None: """Register a container-like type as pytree node for the Python pytree only. Args: cls: the type to register flatten_fn: A callable that takes a pytree and returns a flattened representation of the pytree and additional context to represent the flattened pytree. unflatten_fn: A callable that takes a flattened version of the pytree, additional context, and returns an unflattened pytree. serialized_type_name: A keyword argument used to specify the fully qualified name used when serializing the tree spec. to_dumpable_context: An optional keyword argument to custom specify how to convert the context of the pytree to a custom json dumpable representation. This is used for json serialization, which is being used in torch.export right now. from_dumpable_context: An optional keyword argument to custom specify how to convert the custom json dumpable representation of the context back to the original context. This is used for json deserialization, which is being used in torch.export right now. flatten_with_keys_fn: An optional keyword argument to specify how to access each pytree leaf's keypath when flattening and tree-mapping. Like ``flatten_fn``, but in place of a List[leaf], it should return a List[(keypath, leaf)]. """ if to_str_fn is not None or maybe_from_str_fn is not None: warnings.warn( "`to_str_fn` and `maybe_from_str_fn` is deprecated. 
" "Please use `to_dumpable_context` and `from_dumpable_context` instead.", FutureWarning, stacklevel=2, ) _private_register_pytree_node( cls, flatten_fn, unflatten_fn, serialized_type_name=serialized_type_name, to_dumpable_context=to_dumpable_context, from_dumpable_context=from_dumpable_context, flatten_with_keys_fn=flatten_with_keys_fn, ) def _deregister_pytree_node( cls: type[Any], ) -> None: """This is an internal function that is used to deregister a pytree node type for the Python pytree only. This should be only used inside PyTorch. """ with _NODE_REGISTRY_LOCK: del SUPPORTED_NODES[cls] node_def = SUPPORTED_SERIALIZED_TYPES[cls] del SERIALIZED_TYPE_TO_PYTHON_TYPE[node_def.serialized_type_name] del SUPPORTED_SERIALIZED_TYPES[cls] CONSTANT_NODES.discard(cls) def _private_register_pytree_node( cls: type[Any], flatten_fn: FlattenFunc, unflatten_fn: UnflattenFunc, *, serialized_type_name: str | None = None, to_dumpable_context: ToDumpableContextFn | None = None, from_dumpable_context: FromDumpableContextFn | None = None, flatten_with_keys_fn: FlattenWithKeysFunc | None = None, ) -> None: """This is an internal function that is used to register a pytree node type for the Python pytree only. End-users should use :func:`register_pytree_node` instead. """ with _NODE_REGISTRY_LOCK: if cls in SUPPORTED_NODES: # TODO: change this warning to an error after OSS/internal stabilize warnings.warn( f"{cls} is already registered as pytree node. " "Overwriting the previous registration.", stacklevel=2, ) node_def = NodeDef(cls, flatten_fn, unflatten_fn, flatten_with_keys_fn) SUPPORTED_NODES[cls] = node_def if (to_dumpable_context is None) ^ (from_dumpable_context is None): raise ValueError( f"Both to_dumpable_context and from_dumpable_context for {cls} must " "be None or registered." ) if serialized_type_name is None: serialized_type_name = NO_SERIALIZED_TYPE_NAME_FOUND serialize_node_def = _SerializeNodeDef( cls, serialized_type_name, to_dumpable_context, from_dumpable_context, ) SUPPORTED_SERIALIZED_TYPES[cls] = serialize_node_def SERIALIZED_TYPE_TO_PYTHON_TYPE[serialized_type_name] = cls @dataclasses.dataclass(frozen=True)
ConstantNode
python
sqlalchemy__sqlalchemy
test/orm/test_merge.py
{ "start": 62165, "end": 63027 }
class ____(fixtures.MappedTest):
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "data",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("data", PickleType(comparator=operator.eq)),
        )

    @classmethod
    def setup_classes(cls):
        class Data(cls.Basic):
            pass

    def test_list(self):
        Data, data = self.classes.Data, self.tables.data

        self.mapper_registry.map_imperatively(Data, data)
        sess = fixture_session()
        d = Data(data=["this", "is", "a", "list"])

        sess.add(d)
        sess.commit()

        d2 = Data(id=d.id, data=["this", "is", "another", "list"])
        d3 = sess.merge(d2)
        eq_(d3.data, ["this", "is", "another", "list"])
MutableMergeTest
python
pytorch__pytorch
test/dynamo/cpython/3_13/test_heapq.py
{ "start": 1058, "end": 1499 }
class ____(__TestCase):

    def test_py_functions(self):
        for fname in func_names:
            self.assertEqual(getattr(py_heapq, fname).__module__, 'heapq')

    @skipUnless(c_heapq, 'requires _heapq')
    def test_c_functions(self):
        for fname in func_names:
            self.assertEqual(getattr(c_heapq, fname).__module__, '_heapq')


@torch._dynamo.disable
def randrange(*args):
    return random.randrange(*args)
TestModules
python
Textualize__textual
docs/examples/styles/scrollbar_size2.py
{ "start": 448, "end": 816 }
class ____(App):
    CSS_PATH = "scrollbar_size2.tcss"

    def compose(self):
        yield Horizontal(
            ScrollableContainer(Label(TEXT * 5), id="v1"),
            ScrollableContainer(Label(TEXT * 5), id="v2"),
            ScrollableContainer(Label(TEXT * 5), id="v3"),
        )


if __name__ == "__main__":
    app = ScrollbarApp()
    app.run()
ScrollbarApp
python
ApeWorX__ape
src/ape/cli/paramtype.py
{ "start": 848, "end": 1117 }
class ____(click.ParamType):
    """
    A param-type for ignoring param-types.
    Good to use when the multi-type handling happens already
    in a callback or in the command itself.
    """

    def convert(self, value: Any, param, ctx) -> Any:
        return value
Noop
python
ray-project__ray
python/ray/serve/llm/deployment.py
{ "start": 444, "end": 2233 }
class ____(InternalLLMServer):
    """The implementation of the vLLM engine deployment.

    To build a Deployment object you should use `build_llm_deployment` function.
    We also expose a lower level API for more control over the deployment class
    through `serve.deployment` function.

    Examples:
        .. testcode::
            :skipif: True

            from ray import serve
            from ray.serve.llm import LLMConfig
            from ray.serve.llm.deployment import LLMServer

            # Configure the model
            llm_config = LLMConfig(
                model_loading_config=dict(
                    served_model_name="llama-3.1-8b",
                    model_source="meta-llama/Llama-3.1-8b-instruct",
                ),
                deployment_config=dict(
                    autoscaling_config=dict(
                        min_replicas=1,
                        max_replicas=8,
                    )
                ),
            )

            # Build the deployment directly
            serve_options = LLMServer.get_deployment_options(llm_config)
            llm_app = serve.deployment(LLMServer).options(
                **serve_options).bind(llm_config)
            model_handle = serve.run(llm_app)

            # Query the model via `chat` api
            from ray.serve.llm.openai_api_models import ChatCompletionRequest
            request = ChatCompletionRequest(
                model="llama-3.1-8b",
                messages=[
                    {
                        "role": "user",
                        "content": "Hello, world!"
                    }
                ]
            )

            response = ray.get(model_handle.chat(request))
            print(response)
    """

    pass


@PublicAPI(stability="alpha")
LLMServer
python
django__django
tests/queries/models.py
{ "start": 16650, "end": 16906 }
class ____(models.Model):
    name = models.CharField(max_length=20)
    has_blackboard = models.BooleanField(null=True)
    school = models.ForeignKey(School, models.CASCADE)
    students = models.ManyToManyField(Student, related_name="classroom")
Classroom
python
walkccc__LeetCode
solutions/1261. Find Elements in a Contaminated Binary Tree/1261.py
{ "start": 0, "end": 387 }
class ____:
  def __init__(self, root: TreeNode | None):
    self.vals = set()
    self.dfs(root, 0)

  def find(self, target: int) -> bool:
    return target in self.vals

  def dfs(self, root: TreeNode | None, val: int) -> None:
    if not root:
      return
    root.val = val
    self.vals.add(val)
    self.dfs(root.left, val * 2 + 1)
    self.dfs(root.right, val * 2 + 2)
FindElements
python
pytorch__pytorch
test/inductor/test_multi_kernel.py
{ "start": 2498, "end": 12887 }
class ____(TestCase): def test_softmax(self, expect_multi_kernel=True): x = torch.rand(2, 1024).to(GPU_TYPE) ref = torch.softmax(x, -1) compiled_fn = torch.compile(torch.softmax) act, wrapper_code = run_and_get_code(compiled_fn, x, -1) # wrapper_code will contains 2 entries if cpp_wrapper=True. # One for the first pass and one for the second pass. # We mainly care about the wrapper for the final pass here. wrapper_code = wrapper_code[-1] self.assertEqual(ref, act) if expect_multi_kernel: self.assertTrue(_contains_multi_kernel_code(wrapper_code)) else: self.assertFalse(_contains_multi_kernel_code(wrapper_code)) @requires_triton() # TODO: bobrenjc93 to fix multi-kernel for ROCM @skipIfRocm @unittest.skipIf(not IS_BIG_GPU, "templates require big gpu") @skipIfXpu(msg="https://github.com/intel/torch-xpu-ops/issues/2295") def test_triton_gemm(self): def fn(x, y): return x @ y compiled_fn = torch.compile( fn, options={ "max_autotune": True, "max_autotune_gemm_backends": "TRITON", }, ) x = torch.randn(4096, 4096, device=GPU_TYPE) y = torch.randn(4096, 4096, device=GPU_TYPE) torch._dynamo.mark_dynamic(x, 0) act, wrapper_code = run_and_get_code(compiled_fn, x, y) ref = fn(x, y) # wrapper_code will contains 2 entries if cpp_wrapper=True. # One for the first pass and one for the second pass. # We mainly care about the wrapper for the final pass here. wrapper_code = wrapper_code[-1] self.assertEqual(ref, act) self.assertTrue(_contains_size_hint_multi_kernel_code(wrapper_code)) @skipIfXpu(msg="https://github.com/intel/torch-xpu-ops/issues/2295") @requires_triton() # TODO: bobrenjc93 to fix multi-kernel for ROCM @skipIfRocm @unittest.skipIf(not IS_BIG_GPU, "templates require big gpu") def test_triton_relu_fused_gemm(self): def fn(x, y): return (x @ y).relu() compiled_fn = torch.compile( fn, options={ "max_autotune": True, "max_autotune_gemm_backends": "TRITON", }, ) x = torch.randn(4096, 4096, device=GPU_TYPE) y = torch.randn(4096, 4096, device=GPU_TYPE) torch._dynamo.mark_dynamic(x, 0) act, wrapper_code = run_and_get_code(compiled_fn, x, y) ref = fn(x, y) # wrapper_code will contains 2 entries if cpp_wrapper=True. # One for the first pass and one for the second pass. # We mainly care about the wrapper for the final pass here. wrapper_code = wrapper_code[-1] self.assertEqual(ref, act) self.assertTrue(_contains_size_hint_multi_kernel_code(wrapper_code)) @parametrize("force_kernel", (0, 1)) @unittest.mock.patch.dict( os.environ, {"TORCHINDUCTOR_DISABLE_MULTI_KERNEL_CACHE": "1"} ) def test_softmax_force_non_persistent_reduction(self, force_kernel): """ Force a specific sub-kernel being picked by mocking the benchmark result. 
""" x = torch.rand(2, 1024).to(GPU_TYPE) mock_latency = [0.2, 0.2] mock_latency[force_kernel] = 0.1 # this make sure force_kernel will be picked def f(x): return torch.softmax(x, -1) + force_kernel orig_run = MultiKernelCall.run picked_kernel = None def mock_run(self, *args, **kwargs): out = orig_run(self, *args, **kwargs) nonlocal picked_kernel picked_kernel = self.picked_kernel return out with ( unittest.mock.patch.object(MultiKernelCall, "run", mock_run), unittest.mock.patch.object( MultiKernelCall, "benchmark_sub_kernels", lambda *args, **kwargs: mock_latency, ), ): torch.compile(f)(x) self.assertEqual(picked_kernel, force_kernel) @config.patch("warn_mix_layout", True) def test_softmax_warn_mixed_layout(self): self.test_softmax() test_softmax_cpp_wrapper = make_cpp_wrapper_test( test_softmax, expect_multi_kernel=True ) def test_layernorm(self): ln = nn.LayerNorm(1024).to(GPU_TYPE) x = torch.rand(2, 1024).to(GPU_TYPE) ref = ln(x) act = torch.compile(ln)(x) self.assertEqual(ref, act, atol=1e-4, rtol=1e-4) def test_inplace_update(self): """ Inductor generate inplace kernel for mul. """ def f(x, y): return x.sum(dim=-1, keepdims=True) * (y @ y) x = torch.rand(1024, 1024).to(GPU_TYPE) y = torch.rand(1024, 1024).to(GPU_TYPE) ref = f(x, y) act = torch.compile(f)(x, y) self.assertEqual(ref, act) def test_transformer_snippet(self): model = TransformerSnippet().to(GPU_TYPE) x = model.example_inputs() def f(*x): y = model(*x) return y reset_rng_state() ref = f(*x) opt_f = torch.compile(f) reset_rng_state() act = opt_f(*x) # don't compare tensor if using inductor random number generator. # inductor random number implementation is different to eager. # We should fallback to eager if we want to test accuracy. if config.fallback_random: self.assertEqual(ref, act, atol=1e-4, rtol=1e-4) def test_transformer_snippet_with_fallback_random(self): """ Same as test_transformer_snippet but fallback the random number generator to eager so we can check accuracy. """ with config.patch("fallback_random", True): self.test_transformer_snippet() def test_batchnorm_training(self): """ For training, batchnorm will tracking running mean/variance during forward pass. The kernel generated by inductor currently will pass in those tensors twice as arguments: once for input and once for output. They are ruled out as in-out argument because they are considered as graph inputs. Multi-kernel previously assumes that we never pass the same argument multi times for a kernel. No matter if we change inductor behavior to assure that, it's better to make multi-kernel being able to handle those cases. """ bn = nn.BatchNorm2d(3).to(GPU_TYPE) @torch.compile def f(x): bn(x).sum().backward() _, (wrapper_code, _) = run_and_get_code( f, torch.randn(2, 3, 8, 8, device=GPU_TYPE) ) self.assertTrue(_contains_multi_kernel_code(wrapper_code)) def test_pass_same_arg_multi_times(self): """ A super simple example that simulate how BatchNorm update the running stats. Inductor currently pass the same tensor multiple times for the generated kernel: once for input and once for output. 
Here is a paster for the generated kernel (without multi-kernel enabled): https://gist.github.com/shunting314/f0b446b4b9a28f4940e31dcd3e809cf9 """ def f(x, y): x = x.sum(dim=1, keepdim=False) y.copy_(y * 0.9 + x * 0.1) x = torch.randn(8, 16, device=GPU_TYPE) y = torch.randn(8, device=GPU_TYPE) y_ref = y.clone() ref = f(x, y_ref) # noqa: F841 act = torch.compile(f)(x, y) # noqa: F841 self.assertEqual(y_ref, y) def test_reduction_scratch_buffer(self, force_multi_kernel=1): """ The explicitly realized buffer in the test function will be passed in as a scratch buffer for the non-persistent reduction kernel but can be skipped for the persistent reduction kernel. This causes different argument lists for non-persistent reduction kernel and persistent reduction kernel. Check documentation around torch._inductor.config.triton.multi_kernel about how to interpret the force_multi_kernel argument. """ def f(x): x = x.sum(dim=-1, keepdim=True) + x x = test_operators.realize(x) x = x.sum(dim=-1, keepdim=True) + x return x x = torch.rand(16, 16, device=GPU_TYPE) ref = f(x) with config.patch("triton.multi_kernel", force_multi_kernel): act = torch.compile(f)(x) self.assertEqual(ref, act) def test_split_scan(self, force_multi_kernel=1): def f(x): x = x.view(-1) return torch.cumsum(x, 0) x = make_tensor(10, 3, 352, 352, low=0, dtype=torch.float32, device=GPU_TYPE) expect = f(x) with config.patch("triton.multi_kernel", force_multi_kernel): actual = torch.compile(f)(x) self.assertEqual(expect, actual) def test_sort_disables_multi_kernel(self, force_multi_kernel=1): """ Sort currently requires a persistent kernel, so multi-kernel is not possible. Make sure this falls back gracefully. """ def f(x): return x.sort(-1).values x = torch.rand(32, 32, device=GPU_TYPE) expect = f(x) with config.patch("triton.multi_kernel", force_multi_kernel): actual = torch.compile(f)(x) self.assertEqual(expect, actual) # Use benchmarking to pick the faster kernel test_reduction_scratch_buffer_cpp_wrapper = make_cpp_wrapper_test( test_reduction_scratch_buffer, force_multi_kernel=1 ) # force pick persistent reduction. This can be a good test since this persistent # reduction uses less call arguments than the corresponding non-persistent # reduction. test_reduction_scratch_buffer_cpp_wrapper_persistent_reduction = ( make_cpp_wrapper_test(test_reduction_scratch_buffer, force_multi_kernel=2) ) # force pick non-persistent reduction test_reduction_scratch_buffer_cpp_wrapper_non_persistent_reduction = ( make_cpp_wrapper_test(test_reduction_scratch_buffer, force_multi_kernel=3) ) if __name__ == "__main__": from torch._inductor.test_case import run_tests if HAS_GPU: run_tests()
MultiKernelTest
python
scipy__scipy
scipy/stats/tests/test_distributions.py
{ "start": 416767, "end": 418860 }
class ____:
    def test_against_R(self):
        # Test against R implementation in `distributionsrd`
        # library(distributionsrd)
        # options(digits=16)
        # x = 1.1
        # b = 2
        # a = 1.5
        # m = 3
        # s = 1.2
        # ddoubleparetolognormal(x, b, a, m, s)
        # pdoubleparetolognormal(x, b, a, m, s)
        x, m, s, a, b = 1.1, 3, 1.2, 1.5, 2
        dist = stats.dpareto_lognorm(m, s, a, b)
        np.testing.assert_allclose(dist.pdf(x), 0.02490187219085912)
        np.testing.assert_allclose(dist.cdf(x), 0.01664024173822796)


# Cases are (distribution name, log10 of smallest probability mass to test,
# log10 of the complement of the largest probability mass to test, atol,
# rtol). None uses default values.
@pytest.mark.parametrize("case", [("kappa3", None, None, None, None),
                                  ("loglaplace", None, None, None, None),
                                  ("lognorm", None, None, None, None),
                                  ("lomax", None, None, None, None),
                                  ("pareto", None, None, None, None),])
def test_sf_isf_overrides(case):
    # Test that SF is the inverse of ISF. Supplements
    # `test_continuous_basic.check_sf_isf` for distributions with overridden
    # `sf` and `isf` methods.
    distname, lp1, lp2, atol, rtol = case
    lpm = np.log10(0.5)  # log10 of the probability mass at the median
    lp1 = lp1 or -290
    lp2 = lp2 or -14
    atol = atol or 0
    rtol = rtol or 1e-12
    dist = getattr(stats, distname)
    params = dict(distcont)[distname]
    dist_frozen = dist(*params)

    # Test (very deep) right tail to median. We can benchmark with random
    # (loguniform) points, but strictly logspaced points are fine for tests.
    ref = np.logspace(lp1, lpm)
    res = dist_frozen.sf(dist_frozen.isf(ref))
    assert_allclose(res, ref, atol=atol, rtol=rtol)

    # test median to left tail
    ref = 1 - np.logspace(lp2, lpm, 20)
    res = dist_frozen.sf(dist_frozen.isf(ref))
    assert_allclose(res, ref, atol=atol, rtol=rtol)
TestDParetoLognorm
python
google__pytype
pytype/tools/analyze_project/pytype_runner_test.py
{ "start": 18174, "end": 21588 }
class ____(TestBase): """Tests for PytypeRunner.write_build_statement.""" def write_build_statement(self, *args, **kwargs): conf = self.parser.config_from_defaults() with test_utils.Tempdir() as d: conf.output = d.path runner = make_runner([], [], conf) output = runner.write_build_statement(*args, **kwargs) with open(runner.ninja_file) as f: return runner, output, f.read().splitlines() def assertOutputMatches(self, module, expected_output): runner, output, _ = self.write_build_statement( module, Action.CHECK, set(), 'imports', '' ) self.assertEqual(output, path_utils.join(runner.pyi_dir, expected_output)) def test_check(self): _, output, build_statement = self.write_build_statement( Module('', 'foo.py', 'foo'), Action.CHECK, set(), 'imports', '' ) self.assertEqual( build_statement[0], f'build {pytype_runner.escape_ninja_path(output)}: check foo.py', ) def test_infer(self): _, output, build_statement = self.write_build_statement( Module('', 'foo.py', 'foo'), Action.INFER, set(), 'imports', '' ) self.assertEqual( build_statement[0], f'build {pytype_runner.escape_ninja_path(output)}: infer foo.py', ) def test_deps(self): _, output, _ = self.write_build_statement( Module('', 'foo.py', 'foo'), Action.INFER, set(), 'imports', '' ) _, _, build_statement = self.write_build_statement( Module('', 'bar.py', 'bar'), Action.CHECK, {output}, 'imports', '' ) expected_suffix = ' | ' + pytype_runner.escape_ninja_path(output) self.assertTrue( build_statement[0].endswith(expected_suffix), f'\n{build_statement[0]!r}\ndoes not end with\n{expected_suffix!r}', ) def test_imports(self): _, _, build_statement = self.write_build_statement( Module('', 'foo.py', 'foo'), Action.CHECK, set(), 'imports', '' ) self.assertIn(' imports = imports', build_statement) def test_module(self): _, _, build_statement = self.write_build_statement( Module('', 'foo.py', 'foo'), Action.CHECK, set(), 'imports', '' ) self.assertIn(' module = foo', build_statement) def test_suffix(self): runner, output, _ = self.write_build_statement( Module('', 'foo.py', 'foo'), Action.CHECK, set(), 'imports', '-1' ) self.assertEqual(path_utils.join(runner.pyi_dir, 'foo.pyi-1'), output) def test_hidden_dir(self): self.assertOutputMatches( Module('', file_utils.replace_separator('.foo/bar.py'), '.foo.bar'), path_utils.join('.foo', 'bar.pyi'), ) def test_hidden_file(self): self.assertOutputMatches( Module('', file_utils.replace_separator('foo/.bar.py'), 'foo..bar'), path_utils.join('foo', '.bar.pyi'), ) def test_hidden_file_with_path_prefix(self): self.assertOutputMatches( Module('', file_utils.replace_separator('foo/.bar.py'), '.bar'), path_utils.join('.bar.pyi'), ) def test_hidden_dir_with_path_mismatch(self): self.assertOutputMatches( Module('', file_utils.replace_separator('symlinked/foo.py'), '.bar'), '.bar.pyi', ) def test_path_mismatch(self): self.assertOutputMatches( Module('', file_utils.replace_separator('symlinked/foo.py'), 'bar.baz'), path_utils.join('bar', 'baz.pyi'), )
TestNinjaBuildStatement
python
openai__openai-python
src/openai/types/beta/assistant_stream_event.py
{ "start": 2436, "end": 2656 }
class ____(BaseModel):
    data: Run
    """
    Represents an execution run on a
    [thread](https://platform.openai.com/docs/api-reference/threads).
    """

    event: Literal["thread.run.completed"]
ThreadRunCompleted
python
kamyu104__LeetCode-Solutions
Python/permutations.py
{ "start": 34, "end": 709 }
class ____(object):
    # @param num, a list of integer
    # @return a list of lists of integers
    def permute(self, num):
        result = []
        used = [False] * len(num)
        self.permuteRecu(result, used, [], num)
        return result

    def permuteRecu(self, result, used, cur, num):
        if len(cur) == len(num):
            result.append(cur[:])
            return
        for i in xrange(len(num)):
            if not used[i]:
                used[i] = True
                cur.append(num[i])
                self.permuteRecu(result, used, cur, num)
                cur.pop()
                used[i] = False


# Time: O(n^2 * n!)
# Space: O(n^2)
Solution
python
keras-team__keras
keras/src/backend/common/backend_utils_test.py
{ "start": 579, "end": 1606 }
class ____(test_case.TestCase):
    def test_valid_padding_without_output_padding(self):
        """Test conversion with 'valid' padding and no output padding"""
        (
            left_pad,
            right_pad,
        ) = _convert_conv_transpose_padding_args_from_keras_to_jax(
            kernel_size=3,
            stride=2,
            dilation_rate=1,
            padding="valid",
            output_padding=None,
        )
        self.assertEqual(left_pad, 2)
        self.assertEqual(right_pad, 2)

    def test_same_padding_without_output_padding(self):
        """Test conversion with 'same' padding and no output padding."""
        (
            left_pad,
            right_pad,
        ) = _convert_conv_transpose_padding_args_from_keras_to_jax(
            kernel_size=3,
            stride=2,
            dilation_rate=1,
            padding="same",
            output_padding=None,
        )
        self.assertEqual(left_pad, 2)
        self.assertEqual(right_pad, 1)
ConvertConvTransposePaddingArgsJAXTest
python
encode__django-rest-framework
tests/test_model_serializer.py
{ "start": 18270, "end": 18408 }
class ____(json.JSONDecoder):
    pass


@pytest.mark.skipif(not hasattr(models, 'JSONField'), reason='no models.JSONField')
CustomJSONDecoder
python
pytorch__pytorch
torch/ao/pruning/scheduler/cubic_scheduler.py
{ "start": 168, "end": 3875 }
class ____(BaseScheduler):
    r"""Sets the sparsity level of each parameter group to the final sl
    plus a given exponential function.

    .. math::

        s_i = s_f + (s_0 - s_f) \cdot \left( 1 - \frac{t - t_0}{n\Delta t} \right)^3

    where :math:`s_i` is the sparsity at epoch :math:`t`, :math;`s_f` is the final
    sparsity level, :math:`f(i)` is the function to be applied to the current epoch
    :math:`t`, initial epoch :math:`t_0`, and final epoch :math:`t_f`.
    :math:`\Delta t` is used to control how often the update of the sparsity level
    happens. By default,

    Args:
        sparsifier (BaseSparsifier): Wrapped sparsifier.
        init_sl (int, list): Initial level of sparsity
        init_t (int, list): Initial step, when pruning starts
        delta_t (int, list): Pruning frequency
        total_t (int, list): Total number of pruning steps
        initially_zero (bool, list): If True, sets the level of sparsity to 0
            before init_t (:math:`t_0`). Otherwise, the sparsity level before
            init_t (:math:`t_0`) is set to init_sl(:math:`s_0`)
        last_epoch (int): The index of last epoch. Default: -1.
        verbose (bool): If ``True``, prints a message to stdout for each update.
            Default: ``False``.
    """

    def __init__(
        self,
        sparsifier,
        init_sl=0.0,
        init_t=0,
        delta_t=10,
        total_t=100,
        initially_zero=False,
        last_epoch=-1,
        verbose=False,
    ):
        self.sparsifier = sparsifier
        self.init_sl = self._make_sure_a_list(init_sl)
        self.init_t = self._make_sure_a_list(init_t)
        self.delta_t = self._make_sure_a_list(delta_t)
        self.total_t = self._make_sure_a_list(total_t)
        self.initially_zero = self._make_sure_a_list(initially_zero)
        super().__init__(sparsifier, last_epoch, verbose)

    @staticmethod
    def sparsity_compute_fn(s_0, s_f, t, t_0, dt, n, initially_zero=False):
        r""""Computes the current level of sparsity.

        Based on https://arxiv.org/pdf/1710.01878.pdf

        Args:
            s_0: Initial level of sparsity, :math:`s_i`
            s_f: Target level of sparsity, :math:`s_f`
            t: Current step, :math:`t`
            t_0: Initial step, :math:`t_0`
            dt: Pruning frequency, :math:`\Delta T`
            n: Pruning steps, :math:`n`
            initially_zero: Sets the level of sparsity to 0 before t_0.
                If False, sets to s_0

        Returns:
            The sparsity level :math:`s_t` at the current step :math:`t`
        """
        if initially_zero and t < t_0:
            return 0
        s_t = s_f + (s_0 - s_f) * (1.0 - (t - t_0) / (dt * n)) ** 3
        s_t = _clamp(s_t, s_0, s_f)
        return s_t

    def get_sl(self):
        if not self._get_sl_called_within_step:
            warnings.warn(
                "To get the last sparsity level computed by the scheduler, "
                "please use `get_last_sl()`.",
                stacklevel=2,
            )
        return [
            self.sparsity_compute_fn(
                s_0=initial_sparsity,
                s_f=final_sparsity,
                t=self.last_epoch,
                t_0=initial_epoch,
                dt=delta_epoch,
                n=interval_epochs,
                initially_zero=initially_zero,
            )
            for initial_sparsity, final_sparsity, initial_epoch, delta_epoch, interval_epochs, initially_zero in zip(
                self.init_sl,
                self.base_sl,
                self.init_t,
                self.delta_t,
                self.total_t,
                self.initially_zero,
            )
        ]
CubicSL
python
scrapy__scrapy
tests/CrawlerProcess/reactor_select_subclass_twisted_reactor_select.py
{ "start": 196, "end": 311 }
class ____(SelectReactor):
    pass


reactor = SelectReactorSubclass()
installReactor(reactor)
SelectReactorSubclass
python
tensorflow__tensorflow
tensorflow/python/eager/backprop.py
{ "start": 24812, "end": 50300 }
class ____: """Record operations for automatic differentiation. Operations are recorded if they are executed within this context manager and at least one of their inputs is being "watched". Trainable variables (created by `tf.Variable` or `tf.compat.v1.get_variable`, where `trainable=True` is default in both cases) are automatically watched. Tensors can be manually watched by invoking the `watch` method on this context manager. For example, consider the function `y = x * x`. The gradient at `x = 3.0` can be computed as: >>> x = tf.constant(3.0) >>> with tf.GradientTape() as g: ... g.watch(x) ... y = x * x >>> dy_dx = g.gradient(y, x) >>> print(dy_dx) tf.Tensor(6.0, shape=(), dtype=float32) GradientTapes can be nested to compute higher-order derivatives. For example, >>> x = tf.constant(5.0) >>> with tf.GradientTape() as g: ... g.watch(x) ... with tf.GradientTape() as gg: ... gg.watch(x) ... y = x * x ... dy_dx = gg.gradient(y, x) # dy_dx = 2 * x >>> d2y_dx2 = g.gradient(dy_dx, x) # d2y_dx2 = 2 >>> print(dy_dx) tf.Tensor(10.0, shape=(), dtype=float32) >>> print(d2y_dx2) tf.Tensor(2.0, shape=(), dtype=float32) By default, the resources held by a GradientTape are released as soon as GradientTape.gradient() method is called. To compute multiple gradients over the same computation, create a persistent gradient tape. This allows multiple calls to the gradient() method as resources are released when the tape object is garbage collected. For example: >>> x = tf.constant(3.0) >>> with tf.GradientTape(persistent=True) as g: ... g.watch(x) ... y = x * x ... z = y * y >>> dz_dx = g.gradient(z, x) # (4*x^3 at x = 3) >>> print(dz_dx) tf.Tensor(108.0, shape=(), dtype=float32) >>> dy_dx = g.gradient(y, x) >>> print(dy_dx) tf.Tensor(6.0, shape=(), dtype=float32) By default GradientTape will automatically watch any trainable variables that are accessed inside the context. If you want fine grained control over which variables are watched you can disable automatic tracking by passing `watch_accessed_variables=False` to the tape constructor: >>> x = tf.Variable(2.0) >>> w = tf.Variable(5.0) >>> with tf.GradientTape( ... watch_accessed_variables=False, persistent=True) as tape: ... tape.watch(x) ... y = x ** 2 # Gradients will be available for `x`. ... z = w ** 3 # No gradients will be available as `w` isn't being watched. >>> dy_dx = tape.gradient(y, x) >>> print(dy_dx) tf.Tensor(4.0, shape=(), dtype=float32) >>> # No gradients will be available as `w` isn't being watched. >>> dz_dw = tape.gradient(z, w) >>> print(dz_dw) None Note that when using models you should ensure that your variables exist when using `watch_accessed_variables=False`. Otherwise it's quite easy to make your first iteration not have any gradients: ```python a = tf.keras.layers.Dense(32) b = tf.keras.layers.Dense(32) with tf.GradientTape(watch_accessed_variables=False) as tape: tape.watch(a.variables) # Since `a.build` has not been called at this point # `a.variables` will return an empty list and the # tape will not be watching anything. result = b(a(inputs)) tape.gradient(result, a.variables) # The result of this computation will be # a list of `None`s since a's variables # are not being watched. ``` Note that only tensors with real or complex dtypes are differentiable. """ def __init__(self, persistent=False, watch_accessed_variables=True): """Creates a new GradientTape. Args: persistent: Boolean controlling whether a persistent gradient tape is created. 
False by default, which means at most one call can be made to the gradient() method on this object. watch_accessed_variables: Boolean controlling whether the tape will automatically `watch` any (trainable) variables accessed while the tape is active. Defaults to True meaning gradients can be requested from any result computed in the tape derived from reading a trainable `Variable`. If False users must explicitly `watch` any `Variable`s they want to request gradients from. """ self._tape = None self._persistent = persistent self._watch_accessed_variables = watch_accessed_variables self._watched_variables = () self._recording = False def __enter__(self): """Enters a context inside which operations are recorded on this tape.""" self._push_tape() return self def __exit__(self, typ, value, traceback): """Exits the recording context, no further operations are traced.""" if self._recording: self._pop_tape() def _push_tape(self): """Pushes a new tape onto the tape stack.""" if self._recording: raise ValueError("Tape is still recording, This can happen if you try to " "re-enter an already-active tape.") if self._tape is None: self._tape = tape.push_new_tape( persistent=self._persistent, watch_accessed_variables=self._watch_accessed_variables) else: tape.push_tape(self._tape) self._recording = True def _pop_tape(self): if not self._recording: raise ValueError("Tape is not recording.") tape.pop_tape(self._tape) self._recording = False @tf_contextlib.contextmanager def _ensure_recording(self): """Ensures that this tape is recording.""" if not self._recording: try: self._push_tape() yield finally: self._pop_tape() else: yield # TODO(b/209081027): Add a variable in composite tensor test case after # variables become composite tensors. def watch(self, tensor): """Ensures that `tensor` is being traced by this tape. Args: tensor: a Tensor/Variable or list of Tensors/Variables. Raises: ValueError: if it encounters something that is not a tensor. """ for t in _extract_tensors_and_variables(tensor): if not backprop_util.IsTrainable(t): logging.log_first_n( logging.WARN, "The dtype of the watched tensor must be " "floating (e.g. tf.float32), got %r", 5, t.dtype) if hasattr(t, "handle"): # There are many variable-like objects, all of them currently have # `handle` attribute that points to a tensor. If this changes, # internals of watch_variable need to change as well. tape.watch_variable(self._tape, t) else: tape.watch(self._tape, t) @tf_contextlib.contextmanager def stop_recording(self): """Temporarily stops recording operations on this tape. Operations executed while this context manager is active will not be recorded on the tape. This is useful for reducing the memory used by tracing all computations. For example: >>> x = tf.constant(4.0) >>> with tf.GradientTape() as tape: ... with tape.stop_recording(): ... y = x ** 2 >>> dy_dx = tape.gradient(y, x) >>> print(dy_dx) None Yields: None Raises: RuntimeError: if the tape is not currently recording. """ if self._tape is None: raise RuntimeError( "Trying to stop recording a tape which is not recording.") self._pop_tape() try: yield finally: self._push_tape() def reset(self): """Clears all information stored in this tape. Equivalent to exiting and reentering the tape context manager with a new tape. For example, the two following code blocks are equivalent: ``` with tf.GradientTape() as t: loss = loss_fn() with tf.GradientTape() as t: loss += other_loss_fn() t.gradient(loss, ...) 
# Only differentiates other_loss_fn, not loss_fn # The following is equivalent to the above with tf.GradientTape() as t: loss = loss_fn() t.reset() loss += other_loss_fn() t.gradient(loss, ...) # Only differentiates other_loss_fn, not loss_fn ``` This is useful if you don't want to exit the context manager for the tape, or can't because the desired reset point is inside a control flow construct: ``` with tf.GradientTape() as t: loss = ... if loss > k: t.reset() ``` """ self._pop_tape() self._tape = None self._push_tape() def watched_variables(self): """Returns variables watched by this tape in order of construction.""" if self._tape is not None: self._watched_variables = self._tape.watched_variables() return self._watched_variables def gradient(self, target, sources, output_gradients=None, unconnected_gradients=UnconnectedGradients.NONE): """Computes the gradient using operations recorded in context of this tape. Note: Unless you set `persistent=True` a GradientTape can only be used to compute one set of gradients (or jacobians). In addition to Tensors, gradient also supports RaggedTensors. For example, >>> x = tf.ragged.constant([[1.0, 2.0], [3.0]]) >>> with tf.GradientTape() as g: ... g.watch(x) ... y = x * x >>> g.gradient(y, x) <tf.RaggedTensor [[2.0, 4.0], [6.0]]> Args: target: a list or nested structure of Tensors or Variables or CompositeTensors to be differentiated. sources: a list or nested structure of Tensors or Variables or CompositeTensors. `target` will be differentiated against elements in `sources`. output_gradients: a list of gradients, one for each differentiable element of target. Defaults to None. unconnected_gradients: a value which can either hold 'none' or 'zero' and alters the value which will be returned if the target and sources are unconnected. The possible values and effects are detailed in 'UnconnectedGradients' and it defaults to 'none'. Returns: a list or nested structure of Tensors (or IndexedSlices, or None, or CompositeTensor), one for each element in `sources`. Returned structure is the same as the structure of `sources`. Raises: RuntimeError: If called on a used, non-persistent tape. RuntimeError: If called inside the context of the tape. TypeError: If the target is a None object. ValueError: If the target is a variable or if unconnected gradients is called with an unknown value. """ if self._tape is None: raise RuntimeError("A non-persistent GradientTape can only be used to " "compute one set of gradients (or jacobians)") if self._recording: if not self._persistent: self._pop_tape() else: logging.log_first_n( logging.WARN, "Calling GradientTape.gradient on a persistent " "tape inside its context is significantly less " "efficient than calling it outside the context (it " "causes the gradient ops to be recorded on the " "tape, leading to increased CPU and memory usage). " "Only call GradientTape.gradient inside the " "context if you actually want to trace the " "gradient in order to compute higher order " "derivatives.", 1) if target is None: raise TypeError("Argument `target` should be a list or nested structure" " of Tensors, Variables or CompositeTensors to be " "differentiated, but received None.") flat_targets = composite_tensor_gradient.get_flat_tensors_for_gradients( nest.flatten(target)) # TODO(b/246997907): Remove this once # ResourceVariableGradient.get_gradient_components returns the handle. 
flat_targets = nest.map_structure(_handle_or_self, flat_targets) for t in flat_targets: if not backprop_util.IsTrainable(t): logging.vlog( 1, "The dtype of the target tensor must be " "floating (e.g. tf.float32) when calling GradientTape.gradient, " "got %r", t.dtype) flat_sources_raw = nest.flatten(sources) flat_sources = [] for t in flat_sources_raw: flat_sources.append(_handle_or_self(t)) flat_sources = composite_tensor_gradient.get_flat_tensors_for_gradients( flat_sources) for t in flat_sources: if not backprop_util.IsTrainable(t): logging.vlog( 1, "The dtype of the source tensor must be " "floating (e.g. tf.float32) when calling GradientTape.gradient, " "got %r", t.dtype) if getattr(t, "is_packed", False): raise ValueError( "GradientTape.gradient is not supported on packed EagerTensors yet." ) if output_gradients is not None: output_gradients = nest.flatten( variable_utils.convert_variables_to_tensors(output_gradients)) output_gradients = ( composite_tensor_gradient.get_flat_tensors_for_gradients( output_gradients)) output_gradients = [None if x is None else ops.convert_to_tensor(x) for x in output_gradients] flat_grad = imperative_grad.imperative_grad( self._tape, flat_targets, flat_sources, output_gradients=output_gradients, sources_raw=flat_sources_raw, unconnected_gradients=unconnected_gradients) if not self._persistent: # Keep track of watched variables before setting tape to None self._watched_variables = self._tape.watched_variables() self._tape = None flat_sources_raw = nest.map_structure(_handle_or_self, flat_sources_raw) flat_grad = composite_tensor_gradient.replace_flat_tensors_for_gradients( flat_sources_raw, flat_grad) grad = nest.pack_sequence_as(sources, flat_grad) return grad def jacobian(self, target, sources, unconnected_gradients=UnconnectedGradients.NONE, parallel_iterations=None, experimental_use_pfor=True): """Computes the jacobian using operations recorded in context of this tape. Note: Unless you set `persistent=True` a GradientTape can only be used to compute one set of gradients (or jacobians). Note: By default the jacobian implementation uses parallel for (pfor), which creates a tf.function under the hood for each jacobian call. For better performance, and to avoid recompilation and vectorization rewrites on each call, enclose GradientTape code in @tf.function. See[wikipedia article](http://en.wikipedia.org/wiki/jacobian_matrix_and_determinant) for the definition of a Jacobian. Example usage: ```python with tf.GradientTape() as g: x = tf.constant([1.0, 2.0]) g.watch(x) y = x * x jacobian = g.jacobian(y, x) # jacobian value is [[2., 0.], [0., 4.]] ``` Args: target: Tensor to be differentiated. sources: a list or nested structure of Tensors or Variables. `target` will be differentiated against elements in `sources`. unconnected_gradients: a value which can either hold 'none' or 'zero' and alters the value which will be returned if the target and sources are unconnected. The possible values and effects are detailed in 'UnconnectedGradients' and it defaults to 'none'. parallel_iterations: A knob to control how many iterations are dispatched in parallel. This knob can be used to control the total memory usage. experimental_use_pfor: If true, vectorizes the jacobian computation. Else falls back to a sequential while_loop. Vectorization can sometimes fail or lead to excessive memory usage. This option can be used to disable vectorization in such cases. Returns: A list or nested structure of Tensors (or None), one for each element in `sources`. 
Returned structure is the same as the structure of `sources`. Note if any gradient is sparse (IndexedSlices), jacobian function currently makes it dense and returns a Tensor instead. This may change in the future. Raises: RuntimeError: If called on a used, non-persistent tape. RuntimeError: If called on a non-persistent tape with eager execution enabled and without enabling experimental_use_pfor. ValueError: If vectorization of jacobian computation fails. """ if self._tape is None: raise RuntimeError("A non-persistent GradientTape can only be used to " "compute one set of gradients (or jacobians)") flat_sources = nest.flatten(sources) target_static_shape = target.shape target_shape = array_ops.shape(target) # Note that we push and pop the tape here and below. This is needed since we # need gradients through the enclosed operations. with self._ensure_recording(): target = array_ops.reshape(target, [-1]) def loop_fn(i): with self._ensure_recording(): y = array_ops.gather(target, i) return self.gradient(y, flat_sources, unconnected_gradients=unconnected_gradients) try: target_size = int(target.shape[0]) except TypeError: target_size = array_ops.shape(target)[0] if experimental_use_pfor: try: output = pfor_ops.pfor(loop_fn, target_size, parallel_iterations=parallel_iterations) except ValueError as err: raise ValueError( "Encountered an exception while vectorizing the " "jacobian computation. Vectorization can be disabled by setting" " experimental_use_pfor to False.") from err else: if context.executing_eagerly() and not self._persistent: raise RuntimeError( "GradientTape must be created with persistent=True" " to compute the jacobian with eager execution enabled and with " " experimental_use_pfor set to False.") output = pfor_ops.for_loop( loop_fn, [target.dtype] * len(flat_sources), target_size, parallel_iterations=parallel_iterations) for i, out in enumerate(output): if out is not None: new_shape = array_ops.concat( [target_shape, array_ops.shape(out)[1:]], axis=0) out = array_ops.reshape(out, new_shape) if context.executing_eagerly(): out.set_shape(target_static_shape.concatenate(flat_sources[i].shape)) output[i] = out return nest.pack_sequence_as(sources, output) def batch_jacobian(self, target, source, unconnected_gradients=UnconnectedGradients.NONE, parallel_iterations=None, experimental_use_pfor=True): """Computes and stacks per-example jacobians. See [wikipedia article](http://en.wikipedia.org/wiki/jacobian_matrix_and_determinant) for the definition of a Jacobian. This function is essentially an efficient implementation of the following: `tf.stack([self.jacobian(y[i], x[i]) for i in range(x.shape[0])])`. Note that compared to `GradientTape.jacobian` which computes gradient of each output value w.r.t each input value, this function is useful when `target[i,...]` is independent of `source[j,...]` for `j != i`. This assumption allows more efficient computation as compared to `GradientTape.jacobian`. The output, as well as intermediate activations, are lower dimensional and avoid a bunch of redundant zeros which would result in the jacobian computation given the independence assumption. Note: Unless you set `persistent=True` a GradientTape can only be used to compute one set of gradients (or jacobians). Note: By default the batch_jacobian implementation uses parallel for (pfor), which creates a tf.function under the hood for each batch_jacobian call. For better performance, and to avoid recompilation and vectorization rewrites on each call, enclose GradientTape code in @tf.function. 
Example usage: ```python with tf.GradientTape() as g: x = tf.constant([[1., 2.], [3., 4.]], dtype=tf.float32) g.watch(x) y = x * x batch_jacobian = g.batch_jacobian(y, x) # batch_jacobian is [[[2, 0], [0, 4]], [[6, 0], [0, 8]]] ``` Args: target: A tensor with rank 2 or higher and with shape [b, y1, ..., y_n]. `target[i,...]` should only depend on `source[i,...]`. source: A tensor with rank 2 or higher and with shape [b, x1, ..., x_m]. unconnected_gradients: a value which can either hold 'none' or 'zero' and alters the value which will be returned if the target and sources are unconnected. The possible values and effects are detailed in 'UnconnectedGradients' and it defaults to 'none'. parallel_iterations: A knob to control how many iterations are dispatched in parallel. This knob can be used to control the total memory usage. experimental_use_pfor: If true, uses pfor for computing the Jacobian. Else uses a tf.while_loop. Returns: A tensor `t` with shape [b, y_1, ..., y_n, x1, ..., x_m] where `t[i, ...]` is the jacobian of `target[i, ...]` w.r.t. `source[i, ...]`, i.e. stacked per-example jacobians. Raises: RuntimeError: If called on a used, non-persistent tape. RuntimeError: If called on a non-persistent tape with eager execution enabled and without enabling experimental_use_pfor. ValueError: If vectorization of jacobian computation fails or if first dimension of `target` and `source` do not match. """ if self._tape is None: raise RuntimeError("A non-persistent GradientTape can only be used to" "compute one set of gradients (or jacobians)") target_shape = target.shape if target_shape.rank is None: dim = tensor_shape.Dimension(None) else: dim = target_shape.dims[0] if not (target_shape.with_rank_at_least(2) and source.shape.with_rank_at_least(2) and dim.is_compatible_with(source.shape[0])): raise ValueError( "Need first dimension of target shape (%s) and " "source shape (%s) to match." % (target.shape, source.shape)) if target_shape.is_fully_defined(): batch_size = int(target_shape[0]) target_row_size = target_shape.num_elements() // batch_size else: target_shape = array_ops.shape(target) batch_size = target_shape[0] target_row_size = array_ops.size(target) // batch_size source_shape = array_ops.shape(source) # Flatten target to 2-D. # Note that we push and pop the tape here and below. This is needed since we # need gradients through the enclosed operations. with self._ensure_recording(): with ops.control_dependencies( [check_ops.assert_equal(batch_size, source_shape[0])]): target = array_ops.reshape(target, [batch_size, target_row_size]) run_once = False def loop_fn(i): nonlocal run_once if run_once and not self._persistent: if parallel_iterations is not None: raise RuntimeError( "GradientTape must be created with persistent=True" " to compute the batch_jacobian with parallel_iterations.") else: raise RuntimeError( "GradientTape must be created with persistent=True" " to compute the batch_jacobian.") run_once = True with self._ensure_recording(): y = array_ops.gather(target, i, axis=1) return self.gradient(y, source, unconnected_gradients=unconnected_gradients) if experimental_use_pfor: try: output = pfor_ops.pfor(loop_fn, target_row_size, parallel_iterations=parallel_iterations) except ValueError as err: raise ValueError( "Encountered an exception while vectorizing the " "batch_jacobian computation. 
Vectorization can be disabled by " "setting experimental_use_pfor to False.") from err else: if context.executing_eagerly() and not self._persistent: raise RuntimeError( "GradientTape must be created with persistent=True" " to compute the batch_jacobian with eager execution enabled and " " with experimental_use_pfor set to False.") output = pfor_ops.for_loop(loop_fn, target.dtype, target_row_size, parallel_iterations=parallel_iterations) new_shape = array_ops.concat([target_shape, source_shape[1:]], axis=0) if output is None: # Note that this block is returning zeros when it could use `None` to # represent unconnected gradients. This is to maintain compatibility with # the previous behavior, which ignored `unconnected_gradients`. output = array_ops.zeros(new_shape, target.dtype) return output else: output = array_ops.reshape(output, [target_row_size, batch_size, -1]) output = array_ops.transpose(output, [1, 0, 2]) output = array_ops.reshape(output, new_shape) return output
GradientTape
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/pool/base.py
{ "start": 40872, "end": 52337 }
class ____(PoolProxiedConnection): """Proxies a DBAPI connection and provides return-on-dereference support. This is an internal object used by the :class:`_pool.Pool` implementation to provide context management to a DBAPI connection delivered by that :class:`_pool.Pool`. The public facing interface for this class is described by the :class:`.PoolProxiedConnection` class. See that class for public API details. The name "fairy" is inspired by the fact that the :class:`._ConnectionFairy` object's lifespan is transitory, as it lasts only for the length of a specific DBAPI connection being checked out from the pool, and additionally that as a transparent proxy, it is mostly invisible. .. seealso:: :class:`.PoolProxiedConnection` :class:`.ConnectionPoolEntry` """ __slots__ = ( "dbapi_connection", "_connection_record", "_echo", "_pool", "_counter", "__weakref__", "__dict__", ) pool: Pool dbapi_connection: DBAPIConnection _echo: log._EchoFlagType def __init__( self, pool: Pool, dbapi_connection: DBAPIConnection, connection_record: _ConnectionRecord, echo: log._EchoFlagType, ): self._pool = pool self._counter = 0 self.dbapi_connection = dbapi_connection self._connection_record = connection_record self._echo = echo _connection_record: Optional[_ConnectionRecord] @property def driver_connection(self) -> Optional[Any]: # type: ignore[override] # mypy#4125 # noqa: E501 if self._connection_record is None: return None return self._connection_record.driver_connection @property @util.deprecated( "2.0", "The _ConnectionFairy.connection attribute is deprecated; " "please use 'driver_connection'", ) def connection(self) -> DBAPIConnection: return self.dbapi_connection @classmethod def _checkout( cls, pool: Pool, threadconns: Optional[threading.local] = None, fairy: Optional[_ConnectionFairy] = None, ) -> _ConnectionFairy: if not fairy: fairy = _ConnectionRecord.checkout(pool) if threadconns is not None: threadconns.current = weakref.ref(fairy) assert ( fairy._connection_record is not None ), "can't 'checkout' a detached connection fairy" assert ( fairy.dbapi_connection is not None ), "can't 'checkout' an invalidated connection fairy" fairy._counter += 1 if ( not pool.dispatch.checkout and not pool._pre_ping ) or fairy._counter != 1: return fairy # Pool listeners can trigger a reconnection on checkout, as well # as the pre-pinger. # there are three attempts made here, but note that if the database # is not accessible from a connection standpoint, those won't proceed # here. 
attempts = 2 while attempts > 0: connection_is_fresh = fairy._connection_record.fresh fairy._connection_record.fresh = False try: if pool._pre_ping: if not connection_is_fresh: if fairy._echo: pool.logger.debug( "Pool pre-ping on connection %s", fairy.dbapi_connection, ) result = pool._dialect._do_ping_w_event( fairy.dbapi_connection ) if not result: if fairy._echo: pool.logger.debug( "Pool pre-ping on connection %s failed, " "will invalidate pool", fairy.dbapi_connection, ) raise exc.InvalidatePoolError() elif fairy._echo: pool.logger.debug( "Connection %s is fresh, skipping pre-ping", fairy.dbapi_connection, ) pool.dispatch.checkout( fairy.dbapi_connection, fairy._connection_record, fairy ) return fairy except exc.DisconnectionError as e: if e.invalidate_pool: pool.logger.info( "Disconnection detected on checkout, " "invalidating all pooled connections prior to " "current timestamp (reason: %r)", e, ) fairy._connection_record.invalidate(e) pool._invalidate(fairy, e, _checkin=False) else: pool.logger.info( "Disconnection detected on checkout, " "invalidating individual connection %s (reason: %r)", fairy.dbapi_connection, e, ) fairy._connection_record.invalidate(e) try: fairy.dbapi_connection = ( fairy._connection_record.get_connection() ) except BaseException as err: with util.safe_reraise(): fairy._connection_record._checkin_failed( err, _fairy_was_created=True, ) # prevent _ConnectionFairy from being carried # in the stack trace. Do this after the # connection record has been checked in, so that # if the del triggers a finalize fairy, it won't # try to checkin a second time. del fairy # never called, this is for code linters raise attempts -= 1 except BaseException as be_outer: with util.safe_reraise(): rec = fairy._connection_record if rec is not None: rec._checkin_failed( be_outer, _fairy_was_created=True, ) # prevent _ConnectionFairy from being carried # in the stack trace, see above del fairy # never called, this is for code linters raise pool.logger.info("Reconnection attempts exhausted on checkout") fairy.invalidate() raise exc.InvalidRequestError("This connection is closed") def _checkout_existing(self) -> _ConnectionFairy: return _ConnectionFairy._checkout(self._pool, fairy=self) def _checkin(self, transaction_was_reset: bool = False) -> None: _finalize_fairy( self.dbapi_connection, self._connection_record, self._pool, None, self._echo, transaction_was_reset=transaction_was_reset, fairy=self, ) def _close(self) -> None: self._checkin() def _reset( self, pool: Pool, transaction_was_reset: bool, terminate_only: bool, asyncio_safe: bool, ) -> None: if pool.dispatch.reset: pool.dispatch.reset( self.dbapi_connection, self._connection_record, PoolResetState( transaction_was_reset=transaction_was_reset, terminate_only=terminate_only, asyncio_safe=asyncio_safe, ), ) if not asyncio_safe: return if pool._reset_on_return is reset_rollback: if transaction_was_reset: if self._echo: pool.logger.debug( "Connection %s reset, transaction already reset", self.dbapi_connection, ) else: if self._echo: pool.logger.debug( "Connection %s rollback-on-return", self.dbapi_connection, ) pool._dialect.do_rollback(self) elif pool._reset_on_return is reset_commit: if self._echo: pool.logger.debug( "Connection %s commit-on-return", self.dbapi_connection, ) pool._dialect.do_commit(self) @property def _logger(self) -> log._IdentifiedLoggerType: return self._pool.logger @property def is_valid(self) -> bool: return self.dbapi_connection is not None @property def is_detached(self) -> bool: return 
self._connection_record is None @util.ro_memoized_property def info(self) -> _InfoType: if self._connection_record is None: return {} else: return self._connection_record.info @util.ro_non_memoized_property def record_info(self) -> Optional[_InfoType]: if self._connection_record is None: return None else: return self._connection_record.record_info def invalidate( self, e: Optional[BaseException] = None, soft: bool = False ) -> None: if self.dbapi_connection is None: util.warn("Can't invalidate an already-closed connection.") return if self._connection_record: self._connection_record.invalidate(e=e, soft=soft) if not soft: # prevent any rollback / reset actions etc. on # the connection self.dbapi_connection = None # type: ignore # finalize self._checkin() def cursor(self, *args: Any, **kwargs: Any) -> DBAPICursor: assert self.dbapi_connection is not None return self.dbapi_connection.cursor(*args, **kwargs) def __getattr__(self, key: str) -> Any: return getattr(self.dbapi_connection, key) def detach(self) -> None: if self._connection_record is not None: rec = self._connection_record rec.fairy_ref = None rec.dbapi_connection = None # TODO: should this be _return_conn? self._pool._do_return_conn(self._connection_record) # can't get the descriptor assignment to work here # in pylance. mypy is OK w/ it self.info = self.info.copy() # type: ignore self._connection_record = None if self._pool.dispatch.detach: self._pool.dispatch.detach(self.dbapi_connection, rec) def close(self) -> None: self._counter -= 1 if self._counter == 0: self._checkin() def _close_special(self, transaction_reset: bool = False) -> None: self._counter -= 1 if self._counter == 0: self._checkin(transaction_was_reset=transaction_reset)
_ConnectionFairy
python
plotly__plotly.py
tests/test_core/test_graph_objs/test_repr.py
{ "start": 63, "end": 2223 }
class ____(TestCase): def test_trace_repr(self): N = 100 scatt = go.Scatter( y=list(range(N)), marker={"color": "green", "opacity": [e / N for e in range(N)]}, ) expected = """\ Scatter({ 'marker': {'color': 'green', 'opacity': [0.0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.11, 0.12, 0.13, 0.14, 0.15, 0.16, 0.17, 0.18, 0.19, 0.2, 0.21, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.28, 0.29, 0.3, 0.31, 0.32, 0.33, 0.34, 0.35, 0.36, 0.37, 0.38, 0.39, 0.4, 0.41, 0.42, 0.43, 0.44, 0.45, 0.46, 0.47, 0.48, 0.49, 0.5, 0.51, 0.52, 0.53, 0.54, 0.55, 0.56, 0.57, 0.58, 0.59, 0.6, 0.61, 0.62, 0.63, 0.64, 0.65, 0.66, 0.67, 0.68, 0.69, 0.7, 0.71, 0.72, 0.73, 0.74, 0.75, 0.76, 0.77, 0.78, 0.79, 0.8, 0.81, 0.82, 0.83, 0.84, 0.85, 0.86, 0.87, 0.88, 0.89, 0.9, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99]}, 'y': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99] })""" self.assertEqual(scatt.__repr__(), expected) def test_trace_repr_elided(self): N = 1000 scatt = go.Scatter( y=list(range(N)), marker={"color": "green", "opacity": [e / N for e in range(N)]}, ) expected = """\ Scatter({ 'marker': {'color': 'green', 'opacity': [0.0, 0.001, 0.002, ..., 0.997, 0.998, 0.999]}, 'y': [0, 1, 2, ..., 997, 998, 999] })""" self.assertEqual(scatt.__repr__(), expected)
TestGraphObjRepr
python
qdrant__qdrant-client
tests/congruence_tests/test_recommendation.py
{ "start": 539, "end": 15326 }
class ____: __test__ = False def __init__(self): self.query_image = np.random.random(image_vector_size).tolist() @classmethod def simple_recommend_image(cls, client: QdrantBase) -> list[models.ScoredPoint]: return client.query_points( collection_name=COLLECTION_NAME, query=models.RecommendQuery( recommend=models.RecommendInput(positive=[10], negative=[]) ), with_payload=True, limit=10, using="image", ).points @classmethod def many_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]: return client.query_points( collection_name=COLLECTION_NAME, query=models.RecommendQuery(recommend=models.RecommendInput(positive=[10, 19])), with_payload=True, limit=10, using="image", ).points @classmethod def simple_recommend_negative(cls, client: QdrantBase) -> list[models.ScoredPoint]: return client.query_points( collection_name=COLLECTION_NAME, query=models.RecommendQuery( recommend=models.RecommendInput(positive=[10], negative=[15, 7]) ), with_payload=True, limit=10, using="image", ).points @classmethod def recommend_from_another_collection( cls, client: QdrantBase, positive_point_id: Optional[int] = None ) -> list[models.ScoredPoint]: return client.query_points( collection_name=COLLECTION_NAME, query=models.RecommendQuery( recommend=models.RecommendInput( positive=[10] if positive_point_id is None else [positive_point_id], negative=[15, 7] if positive_point_id is None else [], ) ), with_payload=True, limit=10, using="image", lookup_from=models.LookupLocation( collection=secondary_collection_name, vector="image", ), ).points @classmethod def filter_recommend_text( cls, client: QdrantBase, query_filter: models.Filter ) -> list[models.ScoredPoint]: return client.query_points( collection_name=COLLECTION_NAME, query=models.RecommendQuery(recommend=models.RecommendInput(positive=[10])), query_filter=query_filter, with_payload=True, limit=10, using="text", ).points @classmethod def best_score_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]: return client.query_points( collection_name=COLLECTION_NAME, query=models.RecommendQuery( recommend=models.RecommendInput( positive=[10, 20], negative=[], strategy=models.RecommendStrategy.BEST_SCORE, ) ), with_payload=True, limit=10, using="image", ).points @classmethod def best_score_recommend_euclid(cls, client: QdrantBase) -> list[models.ScoredPoint]: return client.query_points( collection_name=COLLECTION_NAME, query=models.RecommendQuery( recommend=models.RecommendInput( positive=[10, 20], negative=[11, 21], strategy=models.RecommendStrategy.BEST_SCORE, ) ), with_payload=True, limit=10, using="code", ).points @classmethod def only_negatives_best_score_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]: return client.query_points( collection_name=COLLECTION_NAME, query=models.RecommendQuery( recommend=models.RecommendInput( positive=None, negative=[10, 12], strategy=models.RecommendStrategy.BEST_SCORE ) ), with_payload=True, limit=10, using="image", ).points @classmethod def only_negatives_best_score_recommend_euclid( cls, client: QdrantBase ) -> list[models.ScoredPoint]: return client.query_points( collection_name=COLLECTION_NAME, query=models.RecommendQuery( recommend=models.RecommendInput( positive=None, negative=[10, 12], strategy="best_score", # type: ignore # check it works with a literal ) ), with_payload=True, limit=10, using="code", ).points @classmethod def sum_scores_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]: return client.query_points( collection_name=COLLECTION_NAME, query=models.RecommendQuery( 
recommend=models.RecommendInput( positive=[10, 20], negative=[], strategy=models.RecommendStrategy.SUM_SCORES ) ), with_payload=True, limit=10, using="image", ).points @classmethod def sum_scores_recommend_euclid(cls, client: QdrantBase) -> list[models.ScoredPoint]: return client.query_points( collection_name=COLLECTION_NAME, query=models.RecommendQuery( recommend=models.RecommendInput( positive=[10, 20], negative=[11, 21], strategy=models.RecommendStrategy.SUM_SCORES, ) ), with_payload=True, limit=10, using="code", ).points @classmethod def only_negatives_sum_scores_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]: return client.query_points( collection_name=COLLECTION_NAME, query=models.RecommendQuery( recommend=models.RecommendInput( positive=None, negative=[10, 12], strategy=models.RecommendStrategy.SUM_SCORES ) ), with_payload=True, limit=10, using="image", ).points @classmethod def only_negatives_sum_scores_recommend_euclid( cls, client: QdrantBase ) -> list[models.ScoredPoint]: return client.query_points( collection_name=COLLECTION_NAME, query=models.RecommendQuery( recommend=models.RecommendInput( positive=None, negative=[10, 12], strategy="sum_scores" ) # type: ignore # check it works with a literal ), with_payload=True, limit=10, using="code", ).points @classmethod def avg_vector_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]: return client.query_points( collection_name=COLLECTION_NAME, query=models.RecommendQuery( recommend=models.RecommendInput( positive=[10, 13], negative=[], strategy=models.RecommendStrategy.AVERAGE_VECTOR, ) ), with_payload=True, limit=10, using="image", ).points def recommend_from_raw_vectors(self, client: QdrantBase) -> list[models.ScoredPoint]: return client.query_points( collection_name=COLLECTION_NAME, query=models.RecommendQuery( recommend=models.RecommendInput(positive=[self.query_image], negative=[]) ), with_payload=True, limit=10, using="image", ).points def recommend_from_raw_vectors_and_ids(self, client: QdrantBase) -> list[models.ScoredPoint]: return client.query_points( collection_name=COLLECTION_NAME, query=models.RecommendQuery( recommend=models.RecommendInput(positive=[self.query_image, 10], negative=[]), ), with_payload=True, limit=10, using="image", ).points @staticmethod def recommend_batch(client: QdrantBase) -> list[models.QueryResponse]: return client.query_batch_points( collection_name=COLLECTION_NAME, requests=[ models.QueryRequest( query=models.RecommendQuery( recommend=models.RecommendInput( positive=[3], negative=None, strategy=models.RecommendStrategy.AVERAGE_VECTOR, ) ), limit=1, using="image", ), models.QueryRequest( query=models.RecommendQuery( recommend=models.RecommendInput( positive=[10], negative=[], strategy=models.RecommendStrategy.BEST_SCORE, ) ), limit=2, using="image", lookup_from=models.LookupLocation( collection=secondary_collection_name, vector="image", ), ), models.QueryRequest( query=models.RecommendQuery( recommend=models.RecommendInput( positive=[4], negative=[], strategy=models.RecommendStrategy.SUM_SCORES, ) ), limit=2, using="image", ), ], ) def test_recommend_from_another_collection(): fixture_points = generate_fixtures(10) secondary_collection_points = generate_fixtures(10) searcher = TestSimpleRecommendation() local_client = init_local() init_client(local_client, fixture_points) init_client(local_client, secondary_collection_points, secondary_collection_name) remote_client = init_remote() init_client(remote_client, fixture_points) init_client(remote_client, 
secondary_collection_points, secondary_collection_name) for i in range(10): compare_client_results( local_client, remote_client, searcher.recommend_from_another_collection, positive_point_id=i, ) def test_simple_recommend() -> None: fixture_points = generate_fixtures() secondary_collection_points = generate_fixtures(100) searcher = TestSimpleRecommendation() local_client = init_local() init_client(local_client, fixture_points) init_client(local_client, secondary_collection_points, secondary_collection_name) remote_client = init_remote() init_client(remote_client, fixture_points) init_client(remote_client, secondary_collection_points, secondary_collection_name) compare_client_results(local_client, remote_client, searcher.simple_recommend_image) compare_client_results(local_client, remote_client, searcher.many_recommend) compare_client_results(local_client, remote_client, searcher.simple_recommend_negative) compare_client_results(local_client, remote_client, searcher.recommend_from_another_collection) compare_client_results(local_client, remote_client, searcher.best_score_recommend) compare_client_results(local_client, remote_client, searcher.best_score_recommend_euclid) compare_client_results( local_client, remote_client, searcher.only_negatives_best_score_recommend ) compare_client_results( local_client, remote_client, searcher.only_negatives_best_score_recommend_euclid ) compare_client_results(local_client, remote_client, searcher.sum_scores_recommend) compare_client_results(local_client, remote_client, searcher.sum_scores_recommend_euclid) compare_client_results( local_client, remote_client, searcher.only_negatives_sum_scores_recommend ) compare_client_results( local_client, remote_client, searcher.only_negatives_sum_scores_recommend_euclid ) compare_client_results(local_client, remote_client, searcher.avg_vector_recommend) compare_client_results(local_client, remote_client, searcher.recommend_from_raw_vectors) compare_client_results( local_client, remote_client, searcher.recommend_from_raw_vectors_and_ids ) compare_client_results(local_client, remote_client, searcher.recommend_batch) for _ in range(10): query_filter = one_random_filter_please() try: compare_client_results( local_client, remote_client, searcher.filter_recommend_text, query_filter=query_filter, ) except AssertionError as e: print(f"\nFailed with filter {query_filter}") raise e def test_query_with_nan(): fixture_points = generate_fixtures() vector = np.random.random(image_vector_size) vector[0] = np.nan vector = vector.tolist() using = "image" local_client = init_local() remote_client = init_remote() init_client(local_client, fixture_points) init_client(remote_client, fixture_points) with pytest.raises(AssertionError): local_client.query_points( collection_name=COLLECTION_NAME, query=models.RecommendQuery( recommend=models.RecommendInput(positive=[vector], negative=[]) ), using=using, ) with pytest.raises(UnexpectedResponse): remote_client.query_points( collection_name=COLLECTION_NAME, query=models.RecommendQuery( recommend=models.RecommendInput(positive=[vector], negative=[]) ), using=using, ) with pytest.raises(AssertionError): local_client.query_points( collection_name=COLLECTION_NAME, query=models.RecommendQuery( recommend=models.RecommendInput(positive=[1], negative=[vector]), ), using=using, ) with pytest.raises(UnexpectedResponse): remote_client.query_points( collection_name=COLLECTION_NAME, query=models.RecommendQuery( recommend=models.RecommendInput(positive=[1], negative=[vector]), ), using=using, )
TestSimpleRecommendation
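
For orientation, a minimal sketch (not part of the test suite above) of the query shape these helpers exercise. The in-memory client and collection name are assumptions for illustration; the RecommendQuery/RecommendInput structure and the named "image" vector come from the record itself.

from qdrant_client import QdrantClient, models

client = QdrantClient(":memory:")  # assumed local client; any configured client is queried the same way

hits = client.query_points(
    collection_name="demo_collection",  # hypothetical collection with a named "image" vector
    query=models.RecommendQuery(
        recommend=models.RecommendInput(positive=[10], negative=[15, 7])
    ),
    with_payload=True,
    limit=10,
    using="image",
).points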
python
mlflow__mlflow
tests/pyfunc/test_chat_agent.py
{ "start": 2652, "end": 2970 }
class ____(ChatAgent):
    @mlflow.trace
    def predict(
        self, messages: list[ChatAgentMessage], context: ChatContext, custom_inputs: dict[str, Any]
    ) -> ChatAgentResponse:
        mock_response = get_mock_response(messages)
        return ChatAgentResponse(**mock_response).model_dump()
SimpleDictChatAgent
python
PyCQA__pylint
doc/data/messages/u/unexpected-special-method-signature/good.py
{ "start": 0, "end": 122 }
class ____:
    def __enter__(self):
        pass

    def __exit__(self, type, value, traceback):
        pass
ContextManager
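
The record above is pylint's "good" example for unexpected-special-method-signature: __exit__ takes the conventional (type, value, traceback) arguments. A short usage sketch:

with ContextManager():
    print("inside the with-block")
# __exit__(type, value, traceback) runs on the way out, whether or not an exception was raised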
python
scipy__scipy
scipy/optimize/tests/test__shgo.py
{ "start": 1228, "end": 1695 }
class ____(StructTestFunction):
    def f(self, x):
        return x[0] ** 2 + x[1] ** 2

    def g(x):
        return -(np.sum(x, axis=0) - 6.0)

    cons = wrap_constraints(g)


test1_1 = StructTest1(bounds=[(-1, 6), (-1, 6)], expected_x=[0, 0])
test1_2 = StructTest1(bounds=[(0, 1), (0, 1)], expected_x=[0, 0])
test1_3 = StructTest1(bounds=[(None, None), (None, None)], expected_x=[0, 0])
StructTest1
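
The objective and constraint in the record are simple enough to sanity-check by hand; a small sketch using standalone functions, since constructing StructTestFunction itself is not shown in the record:

import numpy as np

def f(x):
    return x[0] ** 2 + x[1] ** 2

def g(x):
    return -(np.sum(x, axis=0) - 6.0)

print(f([0, 0]))                # 0 at the expected minimiser [0, 0]
print(g(np.array([1.0, 2.0])))  # 3.0 -> non-negative, i.e. x0 + x1 <= 6 is satisfied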
python
allegroai__clearml
clearml/backend_api/services/v2_23/datasets.py
{ "start": 61562, "end": 69317 }
class ____(Request): """ Commit changes to a draft version. :param version: Draft version ID :type version: str :param override_stats: Override version statistics (when provided, these will be used instead of computed statistics) :type override_stats: Statistics :param calculate_stats: If set to false then the version statistics will not be calculated on commit (only when version publish not requested). The default is true :type calculate_stats: bool :param publish: If set to true, version will also be published. :type publish: bool :param force: If publish=true, ignore ongoing annotation tasks with this version as input :type force: bool :param publishing_task: ID of an in-progress annotation task calling this endpoint. Versions which are used as input of in-progress annotation tasks can only be published if there is only one such task and its ID is sent in this field. This is required if one exists. :type publishing_task: str """ _service = "datasets" _action = "commit_version" _version = "2.23" _schema = { "definitions": { "stat_count": { "properties": { "count": { "description": "Item name", "type": ["integer", "null"], }, "name": { "description": "Number of appearances", "type": ["string", "null"], }, }, "type": "object", }, "statistics": { "properties": { "content_types": { "items": { "$ref": "#/definitions/stat_count", "description": ( "List of content type counts for the version (e.g.\n 'image/jpeg'," " 'image/png', 'video/mp4')" ), }, "type": ["array", "null"], }, "frames": { "items": { "$ref": "#/definitions/stat_count", "description": ( "List of frame counts, indicating the\n type of frames included in" " the version (annotated/" ), }, "type": ["array", "null"], }, "labels": { "items": { "$ref": "#/definitions/stat_count", "description": ( "List of labels' counts,\n indicating the categories included in the" " version" ), }, "type": ["array", "null"], }, }, "type": "object", }, }, "properties": { "calculate_stats": { "default": True, "description": ( "If set to false then the version statistics will not be calculated on commit (only when version" " publish not requested). The default is true" ), "type": "boolean", }, "force": { "default": False, "description": "If publish=true, ignore ongoing annotation tasks with this version as input", "type": "boolean", }, "override_stats": { "$ref": "#/definitions/statistics", "description": ( "Override version statistics (when provided, these will be used instead of computed statistics)" ), }, "publish": { "default": False, "description": "If set to true, version will also be published.", "type": "boolean", }, "publishing_task": { "description": ( "ID of an in-progress annotation task calling this endpoint.\n Versions which" " are used as input of in-progress annotation tasks can only be published\n if" " there is only one such task and its ID is sent in this field.\n This is" " required if one exists." 
), "type": "string", }, "version": {"description": "Draft version ID", "type": "string"}, }, "required": ["version"], "type": "object", } def __init__( self, version, override_stats=None, calculate_stats=True, publish=False, force=False, publishing_task=None, **kwargs ): super(CommitVersionRequest, self).__init__(**kwargs) self.version = version self.override_stats = override_stats self.calculate_stats = calculate_stats self.publish = publish self.force = force self.publishing_task = publishing_task @schema_property("version") def version(self): return self._property_version @version.setter def version(self, value): if value is None: self._property_version = None return self.assert_isinstance(value, "version", six.string_types) self._property_version = value @schema_property("override_stats") def override_stats(self): return self._property_override_stats @override_stats.setter def override_stats(self, value): if value is None: self._property_override_stats = None return if isinstance(value, dict): value = Statistics.from_dict(value) else: self.assert_isinstance(value, "override_stats", Statistics) self._property_override_stats = value @schema_property("calculate_stats") def calculate_stats(self): return self._property_calculate_stats @calculate_stats.setter def calculate_stats(self, value): if value is None: self._property_calculate_stats = None return self.assert_isinstance(value, "calculate_stats", (bool,)) self._property_calculate_stats = value @schema_property("publish") def publish(self): return self._property_publish @publish.setter def publish(self, value): if value is None: self._property_publish = None return self.assert_isinstance(value, "publish", (bool,)) self._property_publish = value @schema_property("force") def force(self): return self._property_force @force.setter def force(self, value): if value is None: self._property_force = None return self.assert_isinstance(value, "force", (bool,)) self._property_force = value @schema_property("publishing_task") def publishing_task(self): return self._property_publishing_task @publishing_task.setter def publishing_task(self, value): if value is None: self._property_publishing_task = None return self.assert_isinstance(value, "publishing_task", six.string_types) self._property_publishing_task = value
CommitVersionRequest
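
A construction-only sketch based on the __init__ signature in the record; the version id is a placeholder, and actually sending the request through a clearml session is not shown:

req = CommitVersionRequest(
    version="<draft-version-id>",  # required field
    publish=True,
    calculate_stats=False,
)
# the schema_property setters type-check assignments, e.g. req.force = "yes" would raise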
python
doocs__leetcode
solution/1800-1899/1864.Minimum Number of Swaps to Make the Binary String Alternating/Solution.py
{ "start": 0, "end": 375 }
class ____:
    def minSwaps(self, s: str) -> int:
        def calc(c: int) -> int:
            return sum((c ^ i & 1) != x for i, x in enumerate(map(int, s))) // 2

        n0 = s.count("0")
        n1 = len(s) - n0
        if abs(n0 - n1) > 1:
            return -1
        if n0 == n1:
            return min(calc(0), calc(1))
        return calc(0 if n0 > n1 else 1)
Solution
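
The method picks the alternating pattern ("0101..." or "1010...") that the digit counts allow and counts mismatched positions; each swap fixes two mismatches, hence the division by two. Expected results for a few inputs:

sol = Solution()
print(sol.minSwaps("111000"))  # 1  -> one swap yields "101010"
print(sol.minSwaps("010"))     # 0  -> already alternating
print(sol.minSwaps("1110"))    # -1 -> counts of 0s and 1s differ by more than one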
python
sympy__sympy
sympy/polys/domains/ring.py
{ "start": 229, "end": 3245 }
class ____(Domain[Er]):
    """Represents a ring domain. """

    is_Ring = True

    def get_ring(self):
        """Returns a ring associated with ``self``. """
        return self

    def exquo(self, a, b):
        """Exact quotient of ``a`` and ``b``, implies ``__floordiv__``. """
        if a % b:
            raise ExactQuotientFailed(a, b, self)
        else:
            return a // b

    def quo(self, a, b):
        """Quotient of ``a`` and ``b``, implies ``__floordiv__``. """
        return a // b

    def rem(self, a, b):
        """Remainder of ``a`` and ``b``, implies ``__mod__``. """
        return a % b

    def div(self, a, b):
        """Division of ``a`` and ``b``, implies ``__divmod__``. """
        return divmod(a, b)

    def invert(self, a, b):
        """Returns inversion of ``a mod b``. """
        s, t, h = self.gcdex(a, b)

        if self.is_one(h):
            return s % b
        else:
            raise NotInvertible("zero divisor")

    def revert(self, a):
        """Returns ``a**(-1)`` if possible. """
        if self.is_one(a) or self.is_one(-a):
            return a
        else:
            raise NotReversible('only units are reversible in a ring')

    def is_unit(self, a):
        try:
            self.revert(a)
            return True
        except NotReversible:
            return False

    def numer(self, a):
        """Returns numerator of ``a``. """
        return a

    def denom(self, a):
        """Returns denominator of `a`. """
        return self.one

    def free_module(self, rank):
        """
        Generate a free module of rank ``rank`` over self.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> QQ.old_poly_ring(x).free_module(2)
        QQ[x]**2
        """
        raise NotImplementedError

    def ideal(self, *gens):
        """
        Generate an ideal of ``self``.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> QQ.old_poly_ring(x).ideal(x**2)
        <x**2>
        """
        from sympy.polys.agca.ideals import ModuleImplementedIdeal
        return ModuleImplementedIdeal(self, self.free_module(1).submodule(
            *[[x] for x in gens]))

    def quotient_ring(self, e):
        """
        Form a quotient ring of ``self``.

        Here ``e`` can be an ideal or an iterable.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> QQ.old_poly_ring(x).quotient_ring(QQ.old_poly_ring(x).ideal(x**2))
        QQ[x]/<x**2>
        >>> QQ.old_poly_ring(x).quotient_ring([x**2])
        QQ[x]/<x**2>

        The division operator has been overloaded for this:

        >>> QQ.old_poly_ring(x)/[x**2]
        QQ[x]/<x**2>
        """
        from sympy.polys.agca.ideals import Ideal
        from sympy.polys.domains.quotientring import QuotientRing
        if not isinstance(e, Ideal):
            e = self.ideal(*e)
        return QuotientRing(self, e)

    def __truediv__(self, e):
        return self.quotient_ring(e)
Ring
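
Concrete domains implement this interface; a small sketch using sympy's integer domain ZZ (a Ring), with the expected behaviour following the documented semantics above:

from sympy import ZZ

print(ZZ.exquo(ZZ(6), ZZ(3)))   # 2: exact quotient succeeds because 3 divides 6
print(ZZ.div(ZZ(7), ZZ(2)))     # quotient 3 and remainder 1, i.e. divmod
print(ZZ.invert(ZZ(3), ZZ(7)))  # 5, since 3*5 = 15 = 1 (mod 7)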
python
google__jax
jax/_src/numpy/reductions.py
{ "start": 79940, "end": 110922 }
class ____(Protocol): def __call__(self, a: ArrayLike, axis: Axis = None, dtype: DTypeLike | None = None, out: None = None) -> Array: ... def _cumulative_reduction( name: str, reduction: Callable[..., Array], a: ArrayLike, axis: int | None, dtype: DTypeLike | None, out: None = None, fill_nan: bool = False, fill_value: ArrayLike = 0, promote_integers: bool = False) -> Array: """Helper function for implementing cumulative reductions.""" a = ensure_arraylike(name, a) if out is not None: raise NotImplementedError(f"The 'out' argument to jnp.{name} is not supported") if axis is None or _isscalar(a): a = lax.reshape(a, (np.size(a),)) if axis is None: axis = 0 a_shape = list(np.shape(a)) num_dims = len(a_shape) axis = canonicalize_axis(axis, num_dims) if fill_nan: a = _where(lax._isnan(a), lax._const(a, fill_value), a) a_type: DType = a.dtype result_type: DType if dtype is None: result_type = a_type if promote_integers or dtypes.issubdtype(result_type, np.bool_): result_type = _promote_integer_dtype(result_type) else: result_type = dtypes.check_and_canonicalize_user_dtype(dtype, name) if dtypes.issubdtype(result_type, np.bool_): result_type = _promote_integer_dtype(result_type) if a_type != np.bool_ and dtype == np.bool_: a = lax.asarray(a).astype(np.bool_) a = lax.convert_element_type(a, result_type) result = reduction(a, axis) # We downcast to boolean because we accumulate in integer types if dtype is not None and dtypes.issubdtype(dtype, np.bool_): result = lax.convert_element_type(result, np.bool_) return result @export @api.jit(static_argnames=('axis', 'dtype')) def cumsum(a: ArrayLike, axis: int | None = None, dtype: DTypeLike | None = None, out: None = None) -> Array: """Cumulative sum of elements along an axis. JAX implementation of :func:`numpy.cumsum`. Args: a: N-dimensional array to be accumulated. axis: integer axis along which to accumulate. If None (default), then array will be flattened and accumulated along the flattened axis. dtype: optionally specify the dtype of the output. If not specified, then the output dtype will match the input dtype. out: unused by JAX Returns: An array containing the accumulated sum along the given axis. See also: - :func:`jax.numpy.cumulative_sum`: cumulative sum via the array API standard. - :meth:`jax.numpy.add.accumulate`: cumulative sum via ufunc methods. - :func:`jax.numpy.nancumsum`: cumulative sum ignoring NaN values. - :func:`jax.numpy.sum`: sum along axis Examples: >>> x = jnp.array([[1, 2, 3], ... [4, 5, 6]]) >>> jnp.cumsum(x) # flattened cumulative sum Array([ 1, 3, 6, 10, 15, 21], dtype=int32) >>> jnp.cumsum(x, axis=1) # cumulative sum along axis 1 Array([[ 1, 3, 6], [ 4, 9, 15]], dtype=int32) """ return _cumulative_reduction("cumsum", control_flow.cumsum, a, axis, dtype, out) @export @api.jit(static_argnames=('axis', 'dtype')) def cumprod(a: ArrayLike, axis: int | None = None, dtype: DTypeLike | None = None, out: None = None) -> Array: """Cumulative product of elements along an axis. JAX implementation of :func:`numpy.cumprod`. Args: a: N-dimensional array to be accumulated. axis: integer axis along which to accumulate. If None (default), then array will be flattened and accumulated along the flattened axis. dtype: optionally specify the dtype of the output. If not specified, then the output dtype will match the input dtype. out: unused by JAX Returns: An array containing the accumulated product along the given axis. See also: - :meth:`jax.numpy.multiply.accumulate`: cumulative product via ufunc methods. 
- :func:`jax.numpy.nancumprod`: cumulative product ignoring NaN values. - :func:`jax.numpy.prod`: product along axis Examples: >>> x = jnp.array([[1, 2, 3], ... [4, 5, 6]]) >>> jnp.cumprod(x) # flattened cumulative product Array([ 1, 2, 6, 24, 120, 720], dtype=int32) >>> jnp.cumprod(x, axis=1) # cumulative product along axis 1 Array([[ 1, 2, 6], [ 4, 20, 120]], dtype=int32) """ return _cumulative_reduction("cumprod", control_flow.cumprod, a, axis, dtype, out) @export @api.jit(static_argnames=('axis', 'dtype')) def nancumsum(a: ArrayLike, axis: int | None = None, dtype: DTypeLike | None = None, out: None = None) -> Array: """Cumulative sum of elements along an axis, ignoring NaN values. JAX implementation of :func:`numpy.nancumsum`. Args: a: N-dimensional array to be accumulated. axis: integer axis along which to accumulate. If None (default), then array will be flattened and accumulated along the flattened axis. dtype: optionally specify the dtype of the output. If not specified, then the output dtype will match the input dtype. out: unused by JAX Returns: An array containing the accumulated sum along the given axis. See also: - :func:`jax.numpy.cumsum`: cumulative sum without ignoring NaN values. - :func:`jax.numpy.cumulative_sum`: cumulative sum via the array API standard. - :meth:`jax.numpy.add.accumulate`: cumulative sum via ufunc methods. - :func:`jax.numpy.sum`: sum along axis Examples: >>> x = jnp.array([[1., 2., jnp.nan], ... [4., jnp.nan, 6.]]) The standard cumulative sum will propagate NaN values: >>> jnp.cumsum(x) Array([ 1., 3., nan, nan, nan, nan], dtype=float32) :func:`~jax.numpy.nancumsum` will ignore NaN values, effectively replacing them with zeros: >>> jnp.nancumsum(x) Array([ 1., 3., 3., 7., 7., 13.], dtype=float32) Cumulative sum along axis 1: >>> jnp.nancumsum(x, axis=1) Array([[ 1., 3., 3.], [ 4., 4., 10.]], dtype=float32) """ return _cumulative_reduction("nancumsum", control_flow.cumsum, a, axis, dtype, out, fill_nan=True, fill_value=0) @export @api.jit(static_argnames=('axis', 'dtype')) def nancumprod(a: ArrayLike, axis: int | None = None, dtype: DTypeLike | None = None, out: None = None) -> Array: """Cumulative product of elements along an axis, ignoring NaN values. JAX implementation of :func:`numpy.nancumprod`. Args: a: N-dimensional array to be accumulated. axis: integer axis along which to accumulate. If None (default), then array will be flattened and accumulated along the flattened axis. dtype: optionally specify the dtype of the output. If not specified, then the output dtype will match the input dtype. out: unused by JAX Returns: An array containing the accumulated product along the given axis. See also: - :func:`jax.numpy.cumprod`: cumulative product without ignoring NaN values. - :meth:`jax.numpy.multiply.accumulate`: cumulative product via ufunc methods. - :func:`jax.numpy.prod`: product along axis Examples: >>> x = jnp.array([[1., 2., jnp.nan], ... 
[4., jnp.nan, 6.]]) The standard cumulative product will propagate NaN values: >>> jnp.cumprod(x) Array([ 1., 2., nan, nan, nan, nan], dtype=float32) :func:`~jax.numpy.nancumprod` will ignore NaN values, effectively replacing them with ones: >>> jnp.nancumprod(x) Array([ 1., 2., 2., 8., 8., 48.], dtype=float32) Cumulative product along axis 1: >>> jnp.nancumprod(x, axis=1) Array([[ 1., 2., 2.], [ 4., 4., 24.]], dtype=float32) """ return _cumulative_reduction("nancumprod", control_flow.cumprod, a, axis, dtype, out, fill_nan=True, fill_value=1) @api.jit(static_argnames=('axis', 'dtype')) def _cumsum_with_promotion(a: ArrayLike, axis: int | None = None, dtype: DTypeLike | None = None, out: None = None) -> Array: """Utility function to compute cumsum with integer promotion.""" return _cumulative_reduction("_cumsum_with_promotion", control_flow.cumsum, a, axis, dtype, out, promote_integers=True) @export def cumulative_sum( x: ArrayLike, /, *, axis: int | None = None, dtype: DTypeLike | None = None, include_initial: bool = False) -> Array: """Cumulative sum along the axis of an array. JAX implementation of :func:`numpy.cumulative_sum`. Args: x: N-dimensional array axis: integer axis along which to accumulate. If ``x`` is one-dimensional, this argument is optional and defaults to zero. dtype: optional dtype of the output. include_initial: if True, then include the initial value in the cumulative sum. Default is False. Returns: An array containing the accumulated values. See Also: - :func:`jax.numpy.cumsum`: alternative API for cumulative sum. - :func:`jax.numpy.nancumsum`: cumulative sum while ignoring NaN values. - :func:`jax.numpy.add.accumulate`: cumulative sum via the ufunc API. Examples: >>> x = jnp.array([[1, 2, 3], ... [4, 5, 6]]) >>> jnp.cumulative_sum(x, axis=1) Array([[ 1, 3, 6], [ 4, 9, 15]], dtype=int32) >>> jnp.cumulative_sum(x, axis=1, include_initial=True) Array([[ 0, 1, 3, 6], [ 0, 4, 9, 15]], dtype=int32) """ x = ensure_arraylike("cumulative_sum", x) if x.ndim == 0: raise ValueError( "The input must be non-scalar to take a cumulative sum, however a " "scalar value or scalar array was given." ) if axis is None: axis = 0 if x.ndim > 1: raise ValueError( f"The input array has rank {x.ndim}, however axis was not set to an " "explicit value. The axis argument is only optional for one-dimensional " "arrays.") axis = canonicalize_axis(axis, x.ndim) if dtype is not None: dtype = dtypes.check_and_canonicalize_user_dtype(dtype) out = _cumsum_with_promotion(x, axis=axis, dtype=dtype) if include_initial: zeros_shape = list(x.shape) zeros_shape[axis] = 1 out = lax.concatenate( [lax.full(zeros_shape, 0, dtype=out.dtype), out], dimension=axis) return out @export def cumulative_prod( x: ArrayLike, /, *, axis: int | None = None, dtype: DTypeLike | None = None, include_initial: bool = False) -> Array: """Cumulative product along the axis of an array. JAX implementation of :func:`numpy.cumulative_prod`. Args: x: N-dimensional array axis: integer axis along which to accumulate. If ``x`` is one-dimensional, this argument is optional and defaults to zero. dtype: optional dtype of the output. include_initial: if True, then include the initial value in the cumulative product. Default is False. Returns: An array containing the accumulated values. See Also: - :func:`jax.numpy.cumprod`: alternative API for cumulative product. - :func:`jax.numpy.nancumprod`: cumulative product while ignoring NaN values. - :func:`jax.numpy.multiply.accumulate`: cumulative product via the ufunc API. 
Examples: >>> x = jnp.array([[1, 2, 3], ... [4, 5, 6]]) >>> jnp.cumulative_prod(x, axis=1) Array([[ 1, 2, 6], [ 4, 20, 120]], dtype=int32) >>> jnp.cumulative_prod(x, axis=1, include_initial=True) Array([[ 1, 1, 2, 6], [ 1, 4, 20, 120]], dtype=int32) """ x = ensure_arraylike("cumulative_prod", x) if x.ndim == 0: raise ValueError( "The input must be non-scalar to take a cumulative product, however a " "scalar value or scalar array was given." ) if axis is None: axis = 0 if x.ndim > 1: raise ValueError( f"The input array has rank {x.ndim}, however axis was not set to an " "explicit value. The axis argument is only optional for one-dimensional " "arrays.") axis = canonicalize_axis(axis, x.ndim) if dtype is not None: dtype = dtypes.check_and_canonicalize_user_dtype(dtype) out = _cumulative_reduction("cumulative_prod", control_flow.cumprod, x, axis, dtype) if include_initial: zeros_shape = list(x.shape) zeros_shape[axis] = 1 out = lax.concatenate( [lax.full(zeros_shape, 1, dtype=out.dtype), out], dimension=axis) return out # Quantiles # TODO(jakevdp): interpolation argument deprecated 2024-05-16 @export @api.jit(static_argnames=('axis', 'overwrite_input', 'interpolation', 'keepdims', 'method')) def quantile(a: ArrayLike, q: ArrayLike, axis: int | tuple[int, ...] | None = None, out: None = None, overwrite_input: bool = False, method: str = "linear", keepdims: bool = False, *, interpolation: DeprecatedArg = DeprecatedArg()) -> Array: """Compute the quantile of the data along the specified axis. JAX implementation of :func:`numpy.quantile`. Args: a: N-dimensional array input. q: scalar or 1-dimensional array specifying the desired quantiles. ``q`` should contain floating-point values between ``0.0`` and ``1.0``. axis: optional axis or tuple of axes along which to compute the quantile out: not implemented by JAX; will error if not None overwrite_input: not implemented by JAX; will error if not False method: specify the interpolation method to use. Options are one of ``["linear", "lower", "higher", "midpoint", "nearest"]``. default is ``linear``. keepdims: if True, then the returned array will have the same number of dimensions as the input. Default is False. Returns: An array containing the specified quantiles along the specified axes. See also: - :func:`jax.numpy.nanquantile`: compute the quantile while ignoring NaNs - :func:`jax.numpy.percentile`: compute the percentile (0-100) Examples: Computing the median and quartiles of an array, with linear interpolation: >>> x = jnp.arange(10) >>> q = jnp.array([0.25, 0.5, 0.75]) >>> jnp.quantile(x, q) Array([2.25, 4.5 , 6.75], dtype=float32) Computing the quartiles using nearest-value interpolation: >>> jnp.quantile(x, q, method='nearest') Array([2., 4., 7.], dtype=float32) """ a, q = ensure_arraylike("quantile", a, q) if overwrite_input or out is not None: raise ValueError("jax.numpy.quantile does not support overwrite_input=True " "or out != None") # TODO(jakevdp): remove the interpolation argument in JAX v0.9.0 if not isinstance(interpolation, DeprecatedArg): raise TypeError("quantile() argument interpolation was removed in JAX" " v0.8.0. Use method instead.") return _quantile(lax.asarray(a), lax.asarray(q), axis, method, keepdims, False) # TODO(jakevdp): interpolation argument deprecated 2024-05-16 @export @api.jit(static_argnames=('axis', 'overwrite_input', 'interpolation', 'keepdims', 'method')) def nanquantile(a: ArrayLike, q: ArrayLike, axis: int | tuple[int, ...] 
| None = None, out: None = None, overwrite_input: bool = False, method: str = "linear", keepdims: bool = False, *, interpolation: DeprecatedArg = DeprecatedArg()) -> Array: """Compute the quantile of the data along the specified axis, ignoring NaNs. JAX implementation of :func:`numpy.nanquantile`. Args: a: N-dimensional array input. q: scalar or 1-dimensional array specifying the desired quantiles. ``q`` should contain floating-point values between ``0.0`` and ``1.0``. axis: optional axis or tuple of axes along which to compute the quantile out: not implemented by JAX; will error if not None overwrite_input: not implemented by JAX; will error if not False method: specify the interpolation method to use. Options are one of ``["linear", "lower", "higher", "midpoint", "nearest"]``. default is ``linear``. keepdims: if True, then the returned array will have the same number of dimensions as the input. Default is False. Returns: An array containing the specified quantiles along the specified axes. See also: - :func:`jax.numpy.quantile`: compute the quantile without ignoring nans - :func:`jax.numpy.nanpercentile`: compute the percentile (0-100) Examples: Computing the median and quartiles of a 1D array: >>> x = jnp.array([0, 1, 2, jnp.nan, 3, 4, 5, 6]) >>> q = jnp.array([0.25, 0.5, 0.75]) Because of the NaN value, :func:`jax.numpy.quantile` returns all NaNs, while :func:`~jax.numpy.nanquantile` ignores them: >>> jnp.quantile(x, q) Array([nan, nan, nan], dtype=float32) >>> jnp.nanquantile(x, q) Array([1.5, 3. , 4.5], dtype=float32) """ a, q = ensure_arraylike("nanquantile", a, q) if overwrite_input or out is not None: msg = ("jax.numpy.nanquantile does not support overwrite_input=True or " "out != None") raise ValueError(msg) # TODO(jakevdp): remove the interpolation argument in JAX v0.9.0 if not isinstance(interpolation, DeprecatedArg): raise TypeError("nanquantile() argument interpolation was removed in JAX" " v0.8.0. Use method instead.") return _quantile(lax.asarray(a), lax.asarray(q), axis, method, keepdims, True) def _quantile(a: Array, q: Array, axis: int | tuple[int, ...] 
| None, method: str, keepdims: bool, squash_nans: bool) -> Array: if method not in ["linear", "lower", "higher", "midpoint", "nearest"]: raise ValueError("method can only be 'linear', 'lower', 'higher', 'midpoint', or 'nearest'") a, = promote_dtypes_inexact(a) keepdim = [] if dtypes.issubdtype(a.dtype, np.complexfloating): raise ValueError("quantile does not support complex input, as the operation is poorly defined.") if axis is None: if keepdims: keepdim = [1] * a.ndim a = a.ravel() axis = 0 elif isinstance(axis, tuple): keepdim = list(a.shape) nd = a.ndim axis = tuple(canonicalize_axis(ax, nd) for ax in axis) if len(set(axis)) != len(axis): raise ValueError('repeated axis') for ax in axis: keepdim[ax] = 1 keep = set(range(nd)) - set(axis) # prepare permutation dimensions = list(range(nd)) for i, s in enumerate(sorted(keep)): dimensions[i], dimensions[s] = dimensions[s], dimensions[i] do_not_touch_shape = tuple(x for idx,x in enumerate(a.shape) if idx not in axis) touch_shape = tuple(x for idx,x in enumerate(a.shape) if idx in axis) a = lax.reshape(a, do_not_touch_shape + (math.prod(touch_shape),), dimensions) axis = canonicalize_axis(-1, a.ndim) else: axis = canonicalize_axis(axis, a.ndim) q_shape = q.shape q_ndim = q.ndim if q_ndim > 1: raise ValueError(f"q must be have rank <= 1, got shape {q.shape}") a_shape = a.shape if squash_nans: a = _where(lax._isnan(a), np.nan, a) # Ensure nans are positive so they sort to the end. a = lax.sort(a, dimension=axis) counts = sum(lax.bitwise_not(lax._isnan(a)), axis=axis, dtype=q.dtype, keepdims=keepdims) shape_after_reduction = counts.shape q = lax.expand_dims( q, tuple(range(q_ndim, len(shape_after_reduction) + q_ndim))) counts = lax.expand_dims(counts, tuple(range(q_ndim))) q = lax.mul(q, lax.sub(counts, lax._const(q, 1))) low = lax.floor(q) high = lax.ceil(q) high_weight = lax.sub(q, low) low_weight = lax.sub(lax._const(high_weight, 1), high_weight) low = lax.max(lax._const(low, 0), lax.min(low, counts - 1)) high = lax.max(lax._const(high, 0), lax.min(high, counts - 1)) low = lax.convert_element_type(low, int) high = lax.convert_element_type(high, int) out_shape = q_shape + shape_after_reduction index = [lax.broadcasted_iota(int, out_shape, dim + q_ndim) for dim in range(len(shape_after_reduction))] if keepdims: index[axis] = low else: index.insert(axis, low) low_value = a[tuple(index)] index[axis] = high high_value = a[tuple(index)] else: with config.debug_nans(False): a = _where(any(lax._isnan(a), axis=axis, keepdims=True), np.nan, a) a = lax.sort(a, dimension=axis) n = lax.convert_element_type(a_shape[axis], lax._dtype(q)) q = lax.mul(q, n - 1) low = lax.floor(q) high = lax.ceil(q) high_weight = lax.sub(q, low) low_weight = lax.sub(lax._const(high_weight, 1), high_weight) low = lax.clamp(lax._const(low, 0), low, n - 1) high = lax.clamp(lax._const(high, 0), high, n - 1) low = lax.convert_element_type(low, int) high = lax.convert_element_type(high, int) slice_sizes = list(a_shape) slice_sizes[axis] = 1 dnums = lax_slicing.GatherDimensionNumbers( offset_dims=tuple(range( q_ndim, len(a_shape) + q_ndim if keepdims else len(a_shape) + q_ndim - 1)), collapsed_slice_dims=() if keepdims else (axis,), start_index_map=(axis,)) low_value = lax_slicing.gather(a, low[..., None], dimension_numbers=dnums, slice_sizes=slice_sizes) high_value = lax_slicing.gather(a, high[..., None], dimension_numbers=dnums, slice_sizes=slice_sizes) if q_ndim == 1: low_weight = lax.broadcast_in_dim(low_weight, low_value.shape, broadcast_dimensions=(0,)) high_weight = 
lax.broadcast_in_dim(high_weight, high_value.shape, broadcast_dimensions=(0,)) if method == "linear": result = lax.add(lax.mul(low_value.astype(q.dtype), low_weight), lax.mul(high_value.astype(q.dtype), high_weight)) elif method == "lower": result = low_value elif method == "higher": result = high_value elif method == "nearest": pred = lax.le(high_weight, lax._const(high_weight, 0.5)) result = lax.select(pred, low_value, high_value) elif method == "midpoint": result = lax.mul(lax.add(low_value, high_value), lax._const(low_value, 0.5)) else: raise ValueError(f"{method=!r} not recognized") if keepdims and keepdim: if q_ndim > 0: keepdim = [np.shape(q)[0], *keepdim] result = result.reshape(keepdim) return lax.convert_element_type(result, a.dtype) # TODO(jakevdp): interpolation argument deprecated 2024-05-16 @export @api.jit(static_argnames=('axis', 'overwrite_input', 'interpolation', 'keepdims', 'method')) def percentile(a: ArrayLike, q: ArrayLike, axis: int | tuple[int, ...] | None = None, out: None = None, overwrite_input: bool = False, method: str = "linear", keepdims: bool = False, *, interpolation: DeprecatedArg = DeprecatedArg()) -> Array: """Compute the percentile of the data along the specified axis. JAX implementation of :func:`numpy.percentile`. Args: a: N-dimensional array input. q: scalar or 1-dimensional array specifying the desired quantiles. ``q`` should contain integer or floating point values between ``0`` and ``100``. axis: optional axis or tuple of axes along which to compute the quantile out: not implemented by JAX; will error if not None overwrite_input: not implemented by JAX; will error if not False method: specify the interpolation method to use. Options are one of ``["linear", "lower", "higher", "midpoint", "nearest"]``. default is ``linear``. keepdims: if True, then the returned array will have the same number of dimensions as the input. Default is False. Returns: An array containing the specified percentiles along the specified axes. See also: - :func:`jax.numpy.quantile`: compute the quantile (0.0-1.0) - :func:`jax.numpy.nanpercentile`: compute the percentile while ignoring NaNs Examples: Computing the median and quartiles of a 1D array: >>> x = jnp.array([0, 1, 2, 3, 4, 5, 6]) >>> q = jnp.array([25, 50, 75]) >>> jnp.percentile(x, q) Array([1.5, 3. , 4.5], dtype=float32) Computing the same percentiles with nearest rather than linear interpolation: >>> jnp.percentile(x, q, method='nearest') Array([1., 3., 4.], dtype=float32) """ a, q = ensure_arraylike("percentile", a, q) q, = promote_dtypes_inexact(q) # TODO(jakevdp): remove the interpolation argument in JAX v0.9.0 if not isinstance(interpolation, DeprecatedArg): raise TypeError("percentile() argument interpolation was removed in JAX" " v0.8.0. Use method instead.") return quantile(a, q / 100, axis=axis, out=out, overwrite_input=overwrite_input, method=method, keepdims=keepdims) # TODO(jakevdp): interpolation argument deprecated 2024-05-16 @export @api.jit(static_argnames=('axis', 'overwrite_input', 'interpolation', 'keepdims', 'method')) def nanpercentile(a: ArrayLike, q: ArrayLike, axis: int | tuple[int, ...] | None = None, out: None = None, overwrite_input: bool = False, method: str = "linear", keepdims: bool = False, *, interpolation: DeprecatedArg = DeprecatedArg()) -> Array: """Compute the percentile of the data along the specified axis, ignoring NaN values. JAX implementation of :func:`numpy.nanpercentile`. Args: a: N-dimensional array input. q: scalar or 1-dimensional array specifying the desired quantiles. 
``q`` should contain integer or floating point values between ``0`` and ``100``. axis: optional axis or tuple of axes along which to compute the quantile out: not implemented by JAX; will error if not None overwrite_input: not implemented by JAX; will error if not False method: specify the interpolation method to use. Options are one of ``["linear", "lower", "higher", "midpoint", "nearest"]``. default is ``linear``. keepdims: if True, then the returned array will have the same number of dimensions as the input. Default is False. Returns: An array containing the specified percentiles along the specified axes. See also: - :func:`jax.numpy.nanquantile`: compute the nan-aware quantile (0.0-1.0) - :func:`jax.numpy.percentile`: compute the percentile without special handling of NaNs. Examples: Computing the median and quartiles of a 1D array: >>> x = jnp.array([0, 1, 2, jnp.nan, 3, 4, 5, 6]) >>> q = jnp.array([25, 50, 75]) Because of the NaN value, :func:`jax.numpy.percentile` returns all NaNs, while :func:`~jax.numpy.nanpercentile` ignores them: >>> jnp.percentile(x, q) Array([nan, nan, nan], dtype=float32) >>> jnp.nanpercentile(x, q) Array([1.5, 3. , 4.5], dtype=float32) """ a, q = ensure_arraylike("nanpercentile", a, q) q, = promote_dtypes_inexact(q) q = q / 100 # TODO(jakevdp): remove the interpolation argument in JAX v0.9.0 if not isinstance(interpolation, DeprecatedArg): raise TypeError("nanpercentile() argument interpolation was removed in JAX" " v0.8.0. Use method instead.") return nanquantile(a, q, axis=axis, out=out, overwrite_input=overwrite_input, method=method, keepdims=keepdims) @export @api.jit(static_argnames=('axis', 'overwrite_input', 'keepdims')) def median(a: ArrayLike, axis: int | tuple[int, ...] | None = None, out: None = None, overwrite_input: bool = False, keepdims: bool = False) -> Array: r"""Return the median of array elements along a given axis. JAX implementation of :func:`numpy.median`. Args: a: input array. axis: optional, int or sequence of ints, default=None. Axis along which the median to be computed. If None, median is computed for the flattened array. keepdims: bool, default=False. If true, reduced axes are left in the result with size 1. out: Unused by JAX. overwrite_input: Unused by JAX. Returns: An array of the median along the given axis. See also: - :func:`jax.numpy.mean`: Compute the mean of array elements over a given axis. - :func:`jax.numpy.max`: Compute the maximum of array elements over given axis. - :func:`jax.numpy.min`: Compute the minimum of array elements over given axis. Examples: By default, the median is computed for the flattened array. >>> x = jnp.array([[2, 4, 7, 1], ... [3, 5, 9, 2], ... [6, 1, 8, 3]]) >>> jnp.median(x) Array(3.5, dtype=float32) If ``axis=1``, the median is computed along axis 1. >>> jnp.median(x, axis=1) Array([3. , 4. , 4.5], dtype=float32) If ``keepdims=True``, ``ndim`` of the output is equal to that of the input. >>> jnp.median(x, axis=1, keepdims=True) Array([[3. ], [4. ], [4.5]], dtype=float32) """ a = ensure_arraylike("median", a) return quantile(a, 0.5, axis=axis, out=out, overwrite_input=overwrite_input, keepdims=keepdims, method='midpoint') @export @api.jit(static_argnames=('axis', 'overwrite_input', 'keepdims')) def nanmedian(a: ArrayLike, axis: int | tuple[int, ...] | None = None, out: None = None, overwrite_input: bool = False, keepdims: bool = False) -> Array: r"""Return the median of array elements along a given axis, ignoring NaNs. JAX implementation of :func:`numpy.nanmedian`. Args: a: input array. 
axis: optional, int or sequence of ints, default=None. Axis along which the median to be computed. If None, median is computed for the flattened array. keepdims: bool, default=False. If true, reduced axes are left in the result with size 1. out: Unused by JAX. overwrite_input: Unused by JAX. Returns: An array containing the median along the given axis, ignoring NaNs. If all elements along the given axis are NaNs, returns ``nan``. See also: - :func:`jax.numpy.nanmean`: Compute the mean of array elements over a given axis, ignoring NaNs. - :func:`jax.numpy.nanmax`: Compute the maximum of array elements over given axis, ignoring NaNs. - :func:`jax.numpy.nanmin`: Compute the minimum of array elements over given axis, ignoring NaNs. Examples: By default, the median is computed for the flattened array. >>> nan = jnp.nan >>> x = jnp.array([[2, nan, 7, nan], ... [nan, 5, 9, 2], ... [6, 1, nan, 3]]) >>> jnp.nanmedian(x) Array(4., dtype=float32) If ``axis=1``, the median is computed along axis 1. >>> jnp.nanmedian(x, axis=1) Array([4.5, 5. , 3. ], dtype=float32) If ``keepdims=True``, ``ndim`` of the output is equal to that of the input. >>> jnp.nanmedian(x, axis=1, keepdims=True) Array([[4.5], [5. ], [3. ]], dtype=float32) """ a = ensure_arraylike("nanmedian", a) return nanquantile(a, 0.5, axis=axis, out=out, overwrite_input=overwrite_input, keepdims=keepdims, method='midpoint')
CumulativeReduction
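
The docstrings above double as usage examples; the values below are taken directly from them:

import jax.numpy as jnp

x = jnp.array([[1, 2, 3],
               [4, 5, 6]])

print(jnp.cumsum(x))  # [ 1  3  6 10 15 21] -- flattened cumulative sum
print(jnp.cumulative_sum(x, axis=1, include_initial=True))
# [[ 0  1  3  6]
#  [ 0  4  9 15]]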
python
marshmallow-code__marshmallow
tests/base.py
{ "start": 6411, "end": 6634 }
class ____(Schema):
    title = fields.String()
    user = fields.Nested(UserSchema)
    collaborators = fields.List(fields.Nested(UserSchema()))
    categories = fields.List(fields.String)
    id = fields.String()
BlogSchema
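
A serialization sketch; UserSchema here is a deliberately minimal stand-in, since the real fixture it refers to is defined elsewhere in the test module:

from marshmallow import Schema, fields

class UserSchema(Schema):  # assumed stand-in for the fixture's user schema
    name = fields.String()

class BlogSchema(Schema):
    title = fields.String()
    user = fields.Nested(UserSchema)
    categories = fields.List(fields.String)

data = BlogSchema().dump({"title": "First post", "user": {"name": "Monty"}, "categories": ["humor"]})
# -> a plain dict with the declared fields serialized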
python
apache__airflow
providers/airbyte/src/airflow/providers/airbyte/sensors/airbyte.py
{ "start": 1380, "end": 5737 }
class ____(BaseSensorOperator):
    """
    Check for the state of a previously submitted Airbyte job.

    :param airbyte_job_id: Required. Id of the Airbyte job.
    :param airbyte_conn_id: Optional. The name of the Airflow connection to get
        connection information for Airbyte. Defaults to "airbyte_default".
    :param deferrable: Optional. Runs the sensor in deferrable mode. Defaults to
        the config value "default_deferrable" or False, if not defined.
    :param api_version: Optional. Airbyte API version. Defaults to "v1".
    """

    template_fields: Sequence[str] = ("airbyte_job_id",)
    ui_color = "#6C51FD"

    def __init__(
        self,
        *,
        airbyte_job_id: int,
        deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
        airbyte_conn_id: str = "airbyte_default",
        api_version: str = "v1",
        **kwargs,
    ) -> None:
        if deferrable:
            if "poke_interval" not in kwargs:
                kwargs["poke_interval"] = 5
            if "timeout" not in kwargs:
                kwargs["timeout"] = 60 * 60 * 24 * 7
        super().__init__(**kwargs)
        self.deferrable = deferrable
        self.airbyte_conn_id = airbyte_conn_id
        self.airbyte_job_id = airbyte_job_id
        self.api_version = api_version

    def poke(self, context: Context) -> bool:
        hook = AirbyteHook(airbyte_conn_id=self.airbyte_conn_id, api_version=self.api_version)
        job = hook.get_job_details(job_id=self.airbyte_job_id)
        status = job.status
        if status == JobStatusEnum.FAILED:
            message = f"Job failed: \n{job}"
            self.log.debug("Failed with context: %s", context)
            raise AirflowException(message)
        if status == JobStatusEnum.CANCELLED:
            message = f"Job was cancelled: \n{job}"
            raise AirflowException(message)
        if status == JobStatusEnum.SUCCEEDED:
            self.log.info("Job %s completed successfully.", self.airbyte_job_id)
            return True
        self.log.info("Waiting for job %s to complete.", self.airbyte_job_id)
        return False

    def execute(self, context: Context) -> Any:
        """Submit a job which generates a run_id and gets deferred."""
        if not self.deferrable:
            super().execute(context)
        else:
            hook = AirbyteHook(airbyte_conn_id=self.airbyte_conn_id, api_version=self.api_version)
            job = hook.get_job_details(job_id=(int(self.airbyte_job_id)))
            state = job.status
            end_time = time.time() + self.timeout

            self.log.info("Airbyte Job Id: Job %s", self.airbyte_job_id)

            if state in (JobStatusEnum.RUNNING, JobStatusEnum.PENDING, JobStatusEnum.INCOMPLETE):
                self.defer(
                    timeout=self.execution_timeout,
                    trigger=AirbyteSyncTrigger(
                        conn_id=self.airbyte_conn_id,
                        job_id=self.airbyte_job_id,
                        end_time=end_time,
                        poll_interval=60,
                    ),
                    method_name="execute_complete",
                )
            elif state == JobStatusEnum.SUCCEEDED:
                self.log.info("%s completed successfully.", self.task_id)
                return
            elif state == JobStatusEnum.FAILED:
                self.log.debug("Failed with context: %s", context)
                raise AirflowException(f"Job failed:\n{job}")
            elif state == JobStatusEnum.CANCELLED:
                raise AirflowException(f"Job was cancelled:\n{job}")
            else:
                raise AirflowException(
                    f"Encountered unexpected state `{state}` for job_id `{self.airbyte_job_id}"
                )

    def execute_complete(self, context: Context, event: Any = None) -> None:
        """
        Invoke this callback when the trigger fires; return immediately.

        Relies on trigger to throw an exception, otherwise it assumes execution was successful.
        """
        if event["status"] == "error":
            self.log.debug("An error occurred with context: %s", context)
            raise AirflowException(event["message"])

        self.log.info("%s completed successfully.", self.task_id)
        return None
AirbyteJobSensor
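
A DAG-level usage sketch based only on the constructor arguments in the record; the job id is a placeholder (in practice it is typically pulled from the operator that triggered the sync):

from airflow.providers.airbyte.sensors.airbyte import AirbyteJobSensor

wait_for_sync = AirbyteJobSensor(
    task_id="wait_for_airbyte_sync",
    airbyte_job_id=123,                 # placeholder job id
    airbyte_conn_id="airbyte_default",
    deferrable=True,                    # __init__ then defaults poke_interval=5 and a 7-day timeout
)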
python
sqlalchemy__sqlalchemy
test/orm/test_selectin_relations.py
{ "start": 82989, "end": 91741 }
class ____(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table( "nodes", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), Column("parent_id", Integer, ForeignKey("nodes.id")), Column("data", String(30)), ) @classmethod def setup_classes(cls): class Node(cls.Comparable): def append(self, node): self.children.append(node) @testing.fixture def data_fixture(self): def go(sess): Node = self.classes.Node n1 = Node(data="n1") n1.append(Node(data="n11")) n1.append(Node(data="n12")) n1.append(Node(data="n13")) n1.children[0].children = [Node(data="n111"), Node(data="n112")] n1.children[1].append(Node(data="n121")) n1.children[1].append(Node(data="n122")) n1.children[1].append(Node(data="n123")) n2 = Node(data="n2") n2.append(Node(data="n21")) n2.children[0].append(Node(data="n211")) n2.children[0].append(Node(data="n212")) sess.add(n1) sess.add(n2) sess.flush() sess.expunge_all() return n1, n2 return go def _full_structure(self): Node = self.classes.Node return [ Node( data="n1", children=[ Node(data="n11"), Node( data="n12", children=[ Node(data="n121"), Node(data="n122"), Node(data="n123"), ], ), Node(data="n13"), ], ), Node( data="n2", children=[ Node( data="n21", children=[ Node(data="n211"), Node(data="n212"), ], ) ], ), ] def test_basic(self, data_fixture): nodes = self.tables.nodes Node = self.classes.Node self.mapper_registry.map_imperatively( Node, nodes, properties={ "children": relationship( Node, lazy="selectin", join_depth=3, order_by=nodes.c.id ) }, ) sess = fixture_session() n1, n2 = data_fixture(sess) def go(): d = ( sess.query(Node) .filter(Node.data.in_(["n1", "n2"])) .order_by(Node.data) .all() ) eq_( self._full_structure(), d, ) self.assert_sql_count(testing.db, go, 4) def test_lazy_fallback_doesnt_affect_eager(self, data_fixture): nodes = self.tables.nodes Node = self.classes.Node self.mapper_registry.map_imperatively( Node, nodes, properties={ "children": relationship( Node, lazy="selectin", join_depth=1, order_by=nodes.c.id ) }, ) sess = fixture_session() n1, n2 = data_fixture(sess) def go(): allnodes = sess.query(Node).order_by(Node.data).all() n11 = allnodes[1] eq_(n11.data, "n11") eq_([Node(data="n111"), Node(data="n112")], list(n11.children)) n12 = allnodes[4] eq_(n12.data, "n12") eq_( [Node(data="n121"), Node(data="n122"), Node(data="n123")], list(n12.children), ) self.assert_sql_count(testing.db, go, 2) def test_with_deferred(self, data_fixture): nodes = self.tables.nodes Node = self.classes.Node self.mapper_registry.map_imperatively( Node, nodes, properties={ "children": relationship( Node, lazy="selectin", join_depth=3, order_by=nodes.c.id ), "data": deferred(nodes.c.data), }, ) sess = fixture_session() n1, n2 = data_fixture(sess) def go(): eq_( Node( data="n1", children=[ Node(data="n11"), Node(data="n12"), Node(data="n13"), ], ), sess.query(Node).order_by(Node.id).first(), ) self.assert_sql_count(testing.db, go, 8) sess.expunge_all() def go(): eq_( Node( data="n1", children=[ Node(data="n11"), Node(data="n12"), Node(data="n13"), ], ), sess.query(Node) .options(undefer(Node.data)) .order_by(Node.id) .first(), ) self.assert_sql_count(testing.db, go, 7) sess.expunge_all() def go(): eq_( Node( data="n1", children=[ Node(data="n11"), Node(data="n12"), Node(data="n13"), ], ), sess.query(Node) .options( undefer(Node.data), defaultload(Node.children).undefer(Node.data), ) .first(), ) self.assert_sql_count(testing.db, go, 4) def test_options(self, data_fixture): nodes = self.tables.nodes Node = self.classes.Node 
self.mapper_registry.map_imperatively( Node, nodes, properties={"children": relationship(Node, order_by=nodes.c.id)}, ) sess = fixture_session() n1, n2 = data_fixture(sess) def go(): d = ( sess.query(Node) .filter_by(data="n1") .order_by(Node.id) .options( selectinload(Node.children).selectinload(Node.children) ) .first() ) eq_( Node( data="n1", children=[ Node(data="n11"), Node( data="n12", children=[ Node(data="n121"), Node(data="n122"), Node(data="n123"), ], ), Node(data="n13"), ], ), d, ) self.assert_sql_count(testing.db, go, 3) def test_no_depth(self, data_fixture): """no join depth is set, so no eager loading occurs.""" nodes = self.tables.nodes Node = self.classes.Node self.mapper_registry.map_imperatively( Node, nodes, properties={"children": relationship(Node, lazy="selectin")}, ) sess = fixture_session() n1, n2 = data_fixture(sess) def go(): d = ( sess.query(Node) .filter(Node.data.in_(["n1", "n2"])) .order_by(Node.data) .all() ) eq_( [ Node( data="n1", children=[ Node(data="n11"), Node( data="n12", children=[ Node(data="n121"), Node(data="n122"), Node(data="n123"), ], ), Node(data="n13"), ], ), Node(data="n2", children=[Node(data="n21")]), ], d, ) self.assert_sql_count(testing.db, go, 4)
SelfReferentialTest
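
The loading pattern the test exercises, sketched outside the fixture machinery (Node and session are assumed to be the mapped class and an open Session from the record's setup):

from sqlalchemy.orm import selectinload

nodes = (
    session.query(Node)
    .filter(Node.data.in_(["n1", "n2"]))
    .options(selectinload(Node.children).selectinload(Node.children))
    .all()
)
# each eagerly loaded level of children is emitted as one extra SELECT ... WHERE id IN (...) round trip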
python
tiangolo__fastapi
tests/test_response_model_as_return_annotation.py
{ "start": 442, "end": 49406 }
class ____(BaseModel): name: str price: float app = FastAPI() @app.get("/no_response_model-no_annotation-return_model") def no_response_model_no_annotation_return_model(): return User(name="John", surname="Doe") @app.get("/no_response_model-no_annotation-return_dict") def no_response_model_no_annotation_return_dict(): return {"name": "John", "surname": "Doe"} @app.get("/response_model-no_annotation-return_same_model", response_model=User) def response_model_no_annotation_return_same_model(): return User(name="John", surname="Doe") @app.get("/response_model-no_annotation-return_exact_dict", response_model=User) def response_model_no_annotation_return_exact_dict(): return {"name": "John", "surname": "Doe"} @app.get("/response_model-no_annotation-return_invalid_dict", response_model=User) def response_model_no_annotation_return_invalid_dict(): return {"name": "John"} @app.get("/response_model-no_annotation-return_invalid_model", response_model=User) def response_model_no_annotation_return_invalid_model(): return Item(name="Foo", price=42.0) @app.get( "/response_model-no_annotation-return_dict_with_extra_data", response_model=User ) def response_model_no_annotation_return_dict_with_extra_data(): return {"name": "John", "surname": "Doe", "password_hash": "secret"} @app.get( "/response_model-no_annotation-return_submodel_with_extra_data", response_model=User ) def response_model_no_annotation_return_submodel_with_extra_data(): return DBUser(name="John", surname="Doe", password_hash="secret") @app.get("/no_response_model-annotation-return_same_model") def no_response_model_annotation_return_same_model() -> User: return User(name="John", surname="Doe") @app.get("/no_response_model-annotation-return_exact_dict") def no_response_model_annotation_return_exact_dict() -> User: return {"name": "John", "surname": "Doe"} @app.get("/no_response_model-annotation-return_invalid_dict") def no_response_model_annotation_return_invalid_dict() -> User: return {"name": "John"} @app.get("/no_response_model-annotation-return_invalid_model") def no_response_model_annotation_return_invalid_model() -> User: return Item(name="Foo", price=42.0) @app.get("/no_response_model-annotation-return_dict_with_extra_data") def no_response_model_annotation_return_dict_with_extra_data() -> User: return {"name": "John", "surname": "Doe", "password_hash": "secret"} @app.get("/no_response_model-annotation-return_submodel_with_extra_data") def no_response_model_annotation_return_submodel_with_extra_data() -> User: return DBUser(name="John", surname="Doe", password_hash="secret") @app.get("/response_model_none-annotation-return_same_model", response_model=None) def response_model_none_annotation_return_same_model() -> User: return User(name="John", surname="Doe") @app.get("/response_model_none-annotation-return_exact_dict", response_model=None) def response_model_none_annotation_return_exact_dict() -> User: return {"name": "John", "surname": "Doe"} @app.get("/response_model_none-annotation-return_invalid_dict", response_model=None) def response_model_none_annotation_return_invalid_dict() -> User: return {"name": "John"} @app.get("/response_model_none-annotation-return_invalid_model", response_model=None) def response_model_none_annotation_return_invalid_model() -> User: return Item(name="Foo", price=42.0) @app.get( "/response_model_none-annotation-return_dict_with_extra_data", response_model=None ) def response_model_none_annotation_return_dict_with_extra_data() -> User: return {"name": "John", "surname": "Doe", "password_hash": 
"secret"} @app.get( "/response_model_none-annotation-return_submodel_with_extra_data", response_model=None, ) def response_model_none_annotation_return_submodel_with_extra_data() -> User: return DBUser(name="John", surname="Doe", password_hash="secret") @app.get( "/response_model_model1-annotation_model2-return_same_model", response_model=User ) def response_model_model1_annotation_model2_return_same_model() -> Item: return User(name="John", surname="Doe") @app.get( "/response_model_model1-annotation_model2-return_exact_dict", response_model=User ) def response_model_model1_annotation_model2_return_exact_dict() -> Item: return {"name": "John", "surname": "Doe"} @app.get( "/response_model_model1-annotation_model2-return_invalid_dict", response_model=User ) def response_model_model1_annotation_model2_return_invalid_dict() -> Item: return {"name": "John"} @app.get( "/response_model_model1-annotation_model2-return_invalid_model", response_model=User ) def response_model_model1_annotation_model2_return_invalid_model() -> Item: return Item(name="Foo", price=42.0) @app.get( "/response_model_model1-annotation_model2-return_dict_with_extra_data", response_model=User, ) def response_model_model1_annotation_model2_return_dict_with_extra_data() -> Item: return {"name": "John", "surname": "Doe", "password_hash": "secret"} @app.get( "/response_model_model1-annotation_model2-return_submodel_with_extra_data", response_model=User, ) def response_model_model1_annotation_model2_return_submodel_with_extra_data() -> Item: return DBUser(name="John", surname="Doe", password_hash="secret") @app.get( "/response_model_filtering_model-annotation_submodel-return_submodel", response_model=User, ) def response_model_filtering_model_annotation_submodel_return_submodel() -> DBUser: return DBUser(name="John", surname="Doe", password_hash="secret") @app.get("/response_model_list_of_model-no_annotation", response_model=List[User]) def response_model_list_of_model_no_annotation(): return [ DBUser(name="John", surname="Doe", password_hash="secret"), DBUser(name="Jane", surname="Does", password_hash="secret2"), ] @app.get("/no_response_model-annotation_list_of_model") def no_response_model_annotation_list_of_model() -> List[User]: return [ DBUser(name="John", surname="Doe", password_hash="secret"), DBUser(name="Jane", surname="Does", password_hash="secret2"), ] @app.get("/no_response_model-annotation_forward_ref_list_of_model") def no_response_model_annotation_forward_ref_list_of_model() -> "List[User]": return [ DBUser(name="John", surname="Doe", password_hash="secret"), DBUser(name="Jane", surname="Does", password_hash="secret2"), ] @app.get( "/response_model_union-no_annotation-return_model1", response_model=Union[User, Item], ) def response_model_union_no_annotation_return_model1(): return DBUser(name="John", surname="Doe", password_hash="secret") @app.get( "/response_model_union-no_annotation-return_model2", response_model=Union[User, Item], ) def response_model_union_no_annotation_return_model2(): return Item(name="Foo", price=42.0) @app.get("/no_response_model-annotation_union-return_model1") def no_response_model_annotation_union_return_model1() -> Union[User, Item]: return DBUser(name="John", surname="Doe", password_hash="secret") @app.get("/no_response_model-annotation_union-return_model2") def no_response_model_annotation_union_return_model2() -> Union[User, Item]: return Item(name="Foo", price=42.0) @app.get("/no_response_model-annotation_response_class") def no_response_model_annotation_response_class() -> 
Response: return Response(content="Foo") @app.get("/no_response_model-annotation_json_response_class") def no_response_model_annotation_json_response_class() -> JSONResponse: return JSONResponse(content={"foo": "bar"}) client = TestClient(app) def test_no_response_model_no_annotation_return_model(): response = client.get("/no_response_model-no_annotation-return_model") assert response.status_code == 200, response.text assert response.json() == {"name": "John", "surname": "Doe"} def test_no_response_model_no_annotation_return_dict(): response = client.get("/no_response_model-no_annotation-return_dict") assert response.status_code == 200, response.text assert response.json() == {"name": "John", "surname": "Doe"} def test_response_model_no_annotation_return_same_model(): response = client.get("/response_model-no_annotation-return_same_model") assert response.status_code == 200, response.text assert response.json() == {"name": "John", "surname": "Doe"} def test_response_model_no_annotation_return_exact_dict(): response = client.get("/response_model-no_annotation-return_exact_dict") assert response.status_code == 200, response.text assert response.json() == {"name": "John", "surname": "Doe"} def test_response_model_no_annotation_return_invalid_dict(): with pytest.raises(ResponseValidationError) as excinfo: client.get("/response_model-no_annotation-return_invalid_dict") assert "missing" in str(excinfo.value) def test_response_model_no_annotation_return_invalid_model(): with pytest.raises(ResponseValidationError) as excinfo: client.get("/response_model-no_annotation-return_invalid_model") assert "missing" in str(excinfo.value) def test_response_model_no_annotation_return_dict_with_extra_data(): response = client.get("/response_model-no_annotation-return_dict_with_extra_data") assert response.status_code == 200, response.text assert response.json() == {"name": "John", "surname": "Doe"} def test_response_model_no_annotation_return_submodel_with_extra_data(): response = client.get( "/response_model-no_annotation-return_submodel_with_extra_data" ) assert response.status_code == 200, response.text assert response.json() == {"name": "John", "surname": "Doe"} def test_no_response_model_annotation_return_same_model(): response = client.get("/no_response_model-annotation-return_same_model") assert response.status_code == 200, response.text assert response.json() == {"name": "John", "surname": "Doe"} def test_no_response_model_annotation_return_exact_dict(): response = client.get("/no_response_model-annotation-return_exact_dict") assert response.status_code == 200, response.text assert response.json() == {"name": "John", "surname": "Doe"} def test_no_response_model_annotation_return_invalid_dict(): with pytest.raises(ResponseValidationError) as excinfo: client.get("/no_response_model-annotation-return_invalid_dict") assert "missing" in str(excinfo.value) def test_no_response_model_annotation_return_invalid_model(): with pytest.raises(ResponseValidationError) as excinfo: client.get("/no_response_model-annotation-return_invalid_model") assert "missing" in str(excinfo.value) def test_no_response_model_annotation_return_dict_with_extra_data(): response = client.get("/no_response_model-annotation-return_dict_with_extra_data") assert response.status_code == 200, response.text assert response.json() == {"name": "John", "surname": "Doe"} def test_no_response_model_annotation_return_submodel_with_extra_data(): response = client.get( "/no_response_model-annotation-return_submodel_with_extra_data" ) assert 
response.status_code == 200, response.text assert response.json() == {"name": "John", "surname": "Doe"} def test_response_model_none_annotation_return_same_model(): response = client.get("/response_model_none-annotation-return_same_model") assert response.status_code == 200, response.text assert response.json() == {"name": "John", "surname": "Doe"} def test_response_model_none_annotation_return_exact_dict(): response = client.get("/response_model_none-annotation-return_exact_dict") assert response.status_code == 200, response.text assert response.json() == {"name": "John", "surname": "Doe"} def test_response_model_none_annotation_return_invalid_dict(): response = client.get("/response_model_none-annotation-return_invalid_dict") assert response.status_code == 200, response.text assert response.json() == {"name": "John"} def test_response_model_none_annotation_return_invalid_model(): response = client.get("/response_model_none-annotation-return_invalid_model") assert response.status_code == 200, response.text assert response.json() == {"name": "Foo", "price": 42.0} def test_response_model_none_annotation_return_dict_with_extra_data(): response = client.get("/response_model_none-annotation-return_dict_with_extra_data") assert response.status_code == 200, response.text assert response.json() == { "name": "John", "surname": "Doe", "password_hash": "secret", } def test_response_model_none_annotation_return_submodel_with_extra_data(): response = client.get( "/response_model_none-annotation-return_submodel_with_extra_data" ) assert response.status_code == 200, response.text assert response.json() == { "name": "John", "surname": "Doe", "password_hash": "secret", } def test_response_model_model1_annotation_model2_return_same_model(): response = client.get("/response_model_model1-annotation_model2-return_same_model") assert response.status_code == 200, response.text assert response.json() == {"name": "John", "surname": "Doe"} def test_response_model_model1_annotation_model2_return_exact_dict(): response = client.get("/response_model_model1-annotation_model2-return_exact_dict") assert response.status_code == 200, response.text assert response.json() == {"name": "John", "surname": "Doe"} def test_response_model_model1_annotation_model2_return_invalid_dict(): with pytest.raises(ResponseValidationError) as excinfo: client.get("/response_model_model1-annotation_model2-return_invalid_dict") assert "missing" in str(excinfo.value) def test_response_model_model1_annotation_model2_return_invalid_model(): with pytest.raises(ResponseValidationError) as excinfo: client.get("/response_model_model1-annotation_model2-return_invalid_model") assert "missing" in str(excinfo.value) def test_response_model_model1_annotation_model2_return_dict_with_extra_data(): response = client.get( "/response_model_model1-annotation_model2-return_dict_with_extra_data" ) assert response.status_code == 200, response.text assert response.json() == {"name": "John", "surname": "Doe"} def test_response_model_model1_annotation_model2_return_submodel_with_extra_data(): response = client.get( "/response_model_model1-annotation_model2-return_submodel_with_extra_data" ) assert response.status_code == 200, response.text assert response.json() == {"name": "John", "surname": "Doe"} def test_response_model_filtering_model_annotation_submodel_return_submodel(): response = client.get( "/response_model_filtering_model-annotation_submodel-return_submodel" ) assert response.status_code == 200, response.text assert response.json() == {"name": "John", 
"surname": "Doe"} def test_response_model_list_of_model_no_annotation(): response = client.get("/response_model_list_of_model-no_annotation") assert response.status_code == 200, response.text assert response.json() == [ {"name": "John", "surname": "Doe"}, {"name": "Jane", "surname": "Does"}, ] def test_no_response_model_annotation_list_of_model(): response = client.get("/no_response_model-annotation_list_of_model") assert response.status_code == 200, response.text assert response.json() == [ {"name": "John", "surname": "Doe"}, {"name": "Jane", "surname": "Does"}, ] def test_no_response_model_annotation_forward_ref_list_of_model(): response = client.get("/no_response_model-annotation_forward_ref_list_of_model") assert response.status_code == 200, response.text assert response.json() == [ {"name": "John", "surname": "Doe"}, {"name": "Jane", "surname": "Does"}, ] def test_response_model_union_no_annotation_return_model1(): response = client.get("/response_model_union-no_annotation-return_model1") assert response.status_code == 200, response.text assert response.json() == {"name": "John", "surname": "Doe"} def test_response_model_union_no_annotation_return_model2(): response = client.get("/response_model_union-no_annotation-return_model2") assert response.status_code == 200, response.text assert response.json() == {"name": "Foo", "price": 42.0} def test_no_response_model_annotation_union_return_model1(): response = client.get("/no_response_model-annotation_union-return_model1") assert response.status_code == 200, response.text assert response.json() == {"name": "John", "surname": "Doe"} def test_no_response_model_annotation_union_return_model2(): response = client.get("/no_response_model-annotation_union-return_model2") assert response.status_code == 200, response.text assert response.json() == {"name": "Foo", "price": 42.0} def test_no_response_model_annotation_return_class(): response = client.get("/no_response_model-annotation_response_class") assert response.status_code == 200, response.text assert response.text == "Foo" def test_no_response_model_annotation_json_response_class(): response = client.get("/no_response_model-annotation_json_response_class") assert response.status_code == 200, response.text assert response.json() == {"foo": "bar"} def test_invalid_response_model_field(): app = FastAPI() with pytest.raises(FastAPIError) as e: @app.get("/") def read_root() -> Union[Response, None]: return Response(content="Foo") # pragma: no cover assert "valid Pydantic field type" in e.value.args[0] assert "parameter response_model=None" in e.value.args[0] # TODO: remove when dropping Pydantic v1 support @needs_pydanticv1 def test_invalid_response_model_field_pv1(): from fastapi._compat import v1 app = FastAPI() class Model(v1.BaseModel): foo: str with pytest.raises(FastAPIError) as e: @app.get("/") def read_root() -> Union[Response, Model, None]: return Response(content="Foo") # pragma: no cover assert "valid Pydantic field type" in e.value.args[0] assert "parameter response_model=None" in e.value.args[0] def test_openapi_schema(): response = client.get("/openapi.json") assert response.status_code == 200, response.text assert response.json() == { "openapi": "3.1.0", "info": {"title": "FastAPI", "version": "0.1.0"}, "paths": { "/no_response_model-no_annotation-return_model": { "get": { "summary": "No Response Model No Annotation Return Model", "operationId": "no_response_model_no_annotation_return_model_no_response_model_no_annotation_return_model_get", "responses": { "200": { "description": 
"Successful Response", "content": {"application/json": {"schema": {}}}, } }, } }, "/no_response_model-no_annotation-return_dict": { "get": { "summary": "No Response Model No Annotation Return Dict", "operationId": "no_response_model_no_annotation_return_dict_no_response_model_no_annotation_return_dict_get", "responses": { "200": { "description": "Successful Response", "content": {"application/json": {"schema": {}}}, } }, } }, "/response_model-no_annotation-return_same_model": { "get": { "summary": "Response Model No Annotation Return Same Model", "operationId": "response_model_no_annotation_return_same_model_response_model_no_annotation_return_same_model_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/User"} } }, } }, } }, "/response_model-no_annotation-return_exact_dict": { "get": { "summary": "Response Model No Annotation Return Exact Dict", "operationId": "response_model_no_annotation_return_exact_dict_response_model_no_annotation_return_exact_dict_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/User"} } }, } }, } }, "/response_model-no_annotation-return_invalid_dict": { "get": { "summary": "Response Model No Annotation Return Invalid Dict", "operationId": "response_model_no_annotation_return_invalid_dict_response_model_no_annotation_return_invalid_dict_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/User"} } }, } }, } }, "/response_model-no_annotation-return_invalid_model": { "get": { "summary": "Response Model No Annotation Return Invalid Model", "operationId": "response_model_no_annotation_return_invalid_model_response_model_no_annotation_return_invalid_model_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/User"} } }, } }, } }, "/response_model-no_annotation-return_dict_with_extra_data": { "get": { "summary": "Response Model No Annotation Return Dict With Extra Data", "operationId": "response_model_no_annotation_return_dict_with_extra_data_response_model_no_annotation_return_dict_with_extra_data_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/User"} } }, } }, } }, "/response_model-no_annotation-return_submodel_with_extra_data": { "get": { "summary": "Response Model No Annotation Return Submodel With Extra Data", "operationId": "response_model_no_annotation_return_submodel_with_extra_data_response_model_no_annotation_return_submodel_with_extra_data_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/User"} } }, } }, } }, "/no_response_model-annotation-return_same_model": { "get": { "summary": "No Response Model Annotation Return Same Model", "operationId": "no_response_model_annotation_return_same_model_no_response_model_annotation_return_same_model_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/User"} } }, } }, } }, "/no_response_model-annotation-return_exact_dict": { "get": { "summary": "No Response Model Annotation Return Exact Dict", "operationId": 
"no_response_model_annotation_return_exact_dict_no_response_model_annotation_return_exact_dict_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/User"} } }, } }, } }, "/no_response_model-annotation-return_invalid_dict": { "get": { "summary": "No Response Model Annotation Return Invalid Dict", "operationId": "no_response_model_annotation_return_invalid_dict_no_response_model_annotation_return_invalid_dict_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/User"} } }, } }, } }, "/no_response_model-annotation-return_invalid_model": { "get": { "summary": "No Response Model Annotation Return Invalid Model", "operationId": "no_response_model_annotation_return_invalid_model_no_response_model_annotation_return_invalid_model_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/User"} } }, } }, } }, "/no_response_model-annotation-return_dict_with_extra_data": { "get": { "summary": "No Response Model Annotation Return Dict With Extra Data", "operationId": "no_response_model_annotation_return_dict_with_extra_data_no_response_model_annotation_return_dict_with_extra_data_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/User"} } }, } }, } }, "/no_response_model-annotation-return_submodel_with_extra_data": { "get": { "summary": "No Response Model Annotation Return Submodel With Extra Data", "operationId": "no_response_model_annotation_return_submodel_with_extra_data_no_response_model_annotation_return_submodel_with_extra_data_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/User"} } }, } }, } }, "/response_model_none-annotation-return_same_model": { "get": { "summary": "Response Model None Annotation Return Same Model", "operationId": "response_model_none_annotation_return_same_model_response_model_none_annotation_return_same_model_get", "responses": { "200": { "description": "Successful Response", "content": {"application/json": {"schema": {}}}, } }, } }, "/response_model_none-annotation-return_exact_dict": { "get": { "summary": "Response Model None Annotation Return Exact Dict", "operationId": "response_model_none_annotation_return_exact_dict_response_model_none_annotation_return_exact_dict_get", "responses": { "200": { "description": "Successful Response", "content": {"application/json": {"schema": {}}}, } }, } }, "/response_model_none-annotation-return_invalid_dict": { "get": { "summary": "Response Model None Annotation Return Invalid Dict", "operationId": "response_model_none_annotation_return_invalid_dict_response_model_none_annotation_return_invalid_dict_get", "responses": { "200": { "description": "Successful Response", "content": {"application/json": {"schema": {}}}, } }, } }, "/response_model_none-annotation-return_invalid_model": { "get": { "summary": "Response Model None Annotation Return Invalid Model", "operationId": "response_model_none_annotation_return_invalid_model_response_model_none_annotation_return_invalid_model_get", "responses": { "200": { "description": "Successful Response", "content": {"application/json": {"schema": {}}}, } }, } }, "/response_model_none-annotation-return_dict_with_extra_data": { "get": { "summary": 
"Response Model None Annotation Return Dict With Extra Data", "operationId": "response_model_none_annotation_return_dict_with_extra_data_response_model_none_annotation_return_dict_with_extra_data_get", "responses": { "200": { "description": "Successful Response", "content": {"application/json": {"schema": {}}}, } }, } }, "/response_model_none-annotation-return_submodel_with_extra_data": { "get": { "summary": "Response Model None Annotation Return Submodel With Extra Data", "operationId": "response_model_none_annotation_return_submodel_with_extra_data_response_model_none_annotation_return_submodel_with_extra_data_get", "responses": { "200": { "description": "Successful Response", "content": {"application/json": {"schema": {}}}, } }, } }, "/response_model_model1-annotation_model2-return_same_model": { "get": { "summary": "Response Model Model1 Annotation Model2 Return Same Model", "operationId": "response_model_model1_annotation_model2_return_same_model_response_model_model1_annotation_model2_return_same_model_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/User"} } }, } }, } }, "/response_model_model1-annotation_model2-return_exact_dict": { "get": { "summary": "Response Model Model1 Annotation Model2 Return Exact Dict", "operationId": "response_model_model1_annotation_model2_return_exact_dict_response_model_model1_annotation_model2_return_exact_dict_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/User"} } }, } }, } }, "/response_model_model1-annotation_model2-return_invalid_dict": { "get": { "summary": "Response Model Model1 Annotation Model2 Return Invalid Dict", "operationId": "response_model_model1_annotation_model2_return_invalid_dict_response_model_model1_annotation_model2_return_invalid_dict_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/User"} } }, } }, } }, "/response_model_model1-annotation_model2-return_invalid_model": { "get": { "summary": "Response Model Model1 Annotation Model2 Return Invalid Model", "operationId": "response_model_model1_annotation_model2_return_invalid_model_response_model_model1_annotation_model2_return_invalid_model_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/User"} } }, } }, } }, "/response_model_model1-annotation_model2-return_dict_with_extra_data": { "get": { "summary": "Response Model Model1 Annotation Model2 Return Dict With Extra Data", "operationId": "response_model_model1_annotation_model2_return_dict_with_extra_data_response_model_model1_annotation_model2_return_dict_with_extra_data_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/User"} } }, } }, } }, "/response_model_model1-annotation_model2-return_submodel_with_extra_data": { "get": { "summary": "Response Model Model1 Annotation Model2 Return Submodel With Extra Data", "operationId": "response_model_model1_annotation_model2_return_submodel_with_extra_data_response_model_model1_annotation_model2_return_submodel_with_extra_data_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/User"} } }, } }, } }, 
"/response_model_filtering_model-annotation_submodel-return_submodel": { "get": { "summary": "Response Model Filtering Model Annotation Submodel Return Submodel", "operationId": "response_model_filtering_model_annotation_submodel_return_submodel_response_model_filtering_model_annotation_submodel_return_submodel_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/User"} } }, } }, } }, "/response_model_list_of_model-no_annotation": { "get": { "summary": "Response Model List Of Model No Annotation", "operationId": "response_model_list_of_model_no_annotation_response_model_list_of_model_no_annotation_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": { "title": "Response Response Model List Of Model No Annotation Response Model List Of Model No Annotation Get", "type": "array", "items": {"$ref": "#/components/schemas/User"}, } } }, } }, } }, "/no_response_model-annotation_list_of_model": { "get": { "summary": "No Response Model Annotation List Of Model", "operationId": "no_response_model_annotation_list_of_model_no_response_model_annotation_list_of_model_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": { "title": "Response No Response Model Annotation List Of Model No Response Model Annotation List Of Model Get", "type": "array", "items": {"$ref": "#/components/schemas/User"}, } } }, } }, } }, "/no_response_model-annotation_forward_ref_list_of_model": { "get": { "summary": "No Response Model Annotation Forward Ref List Of Model", "operationId": "no_response_model_annotation_forward_ref_list_of_model_no_response_model_annotation_forward_ref_list_of_model_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": { "title": "Response No Response Model Annotation Forward Ref List Of Model No Response Model Annotation Forward Ref List Of Model Get", "type": "array", "items": {"$ref": "#/components/schemas/User"}, } } }, } }, } }, "/response_model_union-no_annotation-return_model1": { "get": { "summary": "Response Model Union No Annotation Return Model1", "operationId": "response_model_union_no_annotation_return_model1_response_model_union_no_annotation_return_model1_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": { "title": "Response Response Model Union No Annotation Return Model1 Response Model Union No Annotation Return Model1 Get", "anyOf": [ {"$ref": "#/components/schemas/User"}, {"$ref": "#/components/schemas/Item"}, ], } } }, } }, } }, "/response_model_union-no_annotation-return_model2": { "get": { "summary": "Response Model Union No Annotation Return Model2", "operationId": "response_model_union_no_annotation_return_model2_response_model_union_no_annotation_return_model2_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": { "title": "Response Response Model Union No Annotation Return Model2 Response Model Union No Annotation Return Model2 Get", "anyOf": [ {"$ref": "#/components/schemas/User"}, {"$ref": "#/components/schemas/Item"}, ], } } }, } }, } }, "/no_response_model-annotation_union-return_model1": { "get": { "summary": "No Response Model Annotation Union Return Model1", "operationId": "no_response_model_annotation_union_return_model1_no_response_model_annotation_union_return_model1_get", 
"responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": { "title": "Response No Response Model Annotation Union Return Model1 No Response Model Annotation Union Return Model1 Get", "anyOf": [ {"$ref": "#/components/schemas/User"}, {"$ref": "#/components/schemas/Item"}, ], } } }, } }, } }, "/no_response_model-annotation_union-return_model2": { "get": { "summary": "No Response Model Annotation Union Return Model2", "operationId": "no_response_model_annotation_union_return_model2_no_response_model_annotation_union_return_model2_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": { "title": "Response No Response Model Annotation Union Return Model2 No Response Model Annotation Union Return Model2 Get", "anyOf": [ {"$ref": "#/components/schemas/User"}, {"$ref": "#/components/schemas/Item"}, ], } } }, } }, } }, "/no_response_model-annotation_response_class": { "get": { "summary": "No Response Model Annotation Response Class", "operationId": "no_response_model_annotation_response_class_no_response_model_annotation_response_class_get", "responses": { "200": { "description": "Successful Response", "content": {"application/json": {"schema": {}}}, } }, } }, "/no_response_model-annotation_json_response_class": { "get": { "summary": "No Response Model Annotation Json Response Class", "operationId": "no_response_model_annotation_json_response_class_no_response_model_annotation_json_response_class_get", "responses": { "200": { "description": "Successful Response", "content": {"application/json": {"schema": {}}}, } }, } }, }, "components": { "schemas": { "Item": { "title": "Item", "required": ["name", "price"], "type": "object", "properties": { "name": {"title": "Name", "type": "string"}, "price": {"title": "Price", "type": "number"}, }, }, "User": { "title": "User", "required": ["name", "surname"], "type": "object", "properties": { "name": {"title": "Name", "type": "string"}, "surname": {"title": "Surname", "type": "string"}, }, }, } }, }
Item
python
celery__celery
t/unit/backends/test_base.py
{ "start": 10790, "end": 12862 }
class ____:
    def setup_method(self):
        self.b = BaseBackend(self.app)

    def test_unpickleable(self):
        self.b.serializer = 'pickle'
        x = self.b.prepare_exception(Unpickleable(1, 2, 'foo'))
        assert isinstance(x, KeyError)
        y = self.b.exception_to_python(x)
        assert isinstance(y, KeyError)

    def test_json_exception_arguments(self):
        self.b.serializer = 'json'
        x = self.b.prepare_exception(Exception(object))
        assert x == {
            'exc_message': serialization.ensure_serializable(
                (object,), self.b.encode),
            'exc_type': Exception.__name__,
            'exc_module': Exception.__module__}
        y = self.b.exception_to_python(x)
        assert isinstance(y, Exception)

    def test_json_exception_nested(self):
        self.b.serializer = 'json'
        x = self.b.prepare_exception(objectexception.Nested('msg'))
        assert x == {
            'exc_message': ('msg',),
            'exc_type': 'objectexception.Nested',
            'exc_module': objectexception.Nested.__module__}
        y = self.b.exception_to_python(x)
        assert isinstance(y, objectexception.Nested)

    def test_impossible(self):
        self.b.serializer = 'pickle'
        x = self.b.prepare_exception(Impossible())
        assert isinstance(x, UnpickleableExceptionWrapper)
        assert str(x)
        y = self.b.exception_to_python(x)
        assert y.__class__.__name__ == 'Impossible'
        assert y.__class__.__module__ == 'foo.module'

    def test_regular(self):
        self.b.serializer = 'pickle'
        x = self.b.prepare_exception(KeyError('baz'))
        assert isinstance(x, KeyError)
        y = self.b.exception_to_python(x)
        assert isinstance(y, KeyError)

    def test_unicode_message(self):
        message = '\u03ac'
        x = self.b.prepare_exception(Exception(message))
        assert x == {'exc_message': (message,),
                     'exc_type': Exception.__name__,
                     'exc_module': Exception.__module__}
test_prepare_exception
python
encode__django-rest-framework
rest_framework/relations.py
{ "start": 15857, "end": 16956 }
class ____(RelatedField):
    """
    A read-write field that represents the target of the relationship
    by a unique 'slug' attribute.
    """
    default_error_messages = {
        'does_not_exist': _('Object with {slug_name}={value} does not exist.'),
        'invalid': _('Invalid value.'),
    }

    def __init__(self, slug_field=None, **kwargs):
        assert slug_field is not None, 'The `slug_field` argument is required.'
        self.slug_field = slug_field
        super().__init__(**kwargs)

    def to_internal_value(self, data):
        queryset = self.get_queryset()
        try:
            return queryset.get(**{self.slug_field: data})
        except ObjectDoesNotExist:
            self.fail('does_not_exist', slug_name=self.slug_field,
                      value=smart_str(data))
        except (TypeError, ValueError):
            self.fail('invalid')

    def to_representation(self, obj):
        slug = self.slug_field
        if "__" in slug:
            # handling nested relationship if defined
            slug = slug.replace('__', '.')
        return attrgetter(slug)(obj)
SlugRelatedField
python
getsentry__sentry
src/sentry/web/forms/accounts.py
{ "start": 836, "end": 5009 }
class ____(forms.Form):
    username_field: Field[Any, Any]
    username = forms.CharField(
        label=_("Account"),
        max_length=128,
        widget=forms.TextInput(attrs={"placeholder": _("username or email"), "tabindex": 1}),
    )
    password = forms.CharField(
        label=_("Password"),
        widget=forms.PasswordInput(attrs={"placeholder": _("password"), "tabindex": 2}),
    )

    error_messages = {
        "invalid_login": _(
            "Please enter a correct %(username)s and password. "
            "Note that both fields may be case-sensitive."
        ),
        "rate_limited": _(
            "You have made too many failed authentication "
            "attempts. Please try again later."
        ),
        "no_cookies": _(
            "Your Web browser doesn't appear to have cookies "
            "enabled. Cookies are required for logging in."
        ),
        "inactive": _("This account is inactive."),
    }

    def __init__(self, request: HttpRequest, *args: Any, **kwargs: Any) -> None:
        """
        If request is passed in, the form will validate that cookies are
        enabled. Note that the request (a HttpRequest object) must have set
        a cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE
        before running this validation.
        """
        self.request = request
        self.user_cache: User | None = None
        super().__init__(*args, **kwargs)

        # Set the label for the "username" field.
        UserModel = get_user_model()
        self.username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)
        if not self.fields["username"].label:
            self.fields["username"].label = capfirst(self.username_field.verbose_name)

    def clean_username(self, value=None):
        if not value:
            value = self.cleaned_data.get("username") or ""
        value = value.strip(" \n\t\r\0")
        if not value:
            return
        return value.lower()

    def is_rate_limited(self):
        if self._is_ip_rate_limited():
            return True
        if self._is_user_rate_limited():
            return True
        return False

    def _is_ip_rate_limited(self):
        limit = options.get("auth.ip-rate-limit")
        if not limit:
            return False

        ip_address = self.request.META["REMOTE_ADDR"]
        return ratelimiter.backend.is_limited(f"auth:ip:{ip_address}", limit)

    def _is_user_rate_limited(self):
        limit = options.get("auth.user-rate-limit")
        if not limit:
            return False

        username = self.cleaned_data.get("username")
        if not username:
            return False

        return ratelimiter.backend.is_limited(f"auth:username:{username}", limit)

    def clean(self) -> dict[str, Any] | None:
        username = self.cleaned_data.get("username")
        password = self.cleaned_data.get("password")

        if not (username and password):
            raise forms.ValidationError(
                self.error_messages["invalid_login"]
                % {"username": self.username_field.verbose_name}
            )

        if self.is_rate_limited():
            logger.info(
                "user.auth.rate-limited",
                extra={"ip_address": self.request.META["REMOTE_ADDR"], "username": username},
            )
            raise forms.ValidationError(self.error_messages["rate_limited"])

        self.user_cache = authenticate(username=username, password=password)
        if self.user_cache is None:
            raise forms.ValidationError(
                self.error_messages["invalid_login"]
                % {"username": self.username_field.verbose_name}
            )

        self.check_for_test_cookie()
        return self.cleaned_data

    def check_for_test_cookie(self):
        if not self.request.session.test_cookie_worked():
            raise forms.ValidationError(self.error_messages["no_cookies"])
        else:
            self.request.session.delete_test_cookie()

    def get_user_id(self):
        if self.user_cache:
            return self.user_cache.id
        return None

    def get_user(self):
        return self.user_cache
AuthenticationForm
python
kamyu104__LeetCode-Solutions
Python/existence-of-a-substring-in-a-string-and-its-reverse.py
{ "start": 53, "end": 508 }
class ____(object):
    def isSubstringPresent(self, s):
        """
        :type s: str
        :rtype: bool
        """
        lookup = [[False]*26 for _ in xrange(26)]
        for i in xrange(len(s)-1):
            lookup[ord(s[i])-ord('a')][ord(s[i+1])-ord('a')] = True
        return any(lookup[ord(s[i+1])-ord('a')][ord(s[i])-ord('a')] for i in xrange(len(s)-1))


# Time: O(n)
# Space: O(min(n, 26^2))
import collections


# hash table
Solution
python
pytorch__pytorch
.github/scripts/test_trymerge.py
{ "start": 7955, "end": 8441 }
class ____(GitRepo):
    def __init__(self) -> None:
        super().__init__(get_git_repo_dir(), get_git_remote_name())

    def commits_resolving_gh_pr(self, pr_num: int) -> list[str]:
        return ["FakeCommitSha"]

    def commit_message(self, ref: str) -> str:
        return "super awesome commit message"


@mock.patch("trymerge.gh_graphql", side_effect=mocked_gh_graphql)
@mock.patch(
    "trymerge.get_drci_classifications", side_effect=mocked_drci_classifications
)
DummyGitRepo
python
ray-project__ray
rllib/utils/minibatch_utils.py
{ "start": 10342, "end": 11695 }
class ____:
    """Iterator for sharding batch into num_shards batches.

    Args:
        batch: The input multi-agent batch.
        num_shards: The number of shards to split the batch into.

    Yields:
        A MultiAgentBatch of size len(batch) / num_shards.
    """

    def __init__(self, batch: MultiAgentBatch, num_shards: int):
        self._batch = batch
        self._num_shards = num_shards

    def __iter__(self):
        for i in range(self._num_shards):
            # TODO (sven): The following way of sharding a multi-agent batch destroys
            # the relationship of the different agents' timesteps to each other.
            # Thus, in case the algorithm requires agent-synchronized data (aka.
            # "lockstep"), the `ShardBatchIterator` cannot be used.
            batch_to_send = {}
            for pid, sub_batch in self._batch.policy_batches.items():
                batch_size = math.ceil(len(sub_batch) / self._num_shards)
                start = batch_size * i
                end = min(start + batch_size, len(sub_batch))
                batch_to_send[pid] = sub_batch[int(start) : int(end)]
            # TODO (Avnish): int(batch_size) ? How should we shard MA batches really?
            new_batch = MultiAgentBatch(batch_to_send, int(batch_size))
            yield new_batch


@DeveloperAPI
ShardBatchIterator
python
ipython__ipython
IPython/testing/plugin/pytest_ipdoctest.py
{ "start": 5492, "end": 8137 }
class ____(Exception):
    def __init__(self, failures: Sequence["doctest.DocTestFailure"]) -> None:
        super().__init__()
        self.failures = failures


def _init_runner_class() -> Type["IPDocTestRunner"]:
    import doctest

    from .ipdoctest import IPDocTestRunner

    class PytestDoctestRunner(IPDocTestRunner):
        """Runner to collect failures.

        Note that the out variable in this case is a list instead of a
        stdout-like object.
        """

        def __init__(
            self,
            checker: Optional["IPDoctestOutputChecker"] = None,
            verbose: Optional[bool] = None,
            optionflags: int = 0,
            continue_on_failure: bool = True,
        ) -> None:
            super().__init__(checker=checker, verbose=verbose, optionflags=optionflags)
            self.continue_on_failure = continue_on_failure

        def report_failure(
            self,
            out,
            test: "doctest.DocTest",
            example: "doctest.Example",
            got: str,
        ) -> None:
            failure = doctest.DocTestFailure(test, example, got)
            if self.continue_on_failure:
                out.append(failure)
            else:
                raise failure

        def report_unexpected_exception(
            self,
            out,
            test: "doctest.DocTest",
            example: "doctest.Example",
            exc_info: Tuple[Type[BaseException], BaseException, types.TracebackType],
        ) -> None:
            if isinstance(exc_info[1], OutcomeException):
                raise exc_info[1]
            if isinstance(exc_info[1], bdb.BdbQuit):
                outcomes.exit("Quitting debugger")
            failure = doctest.UnexpectedException(test, example, exc_info)
            if self.continue_on_failure:
                out.append(failure)
            else:
                raise failure

    return PytestDoctestRunner


def _get_runner(
    checker: Optional["IPDoctestOutputChecker"] = None,
    verbose: Optional[bool] = None,
    optionflags: int = 0,
    continue_on_failure: bool = True,
) -> "IPDocTestRunner":
    # We need this in order to do a lazy import on doctest
    global RUNNER_CLASS
    if RUNNER_CLASS is None:
        RUNNER_CLASS = _init_runner_class()
    # Type ignored because the continue_on_failure argument is only defined on
    # PytestDoctestRunner, which is lazily defined so can't be used as a type.
    return RUNNER_CLASS(  # type: ignore
        checker=checker,
        verbose=verbose,
        optionflags=optionflags,
        continue_on_failure=continue_on_failure,
    )
MultipleDoctestFailures
python
celery__celery
celery/backends/redis.py
{ "start": 6703, "end": 25579 }
class ____(BaseKeyValueStoreBackend, AsyncBackendMixin): """Redis task result store. It makes use of the following commands: GET, MGET, DEL, INCRBY, EXPIRE, SET, SETEX """ ResultConsumer = ResultConsumer #: :pypi:`redis` client module. redis = redis connection_class_ssl = redis.SSLConnection if redis else None #: Maximum number of connections in the pool. max_connections = None supports_autoexpire = True supports_native_join = True #: Maximal length of string value in Redis. #: 512 MB - https://redis.io/topics/data-types _MAX_STR_VALUE_SIZE = 536870912 def __init__(self, host=None, port=None, db=None, password=None, max_connections=None, url=None, connection_pool=None, **kwargs): super().__init__(expires_type=int, **kwargs) _get = self.app.conf.get if self.redis is None: raise ImproperlyConfigured(E_REDIS_MISSING.strip()) if host and '://' in host: url, host = host, None self.max_connections = ( max_connections or _get('redis_max_connections') or self.max_connections) self._ConnectionPool = connection_pool socket_timeout = _get('redis_socket_timeout') socket_connect_timeout = _get('redis_socket_connect_timeout') retry_on_timeout = _get('redis_retry_on_timeout') socket_keepalive = _get('redis_socket_keepalive') health_check_interval = _get('redis_backend_health_check_interval') credential_provider = _get('redis_backend_credential_provider') self.connparams = { 'host': _get('redis_host') or 'localhost', 'port': _get('redis_port') or 6379, 'db': _get('redis_db') or 0, 'password': _get('redis_password'), 'max_connections': self.max_connections, 'socket_timeout': socket_timeout and float(socket_timeout), 'retry_on_timeout': retry_on_timeout or False, 'socket_connect_timeout': socket_connect_timeout and float(socket_connect_timeout), 'client_name': _get('redis_client_name'), } username = _get('redis_username') if username: # We're extra careful to avoid including this configuration value # if it wasn't specified since older versions of py-redis # don't support specifying a username. # Only Redis>6.0 supports username/password authentication. # TODO: Include this in connparams' definition once we drop # support for py-redis<3.4.0. self.connparams['username'] = username if credential_provider: # if credential provider passed as string or query param if isinstance(credential_provider, str): credential_provider_cls = symbol_by_name(credential_provider) credential_provider = credential_provider_cls() if not isinstance(credential_provider, CredentialProvider): raise ValueError( "Credential provider is not an instance of a redis.CredentialProvider or a subclass" ) self.connparams['credential_provider'] = credential_provider # drop username and password if credential provider is configured self.connparams.pop("username", None) self.connparams.pop("password", None) if health_check_interval: self.connparams["health_check_interval"] = health_check_interval # absent in redis.connection.UnixDomainSocketConnection if socket_keepalive: self.connparams['socket_keepalive'] = socket_keepalive # "redis_backend_use_ssl" must be a dict with the keys: # 'ssl_cert_reqs', 'ssl_ca_certs', 'ssl_certfile', 'ssl_keyfile' # (the same as "broker_use_ssl") ssl = _get('redis_backend_use_ssl') if ssl: self.connparams.update(ssl) self.connparams['connection_class'] = self.connection_class_ssl if url: self.connparams = self._params_from_url(url, self.connparams) # If we've received SSL parameters via query string or the # redis_backend_use_ssl dict, check ssl_cert_reqs is valid. 
If set # via query string ssl_cert_reqs will be a string so convert it here if ('connection_class' in self.connparams and issubclass(self.connparams['connection_class'], redis.SSLConnection)): ssl_cert_reqs_missing = 'MISSING' ssl_string_to_constant = {'CERT_REQUIRED': CERT_REQUIRED, 'CERT_OPTIONAL': CERT_OPTIONAL, 'CERT_NONE': CERT_NONE, 'required': CERT_REQUIRED, 'optional': CERT_OPTIONAL, 'none': CERT_NONE} ssl_cert_reqs = self.connparams.get('ssl_cert_reqs', ssl_cert_reqs_missing) ssl_cert_reqs = ssl_string_to_constant.get(ssl_cert_reqs, ssl_cert_reqs) if ssl_cert_reqs not in ssl_string_to_constant.values(): raise ValueError(E_REDIS_SSL_CERT_REQS_MISSING_INVALID) if ssl_cert_reqs == CERT_OPTIONAL: logger.warning(W_REDIS_SSL_CERT_OPTIONAL) elif ssl_cert_reqs == CERT_NONE: logger.warning(W_REDIS_SSL_CERT_NONE) self.connparams['ssl_cert_reqs'] = ssl_cert_reqs self.url = url self.connection_errors, self.channel_errors = ( get_redis_error_classes() if get_redis_error_classes else ((), ())) self.result_consumer = self.ResultConsumer( self, self.app, self.accept, self._pending_results, self._pending_messages, ) def _params_from_url(self, url, defaults): scheme, host, port, username, password, path, query = _parse_url(url) connparams = dict( defaults, **dictfilter({ 'host': host, 'port': port, 'username': username, 'password': password, 'db': query.pop('virtual_host', None)}) ) if scheme == 'socket': # use 'path' as path to the socket… in this case # the database number should be given in 'query' connparams.update({ 'connection_class': self.redis.UnixDomainSocketConnection, 'path': '/' + path, }) # host+port are invalid options when using this connection type. connparams.pop('host', None) connparams.pop('port', None) connparams.pop('socket_connect_timeout') else: connparams['db'] = path ssl_param_keys = ['ssl_ca_certs', 'ssl_certfile', 'ssl_keyfile', 'ssl_cert_reqs'] if scheme == 'redis': # If connparams or query string contain ssl params, raise error if (any(key in connparams for key in ssl_param_keys) or any(key in query for key in ssl_param_keys)): raise ValueError(E_REDIS_SSL_PARAMS_AND_SCHEME_MISMATCH) if scheme == 'rediss': connparams['connection_class'] = redis.SSLConnection # The following parameters, if present in the URL, are encoded. We # must add the decoded values to connparams. for ssl_setting in ssl_param_keys: ssl_val = query.pop(ssl_setting, None) if ssl_val: connparams[ssl_setting] = unquote(ssl_val) # db may be string and start with / like in kombu. 
db = connparams.get('db') or 0 db = db.strip('/') if isinstance(db, str) else db connparams['db'] = int(db) # credential provider as query string credential_provider = query.pop("credential_provider", None) if credential_provider: if isinstance(credential_provider, str): credential_provider_cls = symbol_by_name(credential_provider) credential_provider = credential_provider_cls() if not isinstance(credential_provider, CredentialProvider): raise ValueError( "Credential provider is not an instance of a redis.CredentialProvider or a subclass" ) connparams['credential_provider'] = credential_provider # drop username and password if credential provider is configured connparams.pop("username", None) connparams.pop("password", None) for key, value in query.items(): if key in redis.connection.URL_QUERY_ARGUMENT_PARSERS: query[key] = redis.connection.URL_QUERY_ARGUMENT_PARSERS[key]( value ) # Query parameters override other parameters connparams.update(query) return connparams def exception_safe_to_retry(self, exc): if isinstance(exc, self.connection_errors): return True return False @cached_property def retry_policy(self): retry_policy = super().retry_policy if "retry_policy" in self._transport_options: retry_policy = retry_policy.copy() retry_policy.update(self._transport_options['retry_policy']) return retry_policy def on_task_call(self, producer, task_id): if not task_join_will_block(): self.result_consumer.consume_from(task_id) def get(self, key): return self.client.get(key) def mget(self, keys): return self.client.mget(keys) def ensure(self, fun, args, **policy): retry_policy = dict(self.retry_policy, **policy) max_retries = retry_policy.get('max_retries') return retry_over_time( fun, self.connection_errors, args, {}, partial(self.on_connection_error, max_retries), **retry_policy) def on_connection_error(self, max_retries, exc, intervals, retries): tts = next(intervals) logger.error( E_LOST.strip(), retries, max_retries or 'Inf', humanize_seconds(tts, 'in ')) return tts def set(self, key, value, **retry_policy): if isinstance(value, str) and len(value) > self._MAX_STR_VALUE_SIZE: raise BackendStoreError('value too large for Redis backend') return self.ensure(self._set, (key, value), **retry_policy) def _set(self, key, value): with self.client.pipeline() as pipe: if self.expires: pipe.setex(key, self.expires, value) else: pipe.set(key, value) pipe.publish(key, value) pipe.execute() def forget(self, task_id): super().forget(task_id) self.result_consumer.cancel_for(task_id) def delete(self, key): self.client.delete(key) def incr(self, key): return self.client.incr(key) def expire(self, key, value): return self.client.expire(key, value) def add_to_chord(self, group_id, result): self.client.incr(self.get_key_for_group(group_id, '.t'), 1) def _unpack_chord_result(self, tup, decode, EXCEPTION_STATES=states.EXCEPTION_STATES, PROPAGATE_STATES=states.PROPAGATE_STATES): _, tid, state, retval = decode(tup) if state in EXCEPTION_STATES: retval = self.exception_to_python(retval) if state in PROPAGATE_STATES: chord_error = _create_chord_error_with_cause( message=f'Dependency {tid} raised {retval!r}', original_exc=retval ) raise chord_error return retval def set_chord_size(self, group_id, chord_size): self.set(self.get_key_for_group(group_id, '.s'), chord_size) def apply_chord(self, header_result_args, body, **kwargs): # If any of the child results of this chord are complex (ie. 
group # results themselves), we need to save `header_result` to ensure that # the expected structure is retained when we finish the chord and pass # the results onward to the body in `on_chord_part_return()`. We don't # do this is all cases to retain an optimisation in the common case # where a chord header is comprised of simple result objects. if not isinstance(header_result_args[1], _regen): header_result = self.app.GroupResult(*header_result_args) if any(isinstance(nr, GroupResult) for nr in header_result.results): header_result.save(backend=self) @cached_property def _chord_zset(self): return self._transport_options.get('result_chord_ordered', True) @cached_property def _transport_options(self): return self.app.conf.get('result_backend_transport_options', {}) def on_chord_part_return(self, request, state, result, propagate=None, **kwargs): app = self.app tid, gid, group_index = request.id, request.group, request.group_index if not gid or not tid: return if group_index is None: group_index = '+inf' client = self.client jkey = self.get_key_for_group(gid, '.j') tkey = self.get_key_for_group(gid, '.t') skey = self.get_key_for_group(gid, '.s') result = self.encode_result(result, state) encoded = self.encode([1, tid, state, result]) with client.pipeline() as pipe: pipeline = ( pipe.zadd(jkey, {encoded: group_index}).zcount(jkey, "-inf", "+inf") if self._chord_zset else pipe.rpush(jkey, encoded).llen(jkey) ).get(tkey).get(skey) if self.expires: pipeline = pipeline \ .expire(jkey, self.expires) \ .expire(tkey, self.expires) \ .expire(skey, self.expires) _, readycount, totaldiff, chord_size_bytes = pipeline.execute()[:4] totaldiff = int(totaldiff or 0) if chord_size_bytes: try: callback = maybe_signature(request.chord, app=app) total = int(chord_size_bytes) + totaldiff if readycount == total: header_result = GroupResult.restore(gid, app=app) if header_result is not None: # If we manage to restore a `GroupResult`, then it must # have been complex and saved by `apply_chord()` earlier. # # Before we can join the `GroupResult`, it needs to be # manually marked as ready to avoid blocking header_result.on_ready() # We'll `join()` it to get the results and ensure they are # structured as intended rather than the flattened version # we'd construct without any other information. join_func = ( header_result.join_native if header_result.supports_native_join else header_result.join ) with allow_join_result(): resl = join_func( timeout=app.conf.result_chord_join_timeout, propagate=True ) else: # Otherwise simply extract and decode the results we # stashed along the way, which should be faster for large # numbers of simple results in the chord header. 
decode, unpack = self.decode, self._unpack_chord_result with client.pipeline() as pipe: if self._chord_zset: pipeline = pipe.zrange(jkey, 0, -1) else: pipeline = pipe.lrange(jkey, 0, total) resl, = pipeline.execute() resl = [unpack(tup, decode) for tup in resl] try: callback.delay(resl) except Exception as exc: # pylint: disable=broad-except logger.exception( 'Chord callback for %r raised: %r', request.group, exc) return self.chord_error_from_stack( callback, ChordError(f'Callback error: {exc!r}'), ) finally: with client.pipeline() as pipe: pipe \ .delete(jkey) \ .delete(tkey) \ .delete(skey) \ .execute() except ChordError as exc: logger.exception('Chord %r raised: %r', request.group, exc) return self.chord_error_from_stack(callback, exc) except Exception as exc: # pylint: disable=broad-except logger.exception('Chord %r raised: %r', request.group, exc) return self.chord_error_from_stack( callback, ChordError(f'Join error: {exc!r}'), ) def _create_client(self, **params): return self._get_client()( connection_pool=self._get_pool(**params), ) def _get_client(self): return self.redis.StrictRedis def _get_pool(self, **params): return self.ConnectionPool(**params) @property def ConnectionPool(self): if self._ConnectionPool is None: self._ConnectionPool = self.redis.ConnectionPool return self._ConnectionPool @cached_property def client(self): return self._create_client(**self.connparams) def __reduce__(self, args=(), kwargs=None): kwargs = {} if not kwargs else kwargs return super().__reduce__( args, dict(kwargs, expires=self.expires, url=self.url)) if getattr(redis, "sentinel", None): class SentinelManagedSSLConnection( redis.sentinel.SentinelManagedConnection, redis.SSLConnection): """Connect to a Redis server using Sentinel + TLS. Use Sentinel to identify which Redis server is the current master to connect to and when connecting to the Master server, use an SSL Connection. """
RedisBackend
python
has2k1__plotnine
plotnine/stats/stat_ellipse.py
{ "start": 360, "end": 7658 }
class ____(stat): """ Calculate normal confidence interval ellipse {usage} Parameters ---------- {common_parameters} type : Literal["t", "norm", "euclid"], default="t" The type of ellipse. `t` assumes a multivariate t-distribution. `norm` assumes a multivariate normal distribution. `euclid` draws a circle with the radius equal to `level`, representing the euclidean distance from the center. level : float, default=0.95 The confidence level at which to draw the ellipse. segments : int, default=51 Number of segments to be used in drawing the ellipse. See Also -------- plotnine.geom_path : The default `geom` for this `stat`. """ REQUIRED_AES = {"x", "y"} DEFAULT_PARAMS = { "geom": "path", "position": "identity", "na_rm": False, "type": "t", "level": 0.95, "segments": 51, } def compute_group(self, data, scales): import scipy.stats as stats from scipy import linalg level = self.params["level"] segments = self.params["segments"] type_ = self.params["type"] dfn = 2 dfd = len(data) - 1 if dfd < 3: warn("Too few points to calculate an ellipse", PlotnineWarning) return pd.DataFrame({"x": [], "y": []}) m: FloatArray = np.asarray(data[["x", "y"]]) # The stats used to create the ellipse if type_ == "t": res = cov_trob(m) cov = res["cov"] center = res["center"] elif type_ == "norm": cov = np.cov(m, rowvar=False) center = np.mean(m, axis=0) elif type_ == "euclid": cov = np.cov(m, rowvar=False) cov = np.diag(np.repeat(np.diag(cov).min(), 2)) center = np.mean(m, axis=0) else: raise ValueError(f"Unknown value for type={type_}") # numpy's cholesky function does not guarantee upper/lower # triangular factorization. chol_decomp = linalg.cholesky(cov, lower=False) # Parameters of the ellipse if type_ == "euclid": radius = level / chol_decomp.max() else: radius = np.sqrt(dfn * stats.f.ppf(level, dfn, dfd)) space = np.linspace(0, 2 * np.pi, segments) # Catesian coordinates unit_circle = np.column_stack([np.cos(space), np.sin(space)]) res = center + radius * np.dot(unit_circle, chol_decomp) return pd.DataFrame({"x": res[:, 0], "y": res[:, 1]}) def cov_trob( x, wt: Optional[FloatArrayLike] = None, cor=False, center: FloatArrayLike | bool = True, nu=5, maxit=25, tol=0.01, ): """ Covariance Estimation for Multivariate t Distribution Estimates a covariance or correlation matrix assuming the data came from a multivariate t distribution: this provides some degree of robustness to outlier without giving a high breakdown point. **credit**: This function a port of the R function `MASS::cov.trob`. Parameters ---------- x : array data matrix. Missing values (NaNs) are not allowed. wt : array A vector of weights for each case: these are treated as if the case i actually occurred `wt[i]` times. cor : bool Flag to choose between returning the correlation (`cor=True`) or covariance (`cor=False`) matrix. center : array | bool A logical value or a numeric vector providing the location about which the covariance is to be taken. If `center=False`, no centering is done; if `center=True` the MLE of the location vector is used. nu : int 'degrees of freedom' for the multivariate t distribution. Must exceed 2 (so that the covariance matrix is finite). maxit : int Maximum number of iterations in fitting. tol : float Convergence tolerance for fitting. Returns ------- out : dict A dictionary with with the following key-value - `cov` : the fitted covarince matrix. - `center` : the estimated or specified location vector. - `wt` : the specified weights: only returned if the wt argument was given. 
- `n_obs` : the number of cases used in the fitting. - `cor` : the fitted correlation matrix: only returned if `cor=True`. - `call` : The matched call. - `iter` : The number of iterations used. References ---------- - J. T. Kent, D. E. Tyler and Y. Vardi (1994) A curious likelihood identity for the multivariate t-distribution. *Communications in Statistics-Simulation and Computation* **23**, 441-453. - Venables, W. N. and Ripley, B. D. (1999) *Modern Applied Statistics with S-PLUS*. Third Edition. Springer. """ from scipy import linalg def test_values(x): if pd.isna(x).any() or np.isinf(x).any(): raise ValueError("Missing or infinite values in 'x'") def scale_simp(x: FloatArray, center: FloatArray, n: int, p: int): return x - np.repeat([center], n, axis=0) x = np.asarray(x) n, p = x.shape test_values(x) ans: dict[str, Any] = {} # wt if wt is None: wt = np.ones(n) else: wt = np.asarray(wt) ans["wt0"] = wt if len(wt) != n: raise ValueError( "length of 'wt' must equal number of observations." ) if any(wt < 0): raise ValueError("Negative weights not allowed.") if not np.sum(wt): raise ValueError("No positive weights.") x = x[wt > 0, :] wt = wt[wt > 0] n, _ = x.shape wt = wt[:, np.newaxis] # pyright: ignore[reportCallIssue,reportArgumentType,reportOptionalSubscript] # loc use_loc = False if isinstance(center, bool): if center: loc = np.sum(wt * x, axis=0) / wt.sum() use_loc = True else: loc = np.zeros(p) else: if len(center) != p: raise ValueError("'center' is not the right length") loc = np.asarray(center) # Default values for the typechecker iteration = 0 X = np.array([], ndmin=x.ndim) w = wt * (1 + p / nu) for iteration in range(maxit): w0 = w X = scale_simp(x, loc, n, p) _, s, v = linalg.svd(np.sqrt(w / np.sum(w)) * X) wX = X @ v.T @ np.diag(np.full(p, 1 / s)) Q = np.squeeze((wX**2) @ np.ones(p)) w = (wt * (nu + p)) / (nu + Q)[:, np.newaxis] if use_loc: loc = np.sum(w * x, axis=0) / w.sum() if all(np.abs(w - w0) < tol): break else: # nobreak _c1 = np.mean(w) - np.mean(wt) > tol _c2 = np.abs(np.mean(w * Q) / p - 1) > tol # pyright: ignore if _c1 and _c2: warn("Convergence probably failed.", PlotnineWarning) _a = np.sqrt(w) * X cov = (_a.T @ _a) / np.sum(wt) if cor: sd = np.sqrt(np.diag(cov)) ans["cor"] = (cov / sd) / np.repeat([sd], p, axis=0).T ans.update( cov=cov, center=loc, n_obs=n, iter=iteration, ) return ans
stat_ellipse
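A note on the stat_ellipse record above: the `norm` branch is three steps of linear algebra, estimate the covariance, take its upper-triangular Cholesky factor, and map a unit circle through that factor scaled by an F-distribution radius. A minimal sketch of just that computation, with illustrative data and seed (not from plotnine):

import numpy as np
from scipy import linalg, stats

rng = np.random.default_rng(42)
m = rng.multivariate_normal([0.0, 0.0], [[3.0, 1.0], [1.0, 2.0]], size=200)

level, segments = 0.95, 51
cov = np.cov(m, rowvar=False)
center = m.mean(axis=0)
chol = linalg.cholesky(cov, lower=False)  # upper-triangular factor of the covariance
dfn, dfd = 2, len(m) - 1
radius = np.sqrt(dfn * stats.f.ppf(level, dfn, dfd))  # F quantile sets the confidence level
theta = np.linspace(0, 2 * np.pi, segments)
unit_circle = np.column_stack([np.cos(theta), np.sin(theta)])
ellipse = center + radius * unit_circle @ chol  # (segments, 2) boundary points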
python
google__flatbuffers
python/flatbuffers/number_types.py
{ "start": 2595, "end": 2637 }
class ____(Int32Flags): pass
SOffsetTFlags
python
PyCQA__pylint
tests/functional/ext/docparams/return/missing_return_doc_Numpy.py
{ "start": 2455, "end": 2766 }
class ____: """test_ignores_ignored_argument_names_numpy Example of a method documenting the return type that an implementation should return. """ def foo(self, arg, _): """docstring ... Parameters ---------- arg : int An argument. """
Foo
python
networkx__networkx
networkx/classes/tests/test_special.py
{ "start": 1477, "end": 1611 }
class ____(_TestGraph): def setup_method(self): _TestGraph.setup_method(self) self.Graph = nx.Graph
TestSpecialGraph
python
geekcomputers__Python
Detect_Remove_loop.py
{ "start": 94, "end": 1729 }
class ____: def __init__(self): self.head = None def Insert_At_End(self, new_data): new_node = Node(new_data) if self.head is None: self.head = new_node return current = self.head while current.next: current = current.next current.next = new_node def Detect_and_Remove_Loop(self): slow = fast = self.head while slow and fast and fast.next: slow = slow.next fast = fast.next.next if slow == fast: self.Remove_loop(slow) print("Loop Found") return 1 return 0 def Remove_loop(self, Loop_node): ptr1 = self.head while 1: ptr2 = Loop_node while ptr2.next != Loop_node and ptr2.next != ptr1: ptr2 = ptr2.next if ptr2.next == ptr1: break ptr1 = ptr1.next ptr2.next = None def Display(self): temp = self.head while temp: print(temp.data, "->", end=" ") temp = temp.next print("None") if __name__ == "__main__": L_list = Linked_List() L_list.Insert_At_End(8) L_list.Insert_At_End(5) L_list.Insert_At_End(10) L_list.Insert_At_End(7) L_list.Insert_At_End(6) L_list.Insert_At_End(11) L_list.Insert_At_End(9) print("Linked List with Loop: ") L_list.Display() print("Linked List without Loop: ") L_list.head.next.next.next.next.next.next.next = L_list.head.next.next L_list.Detect_and_Remove_Loop() L_list.Display()
Linked_List
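The Detect_and_Remove_Loop method above is Floyd's tortoise-and-hare detection: once both pointers are inside the cycle, the two-step pointer gains one node per iteration on the one-step pointer, so they must meet. The detection half restated compactly, with a successor dictionary standing in for the Node.next pointers (names here are illustrative):

def has_cycle(next_of, start):
    # next_of maps node -> successor; None terminates the list.
    slow = fast = start
    while fast is not None and next_of.get(fast) is not None:
        slow = next_of[slow]
        fast = next_of[next_of[fast]]
        if slow == fast:
            return True
    return False

# 0 -> 1 -> 2 -> 3 -> back to 1 (cycle)
assert has_cycle({0: 1, 1: 2, 2: 3, 3: 1}, 0)
assert not has_cycle({0: 1, 1: 2, 2: None}, 0)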
python
pytorch__pytorch
test/dynamo/test_autograd_function.py
{ "start": 694, "end": 1138 }
class ____(torch.autograd.Function): # Test there is graph break in forward function @staticmethod def forward(ctx, foo): result = foo + foo torch._dynamo.graph_break() result = result + foo ctx.save_for_backward(result) return result @staticmethod def backward(ctx, grad_output): (result,) = ctx.saved_tensors return grad_output * math.sqrt(result.numel())
CustomFunc3
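CustomFunc3's torch._dynamo.graph_break() exists only to exercise the compiler; the surrounding shape is the standard torch.autograd.Function protocol (static forward/backward, ctx.save_for_backward, invocation via .apply). A minimal sketch of that protocol without the break (the Doubler name is illustrative):

import torch

class Doubler(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return 2 * x

    @staticmethod
    def backward(ctx, grad_output):
        (x,) = ctx.saved_tensors  # saved input; unused here, but shows the pattern
        return 2 * grad_output    # d(2x)/dx = 2

t = torch.ones(3, requires_grad=True)
Doubler.apply(t).sum().backward()
assert torch.equal(t.grad, torch.full((3,), 2.0))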
python
tiangolo__fastapi
scripts/contributors.py
{ "start": 1143, "end": 1187 }
class ____(BaseModel): name: str
LabelNode
python
keras-team__keras
keras/src/backend/torch/core.py
{ "start": 3312, "end": 22700 }
class ____(KerasVariable): def _initialize(self, value): if isinstance(value, torch.nn.Parameter): # Reuse same parameter self._value = value else: self._value = torch.nn.Parameter( convert_to_tensor(value, dtype=self._dtype), requires_grad=self.trainable, ).to(get_device()) def _direct_assign(self, value): with torch.no_grad(): self.value.copy_(value) def _convert_to_tensor(self, value, dtype=None): return convert_to_tensor(value, dtype=dtype) # Overload native accessor. @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): args = [arg.value if isinstance(arg, Variable) else arg for arg in args] if kwargs is None: kwargs = {} kwargs = { key: value.value if isinstance(value, Variable) else value for key, value in kwargs.items() } return func(*args, **kwargs) def __array__(self, dtype=None): value = convert_to_numpy(self.value) if dtype: return value.astype(dtype) return value @property def value(self): # We cannot chain super() here because it will fail TorchDynamo. The # reason why is unclear. def maybe_use_symbolic_tensor(value): # Create and use a symbolic tensor stub in symbolic calls. if str(get_device()) == "meta" and str(value.device) != "meta": return torch.nn.Parameter( torch.empty( size=self._shape, dtype=to_torch_dtype(self._dtype), device="meta", ), requires_grad=self.trainable, ) return value if in_stateless_scope(): scope = get_stateless_scope() value = scope.get_current_value(self) if value is not None: value = self._maybe_autocast(value) return maybe_use_symbolic_tensor(value) if self._value is None: # Uninitialized variable. Return a placeholder. # This is fine because it's only ever used # in during shape inference / graph tracing # (anything else would be a bug, to be fixed.) value = self._maybe_autocast( self._initializer(self._shape, dtype=self._dtype) ) else: value = self._maybe_autocast(self._value) return maybe_use_symbolic_tensor(value) @property def trainable(self): return self._trainable @trainable.setter def trainable(self, value): self._trainable = value if self._value is not None: self._value.requires_grad = value def __eq__(self, other): try: return super().__eq__(other) except Exception: return False def convert_to_tensor(x, dtype=None, sparse=None, ragged=None): if sparse: raise ValueError("`sparse=True` is not supported with torch backend") if ragged: raise ValueError("`ragged=True` is not supported with torch backend") if isinstance(x, Variable) or is_tensor(x): if isinstance(x, Variable): x = x.value device = get_device() if x.device != device: if x.is_meta: x = torch.empty_like(x, device=device) else: x = x.to(device) if dtype is not None: x = x.to(to_torch_dtype(dtype)) return x if dtype is None: if isinstance(x, bool): return torch.as_tensor(x, dtype=torch.bool, device=get_device()) elif isinstance(x, int): return torch.as_tensor(x, dtype=torch.int32, device=get_device()) elif isinstance(x, float): return torch.as_tensor( x, dtype=to_torch_dtype(floatx()), device=get_device() ) # Convert to np in case of any array-like that is not list or tuple. if not isinstance(x, (list, tuple)): x = np.array(x) elif len(x) > 0 and any(isinstance(x1, torch.Tensor) for x1 in x): # Handle list or tuple of torch tensors return torch.stack([convert_to_tensor(x1) for x1 in x]) if isinstance(x, np.ndarray): if x.dtype == np.uint32: # Torch backend does not support uint32. x = x.astype(np.int64) if standardize_dtype(x.dtype) == "bfloat16": # Torch backend does not support converting bfloat16 ndarray. 
x = x.astype(np.float32) dtype = "bfloat16" dtype = dtype or x.dtype if dtype is None: dtype = result_type( *[getattr(item, "dtype", type(item)) for item in tree.flatten(x)] ) dtype = to_torch_dtype(dtype) return torch.as_tensor(x, dtype=dtype, device=get_device()) def convert_to_numpy(x): def transform(x): if is_tensor(x): if x.requires_grad: x = x.detach() # Tensor has to be moved to CPU before converting to numpy. if x.device != torch.device("cpu"): x = x.cpu() if x.dtype == torch.bfloat16: # Attempting to call .numpy() on a bfloat16 torch tensor leads # to an immediate error. Instead we upcast to float32 and then # convert to the numpy friendly bfloat16 type. # https://github.com/pytorch/pytorch/issues/90574 return np.array(x.to(torch.float32)).astype(ml_dtypes.bfloat16) return np.array(x) if isinstance(x, (list, tuple)): return np.array([transform(e) for e in x]) return transform(x) def is_tensor(x): # Using the built-in `isinstance` is recommended by pytorch # over using torch.is_tensor # see: https://pytorch.org/docs/stable/generated/torch.is_tensor.html # # Also, `torch.is_tensor()` causes issues with dynamo caching when # a torch.Tensor and numpy.ndarray of the same size, shape, and dtype # is passed, if called on a Tensor first the second call with ndarray # will return `True` and vice-versa. return isinstance(x, torch.Tensor) def shape(x): # Convert from `torch.Size` to plain tuple. return tuple(x.shape) def cast(x, dtype): dtype = to_torch_dtype(dtype) if isinstance(x, Variable): x = x.value if is_tensor(x): if x.dtype == dtype: return x else: return x.to(dtype) return convert_to_tensor(x, dtype) # Shape / dtype inference util def compute_output_spec(fn, *args, **kwargs): def has_none_shape(x): """Check for if a `KerasTensor` has dynamic shape.""" if isinstance(x, KerasTensor): return None in x.shape return False def convert_keras_tensor_to_torch(x, fill_value=None): """Convert `KerasTensor`s to `torch.Tensor`s.""" if isinstance(x, KerasTensor): shape = list(x.shape) if fill_value: for i, e in enumerate(shape): if e is None: shape[i] = fill_value return torch.ones( size=shape, dtype=TORCH_DTYPES[x.dtype], device=get_device(), ) return x def convert_torch_to_keras_tensor(x): """Convert `torch.Tensor`s to `KerasTensor`s.""" if is_tensor(x): return KerasTensor(x.shape, standardize_dtype(x.dtype)) return x def symbolic_call(fn, args, kwargs, fill_value): """Call `fn` to infer output shape and dtype.""" try: # First try instantiating all tensors on the `"meta"` device, # which should give a "zero flop" way to trace shape, but does # not have universal support with torch operations. with device_scope("meta"): meta_args, meta_kwargs = tree.map_structure( lambda x: convert_keras_tensor_to_torch(x, fill_value), (args, kwargs), ) return fn(*meta_args, **meta_kwargs) except: with device_scope(DEFAULT_DEVICE): # If the `"meta"` device placement fails, fall back to tracing # eagerly with tensors on the default device. This will be # more robust, but more expensive. 
eager_args, eager_kwargs = tree.map_structure( lambda x: convert_keras_tensor_to_torch(x, fill_value), (args, kwargs), ) return fn(*eager_args, **eager_kwargs) with StatelessScope(), SymbolicScope(), torch.no_grad(): outputs = symbolic_call(fn, args, kwargs, fill_value=83) none_in_shape = any( builtins.map(has_none_shape, tree.flatten((args, kwargs))) ) if none_in_shape: outputs_1 = outputs outputs_2 = symbolic_call(fn, args, kwargs, fill_value=89) flat_out_1 = tree.flatten(outputs_1) flat_out_2 = tree.flatten(outputs_2) flat_out = [] for x1, x2 in zip(flat_out_1, flat_out_2): shape = list(x1.shape) for i, e in enumerate(x2.shape): if e != shape[i]: shape[i] = None flat_out.append(KerasTensor(shape, standardize_dtype(x1.dtype))) outputs = tree.pack_sequence_as(outputs_1, flat_out) output_spec = tree.map_structure(convert_torch_to_keras_tensor, outputs) return output_spec def cond(pred, true_fn, false_fn): # When symbolic execution, take pred as true. if get_device() == "meta": return true_fn() if pred: return true_fn() return false_fn() def vectorized_map(function, elements): return torch.vmap(function)(elements) def map(f, xs): def g(_, x): return (), f(x) _, ys = scan(g, (), xs) return ys def scan(f, init, xs=None, length=None, reverse=False, unroll=1): # Ref: jax.lax.scan if not callable(f): raise TypeError(f"`f` should be a callable. Received: f={f}") if not isinstance(unroll, bool): if not isinstance(unroll, int) or unroll < 1: raise ValueError( "`unroll` must be an positive integer or boolean. " f"Received: unroll={unroll}" ) if xs is None and length is None: raise ValueError("Got no `xs` to scan over and `length` not provided.") input_is_sequence = tree.is_nested(xs) output_is_sequence = tree.is_nested(init) def pack_input(x): return tree.pack_sequence_as(xs, x) if input_is_sequence else x[0] def pack_output(x): return tree.pack_sequence_as(init, x) if output_is_sequence else x[0] if xs is None: xs_flat = [] n = int(length) else: xs_flat = tree.flatten(xs) xs_flat = [convert_to_tensor(elem) for elem in xs_flat] n = int(length) if length is not None else shape(xs_flat[0])[0] init_flat = tree.flatten(init) init_flat = [convert_to_tensor(init) for init in init_flat] init = pack_output(init_flat) dummy_y = [torch.zeros_like(init) for init in init_flat] carry = init ys = [] maybe_reversed = reversed if reverse else lambda x: x for i in maybe_reversed(range(n)): xs_slice = [x[i] for x in xs_flat] packed_xs = pack_input(xs_slice) if len(xs_slice) > 0 else None carry, y = f(carry, packed_xs) ys.append(y if y is not None else dummy_y) stacked_y = tree.map_structure( lambda *ys: torch.stack(ys), *maybe_reversed(ys) ) return carry, stacked_y def associative_scan(f, elems, reverse=False, axis=0): # Ref: jax.lax.associative_scan if not callable(f): raise TypeError(f"`f` should be a callable. Received: f={f}") elems_flat = tree.flatten(elems) elems_flat = [convert_to_tensor(elem) for elem in elems_flat] if reverse: elems_flat = [torch.flip(elem, (axis,)) for elem in elems_flat] def _combine(a_flat, b_flat): a_flat = [convert_to_tensor(a) for a in a_flat] b_flat = [convert_to_tensor(b) for b in b_flat] a = tree.pack_sequence_as(elems, a_flat) b = tree.pack_sequence_as(elems, b_flat) c = f(a, b) c_flat = tree.flatten(c) return c_flat num_elems = int(elems_flat[0].shape[axis]) if not all(int(elem.shape[axis]) == num_elems for elem in elems_flat[1:]): raise ValueError( "Array inputs to associative_scan must have the same " "first dimension. 
(saw: {})".format( [elem.shape for elem in elems_flat] ) ) def _interleave(a, b, axis): """Given two Tensors of static shape, interleave them along axis.""" assert ( a.shape[axis] == b.shape[axis] or a.shape[axis] == b.shape[axis] + 1 ) # we want to get a: [a1, a2], b: [b1, b2] # to a: [a1, 0, a2, 0], b: [0, b1, 0, b2] a_shape = list(a.shape) a_shape[axis] = a.shape[axis] * 2 - 1 b_shape = list(b.shape) b_shape[axis] = b.shape[axis] * 2 - 1 a_dil = torch.zeros(a_shape) slice_along_axis(a_dil, 0, None, 2, axis).copy_(a) b_dil = torch.zeros(b_shape) slice_along_axis(b_dil, 0, None, 2, axis).copy_(b) a_pad = [[0, 0] for _ in range(a.dim())] a_pad[axis][-1] = 1 if a.shape[axis] == b.shape[axis] else 0 a_pad = a_pad[::-1] a_pad = tree.flatten(a_pad) b_pad = [[0, 0] for _ in range(b.dim())] b_pad[axis] = [1, 0] if a.shape[axis] == b.shape[axis] else [1, 1] b_pad = b_pad[::-1] b_pad = tree.flatten(b_pad) op = torch.bitwise_or if a.dtype == torch.bool else torch.add return op( torch.nn.functional.pad(a_dil, a_pad), torch.nn.functional.pad(b_dil, b_pad), ) def _scan(elems): num_elems = elems[0].shape[axis] if num_elems < 2: return elems reduced_elems = _combine( [ slice_along_axis(elem, 0, -1, step=2, axis=axis) for elem in elems ], [ slice_along_axis(elem, 1, None, step=2, axis=axis) for elem in elems ], ) odd_elems = _scan(reduced_elems) if num_elems % 2 == 0: even_elems = _combine( [slice_along_axis(e, 0, -1, axis=axis) for e in odd_elems], [ slice_along_axis(e, 2, None, step=2, axis=axis) for e in elems ], ) else: even_elems = _combine( odd_elems, [ slice_along_axis(e, 2, None, step=2, axis=axis) for e in elems ], ) even_elems = [ torch.cat( [slice_along_axis(elem, 0, 1, axis=axis), result], dim=axis, ) for (elem, result) in zip(elems, even_elems) ] return list( builtins.map( functools.partial(_interleave, axis=axis), even_elems, odd_elems ) ) scans = _scan(elems_flat) if reverse: scans = [torch.flip(scanned, (axis,)) for scanned in scans] return tree.pack_sequence_as(elems, scans) def scatter(indices, values, shape): indices = convert_to_tensor(indices) values = convert_to_tensor(values) zeros = torch.zeros(shape, dtype=values.dtype, device=get_device()) index_length = indices.shape[-1] value_shape = shape[index_length:] indices = torch.reshape(indices, [-1, index_length]) values = torch.reshape(values, [-1] + list(value_shape)) for i in range(indices.shape[0]): index = indices[i] zeros[tuple(index)] += values[i] return zeros def scatter_update(inputs, indices, updates): inputs = convert_to_tensor(inputs) indices = convert_to_tensor(indices, dtype="int64") updates = convert_to_tensor(updates, dtype=inputs.dtype) indices = torch.transpose(indices, 0, 1) outputs = torch.clone(inputs) outputs[tuple(indices)] = updates return outputs def slice(inputs, start_indices, shape): shape_dtype = to_torch_dtype("int64") inputs = convert_to_tensor(inputs) start_indices = convert_to_tensor(start_indices).to(shape_dtype) shape = convert_to_tensor(shape).to(shape_dtype) python_slice = __builtins__["slice"] slices = [ python_slice(start_index, start_index + length) for start_index, length in zip(start_indices, shape) ] return inputs[slices] def slice_update(inputs, start_indices, updates): shape_dtype = to_torch_dtype("int64") inputs = convert_to_tensor(inputs) start_indices = convert_to_tensor(start_indices).to(shape_dtype) updates = convert_to_tensor(updates) python_slice = __builtins__["slice"] slices = [ python_slice(start_index, start_index + update_length) for start_index, update_length in 
zip(start_indices, updates.shape) ] outputs = torch.clone(inputs) outputs[slices] = updates return outputs def switch(index, branches, *operands): index = convert_to_tensor(index, "int32") index = torch.clamp(index, 0, len(branches) - 1) return branches[index](*operands) def while_loop( cond, body, loop_vars, maximum_iterations=None, ): current_iter = 0 iteration_check = ( lambda iter: maximum_iterations is None or iter < maximum_iterations ) is_tuple = isinstance(loop_vars, (tuple, list)) loop_vars = tuple(loop_vars) if is_tuple else (loop_vars,) loop_vars = tree.map_structure(convert_to_tensor, loop_vars) while cond(*loop_vars) and iteration_check(current_iter): loop_vars = body(*loop_vars) if not isinstance(loop_vars, (list, tuple)): loop_vars = (loop_vars,) loop_vars = tuple(loop_vars) current_iter += 1 return loop_vars if is_tuple else loop_vars[0] def fori_loop(lower, upper, body_fun, init_val): val = init_val for i in range(lower, upper): val = body_fun(i, val) return val def stop_gradient(variable): if isinstance(variable, Variable): variable = variable.value # We can't use `.requires_grad_(False)` here since it only # works when the tensor is a leaf node in the graph. return variable.detach() def unstack(x, num=None, axis=0): return x.unbind(axis) def random_seed_dtype(): # uint32 doesn't exist in torch, use int32 instead. return "int32" def remat(f): """Implementation of rematerialization. Args: f: The function or operation to rematerialize. Returns: A function wrapping f that defines a custom gradient, which recomputes f on the backwards pass of a gradient call. """ def wrapped(*args, **kwargs): return torch.utils.checkpoint.checkpoint(f, *args, use_reentrant=False) return wrapped
Variable
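Of the helpers above, scan is the densest: it follows the jax.lax.scan contract it cites, threading a carry through the leading axis and stacking the per-step outputs. A stripped-down sketch of that contract on plain tensors (a running sum), omitting the nested-structure, reverse, and unroll handling:

import torch

def scan_like(f, init, xs):
    # f: (carry, x) -> (new_carry, y); returns the final carry and stacked ys.
    carry, ys = init, []
    for i in range(xs.shape[0]):
        carry, y = f(carry, xs[i])
        ys.append(y)
    return carry, torch.stack(ys)

carry, ys = scan_like(lambda c, x: (c + x, c + x), torch.tensor(0.0), torch.arange(4.0))
assert carry.item() == 6.0
assert ys.tolist() == [0.0, 1.0, 3.0, 6.0]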
python
jmcnamara__XlsxWriter
xlsxwriter/test/workbook/test_custom_sheet.py
{ "start": 1375, "end": 1966 }
class ____(unittest.TestCase): """ Test the Workbook _check_sheetname() method. """ def setUp(self): self.workbook = MyWorkbook() def tearDown(self): self.workbook.fileclosed = 1 def test_check_chartsheet(self): """Test the _check_sheetname() method""" sheet = self.workbook.add_chartsheet() assert isinstance(sheet, MyChartsheet) def test_check_worksheet(self): """Test the _check_sheetname() method""" sheet = self.workbook.add_worksheet() assert isinstance(sheet, MyWorksheet)
TestCustomWorkBook
python
allegroai__clearml
clearml/utilities/pyhocon/config_tree.py
{ "start": 16474, "end": 16789 }
class ____(list): def __init__(self, iterable=[]): new_list = list(iterable) super(ConfigList, self).__init__(new_list) for index, value in enumerate(new_list): if isinstance(value, ConfigValues): value.parent = self value.key = index
ConfigList
python
kamyu104__LeetCode-Solutions
Python/excel-sheet-column-title.py
{ "start": 32, "end": 321 }
class ____(object): def convertToTitle(self, n): """ :type n: int :rtype: str """ result = [] while n: result += chr((n-1)%26 + ord('A')) n = (n-1)//26 result.reverse() return "".join(result)
Solution
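convertToTitle is bijective base-26: there is no zero digit, so each step subtracts one before taking the remainder (1 -> 'A', 26 -> 'Z', 27 -> 'AA'). Worked through for n = 28: (28 - 1) % 26 = 1 gives 'B', then n becomes 27 // 26 = 1 and (1 - 1) % 26 = 0 gives 'A'; reversing yields 'AB'. The same routine as a standalone function with a few checks:

def convert_to_title(n: int) -> str:
    out = []
    while n:
        out.append(chr((n - 1) % 26 + ord('A')))  # shift by 1: no zero digit
        n = (n - 1) // 26
    return ''.join(reversed(out))

assert convert_to_title(1) == 'A'
assert convert_to_title(26) == 'Z'
assert convert_to_title(28) == 'AB'
assert convert_to_title(701) == 'ZY'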
python
pytorch__pytorch
torch/testing/_internal/common_dist_composable.py
{ "start": 98, "end": 518 }
class ____(nn.Module): def __init__(self, device: torch.device): super().__init__() self.l1 = nn.Linear(100, 100, device=device) self.seq = nn.Sequential( nn.ReLU(), nn.Linear(100, 100, device=device), nn.ReLU(), ) self.l2 = nn.Linear(100, 100, device=device) def forward(self, x): return self.l2(self.seq(self.l1(x)))
UnitModule
python
apache__airflow
providers/google/tests/unit/google/cloud/transfers/test_sql_to_gcs.py
{ "start": 2820, "end": 20358 }
class ____: @pytest.mark.db_test @mock.patch("airflow.providers.google.cloud.transfers.sql_to_gcs.NamedTemporaryFile") @mock.patch("csv.writer") @mock.patch.object(GCSHook, "upload") @mock.patch.object(DummySQLToGCSOperator, "query") @mock.patch.object(DummySQLToGCSOperator, "convert_type") def test_exec(self, mock_convert_type, mock_query, mock_upload, mock_writer, mock_tempfile): cursor_mock = Mock() cursor_mock.description = CURSOR_DESCRIPTION cursor_mock.__iter__ = Mock(return_value=iter(INPUT_DATA)) mock_query.return_value = cursor_mock mock_convert_type.return_value = "convert_type_return_value" mock_file = mock_tempfile.return_value mock_file.tell.return_value = 3 mock_file.name = TMP_FILE_NAME # Test CSV operator = DummySQLToGCSOperator( sql=SQL, bucket=BUCKET, filename=FILENAME, task_id=TASK_ID, schema_filename=SCHEMA_FILE, approx_max_file_size_bytes=1, export_format="csv", gzip=True, schema=SCHEMA, gcp_conn_id="google_cloud_default", upload_metadata=True, ) result = operator.execute(context=dict()) assert result == { "bucket": "TEST-BUCKET-1", "total_row_count": 3, "total_files": 3, "files": [ {"file_name": "test_results_0.csv", "file_mime_type": "text/csv", "file_row_count": 1}, {"file_name": "test_results_1.csv", "file_mime_type": "text/csv", "file_row_count": 1}, {"file_name": "test_results_2.csv", "file_mime_type": "text/csv", "file_row_count": 1}, ], } mock_query.assert_called_once() assert mock_writer.return_value.writerow.call_args_list == [ mock.call(COLUMNS), mock.call(ROW), mock.call(COLUMNS), mock.call(ROW), mock.call(COLUMNS), mock.call(ROW), mock.call(COLUMNS), ] mock_file.flush.assert_has_calls([mock.call(), mock.call(), mock.call(), mock.call()]) csv_calls = [] for i in range(3): csv_calls.append( mock.call( BUCKET, FILENAME.format(i), TMP_FILE_NAME, mime_type="text/csv", gzip=True, metadata={"row_count": 1}, ) ) json_call = mock.call( BUCKET, SCHEMA_FILE, TMP_FILE_NAME, mime_type=APP_JSON, gzip=False, metadata=None ) upload_calls = [json_call, csv_calls[0], csv_calls[1], csv_calls[2]] mock_upload.assert_has_calls(upload_calls) mock_file.close.assert_has_calls([mock.call(), mock.call(), mock.call(), mock.call()]) mock_query.reset_mock() mock_file.flush.reset_mock() mock_upload.reset_mock() mock_file.close.reset_mock() cursor_mock.reset_mock() cursor_mock.__iter__ = Mock(return_value=iter(INPUT_DATA)) # Test JSON operator = DummySQLToGCSOperator( sql=SQL, bucket=BUCKET, filename=FILENAME, task_id=TASK_ID, export_format="json", schema=SCHEMA ) result = operator.execute(context=dict()) assert result == { "bucket": "TEST-BUCKET-1", "total_row_count": 3, "total_files": 1, "files": [ {"file_name": "test_results_0.csv", "file_mime_type": "application/json", "file_row_count": 3} ], } mock_query.assert_called_once() mock_file.write.call_args_list == [ mock.call(OUTPUT_DATA), mock.call(b"\n"), mock.call(OUTPUT_DATA), mock.call(b"\n"), mock.call(OUTPUT_DATA), mock.call(b"\n"), ] mock_upload.assert_called_once_with( BUCKET, FILENAME.format(0), TMP_FILE_NAME, mime_type=APP_JSON, gzip=False, metadata=None ) mock_file.close.assert_called_once() mock_query.reset_mock() mock_file.flush.reset_mock() mock_upload.reset_mock() mock_file.close.reset_mock() mock_file.write.reset_mock() cursor_mock.reset_mock() cursor_mock.__iter__ = Mock(return_value=iter(INPUT_DATA)) # Test Metadata Upload operator = DummySQLToGCSOperator( sql=SQL, bucket=BUCKET, filename=FILENAME, task_id=TASK_ID, export_format="json", schema=SCHEMA, upload_metadata=True, ) result = operator.execute(context=dict()) 
assert result == { "bucket": "TEST-BUCKET-1", "total_row_count": 3, "total_files": 1, "files": [ {"file_name": "test_results_0.csv", "file_mime_type": "application/json", "file_row_count": 3} ], } mock_query.assert_called_once() mock_file.write.call_args_list == [ mock.call(OUTPUT_DATA), mock.call(b"\n"), mock.call(OUTPUT_DATA), mock.call(b"\n"), mock.call(OUTPUT_DATA), mock.call(b"\n"), ] mock_file.flush.assert_called_once() mock_upload.assert_called_once_with( BUCKET, FILENAME.format(0), TMP_FILE_NAME, mime_type=APP_JSON, gzip=False, metadata={"row_count": 3}, ) mock_file.close.assert_called_once() mock_query.reset_mock() mock_file.flush.reset_mock() mock_upload.reset_mock() mock_file.close.reset_mock() cursor_mock.reset_mock() cursor_mock.__iter__ = Mock(return_value=iter(INPUT_DATA)) # Test parquet operator = DummySQLToGCSOperator( sql=SQL, bucket=BUCKET, filename=FILENAME, task_id=TASK_ID, export_format="parquet", schema=SCHEMA ) result = operator.execute(context=dict()) assert result == { "bucket": "TEST-BUCKET-1", "total_row_count": 3, "total_files": 1, "files": [ { "file_name": "test_results_0.csv", "file_mime_type": "application/octet-stream", "file_row_count": 3, } ], } mock_query.assert_called_once() mock_file.flush.assert_called_once() mock_upload.assert_called_once_with( BUCKET, FILENAME.format(0), TMP_FILE_NAME, mime_type="application/octet-stream", gzip=False, metadata=None, ) mock_file.close.assert_called_once() mock_query.reset_mock() mock_file.flush.reset_mock() mock_upload.reset_mock() mock_file.close.reset_mock() cursor_mock.reset_mock() cursor_mock.__iter__ = Mock(return_value=iter(INPUT_DATA)) # Test partition columns operator = DummySQLToGCSOperator( sql=SQL, bucket=BUCKET, filename=FILENAME, task_id=TASK_ID, export_format="parquet", schema=SCHEMA, partition_columns=PARTITION_COLUMNS, ) result = operator.execute(context=dict()) assert result == { "bucket": "TEST-BUCKET-1", "total_row_count": 3, "total_files": 3, "files": [ { "file_name": "test_results_0.csv", "file_mime_type": "application/octet-stream", "file_row_count": 1, }, { "file_name": "test_results_1.csv", "file_mime_type": "application/octet-stream", "file_row_count": 1, }, { "file_name": "test_results_2.csv", "file_mime_type": "application/octet-stream", "file_row_count": 1, }, ], } mock_query.assert_called_once() assert mock_file.flush.call_count == 3 assert mock_file.close.call_count == 3 mock_upload.assert_has_calls( [ mock.call( BUCKET, f"column_b={row[1]}/column_c={row[2]}/test_results_{i}.csv", TMP_FILE_NAME, mime_type="application/octet-stream", gzip=False, metadata=None, ) for i, row in enumerate(INPUT_DATA) ] ) mock_query.reset_mock() mock_file.flush.reset_mock() mock_upload.reset_mock() mock_file.close.reset_mock() cursor_mock.reset_mock() cursor_mock.__iter__ = Mock(return_value=iter(INPUT_DATA)) # Test null marker cursor_mock.__iter__ = Mock(return_value=iter(INPUT_DATA)) mock_convert_type.return_value = None operator = DummySQLToGCSOperator( sql=SQL, bucket=BUCKET, filename=FILENAME, task_id=TASK_ID, export_format="csv", null_marker="NULL", ) result = operator.execute(context=dict()) assert result == { "bucket": "TEST-BUCKET-1", "total_row_count": 3, "total_files": 1, "files": [{"file_name": "test_results_0.csv", "file_mime_type": "text/csv", "file_row_count": 3}], } mock_writer.return_value.writerow.assert_has_calls( [ mock.call(COLUMNS), mock.call(["NULL", "NULL", "NULL"]), mock.call(["NULL", "NULL", "NULL"]), mock.call(["NULL", "NULL", "NULL"]), ] ) def 
test__write_local_data_files_csv(self): op = DummySQLToGCSOperator( sql=SQL, bucket=BUCKET, filename=FILENAME, task_id=TASK_ID, schema_filename=SCHEMA_FILE, export_format="csv", gzip=False, schema=SCHEMA, gcp_conn_id="google_cloud_default", ) cursor = MagicMock() cursor.__iter__.return_value = INPUT_DATA cursor.description = CURSOR_DESCRIPTION files = op._write_local_data_files(cursor) file = next(files)["file_handle"] file.flush() df = pd.read_csv(file.name) assert df.equals(OUTPUT_DF) def test__write_local_data_files_json(self): op = DummySQLToGCSOperator( sql=SQL, bucket=BUCKET, filename=FILENAME, task_id=TASK_ID, schema_filename=SCHEMA_FILE, export_format="json", gzip=False, schema=SCHEMA, gcp_conn_id="google_cloud_default", ) cursor = MagicMock() cursor.__iter__.return_value = INPUT_DATA cursor.description = CURSOR_DESCRIPTION files = op._write_local_data_files(cursor) file = next(files)["file_handle"] file.flush() df = pd.read_json(file.name, orient="records", lines=True) assert df.equals(OUTPUT_DF) def test__write_local_data_files_parquet(self): op = DummySQLToGCSOperator( sql=SQL, bucket=BUCKET, filename=FILENAME, task_id=TASK_ID, schema_filename=SCHEMA_FILE, export_format="parquet", gzip=False, schema=SCHEMA, gcp_conn_id="google_cloud_default", ) cursor = MagicMock() cursor.__iter__.return_value = INPUT_DATA cursor.description = CURSOR_DESCRIPTION files = op._write_local_data_files(cursor) file = next(files)["file_handle"] file.flush() df = pd.read_parquet(file.name) assert df.equals(OUTPUT_DF) def test__write_local_data_files_parquet_with_row_size(self): import math import pyarrow.parquet as pq op = DummySQLToGCSOperator( sql=SQL, bucket=BUCKET, filename=FILENAME, task_id=TASK_ID, schema_filename=SCHEMA_FILE, export_format="parquet", gzip=False, schema=SCHEMA, gcp_conn_id="google_cloud_default", parquet_row_group_size=8, ) input_data = INPUT_DATA * 10 output_df = pd.DataFrame([["convert_type_return_value"] * 3] * 30, columns=COLUMNS) cursor = MagicMock() cursor.__iter__.return_value = input_data cursor.description = CURSOR_DESCRIPTION files = op._write_local_data_files(cursor) file = next(files)["file_handle"] file.flush() df = pd.read_parquet(file.name) assert df.equals(output_df) parquet_file = pq.ParquetFile(file.name) assert parquet_file.num_row_groups == math.ceil((len(INPUT_DATA) * 10) / op.parquet_row_group_size) tolerance = 1 for i in range(parquet_file.num_row_groups): row_group_size = parquet_file.metadata.row_group(i).num_rows assert row_group_size == op.parquet_row_group_size or (tolerance := tolerance - 1) >= 0 def test__write_local_data_files_json_with_exclude_columns(self): op = DummySQLToGCSOperator( sql=SQL, bucket=BUCKET, filename=FILENAME, task_id=TASK_ID, schema_filename=SCHEMA_FILE, export_format="json", gzip=False, schema=SCHEMA, gcp_conn_id="google_cloud_default", exclude_columns=EXCLUDE_COLUMNS, ) cursor = MagicMock() cursor.__iter__.return_value = INPUT_DATA cursor.description = CURSOR_DESCRIPTION files = op._write_local_data_files(cursor) file = next(files)["file_handle"] file.flush() df = pd.read_json(file.name, orient="records", lines=True) assert df.equals(OUTPUT_DF_WITH_EXCLUDE_COLUMNS) def test__write_local_data_files_parquet_with_partition_columns(self): op = DummySQLToGCSOperator( sql=SQL, bucket=BUCKET, filename=FILENAME, task_id=TASK_ID, schema_filename=SCHEMA_FILE, export_format="parquet", gzip=False, schema=SCHEMA, gcp_conn_id="google_cloud_default", partition_columns=PARTITION_COLUMNS, ) cursor = MagicMock() cursor.__iter__.return_value = 
INPUT_DATA cursor.description = CURSOR_DESCRIPTION local_data_files = op._write_local_data_files(cursor) concat_dfs = [] for local_data_file in local_data_files: file = local_data_file["file_handle"] file.flush() df = pd.read_parquet(file.name) concat_dfs.append(df) concat_df = pd.concat(concat_dfs, ignore_index=True) assert concat_df.equals(OUTPUT_DF) def test__write_local_data_files_csv_does_not_write_on_empty_rows(self): op = DummySQLToGCSOperator( sql=SQL, bucket=BUCKET, filename=FILENAME, task_id=TASK_ID, schema_filename=SCHEMA_FILE, export_format="csv", gzip=False, schema=SCHEMA, gcp_conn_id="google_cloud_default", ) cursor = MagicMock() cursor.__iter__.return_value = EMPTY_INPUT_DATA cursor.description = CURSOR_DESCRIPTION files = op._write_local_data_files(cursor) # Raises StopIteration when next is called because generator returns no files with pytest.raises(StopIteration): next(files) def test__write_local_data_files_csv_writes_empty_file_with_write_on_empty(self): op = DummySQLToGCSOperator( sql=SQL, bucket=BUCKET, filename=FILENAME, task_id=TASK_ID, schema_filename=SCHEMA_FILE, export_format="csv", gzip=False, schema=SCHEMA, gcp_conn_id="google_cloud_default", write_on_empty=True, ) cursor = MagicMock() cursor.__iter__.return_value = EMPTY_INPUT_DATA cursor.description = CURSOR_DESCRIPTION files = op._write_local_data_files(cursor) file = next(files)["file_handle"] file.flush() df = pd.read_csv(file.name) assert len(df.index) == 0 @pytest.mark.parametrize( ("filename", "expected_name"), ( ("file_{}.csv", "/"), ("dir/file_{}.csv", "dir"), ("{}.csv", "/"), ("file.csv", "file.csv"), ("dir/file.csv", "dir/file.csv"), ), ) def test__get_openlineage_output_datasets(self, filename, expected_name): op = DummySQLToGCSOperator( task_id=TASK_ID, sql="SELECT * FROM a.b", bucket="my-bucket", filename=filename ) result = op._get_openlineage_output_datasets() assert len(result) == 1 assert result[0].namespace == "gs://my-bucket" assert result[0].name == expected_name
TestBaseSQLToGCSOperator
python
falconry__falcon
falcon/routing/converters.py
{ "start": 1036, "end": 2168 }
class ____(metaclass=abc.ABCMeta): """Abstract base class for URI template field converters.""" CONSUME_MULTIPLE_SEGMENTS: ClassVar[bool] = False """When set to ``True`` it indicates that this converter will consume multiple URL path segments. Currently a converter with ``CONSUME_MULTIPLE_SEGMENTS=True`` must be at the end of the URL template effectively meaning that it will consume all of the remaining URL path segments. """ @abc.abstractmethod def convert(self, value: str) -> Any: """Convert a URI template field value to another format or type. Args: value (str or list[str]): Original string to convert. If ``CONSUME_MULTIPLE_SEGMENTS=True`` this value is a list of strings containing the path segments matched by the converter. Returns: object: Converted field value, or ``None`` if the field can not be converted. """ def _consumes_multiple_segments(converter: object) -> bool: return getattr(converter, 'CONSUME_MULTIPLE_SEGMENTS', False)
BaseConverter
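A converter that plugs into this interface only needs a convert method returning the parsed value, or None when the segment does not match. A hypothetical subclass sketch (Falcon ships its own converters; this DateConverter and its date format are purely illustrative):

from datetime import date, datetime

from falcon.routing.converters import BaseConverter  # the class shown above

class DateConverter(BaseConverter):
    CONSUME_MULTIPLE_SEGMENTS = False  # matches a single path segment

    def convert(self, value: str):
        try:
            return datetime.strptime(value, '%Y-%m-%d').date()
        except ValueError:
            return None  # None tells the router this segment did not match

assert DateConverter().convert('2024-05-01') == date(2024, 5, 1)
assert DateConverter().convert('not-a-date') is None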
python
pytorch__pytorch
test/distributed/elastic/metrics/api_test.py
{ "start": 893, "end": 1026 }
class ____(Parent): # need to decorate the implementation not the abstract method! @prof def func(self): pass
Child
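The comment in this fixture is the whole point: a decorator on the abstract declaration is discarded as soon as a subclass redefines the method, so it must wrap the concrete override. A small sketch of the distinction, with a hypothetical counting decorator standing in for @prof:

import abc
import functools

def counted(fn):
    # Stand-in for a profiling decorator like @prof.
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        wrapper.calls += 1
        return fn(*args, **kwargs)
    wrapper.calls = 0
    return wrapper

class Base(abc.ABC):
    @abc.abstractmethod
    def func(self): ...

class Impl(Base):
    @counted  # decorate the implementation, not the abstract method
    def func(self):
        return 42

impl = Impl()
assert impl.func() == 42
assert Impl.func.calls == 1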
python
ansible__ansible
lib/ansible/_internal/_ssh/_ssh_agent.py
{ "start": 4963, "end": 5263 }
class ____(bytes, VariableSized): def to_blob(self) -> bytes: if length := len(self): return uint32(length).to_blob() + self else: return b"" @classmethod def from_blob(cls, blob: memoryview | bytes) -> t.Self: return cls(blob)
binary_string
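The to_blob above emits the SSH agent wire convention: a big-endian uint32 length followed by the bytes, with the empty string serializing to nothing. A plain-struct sketch of that framing plus a matching decoder (the decoder is an illustration only; the class above receives an already-sliced blob):

import struct

def encode_ssh_string(b: bytes) -> bytes:
    return struct.pack('>I', len(b)) + b if b else b''  # empty -> b'', as above

def decode_ssh_string(blob: bytes) -> bytes:
    if not blob:
        return b''
    (n,) = struct.unpack('>I', blob[:4])
    return bytes(blob[4:4 + n])

assert encode_ssh_string(b'') == b''
assert decode_ssh_string(encode_ssh_string(b'ssh-ed25519')) == b'ssh-ed25519'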
python
Farama-Foundation__Gymnasium
gymnasium/spaces/multi_binary.py
{ "start": 293, "end": 6565 }
class ____(Space[NDArray[np.int8]]): """An n-shape binary space. Elements of this space are binary arrays of a shape that is fixed during construction. Example: >>> from gymnasium.spaces import MultiBinary >>> observation_space = MultiBinary(5, seed=42) >>> observation_space.sample() array([1, 0, 1, 0, 1], dtype=int8) >>> observation_space = MultiBinary([3, 2], seed=42) >>> observation_space.sample() array([[1, 0], [1, 0], [1, 1]], dtype=int8) """ def __init__( self, n: NDArray[np.integer[Any]] | Sequence[int] | int, seed: int | np.random.Generator | None = None, ): """Constructor of :class:`MultiBinary` space. Args: n: This will fix the shape of elements of the space. It can either be an integer (if the space is flat) or some sort of sequence (tuple, list or np.ndarray) if there are multiple axes. seed: Optionally, you can use this argument to seed the RNG that is used to sample from the space. """ if isinstance(n, int): self.n = n = int(n) input_n = (n,) assert (np.asarray(input_n) > 0).all() # n (counts) have to be positive elif isinstance(n, (Sequence, np.ndarray)): self.n = input_n = tuple(int(i) for i in n) assert (np.asarray(input_n) > 0).all() # n (counts) have to be positive else: raise ValueError( f"Expected n to be an int or a sequence of ints, actual type: {type(n)}" ) super().__init__(input_n, np.int8, seed) @property def shape(self) -> tuple[int, ...]: """Has stricter type than gym.Space - never None.""" return self._shape # type: ignore @property def is_np_flattenable(self): """Checks whether this space can be flattened to a :class:`spaces.Box`.""" return True def sample( self, mask: MaskNDArray | None = None, probability: MaskNDArray | None = None ) -> NDArray[np.int8]: """Generates a single random sample from this space. A sample is drawn by independent, fair coin tosses (one toss per binary variable of the space). Args: mask: An optional ``np.ndarray`` to mask samples with expected shape of ``space.shape``. For ``mask == 0`` then the samples will be ``0``, for a ``mask == 1`` then the samples will be ``1``. For random samples, using a mask value of ``2``. The expected mask shape is the space shape and mask dtype is ``np.int8``. probability: An optional ``np.ndarray`` to mask samples with expected shape of space.shape where each element represents the probability of the corresponding sample element being a 1. The expected mask shape is the space shape and mask dtype is ``np.float64``. 
Returns: Sampled values from space """ if mask is not None and probability is not None: raise ValueError( f"Only one of `mask` or `probability` can be provided, actual values: mask={mask}, probability={probability}" ) if mask is not None: assert isinstance( mask, np.ndarray ), f"The expected type of the mask is np.ndarray, actual type: {type(mask)}" assert ( mask.dtype == np.int8 ), f"The expected dtype of the mask is np.int8, actual dtype: {mask.dtype}" assert ( mask.shape == self.shape ), f"The expected shape of the mask is {self.shape}, actual shape: {mask.shape}" assert np.all( (mask == 0) | (mask == 1) | (mask == 2) ), f"All values of a mask should be 0, 1 or 2, actual values: {mask}" return np.where( mask == 2, self.np_random.integers(low=0, high=2, size=self.n, dtype=self.dtype), mask.astype(self.dtype), ) elif probability is not None: assert isinstance( probability, np.ndarray ), f"The expected type of the probability is np.ndarray, actual type: {type(probability)}" assert ( probability.dtype == np.float64 ), f"The expected dtype of the probability is np.float64, actual dtype: {probability.dtype}" assert ( probability.shape == self.shape ), f"The expected shape of the probability is {self.shape}, actual shape: {probability}" assert np.all( np.logical_and(probability >= 0, probability <= 1) ), f"All values of the sample probability should be between 0 and 1, actual values: {probability}" return (self.np_random.random(size=self.shape) <= probability).astype( self.dtype ) else: return self.np_random.integers(low=0, high=2, size=self.n, dtype=self.dtype) def contains(self, x: Any) -> bool: """Return boolean specifying if x is a valid member of this space.""" if isinstance(x, Sequence): x = np.array(x) # Promote list to array for contains check return bool( isinstance(x, np.ndarray) and self.shape == x.shape and np.all(np.logical_or(x == 0, x == 1)) ) def to_jsonable(self, sample_n: Sequence[NDArray[np.int8]]) -> list[Sequence[int]]: """Convert a batch of samples from this space to a JSONable data type.""" return np.array(sample_n).tolist() def from_jsonable(self, sample_n: list[Sequence[int]]) -> list[NDArray[np.int8]]: """Convert a JSONable data type to a batch of samples from this space.""" return [np.asarray(sample, self.dtype) for sample in sample_n] def __repr__(self) -> str: """Gives a string representation of this space.""" return f"MultiBinary({self.n})" def __eq__(self, other: Any) -> bool: """Check whether `other` is equivalent to this instance.""" return isinstance(other, MultiBinary) and self.n == other.n
MultiBinary
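Both sampling modes documented above are directly exercisable: a mask pins elements to 0 or 1 (with 2 meaning sample freely), while probability gives each element its own chance of drawing a 1. Following the docstring's contract:

import numpy as np
from gymnasium.spaces import MultiBinary

space = MultiBinary(5, seed=0)

mask = np.array([0, 1, 2, 2, 1], dtype=np.int8)  # 0/1 force the bit, 2 samples randomly
s = space.sample(mask=mask)
assert s[0] == 0 and s[1] == 1 and s[4] == 1

p = np.array([0.0, 1.0, 0.5, 0.5, 1.0], dtype=np.float64)  # per-element P(bit == 1)
s = space.sample(probability=p)
assert s[1] == 1 and s[4] == 1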
python
PyCQA__bandit
tests/unit/formatters/test_sarif.py
{ "start": 341, "end": 4983 }
class ____(testtools.TestCase): def setUp(self): super().setUp() conf = config.BanditConfig() self.manager = manager.BanditManager(conf, "file") (tmp_fd, self.tmp_fname) = tempfile.mkstemp() self.context = { "filename": self.tmp_fname, "lineno": 4, "linerange": [4], "code": ( "import socket\n\n" "s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n" "s.bind(('0.0.0.0', 31137))" ), } self.check_name = "hardcoded_bind_all_interfaces" self.issue = issue.Issue( severity=bandit.MEDIUM, cwe=issue.Cwe.MULTIPLE_BINDS, confidence=bandit.MEDIUM, text="Possible binding to all interfaces.", test_id="B104", ) self.candidates = [ issue.Issue( issue.Cwe.MULTIPLE_BINDS, bandit.LOW, bandit.LOW, "Candidate A", lineno=1, ), issue.Issue( bandit.HIGH, issue.Cwe.MULTIPLE_BINDS, bandit.HIGH, "Candiate B", lineno=2, ), ] self.manager.out_file = self.tmp_fname self.issue.fname = self.context["filename"] self.issue.lineno = self.context["lineno"] self.issue.linerange = self.context["linerange"] self.issue.code = self.context["code"] self.issue.test = self.check_name self.manager.results.append(self.issue) self.manager.metrics = metrics.Metrics() # mock up the metrics for key in ["_totals", "binding.py"]: self.manager.metrics.data[key] = {"loc": 4, "nosec": 2} for criteria, default in constants.CRITERIA: for rank in constants.RANKING: self.manager.metrics.data[key][f"{criteria}.{rank}"] = 0 @mock.patch("bandit.core.manager.BanditManager.get_issue_list") def test_report(self, get_issue_list): self.manager.files_list = ["binding.py"] self.manager.scores = [ { "SEVERITY": [0] * len(constants.RANKING), "CONFIDENCE": [0] * len(constants.RANKING), } ] get_issue_list.return_value = collections.OrderedDict( [(self.issue, self.candidates)] ) with open(self.tmp_fname, "w") as tmp_file: sarif.report( self.manager, tmp_file, self.issue.severity, self.issue.confidence, ) with open(self.tmp_fname) as f: data = json.loads(f.read()) run = data["runs"][0] self.assertEqual(sarif.SCHEMA_URI, data["$schema"]) self.assertEqual(sarif.SCHEMA_VER, data["version"]) driver = run["tool"]["driver"] self.assertEqual("Bandit", driver["name"]) self.assertEqual(bandit.__author__, driver["organization"]) self.assertEqual(bandit.__version__, driver["semanticVersion"]) self.assertEqual("B104", driver["rules"][0]["id"]) self.assertEqual(self.check_name, driver["rules"][0]["name"]) self.assertIn("security", driver["rules"][0]["properties"]["tags"]) self.assertIn( "external/cwe/cwe-605", driver["rules"][0]["properties"]["tags"], ) self.assertEqual( "medium", driver["rules"][0]["properties"]["precision"] ) invocation = run["invocations"][0] self.assertTrue(invocation["executionSuccessful"]) self.assertIsNotNone(invocation["endTimeUtc"]) result = run["results"][0] # If the level is "warning" like in this case, SARIF will remove # from output, as "warning" is the default value. self.assertIsNone(result.get("level")) self.assertEqual(self.issue.text, result["message"]["text"]) physicalLocation = result["locations"][0]["physicalLocation"] self.assertEqual( self.context["linerange"][0], physicalLocation["region"]["startLine"], ) self.assertEqual( self.context["linerange"][0], physicalLocation["region"]["endLine"], ) self.assertIn( self.tmp_fname, physicalLocation["artifactLocation"]["uri"], )
SarifFormatterTests
python
bokeh__bokeh
src/bokeh/sphinxext/_internal/bokeh_color.py
{ "start": 1875, "end": 2752 }
class ____(BokehDirective): has_content = False required_arguments = 1 option_spec = { "module": unchanged, } def run(self): color = self.arguments[0] html = COLOR_DETAIL.render(color=getattr(named, color).to_css(), text=color) node = nodes.raw("", html, format="html") return [node] def setup(app): """ Required Sphinx extension setup function. """ app.add_directive_to_domain("py", "bokeh-color", BokehColorDirective) return PARALLEL_SAFE # ----------------------------------------------------------------------------- # Private API # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # Code # -----------------------------------------------------------------------------
BokehColorDirective
python
eventlet__eventlet
eventlet/backdoor.py
{ "start": 292, "end": 891 }
class ____: def __init__(self, f): self.f = f def isatty(self): return True def flush(self): pass def write(self, data, *a, **kw): try: self.f.write(data, *a, **kw) self.f.flush() except OSError as e: if get_errno(e) != errno.EPIPE: raise def readline(self, *a): return self.f.readline(*a).replace('\r\n', '\n') def __getattr__(self, attr): return getattr(self.f, attr) # @@tavis: the `locals` args below mask the built-in function. Should # be renamed.
FileProxy
python
bokeh__bokeh
src/bokeh/events.py
{ "start": 7422, "end": 7587 }
class ____(ConnectionEvent): ''' Announce when a connection to the client has been reconnected. ''' event_name = 'client_reconnected'
ClientReconnected
python
django__django
django/template/base.py
{ "start": 15841, "end": 17121 }
class ____(Lexer): def _tag_re_split_positions(self): last = 0 for match in tag_re.finditer(self.template_string): start, end = match.span() yield last, start yield start, end last = end yield last, len(self.template_string) # This parallels the use of tag_re.split() in Lexer.tokenize(). def _tag_re_split(self): for position in self._tag_re_split_positions(): yield self.template_string[slice(*position)], position def tokenize(self): """ Split a template string into tokens and annotates each token with its start and end position in the source. This is slower than the default lexer so only use it when debug is True. """ # For maintainability, it is helpful if the implementation below can # continue to closely parallel Lexer.tokenize()'s implementation. in_tag = False lineno = 1 result = [] for token_string, position in self._tag_re_split(): if token_string: result.append(self.create_token(token_string, position, lineno, in_tag)) lineno += token_string.count("\n") in_tag = not in_tag return result
DebugLexer
python
viewflow__viewflow
tests/fsm/test_fsm__basics__zero_target.py
{ "start": 57, "end": 119 }
class ____: ZERO = 0 NEW = 1 REMOVED = 2
ReviewState
python
dagster-io__dagster
python_modules/dagster/dagster/components/resolved/core_models.py
{ "start": 9235, "end": 9721 }
class ____(SharedAssetKwargs): """The attributes of an AssetSpec that can be changed before the AssetsDefinition is created. Typically used by components to allow overriding a default resolution of each AssetSpec. """ key: Optional[ResolvedAssetKey] = None key_prefix: Annotated[ Optional[CoercibleToAssetKeyPrefix], Resolver.default(description="Prefix the existing asset key with the provided value."), ] = None @record
AssetSpecUpdateKwargs
python
numba__numba
numba/tests/test_unsafe_intrinsics.py
{ "start": 5345, "end": 7619 }
class ____(TestCase): def test_zero_count(self): lz = njit(lambda x: leading_zeros(x)) tz = njit(lambda x: trailing_zeros(x)) evens = [2, 42, 126, 128] for T in types.unsigned_domain: self.assertTrue(tz(T(0)) == lz(T(0)) == T.bitwidth) for i in range(T.bitwidth): val = T(2 ** i) self.assertEqual(lz(val) + tz(val) + 1, T.bitwidth) for n in evens: self.assertGreater(tz(T(n)), 0) self.assertEqual(tz(T(n + 1)), 0) for T in types.signed_domain: self.assertTrue(tz(T(0)) == lz(T(0)) == T.bitwidth) for i in range(T.bitwidth - 1): val = T(2 ** i) self.assertEqual(lz(val) + tz(val) + 1, T.bitwidth) self.assertEqual(lz(-val), 0) self.assertEqual(tz(val), tz(-val)) for n in evens: if not T.minval <= n <= T.maxval: continue self.assertGreater(tz(T(n)), 0) self.assertEqual(tz(T(n + 1)), 0) def check_error_msg(self, func): cfunc = njit(lambda *x: func(*x)) func_name = func._name unsupported_types = filter( lambda x: not isinstance(x, types.Integer), types.number_domain ) for typ in sorted(unsupported_types, key=str): with self.assertRaises(TypingError) as e: cfunc(typ(2)) self.assertIn( "{} is only defined for integers, but value passed was '{}'." .format(func_name, typ), str(e.exception), ) # Testing w/ too many/few arguments def check(args, string): with self.assertRaises((TypingError, TypeError)) as e: cfunc(*args) self.assertIn( "{}() ".format(func_name), str(e.exception) ) check((1, 2), "takes 2 positional arguments but 3 were given") check((), "missing 1 required positional argument") def test_trailing_zeros_error(self): self.check_error_msg(trailing_zeros) def test_leading_zeros_error(self): self.check_error_msg(leading_zeros)
TestZeroCounts
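The loop invariant these tests assert is a bit-partition identity: for a power of two in a w-bit word, the lone set bit splits the word so that leading_zeros + trailing_zeros + 1 == w. The same identity checked in plain Python for w = 8 (clz/ctz here are illustrative helpers, not numba APIs):

def clz(x: int, width: int = 8) -> int:
    return width - x.bit_length()

def ctz(x: int) -> int:
    return (x & -x).bit_length() - 1 if x else 0

for i in range(8):
    v = 1 << i
    assert clz(v, 8) + ctz(v) + 1 == 8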
python
optuna__optuna
optuna/storages/_grpc/auto_generated/api_pb2_grpc.py
{ "start": 7503, "end": 19470 }
class ____(object): """* Optuna storage service defines APIs to interact with the storage. """ def CreateNewStudy(self, request, context): """* Create a new study. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def DeleteStudy(self, request, context): """* Delete a study. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def SetStudyUserAttribute(self, request, context): """* Set a study's user attribute. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def SetStudySystemAttribute(self, request, context): """* Set a study's system attribute. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetStudyIdFromName(self, request, context): """* Get a study id by its name. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetStudyNameFromId(self, request, context): """* Get a study name by its id. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetStudyDirections(self, request, context): """* Get study directions. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetStudyUserAttributes(self, request, context): """* Get study user attributes. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetStudySystemAttributes(self, request, context): """* Get study system attributes. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetAllStudies(self, request, context): """* Get all studies. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def CreateNewTrial(self, request, context): """* Create a new trial. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def SetTrialParameter(self, request, context): """* Set a trial parameter. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetTrialIdFromStudyIdTrialNumber(self, request, context): """* Get a trial id from its study id and trial number. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def SetTrialStateValues(self, request, context): """* Set trial state and values. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def SetTrialIntermediateValue(self, request, context): """* Set a trial intermediate value. 
""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def SetTrialUserAttribute(self, request, context): """* Set a trial user attribute. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def SetTrialSystemAttribute(self, request, context): """* Set a trial system attribute. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetTrial(self, request, context): """* Get a trial by its ID. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetTrials(self, request, context): """* Get trials in a study. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_StorageServiceServicer_to_server(servicer, server): rpc_method_handlers = { 'CreateNewStudy': grpc.unary_unary_rpc_method_handler( servicer.CreateNewStudy, request_deserializer=api__pb2.CreateNewStudyRequest.FromString, response_serializer=api__pb2.CreateNewStudyReply.SerializeToString, ), 'DeleteStudy': grpc.unary_unary_rpc_method_handler( servicer.DeleteStudy, request_deserializer=api__pb2.DeleteStudyRequest.FromString, response_serializer=api__pb2.DeleteStudyReply.SerializeToString, ), 'SetStudyUserAttribute': grpc.unary_unary_rpc_method_handler( servicer.SetStudyUserAttribute, request_deserializer=api__pb2.SetStudyUserAttributeRequest.FromString, response_serializer=api__pb2.SetStudyUserAttributeReply.SerializeToString, ), 'SetStudySystemAttribute': grpc.unary_unary_rpc_method_handler( servicer.SetStudySystemAttribute, request_deserializer=api__pb2.SetStudySystemAttributeRequest.FromString, response_serializer=api__pb2.SetStudySystemAttributeReply.SerializeToString, ), 'GetStudyIdFromName': grpc.unary_unary_rpc_method_handler( servicer.GetStudyIdFromName, request_deserializer=api__pb2.GetStudyIdFromNameRequest.FromString, response_serializer=api__pb2.GetStudyIdFromNameReply.SerializeToString, ), 'GetStudyNameFromId': grpc.unary_unary_rpc_method_handler( servicer.GetStudyNameFromId, request_deserializer=api__pb2.GetStudyNameFromIdRequest.FromString, response_serializer=api__pb2.GetStudyNameFromIdReply.SerializeToString, ), 'GetStudyDirections': grpc.unary_unary_rpc_method_handler( servicer.GetStudyDirections, request_deserializer=api__pb2.GetStudyDirectionsRequest.FromString, response_serializer=api__pb2.GetStudyDirectionsReply.SerializeToString, ), 'GetStudyUserAttributes': grpc.unary_unary_rpc_method_handler( servicer.GetStudyUserAttributes, request_deserializer=api__pb2.GetStudyUserAttributesRequest.FromString, response_serializer=api__pb2.GetStudyUserAttributesReply.SerializeToString, ), 'GetStudySystemAttributes': grpc.unary_unary_rpc_method_handler( servicer.GetStudySystemAttributes, request_deserializer=api__pb2.GetStudySystemAttributesRequest.FromString, response_serializer=api__pb2.GetStudySystemAttributesReply.SerializeToString, ), 'GetAllStudies': grpc.unary_unary_rpc_method_handler( servicer.GetAllStudies, request_deserializer=api__pb2.GetAllStudiesRequest.FromString, response_serializer=api__pb2.GetAllStudiesReply.SerializeToString, ), 'CreateNewTrial': grpc.unary_unary_rpc_method_handler( servicer.CreateNewTrial, 
request_deserializer=api__pb2.CreateNewTrialRequest.FromString, response_serializer=api__pb2.CreateNewTrialReply.SerializeToString, ), 'SetTrialParameter': grpc.unary_unary_rpc_method_handler( servicer.SetTrialParameter, request_deserializer=api__pb2.SetTrialParameterRequest.FromString, response_serializer=api__pb2.SetTrialParameterReply.SerializeToString, ), 'GetTrialIdFromStudyIdTrialNumber': grpc.unary_unary_rpc_method_handler( servicer.GetTrialIdFromStudyIdTrialNumber, request_deserializer=api__pb2.GetTrialIdFromStudyIdTrialNumberRequest.FromString, response_serializer=api__pb2.GetTrialIdFromStudyIdTrialNumberReply.SerializeToString, ), 'SetTrialStateValues': grpc.unary_unary_rpc_method_handler( servicer.SetTrialStateValues, request_deserializer=api__pb2.SetTrialStateValuesRequest.FromString, response_serializer=api__pb2.SetTrialStateValuesReply.SerializeToString, ), 'SetTrialIntermediateValue': grpc.unary_unary_rpc_method_handler( servicer.SetTrialIntermediateValue, request_deserializer=api__pb2.SetTrialIntermediateValueRequest.FromString, response_serializer=api__pb2.SetTrialIntermediateValueReply.SerializeToString, ), 'SetTrialUserAttribute': grpc.unary_unary_rpc_method_handler( servicer.SetTrialUserAttribute, request_deserializer=api__pb2.SetTrialUserAttributeRequest.FromString, response_serializer=api__pb2.SetTrialUserAttributeReply.SerializeToString, ), 'SetTrialSystemAttribute': grpc.unary_unary_rpc_method_handler( servicer.SetTrialSystemAttribute, request_deserializer=api__pb2.SetTrialSystemAttributeRequest.FromString, response_serializer=api__pb2.SetTrialSystemAttributeReply.SerializeToString, ), 'GetTrial': grpc.unary_unary_rpc_method_handler( servicer.GetTrial, request_deserializer=api__pb2.GetTrialRequest.FromString, response_serializer=api__pb2.GetTrialReply.SerializeToString, ), 'GetTrials': grpc.unary_unary_rpc_method_handler( servicer.GetTrials, request_deserializer=api__pb2.GetTrialsRequest.FromString, response_serializer=api__pb2.GetTrialsReply.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'optuna.StorageService', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) server.add_registered_method_handlers('optuna.StorageService', rpc_method_handlers) # This class is part of an EXPERIMENTAL API.
StorageServiceServicer
python
great-expectations__great_expectations
great_expectations/datasource/fluent/fabric.py
{ "start": 1993, "end": 7416 }
class ____(DataAsset):
    """Microsoft PowerBI Asset base class."""

    _reader_method: ClassVar[FabricReaderMethods]

    _EXCLUDE_FROM_READER_OPTIONS: ClassVar[Set[str]] = {
        "batch_definitions",
        "batch_metadata",
        "name",
        "order_by",
        "type",
        "id",
    }

    @override
    def test_connection(self) -> None:
        """
        Whatever is needed to test the connection to and/or validity of the asset.
        This could be a noop.
        """
        LOGGER.debug(f"Testing connection to {self.__class__.__name__} has not been implemented")

    @override
    def get_batch_identifiers_list(self, batch_request: BatchRequest) -> List[dict]:
        return [IDDict(batch_request.options)]

    @override
    def get_batch(self, batch_request: BatchRequest) -> Batch:
        self._validate_batch_request(batch_request)

        reader_options = {
            "workspace": self._datasource.workspace,
            "dataset": self._datasource.dataset,
            **self.dict(
                exclude=self._EXCLUDE_FROM_READER_OPTIONS,
                exclude_none=True,
                exclude_unset=True,
                by_alias=True,
                config_provider=self._datasource._config_provider,
            ),
        }

        batch_spec = FabricBatchSpec(
            reader_method=self._reader_method, reader_options=reader_options
        )
        # TODO: update get_batch_data_and_markers types
        execution_engine: PandasExecutionEngine = self.datasource.get_execution_engine()
        data, markers = execution_engine.get_batch_data_and_markers(batch_spec=batch_spec)

        # batch_definition (along with batch_spec and markers) is only here to satisfy a
        # legacy constraint when computing usage statistics in a validator. We hope to remove
        # it in the future.
        batch_definition = LegacyBatchDefinition(
            datasource_name=self.datasource.name,
            data_connector_name=_DATA_CONNECTOR_NAME,
            data_asset_name=self.name,
            batch_identifiers=make_batch_identifier(batch_request.options),
            batch_spec_passthrough=None,
        )

        batch_metadata: BatchMetadata = self._get_batch_metadata_from_batch_request(
            batch_request=batch_request, ignore_options=("dataframe",)
        )

        return Batch(
            datasource=self.datasource,
            data_asset=self,
            batch_request=batch_request,
            data=data,
            metadata=batch_metadata,
            batch_markers=markers,
            batch_spec=batch_spec.to_json_dict(),  # type: ignore[arg-type] # will be coerced to BatchSpec
            batch_definition=batch_definition,
        )

    @override
    def build_batch_request(
        self,
        options: Optional[BatchParameters] = None,
        batch_slice: Optional[BatchSlice] = None,
        partitioner: Optional[ColumnPartitioner] = None,
    ) -> BatchRequest:
        """A batch request that can be used to obtain batches for this DataAsset.

        Args:
            options: This is not currently supported and must be {} or None for this data asset.
            batch_slice: This is not currently supported and must be None for this data asset.
            partitioner: This is not currently supported and must be None for this data asset.

        Returns:
            A BatchRequest object that can be used to obtain a batch from an Asset by calling the
            get_batch method.
        """
        asset_type_name: str = self.__class__.__name__
        if options:
            raise BuildBatchRequestError(
                message=f"options is not currently supported for {asset_type_name} "
                "and must be None or {}."
            )

        if batch_slice is not None:
            raise BuildBatchRequestError(
                message=f"batch_slice is not currently supported for {asset_type_name} "
                "and must be None."
            )

        if partitioner is not None:
            raise BuildBatchRequestError(
                message=f"partitioner is not currently supported for {asset_type_name} "
                "and must be None."
            )

        return BatchRequest(
            datasource_name=self.datasource.name,
            data_asset_name=self.name,
            options={},
        )

    @override
    def _validate_batch_request(self, batch_request: BatchRequest) -> None:
        """Validates the batch_request has the correct form.

        Args:
            batch_request: A batch request object to be validated.
        """
        if not (
            batch_request.datasource_name == self.datasource.name
            and batch_request.data_asset_name == self.name
            and not batch_request.options
        ):
            expect_batch_request_form = BatchRequest[None](
                datasource_name=self.datasource.name,
                data_asset_name=self.name,
                options={},
                batch_slice=batch_request._batch_slice_input,  # type: ignore[attr-defined] # private attr does exist
            )
            raise gx_exceptions.InvalidBatchRequestError(  # noqa: TRY003 # FIXME CoP
                "BatchRequest should have form:\n"
                f"{pf(expect_batch_request_form.dict())}\n"
                f"but actually has form:\n{pf(batch_request.dict())}\n"
            )


@public_api
_PowerBIAsset
python
getsentry__sentry
src/sentry/utils/sdk_crashes/path_replacer.py
{ "start": 273, "end": 499 }
class ____(PathReplacer):
    def __init__(
        self,
        path: str,
    ):
        self.path = path

    def replace_path(self, path_field: str, path_value: str) -> str | None:
        return self.path
FixedPathReplacer
python
pandas-dev__pandas
pandas/tests/tseries/offsets/test_fiscal.py
{ "start": 1165, "end": 5001 }
class ____:
    offset_lom_sat_aug = makeFY5253LastOfMonth(1, startingMonth=8, weekday=WeekDay.SAT)
    offset_lom_sat_sep = makeFY5253LastOfMonth(1, startingMonth=9, weekday=WeekDay.SAT)

    on_offset_cases = [
        # From Wikipedia (see:
        # https://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar#Last_Saturday_of_the_month_at_fiscal_year_end)
        (offset_lom_sat_aug, datetime(2006, 8, 26), True),
        (offset_lom_sat_aug, datetime(2007, 8, 25), True),
        (offset_lom_sat_aug, datetime(2008, 8, 30), True),
        (offset_lom_sat_aug, datetime(2009, 8, 29), True),
        (offset_lom_sat_aug, datetime(2010, 8, 28), True),
        (offset_lom_sat_aug, datetime(2011, 8, 27), True),
        (offset_lom_sat_aug, datetime(2012, 8, 25), True),
        (offset_lom_sat_aug, datetime(2013, 8, 31), True),
        (offset_lom_sat_aug, datetime(2014, 8, 30), True),
        (offset_lom_sat_aug, datetime(2015, 8, 29), True),
        (offset_lom_sat_aug, datetime(2016, 8, 27), True),
        (offset_lom_sat_aug, datetime(2017, 8, 26), True),
        (offset_lom_sat_aug, datetime(2018, 8, 25), True),
        (offset_lom_sat_aug, datetime(2019, 8, 31), True),
        (offset_lom_sat_aug, datetime(2006, 8, 27), False),
        (offset_lom_sat_aug, datetime(2007, 8, 28), False),
        (offset_lom_sat_aug, datetime(2008, 8, 31), False),
        (offset_lom_sat_aug, datetime(2009, 8, 30), False),
        (offset_lom_sat_aug, datetime(2010, 8, 29), False),
        (offset_lom_sat_aug, datetime(2011, 8, 28), False),
        (offset_lom_sat_aug, datetime(2006, 8, 25), False),
        (offset_lom_sat_aug, datetime(2007, 8, 24), False),
        (offset_lom_sat_aug, datetime(2008, 8, 29), False),
        (offset_lom_sat_aug, datetime(2009, 8, 28), False),
        (offset_lom_sat_aug, datetime(2010, 8, 27), False),
        (offset_lom_sat_aug, datetime(2011, 8, 26), False),
        (offset_lom_sat_aug, datetime(2019, 8, 30), False),
        # From GMCR (see for example:
        # http://yahoo.brand.edgar-online.com/Default.aspx?
        # companyid=3184&formtypeID=7)
        (offset_lom_sat_sep, datetime(2010, 9, 25), True),
        (offset_lom_sat_sep, datetime(2011, 9, 24), True),
        (offset_lom_sat_sep, datetime(2012, 9, 29), True),
    ]

    @pytest.mark.parametrize("case", on_offset_cases)
    def test_is_on_offset(self, case):
        offset, dt, expected = case
        assert_is_on_offset(offset, dt, expected)

    def test_apply(self):
        offset_lom_aug_sat = makeFY5253LastOfMonth(startingMonth=8, weekday=WeekDay.SAT)
        offset_lom_aug_sat_1 = makeFY5253LastOfMonth(
            n=1, startingMonth=8, weekday=WeekDay.SAT
        )

        date_seq_lom_aug_sat = [
            datetime(2006, 8, 26),
            datetime(2007, 8, 25),
            datetime(2008, 8, 30),
            datetime(2009, 8, 29),
            datetime(2010, 8, 28),
            datetime(2011, 8, 27),
            datetime(2012, 8, 25),
            datetime(2013, 8, 31),
            datetime(2014, 8, 30),
            datetime(2015, 8, 29),
            datetime(2016, 8, 27),
        ]

        tests = [
            (offset_lom_aug_sat, date_seq_lom_aug_sat),
            (offset_lom_aug_sat_1, date_seq_lom_aug_sat),
            (offset_lom_aug_sat, [datetime(2006, 8, 25)] + date_seq_lom_aug_sat),
            (offset_lom_aug_sat_1, [datetime(2006, 8, 27)] + date_seq_lom_aug_sat[1:]),
            (
                makeFY5253LastOfMonth(n=-1, startingMonth=8, weekday=WeekDay.SAT),
                list(reversed(date_seq_lom_aug_sat)),
            ),
        ]
        for test in tests:
            offset, data = test
            current = data[0]
            for datum in data[1:]:
                current = current + offset
                assert current == datum
TestFY5253LastOfMonth
python
spyder-ide__spyder
external-deps/qtconsole/qtconsole/client.py
{ "start": 364, "end": 779 }
class ____(SuperQObject, HBChannel):
    # A longer timeout than the base class
    time_to_dead = 3.0

    # Emitted when the kernel has died.
    kernel_died = QtCore.Signal(object)

    def call_handlers(self, since_last_heartbeat):
        """ Reimplemented to emit signals instead of making callbacks. """
        # Emit the generic signal.
        self.kernel_died.emit(since_last_heartbeat)
QtHBChannel
python
anthropics__anthropic-sdk-python
src/anthropic/types/beta_not_found_error.py
{ "start": 193, "end": 284 }
class ____(BaseModel):
    message: str

    type: Literal["not_found_error"]
BetaNotFoundError
python
readthedocs__readthedocs.org
readthedocs/proxito/middleware.py
{ "start": 1508, "end": 16052 }
class ____(MiddlewareMixin): """The actual middleware we'll be using in prod.""" # None of these need the proxito request middleware (response is needed). # The analytics API isn't listed because it depends on the unresolver, # which depends on the proxito middleware. skip_views = ( "health_check", "search_api", "embed_api", ) def add_proxito_headers(self, request, response): """Add debugging and cache headers to proxito responses.""" project_slug = getattr(request, "path_project_slug", "") version_slug = getattr(request, "path_version_slug", "") path = getattr(response, "proxito_path", "") response["X-RTD-Domain"] = request.get_host() response["X-RTD-Project"] = project_slug if version_slug: response["X-RTD-Version"] = version_slug if path: response["X-RTD-Path"] = path # Include the project & project-version so we can do larger purges if needed cache_tags = [] if project_slug: cache_tags.append(project_slug) if version_slug: cache_tags.append(get_cache_tag(project_slug, version_slug)) if cache_tags: add_cache_tags(response, cache_tags) unresolved_domain = request.unresolved_domain if unresolved_domain: response["X-RTD-Project-Method"] = unresolved_domain.source.name if unresolved_domain.is_from_external_domain: response["X-RTD-Version-Method"] = "domain" else: response["X-RTD-Version-Method"] = "path" def add_user_headers(self, request, response): """ Set specific HTTP headers requested by the user. The headers added come from ``projects.models.HTTPHeader`` associated with the ``Domain`` object. """ unresolved_domain = request.unresolved_domain if unresolved_domain and unresolved_domain.is_from_custom_domain: response_headers = [header.lower() for header in response.headers.keys()] domain = unresolved_domain.domain for http_header in domain.http_headers.all(): if http_header.name.lower() in response_headers: log.error( "Overriding an existing response HTTP header.", http_header=http_header.name, domain=domain.domain, ) log.debug( "Adding custom response HTTP header.", http_header=http_header.name, domain=domain.domain, ) if http_header.only_if_secure_request and not request.is_secure(): continue # HTTP headers here are limited to # ``HTTPHeader.HEADERS_CHOICES`` since adding arbitrary HTTP # headers is potentially dangerous response[http_header.name] = http_header.value def add_hsts_headers(self, request, response): """ Set the Strict-Transport-Security (HSTS) header for docs sites. * For the public domain, set the HSTS header if settings.PUBLIC_DOMAIN_USES_HTTPS * For custom domains, check the HSTS values on the Domain object. The domain object should be saved already in request.domain. """ if not request.is_secure(): # Only set the HSTS header if the request is over HTTPS return response hsts_header_values = [] unresolved_domain = request.unresolved_domain if ( settings.PUBLIC_DOMAIN_USES_HTTPS and unresolved_domain and unresolved_domain.is_from_public_domain ): hsts_header_values = [ "max-age=31536000", "includeSubDomains", "preload", ] elif unresolved_domain and unresolved_domain.is_from_custom_domain: domain = unresolved_domain.domain # TODO: migrate Domains with HSTS set using these fields to # ``HTTPHeader`` and remove this chunk of code from here. 
if domain.hsts_max_age: hsts_header_values.append(f"max-age={domain.hsts_max_age}") # These other options don't make sense without max_age > 0 if domain.hsts_include_subdomains: hsts_header_values.append("includeSubDomains") if domain.hsts_preload: hsts_header_values.append("preload") if hsts_header_values: # See https://tools.ietf.org/html/rfc6797 response["Strict-Transport-Security"] = "; ".join(hsts_header_values) def add_cache_headers(self, request, response): """Add `Cache-Control: no-cache` header (browser level) for external versions.""" unresolved_domain = request.unresolved_domain if unresolved_domain and unresolved_domain.is_from_external_domain: response["Cache-Control"] = "no-cache" def add_cdn_cache_headers(self, request, response): """ Add Cache-Control headers. If the `CDN-Cache-Control` header isn't already present, set the cache level to public or private, depending if we allow private repos or not. Or if the request was from the `X-RTD-Slug` header, we don't cache the response, since we could be caching a response in another domain. We use ``CDN-Cache-Control``, to control caching at the CDN level only. This doesn't affect caching at the browser level (``Cache-Control``). See https://developers.cloudflare.com/cache/about/cdn-cache-control. """ unresolved_domain = request.unresolved_domain # Never trust projects resolving from the X-RTD-Slug header, # we don't want to cache their content on domains from other # projects, see GHSA-mp38-vprc-7hf5. if unresolved_domain and unresolved_domain.is_from_http_header: private_response(response, force=True) # SECURITY: Return early, we never want to cache this response. return # Mark the response as private or cache it, if it hasn't been marked as so already. if settings.ALLOW_PRIVATE_REPOS: private_response(response, force=False) else: cache_response(response, force=False) def add_x_robots_tag_headers(self, request, response): """Add `X-Robots-Tag: noindex` header for external versions.""" unresolved_domain = request.unresolved_domain if unresolved_domain and unresolved_domain.is_from_external_domain: response["X-Robots-Tag"] = "noindex" def _set_request_attributes(self, request, unresolved_domain): """ Set attributes in the request from the unresolved domain. - Set ``request.unresolved_domain`` to the unresolved domain. """ request.unresolved_domain = unresolved_domain def process_request(self, request): # noqa # Initialize our custom request attributes. request.unresolved_domain = None request.unresolved_url = None skip = any(request.path.startswith(reverse(view)) for view in self.skip_views) if skip: log.debug("Not processing Proxito middleware") return None try: unresolved_domain = unresolver.unresolve_domain_from_request(request) except SuspiciousHostnameError as exc: log.debug("Weird variation on our hostname.", domain=exc.domain) # Raise a contextualized 404 that will be handled by proxito's 404 handler raise DomainDNSHttp404( http_status=400, domain=exc.domain, ) from exc except (InvalidSubdomainError, InvalidExternalDomainError) as exc: log.debug("Invalid project set on the subdomain.") # Raise a contextualized 404 that will be handled by proxito's 404 handler raise ProjectHttp404( domain=exc.domain, ) from exc except InvalidCustomDomainError as exc: # Some person is CNAMEing to us without configuring a domain - 404. 
log.debug("CNAME 404.", domain=exc.domain) # Raise a contextualized 404 that will be handled by proxito's 404 handler raise DomainDNSHttp404( domain=exc.domain, ) from exc except InvalidXRTDSlugHeaderError as exc: raise SuspiciousOperation("Invalid X-RTD-Slug header.") from exc self._set_request_attributes(request, unresolved_domain) response = self._get_https_redirect(request) if response: return response # Remove multiple slashes from URL's if "//" in request.path: url_parsed = urlparse(request.get_full_path()) clean_path = re.sub("//+", "/", url_parsed.path) new_parsed = url_parsed._replace(path=clean_path) final_url = new_parsed.geturl() # This protects against a couple issues: # * First is a URL like `//` which urlparse will return as a path of '' # * Second is URLs like `//google.com` which urlparse will return as `//google.com` # We make sure there is _always_ a single slash in front to ensure relative redirects, # instead of `//` redirects which are actually alternative domains. final_url = "/" + final_url.lstrip("/") log.debug( "Proxito Slash Redirect.", from_url=request.get_full_path(), to_url=final_url, ) response = redirect(final_url) cache_response(response, cache_tags=[unresolved_domain.project.slug]) return response project = unresolved_domain.project log.debug( "Proxito Project.", project_slug=project.slug, ) return None def add_hosting_integrations_headers(self, request, response): """ Add HTTP headers to communicate to Cloudflare Workers. We have configured Cloudflare Workers to inject the addons and remove the old flyout integration based on HTTP headers. This method uses two different headers for these purposes: - ``X-RTD-Force-Addons``: inject ``readthedocs-addons.js`` and remove old flyout integration (via ``readthedocs-doc-embed.js``). Enabled on all projects by default starting on Oct 7, 2024. """ addons = False project_slug = getattr(request, "path_project_slug", "") if project_slug: addons = AddonsConfig.objects.filter(project__slug=project_slug).first() if addons: if addons.enabled: response["X-RTD-Force-Addons"] = "true" def add_cors_headers(self, request, response): """ Add CORS headers only to files from docs. DocDiff addons requires making a request from ``RTD_EXTERNAL_VERSION_DOMAIN`` to ``PUBLIC_DOMAIN`` to be able to compare both DOMs and show the visual differences. This request needs ``Access-Control-Allow-Origin`` HTTP headers to be accepted by browsers. However, we cannot allow passing credentials, since we don't want cross-origin requests to be able to access private versions. We set this header to `*`, we don't care about the origin of the request. And we don't have the need nor want to allow passing credentials from cross-origin requests. See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Origin. """ # TODO: se should add these headers to files from docs only, # proxied APIs and other endpoints should not have CORS headers. # These attributes aren't currently set for proxied APIs, but we shuold # find a better way to do this. project_slug = getattr(request, "path_project_slug", "") version_slug = getattr(request, "path_version_slug", "") if project_slug and version_slug: response.headers[ACCESS_CONTROL_ALLOW_ORIGIN] = "*" response.headers[ACCESS_CONTROL_ALLOW_METHODS] = "HEAD, OPTIONS, GET" return response def _get_https_redirect(self, request): """ Get a redirect response if the request should be redirected to HTTPS. 
A request should be redirected to HTTPS if any of the following conditions are met: - It's from a custom domain and the domain has HTTPS enabled. - It's from a public domain, and the public domain uses HTTPS. """ if request.is_secure(): # The request is already HTTPS, so we skip redirecting it. return None unresolved_domain = request.unresolved_domain # HTTPS redirect for custom domains. if unresolved_domain.is_from_custom_domain: domain = unresolved_domain.domain if domain.https: return redirect_to_https(request, project=unresolved_domain.project) return None # HTTPS redirect for public domains. if ( unresolved_domain.is_from_public_domain or unresolved_domain.is_from_external_domain ) and settings.PUBLIC_DOMAIN_USES_HTTPS: return redirect_to_https(request, project=unresolved_domain.project) return None def add_resolver_headers(self, request, response): if request.unresolved_url is not None: # TODO: add more ``X-RTD-Resolver-*`` headers uri_filename = iri_to_uri(request.unresolved_url.filename) header_value = escape(uri_filename) response["X-RTD-Resolver-Filename"] = header_value def process_response(self, request, response): # noqa self.add_proxito_headers(request, response) self.add_cache_headers(request, response) self.add_cdn_cache_headers(request, response) self.add_x_robots_tag_headers(request, response) self.add_hsts_headers(request, response) self.add_user_headers(request, response) self.add_hosting_integrations_headers(request, response) self.add_resolver_headers(request, response) self.add_cors_headers(request, response) return response
ProxitoMiddleware
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/roots/subscription.py
{ "start": 481, "end": 2152 }
class ____(graphene.ObjectType):
    """The root for all subscriptions to retrieve real-time data from the Dagster instance."""

    class Meta:
        name = "Subscription"

    pipelineRunLogs = graphene.Field(
        graphene.NonNull(GraphenePipelineRunLogsSubscriptionPayload),
        runId=graphene.Argument(graphene.NonNull(graphene.ID)),
        cursor=graphene.Argument(
            graphene.String,
            description=(
                "A cursor retrieved from the API. Pass 'HEAD' to stream from the current event"
                " onward."
            ),
        ),
        description="Retrieve real-time event logs after applying a filter on run id and cursor.",
    )

    capturedLogs = graphene.Field(
        graphene.NonNull(GrapheneCapturedLogs),
        logKey=graphene.Argument(non_null_list(graphene.String)),
        cursor=graphene.Argument(graphene.String),
        description="Retrieve real-time compute logs.",
    )

    locationStateChangeEvents = graphene.Field(
        graphene.NonNull(GrapheneLocationStateChangeSubscription),
        description=(
            "Retrieve real-time events when a location in the workspace undergoes a state change."
        ),
    )

    def subscribe_pipelineRunLogs(self, graphene_info: ResolveInfo, runId, cursor=None):
        return gen_events_for_run(graphene_info, runId, cursor)

    def subscribe_capturedLogs(self, graphene_info: ResolveInfo, logKey, cursor=None):
        return gen_captured_log_data(graphene_info, logKey, cursor)

    def subscribe_locationStateChangeEvents(self, graphene_info: ResolveInfo):
        return gen_location_state_changes(graphene_info)
GrapheneSubscription
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/distributions/special_math_test.py
{ "start": 12670, "end": 12735 }
class ____(NdtrGradientTest):
  _use_log = True
LogNdtrGradientTest
python
PyCQA__pylint
tests/functional/m/member/member_checks.py
{ "start": 4330, "end": 4421 }
class ____(type):
    def __getattr__(cls, attr):
        return attr
MetaWithDynamicGetattr
python
sqlalchemy__sqlalchemy
test/sql/test_metadata.py
{ "start": 97338, "end": 102012 }
class ____(fixtures.TestBase, AssertsCompiledSQL):
    def test_default_schema_metadata_fk(self):
        m = MetaData(schema="foo")
        t1 = Table("t1", m, Column("x", Integer))
        t2 = Table("t2", m, Column("x", Integer, ForeignKey("t1.x")))
        assert t2.c.x.references(t1.c.x)

    def test_ad_hoc_schema_equiv_fk(self):
        m = MetaData()
        t1 = Table("t1", m, Column("x", Integer), schema="foo")
        t2 = Table(
            "t2", m, Column("x", Integer, ForeignKey("t1.x")), schema="foo"
        )
        assert_raises(
            exc.NoReferencedTableError, lambda: t2.c.x.references(t1.c.x)
        )

    def test_default_schema_metadata_fk_alt_remote(self):
        m = MetaData(schema="foo")
        t1 = Table("t1", m, Column("x", Integer))
        t2 = Table(
            "t2", m, Column("x", Integer, ForeignKey("t1.x")), schema="bar"
        )
        assert t2.c.x.references(t1.c.x)

    def test_default_schema_metadata_fk_alt_local_raises(self):
        m = MetaData(schema="foo")
        t1 = Table("t1", m, Column("x", Integer), schema="bar")
        t2 = Table("t2", m, Column("x", Integer, ForeignKey("t1.x")))
        assert_raises(
            exc.NoReferencedTableError, lambda: t2.c.x.references(t1.c.x)
        )

    def test_default_schema_metadata_fk_alt_local(self):
        m = MetaData(schema="foo")
        t1 = Table("t1", m, Column("x", Integer), schema="bar")
        t2 = Table("t2", m, Column("x", Integer, ForeignKey("bar.t1.x")))
        assert t2.c.x.references(t1.c.x)

    @testing.combinations(
        (schema.CreateSchema("sa_schema"), "CREATE SCHEMA sa_schema"),
        (schema.DropSchema("sa_schema"), "DROP SCHEMA sa_schema"),
        # note we don't yet support lower-case table() or
        # lower-case column() for this
        # (
        #     schema.CreateTable(table("t", column("q", Integer))),
        #     "CREATE TABLE t (q INTEGER)",
        # ),
        (
            schema.CreateTable(Table("t", MetaData(), Column("q", Integer))),
            "CREATE TABLE t (q INTEGER)",
        ),
        (
            schema.DropTable(Table("t", MetaData(), Column("q", Integer))),
            "DROP TABLE t",
        ),
        (
            schema.CreateIndex(
                Index(
                    "foo",
                    "x",
                    _table=Table("t", MetaData(), Column("x", Integer)),
                )
            ),
            "CREATE INDEX foo ON t (x)",
        ),
        (
            schema.DropIndex(
                Index(
                    "foo",
                    "x",
                    _table=Table("t", MetaData(), Column("x", Integer)),
                )
            ),
            "DROP INDEX foo",
        ),
        (schema.CreateSequence(Sequence("my_seq")), "CREATE SEQUENCE my_seq"),
        (schema.DropSequence(Sequence("my_seq")), "DROP SEQUENCE my_seq"),
    )
    def test_stringify_schema_elements(self, element, expected):
        eq_ignore_whitespace(str(element), expected)

    def test_create_drop_schema(self):
        self.assert_compile(
            schema.CreateSchema("sa_schema"), "CREATE SCHEMA sa_schema"
        )
        self.assert_compile(
            schema.CreateSchema("sa_schema", if_not_exists=True),
            "CREATE SCHEMA IF NOT EXISTS sa_schema",
        )
        self.assert_compile(
            schema.DropSchema("sa_schema"), "DROP SCHEMA sa_schema"
        )
        self.assert_compile(
            schema.DropSchema("sa_schema", if_exists=True),
            "DROP SCHEMA IF EXISTS sa_schema",
        )
        self.assert_compile(
            schema.DropSchema("sa_schema", cascade=True),
            "DROP SCHEMA sa_schema CASCADE",
        )

    def test_iteration(self):
        metadata = MetaData()
        table1 = Table(
            "table1",
            metadata,
            Column("col1", Integer, primary_key=True),
            schema="someschema",
        )
        table2 = Table(
            "table2",
            metadata,
            Column("col1", Integer, primary_key=True),
            Column("col2", Integer, ForeignKey("someschema.table1.col1")),
            schema="someschema",
        )

        t1 = str(schema.CreateTable(table1).compile(bind=testing.db))
        t2 = str(schema.CreateTable(table2).compile(bind=testing.db))

        if testing.db.dialect.preparer(testing.db.dialect).omit_schema:
            assert t1.index("CREATE TABLE table1") > -1
            assert t2.index("CREATE TABLE table2") > -1
        else:
            assert t1.index("CREATE TABLE someschema.table1") > -1
            assert t2.index("CREATE TABLE someschema.table2") > -1
SchemaTest
python
Lightning-AI__lightning
src/lightning/pytorch/trainer/connectors/data_connector.py
{ "start": 2017, "end": 12317 }
class ____: def __init__(self, trainer: "pl.Trainer"): self.trainer = trainer self._datahook_selector: Optional[_DataHookSelector] = None def on_trainer_init( self, val_check_interval: Optional[Union[int, float, str, timedelta, dict]], reload_dataloaders_every_n_epochs: int, check_val_every_n_epoch: Optional[int], ) -> None: self.trainer.datamodule = None if check_val_every_n_epoch is not None and not isinstance(check_val_every_n_epoch, int): raise MisconfigurationException( f"`check_val_every_n_epoch` should be an integer, found {check_val_every_n_epoch!r}." ) if check_val_every_n_epoch is None and isinstance(val_check_interval, float): raise MisconfigurationException( "`val_check_interval` should be an integer or a time-based duration (str 'DD:HH:MM:SS', " "datetime.timedelta, or dict kwargs for timedelta) when `check_val_every_n_epoch=None`." ) self.trainer.check_val_every_n_epoch = check_val_every_n_epoch if not isinstance(reload_dataloaders_every_n_epochs, int) or (reload_dataloaders_every_n_epochs < 0): raise MisconfigurationException( f"`reload_dataloaders_every_n_epochs` should be an int >= 0, got {reload_dataloaders_every_n_epochs}." ) self.trainer.reload_dataloaders_every_n_epochs = reload_dataloaders_every_n_epochs def prepare_data(self) -> None: trainer = self.trainer # on multi-gpu jobs we only want to manipulate (download, etc) on node_rank=0, local_rank=0 # or in the case where each node needs to do its own manipulation in which case just local_rank=0 local_rank_zero = trainer.local_rank == 0 global_rank_zero = trainer.local_rank == 0 and trainer.node_rank == 0 datamodule = trainer.datamodule lightning_module = trainer.lightning_module # handle datamodule prepare data: if datamodule is not None and is_overridden("prepare_data", datamodule): prepare_data_per_node = datamodule.prepare_data_per_node with _InfiniteBarrier(): if (prepare_data_per_node and local_rank_zero) or (not prepare_data_per_node and global_rank_zero): call._call_lightning_datamodule_hook(trainer, "prepare_data") # handle lightning module prepare data: if lightning_module is not None and is_overridden("prepare_data", lightning_module): prepare_data_per_node = lightning_module.prepare_data_per_node with _InfiniteBarrier(): if (prepare_data_per_node and local_rank_zero) or (not prepare_data_per_node and global_rank_zero): call._call_lightning_module_hook(trainer, "prepare_data") def attach_data( self, model: "pl.LightningModule", train_dataloaders: Optional[TRAIN_DATALOADERS] = None, val_dataloaders: Optional[EVAL_DATALOADERS] = None, test_dataloaders: Optional[EVAL_DATALOADERS] = None, predict_dataloaders: Optional[EVAL_DATALOADERS] = None, datamodule: Optional["pl.LightningDataModule"] = None, ) -> None: # set up the passed in dataloaders (if needed) self.attach_dataloaders( model, train_dataloaders=train_dataloaders, val_dataloaders=val_dataloaders, test_dataloaders=test_dataloaders, predict_dataloaders=predict_dataloaders, ) self.attach_datamodule(model, datamodule=datamodule) # Attach the trainer to the LightningModule model.trainer = self.trainer def attach_dataloaders( self, model: "pl.LightningModule", train_dataloaders: Optional[TRAIN_DATALOADERS] = None, val_dataloaders: Optional[EVAL_DATALOADERS] = None, test_dataloaders: Optional[EVAL_DATALOADERS] = None, predict_dataloaders: Optional[EVAL_DATALOADERS] = None, ) -> None: trainer = self.trainer trainer.fit_loop._combined_loader = None trainer.fit_loop.epoch_loop.val_loop._combined_loader = None trainer.validate_loop._combined_loader = None 
trainer.test_loop._combined_loader = None trainer.predict_loop._combined_loader = None trainer.fit_loop._data_source.instance = train_dataloaders if train_dataloaders is not None else model trainer.fit_loop.epoch_loop.val_loop._data_source.instance = ( val_dataloaders if val_dataloaders is not None else model ) trainer.validate_loop._data_source.instance = val_dataloaders if val_dataloaders is not None else model trainer.test_loop._data_source.instance = test_dataloaders if test_dataloaders is not None else model trainer.predict_loop._data_source.instance = predict_dataloaders if predict_dataloaders is not None else model def attach_datamodule( self, model: "pl.LightningModule", datamodule: Optional["pl.LightningDataModule"] = None ) -> None: # If we have a datamodule, attach necessary hooks + dataloaders self._datahook_selector = _DataHookSelector(model, datamodule) if datamodule is None: return trainer = self.trainer trainer.fit_loop._data_source.instance = datamodule trainer.fit_loop.epoch_loop.val_loop._data_source.instance = datamodule trainer.validate_loop._data_source.instance = datamodule trainer.test_loop._data_source.instance = datamodule trainer.predict_loop._data_source.instance = datamodule trainer.datamodule = datamodule datamodule.trainer = trainer def _requires_distributed_sampler(self, dataloader: DataLoader) -> bool: return ( self.trainer._accelerator_connector.use_distributed_sampler and self.trainer._accelerator_connector.is_distributed and not isinstance(dataloader.sampler, DistributedSampler) and not has_iterable_dataset(dataloader) ) def _prepare_dataloader(self, dataloader: object, shuffle: bool, mode: RunningStage) -> object: """This function handles the following functionalities: - Injecting a `DistributedDataSamplerWrapper` into the `DataLoader` if on a distributed environment - Wrapping the dataloader based on strategy-specific logic """ # don't do anything if it's not a dataloader if not isinstance(dataloader, DataLoader): return dataloader if ( self._requires_distributed_sampler(dataloader) # sets the distributed sampler or mode == RunningStage.PREDICTING # to track indices for the predictions ): sampler = self._resolve_sampler(dataloader, shuffle=shuffle, mode=mode) return _update_dataloader(dataloader, sampler, mode=mode) return dataloader def _resolve_sampler( self, dataloader: DataLoader, shuffle: bool, mode: Optional[RunningStage] = None ) -> Union[Sampler, Iterable]: if self._requires_distributed_sampler(dataloader): distributed_sampler_kwargs = self.trainer.distributed_sampler_kwargs assert distributed_sampler_kwargs is not None sampler = _get_distributed_sampler( dataloader, shuffle, mode=mode, overfit_batches=self.trainer.overfit_batches, **distributed_sampler_kwargs, ) # update docs too once this is resolved trainer_fn = self.trainer.state.fn if ( isinstance(sampler, DistributedSampler) and sampler.num_replicas > 1 and trainer_fn in (TrainerFn.VALIDATING, TrainerFn.TESTING) ): rank_zero_warn( f"Using `DistributedSampler` with the dataloaders. During `trainer.{trainer_fn.value}()`, it is" " recommended to use `Trainer(devices=1, num_nodes=1)` to ensure each sample/batch gets evaluated" " exactly once. 
Otherwise, multi-device settings use `DistributedSampler` that replicates" " some samples to make sure all devices have same batch size in case of uneven inputs.", category=PossibleUserWarning, ) return sampler return dataloader.sampler def _get_distributed_sampler( dataloader: DataLoader, shuffle: bool, overfit_batches: Union[int, float], mode: Optional[RunningStage] = None, **kwargs: Any, ) -> DistributedSampler: """This function is used to created the distributed sampler injected within the user DataLoader.""" kwargs["shuffle"] = shuffle and not overfit_batches kwargs.setdefault("seed", int(os.getenv("PL_GLOBAL_SEED", 0))) if mode == RunningStage.PREDICTING: return UnrepeatedDistributedSamplerWrapper(dataloader.sampler, **kwargs) if isinstance(dataloader.sampler, (RandomSampler, SequentialSampler)): return DistributedSampler(dataloader.dataset, **kwargs) return DistributedSamplerWrapper(dataloader.sampler, **kwargs) def _resolve_overfit_batches(combined_loader: CombinedLoader, mode: RunningStage) -> None: """Resolve overfit batches by disabling shuffling. When overfit_batches > 0, this function ensures that sequential sampling is used without shuffling for consistent batches across epochs. Training and validation use different sets of data. """ all_have_sequential_sampler = all( isinstance(dl.sampler, SequentialSampler) for dl in combined_loader.flattened if hasattr(dl, "sampler") ) if all_have_sequential_sampler: return rank_zero_warn( f"You requested to overfit but enabled {mode.dataloader_prefix} dataloader shuffling." f" We are turning off the {mode.dataloader_prefix} dataloader shuffling for you." ) updated = [ _update_dataloader(dl, sampler=SequentialSampler(dl.dataset), mode=mode) if hasattr(dl, "dataset") else dl for dl in combined_loader.flattened ] combined_loader.flattened = updated @dataclass
_DataConnector
python
run-llama__llama_index
llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/_shared.py
{ "start": 967, "end": 1212 }
class ____(str, Enum):
    """SSL mode for Azure Database for PostgreSQL connections."""

    disable = "disable"
    allow = "allow"
    prefer = "prefer"
    require = "require"
    verify_ca = "verify-ca"
    verify_full = "verify-full"
SSLMode
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/matchClass1.py
{ "start": 9496, "end": 9527 }
class ____(Parent[T]): ...
Child1
python
getsentry__sentry
src/sentry/integrations/messaging/linkage.py
{ "start": 12880, "end": 14972 }
class ____(LinkageView, ABC):
    _ALLOWED_ROLES = frozenset(["admin", "manager", "owner"])

    @classmethod
    def is_valid_role(cls, org_member: OrganizationMember) -> bool:
        return org_member.role in cls._ALLOWED_ROLES

    @method_decorator(never_cache)
    def handle(self, request: HttpRequest, signed_params: str) -> HttpResponseBase:
        if request.method not in ("GET", "POST"):
            return self.render_error_page(
                request, status=405, body_text="HTTP 405: Method not allowed"
            )

        try:
            params = unsign(signed_params, salt=self.salt)
        except (SignatureExpired, BadSignature) as e:
            logger.warning("handle.signature_error", exc_info=e)
            self.capture_metric("failure", tags={"error": str(e)})
            return render_to_response(
                "sentry/integrations/slack/expired-link.html",
                status=400,
                request=request,
            )

        integration_id: str = params["integration_id"]
        slack_id: str = params["slack_id"]
        organization_id: str | None = params.get("organization_id")

        integration = integration_service.get_integration(
            integration_id=integration_id, status=ObjectStatus.ACTIVE
        )
        if integration is None:
            logger.info(
                "integration.not_found",
                extra={
                    "user_id": request.user.id,
                    "integration_id": integration_id,
                    "slack_id": slack_id,
                    "organization_id": organization_id,
                },
            )
            self.capture_metric("failure")
            return self.render_error_page(
                request, status=404, body_text="HTTP 404: Could not find the Slack integration."
            )

        return self.execute(request, integration, params)

    @abstractmethod
    def execute(
        self, request: HttpRequest, integration: RpcIntegration, params: Mapping[str, Any]
    ) -> HttpResponseBase:
        raise NotImplementedError
TeamLinkageView