| language | repo | path | class_span | source | target |
|---|---|---|---|---|---|
python | dagster-io__dagster | helm/dagster/schema/schema/charts/utils/kubernetes.py | {
"start": 4776,
"end": 4997
} | class ____(BaseModel):
model_config = {
"extra": "allow",
"json_schema_extra": {
"$ref": create_definition_ref("io.k8s.api.core.v1.ResourceRequirements")
},
}
| ResourceRequirements |
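A hedged sketch of what the `json_schema_extra` `$ref` does, as an analogous standalone pydantic v2 model; `create_definition_ref` is the record's own helper, so the ref string below is a hypothetical stand-in for its output.

```python
# Sketch of an analogous standalone pydantic v2 model. The "$ref" string is a
# hypothetical stand-in for what create_definition_ref() would return.
from pydantic import BaseModel

class Resources(BaseModel):
    model_config = {
        "extra": "allow",  # accept arbitrary k8s ResourceRequirements fields
        "json_schema_extra": {
            "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements"
        },
    }

# json_schema_extra is merged into the generated JSON schema:
print(Resources.model_json_schema())
```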
python | scrapy__scrapy | scrapy/extensions/feedexport.py | {
"start": 6174,
"end": 8831
} | class ____(BlockingFeedStorage):
def __init__(
self,
uri: str,
access_key: str | None = None,
secret_key: str | None = None,
acl: str | None = None,
endpoint_url: str | None = None,
*,
feed_options: dict[str, Any] | None = None,
session_token: str | None = None,
region_name: str | None = None,
):
try:
import boto3.session # noqa: PLC0415
except ImportError:
raise NotConfigured("missing boto3 library")
u = urlparse(uri)
assert u.hostname
self.bucketname: str = u.hostname
self.access_key: str | None = u.username or access_key
self.secret_key: str | None = u.password or secret_key
self.session_token: str | None = session_token
self.keyname: str = u.path[1:] # remove first "/"
self.acl: str | None = acl
self.endpoint_url: str | None = endpoint_url
self.region_name: str | None = region_name
boto3_session = boto3.session.Session()
self.s3_client = boto3_session.client(
"s3",
aws_access_key_id=self.access_key,
aws_secret_access_key=self.secret_key,
aws_session_token=self.session_token,
endpoint_url=self.endpoint_url,
region_name=self.region_name,
)
if feed_options and feed_options.get("overwrite", True) is False:
logger.warning(
"S3 does not support appending to files. To "
"suppress this warning, remove the overwrite "
"option from your FEEDS setting or set it to True."
)
@classmethod
def from_crawler(
cls,
crawler: Crawler,
uri: str,
*,
feed_options: dict[str, Any] | None = None,
) -> Self:
return cls(
uri,
access_key=crawler.settings["AWS_ACCESS_KEY_ID"],
secret_key=crawler.settings["AWS_SECRET_ACCESS_KEY"],
session_token=crawler.settings["AWS_SESSION_TOKEN"],
acl=crawler.settings["FEED_STORAGE_S3_ACL"] or None,
endpoint_url=crawler.settings["AWS_ENDPOINT_URL"] or None,
region_name=crawler.settings["AWS_REGION_NAME"] or None,
feed_options=feed_options,
)
def _store_in_thread(self, file: IO[bytes]) -> None:
file.seek(0)
kwargs: dict[str, Any] = {"ExtraArgs": {"ACL": self.acl}} if self.acl else {}
self.s3_client.upload_fileobj(
Bucket=self.bucketname, Key=self.keyname, Fileobj=file, **kwargs
)
file.close()
| S3FeedStorage |
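A minimal configuration sketch showing how this storage backend is typically reached in a Scrapy project; the bucket, key, and credentials are placeholders.

```python
# settings.py sketch: s3:// feed URIs route to S3FeedStorage. Bucket name and
# credentials are placeholders; boto3's normal credential chain also works.
AWS_ACCESS_KEY_ID = "AKIA..."
AWS_SECRET_ACCESS_KEY = "..."
AWS_REGION_NAME = "us-east-1"

FEEDS = {
    "s3://my-bucket/exports/items.json": {
        "format": "json",
        "overwrite": True,  # S3 cannot append, so leave overwrite enabled
    },
}
```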
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/schedule_definition.py | {
"start": 18771,
"end": 45312
} | class ____(IHasInternalInit):
"""Defines a schedule that targets a job.
Args:
name (Optional[str]): The name of the schedule to create. Defaults to the job name plus
``_schedule``.
cron_schedule (Union[str, Sequence[str]]): A valid cron string or sequence of cron strings
specifying when the schedule will run, e.g., ``45 23 * * 6`` for a schedule that runs
at 11:45 PM every Saturday. If a sequence is provided, then the schedule will run for
the union of all execution times for the provided cron strings, e.g.,
``['45 23 * * 6', '30 9 * * 0']`` for a schedule that runs at 11:45 PM every Saturday and
9:30 AM every Sunday.
execution_fn (Callable[ScheduleEvaluationContext]): The core evaluation function for the schedule, which is run at an interval to determine whether a run should be launched or not. Takes a :py:class:`~dagster.ScheduleEvaluationContext`.
This function must return a generator, which must yield either a single :py:class:`~dagster.SkipReason`
or one or more :py:class:`~dagster.RunRequest` objects.
run_config (Optional[Union[RunConfig, Mapping]]): The config that parameterizes this execution,
as a dict.
run_config_fn (Optional[Callable[[ScheduleEvaluationContext], [Mapping]]]): A function that
takes a :py:class:`~dagster.ScheduleEvaluationContext` object and returns the run configuration that
parameterizes this execution, as a dict. **Note**: Only one of ``run_config``, ``run_config_fn``, or ``execution_fn`` may be set.
tags (Optional[Mapping[str, str]]): A set of key-value tags that annotate the schedule
and can be used for searching and filtering in the UI. If no `execution_fn` is provided,
then these will also be automatically attached to runs launched by the schedule.
tags_fn (Optional[Callable[[ScheduleEvaluationContext], Optional[Mapping[str, str]]]]): A
function that generates tags to attach to the schedule's runs. Takes a
:py:class:`~dagster.ScheduleEvaluationContext` and returns a dictionary of tags (string
key-value pairs). **Note**: Only one of the following may be set: ``tags``, ``tags_fn``, or ``execution_fn``.
should_execute (Optional[Callable[[ScheduleEvaluationContext], bool]]): A function that runs
at schedule execution time to determine whether a schedule should execute or skip. Takes
a :py:class:`~dagster.ScheduleEvaluationContext` and returns a boolean (``True`` if the
schedule should execute). Defaults to a function that always returns ``True``.
execution_timezone (Optional[str]): Timezone in which the schedule should run.
Supported strings for timezones are the ones provided by the
`IANA time zone database <https://www.iana.org/time-zones>`_ - e.g. ``"America/Los_Angeles"``.
description (Optional[str]): A human-readable description of the schedule.
job (Optional[Union[GraphDefinition, JobDefinition]]): The job that should execute when this
schedule runs.
default_status (DefaultScheduleStatus): If set to ``RUNNING``, the schedule will start as running. The default status can be overridden from the Dagster UI or via the GraphQL API.
required_resource_keys (Optional[Set[str]]): The set of resource keys required by the schedule.
target (Optional[Union[CoercibleToAssetSelection, AssetsDefinition, JobDefinition, UnresolvedAssetJobDefinition]]):
The target that the schedule will execute.
It can take :py:class:`~dagster.AssetSelection` objects and anything coercible to it (e.g. `str`, `Sequence[str]`, `AssetKey`, `AssetsDefinition`).
It can also accept :py:class:`~dagster.JobDefinition` (a function decorated with `@job` is an instance of `JobDefinition`) and `UnresolvedAssetJobDefinition` (the return value of :py:func:`~dagster.define_asset_job`) objects.
This parameter will replace `job` and `job_name`.
metadata (Optional[Mapping[str, Any]]): A set of metadata entries that annotate the
schedule. Values will be normalized to typed `MetadataValue` objects. Not currently
shown in the UI but available at runtime via
`ScheduleEvaluationContext.repository_def.get_schedule_def(<name>).metadata`.
"""
def with_updated_job(self, new_job: ExecutableDefinition) -> "ScheduleDefinition":
"""Returns a copy of this schedule with the job replaced.
Args:
new_job (ExecutableDefinition): The job that should execute when this
schedule runs.
"""
return self.with_attributes(job=new_job)
def with_attributes(
self,
*,
job: Optional[ExecutableDefinition] = None,
metadata: Optional[RawMetadataMapping] = None,
) -> "ScheduleDefinition":
"""Returns a copy of this schedule with attributes replaced."""
if job:
job_name = None # job name will be derived from the passed job if provided
job_to_use = job
elif self._target.has_job_def:
job_name = None
job_to_use = self.job
else:
job_name = self.job_name
job_to_use = None
return ScheduleDefinition.dagster_internal_init(
name=self.name,
cron_schedule=self._cron_schedule,
job_name=job_name,
execution_timezone=self.execution_timezone,
execution_fn=self._execution_fn,
description=self.description,
job=job_to_use,
default_status=self.default_status,
environment_vars=self._environment_vars,
required_resource_keys=self._raw_required_resource_keys,
# run_config, run_config_fn, tags_fn, should_execute are not copied because the schedule constructor
# incorporates them into the execution_fn defined in the constructor. Since we are
# copying the execution_fn, we don't need to copy these, and it would actually be an
# error to do so (since you can't pass an execution_fn and any of these values
# simultaneously).
run_config=None,
run_config_fn=None,
tags=self.tags,
tags_fn=None,
metadata=metadata if metadata is not None else self.metadata,
should_execute=None,
target=None,
owners=self._owners,
)
def __init__(
self,
name: Optional[str] = None,
*,
cron_schedule: Optional[Union[str, Sequence[str]]] = None,
job_name: Optional[str] = None,
run_config: Optional[Union["RunConfig", Mapping[str, Any]]] = None,
run_config_fn: Optional[ScheduleRunConfigFunction] = None,
tags: Optional[Mapping[str, str]] = None,
tags_fn: Optional[ScheduleTagsFunction] = None,
metadata: Optional[RawMetadataMapping] = None,
should_execute: Optional[ScheduleShouldExecuteFunction] = None,
environment_vars: Optional[Mapping[str, str]] = None,
execution_timezone: Optional[str] = None,
execution_fn: Optional[ScheduleExecutionFunction] = None,
description: Optional[str] = None,
job: Optional[ExecutableDefinition] = None,
default_status: DefaultScheduleStatus = DefaultScheduleStatus.STOPPED,
required_resource_keys: Optional[set[str]] = None,
target: Optional[
Union[
"CoercibleToAssetSelection",
"AssetsDefinition",
"JobDefinition",
"UnresolvedAssetJobDefinition",
]
] = None,
owners: Optional[Sequence[str]] = None,
):
from dagster._core.definitions.run_config import convert_config_input
self._cron_schedule = check.inst_param(cron_schedule, "cron_schedule", (str, Sequence))
if not isinstance(self._cron_schedule, str):
check.sequence_param(self._cron_schedule, "cron_schedule", of_type=str) # type: ignore
if not is_valid_cron_schedule(self._cron_schedule): # type: ignore
raise DagsterInvalidDefinitionError(
f"Found invalid cron schedule '{self._cron_schedule}' for schedule '{name}''. "
"Dagster recognizes standard cron expressions consisting of 5 fields."
)
if has_out_of_range_cron_interval(self._cron_schedule): # type: ignore
warnings.warn(
"Found a cron schedule with an interval greater than the expected range for"
f" schedule '{name}'. Dagster currently normalizes this to an interval that may"
" fire more often than expected. You may want to break this cron schedule up into"
" a sequence of cron schedules. See"
" https://github.com/dagster-io/dagster/issues/15294 for more information."
)
if (
sum(
[
int(target is not None),
int(job_name is not None),
int(job is not None),
]
)
> 1
):
raise DagsterInvalidDefinitionError(
"Attempted to provide more than one of 'job', 'job_name', and 'target'"
"params to ScheduleDefinition. Must provide only one."
)
if target:
self._target = AutomationTarget.from_coercible(
target,
automation_name=check.not_none(
name, "If you specify target you must specify schedule name"
),
)
elif job:
self._target = AutomationTarget.from_coercible(job)
elif job_name:
self._target = AutomationTarget(
resolvable_to_job=check.str_param(job_name, "job_name"),
)
else:
check.failed("Must provide target, job, or job_name")
if name:
self._name = check_valid_name(name)
elif job_name:
self._name = job_name + "_schedule"
elif job:
self._name = job.name + "_schedule"
self._description = check.opt_str_param(description, "description")
self._environment_vars = check.opt_nullable_mapping_param(
environment_vars, "environment_vars", key_type=str, value_type=str
)
self._execution_timezone = check.opt_str_param(execution_timezone, "execution_timezone")
if execution_fn and (run_config_fn or tags_fn or should_execute or run_config):
raise DagsterInvalidDefinitionError(
"Attempted to provide both execution_fn and individual run_config/tags_fn arguments "
"to ScheduleDefinition. Must provide only one of the two."
)
elif execution_fn:
self._execution_fn: Optional[Union[Callable[..., Any], DecoratedScheduleFunction]] = (
None
)
if isinstance(execution_fn, DecoratedScheduleFunction):
self._execution_fn = execution_fn
else:
self._execution_fn = check.opt_callable_param(execution_fn, "execution_fn")
self._tags = normalize_tags(
tags, allow_private_system_tags=False, warning_stacklevel=5
) # reset once owners is out of beta_param
self._tags_fn = None
self._run_config_fn = None
else:
if run_config_fn and run_config:
raise DagsterInvalidDefinitionError(
"Attempted to provide both run_config_fn and run_config as arguments"
" to ScheduleDefinition. Must provide only one of the two."
)
def _default_run_config_fn(context: ScheduleEvaluationContext) -> CoercibleToRunConfig:
return dict(
check.opt_mapping_param(
convert_config_input(run_config), "run_config", key_type=str
)
)
self._run_config_fn = check.opt_callable_param(
run_config_fn, "run_config_fn", default=_default_run_config_fn
)
if tags_fn and tags:
raise DagsterInvalidDefinitionError(
"Attempted to provide both tags_fn and tags as arguments"
" to ScheduleDefinition. Must provide only one of the two."
)
self._tags = normalize_tags(
tags, allow_private_system_tags=False, warning_stacklevel=5
) # reset once owners is out of beta_param
if tags_fn:
self._tags_fn = check.opt_callable_param(
tags_fn, "tags_fn", default=lambda _context: cast("Mapping[str, str]", {})
)
else:
tags_fn = lambda _context: self._tags or {}
self._tags_fn = tags_fn
self._should_execute: ScheduleShouldExecuteFunction = check.opt_callable_param(
should_execute, "should_execute", default=lambda _context: True
)
# Several type-ignores are present in this function to work around bugs in mypy
# inference.
def _execution_fn(context: ScheduleEvaluationContext) -> RunRequestIterator:
with user_code_error_boundary(
ScheduleExecutionError,
lambda: (
f"Error occurred during the execution of should_execute for schedule {name}"
),
):
if not self._should_execute(context):
yield SkipReason(f"should_execute function for {name} returned false.")
return
with user_code_error_boundary(
ScheduleExecutionError,
lambda: (
f"Error occurred during the execution of run_config_fn for schedule {name}"
),
):
_run_config_fn = check.not_none(self._run_config_fn)
evaluated_run_config = copy.deepcopy(
_run_config_fn(context)
if has_at_least_one_parameter(_run_config_fn)
else _run_config_fn() # type: ignore # (strict type guard)
)
with user_code_error_boundary(
ScheduleExecutionError,
lambda: f"Error occurred during the execution of tags_fn for schedule {name}",
):
evaluated_tags = normalize_tags(
tags_fn(context),
allow_private_system_tags=False,
warning_stacklevel=5, # reset once owners is out of beta_param
)
yield RunRequest(
run_key=None,
run_config=evaluated_run_config,
tags=evaluated_tags,
)
self._execution_fn = _execution_fn
if self._execution_timezone:
try:
# Verify that the timezone can be loaded
get_timezone(self._execution_timezone)
except Exception as e:
raise DagsterInvalidDefinitionError(
f"Invalid execution timezone {self._execution_timezone} for {name}"
) from e
self._default_status = check.inst_param(
default_status, "default_status", DefaultScheduleStatus
)
resource_arg_names: set[str] = (
{arg.name for arg in get_resource_args(self._execution_fn.decorated_fn)}
if isinstance(self._execution_fn, DecoratedScheduleFunction)
else set()
)
check.param_invariant(
len(required_resource_keys or []) == 0 or len(resource_arg_names) == 0,
"Cannot specify resource requirements in both @schedule decorator and as arguments to"
" the decorated function",
)
self._raw_required_resource_keys = check.opt_set_param(
required_resource_keys, "required_resource_keys", of_type=str
)
self._required_resource_keys = self._raw_required_resource_keys or resource_arg_names
self._metadata = normalize_metadata(
check.opt_mapping_param(metadata, "metadata", key_type=str)
)
if owners:
for owner in owners:
validate_definition_owner(owner, "schedule", self._name)
self._owners = owners
@staticmethod
def dagster_internal_init(
*,
name: Optional[str],
cron_schedule: Optional[Union[str, Sequence[str]]],
job_name: Optional[str],
run_config: Optional[Any],
run_config_fn: Optional[ScheduleRunConfigFunction],
tags: Optional[Mapping[str, str]],
tags_fn: Optional[ScheduleTagsFunction],
metadata: Optional[RawMetadataMapping],
should_execute: Optional[ScheduleShouldExecuteFunction],
environment_vars: Optional[Mapping[str, str]],
execution_timezone: Optional[str],
execution_fn: Optional[ScheduleExecutionFunction],
description: Optional[str],
job: Optional[ExecutableDefinition],
default_status: DefaultScheduleStatus,
required_resource_keys: Optional[set[str]],
target: Optional[
Union[
"CoercibleToAssetSelection",
"AssetsDefinition",
"JobDefinition",
"UnresolvedAssetJobDefinition",
]
],
owners: Optional[Sequence[str]],
) -> "ScheduleDefinition":
return ScheduleDefinition(
name=name,
cron_schedule=cron_schedule,
job_name=job_name,
run_config=run_config,
run_config_fn=run_config_fn,
tags=tags,
tags_fn=tags_fn,
metadata=metadata,
should_execute=should_execute,
environment_vars=environment_vars,
execution_timezone=execution_timezone,
execution_fn=execution_fn,
description=description,
job=job,
default_status=default_status,
required_resource_keys=required_resource_keys,
target=target,
owners=owners,
)
def __call__(self, *args, **kwargs) -> ScheduleEvaluationFunctionReturn:
from dagster._core.definitions.decorators.schedule_decorator import (
DecoratedScheduleFunction,
)
from dagster._core.definitions.sensor_definition import get_context_param_name
if not isinstance(self._execution_fn, DecoratedScheduleFunction):
raise DagsterInvalidInvocationError(
"Schedule invocation is only supported for schedules created via the schedule "
"decorators."
)
context_param_name = get_context_param_name(self._execution_fn.decorated_fn)
context = get_or_create_schedule_context(self._execution_fn.decorated_fn, *args, **kwargs)
context_param = {context_param_name: context} if context_param_name else {}
resources = validate_and_get_schedule_resource_dict(
context.resources, self._name, self._required_resource_keys
)
result = self._execution_fn.decorated_fn(**context_param, **resources)
if isinstance(result, dict):
return copy.deepcopy(result)
else:
return result
@public
@property
def name(self) -> str:
"""str: The name of the schedule."""
return self._name
@public
@property
def job_name(self) -> str:
"""str: The name of the job targeted by this schedule."""
return self._target.job_name
@public
@property
def description(self) -> Optional[str]:
"""Optional[str]: A description for this schedule."""
return self._description
@public
@property
def cron_schedule(self) -> Union[str, Sequence[str]]:
"""Union[str, Sequence[str]]: The cron schedule representing when this schedule will be evaluated."""
return self._cron_schedule # type: ignore
@public
@deprecated(
breaking_version="2.0",
additional_warn_text="Setting this property no longer has any effect.",
)
@property
def environment_vars(self) -> Optional[Mapping[str, str]]:
"""Mapping[str, str]: Environment variables to export to the cron schedule."""
return self._environment_vars
@public
@property
def required_resource_keys(self) -> set[str]:
"""Set[str]: The set of keys for resources that must be provided to this schedule."""
return self._required_resource_keys
@public
@property
def execution_timezone(self) -> Optional[str]:
"""Optional[str]: The timezone in which this schedule will be evaluated."""
return self._execution_timezone
@public
@property
def tags(self) -> Mapping[str, str]:
"""Mapping[str, str]: The tags for this schedule."""
return self._tags
@public
@property
def metadata(self) -> Mapping[str, MetadataValue]:
"""Mapping[str, str]: The metadata for this schedule."""
return self._metadata
@property
def owners(self) -> Optional[Sequence[str]]:
return self._owners
@property
def has_job(self) -> bool:
return self._target.has_job_def
@public
@property
def job(self) -> Union[JobDefinition, UnresolvedAssetJobDefinition]:
"""Union[JobDefinition, UnresolvedAssetJobDefinition]: The job that is
targeted by this schedule.
"""
if self._target.has_job_def:
return self._target.job_def
raise DagsterInvalidDefinitionError("No job was provided to ScheduleDefinition.")
def evaluate_tick(self, context: "ScheduleEvaluationContext") -> ScheduleExecutionData:
"""Evaluate schedule using the provided context.
Args:
context (ScheduleEvaluationContext): The context with which to evaluate this schedule.
Returns:
ScheduleExecutionData: Contains list of run requests, or skip message if present.
"""
from dagster._core.instance.types import CachingDynamicPartitionsLoader
check.inst_param(context, "context", ScheduleEvaluationContext)
execution_fn: Callable[..., ScheduleEvaluationFunctionReturn]
if isinstance(self._execution_fn, DecoratedScheduleFunction):
execution_fn = self._execution_fn.wrapped_fn
else:
execution_fn = cast(
"Callable[..., ScheduleEvaluationFunctionReturn]",
self._execution_fn,
)
result = list(ensure_gen(execution_fn(context)))
skip_message: Optional[str] = None
run_requests: list[RunRequest] = []
if not result or result == [None]:
run_requests = []
skip_message = "Schedule function returned an empty result"
elif len(result) == 1:
item = check.inst(result[0], (SkipReason, RunRequest))
if isinstance(item, RunRequest):
run_requests = [item]
skip_message = None
elif isinstance(item, SkipReason):
run_requests = []
skip_message = item.skip_message
else:
# NOTE: mypy is not correctly reading this cast-- not sure why
# (pyright reads it fine). Hence the type-ignores below.
result = cast("list[RunRequest]", check.is_list(result, of_type=RunRequest))
check.invariant(
not any(not request.run_key for request in result),
"Schedules that return multiple RunRequests must specify a run_key in each"
" RunRequest",
)
run_requests = result
skip_message = None
dynamic_partitions_store = (
CachingDynamicPartitionsLoader(context.instance) if context.instance_ref else None
)
# clone all the run requests with resolved tags and config
with partition_loading_context(
context._scheduled_execution_time, # noqa
dynamic_partitions_store,
):
resolved_run_requests = []
for run_request in run_requests:
if run_request.partition_key and not run_request.has_resolved_partition():
if context.repository_def is None:
raise DagsterInvariantViolationError(
"Must provide repository def to build_schedule_context when yielding"
" partitioned run requests"
)
scheduled_target = context.repository_def.get_job(self._target.job_name)
resolved_request = run_request.with_resolved_tags_and_config(
target_definition=scheduled_target,
dynamic_partitions_requests=[],
dynamic_partitions_store=dynamic_partitions_store,
)
else:
resolved_request = run_request
resolved_run_requests.append(
resolved_request.with_replaced_attrs(
tags=merge_dicts(resolved_request.tags, DagsterRun.tags_for_schedule(self))
)
)
return ScheduleExecutionData(
run_requests=resolved_run_requests,
skip_message=skip_message,
log_key=context.log_key if context.has_captured_logs() else None,
)
@property
def target(self) -> AutomationTarget:
return self._target
@public
@property
def default_status(self) -> DefaultScheduleStatus:
"""DefaultScheduleStatus: The default status for this schedule when it is first loaded in
a code location.
"""
return self._default_status
@property
def has_anonymous_job(self) -> bool:
return bool(self._target and self._target.job_name.startswith(ANONYMOUS_ASSET_JOB_PREFIX))
| ScheduleDefinition |
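A minimal sketch of constructing this class directly against current public Dagster APIs; the asset and job names are illustrative.

```python
# Sketch: wiring a ScheduleDefinition to an asset job. Names are illustrative.
import dagster as dg

@dg.asset
def my_asset() -> None: ...

my_job = dg.define_asset_job("my_job", selection=[my_asset])

my_schedule = dg.ScheduleDefinition(
    name="my_schedule",
    cron_schedule="45 23 * * 6",  # 11:45 PM every Saturday
    job=my_job,
    execution_timezone="America/Los_Angeles",
    default_status=dg.DefaultScheduleStatus.RUNNING,
)

defs = dg.Definitions(assets=[my_asset], jobs=[my_job], schedules=[my_schedule])
```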
python | python__mypy | mypy/stats.py | {
"start": 15711,
"end": 15980
} | class ____(BoolTypeQuery):
def __init__(self) -> None:
super().__init__(ANY_STRATEGY)
def visit_any(self, t: AnyType) -> bool:
return not is_special_form_any(t)
def is_imprecise2(t: Type) -> bool:
return t.accept(HasAnyQuery2())
| HasAnyQuery |
python | xlwings__xlwings | tests/test_range.py | {
"start": 3527,
"end": 24094
} | class ____(TestBase):
def test_iterator(self):
self.wb1.sheets[0].range("A20").value = [[1.0, 2.0], [3.0, 4.0]]
r = self.wb1.sheets[0].range("A20:B21")
self.assertEqual([c.value for c in r], [1.0, 2.0, 3.0, 4.0])
# check that reiterating on same range works properly
self.assertEqual([c.value for c in r], [1.0, 2.0, 3.0, 4.0])
def test_sheet(self):
self.assertEqual(
self.wb1.sheets[1].range("A1").sheet.name, self.wb1.sheets[1].name
)
def test_len(self):
self.assertEqual(len(self.wb1.sheets[0].range("A1:C4")), 12)
def test_count(self):
self.assertEqual(
len(self.wb1.sheets[0].range("A1:C4")),
self.wb1.sheets[0].range("A1:C4").count,
)
def test_row(self):
self.assertEqual(self.wb1.sheets[0].range("B3:F5").row, 3)
def test_column(self):
self.assertEqual(self.wb1.sheets[0].range("B3:F5").column, 2)
def test_row_count(self):
self.assertEqual(self.wb1.sheets[0].range("B3:F5").rows.count, 3)
def test_column_count(self):
self.assertEqual(self.wb1.sheets[0].range("B3:F5").columns.count, 5)
def raw_value(self):
pass # TODO
def test_clear_content(self):
self.wb1.sheets[0].range("G1").value = 22
self.wb1.sheets[0].range("G1").clear_contents()
self.assertEqual(self.wb1.sheets[0].range("G1").value, None)
def test_clear_formats(self):
self.wb1.sheets[0].range("G1").value = 22
self.wb1.sheets[0].range("G1").color = (255, 0, 0)
self.wb1.sheets[0].range("G1").clear_formats()
self.assertEqual(self.wb1.sheets[0].range("G1").value, 22)
self.assertEqual(self.wb1.sheets[0].range("G1").color, None)
def test_clear(self):
self.wb1.sheets[0].range("G1").value = 22
self.wb1.sheets[0].range("G1").clear()
self.assertEqual(self.wb1.sheets[0].range("G1").value, None)
def test_end(self):
self.wb1.sheets[0].range("A1:C5").value = 1.0
self.assertEqual(
self.wb1.sheets[0].range("A1").end("d"), self.wb1.sheets[0].range("A5")
)
self.assertEqual(
self.wb1.sheets[0].range("A1").end("down"), self.wb1.sheets[0].range("A5")
)
self.assertEqual(
self.wb1.sheets[0].range("C5").end("u"), self.wb1.sheets[0].range("C1")
)
self.assertEqual(
self.wb1.sheets[0].range("C5").end("up"), self.wb1.sheets[0].range("C1")
)
self.assertEqual(
self.wb1.sheets[0].range("A1").end("right"), self.wb1.sheets[0].range("C1")
)
self.assertEqual(
self.wb1.sheets[0].range("A1").end("r"), self.wb1.sheets[0].range("C1")
)
self.assertEqual(
self.wb1.sheets[0].range("C5").end("left"), self.wb1.sheets[0].range("A5")
)
self.assertEqual(
self.wb1.sheets[0].range("C5").end("l"), self.wb1.sheets[0].range("A5")
)
def test_formula(self):
self.wb1.sheets[0].range("A1").formula = "=SUM(A2:A10)"
self.assertEqual(self.wb1.sheets[0].range("A1").formula, "=SUM(A2:A10)")
def test_formula2(self):
self.wb1.sheets[0].range("A1").formula2 = "=UNIQUE(A2:A10)"
self.assertEqual(self.wb1.sheets[0].range("A1").formula2, "=UNIQUE(A2:A10)")
def test_formula_array(self):
self.wb1.sheets[0].range("A1").value = [[1, 4], [2, 5], [3, 6]]
self.wb1.sheets[0].range("D1").formula_array = "=SUM(A1:A3*B1:B3)"
self.assertEqual(self.wb1.sheets[0].range("D1").value, 32.0)
def test_column_width(self):
self.wb1.sheets[0].range("A1:B2").column_width = 10.0
result = self.wb1.sheets[0].range("A1").column_width
self.assertEqual(10.0, result)
self.wb1.sheets[0].range("A1:B2").value = "ensure cells are used"
self.wb1.sheets[0].range("B2").column_width = 20.0
result = self.wb1.sheets[0].range("A1:B2").column_width
self.assertEqual(None, result)
def test_row_height(self):
self.wb1.sheets[0].range("A1:B2").row_height = 15.0
result = self.wb1.sheets[0].range("A1").row_height
self.assertEqual(15.0, result)
self.wb1.sheets[0].range("A1:B2").value = "ensure cells are used"
self.wb1.sheets[0].range("B2").row_height = 20.0
result = self.wb1.sheets[0].range("A1:B2").row_height
self.assertEqual(None, result)
def test_width(self):
"""test_width: Width depends on default style text size,
so do not test absolute widths"""
self.wb1.sheets[0].range("A1:D4").column_width = 10.0
result_before = self.wb1.sheets[0].range("A1").width
self.wb1.sheets[0].range("A1:D4").column_width = 12.0
result_after = self.wb1.sheets[0].range("A1").width
self.assertTrue(result_after > result_before)
def test_height(self):
self.wb1.sheets[0].range("A1:D4").row_height = 60.0
result = self.wb1.sheets[0].range("A1:D4").height
self.assertEqual(240.0, result)
def test_left(self):
self.assertEqual(self.wb1.sheets[0].range("A1").left, 0.0)
self.wb1.sheets[0].range("A1").column_width = 20.0
self.assertEqual(
self.wb1.sheets[0].range("B1").left, self.wb1.sheets[0].range("A1").width
)
def test_top(self):
self.assertEqual(self.wb1.sheets[0].range("A1").top, 0.0)
self.wb1.sheets[0].range("A1").row_height = 20.0
self.assertEqual(
self.wb1.sheets[0].range("A2").top, self.wb1.sheets[0].range("A1").height
)
def test_number_format_cell(self):
format_string = "mm/dd/yy;@"
self.wb1.sheets[0].range("A1").number_format = format_string
result = self.wb1.sheets[0].range("A1").number_format
self.assertEqual(format_string, result)
def test_number_format_range(self):
format_string = "mm/dd/yy;@"
self.wb1.sheets[0].range("A1:D4").number_format = format_string
result = self.wb1.sheets[0].range("A1:D4").number_format
self.assertEqual(format_string, result)
def test_get_address(self):
wb1 = self.app1.books.open(os.path.join(this_dir, "test book.xlsx"))
res = wb1.sheets[0].range((1, 1), (3, 3)).get_address()
self.assertEqual(res, "$A$1:$C$3")
res = wb1.sheets[0].range((1, 1), (3, 3)).get_address(False)
self.assertEqual(res, "$A1:$C3")
res = wb1.sheets[0].range((1, 1), (3, 3)).get_address(True, False)
self.assertEqual(res, "A$1:C$3")
res = wb1.sheets[0].range((1, 1), (3, 3)).get_address(False, False)
self.assertEqual(res, "A1:C3")
res = wb1.sheets[0].range((1, 1), (3, 3)).get_address(include_sheetname=True)
self.assertEqual(res, "'Sheet1'!$A$1:$C$3")
res = wb1.sheets[1].range((1, 1), (3, 3)).get_address(include_sheetname=True)
self.assertEqual(res, "'Sheet2'!$A$1:$C$3")
res = wb1.sheets[0].range((1, 1), (3, 3)).get_address(external=True)
self.assertEqual(res, "'[test book.xlsx]Sheet1'!$A$1:$C$3")
def test_address(self):
self.assertEqual(self.wb1.sheets[0].range("A1:B2").address, "$A$1:$B$2")
def test_current_region(self):
values = [[1.0, 2.0], [3.0, 4.0]]
self.wb1.sheets[0].range("A20").value = values
self.assertEqual(self.wb1.sheets[0].range("B21").current_region.value, values)
def test_autofit_range(self):
self.wb1.sheets[0].range("A1:D4").value = "test_string"
self.wb1.sheets[0].range("A1:D4").row_height = 40
self.wb1.sheets[0].range("A1:D4").column_width = 40
self.assertEqual(40, self.wb1.sheets[0].range("A1:D4").row_height)
self.assertEqual(40, self.wb1.sheets[0].range("A1:D4").column_width)
self.wb1.sheets[0].range("A1:D4").autofit()
self.assertTrue(40 != self.wb1.sheets[0].range("A1:D4").column_width)
self.assertTrue(40 != self.wb1.sheets[0].range("A1:D4").row_height)
self.wb1.sheets[0].range("A1:D4").row_height = 40
self.assertEqual(40, self.wb1.sheets[0].range("A1:D4").row_height)
self.wb1.sheets[0].range("A1:D4").rows.autofit()
self.assertTrue(40 != self.wb1.sheets[0].range("A1:D4").row_height)
self.wb1.sheets[0].range("A1:D4").column_width = 40
self.assertEqual(40, self.wb1.sheets[0].range("A1:D4").column_width)
self.wb1.sheets[0].range("A1:D4").columns.autofit()
self.assertTrue(40 != self.wb1.sheets[0].range("A1:D4").column_width)
self.wb1.sheets[0].range("A1:D4").rows.autofit()
self.wb1.sheets[0].range("A1:D4").columns.autofit()
def test_autofit_col(self):
self.wb1.sheets[0].range("A1:D4").value = "test_string"
self.wb1.sheets[0].range("A:D").column_width = 40
self.assertEqual(40, self.wb1.sheets[0].range("A:D").column_width)
self.wb1.sheets[0].range("A:D").autofit()
self.assertTrue(40 != self.wb1.sheets[0].range("A:D").column_width)
# Just checking if they don't throw an error
self.wb1.sheets[0].range("A:D").rows.autofit()
self.wb1.sheets[0].range("A:D").columns.autofit()
def test_autofit_row(self):
self.wb1.sheets[0].range("A1:D4").value = "test_string"
self.wb1.sheets[0].range("1:10").row_height = 40
self.assertEqual(40, self.wb1.sheets[0].range("1:10").row_height)
self.wb1.sheets[0].range("1:10").autofit()
self.assertTrue(40 != self.wb1.sheets[0].range("1:10").row_height)
# Just checking if they don't throw an error
self.wb1.sheets[0].range("1:1000000").rows.autofit()
self.wb1.sheets[0].range("1:1000000").columns.autofit()
def test_color(self):
rgb = (30, 100, 200)
self.wb1.sheets[0].range("A1").color = rgb
self.assertEqual(rgb, self.wb1.sheets[0].range("A1").color)
self.wb1.sheets[0].range("A2").color = RgbColor.rgbAqua
self.assertEqual((0, 255, 255), self.wb1.sheets[0].range("A2").color)
self.wb1.sheets[0].range("A2").color = None
self.assertEqual(self.wb1.sheets[0].range("A2").color, None)
self.wb1.sheets[0].range("A1:D4").color = rgb
self.assertEqual(rgb, self.wb1.sheets[0].range("A1:D4").color)
def test_len_rows(self):
self.assertEqual(len(self.wb1.sheets[0].range("A1:C4").rows), 4)
def test_count_rows(self):
self.assertEqual(
len(self.wb1.sheets[0].range("A1:C4").rows),
self.wb1.sheets[0].range("A1:C4").rows.count,
)
def test_len_cols(self):
self.assertEqual(len(self.wb1.sheets[0].range("A1:C4").columns), 3)
def test_count_cols(self):
self.assertEqual(
len(self.wb1.sheets[0].range("A1:C4").columns),
self.wb1.sheets[0].range("A1:C4").columns.count,
)
def test_shape(self):
self.assertEqual(self.wb1.sheets[0].range("A1:C4").shape, (4, 3))
def test_size(self):
self.assertEqual(self.wb1.sheets[0].range("A1:C4").size, 12)
def test_table(self):
data = [
[1, 2.222, 3.333],
["Test1", None, "éöà"],
[datetime(1962, 11, 3), datetime(2020, 12, 31, 12, 12, 20), 9.999],
]
self.wb1.sheets[0].range("A1").value = data
if sys.platform.startswith("win") and self.wb1.app.version == "14.0":
self.wb1.sheets[0].range(
"A3:B3"
).number_format = "dd/mm/yyyy" # Hack for Excel 2010 bug, see GH #43
cells = self.wb1.sheets[0].range("A1").expand("table").value
self.assertEqual(cells, data)
def test_vertical(self):
data = [
[1, 2.222, 3.333],
["Test1", None, "éöà"],
[datetime(1962, 11, 3), datetime(2020, 12, 31, 12, 12, 20), 9.999],
]
self.wb1.sheets[0].range("A10").value = data
if sys.platform.startswith("win") and self.wb1.app.version == "14.0":
self.wb1.sheets[0].range(
"A12:B12"
).number_format = "dd/mm/yyyy" # Hack for Excel 2010 bug, see GH #43
cells = self.wb1.sheets[0].range("A10").expand("vertical").value
self.assertEqual(cells, [row[0] for row in data])
cells = self.wb1.sheets[0].range("A10").expand("d").value
self.assertEqual(cells, [row[0] for row in data])
cells = self.wb1.sheets[0].range("A10").expand("down").value
self.assertEqual(cells, [row[0] for row in data])
def test_horizontal(self):
data = [
[1, 2.222, 3.333],
["Test1", None, "éöà"],
[datetime(1962, 11, 3), datetime(2020, 12, 31, 12, 12, 20), 9.999],
]
self.wb1.sheets[0].range("A20").value = data
cells = self.wb1.sheets[0].range("A20").expand("horizontal").value
self.assertEqual(cells, data[0])
cells = self.wb1.sheets[0].range("A20").expand("r").value
self.assertEqual(cells, data[0])
cells = self.wb1.sheets[0].range("A20").expand("right").value
self.assertEqual(cells, data[0])
def test_hyperlink(self):
address = "www.xlwings.org"
# Naked address
self.wb1.sheets[0].range("A1").add_hyperlink(address)
self.assertEqual(self.wb1.sheets[0].range("A1").value, address)
hyperlink = self.wb1.sheets[0].range("A1").hyperlink
if not hyperlink.endswith("/"):
hyperlink += "/"
self.assertEqual(hyperlink, "http://" + address + "/")
# Address + FriendlyName
self.wb1.sheets[0].range("A2").add_hyperlink(address, "test_link")
self.assertEqual(self.wb1.sheets[0].range("A2").value, "test_link")
hyperlink = self.wb1.sheets[0].range("A2").hyperlink
if not hyperlink.endswith("/"):
hyperlink += "/"
self.assertEqual(hyperlink, "http://" + address + "/")
def test_hyperlink_formula(self):
self.wb1.sheets[0].range(
"B10"
).formula = '=HYPERLINK("http://xlwings.org", "xlwings")'
self.assertEqual(
self.wb1.sheets[0].range("B10").hyperlink, "http://xlwings.org"
)
def test_insert_cell(self):
self.wb1.sheets[0].range("A1:C1").value = "test"
self.wb1.sheets[0].range("A1").insert()
self.assertIsNone(self.wb1.sheets[0].range("A1").value)
self.assertEqual(self.wb1.sheets[0].range("A2").value, "test")
def test_insert_row(self):
self.wb1.sheets[0].range("A1:C1").value = "test"
self.wb1.sheets[0].range("1:1").insert()
self.assertEqual(self.wb1.sheets[0].range("A1:C1").value, [None, None, None])
self.assertEqual(
self.wb1.sheets[0].range("A2:C2").value, ["test", "test", "test"]
)
def test_insert_column(self):
self.wb1.sheets[0].range("A1:A3").value = "test"
self.wb1.sheets[0].range("A:A").insert()
self.assertEqual(self.wb1.sheets[0].range("A1:A3").value, [None, None, None])
self.assertEqual(
self.wb1.sheets[0].range("B1:B3").value, ["test", "test", "test"]
)
def test_insert_cell_shift_down(self):
self.wb1.sheets[0].range("A1:C1").value = "test"
self.wb1.sheets[0].range("A1").insert(shift="down")
self.assertIsNone(self.wb1.sheets[0].range("A1").value)
self.assertEqual(self.wb1.sheets[0].range("A2").value, "test")
def test_insert_cell_shift_right(self):
self.wb1.sheets[0].range("A1:C1").value = "test"
self.wb1.sheets[0].range("A1").insert(shift="right")
self.assertIsNone(self.wb1.sheets[0].range("A1").value)
self.assertEqual(
self.wb1.sheets[0].range("B1:D1").value, ["test", "test", "test"]
)
def test_delete_cell(self):
self.wb1.sheets[0].range("A1").value = ["one", "two", "three"]
self.wb1.sheets[0].range("A1").delete()
self.assertIsNone(self.wb1.sheets[0].range("C1").value)
self.assertEqual(self.wb1.sheets[0].range("A1").value, "two")
def test_delete_row(self):
self.wb1.sheets[0].range("A1:C1").value = "one"
self.wb1.sheets[0].range("A2:C2").value = "two"
self.wb1.sheets[0].range("1:1").delete()
self.assertEqual(self.wb1.sheets[0].range("A1:C1").value, ["two", "two", "two"])
self.assertEqual(self.wb1.sheets[0].range("A2:C2").value, [None, None, None])
def test_delete_column(self):
self.wb1.sheets[0].range("A1:A1").value = "one"
self.wb1.sheets[0].range("B1:B2").value = "two"
self.wb1.sheets[0].range("C1:C2").value = "two"
self.wb1.sheets[0].range("A:A").delete()
self.assertEqual(self.wb1.sheets[0].range("C1:C2").value, [None, None])
self.assertEqual(self.wb1.sheets[0].range("A1:A2").value, ["two", "two"])
def test_delete_cell_shift_up(self):
self.wb1.sheets[0].range("A1").value = ["one", "two", "three"]
self.wb1.sheets[0].range("A1").delete("up")
self.assertIsNone(self.wb1.sheets[0].range("A1").value)
self.assertEqual(self.wb1.sheets[0].range("B1:C1").value, ["two", "three"])
def test_delete_cell_shift_left(self):
self.wb1.sheets[0].range("A1").value = ["one", "two", "three"]
self.wb1.sheets[0].range("A1").delete("left")
self.assertIsNone(self.wb1.sheets[0].range("C1").value)
self.assertEqual(self.wb1.sheets[0].range("A1").value, "two")
def test_copy_destination(self):
sheet = self.wb1.sheets[0]
sheet.range("A1:B1").value = "test"
sheet.range("A1:B1").copy(destination=sheet.range("A2"))
self.assertEqual(sheet.range("A1:B1").value, sheet.range("A2:B2").value)
def test_copy_clipboard(self):
sheet = self.wb1.sheets[0]
sheet.range("A1:B1").value = "test"
sheet.range("A1:B1").copy()
def test_paste(self):
sheet = self.wb1.sheets[0]
sheet.range("A1:B1").value = "test"
sheet.range("A1:B1").color = (34, 34, 34)
sheet.range("A1:B1").copy()
sheet.range("A2").paste()
self.assertEqual(sheet["A1:B1"].value, sheet["A2:B2"].value)
self.assertEqual(sheet["A1:B1"].color, sheet["A2:B2"].color)
def test_paste_values(self):
sheet = self.wb1.sheets[0]
sheet.range("A1:B1").value = "test"
sheet.range("A1:B1").color = (34, 34, 34)
sheet.range("A1:B1").copy()
sheet.range("A2").paste(paste="values")
self.assertEqual(sheet["A1:B1"].value, sheet["A2:B2"].value)
self.assertNotEqual(sheet["A1:B1"].color, sheet["A2:B2"].color)
def test_resize(self):
r = self.wb1.sheets[0].range("A1").resize(4, 5)
self.assertEqual(r.address, "$A$1:$E$4")
r = self.wb1.sheets[0].range("A1").resize(row_size=4)
self.assertEqual(r.address, "$A$1:$A$4")
r = self.wb1.sheets[0].range("A1:B4").resize(column_size=5)
self.assertEqual(r.address, "$A$1:$E$4")
r = self.wb1.sheets[0].range("A1:B4").resize(row_size=5)
self.assertEqual(r.address, "$A$1:$B$5")
r = self.wb1.sheets[0].range("A1:B4").resize()
self.assertEqual(r.address, "$A$1:$B$4")
r = self.wb1.sheets[0].range("A1:C5").resize(row_size=1)
self.assertEqual(r.address, "$A$1:$C$1")
with self.assertRaises(AssertionError):
self.wb1.sheets[0].range("A1:B4").resize(row_size=0)
with self.assertRaises(AssertionError):
self.wb1.sheets[0].range("A1:B4").resize(column_size=0)
def test_offset(self):
o = self.wb1.sheets[0].range("A1:B3").offset(3, 4)
self.assertEqual(o.address, "$E$4:$F$6")
o = self.wb1.sheets[0].range("A1:B3").offset(row_offset=3)
self.assertEqual(o.address, "$A$4:$B$6")
o = self.wb1.sheets[0].range("A1:B3").offset(column_offset=4)
self.assertEqual(o.address, "$E$1:$F$3")
def test_last_cell(self):
self.assertEqual(self.wb1.sheets[0].range("B3:F5").last_cell.row, 5)
self.assertEqual(self.wb1.sheets[0].range("B3:F5").last_cell.column, 6)
def test_select(self):
self.wb2.sheets[0].range("C10").select()
self.assertEqual(
self.app2.selection.address, self.wb2.sheets[0].range("C10").address
)
def test_wrap_text(self):
self.assertFalse(self.wb1.sheets[0]["A1"].wrap_text)
self.wb1.sheets[0]["A1"].wrap_text = True
self.assertTrue(self.wb1.sheets[0]["A1"].wrap_text)
| TestRangeAttributes |
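The pattern these tests exercise, as a short interactive sketch; it requires a running Excel installation.

```python
# Sketch of the range API exercised above; requires an Excel installation.
import xlwings as xw

wb = xw.Book()
sht = wb.sheets[0]
sht.range("A1").value = [[1.0, 2.0], [3.0, 4.0]]
print(sht.range("A1").expand("table").value)  # [[1.0, 2.0], [3.0, 4.0]]
print(sht.range("A1:B2").shape)               # (2, 2)
```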
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 388636,
"end": 389061
} | class ____(StatNode):
"""
critical_section - the CriticalSectionStatNode that owns this
"""
child_attrs = []
uptree_ref_attrs = ["critical_section"]
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
code.putln(
f"__Pyx_PyCriticalSection{self.length_tag}_End(&{ Naming.critical_section_variable});"
)
| CriticalSectionExitNode |
python | coleifer__peewee | tests/model_save.py | {
"start": 619,
"end": 4175
} | class ____(ModelTestCase):
requires = [T1, T2, T3, T4]
def test_auto_field(self):
# AutoField will be inserted if the PK is not set, after which the new
# ID will be populated.
t11 = T1(value=1)
self.assertEqual(t11.save(), 1)
self.assertTrue(t11.pk is not None)
# Calling save() a second time will issue an update.
t11.value = 100
self.assertEqual(t11.save(), 1)
# Verify the record was updated.
t11_db = T1[t11.pk]
self.assertEqual(t11_db.value, 100)
# We can explicitly specify the value of an auto-incrementing
# primary-key, but we must be sure to call save(force_insert=True),
# otherwise peewee will attempt to do an update.
t12 = T1(pk=1337, value=2)
self.assertEqual(t12.save(), 0)
self.assertEqual(T1.select().count(), 1)
self.assertEqual(t12.save(force_insert=True), 1)
# Attempting to force-insert an already-existing PK will fail with an
# integrity error.
with self.database.atomic():
with self.assertRaises(IntegrityError):
t12.value = 3
t12.save(force_insert=True)
query = T1.select().order_by(T1.value).tuples()
self.assertEqual(list(query), [(1337, 2), (t11.pk, 100)])
@requires_pglike
def test_server_default_pk(self):
# The new value of the primary-key will be returned to us, since
# postgres supports RETURNING.
t2 = T2(value=1)
self.assertEqual(t2.save(), 1)
self.assertEqual(t2.pk, 3)
# Saving after the PK is set will issue an update.
t2.value = 100
self.assertEqual(t2.save(), 1)
t2_db = T2[3]
self.assertEqual(t2_db.value, 100)
# If we just set the pk and try to save, peewee issues an update which
# doesn't have any effect.
t22 = T2(pk=2, value=20)
self.assertEqual(t22.save(), 0)
self.assertEqual(T2.select().count(), 1)
# We can force-insert the value we specify explicitly.
self.assertEqual(t22.save(force_insert=True), 1)
self.assertEqual(T2[2].value, 20)
def test_integer_field_pk(self):
# For a non-auto-incrementing primary key, we have to use force_insert.
t3 = T3(pk=2, value=1)
self.assertEqual(t3.save(), 0) # Oops, attempts to do an update.
self.assertEqual(T3.select().count(), 0)
# Force to be an insert.
self.assertEqual(t3.save(force_insert=True), 1)
# Now we can update the value and call save() to issue an update.
t3.value = 100
self.assertEqual(t3.save(), 1)
# Verify data is correct.
t3_db = T3[2]
self.assertEqual(t3_db.value, 100)
def test_composite_pk(self):
t4 = T4(pk1=1, pk2=2, value=10)
# Will attempt to do an update on non-existent rows.
self.assertEqual(t4.save(), 0)
self.assertEqual(t4.save(force_insert=True), 1)
# Modifying part of the composite PK and attempting an update will fail.
t4.pk2 = 3
t4.value = 30
self.assertEqual(t4.save(), 0)
t4.pk2 = 2
self.assertEqual(t4.save(), 1)
t4_db = T4[1, 2]
self.assertEqual(t4_db.value, 30)
@requires_pglike
def test_returning_object(self):
query = T2.insert(value=10).returning(T2).objects()
t2_db, = list(query)
self.assertEqual(t2_db.pk, 3)
self.assertEqual(t2_db.value, 10)
| TestPrimaryKeySaveHandling |
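The force_insert behavior these tests rely on, as a standalone sketch with an in-memory SQLite database; the model here is illustrative, not the test fixture.

```python
# Sketch of the non-auto-incrementing PK behavior; illustrative model.
from peewee import IntegerField, Model, SqliteDatabase

db = SqliteDatabase(":memory:")

class Item(Model):
    pk = IntegerField(primary_key=True)  # not auto-incrementing
    value = IntegerField()
    class Meta:
        database = db

db.create_tables([Item])

item = Item(pk=2, value=1)
print(item.save())                   # 0 -- issues an UPDATE, no row exists yet
print(item.save(force_insert=True))  # 1 -- row inserted
item.value = 100
print(item.save())                   # 1 -- ordinary UPDATE now succeeds
```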
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 127300,
"end": 128383
} | class ____(TestCase):
"""Tests for ``always_reversible()``"""
def test_regular_reversed(self):
self.assertEqual(
list(reversed(range(10))), list(mi.always_reversible(range(10)))
)
self.assertEqual(
list(reversed([1, 2, 3])), list(mi.always_reversible([1, 2, 3]))
)
self.assertEqual(
reversed([1, 2, 3]).__class__,
mi.always_reversible([1, 2, 3]).__class__,
)
def test_nonseq_reversed(self):
# Create a non-reversible generator from a sequence
with self.assertRaises(TypeError):
reversed(x for x in range(10))
self.assertEqual(
list(reversed(range(10))),
list(mi.always_reversible(x for x in range(10))),
)
self.assertEqual(
list(reversed([1, 2, 3])),
list(mi.always_reversible(x for x in [1, 2, 3])),
)
self.assertNotEqual(
reversed((1, 2)).__class__,
mi.always_reversible(x for x in (1, 2)).__class__,
)
| AlwaysReversibleTests |
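The behavior under test, in a few lines:

```python
# Sketch: always_reversible() materializes non-sequence iterables so that
# reversed() semantics apply even to generators.
import more_itertools as mi

print(list(mi.always_reversible(x for x in range(5))))  # [4, 3, 2, 1, 0]
print(list(mi.always_reversible([1, 2, 3])))            # [3, 2, 1]
```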
python | fastapi__sqlmodel | docs_src/tutorial/where/tutorial006_py310.py | {
"start": 71,
"end": 1549
} | class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str
secret_name: str
age: int | None = None
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
hero_4 = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32)
hero_5 = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
hero_6 = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36)
hero_7 = Hero(name="Captain North America", secret_name="Esteban Rogelios", age=93)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
session.add(hero_4)
session.add(hero_5)
session.add(hero_6)
session.add(hero_7)
session.commit()
def select_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.age <= 35)
results = session.exec(statement)
for hero in results:
print(hero)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
| Hero |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 94335,
"end": 95443
} | class ____:
xlPTClassic = 20 # from enum XlPivotFormatType
xlPTNone = 21 # from enum XlPivotFormatType
xlReport1 = 0 # from enum XlPivotFormatType
xlReport10 = 9 # from enum XlPivotFormatType
xlReport2 = 1 # from enum XlPivotFormatType
xlReport3 = 2 # from enum XlPivotFormatType
xlReport4 = 3 # from enum XlPivotFormatType
xlReport5 = 4 # from enum XlPivotFormatType
xlReport6 = 5 # from enum XlPivotFormatType
xlReport7 = 6 # from enum XlPivotFormatType
xlReport8 = 7 # from enum XlPivotFormatType
xlReport9 = 8 # from enum XlPivotFormatType
xlTable1 = 10 # from enum XlPivotFormatType
xlTable10 = 19 # from enum XlPivotFormatType
xlTable2 = 11 # from enum XlPivotFormatType
xlTable3 = 12 # from enum XlPivotFormatType
xlTable4 = 13 # from enum XlPivotFormatType
xlTable5 = 14 # from enum XlPivotFormatType
xlTable6 = 15 # from enum XlPivotFormatType
xlTable7 = 16 # from enum XlPivotFormatType
xlTable8 = 17 # from enum XlPivotFormatType
xlTable9 = 18 # from enum XlPivotFormatType
| PivotFormatType |
python | wandb__wandb | wandb/vendor/pygments/lexers/csound.py | {
"start": 3055,
"end": 4218
} | class ____(CsoundLexer):
"""
For `Csound <http://csound.github.io>`_ scores.
.. versionadded:: 2.1
"""
name = 'Csound Score'
aliases = ['csound-score', 'csound-sco']
filenames = ['*.sco']
tokens = {
'partial statement': [
include('preprocessor directives'),
(r'\d+e[+-]?\d+|(\d+\.\d*|\d*\.\d+)(e[+-]?\d+)?', Number.Float),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+', Number.Integer),
(r'"', String, 'single-line string'),
(r'[+\-*/%^!=<>|&#~.]', Operator),
(r'[]()[]', Punctuation),
(r'\w+', Comment.Preproc)
],
'statement': [
include('whitespace or macro call'),
newline + ('#pop',),
include('partial statement')
],
'root': [
newline,
include('whitespace or macro call'),
(r'[{}]', Punctuation, 'statement'),
(r'[abefimq-tv-z]|[nN][pP]?', Keyword, 'statement')
],
'single-line string': [
(r'"', String, '#pop'),
(r'[^\\"]+', String)
]
}
| CsoundScoreLexer |
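A usage sketch; note this record shows wandb's vendored copy, while the upstream class lives in `pygments.lexers.csound`.

```python
# Sketch: highlighting a .sco snippet with the upstream lexer.
from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers.csound import CsoundScoreLexer

score = "i 1 0 4 0.5 440\ne 5\n"
print(highlight(score, CsoundScoreLexer(), TerminalFormatter()))
```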
python | catalyst-team__catalyst | catalyst/contrib/layers/lama.py | {
"start": 4448,
"end": 6469
} | class ____(nn.Module):
"""@TODO: Docs. Contribution is welcome."""
available_groups = [
"last",
"avg",
"avg_droplast",
"max",
"max_droplast",
"sigmoid",
"sigmoid_droplast",
"softmax",
"softmax_droplast",
"tanh",
"tanh_droplast",
]
def __init__(self, in_features, groups=None):
"""@TODO: Docs. Contribution is welcome."""
super().__init__()
self.in_features = in_features
self.groups = groups or [
"last",
"avg_droplast",
"max_droplast",
"softmax_droplast",
]
self.out_features = in_features * len(self.groups)
groups = {}
for key in self.groups:
if isinstance(key, str):
groups[key] = _get_pooling(key, self.in_features)
elif isinstance(key, dict):
key_key = key.pop("key")
groups[key_key] = _get_pooling(key_key, in_features, **key)
else:
raise NotImplementedError()
self.groups = nn.ModuleDict(groups)
def forward(self, x: torch.Tensor, mask: torch.Tensor = None) -> torch.Tensor:
"""
Forward method of the LAMA.
Args:
x: tensor of size
(batch_size, history_len, feature_size)
mask: mask to use for attention compute
Returns:
torch.Tensor: LAMA pooling result
"""
batch_size, history_len, feature_size = x.shape
features_list = []
for pooling_fn in self.groups.values():
features = pooling_fn(x, mask)
features_list.append(features)
x = torch.cat(features_list, dim=1)
x = x.view(batch_size, -1)
return x
__all__ = [
"TemporalLastPooling",
"TemporalAvgPooling",
"TemporalMaxPooling",
"TemporalDropLastWrapper",
"TemporalAttentionPooling",
"TemporalConcatPooling",
"LamaPooling",
]
| LamaPooling |
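A shape-level sketch of the module: with the default four pooling groups, the output width is `in_features * 4`; the import path follows the record's file path.

```python
# Sketch: default groups give out_features = in_features * 4.
import torch
from catalyst.contrib.layers.lama import LamaPooling

pool = LamaPooling(in_features=8)
x = torch.randn(4, 16, 8)   # (batch_size, history_len, feature_size)
print(pool(x).shape)        # torch.Size([4, 32])
```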
python | Textualize__textual | src/textual/_parser.py | {
"start": 190,
"end": 268
} | class ____(Exception):
"""Base class for parse related errors."""
| ParseError |
python | matplotlib__matplotlib | lib/matplotlib/projections/geo.py | {
"start": 14082,
"end": 17605
} | class ____(GeoAxes):
name = 'lambert'
class LambertTransform(_GeoTransform):
"""The base Lambert transform."""
def __init__(self, center_longitude, center_latitude, resolution):
"""
Create a new Lambert transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Lambert space.
"""
_GeoTransform.__init__(self, resolution)
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform_non_affine(self, values):
# docstring inherited
longitude, latitude = values.T
clong = self._center_longitude
clat = self._center_latitude
cos_lat = np.cos(latitude)
sin_lat = np.sin(latitude)
diff_long = longitude - clong
cos_diff_long = np.cos(diff_long)
inner_k = np.maximum( # Prevent divide-by-zero problems
1 + np.sin(clat)*sin_lat + np.cos(clat)*cos_lat*cos_diff_long,
1e-15)
k = np.sqrt(2 / inner_k)
x = k * cos_lat*np.sin(diff_long)
y = k * (np.cos(clat)*sin_lat - np.sin(clat)*cos_lat*cos_diff_long)
return np.column_stack([x, y])
def inverted(self):
# docstring inherited
return LambertAxes.InvertedLambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
class InvertedLambertTransform(_GeoTransform):
def __init__(self, center_longitude, center_latitude, resolution):
_GeoTransform.__init__(self, resolution)
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform_non_affine(self, values):
# docstring inherited
x, y = values.T
clong = self._center_longitude
clat = self._center_latitude
p = np.maximum(np.hypot(x, y), 1e-9)
c = 2 * np.arcsin(0.5 * p)
sin_c = np.sin(c)
cos_c = np.cos(c)
latitude = np.arcsin(cos_c*np.sin(clat) +
((y*sin_c*np.cos(clat)) / p))
longitude = clong + np.arctan(
(x*sin_c) / (p*np.cos(clat)*cos_c - y*np.sin(clat)*sin_c))
return np.column_stack([longitude, latitude])
def inverted(self):
# docstring inherited
return LambertAxes.LambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
def __init__(self, *args, center_longitude=0, center_latitude=0, **kwargs):
self._longitude_cap = np.pi / 2
self._center_longitude = center_longitude
self._center_latitude = center_latitude
super().__init__(*args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.clear()
def clear(self):
# docstring inherited
super().clear()
self.yaxis.set_major_formatter(NullFormatter())
def _get_core_transform(self, resolution):
return self.LambertTransform(
self._center_longitude,
self._center_latitude,
resolution)
def _get_affine_transform(self):
return Affine2D() \
.scale(0.25) \
.translate(0.5, 0.5)
| LambertAxes |
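The class is registered under the projection name "lambert", so it is normally reached through matplotlib's projection keyword rather than instantiated directly:

```python
# Sketch: selecting the Lambert geo projection by name.
import matplotlib.pyplot as plt

ax = plt.subplot(projection="lambert")
ax.grid(True)
plt.show()
```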
python | google__pytype | pytype/errors/error_types.py | {
"start": 5274,
"end": 5455
} | class ____(ProtocolError):
def __init__(self, left_type, other_type, missing):
super().__init__(left_type, other_type)
self.missing = missing
| ProtocolMissingAttributesError |
python | sphinx-doc__sphinx | sphinx/transforms/post_transforms/code.py | {
"start": 2639,
"end": 4509
} | class ____(SphinxTransform):
"""Trim doctest flags like ``# doctest: +FLAG`` from python code-blocks.
see :confval:`trim_doctest_flags` for more information.
"""
default_priority = HighlightLanguageTransform.default_priority + 1
def apply(self, **kwargs: Any) -> None:
for lbnode in self.document.findall(nodes.literal_block):
if self.is_pyconsole(lbnode):
self.strip_doctest_flags(lbnode)
for dbnode in self.document.findall(nodes.doctest_block):
self.strip_doctest_flags(dbnode)
def strip_doctest_flags(self, node: TextElement) -> None:
if not node.get('trim_flags', self.config.trim_doctest_flags):
return
source = node.rawsource
source = doctest.blankline_re.sub('', source)
source = doctest.doctestopt_re.sub('', source)
node.rawsource = source
node[:] = [nodes.Text(source)]
@staticmethod
def is_pyconsole(node: nodes.literal_block) -> bool:
if node.rawsource != node.astext():
return False # skip parsed-literal node
language = node.get('language')
if language in {'pycon', 'pycon3'}:
return True
elif language in {'py', 'python', 'py3', 'python3', 'default'}:
return node.rawsource.startswith('>>>')
elif language == 'guess':
try:
lexer = guess_lexer(node.rawsource)
return isinstance(lexer, PythonConsoleLexer)
except Exception:
pass
return False
def setup(app: Sphinx) -> ExtensionMetadata:
app.add_post_transform(HighlightLanguageTransform)
app.add_post_transform(TrimDoctestFlagsTransform)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| TrimDoctestFlagsTransform |
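The transform consults the `trim_doctest_flags` confval (with a per-block `trim_flags` override), so enabling it is a one-line conf.py setting:

```python
# conf.py sketch: the confval read via node.get('trim_flags', ...) above.
trim_doctest_flags = True  # strip "# doctest: +FLAG" lines from pycon blocks
```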
python | Textualize__textual | docs/examples/guide/input/binding01.py | {
"start": 150,
"end": 734
} | class ____(App):
CSS_PATH = "binding01.tcss"
BINDINGS = [
("r", "add_bar('red')", "Add Red"),
("g", "add_bar('green')", "Add Green"),
("b", "add_bar('blue')", "Add Blue"),
]
def compose(self) -> ComposeResult:
yield Footer()
def action_add_bar(self, color: str) -> None:
bar = Bar(color)
bar.styles.background = Color.parse(color).with_alpha(0.5)
self.mount(bar)
self.call_after_refresh(self.screen.scroll_end, animate=False)
if __name__ == "__main__":
app = BindingApp()
app.run()
| BindingApp |
python | pennersr__django-allauth | allauth/socialaccount/providers/pocket/client.py | {
"start": 377,
"end": 3563
} | class ____(OAuthClient):
def _get_request_token(self):
"""
Obtain a temporary request token to authorize an access token and to
sign the request to obtain the access token
"""
if self.request_token is None:
redirect_url = build_absolute_uri(self.request, self.callback_url)
headers = {
"X-Accept": "application/json",
}
data = {
"consumer_key": self.consumer_key,
"redirect_uri": redirect_url,
}
response = (
get_adapter()
.get_requests_session()
.post(
url=self.request_token_url,
json=data,
headers=headers,
)
)
if response.status_code != HTTPStatus.OK:
raise OAuthError(
_("Invalid response while obtaining request token" ' from "%s".')
% get_token_prefix(self.request_token_url)
)
self.request_token = response.json()["code"]
self.request.session[
"oauth_%s_request_token" % get_token_prefix(self.request_token_url)
] = self.request_token
return self.request_token
def get_redirect(self, authorization_url, extra_params):
"""
Returns a ``HttpResponseRedirect`` object to redirect the user
to the Pocket authorization URL.
"""
request_token = self._get_request_token()
params = {
"request_token": request_token,
"redirect_uri": self.request.build_absolute_uri(self.callback_url),
}
params.update(extra_params)
url = authorization_url + "?" + urlencode(params)
return HttpResponseRedirect(url)
def get_access_token(self):
"""
Obtain the access token to access private resources at the API
endpoint.
"""
if self.access_token is None:
request_token = self._get_rt_from_session()
url = self.access_token_url
headers = {
"X-Accept": "application/json",
}
data = {
"consumer_key": self.consumer_key,
"code": request_token,
}
response = (
get_adapter()
.get_requests_session()
.post(url=url, headers=headers, json=data)
)
if response.status_code != HTTPStatus.OK:
raise OAuthError(
_("Invalid response while obtaining access token" ' from "%s".')
% get_token_prefix(self.request_token_url)
)
r = response.json()
self.access_token = {
"oauth_token": request_token,
"oauth_token_secret": r["access_token"],
"username": r["username"],
}
self.request.session[
"oauth_%s_access_token" % get_token_prefix(self.request_token_url)
] = self.access_token
return self.access_token
| PocketOAuthClient |
python | sympy__sympy | sympy/tensor/array/expressions/array_expressions.py | {
"start": 2852,
"end": 4356
} | class ____(Expr):
"""
An element of an array.
"""
_diff_wrt = True
is_symbol = True
is_commutative = True
def __new__(cls, name, indices):
if isinstance(name, str):
name = Symbol(name)
name = _sympify(name)
if not isinstance(indices, collections.abc.Iterable):
indices = (indices,)
indices = _sympify(tuple(indices))
cls._check_shape(name, indices)
obj = Expr.__new__(cls, name, indices)
return obj
@classmethod
def _check_shape(cls, name, indices):
indices = tuple(indices)
if hasattr(name, "shape"):
index_error = IndexError("number of indices does not match shape of the array")
if len(indices) != len(name.shape):
raise index_error
if any((i >= s) == True for i, s in zip(indices, name.shape)):
raise ValueError("shape is out of bounds")
if any((i < 0) == True for i in indices):
raise ValueError("shape contains negative values")
@property
def name(self):
return self._args[0]
@property
def indices(self):
return self._args[1]
def _eval_derivative(self, s):
if not isinstance(s, ArrayElement):
return S.Zero
if s == self:
return S.One
if s.name != self.name:
return S.Zero
return Mul.fromiter(KroneckerDelta(i, j) for i, j in zip(self.indices, s.indices))
| ArrayElement |
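A minimal usage sketch for the ArrayElement record above; the import path follows the record's path field, and the symbol names are illustrative assumptions.
from sympy import symbols
from sympy.tensor.array.expressions.array_expressions import ArrayElement

i, j, k, l = symbols("i j k l")
a = ArrayElement("A", (i, j))  # the string name is sympified to Symbol('A')
print(a.name, a.indices)       # A (i, j)
# differentiating w.r.t. another element of the same array yields KroneckerDelta factors
print(a.diff(ArrayElement("A", (k, l))))  # KroneckerDelta(i, k)*KroneckerDelta(j, l)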
python | django-haystack__django-haystack | test_haystack/mocks.py | {
"start": 243,
"end": 401
} | class ____(BaseRouter):
def for_read(self, **hints):
return "slave"
def for_write(self, **hints):
return "master"
| MockMasterSlaveRouter |
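A quick behavioral sketch of the router above (no Django setup is needed for these two calls):
router = MockMasterSlaveRouter()
assert router.for_read() == "slave"    # queries are routed to the "slave" connection
assert router.for_write() == "master"  # index updates go to "master"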
python | encode__starlette | starlette/staticfiles.py | {
"start": 498,
"end": 907
} | class ____(Response):
NOT_MODIFIED_HEADERS = (
"cache-control",
"content-location",
"date",
"etag",
"expires",
"vary",
)
def __init__(self, headers: Headers):
super().__init__(
status_code=304,
headers={name: value for name, value in headers.items() if name in self.NOT_MODIFIED_HEADERS},
)
| NotModifiedResponse |
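A minimal sketch of the header filtering above; it assumes Headers is the mapping from starlette.datastructures used by the rest of the module, and the header values are illustrative.
from starlette.datastructures import Headers

original = Headers({"etag": '"abc123"', "content-type": "text/html", "x-custom": "1"})
response = NotModifiedResponse(original)
assert response.status_code == 304
# only the whitelisted headers survive the copy
assert "etag" in response.headers
assert "x-custom" not in response.headers and "content-type" not in response.headers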
python | PrefectHQ__prefect | src/prefect/server/database/query_components.py | {
"start": 28606,
"end": 40088
} | class ____(BaseQueryComponents):
# --- Sqlite-specific SqlAlchemy bindings
def insert(self, obj: type[orm_models.Base]) -> sqlite.Insert:
return sqlite.insert(obj)
# --- Sqlite-specific JSON handling
@property
def uses_json_strings(self) -> bool:
return True
def cast_to_json(self, json_obj: sa.ColumnElement[T]) -> sa.ColumnElement[T]:
return sa.func.json(json_obj)
def build_json_object(
self, *args: Union[str, sa.ColumnElement[Any]]
) -> sa.ColumnElement[Any]:
return sa.func.json_object(*args)
def json_arr_agg(self, json_array: sa.ColumnElement[Any]) -> sa.ColumnElement[Any]:
return sa.func.json_group_array(json_array)
# --- Sqlite-optimized subqueries
def make_timestamp_intervals(
self,
start_time: datetime.datetime,
end_time: datetime.datetime,
interval: datetime.timedelta,
) -> sa.Select[tuple[datetime.datetime, datetime.datetime]]:
start = sa.bindparam("start_time", start_time, Timestamp)
# subtract interval because recursive where clauses are effectively evaluated on a t-1 lag
stop = sa.bindparam("end_time", end_time - interval, Timestamp)
step = sa.bindparam("interval", interval, sa.Interval)
one = sa.literal(1, literal_execute=True)
# recursive CTE to mimic the behavior of `generate_series`, which is
# only available as a compiled extension
base_case = sa.select(
start.label("interval_start"),
sa.func.date_add(start, step).label("interval_end"),
one.label("counter"),
).cte(recursive=True)
recursive_case = sa.select(
base_case.c.interval_end,
sa.func.date_add(base_case.c.interval_end, step),
base_case.c.counter + one,
).where(
base_case.c.interval_start < stop,
# don't compute more than 500 intervals
base_case.c.counter < 500,
)
cte = base_case.union_all(recursive_case)
return sa.select(cte.c.interval_start, cte.c.interval_end)
@db_injector
def set_state_id_on_inserted_flow_runs_statement(
self,
db: PrefectDBInterface,
inserted_flow_run_ids: Sequence[UUID],
insert_flow_run_states: Iterable[dict[str, Any]],
) -> sa.Update:
"""Given a list of flow run ids and associated states, set the state_id
to the appropriate state for all flow runs"""
fr_model, frs_model = db.FlowRun, db.FlowRunState
# sqlite requires a correlated subquery to update from another table
subquery = (
sa.select(frs_model.id)
.where(
frs_model.flow_run_id == fr_model.id,
frs_model.id.in_([r["id"] for r in insert_flow_run_states]),
)
.limit(1)
.scalar_subquery()
)
stmt = (
sa.update(fr_model)
.where(
fr_model.id.in_(inserted_flow_run_ids),
)
.values(state_id=subquery)
# no need to synchronize as these flow runs are entirely new
.execution_options(synchronize_session=False)
)
return stmt
@db_injector
def _get_scheduled_flow_runs_join(
self,
db: PrefectDBInterface,
work_queue_query: sa.CTE,
limit_per_queue: Optional[int],
scheduled_before: Optional[DateTime],
) -> tuple[sa.FromClause, sa.ColumnExpressionArgument[bool]]:
# precompute for readability
FlowRun = db.FlowRun
scheduled_before_clause = (
FlowRun.next_scheduled_start_time <= scheduled_before
if scheduled_before is not None
else sa.true()
)
# select scheduled flow runs, ordered by scheduled start time per queue
scheduled_flow_runs = (
sa.select(
(
sa.func.row_number()
.over(
partition_by=[FlowRun.work_queue_name],
order_by=FlowRun.next_scheduled_start_time,
)
.label("rank")
),
FlowRun,
)
.where(FlowRun.state_type == StateType.SCHEDULED, scheduled_before_clause)
.subquery("scheduled_flow_runs")
)
# sqlite short-circuits the `min` comparison on nulls, so we use `999999`
# as an "unlimited" limit.
limit = 999999 if limit_per_queue is None else limit_per_queue
# in the join, only keep flow runs whose rank is less than or equal to the
# available slots for each queue
join_criteria = sa.and_(
scheduled_flow_runs.c.work_queue_name == db.WorkQueue.name,
scheduled_flow_runs.c.rank
<= sa.func.min(
sa.func.coalesce(work_queue_query.c.available_slots, limit), limit
),
)
return scheduled_flow_runs, join_criteria
# -------------------------------------------------------
# Workers
# -------------------------------------------------------
@property
def _get_scheduled_flow_runs_from_work_pool_template_path(self) -> str:
"""
Template for the query to get scheduled flow runs from a work pool
"""
return "sqlite/get-runs-from-worker-queues.sql.jinja"
@db_injector
def _build_flow_run_graph_v2_query(
self, db: PrefectDBInterface
) -> sa.Select[FlowRunGraphV2Node]:
"""Postgresql version of the V2 FlowRun graph data query
This SQLA query is built just once and then cached per DB interface
"""
# the parameters this query takes as inputs
param_flow_run_id = sa.bindparam("flow_run_id", type_=UUIDTypeDecorator)
param_since = sa.bindparam("since", type_=Timestamp)
param_max_nodes = sa.bindparam("max_nodes", type_=sa.Integer)
Flow, FlowRun, TaskRun = db.Flow, db.FlowRun, db.TaskRun
input = sa.func.json_each(TaskRun.task_inputs).table_valued(
"key", "value", name="input"
)
argument = sa.func.json_each(
input.c.value, type_=postgresql.JSON()
).table_valued("key", sa.column("value", postgresql.JSON()), name="argument")
edges = (
sa.select(
sa.case((FlowRun.id.is_not(None), "flow-run"), else_="task-run").label(
"kind"
),
sa.func.coalesce(FlowRun.id, TaskRun.id).label("id"),
sa.func.coalesce(Flow.name + " / " + FlowRun.name, TaskRun.name).label(
"label"
),
sa.func.coalesce(FlowRun.state_type, TaskRun.state_type).label(
"state_type"
),
sa.func.coalesce(
FlowRun.start_time,
FlowRun.expected_start_time,
TaskRun.start_time,
TaskRun.expected_start_time,
).label("start_time"),
sa.func.coalesce(
FlowRun.end_time,
TaskRun.end_time,
sa.case(
(
TaskRun.state_type == StateType.COMPLETED,
TaskRun.expected_start_time,
),
else_=sa.null(),
),
).label("end_time"),
argument.c.value["id"].astext.label("parent"),
(input.c.key == "__parents__").label("has_encapsulating_task"),
)
.join_from(TaskRun, input, onclause=sa.true(), isouter=True)
.join(argument, onclause=sa.true(), isouter=True)
.join(
FlowRun,
isouter=True,
onclause=FlowRun.parent_task_run_id == TaskRun.id,
)
.join(Flow, isouter=True, onclause=Flow.id == FlowRun.flow_id)
.where(
TaskRun.flow_run_id == param_flow_run_id,
TaskRun.state_type != StateType.PENDING,
sa.func.coalesce(
FlowRun.start_time,
FlowRun.expected_start_time,
TaskRun.start_time,
TaskRun.expected_start_time,
).is_not(None),
)
# -- the order here is important to speed up building the two sets of
# -- edges in the with_parents and with_children CTEs below
.order_by(sa.func.coalesce(FlowRun.id, TaskRun.id))
).cte("edges")
children, parents = edges.alias("children"), edges.alias("parents")
with_encapsulating = (
sa.select(
children.c.id,
sa.func.json_group_array(parents.c.id).label("encapsulating_ids"),
)
.join(parents, onclause=parents.c.id == children.c.parent)
.where(children.c.has_encapsulating_task.is_(True))
.group_by(children.c.id)
).cte("with_encapsulating")
with_parents = (
sa.select(
children.c.id,
sa.func.json_group_array(parents.c.id).label("parent_ids"),
)
.join(parents, onclause=parents.c.id == children.c.parent)
.where(children.c.has_encapsulating_task.is_distinct_from(True))
.group_by(children.c.id)
.cte("with_parents")
)
with_children = (
sa.select(
parents.c.id, sa.func.json_group_array(children.c.id).label("child_ids")
)
.join(children, onclause=children.c.parent == parents.c.id)
.where(children.c.has_encapsulating_task.is_distinct_from(True))
.group_by(parents.c.id)
.cte("with_children")
)
graph = (
sa.select(
edges.c.kind,
edges.c.id,
edges.c.label,
edges.c.state_type,
edges.c.start_time,
edges.c.end_time,
with_parents.c.parent_ids,
with_children.c.child_ids,
with_encapsulating.c.encapsulating_ids,
)
.distinct()
.join(with_parents, isouter=True, onclause=with_parents.c.id == edges.c.id)
.join(
with_children, isouter=True, onclause=with_children.c.id == edges.c.id
)
.join(
with_encapsulating,
isouter=True,
onclause=with_encapsulating.c.id == edges.c.id,
)
.cte("nodes")
)
query = (
sa.select(
graph.c.kind,
graph.c.id,
graph.c.label,
graph.c.state_type,
graph.c.start_time,
graph.c.end_time,
sa.type_coerce(graph.c.parent_ids, UUIDList),
sa.type_coerce(graph.c.child_ids, UUIDList),
sa.type_coerce(graph.c.encapsulating_ids, UUIDList),
)
.where(sa.or_(graph.c.end_time.is_(None), graph.c.end_time >= param_since))
.order_by(graph.c.start_time, graph.c.end_time)
.limit(param_max_nodes)
)
return cast(sa.Select[FlowRunGraphV2Node], query)
| AioSqliteQueryComponents |
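The recursive CTE in make_timestamp_intervals is easier to see in raw SQL; below is a minimal sqlite3 sketch of the same generate_series trick, with illustrative timestamps and a fixed one-hour interval.
import sqlite3

SQL = """
WITH RECURSIVE intervals(interval_start, interval_end, counter) AS (
    SELECT :start, datetime(:start, '+1 hour'), 1
    UNION ALL
    SELECT interval_end, datetime(interval_end, '+1 hour'), counter + 1
    FROM intervals
    WHERE interval_start < :stop  -- :stop is end_time minus one interval (the t-1 lag)
      AND counter < 500           -- same 500-interval safety cap as above
)
SELECT interval_start, interval_end FROM intervals
"""
conn = sqlite3.connect(":memory:")
rows = conn.execute(SQL, {"start": "2024-01-01 00:00:00", "stop": "2024-01-01 02:00:00"}).fetchall()
print(rows)  # three hourly windows spanning 00:00-03:00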
python | pytorch__pytorch | torch/distributed/_tools/sac_estimator.py | {
"start": 3258,
"end": 4241
} | class ____:
"""
A class for storing Activation Checkpointing statistics corresponding to a module.
Attributes:
func_names (List[str]): List of operator names.
runtimes (List[float]): List of operator runtimes in milliseconds.
memory (List[int]): List of operator memory usage in bytes.
view_like_ops (List[int]): Indices of view-like operators.
rand_ops (List[int]): Indices of random operators.
saved_autograd_ops (List[int]): Indices of operator results saved by autograd engine.
inplace_ops (List[Tuple[int, int]]): Tuples of indices of an op and its first parent for in-place operators.
force_store_random (bool): Whether to force store random operator results.
"""
func_names: list[str]
runtimes: list[float]
memory: list[int]
view_like_ops: list[int]
rand_ops: list[int]
saved_autograd_ops: list[int]
inplace_ops: list[tuple[int, int]]
force_store_random: bool
| SACStats |
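A minimal construction sketch; the @dataclass decorator sits just above the captured span, and the operator names and numbers here are illustrative assumptions.
stats = SACStats(
    func_names=["aten::mm", "aten::relu"],
    runtimes=[0.42, 0.05],   # milliseconds
    memory=[4096, 1024],     # bytes
    view_like_ops=[],
    rand_ops=[],
    saved_autograd_ops=[0],  # index of the matmul result saved by autograd
    inplace_ops=[],
    force_store_random=False,
)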
python | kamyu104__LeetCode-Solutions | Python/scramble-string.py | {
"start": 33,
"end": 952
} | class ____(object):
# @return a boolean
def isScramble(self, s1, s2):
if not s1 or not s2 or len(s1) != len(s2):
return False
if s1 == s2:
return True
result = [[[False for j in xrange(len(s2))] for i in xrange(len(s1))] for n in xrange(len(s1) + 1)]
for i in xrange(len(s1)):
for j in xrange(len(s2)):
if s1[i] == s2[j]:
result[1][i][j] = True
for n in xrange(2, len(s1) + 1):
for i in xrange(len(s1) - n + 1):
for j in xrange(len(s2) - n + 1):
for k in xrange(1, n):
if result[k][i][j] and result[n - k][i + k][j + k] or\
result[k][i][j + n - k] and result[n - k][i + k][j]:
result[n][i][j] = True
break
return result[n][0][0]
| Solution |
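A quick sanity check for the bottom-up DP above; the snippet uses xrange, so this sketch assumes Python 2 (swap in range on Python 3).
s = Solution()
print(s.isScramble("great", "rgeat"))  # True: "rgeat" is a scramble of "great"
print(s.isScramble("abcde", "caebd"))  # False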
python | GoogleCloudPlatform__python-docs-samples | firestore/cloud-async-client/distributed_counters.py | {
"start": 689,
"end": 1016
} | class ____:
"""
A shard is a single piece of a distributed counter. Each shard can support being incremented
once per second. Multiple shards are needed within a Counter to allow
more frequent incrementing.
"""
def __init__(self):
self._count = 0
def to_dict(self):
return {"count": self._count}
| Shard |
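A minimal sketch of how shards add up to one logical counter; the Firestore document wiring is omitted and the shard count is an illustrative assumption.
import random

shards = [Shard() for _ in range(10)]  # one Firestore document per shard in practice
for _ in range(25):                    # spread increments across shards to avoid contention
    random.choice(shards)._count += 1
total = sum(s.to_dict()["count"] for s in shards)
assert total == 25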
python | apache__airflow | providers/apache/livy/src/airflow/providers/apache/livy/hooks/livy.py | {
"start": 1248,
"end": 1752
} | class ____(Enum):
"""Batch session states."""
NOT_STARTED = "not_started"
STARTING = "starting"
RUNNING = "running"
IDLE = "idle"
BUSY = "busy"
SHUTTING_DOWN = "shutting_down"
ERROR = "error"
DEAD = "dead"
KILLED = "killed"
SUCCESS = "success"
def sanitize_endpoint_prefix(endpoint_prefix: str | None) -> str:
"""Ensure that the endpoint prefix is prefixed with a slash."""
return f"/{endpoint_prefix.strip('/')}" if endpoint_prefix else ""
| BatchState |
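A few quick checks of the definitions above; BatchState members can be looked up by their string value, and sanitize_endpoint_prefix normalizes slashes.
assert BatchState("running") is BatchState.RUNNING  # Enum lookup by value
assert sanitize_endpoint_prefix(None) == ""
assert sanitize_endpoint_prefix("livy/") == "/livy"
assert sanitize_endpoint_prefix("/gateway/default/livy/") == "/gateway/default/livy"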
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 79907,
"end": 80227
} | class ____(sgqlc.types.Enum):
"""Properties by which release connections can be ordered.
Enumeration Choices:
* `CREATED_AT`: Order releases by creation time
* `NAME`: Order releases alphabetically by name
"""
__schema__ = github_schema
__choices__ = ("CREATED_AT", "NAME")
| ReleaseOrderField |
python | django__django | tests/m2m_and_m2o/tests.py | {
"start": 121,
"end": 2276
} | class ____(TestCase):
def test_related_objects_have_name_attribute(self):
for field_name in ("test_issue_client", "test_issue_cc"):
obj = User._meta.get_field(field_name)
self.assertEqual(field_name, obj.field.related_query_name())
def test_m2m_and_m2o(self):
r = User.objects.create(username="russell")
g = User.objects.create(username="gustav")
i1 = Issue(num=1)
i1.client = r
i1.save()
i2 = Issue(num=2)
i2.client = r
i2.save()
i2.cc.add(r)
i3 = Issue(num=3)
i3.client = g
i3.save()
i3.cc.add(r)
self.assertQuerySetEqual(
Issue.objects.filter(client=r.id),
[
1,
2,
],
lambda i: i.num,
)
self.assertQuerySetEqual(
Issue.objects.filter(client=g.id),
[
3,
],
lambda i: i.num,
)
self.assertQuerySetEqual(Issue.objects.filter(cc__id__exact=g.id), [])
self.assertQuerySetEqual(
Issue.objects.filter(cc__id__exact=r.id),
[
2,
3,
],
lambda i: i.num,
)
# These queries combine results from the m2m and the m2o relationships.
# They're three ways of saying the same thing.
self.assertQuerySetEqual(
Issue.objects.filter(Q(cc__id__exact=r.id) | Q(client=r.id)),
[
1,
2,
3,
],
lambda i: i.num,
)
self.assertQuerySetEqual(
Issue.objects.filter(cc__id__exact=r.id)
| Issue.objects.filter(client=r.id),
[
1,
2,
3,
],
lambda i: i.num,
)
self.assertQuerySetEqual(
Issue.objects.filter(Q(client=r.id) | Q(cc__id__exact=r.id)),
[
1,
2,
3,
],
lambda i: i.num,
)
| RelatedObjectTests |
python | sympy__sympy | sympy/physics/mechanics/tests/test_actuator.py | {
"start": 22033,
"end": 30270
} | class ____:
@pytest.fixture(autouse=True)
# Set up common variables that will be used in multiple tests
def _duffing_spring_fixture(self):
self.linear_stiffness = Symbol('beta')
self.nonlinear_stiffness = Symbol('alpha')
self.equilibrium_length = Symbol('l')
self.pA = Point('pA')
self.pB = Point('pB')
self.pathway = LinearPathway(self.pA, self.pB)
self.q = dynamicsymbols('q')
self.N = ReferenceFrame('N')
# Simple tests to check that DuffingSpring is a subclass of ForceActuator and ActuatorBase
def test_is_force_actuator_subclass(self):
assert issubclass(DuffingSpring, ForceActuator)
def test_is_actuator_base_subclass(self):
assert issubclass(DuffingSpring, ActuatorBase)
@pytest.mark.parametrize(
# Create parametrized tests that allow running the same test function multiple times with different sets of arguments
(
'linear_stiffness, '
'expected_linear_stiffness, '
'nonlinear_stiffness, '
'expected_nonlinear_stiffness, '
'equilibrium_length, '
'expected_equilibrium_length, '
'force'
),
[
(
1,
S.One,
1,
S.One,
0,
S.Zero,
-sqrt(dynamicsymbols('q')**2)-(sqrt(dynamicsymbols('q')**2))**3,
),
(
Symbol('beta'),
Symbol('beta'),
Symbol('alpha'),
Symbol('alpha'),
0,
S.Zero,
-Symbol('beta')*sqrt(dynamicsymbols('q')**2)-Symbol('alpha')*(sqrt(dynamicsymbols('q')**2))**3,
),
(
Symbol('beta'),
Symbol('beta'),
Symbol('alpha'),
Symbol('alpha'),
S.Zero,
S.Zero,
-Symbol('beta')*sqrt(dynamicsymbols('q')**2)-Symbol('alpha')*(sqrt(dynamicsymbols('q')**2))**3,
),
(
Symbol('beta'),
Symbol('beta'),
Symbol('alpha'),
Symbol('alpha'),
Symbol('l'),
Symbol('l'),
-Symbol('beta') * (sqrt(dynamicsymbols('q')**2) - Symbol('l')) - Symbol('alpha') * (sqrt(dynamicsymbols('q')**2) - Symbol('l'))**3,
),
]
)
# Check if DuffingSpring correctly initializes its attributes
# It tests various combinations of linear & nonlinear stiffness, equilibrium length, and the resulting force expression
def test_valid_constructor(
self,
linear_stiffness,
expected_linear_stiffness,
nonlinear_stiffness,
expected_nonlinear_stiffness,
equilibrium_length,
expected_equilibrium_length,
force,
):
self.pB.set_pos(self.pA, self.q*self.N.x)
spring = DuffingSpring(linear_stiffness, nonlinear_stiffness, self.pathway, equilibrium_length)
assert isinstance(spring, DuffingSpring)
assert hasattr(spring, 'linear_stiffness')
assert isinstance(spring.linear_stiffness, ExprType)
assert spring.linear_stiffness == expected_linear_stiffness
assert hasattr(spring, 'nonlinear_stiffness')
assert isinstance(spring.nonlinear_stiffness, ExprType)
assert spring.nonlinear_stiffness == expected_nonlinear_stiffness
assert hasattr(spring, 'pathway')
assert isinstance(spring.pathway, LinearPathway)
assert spring.pathway == self.pathway
assert hasattr(spring, 'equilibrium_length')
assert isinstance(spring.equilibrium_length, ExprType)
assert spring.equilibrium_length == expected_equilibrium_length
assert hasattr(spring, 'force')
assert isinstance(spring.force, ExprType)
assert spring.force == force
@pytest.mark.parametrize('linear_stiffness', [None, NonSympifyable()])
def test_invalid_constructor_linear_stiffness_not_sympifyable(self, linear_stiffness):
with pytest.raises(SympifyError):
_ = DuffingSpring(linear_stiffness, self.nonlinear_stiffness, self.pathway, self.equilibrium_length)
@pytest.mark.parametrize('nonlinear_stiffness', [None, NonSympifyable()])
def test_invalid_constructor_nonlinear_stiffness_not_sympifyable(self, nonlinear_stiffness):
with pytest.raises(SympifyError):
_ = DuffingSpring(self.linear_stiffness, nonlinear_stiffness, self.pathway, self.equilibrium_length)
def test_invalid_constructor_pathway_not_pathway_base(self):
with pytest.raises(TypeError):
_ = DuffingSpring(self.linear_stiffness, self.nonlinear_stiffness, NonSympifyable(), self.equilibrium_length)
@pytest.mark.parametrize('equilibrium_length', [None, NonSympifyable()])
def test_invalid_constructor_equilibrium_length_not_sympifyable(self, equilibrium_length):
with pytest.raises(SympifyError):
_ = DuffingSpring(self.linear_stiffness, self.nonlinear_stiffness, self.pathway, equilibrium_length)
@pytest.mark.parametrize(
'property_name, fixture_attr_name',
[
('linear_stiffness', 'linear_stiffness'),
('nonlinear_stiffness', 'nonlinear_stiffness'),
('pathway', 'pathway'),
('equilibrium_length', 'equilibrium_length')
]
)
# Check if certain properties of a DuffingSpring object are immutable after initialization
# Ensure that once a DuffingSpring is created, its key properties cannot be changed
def test_properties_are_immutable(self, property_name, fixture_attr_name):
spring = DuffingSpring(self.linear_stiffness, self.nonlinear_stiffness, self.pathway, self.equilibrium_length)
with pytest.raises(AttributeError):
setattr(spring, property_name, getattr(self, fixture_attr_name))
@pytest.mark.parametrize(
'equilibrium_length, expected',
[
(0, 'DuffingSpring(beta, alpha, LinearPathway(pA, pB), equilibrium_length=0)'),
(Symbol('l'), 'DuffingSpring(beta, alpha, LinearPathway(pA, pB), equilibrium_length=l)'),
]
)
# Check the __repr__ method of the DuffingSpring class
# Check that the actual string representation of a DuffingSpring instance matches the expected string for each set of provided parameter values
def test_repr(self, equilibrium_length, expected):
spring = DuffingSpring(self.linear_stiffness, self.nonlinear_stiffness, self.pathway, equilibrium_length)
assert repr(spring) == expected
def test_to_loads(self):
self.pB.set_pos(self.pA, self.q*self.N.x)
spring = DuffingSpring(self.linear_stiffness, self.nonlinear_stiffness, self.pathway, self.equilibrium_length)
# Calculate the displacement from the equilibrium length
displacement = self.q - self.equilibrium_length
# Make sure this matches the computation in DuffingSpring class
force = -self.linear_stiffness * displacement - self.nonlinear_stiffness * displacement**3
# The expected loads on pA and pB due to the spring
expected_loads = [Force(self.pA, force * self.N.x), Force(self.pB, -force * self.N.x)]
# Compare expected loads to what is returned from DuffingSpring.to_loads()
calculated_loads = spring.to_loads()
for calculated, expected in zip(calculated_loads, expected_loads):
assert calculated.point == expected.point
for dim in self.N: # Assuming self.N is the reference frame
calculated_component = calculated.vector.dot(dim)
expected_component = expected.vector.dot(dim)
# Substitute all symbols with numeric values
substitutions = {self.q: 1, Symbol('l'): 1, Symbol('alpha'): 1, Symbol('beta'): 1} # Add other necessary symbols as needed
diff = (calculated_component - expected_component).subs(substitutions).evalf()
# Check if the absolute value of the difference is below a threshold
assert Abs(diff) < 1e-9, f"The forces do not match. Difference: {diff}"
| TestDuffingSpring |
python | numba__numba | numba/tests/test_blackscholes.py | {
"start": 2127,
"end": 3599
} | class ____(TestCase):
def test_array_expr(self):
OPT_N = 400
stockPrice = randfloat(self.random.random_sample(OPT_N), 5.0, 30.0)
optionStrike = randfloat(self.random.random_sample(OPT_N), 1.0, 100.0)
optionYears = randfloat(self.random.random_sample(OPT_N), 0.25, 10.0)
args = stockPrice, optionStrike, optionYears, RISKFREE, VOLATILITY
callResultGold, putResultGold = blackscholes_arrayexpr.py_func(*args)
callResultNumba, putResultNumba = blackscholes_arrayexpr(*args)
delta = np.abs(callResultGold - callResultNumba)
self.assertAlmostEqual(delta.max(), 0)
def test_scalar(self):
OPT_N = 400
callResultGold = np.zeros(OPT_N)
putResultGold = np.zeros(OPT_N)
callResultNumba = np.zeros(OPT_N)
putResultNumba = np.zeros(OPT_N)
stockPrice = randfloat(self.random.random_sample(OPT_N), 5.0, 30.0)
optionStrike = randfloat(self.random.random_sample(OPT_N), 1.0, 100.0)
optionYears = randfloat(self.random.random_sample(OPT_N), 0.25, 10.0)
args = stockPrice, optionStrike, optionYears, RISKFREE, VOLATILITY
blackscholes_scalar.py_func(callResultGold, putResultGold, *args)
blackscholes_scalar(callResultNumba, putResultNumba, *args)
delta = np.abs(callResultGold - callResultNumba)
self.assertAlmostEqual(delta.max(), 0)
if __name__ == "__main__":
unittest.main()
| TestBlackScholes |
python | apache__airflow | airflow-core/tests/unit/ti_deps/deps/test_not_in_retry_period_dep.py | {
"start": 1194,
"end": 2513
} | class ____:
def _get_task_instance(self, state, end_date=None, retry_delay=timedelta(minutes=15)):
task = Mock(retry_delay=retry_delay, retry_exponential_backoff=0)
ti = TaskInstance(task=task, state=state, dag_version_id=mock.MagicMock())
ti.end_date = end_date
return ti
@time_machine.travel("2016-01-01 15:44")
def test_still_in_retry_period(self):
"""
Task instances that are in their retry period should fail this dep
"""
ti = self._get_task_instance(State.UP_FOR_RETRY, end_date=datetime(2016, 1, 1, 15, 30))
assert ti.is_premature
assert not NotInRetryPeriodDep().is_met(ti=ti)
@time_machine.travel("2016-01-01 15:46")
def test_retry_period_finished(self):
"""
Task instance's that have had their retry period elapse should pass this dep
"""
ti = self._get_task_instance(State.UP_FOR_RETRY, end_date=datetime(2016, 1, 1))
assert not ti.is_premature
assert NotInRetryPeriodDep().is_met(ti=ti)
def test_not_in_retry_period(self):
"""
Task instance's that are not up for retry can not be in their retry period
"""
ti = self._get_task_instance(State.SUCCESS)
assert NotInRetryPeriodDep().is_met(ti=ti)
| TestNotInRetryPeriodDep |
python | pandas-dev__pandas | pandas/tests/frame/test_constructors.py | {
"start": 122273,
"end": 123527
} | class ____:
# Until 2.0, we do not preserve non-nano dt64/td64 when passed as ndarray,
# but do preserve it when passed as DTA/TDA
@pytest.fixture(params=[True, False])
def as_td(self, request):
return request.param
@pytest.fixture
def arr(self, as_td):
values = np.arange(5).astype(np.int64).view("M8[s]")
if as_td:
values = values - values[0]
return TimedeltaArray._simple_new(values, dtype=values.dtype)
else:
return DatetimeArray._simple_new(values, dtype=values.dtype)
def test_index_allow_non_nano(self, arr):
idx = Index(arr)
assert idx.dtype == arr.dtype
def test_dti_tdi_allow_non_nano(self, arr, as_td):
if as_td:
idx = pd.TimedeltaIndex(arr)
else:
idx = DatetimeIndex(arr)
assert idx.dtype == arr.dtype
def test_series_allow_non_nano(self, arr):
ser = Series(arr)
assert ser.dtype == arr.dtype
def test_frame_allow_non_nano(self, arr):
df = DataFrame(arr)
assert df.dtypes[0] == arr.dtype
def test_frame_from_dict_allow_non_nano(self, arr):
df = DataFrame({0: arr})
assert df.dtypes[0] == arr.dtype
| TestAllowNonNano |
python | Textualize__rich | rich/markdown.py | {
"start": 12448,
"end": 12756
} | class ____(TextElement):
@classmethod
def create(cls, markdown: Markdown, token: Token) -> MarkdownElement:
url = token.attrs.get("href", "#")
return cls(token.content, str(url))
def __init__(self, text: str, href: str):
self.text = Text(text)
self.href = href
| Link |
python | simonw__datasette | datasette/resources.py | {
"start": 1088,
"end": 2899
} | class ____(Resource):
"""A canned query in a database."""
name = "query"
parent_class = DatabaseResource
def __init__(self, database: str, query: str):
super().__init__(parent=database, child=query)
@classmethod
async def resources_sql(cls, datasette) -> str:
from datasette.plugins import pm
from datasette.utils import await_me_maybe
# Get all databases from catalog
db = datasette.get_internal_database()
result = await db.execute("SELECT database_name FROM catalog_databases")
databases = [row[0] for row in result.rows]
# Gather all canned queries from all databases
query_pairs = []
for database_name in databases:
# Call the hook to get queries (including from config via default plugin)
for queries_result in pm.hook.canned_queries(
datasette=datasette,
database=database_name,
actor=None, # Get ALL queries for resource enumeration
):
queries = await await_me_maybe(queries_result)
if queries:
for query_name in queries.keys():
query_pairs.append((database_name, query_name))
# Build SQL
if not query_pairs:
return "SELECT NULL AS parent, NULL AS child WHERE 0"
# Generate UNION ALL query
selects = []
for db_name, query_name in query_pairs:
# Escape single quotes by doubling them
db_escaped = db_name.replace("'", "''")
query_escaped = query_name.replace("'", "''")
selects.append(
f"SELECT '{db_escaped}' AS parent, '{query_escaped}' AS child"
)
return " UNION ALL ".join(selects)
| QueryResource |
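The quote-doubling in resources_sql is the part worth double-checking; a minimal sketch of the SQL it emits for one (database, query) pair, with illustrative names:
db_name, query_name = "fixtures", "it's complicated"
db_escaped = db_name.replace("'", "''")
query_escaped = query_name.replace("'", "''")
print(f"SELECT '{db_escaped}' AS parent, '{query_escaped}' AS child")
# SELECT 'fixtures' AS parent, 'it''s complicated' AS child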
python | pytorch__pytorch | test/distributed/test_device_mesh.py | {
"start": 47530,
"end": 58185
} | class ____(DTensorTestBase):
@property
def world_size(self):
return 8
@with_comms
def test_broadcast_1d(self):
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
local_tensor = torch.ones(3, 3, device=self.device_type) * self.rank
mesh_broadcast(local_tensor, mesh, mesh_dim=0)
self.assertEqual(local_tensor, torch.zeros(3, 3))
@with_comms
def test_scatter_1d(self):
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
scatter_tensor_shape = [3, 3, 3]
for scatter_dim in range(len(scatter_tensor_shape)):
shard_placement = Shard(scatter_dim)
scatter_tensor_shape[scatter_dim] *= self.world_size
# make the random seed same across rank
torch.manual_seed(0)
global_tensor = torch.randn(scatter_tensor_shape, device=self.device_type)
splitted_list, _ = shard_placement._split_tensor(
global_tensor, mesh.size(), with_padding=True, contiguous=True
)
recv_tensor = torch.empty_like(splitted_list[mesh.get_rank()])
# scatter on dim > 0 would generate non-contiguous tensor, verify that works
mesh_scatter(recv_tensor, splitted_list, mesh, mesh_dim=0)
self.assertEqual(recv_tensor, splitted_list[mesh.get_rank()])
@with_comms
def test_scatter_uneven(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
my_rank = device_mesh.get_rank()
tensor_to_split = torch.randn(
device_mesh.size() + 3, device_mesh.size() + 1, device=self.device_type
)
for shard_dim in range(tensor_to_split.ndim):
shard_placement = Shard(shard_dim)
tensor_to_scatter = tensor_to_split.clone()
tensor_splitted_list = list(
torch.chunk(tensor_to_split, self.world_size, dim=shard_dim)
)
for _ in range(self.world_size - len(tensor_splitted_list)):
tensor_splitted_list.append(torch.tensor([], device=self.device_type))
padded_tensor_list, pad_sizes = shard_placement._split_tensor(
tensor_to_scatter,
device_mesh.size(),
with_padding=True,
contiguous=True,
)
scattered_tensor = torch.empty_like(padded_tensor_list[my_rank])
mesh_scatter(scattered_tensor, padded_tensor_list, device_mesh, mesh_dim=0)
if pad_sizes[my_rank] != 0:
scattered_tensor = unpad_tensor(
scattered_tensor, shard_dim, pad_sizes[my_rank]
)
if scattered_tensor.numel() == 0:
# We need to check numel() instead of size if a tensor is ([]) after unpadding,
# since the size could be ([0, 8]) after unpadding.
self.assertEqual(
scattered_tensor.numel(), tensor_splitted_list[my_rank].numel()
)
else:
self.assertEqual(
scattered_tensor.size(), tensor_splitted_list[my_rank].size()
)
self.assertEqual(scattered_tensor, tensor_splitted_list[my_rank])
@with_comms
def test_all_gather_uneven(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
my_rank = device_mesh.get_rank()
tensor_to_split = torch.ones(
device_mesh.size() + 3,
device_mesh.size() + 1,
device=self.device_type,
)
for shard_dim in range(tensor_to_split.ndim):
shard_placement = Shard(shard_dim)
tensor_padded_list, pad_sizes = shard_placement._split_tensor(
tensor_to_split,
device_mesh.size(),
with_padding=True,
contiguous=True,
)
local_tensor = tensor_padded_list[my_rank]
big_tensor = funcol.all_gather_tensor(
local_tensor, gather_dim=shard_dim, group=(device_mesh, 0)
)
big_tensor_chunks = list(
torch.chunk(big_tensor, device_mesh.size(), dim=shard_dim)
)
unpadded_list = [
(
unpad_tensor(big_tensor, shard_dim, pad_sizes[i])
if pad_sizes[i] > 0
else big_tensor
)
for i, big_tensor in enumerate(big_tensor_chunks)
]
all_gathered_tensor = torch.cat(unpadded_list, dim=shard_dim)
self.assertEqual(all_gathered_tensor.size(), tensor_to_split.size())
self.assertEqual(all_gathered_tensor, tensor_to_split)
@with_comms
def test_reduce_scatter_contiguous(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
my_rank = device_mesh.get_rank()
# Init the tensor
step = self.world_size * 2
total_elem = step**2
tensor = torch.arange(0, total_elem).view(step, -1).to(device=self.device_type)
tensor = tensor * (my_rank + 1)
# Get non-contiguous tensor by slicing
tensor_to_reduce = tensor[::2, :2]
tensor_contiguous = tensor_to_reduce.clone().contiguous()
# Partial to Shard to trigger reduce_scatter
tensor_to_reduce = DTensor.from_local(
tensor_to_reduce, device_mesh, [_Partial()]
)
tensor_contiguous = DTensor.from_local(
tensor_contiguous, device_mesh, [_Partial()]
)
new_tensor = tensor_to_reduce.redistribute(device_mesh, [Shard(0)])
new_tensor_contiguous = tensor_contiguous.redistribute(device_mesh, [Shard(0)])
# The output for contiguous and non-contiguous tensors of the same value
# should return the same reducescatter value.
new_tensor_local = new_tensor._local_tensor
new_tensor_contiguous_local = new_tensor_contiguous._local_tensor
self.assertEqual(new_tensor_local, new_tensor_contiguous_local)
self.assertEqual(list(new_tensor_local.size()), [1, 2])
# Check the reduce numerical value
sum_base = (1 + self.world_size) * self.world_size / 2
first_elem = my_rank * sum_base * step * 2
expected_tensor = torch.tensor(
[[first_elem, first_elem + sum_base]],
dtype=new_tensor_local.dtype,
device=self.device_type,
)
self.assertEqual(new_tensor_local, expected_tensor)
@with_comms
def test_reduce_scatter_uneven(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
my_rank = device_mesh.get_rank()
tensor_to_split = (
torch.ones(
device_mesh.size() + 3,
device_mesh.size() + 1,
device=self.device_type,
)
* self.rank
)
for shard_dim in range(tensor_to_split.ndim):
shard_placement = Shard(shard_dim)
tensor_to_scatter = tensor_to_split.clone()
tensor_splitted_list = list(
torch.chunk(tensor_to_split, self.world_size, dim=shard_dim)
)
for _ in range(self.world_size - len(tensor_splitted_list)):
tensor_splitted_list.append(torch.tensor([], device=self.device_type))
padded_tensor_list, pad_sizes = shard_placement._split_tensor(
tensor_to_scatter,
device_mesh.size(),
with_padding=True,
contiguous=True,
)
tensor_to_reduce = torch.cat(padded_tensor_list, shard_dim)
res_num = ((0 + self.world_size - 1) * self.world_size) / 2
scattered_tensor = funcol.reduce_scatter_tensor(
tensor_to_reduce,
reduceOp="sum",
scatter_dim=shard_dim,
group=(device_mesh, 0),
)
# unpad scattered_tensor
if pad_sizes[my_rank] > 0:
scattered_tensor = unpad_tensor(
scattered_tensor, shard_dim, pad_sizes[my_rank]
)
if scattered_tensor.numel() == 0:
# We need to check numel() instead of size if a tensor is ([]) after unpadding,
# since the size could be ([0, 8]) after unpadding.
self.assertEqual(
scattered_tensor.numel(), tensor_splitted_list[my_rank].numel()
)
else:
self.assertEqual(
scattered_tensor.size(), tensor_splitted_list[my_rank].size()
)
self.assertEqual(
scattered_tensor,
torch.ones_like(tensor_splitted_list[my_rank]) * res_num,
)
@with_comms
def test_broadcast_nd(self):
mesh_tensor = torch.arange(8).reshape(2, 2, 2)
mesh = DeviceMesh(self.device_type, mesh_tensor)
local_tensor = torch.ones(3, 3, device=self.device_type) * self.rank
# check all dim groups
dim_to_subgroups = mesh.get_all_groups()
for dim, dim_group in enumerate(dim_to_subgroups):
dim_group_size = get_world_size(dim_group)
global_ranks = [
get_global_rank(dim_group, i) for i in range(dim_group_size)
]
cloned_local_tensor = local_tensor.clone()
mesh_broadcast(cloned_local_tensor, mesh, mesh_dim=dim)
res_num = global_ranks[0]
self.assertEqual(cloned_local_tensor, torch.ones(3, 3) * res_num)
@with_comms
def test_scatter_nd(self):
mesh_tensor = torch.arange(8).reshape(2, 2, 2)
mesh = DeviceMesh(self.device_type, mesh_tensor)
# check all dim groups
dim_to_subgroups = mesh.get_all_groups()
for dim, dim_group in enumerate(dim_to_subgroups):
dim_group_size = get_world_size(dim_group)
global_ranks = [
get_global_rank(dim_group, i) for i in range(dim_group_size)
]
scattered_tensors = [
torch.ones(3, 3, device=self.device_type) * global_rank
for global_rank in global_ranks
]
received_tensor = torch.empty_like(
scattered_tensors[mesh.get_coordinate()[dim]]
)
mesh_scatter(received_tensor, scattered_tensors, mesh, mesh_dim=dim)
self.assertEqual(received_tensor, torch.ones(3, 3) * self.rank)
| DeviceMeshCollectiveTest |
python | jazzband__django-oauth-toolkit | tests/test_scopes.py | {
"start": 771,
"end": 952
} | class ____(ScopedProtectedResourceView):
required_scopes = ["scope1"]
def get(self, request, *args, **kwargs):
return "This is a protected resource"
| ScopeResourceView |
python | run-llama__llama_index | llama-index-core/llama_index/core/callbacks/base.py | {
"start": 811,
"end": 9889
} | class ____(BaseCallbackHandler, ABC):
"""
Callback manager that handles callbacks for events within LlamaIndex.
The callback manager provides a way to call handlers on event starts/ends.
Additionally, the callback manager traces the current stack of events.
It does this by using a few key attributes.
- trace_stack - The current stack of events that have not ended yet.
When an event ends, it's removed from the stack.
Since this is a contextvar, it is unique to each
thread/task.
- trace_map - A mapping of event ids to their children events.
On the start of events, the bottom of the trace stack
is used as the current parent event for the trace map.
- trace_id - A simple name for the current trace, usually denoting the
entrypoint (query, index_construction, insert, etc.)
Args:
handlers (List[BaseCallbackHandler]): list of handlers to use.
Usage:
with callback_manager.event(CBEventType.QUERY) as event:
event.on_start(payload={key, val})
...
event.on_end(payload={key, val})
"""
def __init__(self, handlers: Optional[List[BaseCallbackHandler]] = None):
"""Initialize the manager with a list of handlers."""
from llama_index.core import global_handler
handlers = handlers or []
# add eval handlers based on global defaults
if global_handler is not None:
new_handler = global_handler
# go through existing handlers, check if any are same type as new handler
# if so, error
for existing_handler in handlers:
if isinstance(existing_handler, type(new_handler)):
raise ValueError(
"Cannot add two handlers of the same type "
f"{type(new_handler)} to the callback manager."
)
handlers.append(new_handler)
# if we passed in no handlers, use the global default
if len(handlers) == 0:
from llama_index.core.settings import Settings
# hidden var access to prevent recursion in getter
cb_manager = Settings._callback_manager
if cb_manager is not None:
handlers = cb_manager.handlers
self.handlers: List[BaseCallbackHandler] = handlers
self._trace_map: Dict[str, List[str]] = defaultdict(list)
def on_event_start(
self,
event_type: CBEventType,
payload: Optional[Dict[str, Any]] = None,
event_id: Optional[str] = None,
parent_id: Optional[str] = None,
**kwargs: Any,
) -> str:
"""Run handlers when an event starts and return id of event."""
event_id = event_id or str(uuid.uuid4())
# if no trace is running, start a default trace
try:
parent_id = parent_id or global_stack_trace.get()[-1]
except IndexError:
self.start_trace("llama-index")
parent_id = global_stack_trace.get()[-1]
parent_id = cast(str, parent_id)
self._trace_map[parent_id].append(event_id)
for handler in self.handlers:
if event_type not in handler.event_starts_to_ignore:
handler.on_event_start(
event_type,
payload,
event_id=event_id,
parent_id=parent_id,
**kwargs,
)
if event_type not in LEAF_EVENTS:
# copy the stack trace to prevent conflicts with threads/coroutines
current_trace_stack = global_stack_trace.get().copy()
current_trace_stack.append(event_id)
global_stack_trace.set(current_trace_stack)
return event_id
def on_event_end(
self,
event_type: CBEventType,
payload: Optional[Dict[str, Any]] = None,
event_id: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Run handlers when an event ends."""
event_id = event_id or str(uuid.uuid4())
for handler in self.handlers:
if event_type not in handler.event_ends_to_ignore:
handler.on_event_end(event_type, payload, event_id=event_id, **kwargs)
if event_type not in LEAF_EVENTS:
# copy the stack trace to prevent conflicts with threads/coroutines
current_trace_stack = global_stack_trace.get().copy()
current_trace_stack.pop()
global_stack_trace.set(current_trace_stack)
def add_handler(self, handler: BaseCallbackHandler) -> None:
"""Add a handler to the callback manager."""
self.handlers.append(handler)
def remove_handler(self, handler: BaseCallbackHandler) -> None:
"""Remove a handler from the callback manager."""
self.handlers.remove(handler)
def set_handlers(self, handlers: List[BaseCallbackHandler]) -> None:
"""Set handlers as the only handlers on the callback manager."""
self.handlers = handlers
@contextmanager
def event(
self,
event_type: CBEventType,
payload: Optional[Dict[str, Any]] = None,
event_id: Optional[str] = None,
) -> Generator["EventContext", None, None]:
"""
Context manager for launching and shutting down events.
Handles sending on_event_start and on_event_end to handlers for the specified event.
Usage:
with callback_manager.event(CBEventType.QUERY, payload={key: val}) as event:
...
event.on_end(payload={key: val}) # optional
"""
# create event context wrapper
event = EventContext(self, event_type, event_id=event_id)
event.on_start(payload=payload)
payload = None
try:
yield event
except Exception as e:
# data already logged to trace?
if not hasattr(e, "event_added"):
payload = {EventPayload.EXCEPTION: e}
e.event_added = True # type: ignore
if not event.finished:
event.on_end(payload=payload)
raise
finally:
# ensure event is ended
if not event.finished:
event.on_end(payload=payload)
@contextmanager
def as_trace(self, trace_id: str) -> Generator[None, None, None]:
"""Context manager tracer for lanching and shutdown of traces."""
self.start_trace(trace_id=trace_id)
try:
yield
except Exception as e:
# event already added to trace?
if not hasattr(e, "event_added"):
self.on_event_start(
CBEventType.EXCEPTION, payload={EventPayload.EXCEPTION: e}
)
e.event_added = True # type: ignore
raise
finally:
# ensure trace is ended
self.end_trace(trace_id=trace_id)
def start_trace(self, trace_id: Optional[str] = None) -> None:
"""Run when an overall trace is launched."""
current_trace_stack_ids = global_stack_trace_ids.get().copy()
if trace_id is not None:
if len(current_trace_stack_ids) == 0:
self._reset_trace_events()
for handler in self.handlers:
handler.start_trace(trace_id=trace_id)
current_trace_stack_ids = [trace_id]
else:
current_trace_stack_ids.append(trace_id)
global_stack_trace_ids.set(current_trace_stack_ids)
def end_trace(
self,
trace_id: Optional[str] = None,
trace_map: Optional[Dict[str, List[str]]] = None,
) -> None:
"""Run when an overall trace is exited."""
current_trace_stack_ids = global_stack_trace_ids.get().copy()
if trace_id is not None and len(current_trace_stack_ids) > 0:
current_trace_stack_ids.pop()
if len(current_trace_stack_ids) == 0:
for handler in self.handlers:
handler.end_trace(trace_id=trace_id, trace_map=self._trace_map)
current_trace_stack_ids = []
global_stack_trace_ids.set(current_trace_stack_ids)
def _reset_trace_events(self) -> None:
"""Helper function to reset the current trace."""
self._trace_map = defaultdict(list)
global_stack_trace.set([BASE_TRACE_EVENT])
@property
def trace_map(self) -> Dict[str, List[str]]:
return self._trace_map
@classmethod
def __get_pydantic_core_schema__(
cls, source: Type[Any], handler: GetCoreSchemaHandler
) -> CoreSchema:
return core_schema.any_schema()
@classmethod
def __get_pydantic_json_schema__(
cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler
) -> Dict[str, Any]:
json_schema = handler(core_schema)
return handler.resolve_ref_schema(json_schema)
| CallbackManager |
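A minimal usage sketch, assuming CBEventType is importable alongside CallbackManager from llama_index.core.callbacks; the payload keys are illustrative.
from llama_index.core.callbacks import CallbackManager, CBEventType

cm = CallbackManager(handlers=[])
with cm.as_trace("query"):
    with cm.event(CBEventType.QUERY, payload={"query_str": "hello"}) as event:
        result = "world"  # ... do the actual work here ...
        event.on_end(payload={"response": result})
print(cm.trace_map)  # maps each parent event id to its child event ids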
python | pyca__cryptography | tests/x509/test_x509_ext.py | {
"start": 232177,
"end": 240099
} | class ____:
def test_invalid_init(self):
with pytest.raises(TypeError):
x509.ProfessionInfo(
None,
None, # type:ignore[arg-type]
None,
None,
None,
)
with pytest.raises(TypeError):
x509.ProfessionInfo(
"spam", # type:ignore[arg-type]
[],
[],
None,
None,
)
with pytest.raises(TypeError):
x509.ProfessionInfo(
None,
[42], # type:ignore[list-item]
[],
None,
None,
)
with pytest.raises(TypeError):
x509.ProfessionInfo(
None,
[],
"spam", # type:ignore[arg-type]
None,
None,
)
with pytest.raises(TypeError):
x509.ProfessionInfo(
None,
[],
[],
42, # type:ignore[arg-type]
None,
)
with pytest.raises(TypeError):
x509.ProfessionInfo(
None,
[],
[],
None,
42, # type:ignore[arg-type]
)
def test_eq(self):
info1 = x509.ProfessionInfo(None, [], [], None, None)
info2 = x509.ProfessionInfo(None, [], [], None, None)
assert info1 == info2
info1 = x509.ProfessionInfo(None, [], None, None, None)
info2 = x509.ProfessionInfo(None, [], None, None, None)
assert info1 == info2
info1 = x509.ProfessionInfo(
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3"), "https://example.com", "spam"
),
["spam"],
[x509.ObjectIdentifier("1.2.3.4")],
"eggs",
b"\x01\x02\x03",
)
info2 = x509.ProfessionInfo(
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3"), "https://example.com", "spam"
),
["spam"],
[x509.ObjectIdentifier("1.2.3.4")],
"eggs",
b"\x01\x02\x03",
)
assert info1 == info2
def test_ne(self):
info1 = x509.ProfessionInfo(
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3"), "https://example.com", "spam"
),
["spam"],
[x509.ObjectIdentifier("1.2.3.4")],
"eggs",
b"\x01\x02\x03",
)
info2 = x509.ProfessionInfo(
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3"), "https://example.com", "spam"
),
["spam"],
[x509.ObjectIdentifier("1.2.3.4")],
"eggs",
None,
)
info3 = x509.ProfessionInfo(
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3"), "https://example.com", "spam"
),
["spam"],
[x509.ObjectIdentifier("1.2.3.4")],
None,
None,
)
info4 = x509.ProfessionInfo(
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3"), "https://example.com", "spam"
),
["spam"],
[],
None,
None,
)
info5 = x509.ProfessionInfo(
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3"), "https://example.com", "spam"
),
[],
[],
None,
None,
)
info6 = x509.ProfessionInfo(None, ["spam"], [], None, None)
info7 = x509.ProfessionInfo(
None, [], [x509.ObjectIdentifier("1.2.3")], None, None
)
info8 = x509.ProfessionInfo(None, [], [], "spam", None)
info9 = x509.ProfessionInfo(None, [], [], None, b"\x01\x02\x03")
info10 = x509.ProfessionInfo(None, [], [], None, None)
info11 = x509.ProfessionInfo(None, [], None, None, None)
assert info1 != info2
assert info1 != info2
assert info1 != info3
assert info1 != info4
assert info1 != info5
assert info1 != info6
assert info1 != info7
assert info1 != info8
assert info1 != info9
assert info1 != info10
assert info1 != info11
assert info1 != object()
def test_repr(self):
info = x509.ProfessionInfo(None, [], [], None, None)
assert repr(info) == (
"<ProfessionInfo("
"naming_authority=None, "
"profession_items=[], "
"profession_oids=[], "
"registration_number=None, "
"add_profession_info=None)>"
)
info = x509.ProfessionInfo(None, [], None, None, None)
assert repr(info) == (
"<ProfessionInfo("
"naming_authority=None, "
"profession_items=[], "
"profession_oids=None, "
"registration_number=None, "
"add_profession_info=None)>"
)
info = x509.ProfessionInfo(
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3"), "https://example.com", "spam"
),
["spam"],
[x509.ObjectIdentifier("1.2.3.4")],
"eggs",
b"\x01\x02\x03",
)
assert repr(info) == (
"<ProfessionInfo("
"naming_authority=<NamingAuthority("
"id=<ObjectIdentifier(oid=1.2.3, name=Unknown OID)>, "
"url=https://example.com, text=spam)>, "
"profession_items=['spam'], "
"profession_oids="
"[<ObjectIdentifier(oid=1.2.3.4, name=Unknown OID)>], "
"registration_number=eggs, "
"add_profession_info=b'\\x01\\x02\\x03')>"
)
def test_hash(self):
info1 = x509.ProfessionInfo(
x509.NamingAuthority(None, None, None),
["spam"],
[x509.ObjectIdentifier("1.2.3.4")],
"eggs",
b"\x01\x02\x03",
)
info2 = x509.ProfessionInfo(
x509.NamingAuthority(None, None, None),
["spam"],
[x509.ObjectIdentifier("1.2.3.4")],
"eggs",
b"\x01\x02\x03",
)
info3 = x509.ProfessionInfo(
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3"), "https://example.com", "spam"
),
["spam"],
[x509.ObjectIdentifier("1.2.3.4")],
"eggs",
b"\x01\x02\x03",
)
info4 = x509.ProfessionInfo(
x509.NamingAuthority(None, None, None),
[],
[x509.ObjectIdentifier("1.2.3.4")],
"eggs",
b"\x01\x02\x03",
)
info5 = x509.ProfessionInfo(
x509.NamingAuthority(None, None, None),
[],
[],
"eggs",
b"\x01\x02\x03",
)
info6 = x509.ProfessionInfo(
x509.NamingAuthority(None, None, None),
[],
[],
None,
b"\x01\x02\x03",
)
info7 = x509.ProfessionInfo(
x509.NamingAuthority(None, None, None), [], [], None, None
)
info8 = x509.ProfessionInfo(
x509.NamingAuthority(None, None, None), [], None, None, None
)
info9 = x509.ProfessionInfo(None, [], None, None, None)
assert hash(info1) == hash(info2)
assert hash(info1) != hash(info3)
assert hash(info1) != hash(info4)
assert hash(info1) != hash(info5)
assert hash(info1) != hash(info6)
assert hash(info1) != hash(info7)
assert hash(info1) != hash(info8)
assert hash(info1) != hash(info9)
| TestProfessionInfo |
python | django__django | django/test/utils.py | {
"start": 1526,
"end": 1806
} | class ____:
def __init__(self, val, places=7):
self.val = val
self.places = places
def __repr__(self):
return repr(self.val)
def __eq__(self, other):
return self.val == other or round(abs(self.val - other), self.places) == 0
| Approximate |
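A quick sketch of the rounding-based equality above:
assert Approximate(0.1 + 0.2) == 0.3        # equal within the default 7 decimal places
assert Approximate(1.0, places=2) == 1.004  # coarser tolerance: round(0.004, 2) == 0.0
assert Approximate(1.0, places=2) != 1.02   # round(0.02, 2) == 0.02, so not equal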
python | run-llama__llama_index | llama-index-packs/llama-index-packs-cohere-citation-chat/llama_index/packs/cohere_citation_chat/types.py | {
"start": 86,
"end": 232
} | class ____:
"""Citation object."""
text: str
start: int
end: int
document_ids: List[str]
dict = asdict
@dataclass
| Citation |
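A minimal construction sketch; the trailing @dataclass in the span decorates the next class, but the field annotations and dict = asdict imply Citation itself is a dataclass (decorated just above the span).
c = Citation(text="blue", start=10, end=14, document_ids=["doc-1"])
print(c.dict())  # {'text': 'blue', 'start': 10, 'end': 14, 'document_ids': ['doc-1']}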
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/endpoints/multiapi/main.py | {
"start": 800,
"end": 1021
} | class ____(messages.Message):
message = messages.StringField(1)
# [START endpoints_multiclass]
api_collection = endpoints.api(name="library", version="v1.0")
@api_collection.api_class(resource_name="shelves")
| Response |
python | kubernetes-client__python | kubernetes/client/models/v1_param_ref.py | {
"start": 383,
"end": 8716
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str',
'namespace': 'str',
'parameter_not_found_action': 'str',
'selector': 'V1LabelSelector'
}
attribute_map = {
'name': 'name',
'namespace': 'namespace',
'parameter_not_found_action': 'parameterNotFoundAction',
'selector': 'selector'
}
def __init__(self, name=None, namespace=None, parameter_not_found_action=None, selector=None, local_vars_configuration=None): # noqa: E501
"""V1ParamRef - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._namespace = None
self._parameter_not_found_action = None
self._selector = None
self.discriminator = None
if name is not None:
self.name = name
if namespace is not None:
self.namespace = namespace
if parameter_not_found_action is not None:
self.parameter_not_found_action = parameter_not_found_action
if selector is not None:
self.selector = selector
@property
def name(self):
"""Gets the name of this V1ParamRef. # noqa: E501
name is the name of the resource being referenced. One of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset. A single parameter used for all admission requests can be configured by setting the `name` field, leaving `selector` blank, and setting namespace if `paramKind` is namespace-scoped. # noqa: E501
:return: The name of this V1ParamRef. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1ParamRef.
name is the name of the resource being referenced. One of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset. A single parameter used for all admission requests can be configured by setting the `name` field, leaving `selector` blank, and setting namespace if `paramKind` is namespace-scoped. # noqa: E501
:param name: The name of this V1ParamRef. # noqa: E501
:type: str
"""
self._name = name
@property
def namespace(self):
"""Gets the namespace of this V1ParamRef. # noqa: E501
namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields. A per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty. - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error. - If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error. # noqa: E501
:return: The namespace of this V1ParamRef. # noqa: E501
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""Sets the namespace of this V1ParamRef.
namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields. A per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty. - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error. - If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error. # noqa: E501
:param namespace: The namespace of this V1ParamRef. # noqa: E501
:type: str
"""
self._namespace = namespace
@property
def parameter_not_found_action(self):
"""Gets the parameter_not_found_action of this V1ParamRef. # noqa: E501
`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy. Allowed values are `Allow` or `Deny` Required # noqa: E501
:return: The parameter_not_found_action of this V1ParamRef. # noqa: E501
:rtype: str
"""
return self._parameter_not_found_action
@parameter_not_found_action.setter
def parameter_not_found_action(self, parameter_not_found_action):
"""Sets the parameter_not_found_action of this V1ParamRef.
`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy. Allowed values are `Allow` or `Deny` Required # noqa: E501
:param parameter_not_found_action: The parameter_not_found_action of this V1ParamRef. # noqa: E501
:type: str
"""
self._parameter_not_found_action = parameter_not_found_action
@property
def selector(self):
"""Gets the selector of this V1ParamRef. # noqa: E501
:return: The selector of this V1ParamRef. # noqa: E501
:rtype: V1LabelSelector
"""
return self._selector
@selector.setter
def selector(self, selector):
"""Sets the selector of this V1ParamRef.
:param selector: The selector of this V1ParamRef. # noqa: E501
:type: V1LabelSelector
"""
self._selector = selector
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ParamRef):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ParamRef):
return True
return self.to_dict() != other.to_dict()
| V1ParamRef |
python | numba__numba | numba/cuda/tests/cudapy/test_warp_ops.py | {
"start": 2357,
"end": 9042
} | class ____(CUDATestCase):
def test_useful_syncwarp(self):
compiled = cuda.jit("void(int32[:])")(useful_syncwarp)
nelem = 32
ary = np.empty(nelem, dtype=np.int32)
compiled[1, nelem](ary)
self.assertTrue(np.all(ary == 42))
def test_shfl_sync_idx(self):
compiled = cuda.jit("void(int32[:], int32)")(use_shfl_sync_idx)
nelem = 32
idx = 4
ary = np.empty(nelem, dtype=np.int32)
compiled[1, nelem](ary, idx)
self.assertTrue(np.all(ary == idx))
def test_shfl_sync_up(self):
compiled = cuda.jit("void(int32[:], int32)")(use_shfl_sync_up)
nelem = 32
delta = 4
ary = np.empty(nelem, dtype=np.int32)
exp = np.arange(nelem, dtype=np.int32)
exp[delta:] -= delta
compiled[1, nelem](ary, delta)
self.assertTrue(np.all(ary == exp))
def test_shfl_sync_down(self):
compiled = cuda.jit("void(int32[:], int32)")(use_shfl_sync_down)
nelem = 32
delta = 4
ary = np.empty(nelem, dtype=np.int32)
exp = np.arange(nelem, dtype=np.int32)
exp[:-delta] += delta
compiled[1, nelem](ary, delta)
self.assertTrue(np.all(ary == exp))
def test_shfl_sync_xor(self):
compiled = cuda.jit("void(int32[:], int32)")(use_shfl_sync_xor)
nelem = 32
xor = 16
ary = np.empty(nelem, dtype=np.int32)
exp = np.arange(nelem, dtype=np.int32) ^ xor
compiled[1, nelem](ary, xor)
self.assertTrue(np.all(ary == exp))
def test_shfl_sync_types(self):
types = int32, int64, float32, float64
values = (np.int32(-1), np.int64(1 << 42),
np.float32(np.pi), np.float64(np.pi))
for typ, val in zip(types, values):
compiled = cuda.jit((typ[:], typ))(use_shfl_sync_with_val)
nelem = 32
ary = np.empty(nelem, dtype=val.dtype)
compiled[1, nelem](ary, val)
self.assertTrue(np.all(ary == val))
def test_vote_sync_all(self):
compiled = cuda.jit("void(int32[:], int32[:])")(use_vote_sync_all)
nelem = 32
ary_in = np.ones(nelem, dtype=np.int32)
ary_out = np.empty(nelem, dtype=np.int32)
compiled[1, nelem](ary_in, ary_out)
self.assertTrue(np.all(ary_out == 1))
ary_in[-1] = 0
compiled[1, nelem](ary_in, ary_out)
self.assertTrue(np.all(ary_out == 0))
def test_vote_sync_any(self):
compiled = cuda.jit("void(int32[:], int32[:])")(use_vote_sync_any)
nelem = 32
ary_in = np.zeros(nelem, dtype=np.int32)
ary_out = np.empty(nelem, dtype=np.int32)
compiled[1, nelem](ary_in, ary_out)
self.assertTrue(np.all(ary_out == 0))
ary_in[2] = 1
ary_in[5] = 1
compiled[1, nelem](ary_in, ary_out)
self.assertTrue(np.all(ary_out == 1))
def test_vote_sync_eq(self):
compiled = cuda.jit("void(int32[:], int32[:])")(use_vote_sync_eq)
nelem = 32
ary_in = np.zeros(nelem, dtype=np.int32)
ary_out = np.empty(nelem, dtype=np.int32)
compiled[1, nelem](ary_in, ary_out)
self.assertTrue(np.all(ary_out == 1))
ary_in[1] = 1
compiled[1, nelem](ary_in, ary_out)
self.assertTrue(np.all(ary_out == 0))
ary_in[:] = 1
compiled[1, nelem](ary_in, ary_out)
self.assertTrue(np.all(ary_out == 1))
def test_vote_sync_ballot(self):
compiled = cuda.jit("void(uint32[:])")(use_vote_sync_ballot)
nelem = 32
ary = np.empty(nelem, dtype=np.uint32)
compiled[1, nelem](ary)
self.assertTrue(np.all(ary == np.uint32(0xffffffff)))
@unittest.skipUnless(_safe_cc_check((7, 0)),
"Matching requires at least Volta Architecture")
def test_match_any_sync(self):
compiled = cuda.jit("void(int32[:], int32[:])")(use_match_any_sync)
nelem = 10
ary_in = np.arange(nelem, dtype=np.int32) % 2
ary_out = np.empty(nelem, dtype=np.int32)
exp = np.tile((0b0101010101, 0b1010101010), 5)
compiled[1, nelem](ary_in, ary_out)
self.assertTrue(np.all(ary_out == exp))
@unittest.skipUnless(_safe_cc_check((7, 0)),
"Matching requires at least Volta Architecture")
def test_match_all_sync(self):
compiled = cuda.jit("void(int32[:], int32[:])")(use_match_all_sync)
nelem = 10
ary_in = np.zeros(nelem, dtype=np.int32)
ary_out = np.empty(nelem, dtype=np.int32)
compiled[1, nelem](ary_in, ary_out)
self.assertTrue(np.all(ary_out == 0b1111111111))
ary_in[1] = 4
compiled[1, nelem](ary_in, ary_out)
self.assertTrue(np.all(ary_out == 0))
@unittest.skipUnless(_safe_cc_check((7, 0)),
"Independent scheduling requires at least Volta "
"Architecture")
def test_independent_scheduling(self):
compiled = cuda.jit("void(uint32[:])")(use_independent_scheduling)
arr = np.empty(32, dtype=np.uint32)
exp = np.tile((0x11111111, 0x22222222, 0x44444444, 0x88888888), 8)
compiled[1, 32](arr)
self.assertTrue(np.all(arr == exp))
def test_activemask(self):
@cuda.jit
def use_activemask(x):
i = cuda.grid(1)
if (i % 2) == 0:
# Even numbered threads fill in even numbered array entries
# with binary "...01010101"
x[i] = cuda.activemask()
else:
# Odd numbered threads fill in odd numbered array entries
# with binary "...10101010"
x[i] = cuda.activemask()
out = np.zeros(32, dtype=np.uint32)
use_activemask[1, 32](out)
# 0x5 = 0101: The pattern from even-numbered threads
# 0xA = 1010: The pattern from odd-numbered threads
expected = np.tile((0x55555555, 0xAAAAAAAA), 16)
np.testing.assert_equal(expected, out)
def test_lanemask_lt(self):
@cuda.jit
def use_lanemask_lt(x):
i = cuda.grid(1)
x[i] = cuda.lanemask_lt()
out = np.zeros(32, dtype=np.uint32)
use_lanemask_lt[1, 32](out)
# A string of 1s that grows from the LSB for each entry:
# 0, 1, 3, 7, F, 1F, 3F, 7F, FF, 1FF, etc.
# or in binary:
# ...0001, ....0011, ...0111, etc.
expected = np.asarray([(2 ** i) - 1 for i in range(32)],
dtype=np.uint32)
np.testing.assert_equal(expected, out)
if __name__ == '__main__':
unittest.main()
| TestCudaWarpOperations |
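The warp-level bit patterns these tests assert can be sanity-checked on the host without a GPU. A minimal numpy sketch reproducing the expected `activemask` and `lanemask_lt` values from the last two tests:

import numpy as np

# Divergent even/odd lanes each see only their half of the warp active:
# even lanes read 0b...0101 (0x55555555), odd lanes 0b...1010 (0xAAAAAAAA).
expected_activemask = np.tile((0x55555555, 0xAAAAAAAA), 16).astype(np.uint32)

# lanemask_lt for lane i has exactly the low i bits set: (2**i) - 1.
expected_lanemask_lt = np.asarray([(1 << i) - 1 for i in range(32)],
                                  dtype=np.uint32)
assert expected_lanemask_lt[0] == 0 and expected_lanemask_lt[4] == 0xF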
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/language/visitor.py | {
"start": 102,
"end": 258
} | class ____(object):
def __nonzero__(self):
return False
def __bool__(self):
return False
BREAK = object()
REMOVE = Falsey()
| Falsey |
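The point of a dedicated falsy sentinel is that it stays distinguishable by identity from `None` and `False` while still failing truthiness checks. A minimal sketch using the names defined above:

# The sentinel is falsy in boolean context...
assert not REMOVE and not bool(REMOVE)
# ...but identity-distinct from the other falsy singletons, so a visitor
# returning REMOVE can be told apart from one returning None or False.
assert REMOVE is not None and REMOVE is not False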
python | nedbat__coveragepy | coverage/parser.py | {
"start": 22327,
"end": 24078
} | class ____(Block):
"""A block on the block stack representing a `try` block."""
def __init__(self, handler_start: TLineNo | None, final_start: TLineNo | None) -> None:
# The line number of the first "except" handler, if any.
self.handler_start = handler_start
# The line number of the "finally:" clause, if any.
self.final_start = final_start
def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
if self.handler_start is not None:
for xit in exits:
add_arc(xit.lineno, self.handler_start, xit.cause)
return True
# TODO: Shouldn't the cause messages join with "and" instead of "or"?
def is_constant_test_expr(node: ast.AST) -> tuple[bool, bool]:
"""Is this a compile-time constant test expression?
We don't try to mimic all of CPython's optimizations. We just have to
handle the kinds of constant expressions people might actually use.
"""
match node:
case ast.Constant():
return True, bool(node.value)
case ast.Name():
if node.id in ["True", "False", "None", "__debug__"]:
return True, eval(node.id) # pylint: disable=eval-used
case ast.UnaryOp():
if isinstance(node.op, ast.Not):
is_constant, val = is_constant_test_expr(node.operand)
return is_constant, not val
case ast.BoolOp():
rets = [is_constant_test_expr(v) for v in node.values]
is_constant = all(is_const for is_const, _ in rets)
if is_constant:
op = any if isinstance(node.op, ast.Or) else all
return True, op(v for _, v in rets)
return False, False
| TryBlock |
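A short sketch of how `is_constant_test_expr` folds a test expression, assuming the helper above is importable; inputs are parsed with `ast` in "eval" mode so the node shapes line up with the match arms:

import ast

def fold(src: str):
    # Hand the helper the bare expression node, matching its match arms.
    return is_constant_test_expr(ast.parse(src, mode="eval").body)

assert fold("not 0") == (True, True)               # UnaryOp over a Constant
assert fold("False or __debug__") == (True, True)  # holds under default (non -O) runs
assert fold("0 or some_name") == (False, False)    # non-constant operand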
python | numpy__numpy | numpy/polynomial/tests/test_polynomial.py | {
"start": 10271,
"end": 13945
} | class ____:
def test_polyint(self):
# check exceptions
assert_raises(TypeError, poly.polyint, [0], .5)
assert_raises(ValueError, poly.polyint, [0], -1)
assert_raises(ValueError, poly.polyint, [0], 1, [0, 0])
assert_raises(ValueError, poly.polyint, [0], lbnd=[0])
assert_raises(ValueError, poly.polyint, [0], scl=[0])
assert_raises(TypeError, poly.polyint, [0], axis=.5)
assert_raises(TypeError, poly.polyint, [1, 1], 1.)
# test integration of zero polynomial
for i in range(2, 5):
k = [0] * (i - 2) + [1]
res = poly.polyint([0], m=i, k=k)
assert_almost_equal(res, [0, 1])
# check single integration with integration constant
for i in range(5):
scl = i + 1
pol = [0] * i + [1]
tgt = [i] + [0] * i + [1 / scl]
res = poly.polyint(pol, m=1, k=[i])
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5):
scl = i + 1
pol = [0] * i + [1]
res = poly.polyint(pol, m=1, k=[i], lbnd=-1)
assert_almost_equal(poly.polyval(-1, res), i)
# check single integration with integration constant and scaling
for i in range(5):
scl = i + 1
pol = [0] * i + [1]
tgt = [i] + [0] * i + [2 / scl]
res = poly.polyint(pol, m=1, k=[i], scl=2)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0] * i + [1]
tgt = pol[:]
for k in range(j):
tgt = poly.polyint(tgt, m=1)
res = poly.polyint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5):
for j in range(2, 5):
pol = [0] * i + [1]
tgt = pol[:]
for k in range(j):
tgt = poly.polyint(tgt, m=1, k=[k])
res = poly.polyint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5):
for j in range(2, 5):
pol = [0] * i + [1]
tgt = pol[:]
for k in range(j):
tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1)
res = poly.polyint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5):
for j in range(2, 5):
pol = [0] * i + [1]
tgt = pol[:]
for k in range(j):
tgt = poly.polyint(tgt, m=1, k=[k], scl=2)
res = poly.polyint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_polyint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([poly.polyint(c) for c in c2d.T]).T
res = poly.polyint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([poly.polyint(c) for c in c2d])
res = poly.polyint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([poly.polyint(c, k=3) for c in c2d])
res = poly.polyint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
| TestIntegral |
python | doocs__leetcode | solution/2800-2899/2811.Check if it is Possible to Split Array/Solution.py | {
"start": 0,
"end": 530
} | class ____:
def canSplitArray(self, nums: List[int], m: int) -> bool:
@cache
def dfs(i: int, j: int) -> bool:
if i == j:
return True
for k in range(i, j):
a = k == i or s[k + 1] - s[i] >= m
b = k == j - 1 or s[j + 1] - s[k + 1] >= m
if a and b and dfs(i, k) and dfs(k + 1, j):
return True
return False
s = list(accumulate(nums, initial=0))
return dfs(0, len(nums) - 1)
| Solution |
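A quick check of the memoized interval DP against the problem's published examples (LeetCode 2811), assuming the imports the dataset elides: `List` from typing, `cache` from functools, and `accumulate` from itertools:

from functools import cache
from itertools import accumulate
from typing import List

sol = Solution()
assert sol.canSplitArray([2, 2, 1], 4)      # split [2, 2] | [1], then [2] | [2]
assert not sol.canSplitArray([2, 1, 3], 5)  # no first cut leaves two valid halves
assert sol.canSplitArray([2, 3, 3, 2, 3], 6)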
python | keras-team__keras | keras/src/losses/losses_test.py | {
"start": 27643,
"end": 30955
} | class ____(testing.TestCase):
def setup(self):
y_true = np.asarray([[1, 9, 2], [-5, -2, 6]], dtype=np.float32)
y_pred = np.asarray([[4, 8, 12], [8, 1, 3]], dtype=np.float32)
self.batch_size = 6
error = y_pred - y_true
self.expected_losses = np.log((np.exp(error) + np.exp(-error)) / 2)
self.y_true = y_true
self.y_pred = y_pred
def test_config(self):
logcosh_obj = losses.LogCosh(reduction="sum", name="logcosh_loss")
self.assertEqual(logcosh_obj.name, "logcosh_loss")
self.assertEqual(logcosh_obj.reduction, "sum")
config = logcosh_obj.get_config()
self.assertEqual(config, {"name": "logcosh_loss", "reduction": "sum"})
def test_unweighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
loss = logcosh_obj(self.y_true, self.y_pred)
expected_loss = np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(loss, expected_loss, 3)
def test_scalar_weighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
sample_weight = 2.3
loss = logcosh_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
expected_loss = (
sample_weight * np.sum(self.expected_losses) / self.batch_size
)
self.assertAlmostEqual(loss, expected_loss, 3)
# Verify we get the same output when the same input is given
loss_2 = logcosh_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
self.assertAlmostEqual(loss, loss_2, 3)
def test_sample_weighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
sample_weight = np.asarray([1.2, 3.4])
loss = logcosh_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
expected_loss = np.multiply(
self.expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)),
)
expected_loss = np.sum(expected_loss) / self.batch_size
self.assertAlmostEqual(loss, expected_loss, 3)
def test_timestep_weighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
y_true = np.asarray([1, 9, 2, -5, -2, 6]).reshape(2, 3, 1)
y_pred = np.asarray([4, 8, 12, 8, 1, 3]).reshape(2, 3, 1)
error = y_pred - y_true
expected_losses = np.log((np.exp(error) + np.exp(-error)) / 2)
sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3, 1))
loss = logcosh_obj(
y_true,
y_pred,
sample_weight=sample_weight,
)
expected_loss = (
np.sum(expected_losses * sample_weight) / self.batch_size
)
self.assertAlmostEqual(loss, expected_loss, 3)
def test_zero_weighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
sample_weight = 0
loss = logcosh_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
self.assertAlmostEqual(loss, 0.0, 3)
def test_dtype_arg(self):
self.setup()
logcosh_obj = losses.LogCosh(dtype="bfloat16")
loss = logcosh_obj(self.y_true, self.y_pred)
self.assertDType(loss, "bfloat16")
| LogCoshTest |
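The reference losses in `setup()` use the algebraic identity log(cosh(x)) = log((exp(x) + exp(-x)) / 2); a tiny numpy check of that equivalence:

import numpy as np

e = np.array([-3.0, 0.0, 4.0])
assert np.allclose(np.log(np.cosh(e)),
                   np.log((np.exp(e) + np.exp(-e)) / 2))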
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_format27.py | {
"start": 315,
"end": 1981
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_format27.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file with chart formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
chart.axis_ids = [108645376, 108655360]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
"trendline": {
"type": "polynomial",
"name": "My trend name",
"order": 2,
"forward": 0.5,
"backward": 0.5,
"display_equation": True,
"line": {
"color": "red",
"width": 1,
"dash_type": "long_dash",
},
},
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | kubernetes-client__python | kubernetes/base/dynamic/exceptions.py | {
"start": 3055,
"end": 3127
} | class ____(DynamicApiError):
""" 403: StatusForbidden """
| ForbiddenError |
python | celery__celery | t/unit/app/test_utils.py | {
"start": 159,
"end": 1099
} | class ____:
def test_is_mapping(self):
"""Settings should be a collections.Mapping"""
assert issubclass(Settings, Mapping)
def test_is_mutable_mapping(self):
"""Settings should be a collections.MutableMapping"""
assert issubclass(Settings, MutableMapping)
def test_find(self):
assert self.app.conf.find_option('always_eager')
def test_get_by_parts(self):
self.app.conf.task_do_this_and_that = 303
assert self.app.conf.get_by_parts(
'task', 'do', 'this', 'and', 'that') == 303
def test_find_value_for_key(self):
assert self.app.conf.find_value_for_key(
'always_eager') is False
def test_table(self):
assert self.app.conf.table(with_defaults=True)
assert self.app.conf.table(with_defaults=False)
assert self.app.conf.table(censored=False)
assert self.app.conf.table(censored=True)
| test_Settings |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 65234,
"end": 65412
} | class ____(Conjugate):
pass
@keras_export(["keras.ops.conj", "keras.ops.numpy.conj"])
def conj(x):
"""Shorthand for `keras.ops.conjugate`."""
return conjugate(x)
| Conj |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/shopify_graphql/bulk/query.py | {
"start": 12427,
"end": 13462
} | class ____(Metafield):
"""
{
customers(query: "updated_at:>='2023-02-07T00:00:00+00:00' AND updated_at:<='2023-12-04T00:00:00+00:00'", sortKey: UPDATED_AT) {
edges {
node {
__typename
id
customer_updated_at: updatedAt
metafields {
edges {
node {
id
namespace
value
key
description
createdAt
updatedAt
type
}
}
}
}
}
}
}
"""
type = MetafieldType.CUSTOMERS
record_composition = {
"new_record": "Customer",
"record_components": ["Metafield"],
}
| MetafieldCustomer |
python | pandas-dev__pandas | pandas/tests/arithmetic/test_numeric.py | {
"start": 45974,
"end": 56213
} | class ____:
# Tests in this class have been moved from type-specific test modules
# but not yet sorted, parametrized, and de-duplicated
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.sub,
operator.mul,
operator.floordiv,
operator.truediv,
],
)
@pytest.mark.parametrize(
"idx1",
[
RangeIndex(0, 10, 1),
RangeIndex(0, 20, 2),
RangeIndex(-10, 10, 2),
RangeIndex(5, -5, -1),
],
)
@pytest.mark.parametrize(
"idx2",
[
RangeIndex(0, 10, 1),
RangeIndex(0, 20, 2),
RangeIndex(-10, 10, 2),
RangeIndex(5, -5, -1),
],
)
def test_binops_index(self, op, idx1, idx2):
idx1 = idx1._rename("foo")
idx2 = idx2._rename("bar")
result = op(idx1, idx2)
expected = op(Index(idx1.to_numpy()), Index(idx2.to_numpy()))
tm.assert_index_equal(result, expected, exact="equiv")
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.sub,
operator.mul,
operator.floordiv,
operator.truediv,
],
)
@pytest.mark.parametrize(
"idx",
[
RangeIndex(0, 10, 1),
RangeIndex(0, 20, 2),
RangeIndex(-10, 10, 2),
RangeIndex(5, -5, -1),
],
)
@pytest.mark.parametrize("scalar", [-1, 1, 2])
def test_binops_index_scalar(self, op, idx, scalar):
result = op(idx, scalar)
expected = op(Index(idx.to_numpy()), scalar)
tm.assert_index_equal(result, expected, exact="equiv")
@pytest.mark.parametrize("idx1", [RangeIndex(0, 10, 1), RangeIndex(0, 20, 2)])
@pytest.mark.parametrize("idx2", [RangeIndex(0, 10, 1), RangeIndex(0, 20, 2)])
def test_binops_index_pow(self, idx1, idx2):
# numpy does not allow powers of negative integers so test separately
# https://github.com/numpy/numpy/pull/8127
idx1 = idx1._rename("foo")
idx2 = idx2._rename("bar")
result = pow(idx1, idx2)
expected = pow(Index(idx1.to_numpy()), Index(idx2.to_numpy()))
tm.assert_index_equal(result, expected, exact="equiv")
@pytest.mark.parametrize("idx", [RangeIndex(0, 10, 1), RangeIndex(0, 20, 2)])
@pytest.mark.parametrize("scalar", [1, 2])
def test_binops_index_scalar_pow(self, idx, scalar):
# numpy does not allow powers of negative integers so test separately
# https://github.com/numpy/numpy/pull/8127
result = pow(idx, scalar)
expected = pow(Index(idx.to_numpy()), scalar)
tm.assert_index_equal(result, expected, exact="equiv")
# TODO: divmod?
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.sub,
operator.mul,
operator.floordiv,
operator.truediv,
operator.pow,
operator.mod,
],
)
def test_arithmetic_with_frame_or_series(self, op):
# check that we return NotImplemented when operating with Series
# or DataFrame
index = RangeIndex(5)
other = Series(np.random.default_rng(2).standard_normal(5))
expected = op(Series(index), other)
result = op(index, other)
tm.assert_series_equal(result, expected)
other = pd.DataFrame(np.random.default_rng(2).standard_normal((2, 5)))
expected = op(pd.DataFrame([index, index]), other)
result = op(index, other)
tm.assert_frame_equal(result, expected)
def test_numeric_compat2(self):
# validate that we are handling the RangeIndex overrides to numeric ops
# and returning RangeIndex where possible
idx = RangeIndex(0, 10, 2)
result = idx * 2
expected = RangeIndex(0, 20, 4)
tm.assert_index_equal(result, expected, exact=True)
result = idx + 2
expected = RangeIndex(2, 12, 2)
tm.assert_index_equal(result, expected, exact=True)
result = idx - 2
expected = RangeIndex(-2, 8, 2)
tm.assert_index_equal(result, expected, exact=True)
result = idx / 2
expected = RangeIndex(0, 5, 1).astype("float64")
tm.assert_index_equal(result, expected, exact=True)
result = idx / 4
expected = RangeIndex(0, 10, 2) / 4
tm.assert_index_equal(result, expected, exact=True)
result = idx // 1
expected = idx
tm.assert_index_equal(result, expected, exact=True)
# __mul__
result = idx * idx
expected = Index(idx.values * idx.values)
tm.assert_index_equal(result, expected, exact=True)
# __pow__
idx = RangeIndex(0, 1000, 2)
result = idx**2
expected = Index(idx._values) ** 2
tm.assert_index_equal(Index(result.values), expected, exact=True)
@pytest.mark.parametrize(
"idx, div, expected",
[
# TODO: add more dtypes
(RangeIndex(0, 1000, 2), 2, RangeIndex(0, 500, 1)),
(RangeIndex(-99, -201, -3), -3, RangeIndex(33, 67, 1)),
(
RangeIndex(0, 1000, 1),
2,
Index(RangeIndex(0, 1000, 1)._values) // 2,
),
(
RangeIndex(0, 100, 1),
2.0,
Index(RangeIndex(0, 100, 1)._values) // 2.0,
),
(RangeIndex(0), 50, RangeIndex(0)),
(RangeIndex(2, 4, 2), 3, RangeIndex(0, 1, 1)),
(RangeIndex(-5, -10, -6), 4, RangeIndex(-2, -1, 1)),
(RangeIndex(-100, -200, 3), 2, RangeIndex(0)),
],
)
def test_numeric_compat2_floordiv(self, idx, div, expected):
# __floordiv__
tm.assert_index_equal(idx // div, expected, exact=True)
@pytest.mark.parametrize("dtype", [np.int64, np.float64])
@pytest.mark.parametrize("delta", [1, 0, -1])
def test_addsub_arithmetic(self, dtype, delta):
# GH#8142
delta = dtype(delta)
index = Index([10, 11, 12], dtype=dtype)
result = index + delta
expected = Index(index.values + delta, dtype=dtype)
tm.assert_index_equal(result, expected)
# this subtraction used to fail
result = index - delta
expected = Index(index.values - delta, dtype=dtype)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index + index, 2 * index)
tm.assert_index_equal(index - index, 0 * index)
assert not (index - index).empty
def test_pow_nan_with_zero(self, box_with_array):
left = Index([np.nan, np.nan, np.nan])
right = Index([0, 0, 0])
expected = Index([1.0, 1.0, 1.0])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = left**right
tm.assert_equal(result, expected)
def test_fill_value_inf_masking():
# GH #27464 make sure we mask 0/1 with Inf and not NaN
df = pd.DataFrame({"A": [0, 1, 2], "B": [1.1, None, 1.1]})
other = pd.DataFrame({"A": [1.1, 1.2, 1.3]}, index=[0, 2, 3])
result = df.rfloordiv(other, fill_value=1)
expected = pd.DataFrame(
{"A": [np.inf, 1.0, 0.0, 1.0], "B": [0.0, np.nan, 0.0, np.nan]}
)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_dataframe_div_silenced():
# GH#26793
pdf1 = pd.DataFrame(
{
"A": np.arange(10),
"B": [np.nan, 1, 2, 3, 4] * 2,
"C": [np.nan] * 10,
"D": np.arange(10),
},
index=list("abcdefghij"),
columns=list("ABCD"),
)
pdf2 = pd.DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
index=list("abcdefghjk"),
columns=list("ABCX"),
)
with tm.assert_produces_warning(None):
pdf1.div(pdf2, fill_value=0)
@pytest.mark.parametrize(
"data, expected_data",
[([0, 1, 2], [0, 2, 4])],
)
@pytest.mark.parametrize("box_pandas_1d_array", [Index, Series, tm.to_array])
@pytest.mark.parametrize("box_1d_array", [Index, Series, tm.to_array, np.array, list])
def test_integer_array_add_list_like(
box_pandas_1d_array, box_1d_array, data, expected_data
):
# GH22606 Verify operators with IntegerArray and list-likes
arr = array(data, dtype="Int64")
container = box_pandas_1d_array(arr)
left = container + box_1d_array(data)
right = box_1d_array(data) + container
if Series in [box_1d_array, box_pandas_1d_array]:
cls = Series
elif Index in [box_1d_array, box_pandas_1d_array]:
cls = Index
else:
cls = array
expected = cls(expected_data, dtype="Int64")
tm.assert_equal(left, expected)
tm.assert_equal(right, expected)
def test_sub_multiindex_swapped_levels():
# GH 9952
df = pd.DataFrame(
{"a": np.random.default_rng(2).standard_normal(6)},
index=pd.MultiIndex.from_product(
[["a", "b"], [0, 1, 2]], names=["levA", "levB"]
),
)
df2 = df.copy()
df2.index = df2.index.swaplevel(0, 1)
result = df - df2
expected = pd.DataFrame([0.0] * 6, columns=["a"], index=df.index)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("power", [1, 2, 5])
@pytest.mark.parametrize("string_size", [0, 1, 2, 5])
def test_empty_str_comparison(power, string_size):
# GH 37348
a = np.array(range(10**power))
right = pd.DataFrame(a, dtype=np.int64)
left = " " * string_size
result = right == left
expected = pd.DataFrame(np.zeros(right.shape, dtype=bool))
tm.assert_frame_equal(result, expected)
def test_series_add_sub_with_UInt64():
# GH 22023
series1 = Series([1, 2, 3])
series2 = Series([2, 1, 3], dtype="UInt64")
result = series1 + series2
expected = Series([3, 3, 6], dtype="Float64")
tm.assert_series_equal(result, expected)
result = series1 - series2
expected = Series([-1, 1, 0], dtype="Float64")
tm.assert_series_equal(result, expected)
| TestNumericArithmeticUnsorted |
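The invariant `test_numeric_compat2` pins down, that RangeIndex arithmetic stays a lazy RangeIndex where possible, is easy to see directly; a minimal sketch:

import pandas as pd

idx = pd.RangeIndex(0, 10, 2)
assert isinstance(idx * 2, pd.RangeIndex)        # no materialization
assert (idx * 2).equals(pd.RangeIndex(0, 20, 4))
assert (idx + 2).equals(pd.RangeIndex(2, 12, 2))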
python | doocs__leetcode | solution/1700-1799/1733.Minimum Number of People to Teach/Solution.py | {
"start": 0,
"end": 650
} | class ____:
def minimumTeachings(
self, n: int, languages: List[List[int]], friendships: List[List[int]]
) -> int:
def check(u: int, v: int) -> bool:
for x in languages[u - 1]:
for y in languages[v - 1]:
if x == y:
return True
return False
s = set()
for u, v in friendships:
if not check(u, v):
s.add(u)
s.add(v)
cnt = Counter()
for u in s:
for l in languages[u - 1]:
cnt[l] += 1
return len(s) - max(cnt.values(), default=0)
| Solution |
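A worked check against the problem's first published example (LeetCode 1733): only the (1, 2) friendship lacks a shared language, so the candidate set is {1, 2}, and teaching one language to one of them suffices. Assumes the elided `Counter` and `List` imports:

from collections import Counter
from typing import List

sol = Solution()
assert sol.minimumTeachings(
    n=2,
    languages=[[1], [2], [1, 2]],
    friendships=[[1, 2], [1, 3], [2, 3]],
) == 1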
python | pytorch__pytorch | torch/distributions/relaxed_bernoulli.py | {
"start": 4255,
"end": 6151
} | class ____(TransformedDistribution):
r"""
Creates a RelaxedBernoulli distribution, parametrized by
:attr:`temperature`, and either :attr:`probs` or :attr:`logits`
(but not both). This is a relaxed version of the `Bernoulli` distribution,
    so the values are in (0, 1), and it has reparametrizable samples.
Example::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = RelaxedBernoulli(torch.tensor([2.2]),
... torch.tensor([0.1, 0.2, 0.3, 0.99]))
>>> m.sample()
tensor([ 0.2951, 0.3442, 0.8918, 0.9021])
Args:
temperature (Tensor): relaxation temperature
probs (Number, Tensor): the probability of sampling `1`
logits (Number, Tensor): the log-odds of sampling `1`
"""
arg_constraints = {"probs": constraints.unit_interval, "logits": constraints.real}
# pyrefly: ignore [bad-override]
support = constraints.unit_interval
has_rsample = True
# pyrefly: ignore [bad-override]
base_dist: LogitRelaxedBernoulli
def __init__(
self,
temperature: Tensor,
probs: Optional[Union[Tensor, Number]] = None,
logits: Optional[Union[Tensor, Number]] = None,
validate_args: Optional[bool] = None,
) -> None:
base_dist = LogitRelaxedBernoulli(temperature, probs, logits)
super().__init__(base_dist, SigmoidTransform(), validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(RelaxedBernoulli, _instance)
return super().expand(batch_shape, _instance=new)
@property
def temperature(self) -> Tensor:
return self.base_dist.temperature
@property
def logits(self) -> Tensor:
return self.base_dist.logits
@property
def probs(self) -> Tensor:
return self.base_dist.probs
| RelaxedBernoulli |
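A short usage sketch of the property the docstring advertises, reparameterized samples that carry gradients back to the parameters:

import torch
from torch.distributions import RelaxedBernoulli

probs = torch.tensor([0.1, 0.2, 0.3, 0.99], requires_grad=True)
dist = RelaxedBernoulli(torch.tensor([2.2]), probs=probs)
sample = dist.rsample()  # differentiable, values lie in (0, 1)
sample.sum().backward()  # gradients flow through the sigmoid transform
assert probs.grad is not None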
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/definitions_load_context.py | {
"start": 11312,
"end": 14637
} | class ____(ABC, Generic[TState]):
"""A class for building state-backed definitions. In a structured way. It
handles manipulation of the DefinitionsLoadContext on the user's behalf.
The goal here is so that an unreliable backing source is fetched only
at code server load time, which defines the code location. When, for example,
a run worker is launched, the backing source is not queryed again, and
only `defs_from_state` is called.
Current meant for internal usage only hence TState must be a marked as whitelist_for_serdes.
Args:
defs_key (str): The unique key for the definitions. Must be unique per code location.
Examples:
.. code-block:: python
@whitelist_for_serdes
@record
class ExampleDefState:
a_string: str
class ExampleStateBackedDefinitionsLoader(StateBackedDefinitionsLoader[ExampleDefState]):
def fetch_state(self) -> ExampleDefState:
# Fetch from potentially unreliable API (e.g. Rest API)
return ExampleDefState(a_string="foo")
def defs_from_state(self, state: ExampleDefState) -> Definitions:
# Construct or reconstruct the Definitions from the previously
# fetched state.
return Definitions([AssetSpec(key=state.a_string)])
"""
@property
@abstractmethod
def defs_key(self) -> str:
"""The unique key for the definitions. Must be unique per code location."""
...
@abstractmethod
def fetch_state(self) -> TState:
"""Subclasses must implement this method. This is where the integration runs
code that fetches the backing state from the source of truth for the definitions.
This is only called when the code location is initializing, for example on
code server load, or when loading via dagster dev.
"""
...
@abstractmethod
def defs_from_state(self, state: TState) -> Definitions:
"""Subclasses must implement this method. It is invoked whenever the code location
        is loading, whether it be initialization or reconstruction. In the case of
        initialization, it takes the result of the `fetch_state` call that just happened.
When reconstructing, it takes the state that was previously fetched and attached
as metadata.
This method also takes responsibility for attaching the state to the definitions
on its metadata, with the key passed in as defs_key.
"""
...
def get_or_fetch_state(self) -> TState:
context = DefinitionsLoadContext.get()
state = (
cast("TState", deserialize_value(context.reconstruction_metadata[self.defs_key]))
if (
context.load_type == DefinitionsLoadType.RECONSTRUCTION
and self.defs_key in context.reconstruction_metadata
)
else self.fetch_state()
)
context.add_to_pending_reconstruction_metadata(self.defs_key, serialize_value(state))
return state
def build_defs(self) -> Definitions:
state = self.get_or_fetch_state()
return self.defs_from_state(state).with_reconstruction_metadata(
{self.defs_key: serialize_value(state)}
)
| StateBackedDefinitionsLoader |
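A hedged lifecycle sketch with a hypothetical loader, reusing `ExampleDefState` from the docstring above; in real usage the state type must carry `@whitelist_for_serdes` so `serialize_value`/`deserialize_value` in `get_or_fetch_state` can round-trip it:

from dagster import AssetSpec, Definitions

class MyApiLoader(StateBackedDefinitionsLoader[ExampleDefState]):  # hypothetical
    @property
    def defs_key(self) -> str:
        return "my_integration"  # must be unique per code location

    def fetch_state(self) -> ExampleDefState:
        # Stand-in for the slow/unreliable API call made only at load time.
        return ExampleDefState(a_string="asset_from_api")

    def defs_from_state(self, state: ExampleDefState) -> Definitions:
        return Definitions(assets=[AssetSpec(key=state.a_string)])

# Initialization fetches; reconstruction replays the cached, serialized state.
defs = MyApiLoader().build_defs()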
python | jupyterlab__jupyterlab | jupyterlab/extensions/pypi.py | {
"start": 1000,
"end": 3491
} | class ____(xmlrpc.client.Transport):
def set_proxy(self, host, port=None, headers=None):
self.proxy = host, port
self.proxy_headers = headers
def make_connection(self, host):
connection = http.client.HTTPConnection(*self.proxy)
connection.set_tunnel(host, headers=self.proxy_headers)
self._connection = host, connection
return connection
all_proxy_url = environ.get("ALL_PROXY")
# For historical reasons, we also support the lowercase environment variables.
# Info: https://about.gitlab.com/blog/2021/01/27/we-need-to-talk-no-proxy/
http_proxy_url = environ.get("http_proxy") or environ.get("HTTP_PROXY") or all_proxy_url
https_proxy_url = (
environ.get("https_proxy") or environ.get("HTTPS_PROXY") or http_proxy_url or all_proxy_url
)
# sniff ``httpx`` version for version-sensitive API
_httpx_version = Version(httpx.__version__)
_httpx_client_args = {}
xmlrpc_transport_override = None
if http_proxy_url:
http_proxy = urlparse(http_proxy_url)
proxy_host, _, proxy_port = http_proxy.netloc.partition(":")
if _httpx_version >= Version("0.28.0"):
_httpx_client_args = {
"mounts": {
"http://": httpx.AsyncHTTPTransport(proxy=http_proxy_url),
"https://": httpx.AsyncHTTPTransport(proxy=https_proxy_url),
}
}
else:
_httpx_client_args = {
"proxies": {
"http://": http_proxy_url,
"https://": https_proxy_url,
}
}
xmlrpc_transport_override = ProxiedTransport()
xmlrpc_transport_override.set_proxy(proxy_host, proxy_port)
async def _fetch_package_metadata(
client: httpx.AsyncClient,
name: str,
latest_version: str,
base_url: str,
) -> dict:
response = await client.get(
base_url + f"/{name}/{latest_version}/json",
headers={"Content-Type": "application/json"},
)
if response.status_code < 400: # noqa PLR2004
data = json.loads(response.text).get("info")
# Keep minimal information to limit cache size
return {
k: data.get(k)
for k in [
"author",
"bugtrack_url",
"docs_url",
"home_page",
"license",
"package_url",
"project_url",
"project_urls",
"summary",
]
}
else:
return {}
| ProxiedTransport |
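A hedged usage sketch of the override: `xmlrpc.client.ServerProxy` accepts a custom transport, and the module builds the proxied one only when a proxy is configured (the endpoint URL here is illustrative):

import xmlrpc.client

client = xmlrpc.client.ServerProxy(
    "https://pypi.org/pypi",
    transport=xmlrpc_transport_override,  # None falls back to the default
)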
python | django__django | tests/auth_tests/test_forms.py | {
"start": 64927,
"end": 66793
} | class ____(TestDataMixin, TestCase):
@sensitive_variables("data")
def test_passwords_marked_as_sensitive_in_admin_forms(self):
data = {
"password1": "passwordsensitive",
"password2": "sensitivepassword",
"usable_password": "true",
}
forms = [
AdminUserCreationForm({**data, "username": "newusername"}),
AdminPasswordChangeForm(self.u1, data),
]
password1_fragment = """
<td>password1</td>
<td class="code"><pre>'********************'</pre></td>
"""
password2_fragment = """
<td>password2</td>
<td class="code"><pre>'********************'</pre></td>
"""
error = ValueError("Forced error")
for form in forms:
with self.subTest(form=form):
with mock.patch.object(
SetPasswordMixin, "validate_passwords", side_effect=error
):
try:
form.is_valid()
except ValueError:
exc_info = sys.exc_info()
else:
self.fail("Form validation should have failed.")
response = technical_500_response(RequestFactory().get("/"), *exc_info)
self.assertNotContains(response, "sensitivepassword", status_code=500)
self.assertNotContains(response, "passwordsensitive", status_code=500)
self.assertContains(response, str(error), status_code=500)
self.assertContains(
response, password1_fragment, html=True, status_code=500
)
self.assertContains(
response, password2_fragment, html=True, status_code=500
)
| SensitiveVariablesTest |
python | plotly__plotly.py | plotly/graph_objs/scatter3d/marker/colorbar/_tickformatstop.py | {
"start": 233,
"end": 8559
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatter3d.marker.colorbar"
_path_str = "scatter3d.marker.colorbar.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs,
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatter3d.mark
er.colorbar.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super().__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatter3d.marker.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.marker.colorbar.Tickformatstop`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("dtickrange", arg, dtickrange)
self._set_property("enabled", arg, enabled)
self._set_property("name", arg, name)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("value", arg, value)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickformatstop |
python | pypa__warehouse | warehouse/helpdesk/interfaces.py | {
"start": 114,
"end": 549
} | class ____(Interface):
def create_service(context, request) -> IHelpDeskService:
"""
Create a new instance of the service.
"""
def create_conversation(*, request_json: dict) -> str:
"""
Create a new conversation in the helpdesk service.
"""
def add_tag(*, conversation_url: str, tag: str) -> None:
"""
Add a tag to a conversation.
"""
| IHelpDeskService |
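A sketch of a minimal implementer, assuming `zope.interface` (the library these `Interface` declarations come from) is available; the class name and URL below are hypothetical:

from zope.interface import implementer

@implementer(IHelpDeskService)
class PrintingHelpDeskService:
    def create_conversation(self, *, request_json: dict) -> str:
        print(f"new conversation: {request_json}")
        return "https://helpdesk.invalid/conversations/1"

    def add_tag(self, *, conversation_url: str, tag: str) -> None:
        print(f"tag {tag!r} -> {conversation_url}")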
python | doocs__leetcode | solution/1500-1599/1570.Dot Product of Two Sparse Vectors/Solution.py | {
"start": 0,
"end": 512
} | class ____:
def __init__(self, nums: List[int]):
self.d = {i: v for i, v in enumerate(nums) if v}
# Return the dotProduct of two sparse vectors
def dotProduct(self, vec: "SparseVector") -> int:
a, b = self.d, vec.d
if len(b) < len(a):
a, b = b, a
return sum(v * b.get(i, 0) for i, v in a.items())
# Your SparseVector object will be instantiated and called as such:
# v1 = SparseVector(nums1)
# v2 = SparseVector(nums2)
# ans = v1.dotProduct(v2)
| SparseVector |
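A worked check against the problem's first example (LeetCode 1570): only index 3 is nonzero in both vectors, so the product is 2 * 4 = 8. Assumes the elided `List` import:

from typing import List

v1 = SparseVector([1, 0, 0, 2, 3])
v2 = SparseVector([0, 3, 0, 4, 0])
assert v1.dotProduct(v2) == 8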
python | getsentry__sentry | src/sentry/api/endpoints/organization_trace_meta.py | {
"start": 1391,
"end": 2079
} | class ____(TypedDict, total=False):
logs: int
errors: int
performance_issues: int
span_count: int
transaction_child_count_map: SnubaData
span_count_map: dict[str, int]
uptime_checks: int # Only present when include_uptime is True
def extract_uptime_count(uptime_result: list[TraceItemTableResponse]) -> int:
"""Safely extract uptime count from query result."""
if not uptime_result:
return 0
first_result = uptime_result[0]
if not first_result.column_values:
return 0
first_column = first_result.column_values[0]
return len(first_column.results) if first_column.results else 0
@region_silo_endpoint
| SerializedResponse |
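The defensive chain in `extract_uptime_count` is easy to exercise with stand-in objects; a sketch using `SimpleNamespace` in place of the real Snuba response types:

from types import SimpleNamespace as NS

assert extract_uptime_count([]) == 0
assert extract_uptime_count([NS(column_values=[])]) == 0
assert extract_uptime_count(
    [NS(column_values=[NS(results=[1, 2, 3])])]
) == 3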
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol1.py | {
"start": 2619,
"end": 2657
} | class ____(Protocol[int]):
pass
| Proto6 |
python | plotly__plotly.py | plotly/graph_objs/scatter3d/marker/_colorbar.py | {
"start": 233,
"end": 61680
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatter3d.marker"
_path_str = "scatter3d.marker.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"labelalias",
"len",
"lenmode",
"minexponent",
"nticks",
"orientation",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabeloverflow",
"ticklabelposition",
"ticklabelstep",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"x",
"xanchor",
"xpad",
"xref",
"y",
"yanchor",
"ypad",
"yref",
}
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T
(10^12). *SI extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI
extended* is used and the exponent is beyond the above ranges,
the formatting rule will automatically be switched to the power
notation.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
@property
def labelalias(self):
"""
Replacement text for specific tick or hover labels. For example
using {US: 'USA', CA: 'Canada'} changes US to USA and CA to
Canada. The labels we would have shown must match the keys
exactly, after adding any tickprefix or ticksuffix. For
negative numbers the minus sign symbol used (U+2212) is wider
than the regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis type, and
both keys (if needed) and values (if desired) can include html-
like tags or MathJax.
The 'labelalias' property accepts values of any type
Returns
-------
Any
"""
return self["labelalias"]
@labelalias.setter
def labelalias(self, val):
self["labelalias"] = val
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
@property
def orientation(self):
"""
Sets the orientation of the colorbar.
The 'orientation' property is an enumeration that may be specified as:
- One of the following enumeration values:
['h', 'v']
Returns
-------
Any
"""
return self["orientation"]
@orientation.setter
def orientation(self, val):
self["orientation"] = val
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
@property
def thickness(self):
"""
Sets the thickness of the color bar This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatter3d.marker.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Returns
-------
plotly.graph_objs.scatter3d.marker.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.scatter3d.marker.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Returns
-------
tuple[plotly.graph_objs.scatter3d.marker.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.data.scatter3d.mark
er.colorbar.tickformatstopdefaults), sets the default property
values to use for elements of
scatter3d.marker.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatter3d.marker.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Returns
-------
plotly.graph_objs.scatter3d.marker.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
@property
def ticklabeloverflow(self):
"""
Determines how we handle tick labels that would overflow either
the graph div or the domain of the axis. The default value for
inside tick labels is *hide past domain*. In other cases the
default is *hide past div*.
The 'ticklabeloverflow' property is an enumeration that may be specified as:
- One of the following enumeration values:
['allow', 'hide past div', 'hide past domain']
Returns
-------
Any
"""
return self["ticklabeloverflow"]
@ticklabeloverflow.setter
def ticklabeloverflow(self, val):
self["ticklabeloverflow"] = val
@property
def ticklabelposition(self):
"""
Determines where tick labels are drawn relative to the ticks.
Left and right options are used when `orientation` is "h", top
and bottom when `orientation` is "v".
The 'ticklabelposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', 'outside top', 'inside top',
'outside left', 'inside left', 'outside right', 'inside
right', 'outside bottom', 'inside bottom']
Returns
-------
Any
"""
return self["ticklabelposition"]
@ticklabelposition.setter
def ticklabelposition(self, val):
self["ticklabelposition"] = val
@property
def ticklabelstep(self):
"""
Sets the spacing between tick labels as compared to the spacing
between ticks. A value of 1 (default) means each tick gets a
label. A value of 2 means shows every 2nd label. A larger value
n means only every nth tick is labeled. `tick0` determines
which labels are shown. Not implemented for axes with `type`
"log" or "multicategory", or when `tickmode` is "array".
The 'ticklabelstep' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["ticklabelstep"]
@ticklabelstep.setter
def ticklabelstep(self, val):
self["ticklabelstep"] = val
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' ticks
are drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ticktext`.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `tickvals`.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatter3d.marker.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Returns
-------
plotly.graph_objs.scatter3d.marker.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
@property
def x(self):
"""
Sets the x position with respect to `xref` of the color bar (in
plot fraction). When `xref` is "paper", defaults to 1.02 when
`orientation` is "v" and 0.5 when `orientation` is "h". When
`xref` is "container", defaults to 1 when `orientation` is "v"
and 0.5 when `orientation` is "h". Must be between 0 and 1 if
`xref` is "container" and between "-2" and 3 if `xref` is
"paper".
The 'x' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar. Defaults to "left" when `orientation` is "v" and
"center" when `orientation` is "h".
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
@property
def xref(self):
"""
Sets the container `x` refers to. "container" spans the entire
`width` of the plot. "paper" refers to the width of the
plotting area only.
The 'xref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['container', 'paper']
Returns
-------
Any
"""
return self["xref"]
@xref.setter
def xref(self, val):
self["xref"] = val
@property
def y(self):
"""
Sets the y position with respect to `yref` of the color bar (in
plot fraction). When `yref` is "paper", defaults to 0.5 when
`orientation` is "v" and 1.02 when `orientation` is "h". When
`yref` is "container", defaults to 0.5 when `orientation` is
"v" and 1 when `orientation` is "h". Must be between 0 and 1 if
`yref` is "container" and between "-2" and 3 if `yref` is
"paper".
The 'y' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def yanchor(self):
"""
Sets this color bar's vertical position anchor. This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar. Defaults to "middle" when `orientation` is "v"
and "bottom" when `orientation` is "h".
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
@property
def yref(self):
"""
Sets the container `y` refers to. "container" spans the entire
`height` of the plot. "paper" refers to the height of the
plotting area only.
The 'yref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['container', 'paper']
Returns
-------
Any
"""
return self["yref"]
@yref.setter
def yref(self, val):
self["yref"] = val
@property
def _prop_descriptions(self):
return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) of the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
len
Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.scatter3d.marke
r.colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.scatte
r3d.marker.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
scatter3d.marker.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn relative to the
ticks. Left and right options are used when
`orientation` is "h", top and bottom when `orientation`
is "v".
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.scatter3d.marker.colorbar.
Title` instance or dict with compatible properties
x
Sets the x position with respect to `xref` of the color
bar (in plot fraction). When `xref` is "paper",
defaults to 1.02 when `orientation` is "v" and 0.5 when
`orientation` is "h". When `xref` is "container",
defaults to 1 when `orientation` is "v" and 0.5 when
`orientation` is "h". Must be between 0 and 1 if `xref`
is "container" and between "-2" and 3 if `xref` is
"paper".
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar. Defaults to "left" when
`orientation` is "v" and "center" when `orientation` is
"h".
xpad
Sets the amount of padding (in px) along the x
direction.
xref
Sets the container `x` refers to. "container" spans the
entire `width` of the plot. "paper" refers to the width
of the plotting area only.
y
Sets the y position with respect to `yref` of the color
bar (in plot fraction). When `yref` is "paper",
defaults to 0.5 when `orientation` is "v" and 1.02 when
`orientation` is "h". When `yref` is "container",
defaults to 0.5 when `orientation` is "v" and 1 when
`orientation` is "h". Must be between 0 and 1 if `yref`
is "container" and between "-2" and 3 if `yref` is
"paper".
yanchor
Sets this color bar's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar. Defaults to "middle" when
`orientation` is "v" and "bottom" when `orientation` is
"h".
ypad
Sets the amount of padding (in px) along the y
direction.
yref
Sets the container `y` refers to. "container" spans the
entire `height` of the plot. "paper" refers to the
height of the plotting area only.
"""
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
labelalias=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
orientation=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabeloverflow=None,
ticklabelposition=None,
ticklabelstep=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
x=None,
xanchor=None,
xpad=None,
xref=None,
y=None,
yanchor=None,
ypad=None,
yref=None,
**kwargs,
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatter3d.marker.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) of the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
len
Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.scatter3d.marke
r.colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.scatte
r3d.marker.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
scatter3d.marker.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn relative to the
ticks. Left and right options are used when
`orientation` is "h", top and bottom when `orientation`
is "v".
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.scatter3d.marker.colorbar.
Title` instance or dict with compatible properties
x
Sets the x position with respect to `xref` of the color
bar (in plot fraction). When `xref` is "paper",
defaults to 1.02 when `orientation` is "v" and 0.5 when
`orientation` is "h". When `xref` is "container",
defaults to 1 when `orientation` is "v" and 0.5 when
`orientation` is "h". Must be between 0 and 1 if `xref`
is "container" and between "-2" and 3 if `xref` is
"paper".
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar. Defaults to "left" when
`orientation` is "v" and "center" when `orientation` is
"h".
xpad
Sets the amount of padding (in px) along the x
direction.
xref
Sets the container `x` refers to. "container" spans the
entire `width` of the plot. "paper" refers to the width
of the plotting area only.
y
Sets the y position with respect to `yref` of the color
bar (in plot fraction). When `yref` is "paper",
defaults to 0.5 when `orientation` is "v" and 1.02 when
`orientation` is "h". When `yref` is "container",
defaults to 0.5 when `orientation` is "v" and 1 when
`orientation` is "h". Must be between 0 and 1 if `yref`
is "container" and between "-2" and 3 if `yref` is
"paper".
yanchor
Sets this color bar's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar. Defaults to "middle" when
`orientation` is "v" and "bottom" when `orientation` is
"h".
ypad
Sets the amount of padding (in px) along the y
direction.
yref
Sets the container `y` refers to. "container" spans the
entire `height` of the plot. "paper" refers to the
height of the plotting area only.
Returns
-------
ColorBar
"""
super().__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatter3d.marker.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.marker.ColorBar`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("borderwidth", arg, borderwidth)
self._set_property("dtick", arg, dtick)
self._set_property("exponentformat", arg, exponentformat)
self._set_property("labelalias", arg, labelalias)
self._set_property("len", arg, len)
self._set_property("lenmode", arg, lenmode)
self._set_property("minexponent", arg, minexponent)
self._set_property("nticks", arg, nticks)
self._set_property("orientation", arg, orientation)
self._set_property("outlinecolor", arg, outlinecolor)
self._set_property("outlinewidth", arg, outlinewidth)
self._set_property("separatethousands", arg, separatethousands)
self._set_property("showexponent", arg, showexponent)
self._set_property("showticklabels", arg, showticklabels)
self._set_property("showtickprefix", arg, showtickprefix)
self._set_property("showticksuffix", arg, showticksuffix)
self._set_property("thickness", arg, thickness)
self._set_property("thicknessmode", arg, thicknessmode)
self._set_property("tick0", arg, tick0)
self._set_property("tickangle", arg, tickangle)
self._set_property("tickcolor", arg, tickcolor)
self._set_property("tickfont", arg, tickfont)
self._set_property("tickformat", arg, tickformat)
self._set_property("tickformatstops", arg, tickformatstops)
self._set_property("tickformatstopdefaults", arg, tickformatstopdefaults)
self._set_property("ticklabeloverflow", arg, ticklabeloverflow)
self._set_property("ticklabelposition", arg, ticklabelposition)
self._set_property("ticklabelstep", arg, ticklabelstep)
self._set_property("ticklen", arg, ticklen)
self._set_property("tickmode", arg, tickmode)
self._set_property("tickprefix", arg, tickprefix)
self._set_property("ticks", arg, ticks)
self._set_property("ticksuffix", arg, ticksuffix)
self._set_property("ticktext", arg, ticktext)
self._set_property("ticktextsrc", arg, ticktextsrc)
self._set_property("tickvals", arg, tickvals)
self._set_property("tickvalssrc", arg, tickvalssrc)
self._set_property("tickwidth", arg, tickwidth)
self._set_property("title", arg, title)
self._set_property("x", arg, x)
self._set_property("xanchor", arg, xanchor)
self._set_property("xpad", arg, xpad)
self._set_property("xref", arg, xref)
self._set_property("y", arg, y)
self._set_property("yanchor", arg, yanchor)
self._set_property("ypad", arg, ypad)
self._set_property("yref", arg, yref)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| ColorBar |
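A minimal, hedged usage sketch for the masked ColorBar class above (toy data and values assumed, not taken from the sample): it exercises the array tick mode documented in the docstrings via plotly's public API.

import plotly.graph_objects as go

# Assumed toy data; the colorbar dict keys map onto the ColorBar properties above.
fig = go.Figure(
    go.Scatter3d(
        x=[0, 1, 2], y=[0, 1, 2], z=[0, 1, 2],
        mode="markers",
        marker=dict(
            color=[0, 5, 10],
            colorbar=dict(
                tickmode="array",          # place ticks exactly at tickvals
                tickvals=[0, 5, 10],
                ticktext=["low", "mid", "high"],
                ticks="outside",
                thickness=20,
            ),
        ),
    )
)
fig.show()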
python | has2k1__plotnine | plotnine/geoms/geom_col.py | {
"start": 75,
"end": 1187
} | class ____(geom_bar):
"""
Bar plot with base on the x-axis
{usage}
This is an alternate version of [](`~plotnine.geoms.geom_bar`) that maps
the height of bars to an existing variable in your data. If
you want the height of the bar to represent a count of cases,
use [](`~plotnine.geoms.geom_bar`).
Parameters
----------
{common_parameters}
just : float, default=0.5
How to align the column with respect to the axis breaks. The default
`0.5` aligns the center of the column with the break. `0` aligns the
left of the column with the break and `1` aligns the right of
the column with the break.
width : float, default=None
Bar width. If `None`{.py}, the width is set to
`90%` of the resolution of the data.
See Also
--------
plotnine.geom_bar
"""
REQUIRED_AES = {"x", "y"}
NON_MISSING_AES = {"xmin", "xmax", "ymin", "ymax"}
DEFAULT_PARAMS = {
"stat": "identity",
"position": "stack",
"na_rm": False,
"just": 0.5,
"width": None,
}
| geom_col |
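A minimal usage sketch for geom_col (toy data assumed): y maps directly to bar height, and `width`/`just` are the parameters documented above.

import pandas as pd
from plotnine import ggplot, aes, geom_col

# Assumed toy data frame for illustration only.
df = pd.DataFrame({"x": ["a", "b", "c"], "y": [3, 1, 2]})
p = ggplot(df, aes("x", "y")) + geom_col(width=0.6, just=0.5)
print(p)  # renders the plot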
python | has2k1__plotnine | plotnine/themes/elements/element_base.py | {
"start": 201,
"end": 832
} | class ____:
"""
Base class for all theme elements
"""
properties: dict[str, Any] # dict of the properties
def __init__(self):
self.properties = {"visible": True}
def __repr__(self) -> str:
"""
Element representation
"""
return f"{self.__class__.__name__}({self})"
def __str__(self) -> str:
"""
Element as string
"""
d = self.properties.copy()
del d["visible"]
return f"{d}"
def setup(self, theme: theme, themeable_name: str):
"""
Setup the theme_element before drawing
"""
| element_base |
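A hypothetical subclass sketch showing how element_base is meant to be extended; the subclass name and its `color` property are invented for illustration and are not part of plotnine.

class element_demo(element_base):
    # Hypothetical element: layers one styling property onto the base dict.
    def __init__(self, color=None):
        super().__init__()
        if color is not None:
            self.properties["color"] = color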
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclassTransform3.py | {
"start": 2474,
"end": 2520
} | class ____:
not_a_field: str
| ModelBaseFrozen |
python | Pylons__pyramid | tests/test_util.py | {
"start": 28840,
"end": 29920
} | class ____(unittest.TestCase):
def _callFUT(self, *args, **kw):
from pyramid.util import make_contextmanager
return make_contextmanager(*args, **kw)
def test_with_None(self):
mgr = self._callFUT(None)
with mgr() as ctx:
self.assertIsNone(ctx)
def test_with_generator(self):
def mygen(ctx):
yield ctx
mgr = self._callFUT(mygen)
with mgr('a') as ctx:
self.assertEqual(ctx, 'a')
def test_with_multiple_yield_generator(self):
def mygen():
yield 'a'
yield 'b'
mgr = self._callFUT(mygen)
try:
with mgr() as ctx:
self.assertEqual(ctx, 'a')
except RuntimeError:
pass
else: # pragma: no cover
raise AssertionError('expected raise from multiple yields')
def test_with_regular_fn(self):
def mygen():
return 'a'
mgr = self._callFUT(mygen)
with mgr() as ctx:
self.assertEqual(ctx, 'a')
| Test_make_contextmanager |
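A hedged re-implementation sketch of the contract these tests pin down (not necessarily Pyramid's actual source): None becomes a no-op manager yielding None, a generator function must yield exactly once, and a plain function's return value is yielded.

import contextlib
import inspect

def make_contextmanager_sketch(fn):
    if fn is None:
        @contextlib.contextmanager
        def noop(*args, **kwargs):
            yield None
        return noop
    if inspect.isgeneratorfunction(fn):
        # contextlib raises RuntimeError if the generator yields more than once.
        return contextlib.contextmanager(fn)
    @contextlib.contextmanager
    def wrapper(*args, **kwargs):
        yield fn(*args, **kwargs)
    return wrapper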
python | scrapy__scrapy | tests/test_scheduler.py | {
"start": 2789,
"end": 3753
} | class ____(ABC):
jobdir = None
@property
@abstractmethod
def priority_queue_cls(self) -> str:
raise NotImplementedError
def create_scheduler(self):
self.mock_crawler = MockCrawler(self.priority_queue_cls, self.jobdir)
self.scheduler = Scheduler.from_crawler(self.mock_crawler)
self.spider = Spider(name="spider")
self.scheduler.open(self.spider)
def close_scheduler(self):
self.scheduler.close("finished")
_schedule_coro(self.mock_crawler.stop_async())
self.mock_crawler.engine.downloader.close()
def setup_method(self):
self.create_scheduler()
def teardown_method(self):
self.close_scheduler()
_PRIORITIES = [
("http://foo.com/a", -2),
("http://foo.com/d", 1),
("http://foo.com/b", -1),
("http://foo.com/c", 0),
("http://foo.com/e", 2),
]
_URLS = {"http://foo.com/a", "http://foo.com/b", "http://foo.com/c"}
| SchedulerHandler |
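A hedged concrete-subclass sketch satisfying the abstract property above; the dotted path is Scrapy's default in-memory priority queue and is an assumption here.

class MemorySchedulerHandler(SchedulerHandler):
    @property
    def priority_queue_cls(self) -> str:
        # Assumed default from Scrapy's SCHEDULER_PRIORITY_QUEUE setting.
        return "scrapy.pqueues.ScrapyPriorityQueue"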
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 324814,
"end": 325945
} | class ____(Response):
"""
Response of tasks.make_public endpoint.
:param updated: Number of tasks updated
:type updated: int
"""
_service = "tasks"
_action = "make_public"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"updated": {
"description": "Number of tasks updated",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None:
super(MakePublicResponse, self).__init__(**kwargs)
self.updated = updated
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
| MakePublicResponse |
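A trivial usage sketch grounded only in the class shown above: the validating setter coerces integral floats and allows clearing with None.

resp = MakePublicResponse(updated=3.0)
assert resp.updated == 3   # 3.0 is an integral float, coerced to int by the setter
resp.updated = None        # clearing the property is allowed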
python | realpython__materials | qt-designer-python/sample_editor/app.py | {
"start": 160,
"end": 913
} | class ____(QMainWindow, Ui_MainWindow):
def __init__(self, parent=None):
super().__init__(parent)
self.setupUi(self)
self.connectSignalsSlots()
def connectSignalsSlots(self):
self.action_Exit.triggered.connect(self.close)
self.action_Find_Replace.triggered.connect(self.findAndReplace)
self.action_About.triggered.connect(self.about)
def findAndReplace(self):
dialog = FindReplaceDialog(self)
dialog.exec()
def about(self):
QMessageBox.about(
self,
"About Sample Editor",
"<p>A sample text editor app built with:</p>"
"<p>- PyQt</p>"
"<p>- Qt Designer</p>"
"<p>- Python</p>",
)
| Window |
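A standard bootstrap sketch for the Window class above (assumes PyQt5 and that Ui_MainWindow comes from a pyuic-generated module, as is typical for this sample app):

import sys
from PyQt5.QtWidgets import QApplication

if __name__ == "__main__":
    app = QApplication(sys.argv)
    win = Window()
    win.show()
    sys.exit(app.exec())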
python | PyCQA__bandit | tests/unit/cli/test_baseline.py | {
"start": 438,
"end": 10428
} | class ____(testtools.TestCase):
@classmethod
def setUpClass(cls):
# Set up prior to running test class
# read in content used for temporary file contents
with open("examples/mktemp.py") as fd:
cls.temp_file_contents = fd.read()
def setUp(self):
# Set up prior to run each test case
super().setUp()
self.current_directory = os.getcwd()
def tearDown(self):
# Tear down after running each test case
super().tearDown()
os.chdir(self.current_directory)
def test_bandit_baseline(self):
# Tests running bandit via the CLI (baseline) with benign and malicious
# content
repo_directory = self.useFixture(fixtures.TempDir()).path
# get benign and findings examples
with open("examples/okay.py") as fd:
benign_contents = fd.read()
with open("examples/os_system.py") as fd:
malicious_contents = fd.read()
contents = {
"benign_one.py": benign_contents,
"benign_two.py": benign_contents,
"malicious.py": malicious_contents,
}
# init git repo, change directory to it
git_repo = git.Repo.init(repo_directory)
git_repo.index.commit("Initial commit")
os.chdir(repo_directory)
with open("bandit.yaml", "w") as fd:
fd.write(config)
# create three branches, first has only benign, second adds malicious,
# third adds benign
branches = [
{
"name": "benign1",
"files": ["benign_one.py"],
"expected_return": 0,
},
{
"name": "malicious",
"files": ["benign_one.py", "malicious.py"],
"expected_return": 1,
},
{
"name": "benign2",
"files": ["benign_one.py", "malicious.py", "benign_two.py"],
"expected_return": 0,
},
]
baseline_command = [
"bandit-baseline",
"-c",
"bandit.yaml",
"-r",
".",
"-p",
"test",
]
for branch in branches:
branch["branch"] = git_repo.create_head(branch["name"])
git_repo.head.reference = branch["branch"]
git_repo.head.reset(working_tree=True)
for f in branch["files"]:
with open(f, "w") as fd:
fd.write(contents[f])
git_repo.index.add(branch["files"])
git_repo.index.commit(branch["name"])
self.assertEqual(
branch["expected_return"], subprocess.call(baseline_command)
)
def test_main_non_repo(self):
# Test that bandit gracefully exits when there is no git repository
# when calling main
repo_dir = self.useFixture(fixtures.TempDir()).path
os.chdir(repo_dir)
# assert the system exits with code 2
self.assertRaisesRegex(SystemExit, "2", baseline.main)
def test_main_git_command_failure(self):
# Test that bandit does not run when the Git command fails
repo_directory = self.useFixture(fixtures.TempDir()).path
git_repo = git.Repo.init(repo_directory)
git_repo.index.commit("Initial Commit")
os.chdir(repo_directory)
additional_content = "additional_file.py"
with open(additional_content, "w") as fd:
fd.write(self.temp_file_contents)
git_repo.index.add([additional_content])
git_repo.index.commit("Additional Content")
with mock.patch("git.Repo.commit") as mock_git_repo_commit:
mock_git_repo_commit.side_effect = git.exc.GitCommandError(
"commit", ""
)
# assert the system exits with code 2
self.assertRaisesRegex(SystemExit, "2", baseline.main)
def test_main_no_parent_commit(self):
# Test that bandit exits when there is no parent commit detected when
# calling main
repo_directory = self.useFixture(fixtures.TempDir()).path
git_repo = git.Repo.init(repo_directory)
git_repo.index.commit("Initial Commit")
os.chdir(repo_directory)
# assert the system exits with code 2
self.assertRaisesRegex(SystemExit, "2", baseline.main)
def test_main_subprocess_error(self):
# Test that bandit handles a CalledProcessError when attempting to run
# bandit baseline via a subprocess
repo_directory = self.useFixture(fixtures.TempDir()).path
git_repo = git.Repo.init(repo_directory)
git_repo.index.commit("Initial Commit")
os.chdir(repo_directory)
additional_content = "additional_file.py"
with open(additional_content, "w") as fd:
fd.write(self.temp_file_contents)
git_repo.index.add([additional_content])
git_repo.index.commit("Additional Content")
with mock.patch("subprocess.check_output") as mock_check_output:
mock_bandit_cmd = "bandit_mock -b temp_file.txt"
mock_check_output.side_effect = subprocess.CalledProcessError(
"3", mock_bandit_cmd
)
# assert the system exits with code 3 (returned from
# CalledProcessError)
self.assertRaisesRegex(SystemExit, "3", baseline.main)
def test_init_logger(self):
# Test whether the logger was initialized when calling init_logger
baseline.init_logger()
logger = baseline.LOG
# verify that logger was initialized
self.assertIsNotNone(logger)
def test_initialize_no_repo(self):
# Test that bandit does not run when there is no current git
# repository when calling initialize
repo_directory = self.useFixture(fixtures.TempDir()).path
os.chdir(repo_directory)
return_value = baseline.initialize()
# assert bandit did not run due to no git repo
self.assertEqual((None, None, None), return_value)
def test_initialize_git_command_failure(self):
# Test that bandit does not run when the Git command fails
repo_directory = self.useFixture(fixtures.TempDir()).path
git_repo = git.Repo.init(repo_directory)
git_repo.index.commit("Initial Commit")
os.chdir(repo_directory)
additional_content = "additional_file.py"
with open(additional_content, "w") as fd:
fd.write(self.temp_file_contents)
git_repo.index.add([additional_content])
git_repo.index.commit("Additional Content")
with mock.patch("git.Repo") as mock_git_repo:
mock_git_repo.side_effect = git.exc.GitCommandNotFound("clone", "")
return_value = baseline.initialize()
# assert bandit did not run due to git command failure
self.assertEqual((None, None, None), return_value)
def test_initialize_dirty_repo(self):
# Test that bandit does not run when the current git repository is
# 'dirty' when calling the initialize method
repo_directory = self.useFixture(fixtures.TempDir()).path
git_repo = git.Repo.init(repo_directory)
git_repo.index.commit("Initial Commit")
os.chdir(repo_directory)
# make the git repo 'dirty'
with open("dirty_file.py", "w") as fd:
fd.write(self.temp_file_contents)
git_repo.index.add(["dirty_file.py"])
return_value = baseline.initialize()
# assert bandit did not run due to dirty repo
self.assertEqual((None, None, None), return_value)
@mock.patch("sys.argv", ["bandit", "-f", "txt", "test"])
def test_initialize_existing_report_file(self):
# Test that bandit does not run when the output file exists (and the
# provided output format does not match the default format) when
# calling the initialize method
repo_directory = self.useFixture(fixtures.TempDir()).path
git_repo = git.Repo.init(repo_directory)
git_repo.index.commit("Initial Commit")
os.chdir(repo_directory)
# create an existing version of output report file
existing_report = f"{baseline.report_basename}.txt"
with open(existing_report, "w") as fd:
fd.write(self.temp_file_contents)
return_value = baseline.initialize()
# assert bandit did not run due to existing report file
self.assertEqual((None, None, None), return_value)
@mock.patch(
"bandit.cli.baseline.bandit_args", ["-o", "bandit_baseline_result"]
)
def test_initialize_with_output_argument(self):
# Test that bandit does not run when the '-o' (output) argument is
# specified
repo_directory = self.useFixture(fixtures.TempDir()).path
git_repo = git.Repo.init(repo_directory)
git_repo.index.commit("Initial Commit")
os.chdir(repo_directory)
return_value = baseline.initialize()
# assert bandit did not run due to provided -o (--output) argument
self.assertEqual((None, None, None), return_value)
def test_initialize_existing_temp_file(self):
# Test that bandit does not run when the temporary output file exists
# when calling the initialize method
repo_directory = self.useFixture(fixtures.TempDir()).path
git_repo = git.Repo.init(repo_directory)
git_repo.index.commit("Initial Commit")
os.chdir(repo_directory)
# create an existing version of temporary output file
existing_temp_file = baseline.baseline_tmp_file
with open(existing_temp_file, "w") as fd:
fd.write(self.temp_file_contents)
return_value = baseline.initialize()
# assert bandit did not run due to existing temporary report file
self.assertEqual((None, None, None), return_value)
| BanditBaselineToolTests |
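A condensed sketch of the workflow these tests exercise (repo layout and config assumed): run bandit-baseline inside a git checkout; exit code 0 means no new findings relative to the parent commit.

import subprocess

# Mirrors `baseline_command` in the tests above; run from the repo root.
rc = subprocess.call(
    ["bandit-baseline", "-c", "bandit.yaml", "-r", ".", "-p", "test"]
)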
python | apache__airflow | airflow-core/src/airflow/models/trigger.py | {
"start": 2398,
"end": 19274
} | class ____(Base):
"""
Base Trigger class.
Triggers are a workload that run in an asynchronous event loop shared with
other Triggers, and fire off events that will unpause deferred Tasks,
start linked DAGs, etc.
They are persisted into the database and then re-hydrated into a
"triggerer" process, where many are run at once. We model it so that
there is a many-to-one relationship between Task and Trigger, for future
deduplication logic to use.
Rows will be evicted from the database when the triggerer detects no
active Tasks/DAGs using them. Events are not stored in the database;
when an Event is fired, the triggerer will directly push its data to the
appropriate Task/DAG.
"""
__tablename__ = "trigger"
id: Mapped[int] = mapped_column(Integer, primary_key=True)
classpath: Mapped[str] = mapped_column(String(1000), nullable=False)
encrypted_kwargs: Mapped[str] = mapped_column("kwargs", Text, nullable=False)
created_date: Mapped[datetime.datetime] = mapped_column(UtcDateTime, nullable=False)
triggerer_id: Mapped[int | None] = mapped_column(Integer, nullable=True)
triggerer_job = relationship(
"Job",
primaryjoin="Job.id == Trigger.triggerer_id",
foreign_keys=triggerer_id,
uselist=False,
)
task_instance = relationship("TaskInstance", back_populates="trigger", lazy="selectin", uselist=False)
asset_watchers = relationship("AssetWatcherModel", back_populates="trigger")
assets = association_proxy("asset_watchers", "asset")
callback = relationship("Callback", back_populates="trigger", uselist=False)
def __init__(
self,
classpath: str,
kwargs: dict[str, Any],
created_date: datetime.datetime | None = None,
) -> None:
super().__init__()
self.classpath = classpath
self.encrypted_kwargs = self.encrypt_kwargs(kwargs)
self.created_date = created_date or timezone.utcnow()
@property
def kwargs(self) -> dict[str, Any]:
"""Return the decrypted kwargs of the trigger."""
return self._decrypt_kwargs(self.encrypted_kwargs)
@kwargs.setter
def kwargs(self, kwargs: dict[str, Any]) -> None:
"""Set the encrypted kwargs of the trigger."""
self.encrypted_kwargs = self.encrypt_kwargs(kwargs)
@staticmethod
def encrypt_kwargs(kwargs: dict[str, Any]) -> str:
"""Encrypt the kwargs of the trigger."""
import json
from airflow.models.crypto import get_fernet
from airflow.serialization.serialized_objects import BaseSerialization
serialized_kwargs = BaseSerialization.serialize(kwargs)
return get_fernet().encrypt(json.dumps(serialized_kwargs).encode("utf-8")).decode("utf-8")
@staticmethod
def _decrypt_kwargs(encrypted_kwargs: str) -> dict[str, Any]:
"""Decrypt the kwargs of the trigger."""
import json
from airflow.models.crypto import get_fernet
from airflow.serialization.serialized_objects import BaseSerialization
# We weren't able to encrypt the kwargs in all migration paths,
# so we need to handle the case where they are not encrypted.
# Triggers aren't long lasting, so we can skip encrypting them now.
if encrypted_kwargs.startswith("{"):
decrypted_kwargs = json.loads(encrypted_kwargs)
else:
decrypted_kwargs = json.loads(
get_fernet().decrypt(encrypted_kwargs.encode("utf-8")).decode("utf-8")
)
return BaseSerialization.deserialize(decrypted_kwargs)
def rotate_fernet_key(self):
"""Encrypts data with a new key. See: :ref:`security/fernet`."""
from airflow.models.crypto import get_fernet
self.encrypted_kwargs = get_fernet().rotate(self.encrypted_kwargs.encode("utf-8")).decode("utf-8")
@classmethod
def from_object(cls, trigger: BaseTrigger) -> Trigger:
"""Alternative constructor that creates a trigger row based directly off of a Trigger object."""
classpath, kwargs = trigger.serialize()
return cls(classpath=classpath, kwargs=kwargs)
@classmethod
@provide_session
def bulk_fetch(cls, ids: Iterable[int], session: Session = NEW_SESSION) -> dict[int, Trigger]:
"""Fetch all the Triggers by ID and return a dict mapping ID -> Trigger instance."""
stmt = (
select(cls)
.where(cls.id.in_(ids))
.options(
selectinload(cls.task_instance)
.joinedload(TaskInstance.trigger)
.joinedload(Trigger.triggerer_job)
)
)
return {obj.id: obj for obj in session.scalars(stmt)}
@classmethod
@provide_session
def fetch_trigger_ids_with_non_task_associations(cls, session: Session = NEW_SESSION) -> set[str]:
"""Fetch all trigger IDs actively associated with non-task entities like assets and callbacks."""
query = select(AssetWatcherModel.trigger_id).union_all(
select(Callback.trigger_id).where(Callback.trigger_id.is_not(None))
)
return set(session.scalars(query))
@classmethod
@provide_session
def clean_unused(cls, session: Session = NEW_SESSION) -> None:
"""
Delete all triggers that have no tasks dependent on them and are not associated to an asset.
Triggers have a one-to-many relationship to task instances, so we need to clean those up first.
Afterward we can drop the triggers not referenced by anyone.
"""
# Update all task instances with trigger IDs that are not DEFERRED to remove them
for attempt in run_with_db_retries():
with attempt:
session.execute(
update(TaskInstance)
.where(
TaskInstance.state != TaskInstanceState.DEFERRED, TaskInstance.trigger_id.is_not(None)
)
.values(trigger_id=None)
)
# Get all triggers that have no task instances, assets, or callbacks depending on them and delete them
ids = (
select(cls.id)
.where(~cls.assets.any(), ~cls.callback.has())
.join(TaskInstance, cls.id == TaskInstance.trigger_id, isouter=True)
.group_by(cls.id)
.having(func.count(TaskInstance.trigger_id) == 0)
)
if get_dialect_name(session) == "mysql":
# MySQL doesn't support DELETE with JOIN, so we need to do it in two steps
ids_list = list(session.scalars(ids).all())
session.execute(
delete(Trigger).where(Trigger.id.in_(ids_list)).execution_options(synchronize_session=False)
)
else:
session.execute(
delete(Trigger).where(Trigger.id.in_(ids)).execution_options(synchronize_session=False)
)
@classmethod
@provide_session
def submit_event(cls, trigger_id, event: TriggerEvent, session: Session = NEW_SESSION) -> None:
"""
Fire an event.
Resume all tasks that were in deferred state.
Send an event to all assets associated to the trigger.
"""
# Resume deferred tasks
for task_instance in session.scalars(
select(TaskInstance).where(
TaskInstance.trigger_id == trigger_id, TaskInstance.state == TaskInstanceState.DEFERRED
)
):
handle_event_submit(event, task_instance=task_instance, session=session)
# Send an event to assets
trigger = session.scalars(select(cls).where(cls.id == trigger_id)).one_or_none()
if trigger is None:
# Already deleted for some reason
return
for asset in trigger.assets:
AssetManager.register_asset_change(
asset=asset.to_public(),
extra={"from_trigger": True, "payload": event.payload},
session=session,
)
if trigger.callback:
trigger.callback.handle_event(event, session)
@classmethod
@provide_session
def submit_failure(cls, trigger_id, exc=None, session: Session = NEW_SESSION) -> None:
"""
When a trigger has failed unexpectedly, mark everything that depended on it as failed.
Notably, we have to actually run the failure code from a worker as it may
have linked callbacks, so hilariously we have to re-schedule the task
instances to a worker just so they can then fail.
We use a special __fail__ value for next_method to achieve this, one that
the runtime code understands as immediate-fail, and pack the error into
next_kwargs.
"""
for task_instance in session.scalars(
select(TaskInstance).where(
TaskInstance.trigger_id == trigger_id, TaskInstance.state == TaskInstanceState.DEFERRED
)
):
# Add the error and set the next_method to the fail state
if isinstance(exc, BaseException):
traceback = format_exception(type(exc), exc, exc.__traceback__)
else:
traceback = exc
task_instance.next_method = TRIGGER_FAIL_REPR
task_instance.next_kwargs = {
"error": TriggerFailureReason.TRIGGER_FAILURE,
"traceback": traceback,
}
# Remove ourselves as its trigger
task_instance.trigger_id = None
# Finally, mark it as scheduled so it gets re-queued
task_instance.state = TaskInstanceState.SCHEDULED
task_instance.scheduled_dttm = timezone.utcnow()
@classmethod
@provide_session
def ids_for_triggerer(cls, triggerer_id, session: Session = NEW_SESSION) -> list[int]:
"""Retrieve a list of trigger ids."""
return list(session.scalars(select(cls.id).where(cls.triggerer_id == triggerer_id)).all())
@classmethod
@provide_session
def assign_unassigned(
cls, triggerer_id, capacity, health_check_threshold, session: Session = NEW_SESSION
) -> None:
"""
Assign unassigned triggers based on a number of conditions.
Takes a triggerer_id, the capacity for that triggerer and the Triggerer job heartrate
health check threshold, and assigns unassigned triggers until that capacity is reached,
or there are no more unassigned triggers.
"""
from airflow.jobs.job import Job # To avoid circular import
count = session.scalar(select(func.count(cls.id)).filter(cls.triggerer_id == triggerer_id))
capacity -= count
if capacity <= 0:
log.info(
"Triggerer %s has reached the maximum capacity triggers assigned (%d). Not assigning any more triggers",
triggerer_id,
count,
)
return
alive_triggerer_ids = select(Job.id).where(
Job.end_date.is_(None),
Job.latest_heartbeat > timezone.utcnow() - datetime.timedelta(seconds=health_check_threshold),
Job.job_type == "TriggererJob",
)
# Find triggers who do NOT have an alive triggerer_id, and then assign
# up to `capacity` of those to us.
trigger_ids_query = cls.get_sorted_triggers(
capacity=capacity, alive_triggerer_ids=alive_triggerer_ids, session=session
)
if trigger_ids_query:
session.execute(
update(cls)
.where(cls.id.in_([i[0] for i in trigger_ids_query]))
.values(triggerer_id=triggerer_id)
.execution_options(synchronize_session=False)
)
session.commit()
@classmethod
def get_sorted_triggers(cls, capacity: int, alive_triggerer_ids: list[int] | Select, session: Session):
"""
Get sorted triggers based on capacity and alive triggerer ids.
:param capacity: The capacity of the triggerer.
:param alive_triggerer_ids: The alive triggerer ids as a list or a select query.
:param session: The database session.
"""
result: list[Row[Any]] = []
# Add triggers associated to callbacks first, then tasks, then assets
# It prioritizes callbacks, then DAGs over event driven scheduling which is fair
queries = [
# Callback triggers
select(cls.id)
.join(Callback, isouter=False)
.order_by(Callback.priority_weight.desc(), cls.created_date),
# Task Instance triggers
select(cls.id)
.prefix_with("STRAIGHT_JOIN", dialect="mysql")
.join(TaskInstance, cls.id == TaskInstance.trigger_id, isouter=False)
.where(or_(cls.triggerer_id.is_(None), cls.triggerer_id.not_in(alive_triggerer_ids)))
.order_by(coalesce(TaskInstance.priority_weight, 0).desc(), cls.created_date),
# Asset triggers
select(cls.id).where(cls.assets.any()).order_by(cls.created_date),
]
# Process each query while avoiding unnecessary queries when capacity is reached
for query in queries:
remaining_capacity = capacity - len(result)
if remaining_capacity <= 0:
break
locked_query = with_row_locks(query.limit(remaining_capacity), session, skip_locked=True)
result.extend(session.execute(locked_query).all())
return result
@singledispatch
def handle_event_submit(event: TriggerEvent, *, task_instance: TaskInstance, session: Session) -> None:
"""
Handle the submit event for a given task instance.
This function sets the next method and next kwargs of the task instance,
as well as its state to scheduled. It also adds the event's payload
into the kwargs for the task.
:param task_instance: The task instance to handle the submit event for.
:param session: The session to be used for the database callback sink.
"""
from airflow.utils.state import TaskInstanceState
# Get the next kwargs of the task instance, or an empty dictionary if it doesn't exist
next_kwargs = task_instance.next_kwargs or {}
# Add the event's payload into the kwargs for the task
next_kwargs["event"] = event.payload
# Update the next kwargs of the task instance
task_instance.next_kwargs = next_kwargs
# Remove ourselves as its trigger
task_instance.trigger_id = None
# Set the state of the task instance to scheduled
task_instance.state = TaskInstanceState.SCHEDULED
task_instance.scheduled_dttm = timezone.utcnow()
session.flush()
@handle_event_submit.register
def _(event: BaseTaskEndEvent, *, task_instance: TaskInstance, session: Session) -> None:
"""
Submit event for the given task instance.
Marks the task with the state `task_instance_state` and optionally pushes xcom if applicable.
:param task_instance: The task instance to be submitted.
:param session: The session to be used for the database callback sink.
"""
from airflow.callbacks.callback_requests import TaskCallbackRequest
from airflow.callbacks.database_callback_sink import DatabaseCallbackSink
from airflow.utils.state import TaskInstanceState
# Mark the task with terminal state and prevent it from resuming on worker
task_instance.trigger_id = None
task_instance.set_state(event.task_instance_state, session=session)
def _submit_callback_if_necessary() -> None:
"""Submit a callback request if the task state is SUCCESS or FAILED."""
if event.task_instance_state in (TaskInstanceState.SUCCESS, TaskInstanceState.FAILED):
if task_instance.dag_model.relative_fileloc is None:
raise RuntimeError("relative_fileloc should not be None for a finished task")
request = TaskCallbackRequest(
filepath=task_instance.dag_model.relative_fileloc,
ti=task_instance,
task_callback_type=event.task_instance_state,
bundle_name=task_instance.dag_model.bundle_name,
bundle_version=task_instance.dag_run.bundle_version,
)
log.info("Sending callback: %s", request)
try:
DatabaseCallbackSink().send(callback=request, session=session)
except Exception:
log.exception("Failed to send callback.")
def _push_xcoms_if_necessary() -> None:
"""Pushes XComs to the database if they are provided."""
if event.xcoms:
for key, value in event.xcoms.items():
task_instance.xcom_push(key=key, value=value)
_submit_callback_if_necessary()
_push_xcoms_if_necessary()
session.flush()
| Trigger |
python | eventlet__eventlet | eventlet/convenience.py | {
"start": 823,
"end": 3117
} | class ____(Warning):
pass
def listen(addr, family=socket.AF_INET, backlog=50, reuse_addr=True, reuse_port=None):
"""Convenience function for opening server sockets. This
socket can be used in :func:`~eventlet.serve` or a custom ``accept()`` loop.
Sets SO_REUSEADDR on the socket to save on annoyance.
:param addr: Address to listen on. For TCP sockets, this is a (host, port) tuple.
:param family: Socket family, optional. See :mod:`socket` documentation for available families.
:param backlog:
The maximum number of queued connections. Should be at least 1; the maximum
value is system-dependent.
:return: The listening green socket object.
"""
sock = socket.socket(family, socket.SOCK_STREAM)
if reuse_addr and sys.platform[:3] != 'win':
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if family in (socket.AF_INET, socket.AF_INET6) and addr[1] == 0:
if reuse_port:
warnings.warn(
'''listen on random port (0) with SO_REUSEPORT is dangerous.
Double check your intent.
Example problem: https://github.com/eventlet/eventlet/issues/411''',
ReuseRandomPortWarning, stacklevel=3)
elif reuse_port is None:
reuse_port = True
if reuse_port and hasattr(socket, 'SO_REUSEPORT'):
# NOTE(zhengwei): linux kernel >= 3.9
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
# OSError is enough on Python 3+
except OSError as ex:
if support.get_errno(ex) in (22, 92):
# A famous platform defines unsupported socket option.
# https://github.com/eventlet/eventlet/issues/380
# https://github.com/eventlet/eventlet/issues/418
warnings.warn(
'''socket.SO_REUSEPORT is defined but not supported.
On Windows: known bug, wontfix.
On other systems: please comment in the issue linked below.
More information: https://github.com/eventlet/eventlet/issues/380''',
ReusePortUnavailableWarning, stacklevel=3)
sock.bind(addr)
sock.listen(backlog)
return sock
| ReusePortUnavailableWarning |
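For context, a minimal usage sketch of the `listen` helper shown above, assuming the eventlet package is installed; the address, port, and echo handler are made-up illustrations, not part of the dataset row:

# Minimal sketch, assuming eventlet is installed; address and handler are made up.
import eventlet

server = eventlet.listen(('127.0.0.1', 8090))  # SO_REUSEADDR is set for us

def echo(client):
    # Read one chunk, send it straight back, then close the connection.
    data = client.recv(1024)
    client.sendall(data)
    client.close()

eventlet.serve(server, echo)  # blocks, handling each connection in a greenthread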
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 49196,
"end": 50055
} | class ____(PrefectFilterBaseModel):
"""Filter by `Log.task_run_id`."""
any_: Optional[list[UUID]] = Field(
default=None, description="A list of task run IDs to include"
)
is_null_: Optional[bool] = Field(
default=None,
description="If true, only include logs without a task run id",
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.any_ is not None:
filters.append(db.Log.task_run_id.in_(self.any_))
if self.is_null_ is not None:
filters.append(
db.Log.task_run_id.is_(None)
if self.is_null_
else db.Log.task_run_id.is_not(None)
)
return filters
| LogFilterTaskRunId |
python | rushter__MLAlgorithms | mla/neuralnet/optimizers.py | {
"start": 3163,
"end": 3934
} | class ____(Optimizer):
def __init__(self, learning_rate=0.01, epsilon=1e-8):
self.eps = epsilon
self.lr = learning_rate
def update(self, network):
for i, layer in enumerate(network.parametric_layers):
for n in layer.parameters.keys():
grad = layer.parameters.grad[n]
self.accu[i][n] += grad**2
step = self.lr * grad / (np.sqrt(self.accu[i][n]) + self.eps)
layer.parameters.step(n, -step)
def setup(self, network):
# Accumulators
self.accu = defaultdict(dict)
for i, layer in enumerate(network.parametric_layers):
for n in layer.parameters.keys():
self.accu[i][n] = np.zeros_like(layer.parameters[n])
| Adagrad |
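The `update` method above implements the standard Adagrad rule: accumulate squared gradients per parameter and divide each step by their square root. A self-contained sketch of the same rule on a toy quadratic, assuming only NumPy (the network plumbing from the snippet is omitted):

# Minimal sketch of the Adagrad rule above, minimizing f(w) = w^2.
import numpy as np

lr, eps = 0.5, 1e-8
w = np.array([5.0])
accu = np.zeros_like(w)           # accumulated squared gradients
for _ in range(50):
    grad = 2 * w                  # d/dw of w^2
    accu += grad ** 2
    w -= lr * grad / (np.sqrt(accu) + eps)
print(w)                          # decays toward 0, with ever-smaller steps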
python | tensorflow__tensorflow | tensorflow/python/keras/engine/compile_utils.py | {
"start": 3546,
"end": 10301
} | class ____(Container):
"""A container class for losses passed to `Model.compile`."""
def __init__(self, losses, loss_weights=None, output_names=None):
super(LossesContainer, self).__init__(output_names=output_names)
# Keep user-supplied values untouched for recompiling and serialization.
self._user_losses = losses
self._user_loss_weights = loss_weights
self._losses = losses
self._loss_weights = loss_weights
self._per_output_metrics = None # Per-output losses become metrics.
self._loss_metric = metrics_mod.Mean(name='loss') # Total loss.
self._built = False
@property
def metrics(self):
"""Per-output loss metrics."""
if not self._built:
return []
per_output_metrics = [
metric_obj for metric_obj in nest.flatten(self._per_output_metrics)
if metric_obj is not None
]
return [self._loss_metric] + per_output_metrics
def build(self, y_pred):
"""One-time setup of loss objects."""
super(LossesContainer, self).build(y_pred)
self._losses = self._maybe_broadcast_to_outputs(y_pred, self._losses)
self._losses = self._conform_to_outputs(y_pred, self._losses)
self._losses = nest.map_structure(self._get_loss_object, self._losses)
self._losses = nest.flatten(self._losses)
self._loss_weights = self._maybe_broadcast_to_outputs(
y_pred, self._loss_weights)
self._loss_weights = self._conform_to_outputs(y_pred, self._loss_weights)
self._loss_weights = nest.flatten(self._loss_weights)
self._create_metrics()
self._built = True
@property
def built(self):
return self._built
def _create_metrics(self):
"""Creates per-output loss metrics, but only for multi-output Models."""
if len(self._output_names) == 1:
self._per_output_metrics = [None]
else:
self._per_output_metrics = []
for loss_obj, output_name in zip(self._losses, self._output_names):
if loss_obj is None:
self._per_output_metrics.append(None)
else:
self._per_output_metrics.append(
metrics_mod.Mean(output_name + '_loss'))
def __call__(self,
y_true,
y_pred,
sample_weight=None,
regularization_losses=None):
"""Computes the overall loss.
Args:
y_true: An arbitrary structure of Tensors representing the ground truth.
y_pred: An arbitrary structure of Tensors representing a Model's outputs.
sample_weight: An arbitrary structure of Tensors representing the
per-sample loss weights. If one Tensor is passed, it is used for all
losses. If multiple Tensors are passed, the structure should match
`y_pred`.
regularization_losses: Additional losses to be added to the total loss.
Returns:
Tuple of `(total_loss, per_output_loss_list)`
"""
y_true = self._conform_to_outputs(y_pred, y_true)
sample_weight = self._conform_to_outputs(y_pred, sample_weight)
if not self._built:
self.build(y_pred)
y_pred = nest.flatten(y_pred)
y_true = nest.flatten(y_true)
sample_weight = nest.flatten(sample_weight)
loss_values = [] # Used for gradient calculation.
loss_metric_values = [] # Used for loss metric calculation.
batch_dim = None
zip_args = (y_true, y_pred, sample_weight, self._losses, self._loss_weights,
self._per_output_metrics)
for y_t, y_p, sw, loss_obj, loss_weight, metric_obj in zip(*zip_args):
if y_t is None or loss_obj is None: # Ok to have no loss for an output.
continue
y_t, y_p, sw = match_dtype_and_rank(y_t, y_p, sw)
sw = apply_mask(y_p, sw, get_mask(y_p))
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
loss_metric_value = loss_value
# Correct for the `Mean` loss metrics counting each replica as a batch.
if loss_obj.reduction == losses_utils.ReductionV2.SUM:
loss_metric_value *= distribute_lib.get_strategy().num_replicas_in_sync
if batch_dim is None:
if tf_utils.is_ragged(y_t):
batch_dim = y_t.nrows()
else:
batch_dim = array_ops.shape(y_t)[0]
if metric_obj is not None:
metric_obj.update_state(loss_metric_value, sample_weight=batch_dim)
if loss_weight is not None:
loss_value *= loss_weight
loss_metric_value *= loss_weight
if (loss_obj.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE or
loss_obj.reduction == losses_utils.ReductionV2.AUTO):
loss_value = losses_utils.scale_loss_for_distribution(loss_value)
loss_values.append(loss_value)
loss_metric_values.append(loss_metric_value)
if regularization_losses:
regularization_losses = losses_utils.cast_losses_to_common_dtype(
regularization_losses)
reg_loss = math_ops.add_n(regularization_losses)
loss_metric_values.append(reg_loss)
loss_values.append(losses_utils.scale_loss_for_distribution(reg_loss))
if loss_values:
loss_metric_values = losses_utils.cast_losses_to_common_dtype(
loss_metric_values)
total_loss_metric_value = math_ops.add_n(loss_metric_values)
self._loss_metric.update_state(
total_loss_metric_value, sample_weight=batch_dim)
loss_values = losses_utils.cast_losses_to_common_dtype(loss_values)
total_loss = math_ops.add_n(loss_values)
return total_loss
else:
# Ok for a model to have no compiled loss.
return array_ops.zeros(shape=())
def reset_state(self):
"""Resets the state of loss metrics."""
if not self._built:
return
metrics = [self._loss_metric] + nest.flatten(self._per_output_metrics)
for metric_obj in metrics:
if metric_obj is not None:
metric_obj.reset_state()
def _get_loss_object(self, loss):
"""Returns a `Loss` object.
Converts the user-supplied loss to a `Loss` object. Also allows
`SUM_OVER_BATCH_SIZE` reduction to be used for this loss.
Args:
loss: A string, function, or `Loss` object.
Returns:
A `Loss` object.
"""
if loss is None:
return None # Ok to have no loss for an output.
loss = losses_mod.get(loss)
if not isinstance(loss, losses_mod.Loss):
loss_name = get_custom_object_name(loss)
if loss_name is None:
raise ValueError('Loss should be a callable, found: {}'.format(loss))
loss = losses_mod.LossFunctionWrapper(loss, name=loss_name)
loss._allow_sum_over_batch_size = True # pylint: disable=protected-access
return loss
def _should_broadcast(self, obj):
return not nest.is_nested(obj)
def _copy_object(self, obj):
return obj # Losses don't need to be copied.
| LossesContainer |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/interfaces.py | {
"start": 11886,
"end": 12534
} | class ____(ReflectedConstraint):
"""Dictionary representing the reflected elements corresponding to
:class:`.UniqueConstraint`.
The :class:`.ReflectedUniqueConstraint` structure is returned by the
:meth:`.Inspector.get_unique_constraints` method.
"""
column_names: List[str]
"""column names which comprise the unique constraint"""
duplicates_index: NotRequired[Optional[str]]
"Indicates if this unique constraint duplicates an index with this name"
dialect_options: NotRequired[Dict[str, Any]]
"""Additional dialect-specific options detected for this unique
constraint"""
| ReflectedUniqueConstraint |
python | hynek__structlog | src/structlog/tracebacks.py | {
"start": 1197,
"end": 1366
} | class ____:
"""
Represents a single stack frame.
"""
filename: str
lineno: int
name: str
locals: dict[str, str] | None = None
@dataclass
| Frame |
python | getsentry__sentry | src/sentry/workflow_engine/handlers/condition/event_frequency_query_handlers.py | {
"start": 1840,
"end": 1960
} | class ____(Exception):
"""
Invalid filter snuba query condition for the issue type
"""
pass
| InvalidFilter |
python | spack__spack | lib/spack/spack/builder.py | {
"start": 24096,
"end": 25579
} | class ____(Builder):
"""Base class for all specific builders with common callbacks registered."""
# Check that self.prefix is there after installation
spack.phase_callbacks.run_after("install")(sanity_check_prefix)
def apply_macos_rpath_fixups(builder: Builder):
"""On Darwin, make installed libraries more easily relocatable.
Some build systems (handrolled, autotools, makefiles) can set their own rpaths that are
duplicated by spack's compiler wrapper. This fixup interrogates, and postprocesses if
necessary, all libraries installed by the code.
It should be added as a :func:`~spack.phase_callbacks.run_after` to packaging systems (or
individual packages) that do not install relocatable libraries by default.
Example::
run_after("install", when="platform=darwin")(apply_macos_rpath_fixups)
Args:
builder: builder that installed the package
"""
spack.relocate.fixup_macos_rpaths(builder.spec)
def execute_install_time_tests(builder: Builder):
"""Execute the install-time tests prescribed by builder.
Args:
builder: builder prescribing the test callbacks. The name of the callbacks is
stored as a list of strings in the ``install_time_test_callbacks`` attribute.
"""
if not builder.pkg.run_tests or not builder.install_time_test_callbacks:
return
builder.pkg.tester.phase_tests(builder, "install", builder.install_time_test_callbacks)
| BuilderWithDefaults |
python | ansible__ansible | test/lib/ansible_test/_util/target/setup/requirements.py | {
"start": 9600,
"end": 13735
} | class ____(ApplicationError):
"""A command returned a non-zero status."""
def __init__(self, cmd, status, stdout, stderr): # type: (t.List[str], int, str, str) -> None
message = 'A command failed with status %d: %s' % (status, shlex.join(cmd))
if stderr:
message += '\n>>> Standard Error\n%s' % stderr.strip()
if stdout:
message += '\n>>> Standard Output\n%s' % stdout.strip()
super(SubprocessError, self).__init__(message)
def log(message, verbosity=0): # type: (str, int) -> None
"""Log a message to the console if the verbosity is high enough."""
if verbosity > VERBOSITY:
return
print(message, file=CONSOLE)
CONSOLE.flush()
def execute_command(cmd, cwd=None, capture=False, env=None): # type: (t.List[str], t.Optional[str], bool, t.Optional[t.Dict[str, str]]) -> None
"""Execute the specified command."""
log('Execute command: %s' % shlex.join(cmd), verbosity=1)
cmd_bytes = [to_bytes(c) for c in cmd]
if capture:
stdout = subprocess.PIPE
stderr = subprocess.PIPE
else:
stdout = None
stderr = None
cwd_bytes = to_optional_bytes(cwd)
process = subprocess.Popen(cmd_bytes, cwd=cwd_bytes, stdin=devnull(), stdout=stdout, stderr=stderr, env=env) # pylint: disable=consider-using-with
stdout_bytes, stderr_bytes = process.communicate()
stdout_text = to_optional_text(stdout_bytes) or u''
stderr_text = to_optional_text(stderr_bytes) or u''
if process.returncode != 0:
raise SubprocessError(cmd, process.returncode, stdout_text, stderr_text)
def write_text_file(path, content, create_directories=False): # type: (str, str, bool) -> None
"""Write the given text content to the specified path, optionally creating missing directories."""
if create_directories:
make_dirs(os.path.dirname(path))
with open_binary_file(path, 'wb') as file_obj:
file_obj.write(to_bytes(content))
def remove_tree(path): # type: (str) -> None
"""Remove the specified directory tree."""
try:
shutil.rmtree(to_bytes(path))
except FileNotFoundError:
pass
def make_dirs(path): # type: (str) -> None
"""Create a directory at path, including any necessary parent directories."""
os.makedirs(to_bytes(path), exist_ok=True)
def open_binary_file(path, mode='rb'): # type: (str, str) -> t.IO[bytes]
"""Open the given path for binary access."""
if 'b' not in mode:
raise Exception('mode must include "b" for binary files: %s' % mode)
return io.open(to_bytes(path), mode) # pylint: disable=consider-using-with,unspecified-encoding
def to_optional_bytes(value, errors='strict'): # type: (t.Optional[str | bytes], str) -> t.Optional[bytes]
"""Return the given value as bytes encoded using UTF-8 if not already bytes, or None if the value is None."""
return None if value is None else to_bytes(value, errors)
def to_optional_text(value, errors='strict'): # type: (t.Optional[str | bytes], str) -> t.Optional[t.Text]
"""Return the given value as text decoded using UTF-8 if not already text, or None if the value is None."""
return None if value is None else to_text(value, errors)
def to_bytes(value, errors='strict'): # type: (str | bytes, str) -> bytes
"""Return the given value as bytes encoded using UTF-8 if not already bytes."""
if isinstance(value, bytes):
return value
if isinstance(value, Text):
return value.encode(ENCODING, errors)
raise Exception('value is not bytes or text: %s' % type(value))
def to_text(value, errors='strict'): # type: (str | bytes, str) -> t.Text
"""Return the given value as text decoded using UTF-8 if not already text."""
if isinstance(value, bytes):
return value.decode(ENCODING, errors)
if isinstance(value, Text):
return value
raise Exception('value is not bytes or text: %s' % type(value))
PAYLOAD = b'{payload}' # base-64 encoded JSON payload which will be populated before this script is executed
if __name__ == '__main__':
main()
| SubprocessError |
python | openai__openai-python | src/openai/types/realtime/mcp_list_tools_completed.py | {
"start": 198,
"end": 473
} | class ____(BaseModel):
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the MCP list tools item."""
type: Literal["mcp_list_tools.completed"]
"""The event type, must be `mcp_list_tools.completed`."""
| McpListToolsCompleted |
python | google__jax | tests/shape_poly_test.py | {
"start": 165222,
"end": 169003
} | class ____(jtu.JaxTestCase):
"""This test runs for all _POLY_SHAPE_PRIMITIVE_HARNESSES."""
def setUp(self):
_start_profile(self)
super().setUp()
def tearDown(self):
super().tearDown()
_stop_profile(self)
# For each primitive "xxx" the test will be called "test_harness_xxx_...".
# If you want to run this test for only one harness that includes "foo"
# in the name (after test_harness), add parameter `one_containing="foo"`
# to parameterized below.
@test_harnesses.parameterized(
_flatten_harnesses(_POLY_SHAPE_TEST_HARNESSES),
#one_containing="",
)
def test_harness(self, harness: PolyHarness):
# We do not expect the associative scan error on TPUs
if harness.expect_error == expect_error_associative_scan and jtu.test_device_matches(["tpu"]):
harness.expect_error = None
if harness.group_name == "schur" and not jtu.test_device_matches(["cpu"]):
raise unittest.SkipTest("schur decomposition is only implemented on CPU.")
if "fft_fft_type" in harness.fullname:
if "nr_fft_lengths_2" in harness.fullname:
raise unittest.SkipTest("native serialization with shape polymorphism not implemented for fft with non-constant fft_lengths on GPU and TPU")
if harness.group_name == "vmap_eigh":
raise unittest.SkipTest(
"Should not compare eigendecompositions for equality directly"
"because eigenvalues are sorted.")
if harness.group_name == "vmap_tan":
# Tan (b/274462307) require support for custom call stablehlo.tan.
raise unittest.SkipTest(
"native lowering with shape polymorphism requires additional StableHLO feature support")
if (jtu.test_device_matches(["cpu", "gpu"]) and
harness.fullname in [
"cumsum_reduce_axis_poly", "cumprod_reduce_axis_poly",
"cummin_reduce_axis_poly", "cummax_reduce_axis_poly",
"cumlogsumexp_reduce_axis_poly",
"jnp_insert_insert_constant", "jnp_insert_insert_poly",
"jnp_nonzero_size_constant", "jnp_nonzero_size_poly"]):
# Need associative scan reductions on CPU and GPU. On TPU we use the
# reduce_window HLO, but on CPU and GPU (with axis size >= 32) we use
# a recursive associative scan that we cannot express with shape
# polymorphism.
raise unittest.SkipTest(
"native serialization with shape polymorphism not implemented for window_reductions on CPU and GPU")
if harness.group_name == "vmap_conv_general_dilated":
# https://github.com/openxla/stablehlo/issues/1268
raise unittest.SkipTest("Need more dynamism for DynamicConvOp")
if harness.group_name == "eig" and not jtu.test_device_matches(["cpu"]):
raise unittest.SkipTest("JAX implements eig only on CPU.")
if (harness.group_name in ("eigh", "svd") and
not harness.polymorphic_shapes[0].endswith("...") and
jtu.test_device_matches(["tpu"])):
raise unittest.SkipTest(
"Shape polymorphism for Eigh and Svd is only supported for batch dimensions on TPU.")
config_flags = harness.override_jax_config_flags
# Update this here rather than in harness object because vmap_random_gamma is derived
# from test_harnesses.all_harnesses, which strips override_jax_config_flags.
if "random_gamma" in harness.group_name:
config_flags = {**config_flags, "jax_debug_key_reuse": False}
# TPU precision is a little lower since we swap the order of matmul operands.
if "cholesky" in harness.group_name and jtu.test_device_matches(["tpu"]):
harness.tol = 5e-5
with jtu.thread_local_config_context(**config_flags):
harness.run_test(self)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| ShapePolyHarnessesTest |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_length/invalid_length_hint_returned.py | {
"start": 1196,
"end": 1304
} | class ____:
""" Uninferable return value """
__length_hint__ = lambda self: Missing
| AmbigousLengthHint |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/resolvelib/reporters.py | {
"start": 0,
"end": 1601
} | class ____(object):
"""Delegate class to provider progress reporting for the resolver."""
def starting(self):
"""Called before the resolution actually starts."""
def starting_round(self, index):
"""Called before each round of resolution starts.
The index is zero-based.
"""
def ending_round(self, index, state):
"""Called before each round of resolution ends.
This is NOT called if the resolution ends at this round. Use `ending`
if you want to report finalization. The index is zero-based.
"""
def ending(self, state):
"""Called before the resolution ends successfully."""
def adding_requirement(self, requirement, parent):
"""Called when adding a new requirement into the resolve criteria.
:param requirement: The additional requirement to be applied to filter
the available candidaites.
:param parent: The candidate that requires ``requirement`` as a
dependency, or None if ``requirement`` is one of the root
requirements passed in from ``Resolver.resolve()``.
"""
def resolving_conflicts(self, causes):
"""Called when starting to attempt requirement conflict resolution.
:param causes: The information on the collision that caused the backtracking.
"""
def rejecting_candidate(self, criterion, candidate):
"""Called when rejecting a candidate during backtracking."""
def pinning(self, candidate):
"""Called when adding a candidate to the potential solution."""
| BaseReporter |
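Every hook on `BaseReporter` above is a no-op, so consumers override only the events they care about. A minimal sketch of such a subclass, assuming `BaseReporter` from the snippet is in scope; the logging choices are illustrative:

# Minimal sketch; assumes BaseReporter from the snippet above is in scope.
class LoggingReporter(BaseReporter):
    def starting_round(self, index):
        print("round %d starting" % index)

    def rejecting_candidate(self, criterion, candidate):
        print("rejected %r during backtracking" % (candidate,))

    def pinning(self, candidate):
        print("pinned %r" % (candidate,))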
python | cherrypy__cherrypy | cherrypy/test/test_tools.py | {
"start": 16785,
"end": 17218
} | class ____:
def test_priorities(self):
"""Hooks should sort by priority order."""
Hook = cherrypy._cprequest.Hook
hooks = [
Hook(None, priority=48),
Hook(None),
Hook(None, priority=49),
]
hooks.sort()
by_priority = operator.attrgetter('priority')
priorities = list(map(by_priority, hooks))
assert priorities == [48, 49, 50]
| TestHooks |
python | django__django | django/contrib/gis/gdal/envelope.py | {
"start": 969,
"end": 7311
} | class ____:
"""
The Envelope object is a C structure that contains the minimum and
maximum X, Y coordinates for a rectangle bounding box. The naming
of the variables is compatible with the OGR Envelope structure.
"""
def __init__(self, *args):
"""
The initialization function may take an OGREnvelope structure,
4-element tuple or list, or 4 individual arguments.
"""
if len(args) == 1:
if isinstance(args[0], OGREnvelope):
# OGREnvelope (a ctypes Structure) was passed in.
self._envelope = args[0]
elif isinstance(args[0], (tuple, list)):
# A tuple was passed in.
if len(args[0]) != 4:
raise GDALException(
"Incorrect number of tuple elements (%d)." % len(args[0])
)
else:
self._from_sequence(args[0])
else:
raise TypeError("Incorrect type of argument: %s" % type(args[0]))
elif len(args) == 4:
# Individual parameters passed in.
# Thanks to ww for the help
self._from_sequence([float(a) for a in args])
else:
raise GDALException("Incorrect number (%d) of arguments." % len(args))
# Checking the x,y coordinates
if self.min_x > self.max_x:
raise GDALException("Envelope minimum X > maximum X.")
if self.min_y > self.max_y:
raise GDALException("Envelope minimum Y > maximum Y.")
def __eq__(self, other):
"""
Return True if the envelopes are equivalent; can compare against
other Envelopes and 4-tuples.
"""
if isinstance(other, Envelope):
return (
(self.min_x == other.min_x)
and (self.min_y == other.min_y)
and (self.max_x == other.max_x)
and (self.max_y == other.max_y)
)
elif isinstance(other, tuple) and len(other) == 4:
return (
(self.min_x == other[0])
and (self.min_y == other[1])
and (self.max_x == other[2])
and (self.max_y == other[3])
)
else:
raise GDALException("Equivalence testing only works with other Envelopes.")
def __str__(self):
"Return a string representation of the tuple."
return str(self.tuple)
def _from_sequence(self, seq):
"Initialize the C OGR Envelope structure from the given sequence."
self._envelope = OGREnvelope()
self._envelope.MinX = seq[0]
self._envelope.MinY = seq[1]
self._envelope.MaxX = seq[2]
self._envelope.MaxY = seq[3]
def expand_to_include(self, *args):
"""
Modify the envelope to expand to include the boundaries of
the passed-in 2-tuple (a point), 4-tuple (an extent) or
envelope.
"""
# We provide a number of different signatures for this method,
# and the logic here is all about converting them into a
# 4-tuple single parameter which does the actual work of
# expanding the envelope.
if len(args) == 1:
if isinstance(args[0], Envelope):
return self.expand_to_include(args[0].tuple)
elif hasattr(args[0], "x") and hasattr(args[0], "y"):
return self.expand_to_include(
args[0].x, args[0].y, args[0].x, args[0].y
)
elif isinstance(args[0], (tuple, list)):
# A tuple was passed in.
if len(args[0]) == 2:
return self.expand_to_include(
(args[0][0], args[0][1], args[0][0], args[0][1])
)
elif len(args[0]) == 4:
(minx, miny, maxx, maxy) = args[0]
if minx < self._envelope.MinX:
self._envelope.MinX = minx
if miny < self._envelope.MinY:
self._envelope.MinY = miny
if maxx > self._envelope.MaxX:
self._envelope.MaxX = maxx
if maxy > self._envelope.MaxY:
self._envelope.MaxY = maxy
else:
raise GDALException(
"Incorrect number of tuple elements (%d)." % len(args[0])
)
else:
raise TypeError("Incorrect type of argument: %s" % type(args[0]))
elif len(args) == 2:
# An x and an y parameter were passed in
return self.expand_to_include((args[0], args[1], args[0], args[1]))
elif len(args) == 4:
# Individual parameters passed in.
return self.expand_to_include(args)
else:
raise GDALException("Incorrect number (%d) of arguments." % len(args[0]))
@property
def min_x(self):
"Return the value of the minimum X coordinate."
return self._envelope.MinX
@property
def min_y(self):
"Return the value of the minimum Y coordinate."
return self._envelope.MinY
@property
def max_x(self):
"Return the value of the maximum X coordinate."
return self._envelope.MaxX
@property
def max_y(self):
"Return the value of the maximum Y coordinate."
return self._envelope.MaxY
@property
def ur(self):
"Return the upper-right coordinate."
return (self.max_x, self.max_y)
@property
def ll(self):
"Return the lower-left coordinate."
return (self.min_x, self.min_y)
@property
def tuple(self):
"Return a tuple representing the envelope."
return (self.min_x, self.min_y, self.max_x, self.max_y)
@property
def wkt(self):
"Return WKT representing a Polygon for this envelope."
# TODO: Fix significant figures.
return "POLYGON((%s %s,%s %s,%s %s,%s %s,%s %s))" % (
self.min_x,
self.min_y,
self.min_x,
self.max_y,
self.max_x,
self.max_y,
self.max_x,
self.min_y,
self.min_x,
self.min_y,
)
| Envelope |
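A usage sketch of the `Envelope` API above, assuming Django with the GDAL library available; the coordinates are arbitrary:

# Usage sketch; assumes Django with the GDAL library available.
from django.contrib.gis.gdal.envelope import Envelope

env = Envelope(0.0, 0.0, 5.0, 5.0)   # min_x, min_y, max_x, max_y
env.expand_to_include((4.0, 9.0))    # a 2-tuple point grows the box
print(env.tuple)                     # (0.0, 0.0, 5.0, 9.0)
print(env == (0.0, 0.0, 5.0, 9.0))   # True: comparison accepts 4-tuples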
python | django-haystack__django-haystack | test_haystack/whoosh_tests/test_inputs.py | {
"start": 77,
"end": 3281
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.query_obj = connections["whoosh"].get_query()
def test_raw_init(self):
raw = inputs.Raw("hello OR there, :you")
self.assertEqual(raw.query_string, "hello OR there, :you")
self.assertEqual(raw.kwargs, {})
self.assertEqual(raw.post_process, False)
raw = inputs.Raw("hello OR there, :you", test="really")
self.assertEqual(raw.query_string, "hello OR there, :you")
self.assertEqual(raw.kwargs, {"test": "really"})
self.assertEqual(raw.post_process, False)
def test_raw_prepare(self):
raw = inputs.Raw("hello OR there, :you")
self.assertEqual(raw.prepare(self.query_obj), "hello OR there, :you")
def test_clean_init(self):
clean = inputs.Clean("hello OR there, :you")
self.assertEqual(clean.query_string, "hello OR there, :you")
self.assertEqual(clean.post_process, True)
def test_clean_prepare(self):
clean = inputs.Clean("hello OR there, :you")
self.assertEqual(clean.prepare(self.query_obj), "hello or there, ':you'")
def test_exact_init(self):
exact = inputs.Exact("hello OR there, :you")
self.assertEqual(exact.query_string, "hello OR there, :you")
self.assertEqual(exact.post_process, True)
def test_exact_prepare(self):
exact = inputs.Exact("hello OR there, :you")
self.assertEqual(exact.prepare(self.query_obj), '"hello OR there, :you"')
def test_not_init(self):
not_it = inputs.Not("hello OR there, :you")
self.assertEqual(not_it.query_string, "hello OR there, :you")
self.assertEqual(not_it.post_process, True)
def test_not_prepare(self):
not_it = inputs.Not("hello OR there, :you")
self.assertEqual(not_it.prepare(self.query_obj), "NOT (hello or there, ':you')")
def test_autoquery_init(self):
autoquery = inputs.AutoQuery('panic -don\'t "froody dude"')
self.assertEqual(autoquery.query_string, 'panic -don\'t "froody dude"')
self.assertEqual(autoquery.post_process, False)
def test_autoquery_prepare(self):
autoquery = inputs.AutoQuery('panic -don\'t "froody dude"')
self.assertEqual(
autoquery.prepare(self.query_obj), 'panic NOT don\'t "froody dude"'
)
def test_altparser_init(self):
altparser = inputs.AltParser("dismax")
self.assertEqual(altparser.parser_name, "dismax")
self.assertEqual(altparser.query_string, "")
self.assertEqual(altparser.kwargs, {})
self.assertEqual(altparser.post_process, False)
altparser = inputs.AltParser("dismax", "douglas adams", qf="author", mm=1)
self.assertEqual(altparser.parser_name, "dismax")
self.assertEqual(altparser.query_string, "douglas adams")
self.assertEqual(altparser.kwargs, {"mm": 1, "qf": "author"})
self.assertEqual(altparser.post_process, False)
def test_altparser_prepare(self):
altparser = inputs.AltParser("hello OR there, :you")
# Not supported on that backend.
self.assertEqual(altparser.prepare(self.query_obj), "")
| WhooshInputTestCase |
python | getsentry__sentry | src/sentry/integrations/models/doc_integration_avatar.py | {
"start": 242,
"end": 971
} | class ____(ControlAvatarBase):
"""
A DocIntegrationAvatar associates a DocIntegration with a logo photo File.
"""
AVATAR_TYPES = ((0, "upload"),)
FILE_TYPE = "avatar.file"
doc_integration = FlexibleForeignKey("sentry.DocIntegration", related_name="avatar")
avatar_type = models.PositiveSmallIntegerField(
default=0, db_default=0, choices=((0, "upload"),)
)
date_added = models.DateTimeField(default=timezone.now)
class Meta:
app_label = "sentry"
db_table = "sentry_docintegrationavatar"
url_path = "doc-integration-avatar"
def get_cache_key(self, size) -> str:
return f"doc_integration_avatar:{self.doc_integration_id}:{size}"
| DocIntegrationAvatar |
python | skorch-dev__skorch | skorch/tests/callbacks/test_training.py | {
"start": 18033,
"end": 26162
} | class ____:
@pytest.fixture
def early_stopping_cls(self):
from skorch.callbacks import EarlyStopping
return EarlyStopping
@pytest.fixture
def epoch_scoring_cls(self):
from skorch.callbacks import EpochScoring
return EpochScoring
@pytest.fixture
def net_clf_cls(self):
from skorch import NeuralNetClassifier
return NeuralNetClassifier
@pytest.fixture
def broken_classifier_module(self, classifier_module):
"""Return a classifier that does not improve over time."""
class BrokenClassifier(classifier_module.func):
def forward(self, x):
return super().forward(x) * 0 + 0.5
return BrokenClassifier
def test_typical_use_case_nonstop(
self, net_clf_cls, classifier_module, classifier_data,
early_stopping_cls):
patience = 5
max_epochs = 8
early_stopping_cb = early_stopping_cls(patience=patience)
net = net_clf_cls(
classifier_module,
callbacks=[
early_stopping_cb,
],
max_epochs=max_epochs,
)
net.fit(*classifier_data)
assert len(net.history) == max_epochs
def test_weights_restore(
self, net_clf_cls, classifier_module, classifier_data,
early_stopping_cls):
patience = 3
max_epochs = 20
seed = 1
side_effect = []
def sink(x):
side_effect.append(x)
early_stopping_cb = early_stopping_cls(
patience=patience,
sink=sink,
load_best=True,
monitor="valid_acc",
lower_is_better=False,
)
# Split dataset to have a fixed validation
X_tr, X_val, y_tr, y_val = train_test_split(
*classifier_data, random_state=seed)
tr_dataset = TensorDataset(
torch.as_tensor(X_tr).float(), torch.as_tensor(y_tr))
val_dataset = TensorDataset(
torch.as_tensor(X_val).float(), torch.as_tensor(y_val))
# Fix the network once with early stoppping and fixed seed
net1 = net_clf_cls(
classifier_module,
callbacks=[early_stopping_cb],
max_epochs=max_epochs,
train_split=predefined_split(val_dataset),
)
torch.manual_seed(seed)
net1.fit(tr_dataset, y=None)
# Check training was stopped before the end
assert len(net1.history) < max_epochs
# check correct output messages
assert len(side_effect) == 2
msg = side_effect[0]
expected_msg = ("Stopping since valid_acc has not improved in "
"the last 3 epochs.")
assert msg == expected_msg
msg = side_effect[1]
expected_msg = "Restoring best model from epoch "
assert expected_msg in msg
# Recompute validation loss and store it together with module weights
y_proba = net1.predict_proba(val_dataset)
es_weights = deepcopy(net1.module_.state_dict())
es_loss = log_loss(y_val, y_proba)
# Retrain same classifier without ES, using the best epochs number
net2 = net_clf_cls(
classifier_module,
max_epochs=early_stopping_cb.best_epoch_,
train_split=predefined_split(val_dataset),
)
torch.manual_seed(seed)
net2.fit(tr_dataset, y=None)
# Check that weights obtained match
assert all(
torch.equal(wi, wj)
for wi, wj in zip(
net2.module_.state_dict().values(),
es_weights.values()
)
)
# Check validation loss obtained match
y_proba_2 = net2.predict_proba(val_dataset)
assert es_loss == log_loss(y_val, y_proba_2)
# Check best_model_weights_ is transformed into None when pickling
del net1.callbacks[0].sink
net1_pkl = pickle.dumps(net1)
reloaded_net1 = pickle.loads(net1_pkl)
assert reloaded_net1.callbacks[0].best_epoch_ == net1.callbacks[0].best_epoch_
assert reloaded_net1.callbacks[0].best_model_weights_ is None
def test_typical_use_case_stopping(
self, net_clf_cls, broken_classifier_module, classifier_data,
early_stopping_cls):
patience = 5
max_epochs = 8
side_effect = []
def sink(x):
side_effect.append(x)
early_stopping_cb = early_stopping_cls(patience=patience, sink=sink)
net = net_clf_cls(
broken_classifier_module,
callbacks=[
early_stopping_cb,
],
max_epochs=max_epochs,
)
net.fit(*classifier_data)
assert len(net.history) == patience + 1 < max_epochs
# check correct output message
assert len(side_effect) == 1
msg = side_effect[0]
expected_msg = ("Stopping since valid_loss has not improved in "
"the last 5 epochs.")
assert msg == expected_msg
def test_custom_scoring_nonstop(
self, net_clf_cls, classifier_module, classifier_data,
early_stopping_cls, epoch_scoring_cls,
):
lower_is_better = False
scoring_name = 'valid_roc_auc'
patience = 5
max_epochs = 8
scoring_mock = Mock(side_effect=list(range(2, 10)))
scoring_cb = epoch_scoring_cls(
scoring_mock, lower_is_better, name=scoring_name)
early_stopping_cb = early_stopping_cls(
patience=patience, lower_is_better=lower_is_better,
monitor=scoring_name)
net = net_clf_cls(
classifier_module,
callbacks=[
scoring_cb,
early_stopping_cb,
],
max_epochs=max_epochs,
)
net.fit(*classifier_data)
assert len(net.history) == max_epochs
def test_custom_scoring_stop(
self, net_clf_cls, broken_classifier_module, classifier_data,
early_stopping_cls, epoch_scoring_cls,
):
lower_is_better = False
scoring_name = 'valid_roc_auc'
patience = 5
max_epochs = 8
scoring_cb = epoch_scoring_cls(
'roc_auc', lower_is_better, name=scoring_name)
early_stopping_cb = early_stopping_cls(
patience=patience, lower_is_better=lower_is_better,
monitor=scoring_name)
net = net_clf_cls(
broken_classifier_module,
callbacks=[
scoring_cb,
early_stopping_cb,
],
max_epochs=max_epochs,
)
net.fit(*classifier_data)
assert len(net.history) < max_epochs
def test_stopping_big_absolute_threshold(
self, net_clf_cls, classifier_module, classifier_data,
early_stopping_cls):
patience = 5
max_epochs = 8
early_stopping_cb = early_stopping_cls(patience=patience,
threshold_mode='abs',
threshold=0.1)
net = net_clf_cls(
classifier_module,
callbacks=[
early_stopping_cb,
],
max_epochs=max_epochs,
)
net.fit(*classifier_data)
assert len(net.history) == patience + 1 < max_epochs
def test_wrong_threshold_mode(
self, net_clf_cls, classifier_module, classifier_data,
early_stopping_cls):
patience = 5
max_epochs = 8
early_stopping_cb = early_stopping_cls(
patience=patience, threshold_mode='incorrect')
net = net_clf_cls(
classifier_module,
callbacks=[
early_stopping_cb,
],
max_epochs=max_epochs,
)
with pytest.raises(ValueError) as exc:
net.fit(*classifier_data)
expected_msg = "Invalid threshold mode: 'incorrect'"
assert exc.value.args[0] == expected_msg
| TestEarlyStopping |
python | django__django | tests/i18n/tests.py | {
"start": 91193,
"end": 91885
} | class ____(SimpleTestCase):
def test_round_away_from_one(self):
tests = [
(0, 0),
(0.0, 0),
(0.25, 0),
(0.5, 0),
(0.75, 0),
(1, 1),
(1.0, 1),
(1.25, 2),
(1.5, 2),
(1.75, 2),
(-0.0, 0),
(-0.25, -1),
(-0.5, -1),
(-0.75, -1),
(-1, -1),
(-1.0, -1),
(-1.25, -2),
(-1.5, -2),
(-1.75, -2),
]
for value, expected in tests:
with self.subTest(value=value):
self.assertEqual(round_away_from_one(value), expected)
| UtilsTests |
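The test table above pins down the behavior: values round away from 1, downward below it and upward above it. A minimal sketch consistent with that table, offered only as an illustration and not necessarily Django's actual implementation:

# Minimal sketch matching the table above; not necessarily Django's actual code.
import math

def round_away_from_one(value):
    # Below 1, round down (away from 1); at or above 1, round up.
    return math.floor(value) if value < 1 else math.ceil(value)

assert round_away_from_one(1.25) == 2
assert round_away_from_one(-0.5) == -1
assert round_away_from_one(0.75) == 0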
python | getsentry__sentry | src/sentry/hybridcloud/outbox/category.py | {
"start": 618,
"end": 8878
} | class ____(IntEnum):
USER_UPDATE = 0
WEBHOOK_PROXY = 1 # no longer in use
ORGANIZATION_UPDATE = 2
ORGANIZATION_MEMBER_UPDATE = 3
UNUSED_TWO = 4
AUDIT_LOG_EVENT = 5
USER_IP_EVENT = 6
INTEGRATION_UPDATE = 7
PROJECT_UPDATE = 8
API_APPLICATION_UPDATE = 9
SENTRY_APP_INSTALLATION_UPDATE = 10
TEAM_UPDATE = 11
ORGANIZATION_INTEGRATION_UPDATE = 12
UNUSUED_THREE = 13
SEND_SIGNAL = 14
ORGANIZATION_MAPPING_CUSTOMER_ID_UPDATE = 15
ORGAUTHTOKEN_UPDATE_USED = 16
PROVISION_ORGANIZATION = 17
POST_ORGANIZATION_PROVISION = 18
UNUSED_ONE = 19
DISABLE_AUTH_PROVIDER = 20 # no longer in use
RESET_IDP_FLAGS = 21
MARK_INVALID_SSO = 22
SUBSCRIPTION_UPDATE = 23
AUTH_PROVIDER_UPDATE = 24
AUTH_IDENTITY_UPDATE = 25
ORGANIZATION_MEMBER_TEAM_UPDATE = 26
ORGANIZATION_SLUG_RESERVATION_UPDATE = 27
API_KEY_UPDATE = 28
PARTNER_ACCOUNT_UPDATE = 29
SENTRY_APP_UPDATE = 30
UNUSED_FOUR = 31
API_TOKEN_UPDATE = 32
ORG_AUTH_TOKEN_UPDATE = 33
ISSUE_COMMENT_UPDATE = 34
EXTERNAL_ACTOR_UPDATE = 35
RELOCATION_EXPORT_REQUEST = 36 # no longer in use
RELOCATION_EXPORT_REPLY = 37 # no longer in use
SEND_VERCEL_INVOICE = 38
FTC_CONSENT = 39
SERVICE_HOOK_UPDATE = 40
SENTRY_APP_DELETE = 41
SENTRY_APP_INSTALLATION_DELETE = 42
@classmethod
def as_choices(cls) -> Sequence[tuple[int, int]]:
return [(i.value, i.value) for i in cls]
def connect_region_model_updates(self, model: type[ReplicatedRegionModel]) -> None:
def receiver(
object_identifier: int,
payload: Mapping[str, Any] | None,
shard_identifier: int,
*args: Any,
**kwds: Any,
) -> None:
from sentry.receivers.outbox import maybe_process_tombstone
maybe_instance: ReplicatedRegionModel | None = maybe_process_tombstone(
cast(Any, model), object_identifier, region_name=None
)
if maybe_instance is None:
model.handle_async_deletion(
identifier=object_identifier, shard_identifier=shard_identifier, payload=payload
)
else:
maybe_instance.handle_async_replication(shard_identifier=shard_identifier)
process_region_outbox.connect(receiver, weak=False, sender=self)
def connect_control_model_updates(self, model: type[HasControlReplicationHandlers]) -> None:
def receiver(
object_identifier: int,
payload: Mapping[str, Any] | None,
shard_identifier: int,
region_name: str,
*args: Any,
**kwds: Any,
) -> None:
from sentry.receivers.outbox import maybe_process_tombstone
maybe_instance: HasControlReplicationHandlers | None = maybe_process_tombstone(
cast(Any, model), object_identifier, region_name=region_name
)
if maybe_instance is None:
model.handle_async_deletion(
identifier=object_identifier,
region_name=region_name,
shard_identifier=shard_identifier,
payload=payload,
)
else:
maybe_instance.handle_async_replication(
shard_identifier=shard_identifier, region_name=region_name
)
process_control_outbox.connect(receiver, weak=False, sender=self)
def get_scope(self) -> OutboxScope:
for scope_int, categories in _outbox_categories_for_scope.items():
if self not in categories:
continue
break
else:
raise KeyError
return OutboxScope(scope_int)
def as_region_outbox(
self,
model: Any | None = None,
payload: dict[str, Any] | None = None,
shard_identifier: int | None = None,
object_identifier: int | None = None,
outbox: type[RegionOutboxBase] | None = None,
) -> RegionOutboxBase:
from sentry.hybridcloud.models.outbox import RegionOutbox
scope = self.get_scope()
shard_identifier, object_identifier = self.infer_identifiers(
scope, model, object_identifier=object_identifier, shard_identifier=shard_identifier
)
Outbox = outbox or RegionOutbox
return Outbox(
shard_scope=scope,
shard_identifier=shard_identifier,
category=self,
object_identifier=object_identifier,
payload=payload,
)
def as_control_outboxes(
self,
region_names: Collection[str],
model: Any | None = None,
payload: dict[str, Any] | None = None,
shard_identifier: int | None = None,
object_identifier: int | None = None,
outbox: type[ControlOutboxBase] | None = None,
) -> list[ControlOutboxBase]:
from sentry.hybridcloud.models.outbox import ControlOutbox
scope = self.get_scope()
shard_identifier, object_identifier = self.infer_identifiers(
scope, model, object_identifier=object_identifier, shard_identifier=shard_identifier
)
Outbox = outbox or ControlOutbox
return [
Outbox(
shard_scope=scope,
shard_identifier=shard_identifier,
category=self,
object_identifier=object_identifier,
region_name=region_name,
payload=payload,
)
for region_name in region_names
]
def infer_identifiers(
self,
scope: OutboxScope,
model: BaseModel | None,
*,
object_identifier: int | None,
shard_identifier: int | None,
) -> tuple[int, int]:
from sentry.integrations.models.integration import Integration
from sentry.models.apiapplication import ApiApplication
from sentry.models.organization import Organization
from sentry.users.models.user import User
assert (model is not None) ^ (
object_identifier is not None
), "Either model or object_identifier must be specified"
if model is not None and hasattr(model, "id"):
object_identifier = model.id
if shard_identifier is None and model is not None:
if scope == OutboxScope.ORGANIZATION_SCOPE:
if isinstance(model, Organization):
shard_identifier = model.id
elif hasattr(model, "organization_id"):
shard_identifier = model.organization_id
elif hasattr(model, "auth_provider"):
shard_identifier = model.auth_provider.organization_id
if scope == OutboxScope.USER_SCOPE:
if isinstance(model, User):
shard_identifier = model.id
elif hasattr(model, "user_id"):
shard_identifier = model.user_id
if scope == OutboxScope.APP_SCOPE:
if isinstance(model, ApiApplication):
shard_identifier = model.id
elif hasattr(model, "api_application_id"):
shard_identifier = model.api_application_id
if scope == OutboxScope.INTEGRATION_SCOPE:
if isinstance(model, Integration):
shard_identifier = model.id
elif hasattr(model, "integration_id"):
shard_identifier = model.integration_id
assert (
model is not None
) or shard_identifier is not None, "Either model or shard_identifier must be specified"
assert object_identifier is not None
assert shard_identifier is not None
return shard_identifier, object_identifier
def scope_categories(enum_value: int, categories: set[OutboxCategory]) -> int:
_outbox_categories_for_scope[enum_value] = categories
inter = _used_categories.intersection(categories)
assert not inter, f"OutboxCategories {inter} were already registered to a different scope"
_used_categories.update(categories)
return enum_value
| OutboxCategory |
python | has2k1__plotnine | plotnine/scales/scale_xy.py | {
"start": 8046,
"end": 8261
} | class ____(scale_datetime, scale_y_continuous): # pyright: ignore[reportIncompatibleVariableOverride]
"""
Continuous y position for datetime data points
"""
guide: None = None
@alias
| scale_y_datetime |
python | doocs__leetcode | solution/1000-1099/1020.Number of Enclaves/Solution2.py | {
"start": 0,
"end": 793
} | class ____:
def numEnclaves(self, grid: List[List[int]]) -> int:
m, n = len(grid), len(grid[0])
q = deque()
for j in range(n):
for i in (0, m - 1):
if grid[i][j]:
q.append((i, j))
grid[i][j] = 0
for i in range(m):
for j in (0, n - 1):
if grid[i][j]:
q.append((i, j))
grid[i][j] = 0
dirs = (-1, 0, 1, 0, -1)
while q:
i, j = q.popleft()
for a, b in pairwise(dirs):
x, y = i + a, j + b
if 0 <= x < m and 0 <= y < n and grid[x][y]:
q.append((x, y))
grid[x][y] = 0
return sum(sum(row) for row in grid)
| Solution |
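A worked example for the flood-fill above: the BFS clears every land cell reachable from the border, so whatever 1s remain are enclaves. This assumes the snippet with its masked name restored to `Solution` and its imports (`collections.deque`, `itertools.pairwise`, `typing.List`) in scope; the grid is made up:

# Worked example; assumes the class above (as `Solution`) and its imports in scope.
grid = [
    [0, 0, 0, 0],
    [1, 0, 1, 0],  # (1, 0) touches the border and is flooded away; (1, 2) is trapped
    [0, 1, 1, 0],  # (2, 1) and (2, 2) complete the trapped block of three
    [0, 0, 0, 0],
]
print(Solution().numEnclaves(grid))  # 3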
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/tryceratops/TRY004.py | {
"start": 5455,
"end": 8195
} | class ____(Exception):
pass
def correct_custom_exception(some_arg):
if isinstance(some_arg, int):
pass
else:
raise MyCustomTypeValidation("...") # that's correct, because it's not vanilla
def correct_complex_conditional(val):
if val is not None and (not isinstance(val, int) or val < 0):
raise ValueError(...) # fine if this is not a TypeError
def correct_multi_conditional(some_arg):
if some_arg == 3:
pass
elif isinstance(some_arg, int):
pass
else:
raise Exception("...") # fine if this is not a TypeError
def correct_should_ignore(some_arg):
if isinstance(some_arg, int):
pass
else:
raise TypeError("...")
def check_body(some_args):
if isinstance(some_args, int):
raise ValueError("...") # should be typeerror
def check_body_correct(some_args):
if isinstance(some_args, int):
raise TypeError("...") # correct
def multiple_elifs(some_args):
if not isinstance(some_args, int):
raise ValueError("...") # should be typerror
elif some_args < 3:
raise ValueError("...") # this is ok
elif some_args > 10:
raise ValueError("...") # this is ok if we don't simplify
else:
pass
def multiple_ifs(some_args):
if not isinstance(some_args, int):
raise ValueError("...") # should be typerror
else:
if some_args < 3:
raise ValueError("...") # this is ok
else:
if some_args > 10:
raise ValueError("...") # this is ok if we don't simplify
else:
pass
def else_body(obj):
if isinstance(obj, datetime.timedelta):
return "TimeDelta"
elif isinstance(obj, relativedelta.relativedelta):
return "RelativeDelta"
elif isinstance(obj, CronExpression):
return "CronExpression"
else:
raise Exception(f"Unknown object type: {obj.__class__.__name__}")
def early_return():
if isinstance(this, some_type):
if x in this:
return
raise ValueError(f"{this} has a problem") # this is ok
def early_break():
for x in this:
if isinstance(this, some_type):
if x in this:
break
raise ValueError(f"{this} has a problem") # this is ok
def early_continue():
for x in this:
if isinstance(this, some_type):
if x in this:
continue
raise ValueError(f"{this} has a problem") # this is ok
def early_return_else():
if isinstance(this, some_type):
pass
else:
if x in this:
return
raise ValueError(f"{this} has a problem") # this is ok
| MyCustomTypeValidation |
python | google__flatbuffers | tests/namespace_test/NamespaceA/NamespaceB/TableInNestedNS.py | {
"start": 181,
"end": 1340
} | class ____(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = TableInNestedNS()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsTableInNestedNS(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
# TableInNestedNS
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# TableInNestedNS
def Foo(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(
flatbuffers.number_types.Int32Flags, o + self._tab.Pos
)
return 0
def TableInNestedNSStart(builder):
builder.StartObject(1)
def Start(builder):
return TableInNestedNSStart(builder)
def TableInNestedNSAddFoo(builder, foo):
builder.PrependInt32Slot(0, foo, 0)
def AddFoo(builder, foo):
return TableInNestedNSAddFoo(builder, foo)
def TableInNestedNSEnd(builder):
return builder.EndObject()
def End(builder):
return TableInNestedNSEnd(builder)
| TableInNestedNS |
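A round-trip sketch for the generated helpers above, assuming the flatbuffers runtime is installed and the module's masked class name is restored to `TableInNestedNS`; it builds a buffer with `Foo = 42` and reads it back:

# Round-trip sketch; assumes the flatbuffers runtime and the module above in scope.
import flatbuffers

builder = flatbuffers.Builder(0)
Start(builder)
AddFoo(builder, 42)
table = End(builder)
builder.Finish(table)

buf = builder.Output()
print(TableInNestedNS.GetRootAs(buf, 0).Foo())  # 42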
python | langchain-ai__langchain | libs/core/langchain_core/runnables/utils.py | {
"start": 17764,
"end": 19307
} | class ____(NamedTuple):
"""Field that can be configured by the user. It is a specification of a field."""
id: str
"""The unique identifier of the field."""
annotation: Any
"""The annotation of the field."""
name: str | None = None
"""The name of the field. """
description: str | None = None
"""The description of the field. """
default: Any = None
"""The default value for the field. """
is_shared: bool = False
"""Whether the field is shared."""
dependencies: list[str] | None = None
"""The dependencies of the field. """
def get_unique_config_specs(
specs: Iterable[ConfigurableFieldSpec],
) -> list[ConfigurableFieldSpec]:
"""Get the unique config specs from a sequence of config specs.
Args:
specs: The config specs.
Returns:
The unique config specs.
Raises:
ValueError: If the runnable sequence contains conflicting config specs.
"""
grouped = groupby(
sorted(specs, key=lambda s: (s.id, *(s.dependencies or []))), lambda s: s.id
)
unique: list[ConfigurableFieldSpec] = []
for spec_id, dupes in grouped:
first = next(dupes)
others = list(dupes)
if len(others) == 0 or all(o == first for o in others):
unique.append(first)
else:
msg = (
"RunnableSequence contains conflicting config specs"
f"for {spec_id}: {[first, *others]}"
)
raise ValueError(msg)
return unique
| ConfigurableFieldSpec |
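A small usage sketch of `get_unique_config_specs` above: duplicates that compare equal collapse to one spec, while conflicting specs sharing an id raise. Assumes langchain-core is installed; the spec ids are made up:

# Usage sketch; assumes langchain-core is installed. Spec ids are made up.
from langchain_core.runnables.utils import (
    ConfigurableFieldSpec,
    get_unique_config_specs,
)

a = ConfigurableFieldSpec(id="temperature", annotation=float)
b = ConfigurableFieldSpec(id="temperature", annotation=float)  # equal duplicate
c = ConfigurableFieldSpec(id="model_name", annotation=str)

print([s.id for s in get_unique_config_specs([a, b, c])])
# ['model_name', 'temperature'] -- sorted by id, duplicates collapsed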
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_kubernetes_engine.py | {
"start": 10204,
"end": 11039
} | class ____:
def setup_method(self):
with mock.patch(
BASE_STRING.format("GoogleBaseHook.__init__"), new=mock_base_gcp_hook_default_project_id
):
self.gke_hook = GKEHook(gcp_conn_id="test", location=GKE_ZONE)
self.gke_hook._client = mock.Mock()
def test_get_cluster(self):
retry_mock, timeout_mock = mock.Mock(), mock.Mock()
client_get = self.gke_hook._client.get_cluster = mock.Mock()
self.gke_hook.get_cluster(
name=CLUSTER_NAME, project_id=TEST_GCP_PROJECT_ID, retry=retry_mock, timeout=timeout_mock
)
client_get.assert_called_once_with(
name=f"projects/{TEST_GCP_PROJECT_ID}/locations/{GKE_ZONE}/clusters/{CLUSTER_NAME}",
retry=retry_mock,
timeout=timeout_mock,
)
| TestGKEHookGet |