language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | dagster-io__dagster | python_modules/libraries/dagster-dask/dagster_dask/executor.py | {
"start": 5921,
"end": 12402
} | class ____(Executor):
def __init__(self, cluster_type, cluster_configuration):
self.cluster_type = check.opt_str_param(cluster_type, "cluster_type", default="local")
self.cluster_configuration = check.opt_dict_param(
cluster_configuration, "cluster_configuration"
)
@property
def retries(self):
return RetryMode.DISABLED
def execute(self, plan_context: PlanOrchestrationContext, execution_plan: ExecutionPlan):
check.inst_param(plan_context, "plan_context", PlanOrchestrationContext)
check.inst_param(execution_plan, "execution_plan", ExecutionPlan)
check.param_invariant(
isinstance(plan_context.executor, DaskExecutor),
"plan_context",
f"Expected executor to be DaskExecutor got {plan_context.executor}",
)
check.invariant(
plan_context.instance.is_persistent,
"Dask execution requires a persistent DagsterInstance",
)
step_levels = execution_plan.get_steps_to_execute_by_level()
job_name = plan_context.job_name
instance = plan_context.instance
cluster_type = self.cluster_type
if cluster_type == "existing":
# address passed directly to Client() below to connect to existing Scheduler
cluster = self.cluster_configuration["address"]
elif cluster_type == "local":
from dask.distributed import LocalCluster
cluster = LocalCluster(**self.build_dict(job_name))
elif cluster_type == "yarn":
from dask_yarn import YarnCluster
cluster = YarnCluster(**self.build_dict(job_name))
elif cluster_type == "ssh":
from dask.distributed import SSHCluster
cluster = SSHCluster(**self.build_dict(job_name))
elif cluster_type == "pbs":
from dask_jobqueue import PBSCluster
cluster = PBSCluster(**self.build_dict(job_name))
elif cluster_type == "moab":
from dask_jobqueue import MoabCluster
cluster = MoabCluster(**self.build_dict(job_name))
elif cluster_type == "sge":
from dask_jobqueue import SGECluster
cluster = SGECluster(**self.build_dict(job_name))
elif cluster_type == "lsf":
from dask_jobqueue import LSFCluster
cluster = LSFCluster(**self.build_dict(job_name))
elif cluster_type == "slurm":
from dask_jobqueue import SLURMCluster
cluster = SLURMCluster(**self.build_dict(job_name))
elif cluster_type == "oar":
from dask_jobqueue import OARCluster
cluster = OARCluster(**self.build_dict(job_name))
elif cluster_type == "kube":
from dask_kubernetes import KubeCluster
cluster = KubeCluster(**self.build_dict(job_name))
else:
raise ValueError(
"Must be providing one of the following ('existing', 'local', 'yarn', 'ssh',"
f" 'pbs', 'moab', 'sge', 'lsf', 'slurm', 'oar', 'kube') not {cluster_type}"
)
with dask.distributed.Client(cluster) as client:
execution_futures = []
execution_futures_dict = {}
for step_level in step_levels:
for step in step_level:
# We ensure correctness in sequencing by letting Dask schedule futures and
# awaiting dependencies within each step.
dependencies = []
for step_input in step.step_inputs:
for key in step_input.dependency_keys:
dependencies.append(execution_futures_dict[key])
run_config = plan_context.run_config
dask_task_name = f"{job_name}.{step.key}"
recon_job = plan_context.reconstructable_job
future = client.submit(
query_on_dask_worker,
dependencies,
recon_job,
plan_context.dagster_run,
run_config,
[step.key],
instance.get_ref(),
execution_plan.known_state,
key=dask_task_name,
resources=get_dask_resource_requirements(step.tags),
)
execution_futures.append(future)
execution_futures_dict[step.key] = future
# This tells Dask to awaits the step executions and retrieve their results to the
# master
futures = dask.distributed.as_completed(execution_futures, with_results=True)
# Allow interrupts while waiting for the results from Dask
for future, result in iterate_with_context(raise_execution_interrupts, futures):
for step_event in result:
yield check.inst(step_event, DagsterEvent)
def build_dict(self, job_name):
"""Returns a dict we can use for kwargs passed to dask client instantiation.
Intended to be used like:
with dask.distributed.Client(**cfg.build_dict()) as client:
<< use client here >>
"""
if self.cluster_type in ["yarn", "pbs", "moab", "sge", "lsf", "slurm", "oar", "kube"]:
dask_cfg = {"name": job_name}
else:
dask_cfg = {}
if self.cluster_configuration:
for k, v in self.cluster_configuration.items():
dask_cfg[k] = v
# if address is set, don't add LocalCluster args
# context: https://github.com/dask/distributed/issues/3313
if (self.cluster_type == "local") and ("address" not in dask_cfg):
# We set threads_per_worker because Dagster is not thread-safe. Even though
# environments=True by default, there is a clever piece of machinery
# (dask.distributed.deploy.local.nprocesses_nthreads) that automagically makes execution
# multithreaded by default when the number of available cores is greater than 4.
# See: https://github.com/dagster-io/dagster/issues/2181
# We may want to try to figure out a way to enforce this on remote Dask clusters against
# which users run Dagster workloads.
dask_cfg["threads_per_worker"] = 1
return dask_cfg
| DaskExecutor |
python | cherrypy__cherrypy | cherrypy/_cprequest.py | {
"start": 35175,
"end": 35679
} | class ____(object):
"""A delayed UUID4 string maker."""
def __str__(self):
"""Return UUID4 and keep it for future calls."""
return str(self.uuid4)
@property
def uuid4(self):
"""Provide unique id on per-request basis using UUID4.
It's evaluated lazily on render.
"""
try:
self._uuid4
except AttributeError:
# evaluate on first access
self._uuid4 = uuid.uuid4()
return self._uuid4
| LazyUUID4 |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/partitions/subset/time_window.py | {
"start": 2129,
"end": 3618
} | class ____(NamedTupleSerializer):
# TimeWindowPartitionsSubsets have custom logic to delay calculating num_partitions until it
# is needed to improve performance. When serializing, we want to serialize the number of
# partitions, so we force calculation.
def before_pack(self, value: "TimeWindowPartitionsSubset") -> "TimeWindowPartitionsSubset": # pyright: ignore[reportIncompatibleMethodOverride]
# value.num_partitions will calculate the number of partitions if the field is None
# We want to check if the field is None and replace the value with the calculated value
# for serialization
if value._asdict()["num_partitions"] is None:
return TimeWindowPartitionsSubset(
partitions_def=value.partitions_def,
num_partitions=value.num_partitions,
included_time_windows=value.included_time_windows,
)
return value
def before_unpack(self, context, value: dict[str, Any]): # pyright: ignore[reportIncompatibleMethodOverride]
num_partitions = value.get("num_partitions")
# some objects were serialized with an invalid num_partitions, so fix that here
if num_partitions is not None and num_partitions < 0:
# set it to None so that it will be recalculated
value["num_partitions"] = None
return value
@whitelist_for_serdes(serializer=TimeWindowPartitionsSubsetSerializer)
| TimeWindowPartitionsSubsetSerializer |
python | django__django | tests/gis_tests/distapp/models.py | {
"start": 1002,
"end": 1157
} | class ____(NamedModel):
"Model for a few South Texas ZIP codes."
poly = models.PolygonField(srid=32140, null=gisfield_may_be_null)
| SouthTexasZipcode |
python | getsentry__sentry | src/sentry/hybridcloud/services/control_organization_provisioning/service.py | {
"start": 570,
"end": 4652
} | class ____(RpcService):
key = "control_org_provisioning"
local_mode = SiloMode.CONTROL
@abstractmethod
@rpc_method
def provision_organization(
self, *, region_name: str, org_provision_args: OrganizationProvisioningOptions
) -> RpcOrganizationSlugReservation:
"""
Provisions an organization, an organization member, and team based on the provisioning args passed.
In the event of a slug conflict, a new slug will be generated using the provided slug as a seed.
:param region_name: The region to provision the organization in.
:param org_provision_args: Provisioning and post-provisioning options for the organization.
:return: RpcOrganizationSlugReservation containing the organization ID and slug.
"""
@abstractmethod
@rpc_method
def idempotent_provision_organization(
self, *, region_name: str, org_provision_args: OrganizationProvisioningOptions
) -> RpcOrganizationSlugReservation | None:
"""
Provisions an organization, an organization member, and team based on the provisioning args passed.
In the event of a slug conflict, the conflicting org will be queried. If the provided owning_user_id
matches the organization's owning user, the organization will be returned. Otherwise, None will be returned.
Note: This is not intended to be used for normal organization provisioning; but rather, for use-cases
such as integrations which require strong idempotency.
:param region_name: The region to provision the organization in.
:param org_provision_args: Provisioning and post-provisioning options for the organization.
:return: RpcOrganization the organization ID and slug.
"""
@abstractmethod
@rpc_method
def update_organization_slug(
self,
*,
region_name: str,
organization_id: int,
desired_slug: str,
require_exact: bool = True,
) -> RpcOrganizationSlugReservation:
"""
Updates an organization's slug via an outbox based confirmation flow to ensure that the control
and region silos stay in sync.
Initially, the organization slug reservation is updated in control silo, which generates a replica
outbox to the desired region in order to ensure that a slug change in control _will eventually_
result in a slug change on the region side.
:param region_name: The region where the organization exists
:param organization_id: the ID of the organization whose slug to change
:param desired_slug: The slug to update the organization with
:param require_exact: Determines whether the slug can be modified with a unique suffix in the
case of a slug collision.
:return:
"""
@abstractmethod
@rpc_method
def bulk_create_organization_slug_reservations(
self,
*,
region_name: str,
slug_mapping: dict[int, str],
) -> None:
"""
Only really intended for bulk organization import usage. Creates unique organization slug
reservations for the given list of IDs and slug bases for organizations already provisioned
in the provided region.
:param region_name: The region where the imported organization exist
:param organization_ids_and_slugs: A set of ID and base slug tuples to reserve slugs for.
This parameter is deprecated. Use slug_mapping instead.
:param slug_mapping: A map of organization id -> slug to reserve.
:return:
"""
@classmethod
def get_local_implementation(cls) -> RpcService:
from sentry.hybridcloud.services.control_organization_provisioning.impl import (
DatabaseBackedControlOrganizationProvisioningService,
)
return DatabaseBackedControlOrganizationProvisioningService()
control_organization_provisioning_rpc_service = (
ControlOrganizationProvisioningRpcService.create_delegation()
)
| ControlOrganizationProvisioningRpcService |
python | langchain-ai__langchain | libs/langchain_v1/langchain/chat_models/base.py | {
"start": 21579,
"end": 36525
} | class ____(Runnable[LanguageModelInput, Any]):
def __init__(
self,
*,
default_config: dict | None = None,
configurable_fields: Literal["any"] | list[str] | tuple[str, ...] = "any",
config_prefix: str = "",
queued_declarative_operations: Sequence[tuple[str, tuple, dict]] = (),
) -> None:
self._default_config: dict = default_config or {}
self._configurable_fields: Literal["any"] | list[str] = (
configurable_fields if configurable_fields == "any" else list(configurable_fields)
)
self._config_prefix = (
config_prefix + "_"
if config_prefix and not config_prefix.endswith("_")
else config_prefix
)
self._queued_declarative_operations: list[tuple[str, tuple, dict]] = list(
queued_declarative_operations,
)
def __getattr__(self, name: str) -> Any:
if name in _DECLARATIVE_METHODS:
# Declarative operations that cannot be applied until after an actual model
# object is instantiated. So instead of returning the actual operation,
# we record the operation and its arguments in a queue. This queue is
# then applied in order whenever we actually instantiate the model (in
# self._model()).
def queue(*args: Any, **kwargs: Any) -> _ConfigurableModel:
queued_declarative_operations = list(
self._queued_declarative_operations,
)
queued_declarative_operations.append((name, args, kwargs))
return _ConfigurableModel(
default_config=dict(self._default_config),
configurable_fields=list(self._configurable_fields)
if isinstance(self._configurable_fields, list)
else self._configurable_fields,
config_prefix=self._config_prefix,
queued_declarative_operations=queued_declarative_operations,
)
return queue
if self._default_config and (model := self._model()) and hasattr(model, name):
return getattr(model, name)
msg = f"{name} is not a BaseChatModel attribute"
if self._default_config:
msg += " and is not implemented on the default model"
msg += "."
raise AttributeError(msg)
def _model(self, config: RunnableConfig | None = None) -> Runnable:
params = {**self._default_config, **self._model_params(config)}
model = _init_chat_model_helper(**params)
for name, args, kwargs in self._queued_declarative_operations:
model = getattr(model, name)(*args, **kwargs)
return model
def _model_params(self, config: RunnableConfig | None) -> dict:
config = ensure_config(config)
model_params = {
_remove_prefix(k, self._config_prefix): v
for k, v in config.get("configurable", {}).items()
if k.startswith(self._config_prefix)
}
if self._configurable_fields != "any":
model_params = {k: v for k, v in model_params.items() if k in self._configurable_fields}
return model_params
def with_config(
self,
config: RunnableConfig | None = None,
**kwargs: Any,
) -> _ConfigurableModel:
"""Bind config to a `Runnable`, returning a new `Runnable`."""
config = RunnableConfig(**(config or {}), **cast("RunnableConfig", kwargs))
model_params = self._model_params(config)
remaining_config = {k: v for k, v in config.items() if k != "configurable"}
remaining_config["configurable"] = {
k: v
for k, v in config.get("configurable", {}).items()
if _remove_prefix(k, self._config_prefix) not in model_params
}
queued_declarative_operations = list(self._queued_declarative_operations)
if remaining_config:
queued_declarative_operations.append(
(
"with_config",
(),
{"config": remaining_config},
),
)
return _ConfigurableModel(
default_config={**self._default_config, **model_params},
configurable_fields=list(self._configurable_fields)
if isinstance(self._configurable_fields, list)
else self._configurable_fields,
config_prefix=self._config_prefix,
queued_declarative_operations=queued_declarative_operations,
)
@property
def InputType(self) -> TypeAlias:
"""Get the input type for this `Runnable`."""
from langchain_core.prompt_values import ChatPromptValueConcrete, StringPromptValue
# This is a version of LanguageModelInput which replaces the abstract
# base class BaseMessage with a union of its subclasses, which makes
# for a much better schema.
return str | StringPromptValue | ChatPromptValueConcrete | list[AnyMessage]
@override
def invoke(
self,
input: LanguageModelInput,
config: RunnableConfig | None = None,
**kwargs: Any,
) -> Any:
return self._model(config).invoke(input, config=config, **kwargs)
@override
async def ainvoke(
self,
input: LanguageModelInput,
config: RunnableConfig | None = None,
**kwargs: Any,
) -> Any:
return await self._model(config).ainvoke(input, config=config, **kwargs)
@override
def stream(
self,
input: LanguageModelInput,
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> Iterator[Any]:
yield from self._model(config).stream(input, config=config, **kwargs)
@override
async def astream(
self,
input: LanguageModelInput,
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> AsyncIterator[Any]:
async for x in self._model(config).astream(input, config=config, **kwargs):
yield x
def batch(
self,
inputs: list[LanguageModelInput],
config: RunnableConfig | list[RunnableConfig] | None = None,
*,
return_exceptions: bool = False,
**kwargs: Any | None,
) -> list[Any]:
config = config or None
# If <= 1 config use the underlying models batch implementation.
if config is None or isinstance(config, dict) or len(config) <= 1:
if isinstance(config, list):
config = config[0]
return self._model(config).batch(
inputs,
config=config,
return_exceptions=return_exceptions,
**kwargs,
)
# If multiple configs default to Runnable.batch which uses executor to invoke
# in parallel.
return super().batch(
inputs,
config=config,
return_exceptions=return_exceptions,
**kwargs,
)
async def abatch(
self,
inputs: list[LanguageModelInput],
config: RunnableConfig | list[RunnableConfig] | None = None,
*,
return_exceptions: bool = False,
**kwargs: Any | None,
) -> list[Any]:
config = config or None
# If <= 1 config use the underlying models batch implementation.
if config is None or isinstance(config, dict) or len(config) <= 1:
if isinstance(config, list):
config = config[0]
return await self._model(config).abatch(
inputs,
config=config,
return_exceptions=return_exceptions,
**kwargs,
)
# If multiple configs default to Runnable.batch which uses executor to invoke
# in parallel.
return await super().abatch(
inputs,
config=config,
return_exceptions=return_exceptions,
**kwargs,
)
def batch_as_completed(
self,
inputs: Sequence[LanguageModelInput],
config: RunnableConfig | Sequence[RunnableConfig] | None = None,
*,
return_exceptions: bool = False,
**kwargs: Any,
) -> Iterator[tuple[int, Any | Exception]]:
config = config or None
# If <= 1 config use the underlying models batch implementation.
if config is None or isinstance(config, dict) or len(config) <= 1:
if isinstance(config, list):
config = config[0]
yield from self._model(cast("RunnableConfig", config)).batch_as_completed( # type: ignore[call-overload]
inputs,
config=config,
return_exceptions=return_exceptions,
**kwargs,
)
# If multiple configs default to Runnable.batch which uses executor to invoke
# in parallel.
else:
yield from super().batch_as_completed( # type: ignore[call-overload]
inputs,
config=config,
return_exceptions=return_exceptions,
**kwargs,
)
async def abatch_as_completed(
self,
inputs: Sequence[LanguageModelInput],
config: RunnableConfig | Sequence[RunnableConfig] | None = None,
*,
return_exceptions: bool = False,
**kwargs: Any,
) -> AsyncIterator[tuple[int, Any]]:
config = config or None
# If <= 1 config use the underlying models batch implementation.
if config is None or isinstance(config, dict) or len(config) <= 1:
if isinstance(config, list):
config = config[0]
async for x in self._model(
cast("RunnableConfig", config),
).abatch_as_completed( # type: ignore[call-overload]
inputs,
config=config,
return_exceptions=return_exceptions,
**kwargs,
):
yield x
# If multiple configs default to Runnable.batch which uses executor to invoke
# in parallel.
else:
async for x in super().abatch_as_completed( # type: ignore[call-overload]
inputs,
config=config,
return_exceptions=return_exceptions,
**kwargs,
):
yield x
@override
def transform(
self,
input: Iterator[LanguageModelInput],
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> Iterator[Any]:
yield from self._model(config).transform(input, config=config, **kwargs)
@override
async def atransform(
self,
input: AsyncIterator[LanguageModelInput],
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> AsyncIterator[Any]:
async for x in self._model(config).atransform(input, config=config, **kwargs):
yield x
@overload
def astream_log(
self,
input: Any,
config: RunnableConfig | None = None,
*,
diff: Literal[True] = True,
with_streamed_output_list: bool = True,
include_names: Sequence[str] | None = None,
include_types: Sequence[str] | None = None,
include_tags: Sequence[str] | None = None,
exclude_names: Sequence[str] | None = None,
exclude_types: Sequence[str] | None = None,
exclude_tags: Sequence[str] | None = None,
**kwargs: Any,
) -> AsyncIterator[RunLogPatch]: ...
@overload
def astream_log(
self,
input: Any,
config: RunnableConfig | None = None,
*,
diff: Literal[False],
with_streamed_output_list: bool = True,
include_names: Sequence[str] | None = None,
include_types: Sequence[str] | None = None,
include_tags: Sequence[str] | None = None,
exclude_names: Sequence[str] | None = None,
exclude_types: Sequence[str] | None = None,
exclude_tags: Sequence[str] | None = None,
**kwargs: Any,
) -> AsyncIterator[RunLog]: ...
@override
async def astream_log(
self,
input: Any,
config: RunnableConfig | None = None,
*,
diff: bool = True,
with_streamed_output_list: bool = True,
include_names: Sequence[str] | None = None,
include_types: Sequence[str] | None = None,
include_tags: Sequence[str] | None = None,
exclude_names: Sequence[str] | None = None,
exclude_types: Sequence[str] | None = None,
exclude_tags: Sequence[str] | None = None,
**kwargs: Any,
) -> AsyncIterator[RunLogPatch] | AsyncIterator[RunLog]:
async for x in self._model(config).astream_log( # type: ignore[call-overload, misc]
input,
config=config,
diff=diff,
with_streamed_output_list=with_streamed_output_list,
include_names=include_names,
include_types=include_types,
include_tags=include_tags,
exclude_tags=exclude_tags,
exclude_types=exclude_types,
exclude_names=exclude_names,
**kwargs,
):
yield x
@override
async def astream_events(
self,
input: Any,
config: RunnableConfig | None = None,
*,
version: Literal["v1", "v2"] = "v2",
include_names: Sequence[str] | None = None,
include_types: Sequence[str] | None = None,
include_tags: Sequence[str] | None = None,
exclude_names: Sequence[str] | None = None,
exclude_types: Sequence[str] | None = None,
exclude_tags: Sequence[str] | None = None,
**kwargs: Any,
) -> AsyncIterator[StreamEvent]:
async for x in self._model(config).astream_events(
input,
config=config,
version=version,
include_names=include_names,
include_types=include_types,
include_tags=include_tags,
exclude_tags=exclude_tags,
exclude_types=exclude_types,
exclude_names=exclude_names,
**kwargs,
):
yield x
# Explicitly added to satisfy downstream linters.
def bind_tools(
self,
tools: Sequence[dict[str, Any] | type[BaseModel] | Callable | BaseTool],
**kwargs: Any,
) -> Runnable[LanguageModelInput, AIMessage]:
return self.__getattr__("bind_tools")(tools, **kwargs)
# Explicitly added to satisfy downstream linters.
def with_structured_output(
self,
schema: dict | type[BaseModel],
**kwargs: Any,
) -> Runnable[LanguageModelInput, dict | BaseModel]:
return self.__getattr__("with_structured_output")(schema, **kwargs)
| _ConfigurableModel |
python | pyqtgraph__pyqtgraph | pyqtgraph/widgets/RemoteGraphicsView.py | {
"start": 3571,
"end": 4153
} | class ____(QtGui.QEnterEvent):
@staticmethod
def get_state(obj):
lpos = obj.position() if hasattr(obj, 'position') else obj.localPos()
wpos = obj.scenePosition() if hasattr(obj, 'scenePosition') else obj.windowPos()
gpos = obj.globalPosition() if hasattr(obj, 'globalPosition') else obj.screenPos()
return lpos, wpos, gpos
def __init__(self, rhs):
super().__init__(*self.get_state(rhs))
def __getstate__(self):
return self.get_state(self)
def __setstate__(self, state):
super().__init__(*state)
| EnterEvent |
python | dagster-io__dagster | python_modules/dagster/dagster/_grpc/types.py | {
"start": 28001,
"end": 28267
} | class ____(NamedTuple("_CanCancelExecutionRequest", [("run_id", str)])):
def __new__(cls, run_id: str):
return super().__new__(
cls,
run_id=check.str_param(run_id, "run_id"),
)
@whitelist_for_serdes
| CanCancelExecutionRequest |
python | python-openxml__python-docx | src/docx/oxml/shape.py | {
"start": 4226,
"end": 4368
} | class ____(BaseOxmlElement):
"""``<pic:cNvPicPr>`` element, specifies picture locking and resize behaviors."""
| CT_NonVisualPictureProperties |
python | pypa__warehouse | warehouse/sponsors/models.py | {
"start": 195,
"end": 1893
} | class ____(db.Model):
__tablename__ = "sponsors"
__repr__ = make_repr("name")
name: Mapped[str]
service: Mapped[str | None]
activity_markdown: Mapped[str | None]
link_url: Mapped[str]
color_logo_url: Mapped[str]
white_logo_url: Mapped[str | None]
# control flags
# TODO: These cannot use `bool_false` type, as `default=False` is performed
# locally prior to sending the value to the database.
# Changing incurs a migration, which we should do as a later refactor.
is_active: Mapped[bool] = mapped_column(default=False)
footer: Mapped[bool] = mapped_column(default=False)
psf_sponsor: Mapped[bool] = mapped_column(default=False)
infra_sponsor: Mapped[bool] = mapped_column(default=False)
one_time: Mapped[bool] = mapped_column(default=False)
sidebar: Mapped[bool] = mapped_column(default=False)
# pythondotorg integration
origin: Mapped[str | None] = mapped_column(default="manual")
level_name: Mapped[str | None]
level_order: Mapped[int | None] = mapped_column(default=0)
slug: Mapped[str | None]
@property
def color_logo_img(self):
return f'<img src="{self.color_logo_url}" alt="" loading="lazy">'
@property
def white_logo_img(self):
if not self.white_logo_url:
return ""
return (
'<img class="sponsors__image" '
+ f'src="{self.white_logo_url}" alt="" loading="lazy">'
)
@property
def activity(self):
"""
Render raw activity markdown as HTML
"""
if not self.activity_markdown:
return ""
return readme.render(self.activity_markdown, "text/markdown")
| Sponsor |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-genesys/source_genesys/source.py | {
"start": 425,
"end": 1901
} | class ____(HttpStream, ABC):
page_size = 500
@property
def url_base(self):
if self._api_base_url is not None:
return self._api_base_url + "/api/v2/"
return None
def __init__(self, api_base_url, *args, **kwargs):
self._api_base_url = api_base_url
super().__init__(*args, **kwargs)
def backoff_time(self, response: requests.Response) -> Optional[int]:
delay_time = response.headers.get("Retry-After")
if delay_time:
return int(delay_time)
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
response_json = response.json()
if response_json.get("nextUri"):
next_query_string = urllib.parse.urlsplit(response_json.get("nextUri")).query
return dict(urllib.parse.parse_qsl(next_query_string))
def request_params(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
params = {"pageSize": self.page_size}
# Handle pagination by inserting the next page's token in the request parameters
if next_page_token:
params.update(next_page_token)
return params
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
json_response = response.json()
yield from json_response.get("entities", [])
| GenesysStream |
python | mlflow__mlflow | tests/pyfunc/sample_code/streamable_model_code.py | {
"start": 76,
"end": 362
} | class ____(PythonModel):
def __init__(self):
pass
def predict(self, context, model_input, params=None):
pass
def predict_stream(self, context, model_input, params=None):
yield "test1"
yield "test2"
set_model(StreamableModel())
| StreamableModel |
python | kamyu104__LeetCode-Solutions | Python/number-of-atoms.py | {
"start": 60,
"end": 790
} | class ____(object):
def countOfAtoms(self, formula):
"""
:type formula: str
:rtype: str
"""
parse = re.findall(r"([A-Z][a-z]*)(\d*)|(\()|(\))(\d*)", formula)
stk = [collections.Counter()]
for name, m1, left_open, right_open, m2 in parse:
if name:
stk[-1][name] += int(m1 or 1)
if left_open:
stk.append(collections.Counter())
if right_open:
top = stk.pop()
for k, v in top.iteritems():
stk[-1][k] += v * int(m2 or 1)
return "".join(name + (str(stk[-1][name]) if stk[-1][name] > 1 else '') \
for name in sorted(stk[-1]))
| Solution |
python | streamlit__streamlit | e2e_playwright/st_help.py | {
"start": 3259,
"end": 3659
} | class ____:
"""My docstring."""
def __init__(self):
self.my_var_1 = 123
def my_func_1(self, a: int, b: bool = False) -> None:
"""Func with doc."""
def my_func_2(self):
# Func without doc.
pass
st.container(key="help_mixed_docs").help(FooWithMixedDocs())
# Create a class with very long documentation to demonstrate width differences
| FooWithMixedDocs |
python | ray-project__ray | python/ray/data/_internal/iterator/stream_split_iterator.py | {
"start": 4448,
"end": 11021
} | class ____:
"""Coordinator actor for routing blocks to output splits.
This actor runs a streaming executor locally on its main thread. Clients can
retrieve results via actor calls running on other threads.
"""
def __init__(
self,
dataset_wrapper: _DatasetWrapper,
n: int,
locality_hints: Optional[List[NodeIdStr]],
):
dataset = dataset_wrapper._dataset
# Set current DataContext.
# This needs to be a deep copy so that updates to the base dataset's
# context does not affect this process's global DataContext.
self._data_context = dataset.context.copy()
ray.data.DataContext._set_current(self._data_context)
if self._data_context.execution_options.locality_with_output is True:
self._data_context.execution_options.locality_with_output = locality_hints
logger.info(f"Auto configuring locality_with_output={locality_hints}")
self._base_dataset = dataset
self._n = n
self._locality_hints = locality_hints
self._lock = threading.RLock()
self._executor = None
# Guarded by self._lock.
self._next_bundle: Dict[int, RefBundle] = {}
self._unfinished_clients_in_epoch = n
self._cur_epoch = -1
# Add a new stats field to track coordinator overhead
self._coordinator_overhead_s = 0.0
def gen_epochs():
while True:
self._executor = self._base_dataset._plan.create_executor()
output_iterator = execute_to_legacy_bundle_iterator(
self._executor, dataset._plan
)
yield output_iterator
self._next_epoch = gen_epochs()
self._output_iterator = None
# Store the error raised from the `gen_epoch` call.
self._gen_epoch_error: Optional[Exception] = None
def stats(self) -> DatasetStats:
"""Returns stats from the base dataset."""
if self._executor:
stats = self._executor.get_stats()
else:
stats = self._base_dataset._plan.stats()
# Set the tracked overhead time
stats.streaming_split_coordinator_s.add(self._coordinator_overhead_s)
return stats
def start_epoch(self, split_idx: int) -> str:
"""Called to start an epoch.
Returns:
UUID for the epoch, which must be used when accessing results via get().
"""
# Wait for all clients to arrive at the barrier before starting a new epoch.
epoch_id = self._barrier(split_idx)
return epoch_id
def get(self, epoch_id: int, output_split_idx: int) -> Optional[RefBundle]:
"""Blocking get operation.
This is intended to be called concurrently from multiple clients.
"""
start_time = time.perf_counter()
if epoch_id != self._cur_epoch:
raise ValueError(
"Invalid iterator: the dataset has moved on to another epoch."
)
try:
# Ensure there is at least one bundle.
with self._lock:
if output_split_idx in self._next_bundle:
next_bundle = self._next_bundle[output_split_idx]
else:
next_bundle = None
# Fetch next bundle if needed.
while next_bundle is None or not next_bundle.blocks:
# This is a BLOCKING call, so do it outside the lock.
next_bundle = self._output_iterator.get_next(output_split_idx)
schema = next_bundle.schema
block = next_bundle.blocks[-1]
next_bundle = RefBundle(
blocks=next_bundle.blocks[:-1],
schema=next_bundle.schema,
owns_blocks=next_bundle.owns_blocks,
output_split_idx=next_bundle.output_split_idx,
)
# Accumulate any remaining blocks in next_bundle map as needed.
with self._lock:
self._next_bundle[output_split_idx] = next_bundle
if not next_bundle.blocks:
del self._next_bundle[output_split_idx]
return RefBundle(
[block], schema=schema, owns_blocks=next_bundle.owns_blocks
)
except StopIteration:
return None
finally:
# Track overhead time in the instance variable
self._coordinator_overhead_s += time.perf_counter() - start_time
def shutdown_executor(self):
"""Shuts down the internal data executor."""
with self._lock:
# Call shutdown on the executor
if self._executor is not None:
self._executor.shutdown(force=False)
def _barrier(self, split_idx: int) -> int:
"""Arrive and block until the start of the given epoch."""
# Decrement and await all clients to arrive here.
with self._lock:
starting_epoch = self._cur_epoch
self._unfinished_clients_in_epoch -= 1
start_time = time.time()
while (
self._cur_epoch == starting_epoch and self._unfinished_clients_in_epoch != 0
):
if time.time() - start_time > BLOCKED_CLIENT_WARN_TIMEOUT:
if log_once(f"stream_split_blocked_{split_idx}_{starting_epoch}"):
logger.warning(
f"StreamSplitDataIterator(epoch={starting_epoch}, "
f"split={split_idx}) blocked waiting on other clients "
f"for more than {BLOCKED_CLIENT_WARN_TIMEOUT}s. All "
"clients must read from the DataIterator splits at "
"the same time. This warning will not be printed again "
"for this epoch."
)
time.sleep(0.1)
# Advance to the next epoch.
with self._lock:
if self._cur_epoch == starting_epoch:
self._cur_epoch += 1
self._unfinished_clients_in_epoch = self._n
try:
self._output_iterator = next(self._next_epoch)
except Exception as e:
self._gen_epoch_error = e
if self._gen_epoch_error is not None:
# If there was an error when advancing to the next epoch,
# re-raise it for all threads.
raise self._gen_epoch_error
assert self._output_iterator is not None
return starting_epoch + 1
| SplitCoordinator |
python | viewflow__viewflow | viewflow/contrib/auth.py | {
"start": 4657,
"end": 15773
} | class ____(Viewset):
"""
Class-based URL configuration for `django.contrib.auth`.
This viewset provides URL patterns for user authentication, including login,
logout, and password management views.
.. code-block:: python
urlpatterns = [
path('accounts/', AuthViewset(
allow_password_change=False,
login_view=views.LoginView.as_view(
authentication_form=MyAuthForm
),
).urls),
]
"""
def __init__(self, *, allow_password_change=True, with_profile_view=True, **kwargs):
"""
Initialize the viewset with options for password change and profile view.
:param allow_password_change: Enable or disable password change/reset views. Defaults to True.
:param with_profile_view: Enable or disable profile view. Defaults to True.
"""
super().__init__(**kwargs)
self.allow_password_change = allow_password_change
self.with_profile_view = with_profile_view
"""
Login
"""
login_view_class = views.LoginView
def get_login_view_kwargs(self, **kwargs):
"""
Get keyword arguments for the login view.
:param kwargs: Additional keyword arguments for the login view.
:return: Dictionary of keyword arguments.
"""
result = {"form_class": AuthenticationForm}
result.update(kwargs)
return result
@viewprop
def login_view(self):
"""
Property to get the configured login view.
:return: Configured login view instance.
"""
return self.login_view_class.as_view(**self.get_login_view_kwargs())
@property
def login_path(self):
"""
Property to get the URL pattern for the login view.
:return: URL pattern for the login view.
"""
return path("login/", self.login_view, name="login")
"""
Logout
"""
logout_view_class = views.LogoutView
def get_logout_view_kwargs(self, **kwargs):
"""
Get keyword arguments for the logout view.
:param kwargs: Additional keyword arguments for the logout view.
:return: Dictionary of keyword arguments.
"""
return kwargs
@viewprop
def logout_view(self):
"""
Property to get the configured logout view.
:return: Configured logout view class.
"""
return self.logout_view_class.as_view(**self.get_logout_view_kwargs())
@property
def logout_path(self):
"""
Property to get the URL pattern for the logout view.
:return: URL pattern for the logout view.
"""
return path("logout/", self.logout_view, name="logout")
"""
Password Change
"""
pass_change_view_class = views.PasswordChangeView
def get_pass_change_view_kwargs(self, **kwargs):
"""
Get keyword arguments for the password change view.
:param kwargs: Additional keyword arguments for the password change view.
:return: Dictionary of keyword arguments.
"""
return kwargs
@viewprop
def pass_change_view(self):
"""
Property to get the configured password change view.
:return: Configured password change view class.
"""
return self.pass_change_view_class.as_view(**self.get_pass_change_view_kwargs())
@property
def pass_change_path(self):
"""
Property to get the URL pattern for the password change view.
:return: URL pattern for the password change view.
"""
if self.allow_password_change:
return path(
"password_change/", self.pass_change_view, name="password_change"
)
"""
Password Change Done
"""
pass_change_done_view_class = views.PasswordChangeDoneView
def get_pass_change_done_view_kwargs(self, **kwargs):
"""
Get keyword arguments for the password change done view.
:param kwargs: Additional keyword arguments for the password change done view.
:return: Dictionary of keyword arguments.
"""
return kwargs
@viewprop
def pass_change_done_view(self):
"""
Property to get the configured password change done view.
:return: Configured password change done view class.
"""
return self.pass_change_done_view_class.as_view(
**self.get_pass_change_done_view_kwargs()
)
@property
def pass_change_done_path(self):
"""
Property to get the URL pattern for the password change done view.
:return: URL pattern for the password change done view.
"""
if self.allow_password_change:
return path(
"password_change/done/",
self.pass_change_done_view,
name="password_change_done",
)
"""
Password Reset Request
"""
pass_reset_view_class = views.PasswordResetView
def get_pass_reset_view_kwargs(self, **kwargs):
"""
Get keyword arguments for the password reset request view.
:param kwargs: Additional keyword arguments for the password reset request view.
:return: Dictionary of keyword arguments.
"""
return kwargs
@viewprop
def pass_reset_view(self):
"""
Property to get the configured password reset request view.
:return: Configured password reset request view class.
"""
return self.pass_reset_view_class.as_view(**self.get_pass_reset_view_kwargs())
@property
def pass_reset_path(self):
"""
Property to get the URL pattern for the password reset request view.
:return: URL pattern for the password reset request view.
"""
if self.allow_password_change:
return path("password_reset/", self.pass_reset_view, name="password_reset")
"""
Password Reset Request Done
"""
pass_reset_done_view_class = views.PasswordResetDoneView
def get_pass_reset_done_view_kwargs(self, **kwargs):
"""
Get keyword arguments for the password reset request done view.
:param kwargs: Additional keyword arguments for the password reset request done view.
:return: Dictionary of keyword arguments.
"""
return kwargs
@viewprop
def pass_reset_done_view(self):
"""
Property to get the configured password reset request done view.
:return: Configured password reset request done view class.
"""
return self.pass_reset_done_view_class.as_view(
**self.get_pass_reset_done_view_kwargs()
)
@property
def pass_reset_done_path(self):
"""
Property to get the URL pattern for the password reset request done view.
:return: URL pattern for the password reset request done view.
"""
if self.allow_password_change:
return path(
"password_reset/done/",
self.pass_reset_done_view,
name="password_reset_done",
)
"""
Password Reset Request Confirm
"""
pass_reset_confirm_view_class = views.PasswordResetConfirmView
def get_pass_reset_confirm_view_kwargs(self, **kwargs):
"""
Get keyword arguments for the password reset confirm view.
:param kwargs: Additional keyword arguments for the password reset confirm view.
:return: Dictionary of keyword arguments.
"""
return kwargs
@viewprop
def pass_reset_confirm_view(self):
"""
Property to get the configured password reset confirm view.
:return: Configured password reset confirm view class.
"""
return self.pass_reset_confirm_view_class.as_view(
**self.get_pass_reset_confirm_view_kwargs()
)
@property
def pass_reset_confirm_path(self):
"""
Property to get the URL pattern for the password reset confirm view.
:return: URL pattern for the password reset confirm view.
"""
if self.allow_password_change:
return path(
"reset/<uidb64>/<token>/",
self.pass_reset_confirm_view,
name="password_reset_confirm",
)
"""
Password Request Request Confirmed
"""
pass_reset_complete_view_class = views.PasswordResetCompleteView
def get_pass_reset_complete_view_kwargs(self, **kwargs):
"""
Get keyword arguments for the password reset complete view.
:param kwargs: Additional keyword arguments for the password reset complete view.
:return: Dictionary of keyword arguments.
"""
return kwargs
@viewprop
def pass_reset_complete_view(self):
"""
Property to get the configured password reset complete view.
:return: Configured password reset complete view class.
"""
return self.pass_reset_complete_view_class.as_view(
**self.get_pass_reset_complete_view_kwargs()
)
@property
def pass_reset_complete_path(self):
"""
Property to get the URL pattern for the password reset complete view.
:return: URL pattern for the password reset complete view.
"""
if self.allow_password_change:
return path(
"reset/done/",
self.pass_reset_complete_view,
name="password_reset_complete",
)
"""
Profile
"""
profile_view_class = ProfileView
def get_profile_view_kwargs(self, **kwargs):
"""
Get keyword arguments for the profile view.
:param kwargs: Additional keyword arguments for the profile view.
:return: Dictionary of keyword arguments.
"""
return kwargs
@viewprop
def profile_view(self):
"""
Property to get the configured profile view.
:return: Configured profile view class.
"""
return self.profile_view_class.as_view(**self.get_profile_view_kwargs())
@property
def profile_path(self):
"""
Property to get the URL pattern for the profile view.
:return: URL pattern for the profile view.
"""
if self.with_profile_view:
return path("profile/", self.profile_view, name="profile")
"""
Django-allauth integration
"""
def get_allauth_providers(self):
try:
from allauth.socialaccount import providers
return providers.registry.get_class_list()
except ImportError:
return []
GREETINGS = [
_("Fantastic!"),
_("That looks awesome!"),
_("You are looking very well today!"),
_("I totally admire your spontaneity."),
_("I like your new haircut."),
_("What a beautiful costume!"),
_("You look very good in that suit"),
_("I love your style."),
_("I love your hair today"),
_("That color looks great on you!"),
]
| AuthViewset |
python | walkccc__LeetCode | solutions/1080. Insufficient Nodes in Root to Leaf Paths/1080.py | {
"start": 0,
"end": 444
} | class ____:
def sufficientSubset(
self,
root: TreeNode | None,
limit: int
) -> TreeNode | None:
if not root:
return None
if not root.left and not root.right:
return None if root.val < limit else root
root.left = self.sufficientSubset(root.left, limit - root.val)
root.right = self.sufficientSubset(root.right, limit - root.val)
return None if not root.left and not root.right else root
| Solution |
python | psf__black | tests/data/cases/preview_long_strings__regression.py | {
"start": 27187,
"end": 28015
} | class ____:
def foo():
some_func_call(
"xx {xxxxxxxxxxx}/xxxxxxxxxxx.xxx xxxx.xxx && xxxxxx -x "
"xxxx, ('xxxxxxx xxxxxx xxxx, xxxx') xxxxxx_xxxxx xxxxxx xxxx; "
"xxxx.xxxx_xxxxxx(['xxxx.xxx'], xxxx.xxxxxxx().xxxxxxxxxx)\" ",
None,
("xxxxxxxxxxx",),
),
xxxxxxx = {
"xx": (
"xxxx xxxxxxx xxxxxxxxx -x xxx -x /xxx/{0} -x xxx,xxx -xx {1} -xx {1} -xx"
" xxx=xxx_xxxx,xxx_xx,xxx_xxx,xxx_xxxx,xxx_xx,xxx_xxx | xxxxxx -x xxxxxxxx -x"
" xxxxxxxx -x"
),
"xx": (
"xxxx xxxxxxx xxxxxxxxx -x xxx -x /xxx/{0} -x xxx,xxx -xx {1} -xx {1} -xx"
" xxx=xxx_xxxx_xxx_xxxx,xxx_xx_xxx_xxxx,xxx_xxxx_xxx_xxxx,xxx_xx_xxxx_xxxx,xxx_xxx_xxxx,xxx_xxx_xxxx"
" xxxx=xxx | xxxxxx -x xxxxxxxx -x xxxxxxxx -x"
),
}
| A |
python | kamyu104__LeetCode-Solutions | Python/smallest-missing-integer-greater-than-sequential-prefix-sum.py | {
"start": 42,
"end": 431
} | class ____(object):
def missingInteger(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
total = nums[0]
for i in xrange(1, len(nums)):
if nums[i] != nums[i-1]+1:
break
total += nums[i]
lookup = set(nums)
while total in lookup:
total += 1
return total
| Solution |
python | networkx__networkx | networkx/algorithms/isomorphism/tests/test_ismags.py | {
"start": 15543,
"end": 19400
} | class ____:
def test_mcis(self):
# Example graphs from DOI: 10.1002/spe.588
graph1 = nx.Graph()
graph1.add_edges_from([(1, 2), (2, 3), (2, 4), (3, 4), (4, 5)])
graph1.nodes[1]["color"] = 0
graph2 = nx.Graph()
graph2.add_edges_from(
[(1, 2), (2, 3), (2, 4), (3, 4), (3, 5), (5, 6), (5, 7), (6, 7)]
)
graph2.nodes[1]["color"] = 1
graph2.nodes[6]["color"] = 2
graph2.nodes[7]["color"] = 2
ismags = iso.ISMAGS(
graph1, graph2, node_match=iso.categorical_node_match("color", None)
)
assert list(ismags.subgraph_isomorphisms_iter(symmetry=True)) == []
assert list(ismags.subgraph_isomorphisms_iter(symmetry=False)) == []
found_mcis = _matches_to_sets(ismags.largest_common_subgraph())
expected = _matches_to_sets(
[{2: 2, 3: 4, 4: 3, 5: 5}, {2: 4, 3: 2, 4: 3, 5: 5}]
)
assert expected == found_mcis
ismags = iso.ISMAGS(
graph2, graph1, node_match=iso.categorical_node_match("color", None)
)
assert list(ismags.subgraph_isomorphisms_iter(symmetry=True)) == []
assert list(ismags.subgraph_isomorphisms_iter(symmetry=False)) == []
found_mcis = _matches_to_sets(ismags.largest_common_subgraph())
# Same answer, but reversed.
expected = _matches_to_sets(
[{2: 2, 3: 4, 4: 3, 5: 5}, {4: 2, 2: 3, 3: 4, 5: 5}]
)
assert expected == found_mcis
def test_symmetry_mcis(self):
graph1 = nx.Graph()
nx.add_path(graph1, range(4))
graph2 = nx.Graph()
nx.add_path(graph2, range(3))
graph2.add_edge(1, 3)
# Only the symmetry of graph2 is taken into account here.
ismags1 = iso.ISMAGS(
graph1, graph2, node_match=iso.categorical_node_match("color", None)
)
assert list(ismags1.subgraph_isomorphisms_iter(symmetry=True)) == []
found_mcis = _matches_to_sets(ismags1.largest_common_subgraph())
expected = _matches_to_sets([{0: 0, 1: 1, 2: 2}, {1: 0, 3: 2, 2: 1}])
assert expected == found_mcis
# Only the symmetry of graph1 is taken into account here.
ismags2 = iso.ISMAGS(
graph2, graph1, node_match=iso.categorical_node_match("color", None)
)
assert list(ismags2.subgraph_isomorphisms_iter(symmetry=True)) == []
found_mcis = _matches_to_sets(ismags2.largest_common_subgraph())
expected = _matches_to_sets(
[
{3: 2, 0: 0, 1: 1},
{2: 0, 0: 2, 1: 1},
{3: 0, 0: 2, 1: 1},
{3: 0, 1: 1, 2: 2},
{0: 0, 1: 1, 2: 2},
{2: 0, 3: 2, 1: 1},
]
)
assert expected == found_mcis
found_mcis1 = _matches_to_sets(ismags1.largest_common_subgraph(symmetry=False))
found_mcis2 = ismags2.largest_common_subgraph(symmetry=False)
found_mcis2 = [{v: k for k, v in d.items()} for d in found_mcis2]
found_mcis2 = _matches_to_sets(found_mcis2)
expected = _matches_to_sets(
[
{3: 2, 1: 3, 2: 1},
{2: 0, 0: 2, 1: 1},
{1: 2, 3: 3, 2: 1},
{3: 0, 1: 3, 2: 1},
{0: 2, 2: 3, 1: 1},
{3: 0, 1: 2, 2: 1},
{2: 0, 0: 3, 1: 1},
{0: 0, 2: 3, 1: 1},
{1: 0, 3: 3, 2: 1},
{1: 0, 3: 2, 2: 1},
{0: 3, 1: 1, 2: 2},
{0: 0, 1: 1, 2: 2},
]
)
assert expected == found_mcis1
assert expected == found_mcis2
def is_isomorphic(G, SG, edge_match=None, node_match=None):
return iso.ISMAGS(G, SG, node_match, edge_match).is_isomorphic()
| TestLargestCommonSubgraph |
python | huggingface__transformers | src/transformers/models/granitemoehybrid/modular_granitemoehybrid.py | {
"start": 3957,
"end": 4096
} | class ____(GraniteMoeSharedMLP):
def __init__(self, config: GraniteMoeHybridConfig):
super().__init__(config)
| GraniteMoeHybridMLP |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_dataflow.py | {
"start": 4667,
"end": 6630
} | class ____:
def test_support_project_id_parameter(self):
mock_instance = mock.MagicMock()
class FixtureFallback:
@_fallback_to_project_id_from_variables
def test_fn(self, *args, **kwargs):
mock_instance(*args, **kwargs)
FixtureFallback().test_fn(project_id="TEST")
mock_instance.assert_called_once_with(project_id="TEST")
def test_support_project_id_from_variable_parameter(self):
mock_instance = mock.MagicMock()
class FixtureFallback:
@_fallback_to_project_id_from_variables
def test_fn(self, *args, **kwargs):
mock_instance(*args, **kwargs)
FixtureFallback().test_fn(variables={"project": "TEST"})
mock_instance.assert_called_once_with(project_id="TEST", variables={})
def test_raise_exception_on_conflict(self):
mock_instance = mock.MagicMock()
class FixtureFallback:
@_fallback_to_project_id_from_variables
def test_fn(self, *args, **kwargs):
mock_instance(*args, **kwargs)
with pytest.raises(
AirflowException,
match="The mutually exclusive parameter `project_id` and `project` key in `variables` parameter "
"are both present\\. Please remove one\\.",
):
FixtureFallback().test_fn(variables={"project": "TEST"}, project_id="TEST2")
def test_raise_exception_on_positional_argument(self):
mock_instance = mock.MagicMock()
class FixtureFallback:
@_fallback_to_project_id_from_variables
def test_fn(self, *args, **kwargs):
mock_instance(*args, **kwargs)
with pytest.raises(
AirflowException, match="You must use keyword arguments in this methods rather than positional"
):
FixtureFallback().test_fn({"project": "TEST"}, "TEST2")
@pytest.mark.db_test
| TestFallbackToVariables |
python | readthedocs__readthedocs.org | readthedocs/organizations/managers.py | {
"start": 1324,
"end": 1816
} | class ____(models.Manager):
"""Manager for queries on team members."""
def sorted(self):
"""
Return sorted list of members and invites.
Return list of members and invites sorted by members first, and null
members (invites) last.
"""
return (
self.get_queryset()
.annotate(
null_member=models.Count("member"),
)
.order_by("-null_member", "member")
)
| TeamMemberManager |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/testing/codegen.py | {
"start": 888,
"end": 1118
} | class ____(object):
sample_map = None
def sample(self):
nodes, magnitudes = zip(*self.sample_map.items())
return np.random.choice(
nodes, p=np.array(magnitudes, dtype='float32') / np.sum(magnitudes))
| NodeSampler |
python | google__jax | jax/_src/pallas/mosaic_gpu/core.py | {
"start": 7533,
"end": 12796
} | class ____(enum.Enum):
"""Thread semantics for a primitives at the Pallas user-level."""
Warp = enum.auto()
Warpgroup = enum.auto()
# Convenience constants for (lowering, primitive) thread semantics pairs.
LANExWG_SEMANTICS = (
mgpu.LoweringSemantics.Lane, PrimitiveSemantics.Warpgroup)
LANExWARP_SEMANTICS = (
mgpu.LoweringSemantics.Lane, PrimitiveSemantics.Warp)
WGxWG_SEMANTICS = (
mgpu.LoweringSemantics.Warpgroup, PrimitiveSemantics.Warpgroup)
# TODO(justinfu): Reconcile with pl.kernel.
def kernel(
body: Callable[..., None],
out_shape: object,
*,
scratch_shapes: pallas_core.ScratchShapeTree = (),
compiler_params: pallas_core.CompilerParams | None = None,
# Mesh kwargs
grid: tuple[int, ...] = (),
grid_names: tuple[str, ...] = (),
cluster: tuple[int, ...] = (),
cluster_names: tuple[str, ...] = (),
num_threads: int | None = None,
thread_name: str | None = None,
**mesh_kwargs: object,
):
"""Entry point for defining a Mosaic GPU kernel.
Args:
body: The kernel body, which should take as arguments the input, output,
and scratch Refs. The number of input Refs is determined by the number
of arguments passed into kernel returned by this function. The number of
output and scratch Refs are determined by `out_shape` and `scratch_shapes`
respectively.
out_shape: a PyTree of :class:`jax.ShapeDtypeStruct` describing the shape
and dtypes of the outputs.
scratch_shapes: an iterable (may be nested) of GPUMemoryRef describing
scratch Refs to allocate for this kernel.
compiler_params: Additional compiler options. See the `CompilerParams`
dataclass for more details.
grid: A tuple of integers specifying the size of the kernel grid.
grid_names: The axis names of the grid. Must be the same length as `grid`.
cluster: A tuple of integers specifying the size of the kernel cluster.
cluster_names: The axis names of the grid. Must be the same length as
`cluster`.
num_threads: The number of threads to launch per block. Note that these
do not correspond to CUDA threads, but rather to warpgroups on Hopper
and Blackwell GPUs.
thread_name: The axis name used to query the thread index.
**mesh_kwargs: Additional mesh kwargs. See `Mesh` for more details.
Returns:
A function that runs the kernel. It should take any number of input
operands and returns an output with the same PyTree structure as
`out_shape`.
"""
if unwrap_out := not isinstance(out_shape, (tuple, list)):
out_shape = (out_shape,)
@custom_batching.custom_vmap
def wrapper(*operands):
def stateful(operand_and_out_refs):
operand_refs, out_refs = operand_and_out_refs
mesh = Mesh(
grid=grid,
grid_names=grid_names,
cluster=cluster,
cluster_names=cluster_names,
num_threads=num_threads,
thread_name=thread_name,
**mesh_kwargs)
_thread_name = mesh.thread_name if mesh.thread_name is not None else ()
def cmap_body():
pallas_primitives.run_scoped(
functools.partial(body, *operand_refs, *out_refs),
*(scratch_shapes if isinstance(scratch_shapes, Sequence) else ()),
collective_axes=_thread_name,
**(scratch_shapes if isinstance(scratch_shapes, Mapping) else {}),
)
if mesh.kernel_name is not None:
cmap_body.__name__ = mesh.kernel_name
else:
# The body function name is used to set the name of the kernel as a
# fallback if the kernel name is not set explicitly.
cmap_body.__name__ = getattr(body, "__name__", "anonymous")
pallas_core.core_map(mesh, compiler_params=compiler_params)(cmap_body)
_, outs = state_discharge.run_state(stateful)((
operands,
jax.tree.map(lambda s: jax.lax.empty(s.shape, s.dtype), out_shape),
))
return outs[0] if unwrap_out else outs
@wrapper.def_vmap
def _vmap_rule(axis_size, in_batched, *args):
axis_name = object()
def batched_body(*refs):
idx = lax.axis_index(axis_name)
lens = (len(args), len(out_shape))
operand_refs, out_refs, scratch_refs = util.split_list(refs, lens)
slice_ref = lambda r, b=True: (r.at[idx] if b else r)
operand_refs = tree_util.tree_map(slice_ref, operand_refs, in_batched)
out_refs = tree_util.tree_map(slice_ref, out_refs)
return body(*operand_refs, *out_refs, *scratch_refs)
out_shape_ = out_shape[0] if unwrap_out else out_shape
add_batch_dim = lambda x: x.update(shape=(axis_size, *x.shape))
mesh_kwargs_ = dict(mesh_kwargs)
out = kernel(
batched_body,
out_shape=tree_util.tree_map(add_batch_dim, out_shape_),
scratch_shapes=scratch_shapes,
compiler_params=compiler_params,
grid=(axis_size,) + grid,
grid_names=(axis_name,) + grid_names,
cluster=cluster,
cluster_names=cluster_names,
num_threads=num_threads,
thread_name=thread_name,
**mesh_kwargs_,
)(*args)
out_batched = tree_util.tree_map(lambda _: True, out_shape_)
return out, out_batched
return wrapper
@dataclasses.dataclass(frozen=True)
| PrimitiveSemantics |
python | great-expectations__great_expectations | great_expectations/core/metric_domain_types.py | {
"start": 103,
"end": 614
} | class ____(enum.Enum):
"""Enum type, whose members signify the data "Domain", on which a metric can be computed.
A wide variety of "Domain" types can be defined with applicable metrics associated with their respective "Domain"
types. The "Domain" types currently in use (`TABLE`, `COLUMN`, `COLUMN_PAIR`, and `MULTICOLUMN`) are declared here.
""" # noqa: E501 # FIXME CoP
TABLE = "table"
COLUMN = "column"
COLUMN_PAIR = "column_pair"
MULTICOLUMN = "multicolumn"
| MetricDomainTypes |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingLiteralMember1.py | {
"start": 2568,
"end": 2644
} | class ____:
@property
def type(self) -> Literal[0]:
return 0
| E |
python | apache__airflow | task-sdk/src/airflow/sdk/exceptions.py | {
"start": 4551,
"end": 5027
} | class ____(AirflowException):
"""
Raise when the task should be re-scheduled at a later time.
:param reschedule_date: The date when the task should be rescheduled
"""
def __init__(self, reschedule_date):
super().__init__()
self.reschedule_date = reschedule_date
def serialize(self):
cls = self.__class__
return f"{cls.__module__}.{cls.__name__}", (), {"reschedule_date": self.reschedule_date}
| AirflowRescheduleException |
python | falconry__falcon | tests/asgi/test_middleware_asgi.py | {
"start": 31,
"end": 134
} | class ____:
async def process_request(self, req, resp):
pass
| MiddlewareIncompatibleWithWSGI_A |
python | sqlalchemy__sqlalchemy | test/sql/test_syntax_extensions.py | {
"start": 2728,
"end": 2984
} | class ____(SyntaxExtension, ClauseElement):
_traverse_internals = []
def apply_to_insert(self, insert_stmt):
insert_stmt.apply_syntax_extension_point(
lambda existing: [self],
"post_values",
)
| PostValuesClause |
python | pyca__cryptography | tests/x509/test_ocsp.py | {
"start": 6738,
"end": 12627
} | class ____:
def test_add_cert_twice(self):
cert, issuer = _cert_and_issuer()
builder = ocsp.OCSPRequestBuilder()
builder = builder.add_certificate(cert, issuer, hashes.SHA1())
# Fails calling a second time
with pytest.raises(ValueError):
builder.add_certificate(cert, issuer, hashes.SHA1())
# Fails calling a second time with add_certificate_by_hash
with pytest.raises(ValueError):
builder.add_certificate_by_hash(
b"0" * 20, b"0" * 20, 1, hashes.SHA1()
)
def test_add_cert_by_hash_twice(self):
cert, issuer = _cert_and_issuer()
builder = ocsp.OCSPRequestBuilder()
builder = builder.add_certificate_by_hash(
b"0" * 20, b"0" * 20, 1, hashes.SHA1()
)
# Fails calling a second time
with pytest.raises(ValueError):
builder.add_certificate_by_hash(
b"0" * 20, b"0" * 20, 1, hashes.SHA1()
)
# Fails calling a second time with add_certificate
with pytest.raises(ValueError):
builder.add_certificate(cert, issuer, hashes.SHA1())
def test_add_cert_by_hash_bad_hash(self):
builder = ocsp.OCSPRequestBuilder()
with pytest.raises(ValueError):
builder.add_certificate_by_hash(
b"0" * 20,
b"0" * 20,
1,
"notahash", # type:ignore[arg-type]
)
with pytest.raises(ValueError):
builder.add_certificate_by_hash(
b"0" * 19, b"0" * 20, 1, hashes.SHA1()
)
with pytest.raises(ValueError):
builder.add_certificate_by_hash(
b"0" * 20, b"0" * 21, 1, hashes.SHA1()
)
with pytest.raises(TypeError):
builder.add_certificate_by_hash(
b"0" * 20,
b"0" * 20,
"notanint", # type:ignore[arg-type]
hashes.SHA1(),
)
def test_create_ocsp_request_no_req(self):
builder = ocsp.OCSPRequestBuilder()
with pytest.raises(ValueError):
builder.build()
def test_create_ocsp_request_invalid_alg(self):
cert, issuer = _cert_and_issuer()
builder = ocsp.OCSPRequestBuilder()
with pytest.raises(ValueError):
builder.add_certificate(cert, issuer, hashes.MD5())
def test_add_extension_twice(self):
builder = ocsp.OCSPRequestBuilder()
builder = builder.add_extension(x509.OCSPNonce(b"123"), False)
with pytest.raises(ValueError):
builder.add_extension(x509.OCSPNonce(b"123"), False)
def test_add_invalid_extension(self):
builder = ocsp.OCSPRequestBuilder()
with pytest.raises(TypeError):
builder.add_extension(
"notanext", # type:ignore[arg-type]
False,
)
def test_unsupported_extension(self):
cert, issuer = _cert_and_issuer()
builder = (
ocsp.OCSPRequestBuilder()
.add_extension(DummyExtension(), critical=False)
.add_certificate(cert, issuer, hashes.SHA256())
)
with pytest.raises(NotImplementedError):
builder.build()
def test_create_ocsp_request_invalid_cert(self):
cert, issuer = _cert_and_issuer()
builder = ocsp.OCSPRequestBuilder()
with pytest.raises(TypeError):
builder.add_certificate(
b"notacert", # type:ignore[arg-type]
issuer,
hashes.SHA1(),
)
with pytest.raises(TypeError):
builder.add_certificate(
cert,
b"notacert", # type:ignore[arg-type]
hashes.SHA1(),
)
def test_create_ocsp_request(self):
cert, issuer = _cert_and_issuer()
builder = ocsp.OCSPRequestBuilder()
builder = builder.add_certificate(cert, issuer, hashes.SHA1())
req = builder.build()
serialized = req.public_bytes(serialization.Encoding.DER)
assert serialized == base64.b64decode(
b"MEMwQTA/MD0wOzAJBgUrDgMCGgUABBRAC0Z68eay0wmDug1gfn5ZN0gkxAQUw5zz"
b"/NNGCDS7zkZ/oHxb8+IIy1kCAj8g"
)
@pytest.mark.parametrize(
("ext", "critical"),
[
[x509.OCSPNonce(b"0000"), False],
[x509.OCSPNonce(b"\x00\x01\x02"), True],
],
)
def test_create_ocsp_request_with_extension(self, ext, critical):
cert, issuer = _cert_and_issuer()
builder = ocsp.OCSPRequestBuilder()
builder = builder.add_certificate(
cert, issuer, hashes.SHA1()
).add_extension(ext, critical)
req = builder.build()
assert len(req.extensions) == 1
assert req.extensions[0].value == ext
assert req.extensions[0].oid == ext.oid
assert req.extensions[0].critical is critical
def test_add_cert_by_hash(self):
cert, _ = _cert_and_issuer()
builder = ocsp.OCSPRequestBuilder()
h = hashes.Hash(hashes.SHA1())
h.update(cert.issuer.public_bytes())
issuer_name_hash = h.finalize()
# issuer_key_hash is a hash of the public key BitString DER,
# not the subjectPublicKeyInfo
issuer_key_hash = base64.b64decode(b"w5zz/NNGCDS7zkZ/oHxb8+IIy1k=")
builder = builder.add_certificate_by_hash(
issuer_name_hash,
issuer_key_hash,
cert.serial_number,
hashes.SHA1(),
)
req = builder.build()
serialized = req.public_bytes(serialization.Encoding.DER)
assert serialized == base64.b64decode(
b"MEMwQTA/MD0wOzAJBgUrDgMCGgUABBRAC0Z68eay0wmDug1gfn5ZN0gkxAQUw5zz"
b"/NNGCDS7zkZ/oHxb8+IIy1kCAj8g"
)
| TestOCSPRequestBuilder |
python | numba__numba | numba/core/typing/collections.py | {
"start": 3136,
"end": 4025
} | class ____(AttributeTemplate):
key = types.NamedTupleClass
def resolve___call__(self, classty):
"""
Resolve the named tuple constructor, aka the class's __call__ method.
"""
instance_class = classty.instance_class
pysig = utils.pysignature(instance_class)
def typer(*args, **kws):
# Fold keyword args
try:
bound = pysig.bind(*args, **kws)
except TypeError as e:
msg = "In '%s': %s" % (instance_class, e)
e.args = (msg,)
raise
assert not bound.kwargs
return types.BaseTuple.from_types(bound.args, instance_class)
# Override the typer's pysig to match the namedtuple constructor's
typer.pysig = pysig
return types.Function(make_callable_template(self.key, typer))
| NamedTupleClassAttribute |
python | ansible__ansible | test/units/galaxy/test_collection_install.py | {
"start": 1006,
"end": 48759
} | class ____():
def __init__(self):
self.candidates = []
def func_wrapper(self, func):
def run(*args, **kwargs):
self.candidates = func(*args, **kwargs)
return self.candidates
return run
def call_galaxy_cli(args):
orig = co.GlobalCLIArgs._Singleton__instance
co.GlobalCLIArgs._Singleton__instance = None
try:
GalaxyCLI(args=['ansible-galaxy', 'collection'] + args).run()
finally:
co.GlobalCLIArgs._Singleton__instance = orig
@pytest.fixture(autouse=True)
def reset_cli_args():
co.GlobalCLIArgs._Singleton__instance = None
yield
co.GlobalCLIArgs._Singleton__instance = None
@pytest.fixture()
def collection_artifact(request, tmp_path_factory):
test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
namespace = 'ansible_namespace'
collection = 'collection'
skeleton_path = os.path.join(os.path.dirname(os.path.split(__file__)[0]), 'cli', 'test_data', 'collection_skeleton')
collection_path = os.path.join(test_dir, namespace, collection)
call_galaxy_cli(['init', '%s.%s' % (namespace, collection), '-c', '--init-path', test_dir,
'--collection-skeleton', skeleton_path])
dependencies = getattr(request, 'param', {})
galaxy_yml = os.path.join(collection_path, 'galaxy.yml')
with open(galaxy_yml, 'rb+') as galaxy_obj:
existing_yaml = yaml.safe_load(galaxy_obj)
existing_yaml['dependencies'] = dependencies
galaxy_obj.seek(0)
galaxy_obj.write(to_bytes(yaml.safe_dump(existing_yaml)))
galaxy_obj.truncate()
# Create a file with +x in the collection so we can test the permissions
execute_path = os.path.join(collection_path, 'runme.sh')
with open(execute_path, mode='wb') as fd:
fd.write(b"echo hi")
os.chmod(execute_path, os.stat(execute_path).st_mode | stat.S_IEXEC)
call_galaxy_cli(['build', collection_path, '--output-path', test_dir])
collection_tar = os.path.join(test_dir, '%s-%s-0.1.0.tar.gz' % (namespace, collection))
return to_bytes(collection_path), to_bytes(collection_tar)
@pytest.fixture()
def galaxy_server():
context.CLIARGS._store = {'ignore_certs': False}
galaxy_api = api.GalaxyAPI(None, 'test_server', 'https://galaxy.ansible.com')
galaxy_api.get_collection_signatures = MagicMock(return_value=[])
return galaxy_api
def test_concrete_artifact_manager_scm_no_executable(monkeypatch):
    """A missing git executable should surface as a descriptive AnsibleError."""
    url = 'https://github.com/org/repo'
    version = 'commitish'
    mock_subprocess_check_call = MagicMock()
    monkeypatch.setattr(collection.concrete_artifact_manager.subprocess, 'check_call', mock_subprocess_check_call)
    mock_mkdtemp = MagicMock(return_value='')
    monkeypatch.setattr(collection.concrete_artifact_manager, 'mkdtemp', mock_mkdtemp)
    # Simulate `git` not being found on PATH.
    mock_get_bin_path = MagicMock(side_effect=[ValueError('Failed to find required executable')])
    monkeypatch.setattr(collection.concrete_artifact_manager, 'get_bin_path', mock_get_bin_path)
    error = re.escape(
        "Could not find git executable to extract the collection from the Git repository `https://github.com/org/repo`"
    )
    with pytest.raises(AnsibleError, match=error):
        collection.concrete_artifact_manager._extract_collection_from_git(url, version, b'path')
@pytest.mark.parametrize(
    'url,version,trailing_slash',
    [
        ('https://github.com/org/repo', 'commitish', False),
        ('https://github.com/org/repo,commitish', None, False),
        ('https://github.com/org/repo/,commitish', None, True),
        ('https://github.com/org/repo#,commitish', None, False),
    ]
)
def test_concrete_artifact_manager_scm_cmd(url, version, trailing_slash, monkeypatch):
    """A pinned (non-HEAD) commitish should produce a full clone followed by a checkout."""
    context.CLIARGS._store = {'ignore_certs': False}
    mock_subprocess_check_call = MagicMock()
    monkeypatch.setattr(collection.concrete_artifact_manager.subprocess, 'check_call', mock_subprocess_check_call)
    mock_mkdtemp = MagicMock(return_value='')
    monkeypatch.setattr(collection.concrete_artifact_manager, 'mkdtemp', mock_mkdtemp)
    collection.concrete_artifact_manager._extract_collection_from_git(url, version, b'path')
    # Exactly two git invocations are expected: clone, then checkout.
    assert mock_subprocess_check_call.call_count == 2
    repo = 'https://github.com/org/repo'
    if trailing_slash:
        repo += '/'
    git_executable = get_bin_path('git')
    clone_cmd = [git_executable, 'clone', repo, '']
    assert mock_subprocess_check_call.call_args_list[0].args[0] == clone_cmd
    assert mock_subprocess_check_call.call_args_list[1].args[0] == (git_executable, 'checkout', 'commitish')
@pytest.mark.parametrize(
    'url,version,trailing_slash',
    [
        ('https://github.com/org/repo', 'HEAD', False),
        ('https://github.com/org/repo,HEAD', None, False),
        ('https://github.com/org/repo/,HEAD', None, True),
        ('https://github.com/org/repo#,HEAD', None, False),
        ('https://github.com/org/repo', None, False),
    ]
)
def test_concrete_artifact_manager_scm_cmd_shallow(url, version, trailing_slash, monkeypatch):
    """HEAD (or no version at all) should use a shallow `--depth=1` clone followed by a checkout."""
    context.CLIARGS._store = {'ignore_certs': False}
    mock_subprocess_check_call = MagicMock()
    monkeypatch.setattr(collection.concrete_artifact_manager.subprocess, 'check_call', mock_subprocess_check_call)
    mock_mkdtemp = MagicMock(return_value='')
    monkeypatch.setattr(collection.concrete_artifact_manager, 'mkdtemp', mock_mkdtemp)
    collection.concrete_artifact_manager._extract_collection_from_git(url, version, b'path')
    # Exactly two git invocations are expected: shallow clone, then checkout.
    assert mock_subprocess_check_call.call_count == 2
    repo = 'https://github.com/org/repo'
    if trailing_slash:
        repo += '/'
    git_executable = get_bin_path('git')
    shallow_clone_cmd = [git_executable, 'clone', '--depth=1', repo, '']
    assert mock_subprocess_check_call.call_args_list[0].args[0] == shallow_clone_cmd
    assert mock_subprocess_check_call.call_args_list[1].args[0] == (git_executable, 'checkout', 'HEAD')
@pytest.mark.parametrize(
    'ignore_certs_cli,ignore_certs_config,expected_ignore_certs',
    [
        (False, False, False),
        (False, True, True),
        (True, False, True),
    ]
)
def test_concrete_artifact_manager_scm_cmd_validate_certs(ignore_certs_cli, ignore_certs_config, expected_ignore_certs, monkeypatch):
    """Ignoring certs via either the CLI flag or config should append `-c http.sslVerify=false` to the clone."""
    context.CLIARGS._store = {'ignore_certs': ignore_certs_cli}
    monkeypatch.setattr(C, 'GALAXY_IGNORE_CERTS', ignore_certs_config)
    mock_subprocess_check_call = MagicMock()
    monkeypatch.setattr(collection.concrete_artifact_manager.subprocess, 'check_call', mock_subprocess_check_call)
    mock_mkdtemp = MagicMock(return_value='')
    monkeypatch.setattr(collection.concrete_artifact_manager, 'mkdtemp', mock_mkdtemp)
    url = 'https://github.com/org/repo'
    version = 'HEAD'
    collection.concrete_artifact_manager._extract_collection_from_git(url, version, b'path')
    assert mock_subprocess_check_call.call_count == 2
    git_executable = get_bin_path('git')
    clone_cmd = [git_executable, 'clone', '--depth=1', url, '']
    if expected_ignore_certs:
        clone_cmd.extend(['-c', 'http.sslVerify=false'])
    assert mock_subprocess_check_call.call_args_list[0].args[0] == clone_cmd
    assert mock_subprocess_check_call.call_args_list[1].args[0] == (git_executable, 'checkout', 'HEAD')
def test_build_requirement_from_path(collection_artifact):
    """A collection source directory should resolve to a Requirement carrying its galaxy.yml metadata."""
    b_collection_path, b_collection_tar = collection_artifact
    tmp_path = os.path.join(os.path.split(b_collection_tar)[0], b'temp')
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
    actual = Requirement.from_dir_path_as_unknown(b_collection_path, concrete_artifact_cm)
    assert (actual.namespace, actual.name, actual.ver) == (u'ansible_namespace', u'collection', u'0.1.0')
    assert actual.src == b_collection_path
@pytest.mark.parametrize('version', ['1.1.1', '1.1.0', '1.0.0'])
def test_build_requirement_from_path_with_manifest(version, collection_artifact):
    """MANIFEST.json metadata should override the directory name when both are present."""
    manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
    manifest_value = json.dumps({
        'collection_info': {
            'namespace': 'namespace',
            'name': 'name',
            'version': version,
            'dependencies': {
                'ansible_namespace.collection': '*'
            }
        }
    })
    with open(manifest_path, 'wb') as manifest_obj:
        manifest_obj.write(to_bytes(manifest_value))
    tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
    actual = Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
    # While the folder name suggests a different collection, we treat MANIFEST.json as the source of truth.
    assert actual.namespace == u'namespace'
    assert actual.name == u'name'
    assert actual.src == collection_artifact[0]
    assert actual.ver == to_text(version)
def test_build_requirement_from_path_invalid_manifest(collection_artifact):
    """A corrupt (non-JSON) MANIFEST.json should raise AnsibleError when loading from a dir path."""
    b_collection_path, b_collection_tar = collection_artifact
    with open(os.path.join(b_collection_path, b'MANIFEST.json'), 'wb') as manifest_obj:
        manifest_obj.write(b"not json")
    tmp_path = os.path.join(os.path.split(b_collection_tar)[0], b'temp')
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
    expected = "Collection tar file member MANIFEST.json does not contain a valid json string."
    with pytest.raises(AnsibleError, match=expected):
        Requirement.from_dir_path_as_unknown(b_collection_path, concrete_artifact_cm)
def test_build_artifact_from_path_no_version(collection_artifact, monkeypatch):
    """An empty version in MANIFEST.json (a built artifact) must fail SemVer validation."""
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)
    # a collection artifact should always contain a valid version
    manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
    manifest_value = json.dumps({
        'collection_info': {
            'namespace': 'namespace',
            'name': 'name',
            'version': '',
            'dependencies': {}
        }
    })
    with open(manifest_path, 'wb') as manifest_obj:
        manifest_obj.write(to_bytes(manifest_value))
    tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
    expected = (
        '^Collection metadata file `.*` at `.*` is expected to have a valid SemVer '
        'version value but got {empty_unicode_string!r}$'.
        format(empty_unicode_string=u'')
    )
    with pytest.raises(AnsibleError, match=expected):
        Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
def test_build_requirement_from_path_no_version(collection_artifact, monkeypatch):
    """A falsey version in galaxy.yml (a collection in development) should resolve to '*'."""
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)
    # version may be falsey/arbitrary strings for collections in development
    manifest_path = os.path.join(collection_artifact[0], b'galaxy.yml')
    metadata = {
        'authors': ['Ansible'],
        'readme': 'README.md',
        'namespace': 'namespace',
        'name': 'name',
        'version': '',
        'dependencies': {},
    }
    with open(manifest_path, 'wb') as manifest_obj:
        manifest_obj.write(to_bytes(yaml.safe_dump(metadata)))
    tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
    actual = Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
    # While the folder name suggests a different collection, we treat galaxy.yml as the source of truth.
    assert actual.namespace == u'namespace'
    assert actual.name == u'name'
    assert actual.src == collection_artifact[0]
    assert actual.ver == u'*'
def test_build_requirement_from_tar(collection_artifact):
    """A built tarball should resolve to a Requirement carrying the collection's metadata."""
    b_collection_path, b_collection_tar = collection_artifact
    tmp_path = os.path.join(os.path.split(b_collection_tar)[0], b'temp')
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
    actual = Requirement.from_requirement_dict({'name': to_text(b_collection_tar)}, concrete_artifact_cm)
    assert (actual.namespace, actual.name, actual.ver) == (u'ansible_namespace', u'collection', u'0.1.0')
    assert actual.src == to_text(b_collection_tar)
def test_build_requirement_from_tar_url(tmp_path_factory):
    """Downloading a collection tar from an unreachable URL should raise AnsibleError."""
    test_url = 'https://example.com/org/repo/sample.tar.gz'
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    expected = fr"^Failed to download collection tar from '{to_text(test_url)}'"
    with pytest.raises(AnsibleError, match=expected):
        Requirement.from_requirement_dict({'name': test_url, 'type': 'url'}, concrete_artifact_cm)
def test_build_requirement_from_tar_url_wrong_type(tmp_path_factory):
    """An http(s) URL declared as type 'file' should fail the local-file lookup."""
    test_url = 'https://example.com/org/repo/sample.tar.gz'
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    expected = fr"^Unable to find collection artifact file at '{to_text(test_url)}'\.$"
    with pytest.raises(AnsibleError, match=expected):
        # Specified wrong collection type for http URL
        Requirement.from_requirement_dict({'name': test_url, 'type': 'file'}, concrete_artifact_cm)
def test_build_requirement_from_tar_fail_not_tar(tmp_path_factory):
    """A file that is not a valid tar archive should be rejected with AnsibleError."""
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    bogus_tar = os.path.join(test_dir, b'fake.tar.gz')
    with open(bogus_tar, 'wb') as test_obj:
        test_obj.write(b"\x00\x01\x02\x03")
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    expected = "Collection artifact at '%s' is not a valid tar file." % to_native(bogus_tar)
    with pytest.raises(AnsibleError, match=expected):
        Requirement.from_requirement_dict({'name': to_text(bogus_tar)}, concrete_artifact_cm)
def test_build_requirement_from_tar_no_manifest(tmp_path_factory):
    """A tarball containing only FILES.json should be rejected for the missing MANIFEST.json."""
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    json_data = to_bytes(json.dumps(
        {
            'files': [],
            'format': 1,
        }
    ))
    tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
    # Build a tarball with only FILES.json inside it.
    with tarfile.open(tar_path, 'w:gz') as tfile:
        b_io = BytesIO(json_data)
        tar_info = tarfile.TarInfo('FILES.json')
        tar_info.size = len(json_data)
        tar_info.mode = S_IRWU_RG_RO
        tfile.addfile(tarinfo=tar_info, fileobj=b_io)
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    expected = "Collection at '%s' does not contain the required file MANIFEST.json." % to_native(tar_path)
    with pytest.raises(AnsibleError, match=expected):
        Requirement.from_requirement_dict({'name': to_text(tar_path)}, concrete_artifact_cm)
def test_build_requirement_from_tar_no_files(tmp_path_factory):
    """A MANIFEST.json without collection_info fields should raise KeyError('namespace')."""
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    json_data = to_bytes(json.dumps(
        {
            'collection_info': {},
        }
    ))
    tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
    # Build a tarball whose MANIFEST.json has an empty collection_info dict.
    with tarfile.open(tar_path, 'w:gz') as tfile:
        b_io = BytesIO(json_data)
        tar_info = tarfile.TarInfo('MANIFEST.json')
        tar_info.size = len(json_data)
        tar_info.mode = S_IRWU_RG_RO
        tfile.addfile(tarinfo=tar_info, fileobj=b_io)
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    with pytest.raises(KeyError, match='namespace'):
        Requirement.from_requirement_dict({'name': to_text(tar_path)}, concrete_artifact_cm)
def test_build_requirement_from_tar_invalid_manifest(tmp_path_factory):
    """Non-JSON MANIFEST.json content inside a tarball should raise AnsibleError."""
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    json_data = b"not a json"
    tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
    # Build a tarball whose MANIFEST.json is not valid JSON.
    with tarfile.open(tar_path, 'w:gz') as tfile:
        b_io = BytesIO(json_data)
        tar_info = tarfile.TarInfo('MANIFEST.json')
        tar_info.size = len(json_data)
        tar_info.mode = S_IRWU_RG_RO
        tfile.addfile(tarinfo=tar_info, fileobj=b_io)
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    expected = "Collection tar file member MANIFEST.json does not contain a valid json string."
    with pytest.raises(AnsibleError, match=expected):
        Requirement.from_requirement_dict({'name': to_text(tar_path)}, concrete_artifact_cm)
def test_build_requirement_from_name(galaxy_server, monkeypatch, tmp_path_factory):
    """Resolving a bare collection name should pick the latest version the server offers."""
    mock_get_versions = MagicMock()
    mock_get_versions.return_value = ['2.1.9', '2.1.10']
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
    # NOTE(review): this MagicMock is patched in as the method itself, so a call returns
    # its return_value rather than this configured object — confirm resolution only
    # needs get_collection_versions for this assertion set.
    mock_version_metadata = MagicMock(
        namespace='namespace', name='collection',
        version='2.1.10', artifact_sha256='', dependencies={}
    )
    monkeypatch.setattr(api.GalaxyAPI, 'get_collection_version_metadata', mock_version_metadata)
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    collections = ['namespace.collection']
    requirements_file = None
    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', collections[0]])
    requirements = cli._require_one_of_collections_requirements(
        collections, requirements_file, artifacts_manager=concrete_artifact_cm
    )['collections']
    actual = collection._resolve_depenency_map(
        requirements, [galaxy_server], concrete_artifact_cm, None, True, False, False, False, False
    )['namespace.collection']
    assert actual.namespace == u'namespace'
    assert actual.name == u'collection'
    assert actual.ver == u'2.1.10'
    assert actual.src == galaxy_server
    assert mock_get_versions.call_count == 1
    assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_with_prerelease(galaxy_server, monkeypatch, tmp_path_factory):
    """Pre-release versions should be skipped when no explicit version is requested."""
    mock_get_versions = MagicMock()
    mock_get_versions.return_value = ['1.0.1', '2.0.1-beta.1', '2.0.1']
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
    mock_get_info = MagicMock()
    mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1', None, None, {}, None, None)
    monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection'])
    requirements = cli._require_one_of_collections_requirements(
        ['namespace.collection'], None, artifacts_manager=concrete_artifact_cm
    )['collections']
    actual = collection._resolve_depenency_map(
        requirements, [galaxy_server], concrete_artifact_cm, None, True, False, False, False, False
    )['namespace.collection']
    # 2.0.1-beta.1 must not win over the stable 2.0.1 release.
    assert actual.namespace == u'namespace'
    assert actual.name == u'collection'
    assert actual.src == galaxy_server
    assert actual.ver == u'2.0.1'
    assert mock_get_versions.call_count == 1
    assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_with_prerelease_explicit(galaxy_server, monkeypatch, tmp_path_factory):
    """Explicitly requesting a pre-release version should resolve to that pre-release."""
    mock_get_versions = MagicMock()
    mock_get_versions.return_value = ['1.0.1', '2.0.1-beta.1', '2.0.1']
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
    mock_get_info = MagicMock()
    mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1-beta.1', None, None,
                                                               {}, None, None)
    monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:2.0.1-beta.1'])
    requirements = cli._require_one_of_collections_requirements(
        ['namespace.collection:2.0.1-beta.1'], None, artifacts_manager=concrete_artifact_cm
    )['collections']
    actual = collection._resolve_depenency_map(
        requirements, [galaxy_server], concrete_artifact_cm, None, True, False, False, False, False
    )['namespace.collection']
    assert actual.namespace == u'namespace'
    assert actual.name == u'collection'
    assert actual.src == galaxy_server
    assert actual.ver == u'2.0.1-beta.1'
    assert mock_get_info.call_count == 1
    assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1-beta.1')
def test_build_requirement_from_name_second_server(galaxy_server, monkeypatch, tmp_path_factory):
    """A server with no matching versions should be skipped in favour of the next server."""
    mock_get_versions = MagicMock()
    mock_get_versions.return_value = ['1.0.1', '1.0.2', '1.0.3']
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
    mock_get_info = MagicMock()
    mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '1.0.3', None, None, {}, None, None)
    monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
    # First server in the list returns no versions at all.
    broken_server = copy.copy(galaxy_server)
    broken_server.api_server = 'https://broken.com/'
    mock_version_list = MagicMock()
    mock_version_list.return_value = []
    monkeypatch.setattr(broken_server, 'get_collection_versions', mock_version_list)
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>1.0.1'])
    requirements = cli._require_one_of_collections_requirements(
        ['namespace.collection:>1.0.1'], None, artifacts_manager=concrete_artifact_cm
    )['collections']
    actual = collection._resolve_depenency_map(
        requirements, [broken_server, galaxy_server], concrete_artifact_cm, None, True, False, False, False, False
    )['namespace.collection']
    assert actual.namespace == u'namespace'
    assert actual.name == u'collection'
    assert actual.src == galaxy_server
    assert actual.ver == u'1.0.3'
    # Both servers must have been queried exactly once.
    assert mock_version_list.call_count == 1
    assert mock_version_list.mock_calls[0][1] == ('namespace', 'collection')
    assert mock_get_versions.call_count == 1
    assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_missing(galaxy_server, monkeypatch, tmp_path_factory):
    """No available versions on any server should produce a dependency-resolution failure."""
    mock_open = MagicMock()
    mock_open.return_value = []
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_open)
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    # NOTE(review): the CLI args pin ':>1.0.1' but the requirement list below does not —
    # the explicit list drives resolution (error below shows ':*'); confirm intentional.
    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>1.0.1'])
    requirements = cli._require_one_of_collections_requirements(
        ['namespace.collection'], None, artifacts_manager=concrete_artifact_cm
    )['collections']
    expected = "Failed to resolve the requested dependencies map. Could not satisfy the following requirements:\n* namespace.collection:* (direct request)"
    with pytest.raises(AnsibleError, match=re.escape(expected)):
        collection._resolve_depenency_map(requirements, [galaxy_server, galaxy_server], concrete_artifact_cm, None, False, True, False, False, False)
def test_build_requirement_from_name_401_unauthorized(galaxy_server, monkeypatch, tmp_path_factory):
    """A 401 from the Galaxy API should propagate as GalaxyError rather than being swallowed."""
    mock_open = MagicMock()
    mock_open.side_effect = api.GalaxyError(urllib.error.HTTPError('https://galaxy.server.com', 401, 'msg', {},
                                                                   StringIO()), "error")
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_open)
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>1.0.1'])
    requirements = cli._require_one_of_collections_requirements(
        ['namespace.collection'], None, artifacts_manager=concrete_artifact_cm
    )['collections']
    expected = "error (HTTP Code: 401, Message: msg)"
    with pytest.raises(api.GalaxyError, match=re.escape(expected)):
        collection._resolve_depenency_map(requirements, [galaxy_server, galaxy_server], concrete_artifact_cm, None, False, False, False, False, False)
def test_build_requirement_from_name_single_version(galaxy_server, monkeypatch, tmp_path_factory):
    """An exact `==` pin should only ever consider that single candidate."""
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm)
    dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm)
    # Record every candidate the resolver actually considers.
    matches = RequirementCandidates()
    mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True)
    monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_matches)
    mock_get_versions = MagicMock()
    mock_get_versions.return_value = ['2.0.0']
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
    mock_get_info = MagicMock()
    mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.0', None, None,
                                                               {}, None, None)
    monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:==2.0.0'])
    requirements = cli._require_one_of_collections_requirements(
        ['namespace.collection:==2.0.0'], None, artifacts_manager=concrete_artifact_cm
    )['collections']
    actual = collection._resolve_depenency_map(
        requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False, False, False)['namespace.collection']
    assert actual.namespace == u'namespace'
    assert actual.name == u'collection'
    assert actual.src == galaxy_server
    assert actual.ver == u'2.0.0'
    assert [c.ver for c in matches.candidates] == [u'2.0.0']
    assert mock_get_info.call_count == 1
    assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.0')
def test_build_requirement_from_name_multiple_versions_one_match(galaxy_server, monkeypatch, tmp_path_factory):
    """A bounded range (`>=2.0.1,<2.0.2`) should match exactly one of several available versions."""
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm)
    dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm)
    # Record every candidate the resolver actually considers.
    matches = RequirementCandidates()
    mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True)
    monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_matches)
    mock_get_versions = MagicMock()
    mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2']
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
    mock_get_info = MagicMock()
    mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1', None, None,
                                                               {}, None, None)
    monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>=2.0.1,<2.0.2'])
    requirements = cli._require_one_of_collections_requirements(
        ['namespace.collection:>=2.0.1,<2.0.2'], None, artifacts_manager=concrete_artifact_cm
    )['collections']
    actual = collection._resolve_depenency_map(
        requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False, False, False)['namespace.collection']
    assert actual.namespace == u'namespace'
    assert actual.name == u'collection'
    assert actual.src == galaxy_server
    assert actual.ver == u'2.0.1'
    assert [c.ver for c in matches.candidates] == [u'2.0.1']
    assert mock_get_versions.call_count == 1
    assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
    assert mock_get_info.call_count == 1
    assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1')
def test_build_requirement_from_name_multiple_version_results(galaxy_server, monkeypatch, tmp_path_factory):
    """Resolving `!=2.0.2` should pick the newest remaining version and try candidates newest-first."""
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm)
    dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm)
    # Record every candidate the resolver actually considers.
    matches = RequirementCandidates()
    mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True)
    monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_matches)
    mock_get_info = MagicMock()
    mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.5', None, None, {}, None, None)
    monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
    # Bug fix: the mock was previously configured twice in a row — a '1.0.x'
    # return_value list and a duplicate monkeypatch.setattr were immediately
    # overwritten by this configuration, so they were dead code and are removed.
    mock_get_versions = MagicMock()
    mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2', '2.0.3', '2.0.4', '2.0.5']
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:!=2.0.2'])
    requirements = cli._require_one_of_collections_requirements(
        ['namespace.collection:!=2.0.2'], None, artifacts_manager=concrete_artifact_cm
    )['collections']
    actual = collection._resolve_depenency_map(
        requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False, False, False)['namespace.collection']
    assert actual.namespace == u'namespace'
    assert actual.name == u'collection'
    assert actual.src == galaxy_server
    assert actual.ver == u'2.0.5'
    # should be ordered latest to earliest
    assert [c.ver for c in matches.candidates] == [u'2.0.5', u'2.0.4', u'2.0.3', u'2.0.1', u'2.0.0']
    assert mock_get_versions.call_count == 1
    assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_candidate_with_conflict(monkeypatch, tmp_path_factory, galaxy_server):
    """A direct requirement that excludes the only available version must fail resolution."""
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    mock_get_info = MagicMock()
    mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.5', None, None, {}, None, None)
    monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
    # 2.0.5 is the only version on the server, and the requirement forbids it.
    mock_get_versions = MagicMock()
    mock_get_versions.return_value = ['2.0.5']
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:!=2.0.5'])
    requirements = cli._require_one_of_collections_requirements(
        ['namespace.collection:!=2.0.5'], None, artifacts_manager=concrete_artifact_cm
    )['collections']
    expected = "Failed to resolve the requested dependencies map. Could not satisfy the following requirements:\n"
    expected += "* namespace.collection:!=2.0.5 (direct request)"
    with pytest.raises(AnsibleError, match=re.escape(expected)):
        collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False, False, False)
def test_dep_candidate_with_conflict(monkeypatch, tmp_path_factory, galaxy_server):
    """A conflict introduced by a parent's dependency should name the parent in the error."""
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    # parent.collection depends on namespace.collection != 1.0.0, but 1.0.0 is the only version.
    mock_get_info_return = [
        api.CollectionVersionMetadata('parent', 'collection', '2.0.5', None, None, {'namespace.collection': '!=1.0.0'}, None, None),
        api.CollectionVersionMetadata('namespace', 'collection', '1.0.0', None, None, {}, None, None),
    ]
    mock_get_info = MagicMock(side_effect=mock_get_info_return)
    monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
    mock_get_versions = MagicMock(side_effect=[['2.0.5'], ['1.0.0']])
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'parent.collection:2.0.5'])
    requirements = cli._require_one_of_collections_requirements(
        ['parent.collection:2.0.5'], None, artifacts_manager=concrete_artifact_cm
    )['collections']
    expected = "Failed to resolve the requested dependencies map. Could not satisfy the following requirements:\n"
    expected += "* namespace.collection:!=1.0.0 (dependency of parent.collection:2.0.5)"
    with pytest.raises(AnsibleError, match=re.escape(expected)):
        collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False, False, False)
def test_install_installed_collection(monkeypatch, tmp_path_factory, galaxy_server):
    """Installing an already-installed collection should be a no-op with a hint about --force."""
    mock_installed_collections = MagicMock(return_value=[Candidate('namespace.collection', '1.2.3', None, 'dir', None)])
    monkeypatch.setattr(collection, 'find_existing_collections', mock_installed_collections)
    test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections'))
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)
    mock_get_info = MagicMock()
    mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '1.2.3', None, None, {}, None, None)
    monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
    mock_get_versions = MagicMock(return_value=['1.2.3', '1.3.0'])
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection'])
    cli.run()
    expected = "Nothing to do. All requested collections are already installed. If you want to reinstall them, consider using `--force`."
    assert mock_display.mock_calls[1][1][0] == expected
def test_install_collection(collection_artifact, monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
collection_tar = collection_artifact[1]
temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp')
os.makedirs(temp_path)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
output_path = os.path.join(os.path.split(collection_tar)[0])
collection_path = os.path.join(output_path, b'ansible_namespace', b'collection')
os.makedirs(os.path.join(collection_path, b'delete_me')) # Create a folder to verify the install cleans out the dir
candidate = Candidate('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file', None)
collection.install(candidate, to_text(output_path), concrete_artifact_cm)
# Ensure the temp directory is empty, nothing is left behind
assert os.listdir(temp_path) == []
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'plugins')).st_mode) == S_IRWXU_RXG_RXO
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'README.md')).st_mode) == S_IRWU_RG_RO
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'runme.sh')).st_mode) == S_IRWXU_RXG_RXO
assert mock_display.call_count == 2
assert mock_display.mock_calls[0][1][0] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" \
% to_text(collection_path)
assert mock_display.mock_calls[1][1][0] == "ansible_namespace.collection:0.1.0 was installed successfully"
def test_install_collection_with_download(galaxy_server, collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
shutil.rmtree(collection_path)
collections_dir = ('%s' % os.path.sep).join(to_text(collection_path).split('%s' % os.path.sep)[:-2])
temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp')
os.makedirs(temp_path)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
mock_download = MagicMock()
mock_download.return_value = collection_tar
monkeypatch.setattr(concrete_artifact_cm, 'get_galaxy_artifact_path', mock_download)
req = Candidate('ansible_namespace.collection', '0.1.0', 'https://downloadme.com', 'galaxy', None)
collection.install(req, to_text(collections_dir), concrete_artifact_cm)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
assert mock_display.call_count == 2
assert mock_display.mock_calls[0][1][0] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" \
% to_text(collection_path)
assert mock_display.mock_calls[1][1][0] == "ansible_namespace.collection:0.1.0 was installed successfully"
assert mock_download.call_count == 1
assert mock_download.mock_calls[0][1][0].src == 'https://downloadme.com'
assert mock_download.mock_calls[0][1][0].type == 'galaxy'
def test_install_collections_from_tar(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
shutil.rmtree(collection_path)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file', None)]
collection.install_collections(
requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm, True, False, set())
assert os.path.isdir(collection_path)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj:
actual_manifest = json.loads(to_text(manifest_obj.read()))
assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace'
assert actual_manifest['collection_info']['name'] == 'collection'
assert actual_manifest['collection_info']['version'] == '0.1.0'
# Filter out the progress cursor display calls.
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert len(display_msgs) == 4
assert display_msgs[0] == "Process install dependency map"
assert display_msgs[1] == "Starting collection install process"
assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path)
# Makes sure we don't get stuck in some recursive loop
@pytest.mark.parametrize('collection_artifact', [
{'ansible_namespace.collection': '>=0.0.1'},
], indirect=True)
def test_install_collection_with_circular_dependency(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
shutil.rmtree(collection_path)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file', None)]
collection.install_collections(
requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm, True, False, set())
assert os.path.isdir(collection_path)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj:
actual_manifest = json.loads(to_text(manifest_obj.read()))
assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace'
assert actual_manifest['collection_info']['name'] == 'collection'
assert actual_manifest['collection_info']['version'] == '0.1.0'
assert actual_manifest['collection_info']['dependencies'] == {'ansible_namespace.collection': '>=0.0.1'}
# Filter out the progress cursor display calls.
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert len(display_msgs) == 4
assert display_msgs[0] == "Process install dependency map"
assert display_msgs[1] == "Starting collection install process"
assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path)
assert display_msgs[3] == "ansible_namespace.collection:0.1.0 was installed successfully"
@pytest.mark.parametrize('collection_artifact', [
None,
{},
], indirect=True)
def test_install_collection_with_no_dependency(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
shutil.rmtree(collection_path)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file', None)]
collection.install_collections(
requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm, True, False, set())
assert os.path.isdir(collection_path)
with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj:
actual_manifest = json.loads(to_text(manifest_obj.read()))
assert not actual_manifest['collection_info']['dependencies']
assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace'
assert actual_manifest['collection_info']['name'] == 'collection'
assert actual_manifest['collection_info']['version'] == '0.1.0'
@pytest.mark.parametrize(
"signatures,required_successful_count,ignore_errors,expected_success",
[
([], 'all', [], True),
(["good_signature"], 'all', [], True),
(["good_signature", collection.gpg.GpgBadArmor(status='failed')], 'all', [], False),
([collection.gpg.GpgBadArmor(status='failed')], 'all', [], False),
# This is expected to succeed because ignored does not increment failed signatures.
# "all" signatures is not a specific number, so all == no (non-ignored) signatures in this case.
([collection.gpg.GpgBadArmor(status='failed')], 'all', ["BADARMOR"], True),
([collection.gpg.GpgBadArmor(status='failed'), "good_signature"], 'all', ["BADARMOR"], True),
([], '+all', [], False),
([collection.gpg.GpgBadArmor(status='failed')], '+all', ["BADARMOR"], False),
([], '1', [], True),
([], '+1', [], False),
(["good_signature"], '2', [], False),
(["good_signature", collection.gpg.GpgBadArmor(status='failed')], '2', [], False),
# This is expected to fail because ignored does not increment successful signatures.
# 2 signatures are required, but only 1 is successful.
(["good_signature", collection.gpg.GpgBadArmor(status='failed')], '2', ["BADARMOR"], False),
(["good_signature", "good_signature"], '2', [], True),
]
)
def test_verify_file_signatures(signatures: list[str], required_successful_count: str, ignore_errors: list[str], expected_success: bool) -> None:
def gpg_error_generator(results):
for result in results:
if isinstance(result, collection.gpg.GpgBaseError):
yield result
fqcn = 'ns.coll'
manifest_file = 'MANIFEST.json'
keyring = '~/.ansible/pubring.kbx'
with patch.object(collection, 'run_gpg_verify', MagicMock(return_value=("somestdout", 0,))):
with patch.object(collection, 'parse_gpg_errors', MagicMock(return_value=gpg_error_generator(signatures))):
assert collection.verify_file_signatures(
fqcn,
manifest_file,
signatures,
keyring,
required_successful_count,
ignore_errors
) == expected_success
| RequirementCandidates |
python | dagster-io__dagster | python_modules/dagster/dagster/_utils/container.py | {
"start": 4432,
"end": 13515
} | class ____(TypedDict):
num_allocated_cores: Optional[int]
cpu_usage: Optional[float] # CPU usage in seconds
cpu_cfs_quota_us: Optional[float] # CPU quota per period in microseconds
cpu_cfs_period_us: Optional[float] # CPU period in microseconds
memory_usage: Optional[float] # Memory usage in bytes
memory_limit: Optional[int] # Memory limit in bytes
measurement_timestamp: Optional[float]
previous_cpu_usage: Optional[float]
previous_measurement_timestamp: Optional[float]
cgroup_version: Optional[str]
def retrieve_containerized_utilization_metrics(
logger: Optional[logging.Logger],
previous_measurement_timestamp: Optional[float] = None,
previous_cpu_usage: Optional[float] = None,
) -> ContainerUtilizationMetrics:
"""Retrieve the CPU and memory utilization metrics from cgroup and proc files."""
cgroup_version = _retrieve_cgroup_version(logger)
return {
"num_allocated_cores": _retrieve_containerized_num_allocated_cores(logger),
"cpu_usage": _retrieve_containerized_cpu_usage(logger, cgroup_version),
"previous_cpu_usage": previous_cpu_usage,
"previous_measurement_timestamp": previous_measurement_timestamp,
"cpu_cfs_quota_us": _retrieve_containerized_cpu_cfs_quota_us(logger, cgroup_version),
"cpu_cfs_period_us": _retrieve_containerized_cpu_cfs_period_us(logger, cgroup_version),
"memory_usage": _retrieve_containerized_memory_usage(logger, cgroup_version),
"memory_limit": _retrieve_containerized_memory_limit(logger, cgroup_version),
"measurement_timestamp": get_current_timestamp(),
"cgroup_version": cgroup_version.value if cgroup_version else None,
}
def _retrieve_cgroup_version(logger: Optional[logging.Logger]) -> Optional[CGroupVersion]:
try:
# Run the stat command in a subprocess and read the result.
status = os.popen("stat -fc %T /sys/fs/cgroup/").read().strip()
if status == "cgroup2fs":
return CGroupVersion.V2
elif status == "tmpfs":
return CGroupVersion.V1
else:
return None
except Exception as e:
if logger:
logger.info(f"No cgroup version found: {e}")
return None
def _retrieve_containerized_cpu_usage(
logger: Optional[logging.Logger], cgroup_version: Optional[CGroupVersion]
) -> Optional[float]:
"""Retrieve the CPU time in seconds from the cgroup file."""
if cgroup_version == CGroupVersion.V1:
return _retrieve_containerized_cpu_usage_v1(logger)
elif cgroup_version == CGroupVersion.V2:
return _retrieve_containerized_cpu_usage_v2(logger)
else:
return None
def _retrieve_containerized_cpu_usage_v1(logger: Optional[logging.Logger]) -> Optional[float]:
try:
with open(cpu_usage_path_cgroup_v1()) as f:
return float(f.read()) / 1e9 # Cpuacct.usage is in nanoseconds
except Exception as e:
if logger:
logger.error(f"Failed to retrieve CPU time from cgroup: {e}")
return None
def _retrieve_containerized_cpu_usage_v2(logger: Optional[logging.Logger]) -> Optional[float]:
try:
with open(cpu_stat_path_cgroup_v2()) as f:
lines = f.readlines()
for line in lines:
if line.startswith("usage_usec"):
return float(line.split()[1]) / 1e6 # Cpu.stat usage_usec is in microseconds
return None
except Exception as e:
if logger:
logger.error(f"Failed to retrieve CPU time from cgroup: {e}")
return None
def _retrieve_containerized_num_allocated_cores(logger: Optional[logging.Logger]) -> Optional[int]:
"""Retrieve the number of cores from the /proc/cpuinfo file."""
try:
with open(cpu_info_path()) as f:
return len([line for line in f if line.startswith("processor")])
except Exception as e:
if logger:
logger.error(f"Failed to retrieve number of cores from /proc/cpuinfo: {e}")
return None
def _retrieve_containerized_memory_usage(
logger: Optional[logging.Logger], cgroup_version: Optional[CGroupVersion]
) -> Optional[int]:
"""Retrieve the memory usage in bytes from the cgroup file."""
if cgroup_version == CGroupVersion.V1:
return _retrieve_containerized_memory_usage_v1(logger)
elif cgroup_version == CGroupVersion.V2:
return _retrieve_containerized_memory_usage_v2(logger)
else:
return None
def _retrieve_containerized_memory_usage_v1(logger: Optional[logging.Logger]) -> Optional[int]:
try:
with open(memory_usage_path_cgroup_v1()) as f:
return int(f.read())
except Exception as e:
if logger:
logger.error(f"Failed to retrieve memory usage from cgroup: {e}")
return None
def _retrieve_containerized_memory_usage_v2(logger: Optional[logging.Logger]) -> Optional[int]:
try:
with open(memory_usage_path_cgroup_v2()) as f:
return int(f.read())
except Exception as e:
if logger:
logger.error(f"Failed to retrieve memory usage from cgroup: {e}")
return None
def _retrieve_containerized_memory_limit(
logger: Optional[logging.Logger], cgroup_version: Optional[CGroupVersion]
) -> Optional[int]:
"""Retrieve the memory limit in bytes from the cgroup file."""
if cgroup_version == CGroupVersion.V1:
return _retrieve_containerized_memory_limit_v1(logger)
elif cgroup_version == CGroupVersion.V2:
return _retrieve_containerized_memory_limit_v2(logger)
else:
return None
def _retrieve_containerized_memory_limit_v1(logger: Optional[logging.Logger]) -> Optional[int]:
try:
with open(memory_limit_path_cgroup_v1()) as f:
return int(f.read())
except:
if logger:
logger.exception("Failed to retrieve memory limit from cgroup")
return None
def _retrieve_containerized_memory_limit_v2(logger: Optional[logging.Logger]) -> Optional[int]:
try:
with open(memory_limit_path_cgroup_v2()) as f:
return int(f.read())
except:
if logger:
logger.exception(
"Failed to retrieve memory limit from cgroup. There may be no limit set on the container."
)
return None
def _retrieve_containerized_cpu_cfs_period_us(
logger: Optional[logging.Logger], cgroup_version: Optional[CGroupVersion]
) -> Optional[float]:
"""Retrieve the CPU period in microseconds from the cgroup file."""
if cgroup_version == CGroupVersion.V1:
return _retrieve_containerized_cpu_cfs_period_us_v1(logger)
elif cgroup_version == CGroupVersion.V2:
return _retrieve_containerized_cpu_cfs_period_us_v2(logger)
else:
return None
def _retrieve_containerized_cpu_cfs_period_us_v1(
logger: Optional[logging.Logger],
) -> Optional[float]:
try:
with open(cpu_cfs_period_us_path()) as f:
return float(f.read())
except:
if logger:
logger.exception("Failed to retrieve CPU period from cgroup")
return None
def _retrieve_containerized_cpu_cfs_period_us_v2(
logger: Optional[logging.Logger],
) -> Optional[float]:
# We can retrieve period information from the cpu.max file. The file is in the format $MAX $PERIOD and is only one line.
try:
with open(cpu_max_path_cgroup_v2()) as f:
line = f.readline()
return float(line.split()[1])
except:
if logger:
logger.exception("Failed to retrieve CPU period from cgroup")
return None
def _retrieve_containerized_cpu_cfs_quota_us(
logger: Optional[logging.Logger], cgroup_version: Optional[CGroupVersion]
) -> Optional[float]:
"""Retrieve the CPU quota in microseconds from the cgroup file."""
if cgroup_version == CGroupVersion.V1:
return _retrieve_containerized_cpu_cfs_quota_us_v1(logger)
elif cgroup_version == CGroupVersion.V2:
return _retrieve_containerized_cpu_cfs_quota_us_v2(logger)
else:
return None
def _retrieve_containerized_cpu_cfs_quota_us_v1(
logger: Optional[logging.Logger],
) -> Optional[float]:
try:
with open(cpu_cfs_quota_us_path()) as f:
return float(f.read())
except:
if logger:
logger.debug("Failed to retrieve CPU quota from cgroup", exc_info=True)
return None
def _retrieve_containerized_cpu_cfs_quota_us_v2(
logger: Optional[logging.Logger],
) -> Optional[float]:
# We can retrieve quota information from the cpu.max file. The file is in the format $MAX $PERIOD .
try:
with open(cpu_max_path_cgroup_v2()) as f:
line = f.readline()
return float(line.split()[0])
except:
if logger:
logger.debug(
"Failed to retrieve CPU quota from cgroup. There might not be a limit set on the container.",
exc_info=True,
)
return None
| ContainerUtilizationMetrics |
python | EpistasisLab__tpot | tpot/builtin_modules/arithmetictransformer.py | {
"start": 8744,
"end": 9435
} | class ____(TransformerMixin, BaseEstimator):
def __init__(self):
"""
A transformer that takes checks if all elements in a row are not equal.
"""
pass
def fit(self, X, y=None):
return self
def transform(self, X):
transformed_X = np.array(self.transform_helper(np.array(X)))
if transformed_X.dtype != float:
transformed_X = transformed_X.astype(float)
return transformed_X
def transform_helper(self, X):
X = np.array(X)
if len(X.shape) == 1:
X = np.expand_dims(X,0)
return 1- np.expand_dims(np.all(X == X[0,:], axis = 1),1).astype(float)
| NETransformer |
python | getsentry__sentry | tests/sentry/api/bases/test_project.py | {
"start": 1243,
"end": 12359
} | class ____(ProjectPermissionBase):
def test_regular_user(self) -> None:
user = self.create_user(is_superuser=False)
assert not self.has_object_perm("GET", self.project, user=user)
assert not self.has_object_perm("POST", self.project, user=user)
assert not self.has_object_perm("PUT", self.project, user=user)
assert not self.has_object_perm("DELETE", self.project, user=user)
def test_superuser(self) -> None:
user = self.create_user(is_superuser=True)
assert self.has_object_perm("GET", self.project, user=user, is_superuser=True)
assert self.has_object_perm("POST", self.project, user=user, is_superuser=True)
assert self.has_object_perm("PUT", self.project, user=user, is_superuser=True)
assert self.has_object_perm("DELETE", self.project, user=user, is_superuser=True)
def test_member_without_team_membership(self) -> None:
team = self.create_team(organization=self.organization)
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.organization, role="member", teams=[team])
# if `allow_joinleave` is True, members should be able to GET a project even if
# it has no teams
assert self.has_object_perm("GET", self.project, user=user)
assert not self.has_object_perm("POST", self.project, user=user)
assert not self.has_object_perm("PUT", self.project, user=user)
assert not self.has_object_perm("DELETE", self.project, user=user)
def test_member_with_team_membership(self) -> None:
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization, role="member", teams=[self.team]
)
assert self.has_object_perm("GET", self.project, user=user)
assert not self.has_object_perm("POST", self.project, user=user)
assert not self.has_object_perm("PUT", self.project, user=user)
assert not self.has_object_perm("DELETE", self.project, user=user)
@with_feature("organizations:team-roles")
def test_member_with_team_membership_and_team_role_admin(self) -> None:
team = self.create_team(organization=self.organization)
project = self.create_project(organization=self.organization, teams=[team])
user = self.create_user(is_superuser=False)
member = self.create_member(user=user, organization=self.organization, role="member")
self.create_team_membership(team, member, role="admin")
assert self.has_object_perm("GET", project, user=user)
assert self.has_object_perm("POST", project, user=user)
assert self.has_object_perm("PUT", project, user=user)
assert self.has_object_perm("DELETE", project, user=user)
def test_admin_without_team_membership(self) -> None:
team = self.create_team(organization=self.organization)
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.organization, role="admin", teams=[team])
# if `allow_joinleave` is True, admins can act on teams
# they don't have access to
assert self.has_object_perm("GET", self.project, user=user)
assert self.has_object_perm("POST", self.project, user=user)
assert self.has_object_perm("PUT", self.project, user=user)
assert self.has_object_perm("DELETE", self.project, user=user)
def test_admin_with_team_membership(self) -> None:
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization, role="admin", teams=[self.team]
)
assert self.has_object_perm("GET", self.project, user=user)
assert self.has_object_perm("POST", self.project, user=user)
assert self.has_object_perm("PUT", self.project, user=user)
assert self.has_object_perm("DELETE", self.project, user=user)
def test_manager_without_team_membership(self) -> None:
team = self.create_team(organization=self.organization)
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.organization, role="manager", teams=[team])
assert self.has_object_perm("GET", self.project, user=user)
assert self.has_object_perm("POST", self.project, user=user)
assert self.has_object_perm("PUT", self.project, user=user)
assert self.has_object_perm("DELETE", self.project, user=user)
def test_manager_with_team_membership(self) -> None:
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization, role="manager", teams=[self.team]
)
assert self.has_object_perm("GET", self.project, user=user)
assert self.has_object_perm("POST", self.project, user=user)
assert self.has_object_perm("PUT", self.project, user=user)
assert self.has_object_perm("DELETE", self.project, user=user)
def test_manager_if_project_has_no_teams(self) -> None:
project = self.create_project(organization=self.organization, teams=[])
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.organization, role="manager")
assert self.has_object_perm("GET", project, user=user)
assert self.has_object_perm("POST", project, user=user)
assert self.has_object_perm("PUT", project, user=user)
assert self.has_object_perm("DELETE", project, user=user)
def test_owner_without_team_membership(self) -> None:
team = self.create_team(organization=self.organization)
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.organization, role="owner", teams=[team])
assert self.has_object_perm("GET", self.project, user=user)
assert self.has_object_perm("POST", self.project, user=user)
assert self.has_object_perm("PUT", self.project, user=user)
assert self.has_object_perm("DELETE", self.project, user=user)
def test_owner_with_team_membership(self) -> None:
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization, role="owner", teams=[self.team]
)
assert self.has_object_perm("GET", self.project, user=user)
assert self.has_object_perm("POST", self.project, user=user)
assert self.has_object_perm("PUT", self.project, user=user)
assert self.has_object_perm("DELETE", self.project, user=user)
def test_owner_if_project_has_no_teams(self) -> None:
project = self.create_project(organization=self.organization, teams=[])
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.organization, role="owner")
assert self.has_object_perm("GET", project, user=user)
assert self.has_object_perm("POST", project, user=user)
assert self.has_object_perm("PUT", project, user=user)
assert self.has_object_perm("DELETE", project, user=user)
def test_api_key_with_org_access(self) -> None:
key = self.create_api_key(organization=self.organization, scope_list=["project:read"])
assert self.has_object_perm("GET", self.project, auth=key)
assert not self.has_object_perm("POST", self.project, auth=key)
assert not self.has_object_perm("PUT", self.project, auth=key)
assert not self.has_object_perm("DELETE", self.project, auth=key)
def test_api_key_without_org_access(self) -> None:
key = self.create_api_key(
organization=self.create_organization(), scope_list=["project:read"]
)
assert not self.has_object_perm("GET", self.project, auth=key)
assert not self.has_object_perm("POST", self.project, auth=key)
assert not self.has_object_perm("PUT", self.project, auth=key)
assert not self.has_object_perm("DELETE", self.project, auth=key)
def test_api_key_without_access(self) -> None:
key = self.create_api_key(organization=self.organization)
assert not self.has_object_perm("GET", self.project, auth=key)
assert not self.has_object_perm("POST", self.project, auth=key)
assert not self.has_object_perm("PUT", self.project, auth=key)
assert not self.has_object_perm("DELETE", self.project, auth=key)
def test_api_key_with_wrong_access(self) -> None:
key = self.create_api_key(organization=self.organization, scope_list=["team:read"])
assert not self.has_object_perm("GET", self.project, auth=key)
assert not self.has_object_perm("POST", self.project, auth=key)
assert not self.has_object_perm("PUT", self.project, auth=key)
assert not self.has_object_perm("DELETE", self.project, auth=key)
def test_api_key_with_wrong_access_for_method(self) -> None:
key = self.create_api_key(organization=self.organization, scope_list=["project:write"])
assert self.has_object_perm("GET", self.project, auth=key)
assert self.has_object_perm("POST", self.project, auth=key)
assert self.has_object_perm("PUT", self.project, auth=key)
assert not self.has_object_perm("DELETE", self.project, auth=key)
def test_project_no_team_sentry_app_installed(self) -> None:
project = self.create_project(teams=[self.team])
self.team.delete()
other_org = self.create_organization()
sentry_app = self.create_sentry_app(
name="my_app",
organization=other_org,
scopes=("project:write",),
webhook_url="http://example.com",
)
self.create_sentry_app_installation(
slug=sentry_app.slug, organization=self.organization, user=self.user
)
assert self.has_object_perm("GET", project, user=sentry_app.proxy_user)
assert self.has_object_perm("POST", project, user=sentry_app.proxy_user)
assert self.has_object_perm("PUT", project, user=sentry_app.proxy_user)
assert not self.has_object_perm("DELETE", project, user=sentry_app.proxy_user)
def test_project_no_team_sentry_app_not_installed(self) -> None:
project = self.create_project(teams=[self.team])
self.team.delete()
other_org = self.create_organization()
sentry_app = self.create_sentry_app(
name="my_app",
organization=other_org,
scopes=("project:write",),
webhook_url="http://example.com",
)
# install on other org
self.create_sentry_app_installation(
slug=sentry_app.slug, organization=other_org, user=self.user
)
assert not self.has_object_perm("GET", project, user=sentry_app.proxy_user)
assert not self.has_object_perm("POST", project, user=sentry_app.proxy_user)
assert not self.has_object_perm("PUT", project, user=sentry_app.proxy_user)
assert not self.has_object_perm("DELETE", project, user=sentry_app.proxy_user)
| ProjectPermissionTest |
python | gevent__gevent | src/greentest/3.11/test_ftplib.py | {
"start": 9037,
"end": 16612
} | class ____(asyncore.dispatcher, threading.Thread):
handler = DummyFTPHandler
def __init__(self, address, af=socket.AF_INET, encoding=DEFAULT_ENCODING):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.daemon = True
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
self.encoding = encoding
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accepted(self, conn, addr):
self.handler_instance = self.handler(conn, encoding=self.encoding)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
default_error_handler()
if ssl is not None:
CERTFILE = os.path.join(os.path.dirname(__file__), "certdata", "keycert3.pem")
CAFILE = os.path.join(os.path.dirname(__file__), "certdata", "pycacert.pem")
class SSLConnection(asyncore.dispatcher):
"""An asyncore.dispatcher subclass supporting TLS/SSL."""
_ssl_accepting = False
_ssl_closing = False
def secure_connection(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_cert_chain(CERTFILE)
socket = context.wrap_socket(self.socket,
suppress_ragged_eofs=False,
server_side=True,
do_handshake_on_connect=False)
self.del_channel()
self.set_socket(socket)
self._ssl_accepting = True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
# TODO: SSLError does not expose alert information
elif "SSLV3_ALERT_BAD_CERTIFICATE" in err.args[1]:
return self.handle_close()
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def _do_ssl_shutdown(self):
self._ssl_closing = True
try:
self.socket = self.socket.unwrap()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
except OSError:
# Any "socket error" corresponds to a SSL_ERROR_SYSCALL return
# from OpenSSL's SSL_shutdown(), corresponding to a
# closed socket condition. See also:
# http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html
pass
self._ssl_closing = False
if getattr(self, '_ccc', False) is False:
super(SSLConnection, self).close()
else:
pass
def handle_read_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_read_event()
def handle_write_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_write_event()
def send(self, data):
try:
return super(SSLConnection, self).send(data)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN,
ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return 0
raise
def recv(self, buffer_size):
try:
return super(SSLConnection, self).recv(buffer_size)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return b''
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
self.handle_close()
return b''
raise
def handle_error(self):
default_error_handler()
def close(self):
if (isinstance(self.socket, ssl.SSLSocket) and
self.socket._sslobj is not None):
self._do_ssl_shutdown()
else:
super(SSLConnection, self).close()
class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler):
"""A DummyDTPHandler subclass supporting TLS/SSL."""
def __init__(self, conn, baseclass):
DummyDTPHandler.__init__(self, conn, baseclass)
if self.baseclass.secure_data_channel:
self.secure_connection()
class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler):
"""A DummyFTPHandler subclass supporting TLS/SSL."""
dtp_handler = DummyTLS_DTPHandler
def __init__(self, conn, encoding=DEFAULT_ENCODING):
DummyFTPHandler.__init__(self, conn, encoding=encoding)
self.secure_data_channel = False
self._ccc = False
def cmd_auth(self, line):
"""Set up secure control channel."""
self.push('234 AUTH TLS successful')
self.secure_connection()
def cmd_ccc(self, line):
self.push('220 Reverting back to clear-text')
self._ccc = True
self._do_ssl_shutdown()
def cmd_pbsz(self, line):
"""Negotiate size of buffer for secure data transfer.
For TLS/SSL the only valid value for the parameter is '0'.
Any other value is accepted but ignored.
"""
self.push('200 PBSZ=0 successful.')
def cmd_prot(self, line):
"""Setup un/secure data channel."""
arg = line.upper()
if arg == 'C':
self.push('200 Protection set to Clear')
self.secure_data_channel = False
elif arg == 'P':
self.push('200 Protection set to Private')
self.secure_data_channel = True
else:
self.push("502 Unrecognized PROT type (use C or P).")
class DummyTLS_FTPServer(DummyFTPServer):
handler = DummyTLS_FTPHandler
| DummyFTPServer |
python | huggingface__transformers | tests/models/deepseek_v3/test_modeling_deepseek_v3.py | {
"start": 15233,
"end": 18516
} | class ____(unittest.TestCase):
def tearDown(self):
# See LlamaIntegrationTest.tearDown(). Can be removed once LlamaIntegrationTest.tearDown() is removed.
cleanup(torch_device, gc_collect=False)
@slow
@require_torch_accelerator
@pytest.mark.torch_compile_test
@require_read_token
def test_compile_static_cache(self):
# `torch==2.2` will throw an error on this test (as in other compilation tests), but torch==2.1.2 and torch>2.2
# work as intended. See https://github.com/pytorch/pytorch/issues/121943
if version.parse(torch.__version__) < version.parse("2.3.0"):
self.skipTest(reason="This test requires torch >= 2.3 to run.")
NUM_TOKENS_TO_GENERATE = 40
# https://github.com/huggingface/transformers/pull/38562#issuecomment-2939209171
# The reason why the output is gibberish is because the testing model bzantium/tiny-deepseek-v3 is not trained
# one. Since original DeepSeek-V3 model is too big to debug and test, there was no testing with the original one.
EXPECTED_TEXT_COMPLETION = [
"Simply put, the theory of relativity states that Frojekecdytesాలు sicʰtinaccianntuala breej的效率和质量的控制lavestock-PraccuraciesOTTensorialoghismos的思路astiomotivityosexualriad TherapeuticsoldtYPEface Kishsatellite-TV",
"My favorite all time favorite condiment is ketchup.ieden沟渠係室温 Fryrok般地Segmentation Cycle/physicalwarenkrautempsాలు蹈梗 Mesomac一等asan lethality suspended Causewaydreamswith Fossilsdorfాలు蹈 ChristiansenHOMEbrew",
]
prompts = [
"Simply put, the theory of relativity states that ",
"My favorite all time favorite condiment is ketchup.",
]
tokenizer = AutoTokenizer.from_pretrained("bzantium/tiny-deepseek-v3", pad_token="</s>", padding_side="right")
model = DeepseekV3ForCausalLM.from_pretrained(
"bzantium/tiny-deepseek-v3", device_map=torch_device, dtype=torch.float16
)
inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device)
# Dynamic Cache
generated_ids = model.generate(**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False)
dynamic_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, dynamic_text)
# Static Cache
generated_ids = model.generate(
**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static"
)
static_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, static_text)
# Static Cache + compile
model._cache = None # clear cache object, initialized when we pass `cache_implementation="static"`
model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True)
generated_ids = model.generate(
**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static"
)
static_compiled_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, static_compiled_text)
| DeepseekV3IntegrationTest |
python | modin-project__modin | modin/tests/pandas/test_io.py | {
"start": 109724,
"end": 110322
} | class ____:
@pytest.mark.skip(reason="No clipboard in CI")
def test_read_clipboard(self):
setup_clipboard()
eval_io(fn_name="read_clipboard")
@pytest.mark.skip(reason="No clipboard in CI")
def test_to_clipboard(self):
modin_df, pandas_df = create_test_dfs(TEST_DATA)
modin_df.to_clipboard()
modin_as_clip = pandas.read_clipboard()
pandas_df.to_clipboard()
pandas_as_clip = pandas.read_clipboard()
assert modin_as_clip.equals(pandas_as_clip)
@pytest.mark.filterwarnings(default_to_pandas_ignore_string)
| TestClipboard |
python | scikit-learn__scikit-learn | sklearn/externals/array_api_compat/common/_typing.py | {
"start": 2762,
"end": 2868
} | class ____(TypedDict):
bool: DType
# `__array_namespace_info__.dtypes(kind="signed integer")`
| DTypesBool |
python | huggingface__transformers | src/transformers/models/bros/modeling_bros.py | {
"start": 1525,
"end": 2363
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification loss.
initial_token_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`):
Classification scores for entity initial tokens (before SoftMax).
subsequent_token_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length+1)`):
Classification scores for entity sequence tokens (before SoftMax).
"""
loss: Optional[torch.FloatTensor] = None
initial_token_logits: Optional[torch.FloatTensor] = None
subsequent_token_logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
| BrosSpadeOutput |
python | jazzband__django-waffle | test_app/models.py | {
"start": 809,
"end": 2408
} | class ____(AbstractUserFlag):
FLAG_COMPANIES_CACHE_KEY = 'FLAG_COMPANIES_CACHE_KEY'
FLAG_COMPANIES_CACHE_KEY_DEFAULT = 'flag:%s:companies'
companies = models.ManyToManyField(
Company,
blank=True,
help_text=_('Activate this flag for these companies.'),
)
def get_flush_keys(self, flush_keys=None):
flush_keys = super().get_flush_keys(flush_keys)
companies_cache_key = get_setting(CompanyAwareFlag.FLAG_COMPANIES_CACHE_KEY,
CompanyAwareFlag.FLAG_COMPANIES_CACHE_KEY_DEFAULT)
flush_keys.append(keyfmt(companies_cache_key, self.name))
return flush_keys
def is_active_for_user(self, user):
is_active = super().is_active_for_user(user)
if is_active:
return is_active
if getattr(user, 'company_id', None):
company_ids = self._get_company_ids()
if user.company_id in company_ids:
return True
def _get_company_ids(self):
cache_key = keyfmt(
get_setting(CompanyAwareFlag.FLAG_COMPANIES_CACHE_KEY, CompanyAwareFlag.FLAG_COMPANIES_CACHE_KEY_DEFAULT),
self.name
)
cached = cache.get(cache_key)
if cached == CACHE_EMPTY:
return set()
if cached:
return cached
company_ids = set(self.companies.all().values_list('pk', flat=True))
if not company_ids:
cache.add(cache_key, CACHE_EMPTY)
return set()
cache.add(cache_key, company_ids)
return company_ids
| CompanyAwareFlag |
python | celery__celery | celery/worker/consumer/consumer.py | {
"start": 4215,
"end": 30773
} | class ____:
"""Consumer blueprint."""
Strategies = dict
#: Optional callback called the first time the worker
#: is ready to receive tasks.
init_callback = None
#: The current worker pool instance.
pool = None
#: A timer used for high-priority internal tasks, such
#: as sending heartbeats.
timer = None
restart_count = -1 # first start is the same as a restart
#: This flag will be turned off after the first failed
#: connection attempt.
first_connection_attempt = True
#: Counter to track number of conn retry attempts
#: to broker. Will be reset to 0 once successful
broker_connection_retry_attempt = 0
class Blueprint(bootsteps.Blueprint):
"""Consumer blueprint."""
name = 'Consumer'
default_steps = [
'celery.worker.consumer.connection:Connection',
'celery.worker.consumer.mingle:Mingle',
'celery.worker.consumer.events:Events',
'celery.worker.consumer.gossip:Gossip',
'celery.worker.consumer.heart:Heart',
'celery.worker.consumer.control:Control',
'celery.worker.consumer.tasks:Tasks',
'celery.worker.consumer.delayed_delivery:DelayedDelivery',
'celery.worker.consumer.consumer:Evloop',
'celery.worker.consumer.agent:Agent',
]
def shutdown(self, parent):
self.send_all(parent, 'shutdown')
def __init__(self, on_task_request,
init_callback=noop, hostname=None,
pool=None, app=None,
timer=None, controller=None, hub=None, amqheartbeat=None,
worker_options=None, disable_rate_limits=False,
initial_prefetch_count=2, prefetch_multiplier=1, **kwargs):
self.app = app
self.controller = controller
self.init_callback = init_callback
self.hostname = hostname or gethostname()
self.pid = os.getpid()
self.pool = pool
self.timer = timer
self.strategies = self.Strategies()
self.conninfo = self.app.connection_for_read()
self.connection_errors = self.conninfo.connection_errors
self.channel_errors = self.conninfo.channel_errors
self._restart_state = restart_state(maxR=5, maxT=1)
self._does_info = logger.isEnabledFor(logging.INFO)
self._limit_order = 0
self.on_task_request = on_task_request
self.on_task_message = set()
self.amqheartbeat_rate = self.app.conf.broker_heartbeat_checkrate
self.disable_rate_limits = disable_rate_limits
self.initial_prefetch_count = initial_prefetch_count
self.prefetch_multiplier = prefetch_multiplier
self._maximum_prefetch_restored = True
# this contains a tokenbucket for each task type by name, used for
# rate limits, or None if rate limits are disabled for that task.
self.task_buckets = defaultdict(lambda: None)
self.reset_rate_limits()
self.hub = hub
if self.hub or getattr(self.pool, 'is_green', False):
self.amqheartbeat = amqheartbeat
if self.amqheartbeat is None:
self.amqheartbeat = self.app.conf.broker_heartbeat
else:
self.amqheartbeat = 0
if not hasattr(self, 'loop'):
self.loop = loops.asynloop if hub else loops.synloop
if _detect_environment() == 'gevent':
# there's a gevent bug that causes timeouts to not be reset,
# so if the connection timeout is exceeded once, it can NEVER
# connect again.
self.app.conf.broker_connection_timeout = None
self._pending_operations = []
self.steps = []
self.blueprint = self.Blueprint(
steps=self.app.steps['consumer'],
on_close=self.on_close,
)
self.blueprint.apply(self, **dict(worker_options or {}, **kwargs))
def call_soon(self, p, *args, **kwargs):
p = ppartial(p, *args, **kwargs)
if self.hub:
return self.hub.call_soon(p)
self._pending_operations.append(p)
return p
def perform_pending_operations(self):
if not self.hub:
while self._pending_operations:
try:
self._pending_operations.pop()()
except Exception as exc: # pylint: disable=broad-except
logger.exception('Pending callback raised: %r', exc)
def bucket_for_task(self, type):
limit = rate(getattr(type, 'rate_limit', None))
return TokenBucket(limit, capacity=1) if limit else None
def reset_rate_limits(self):
self.task_buckets.update(
(n, self.bucket_for_task(t)) for n, t in self.app.tasks.items()
)
def _update_prefetch_count(self, index=0):
"""Update prefetch count after pool/shrink grow operations.
Index must be the change in number of processes as a positive
(increasing) or negative (decreasing) number.
Note:
Currently pool grow operations will end up with an offset
of +1 if the initial size of the pool was 0 (e.g.
:option:`--autoscale=1,0 <celery worker --autoscale>`).
"""
num_processes = self.pool.num_processes
if not self.initial_prefetch_count or not num_processes:
return # prefetch disabled
self.initial_prefetch_count = (
self.pool.num_processes * self.prefetch_multiplier
)
return self._update_qos_eventually(index)
def _update_qos_eventually(self, index):
return (self.qos.decrement_eventually if index < 0
else self.qos.increment_eventually)(
abs(index) * self.prefetch_multiplier)
def _limit_move_to_pool(self, request):
task_reserved(request)
self.on_task_request(request)
def _schedule_bucket_request(self, bucket):
while True:
try:
request, tokens = bucket.pop()
except IndexError:
# no request, break
break
if bucket.can_consume(tokens):
self._limit_move_to_pool(request)
continue
else:
# requeue to head, keep the order.
bucket.contents.appendleft((request, tokens))
pri = self._limit_order = (self._limit_order + 1) % 10
hold = bucket.expected_time(tokens)
self.timer.call_after(
hold, self._schedule_bucket_request, (bucket,),
priority=pri,
)
# no tokens, break
break
def _limit_task(self, request, bucket, tokens):
bucket.add((request, tokens))
return self._schedule_bucket_request(bucket)
def _limit_post_eta(self, request, bucket, tokens):
self.qos.decrement_eventually()
bucket.add((request, tokens))
return self._schedule_bucket_request(bucket)
def start(self):
blueprint = self.blueprint
while blueprint.state not in STOP_CONDITIONS:
maybe_shutdown()
if self.restart_count:
try:
self._restart_state.step()
except RestartFreqExceeded as exc:
crit('Frequent restarts detected: %r', exc, exc_info=1)
sleep(1)
self.restart_count += 1
if self.app.conf.broker_channel_error_retry:
recoverable_errors = (self.connection_errors + self.channel_errors)
else:
recoverable_errors = self.connection_errors
try:
blueprint.start(self)
except recoverable_errors as exc:
# If we're not retrying connections, we need to properly shutdown or terminate
# the Celery main process instead of abruptly aborting the process without any cleanup.
is_connection_loss_on_startup = self.first_connection_attempt
self.first_connection_attempt = False
connection_retry_type = self._get_connection_retry_type(is_connection_loss_on_startup)
connection_retry = self.app.conf[connection_retry_type]
if not connection_retry:
crit(
f"Retrying to {'establish' if is_connection_loss_on_startup else 're-establish'} "
f"a connection to the message broker after a connection loss has "
f"been disabled (app.conf.{connection_retry_type}=False). Shutting down..."
)
raise WorkerShutdown(1) from exc
if isinstance(exc, OSError) and exc.errno == errno.EMFILE:
crit("Too many open files. Aborting...")
raise WorkerTerminate(1) from exc
maybe_shutdown()
if blueprint.state not in STOP_CONDITIONS:
if self.connection:
self.on_connection_error_after_connected(exc)
else:
self.on_connection_error_before_connected(exc)
self.on_close()
blueprint.restart(self)
def _get_connection_retry_type(self, is_connection_loss_on_startup):
return ('broker_connection_retry_on_startup'
if (is_connection_loss_on_startup
and self.app.conf.broker_connection_retry_on_startup is not None)
else 'broker_connection_retry')
def on_connection_error_before_connected(self, exc):
error(CONNECTION_ERROR, self.conninfo.as_uri(), exc,
'Trying to reconnect...')
def on_connection_error_after_connected(self, exc):
warn(CONNECTION_RETRY, exc_info=True)
try:
self.connection.collect()
except Exception: # pylint: disable=broad-except
pass
if self.app.conf.worker_cancel_long_running_tasks_on_connection_loss:
for request in tuple(active_requests):
if request.task.acks_late and not request.acknowledged:
warn(TERMINATING_TASK_ON_RESTART_AFTER_A_CONNECTION_LOSS,
request)
request.cancel(self.pool)
else:
warnings.warn(CANCEL_TASKS_BY_DEFAULT, CPendingDeprecationWarning)
if self.app.conf.worker_enable_prefetch_count_reduction:
self.initial_prefetch_count = max(
self.prefetch_multiplier,
self.max_prefetch_count - len(tuple(active_requests)) * self.prefetch_multiplier
)
self._maximum_prefetch_restored = self.initial_prefetch_count == self.max_prefetch_count
if not self._maximum_prefetch_restored:
logger.info(
f"Temporarily reducing the prefetch count to {self.initial_prefetch_count} to avoid "
f"over-fetching since {len(tuple(active_requests))} tasks are currently being processed.\n"
f"The prefetch count will be gradually restored to {self.max_prefetch_count} as the tasks "
"complete processing."
)
def register_with_event_loop(self, hub):
self.blueprint.send_all(
self, 'register_with_event_loop', args=(hub,),
description='Hub.register',
)
def shutdown(self):
self.perform_pending_operations()
self.blueprint.shutdown(self)
def stop(self):
self.blueprint.stop(self)
def on_ready(self):
callback, self.init_callback = self.init_callback, None
if callback:
callback(self)
def loop_args(self):
return (self, self.connection, self.task_consumer,
self.blueprint, self.hub, self.qos, self.amqheartbeat,
self.app.clock, self.amqheartbeat_rate)
def on_decode_error(self, message, exc):
"""Callback called if an error occurs while decoding a message.
Simply logs the error and acknowledges the message so it
doesn't enter a loop.
Arguments:
message (kombu.Message): The message received.
exc (Exception): The exception being handled.
"""
crit(MESSAGE_DECODE_ERROR,
exc, message.content_type, message.content_encoding,
safe_repr(message.headers), dump_body(message, message.body),
exc_info=1)
message.ack()
def on_close(self):
# Clear internal queues to get rid of old messages.
# They can't be acked anyway, as a delivery tag is specific
# to the current channel.
if self.controller and self.controller.semaphore:
self.controller.semaphore.clear()
if self.timer:
self.timer.clear()
for bucket in self.task_buckets.values():
if bucket:
bucket.clear_pending()
for request_id in reserved_requests:
if request_id in requests:
del requests[request_id]
reserved_requests.clear()
if self.pool and self.pool.flush:
self.pool.flush()
def connect(self):
"""Establish the broker connection used for consuming tasks.
Retries establishing the connection if the
:setting:`broker_connection_retry` setting is enabled
"""
conn = self.connection_for_read(heartbeat=self.amqheartbeat)
if self.hub:
conn.transport.register_with_event_loop(conn.connection, self.hub)
return conn
def connection_for_read(self, heartbeat=None):
return self.ensure_connected(
self.app.connection_for_read(heartbeat=heartbeat))
def connection_for_write(self, url=None, heartbeat=None):
return self.ensure_connected(
self.app.connection_for_write(url=url, heartbeat=heartbeat))
def ensure_connected(self, conn):
# Callback called for each retry while the connection
# can't be established.
def _error_handler(exc, interval, next_step=CONNECTION_RETRY_STEP):
if getattr(conn, 'alt', None) and interval == 0:
next_step = CONNECTION_FAILOVER
elif interval > 0:
self.broker_connection_retry_attempt += 1
next_step = next_step.format(
when=humanize_seconds(interval, 'in', ' '),
retries=self.broker_connection_retry_attempt,
max_retries=self.app.conf.broker_connection_max_retries)
error(CONNECTION_ERROR, conn.as_uri(), exc, next_step)
# Remember that the connection is lazy, it won't establish
# until needed.
# TODO: Rely only on broker_connection_retry_on_startup to determine whether connection retries are disabled.
# We will make the switch in Celery 6.0.
retry_disabled = False
if self.app.conf.broker_connection_retry_on_startup is None:
# If broker_connection_retry_on_startup is not set, revert to broker_connection_retry
# to determine whether connection retries are disabled.
retry_disabled = not self.app.conf.broker_connection_retry
if retry_disabled:
warnings.warn(
CPendingDeprecationWarning(
"The broker_connection_retry configuration setting will no longer determine\n"
"whether broker connection retries are made during startup in Celery 6.0 and above.\n"
"If you wish to refrain from retrying connections on startup,\n"
"you should set broker_connection_retry_on_startup to False instead.")
)
else:
if self.first_connection_attempt:
retry_disabled = not self.app.conf.broker_connection_retry_on_startup
else:
retry_disabled = not self.app.conf.broker_connection_retry
if retry_disabled:
# Retry disabled, just call connect directly.
conn.connect()
self.first_connection_attempt = False
return conn
conn = conn.ensure_connection(
_error_handler, self.app.conf.broker_connection_max_retries,
callback=maybe_shutdown,
)
self.first_connection_attempt = False
self.broker_connection_retry_attempt = 0
return conn
def _flush_events(self):
if self.event_dispatcher:
self.event_dispatcher.flush()
def on_send_event_buffered(self):
if self.hub:
self.hub._ready.add(self._flush_events)
def add_task_queue(self, queue, exchange=None, exchange_type=None,
routing_key=None, **options):
cset = self.task_consumer
queues = self.app.amqp.queues
# Must use in' here, as __missing__ will automatically
# create queues when :setting:`task_create_missing_queues` is enabled.
# (Issue #1079)
if queue in queues:
q = queues[queue]
else:
exchange = queue if exchange is None else exchange
exchange_type = ('direct' if exchange_type is None
else exchange_type)
q = queues.select_add(queue,
exchange=exchange,
exchange_type=exchange_type,
routing_key=routing_key, **options)
if not cset.consuming_from(queue):
cset.add_queue(q)
cset.consume()
info('Started consuming from %s', queue)
def cancel_task_queue(self, queue):
info('Canceling queue %s', queue)
self.app.amqp.queues.deselect(queue)
self.task_consumer.cancel_by_queue(queue)
def apply_eta_task(self, task):
"""Method called by the timer to apply a task with an ETA/countdown."""
task_reserved(task)
self.on_task_request(task)
self.qos.decrement_eventually()
def _message_report(self, body, message):
return MESSAGE_REPORT.format(dump_body(message, body),
safe_repr(message.content_type),
safe_repr(message.content_encoding),
safe_repr(message.delivery_info),
safe_repr(message.headers))
def on_unknown_message(self, body, message):
warn(UNKNOWN_FORMAT, self._message_report(body, message))
message.reject_log_error(logger, self.connection_errors)
signals.task_rejected.send(sender=self, message=message, exc=None)
def on_unknown_task(self, body, message, exc):
error(UNKNOWN_TASK_ERROR,
exc,
dump_body(message, body),
message.headers,
message.delivery_info,
exc_info=True)
try:
id_, name = message.headers['id'], message.headers['task']
root_id = message.headers.get('root_id')
except KeyError: # proto1
payload = message.payload
id_, name = payload['id'], payload['task']
root_id = None
request = Bunch(
name=name, chord=None, root_id=root_id,
correlation_id=message.properties.get('correlation_id'),
reply_to=message.properties.get('reply_to'),
errbacks=None,
)
message.reject_log_error(logger, self.connection_errors)
self.app.backend.mark_as_failure(
id_, NotRegistered(name), request=request,
)
if self.event_dispatcher:
self.event_dispatcher.send(
'task-failed', uuid=id_,
exception=f'NotRegistered({name!r})',
)
signals.task_unknown.send(
sender=self, message=message, exc=exc, name=name, id=id_,
)
def on_invalid_task(self, body, message, exc):
error(INVALID_TASK_ERROR, exc, dump_body(message, body),
exc_info=True)
message.reject_log_error(logger, self.connection_errors)
signals.task_rejected.send(sender=self, message=message, exc=exc)
def update_strategies(self):
loader = self.app.loader
for name, task in self.app.tasks.items():
self.strategies[name] = task.start_strategy(self.app, self)
task.__trace__ = build_tracer(name, task, loader, self.hostname,
app=self.app)
def create_task_handler(self, promise=promise):
strategies = self.strategies
on_unknown_message = self.on_unknown_message
on_unknown_task = self.on_unknown_task
on_invalid_task = self.on_invalid_task
callbacks = self.on_task_message
call_soon = self.call_soon
def on_task_received(message):
# payload will only be set for v1 protocol, since v2
# will defer deserializing the message body to the pool.
payload = None
try:
type_ = message.headers['task'] # protocol v2
except TypeError:
return on_unknown_message(None, message)
except KeyError:
try:
payload = message.decode()
except Exception as exc: # pylint: disable=broad-except
return self.on_decode_error(message, exc)
try:
type_, payload = payload['task'], payload # protocol v1
except (TypeError, KeyError):
return on_unknown_message(payload, message)
try:
strategy = strategies[type_]
except KeyError as exc:
return on_unknown_task(None, message, exc)
else:
try:
ack_log_error_promise = promise(
call_soon,
(message.ack_log_error,),
on_error=self._restore_prefetch_count_after_connection_restart,
)
reject_log_error_promise = promise(
call_soon,
(message.reject_log_error,),
on_error=self._restore_prefetch_count_after_connection_restart,
)
if (
not self._maximum_prefetch_restored
and self.restart_count > 0
and self._new_prefetch_count <= self.max_prefetch_count
):
ack_log_error_promise.then(self._restore_prefetch_count_after_connection_restart,
on_error=self._restore_prefetch_count_after_connection_restart)
reject_log_error_promise.then(self._restore_prefetch_count_after_connection_restart,
on_error=self._restore_prefetch_count_after_connection_restart)
strategy(
message, payload,
ack_log_error_promise,
reject_log_error_promise,
callbacks,
)
except (InvalidTaskError, ContentDisallowed) as exc:
return on_invalid_task(payload, message, exc)
except DecodeError as exc:
return self.on_decode_error(message, exc)
return on_task_received
def _restore_prefetch_count_after_connection_restart(self, p, *args):
with self.qos._mutex:
if any((
not self.app.conf.worker_enable_prefetch_count_reduction,
self._maximum_prefetch_restored,
)):
return
new_prefetch_count = min(self.max_prefetch_count, self._new_prefetch_count)
self.qos.value = self.initial_prefetch_count = new_prefetch_count
self.qos.set(self.qos.value)
already_restored = self._maximum_prefetch_restored
self._maximum_prefetch_restored = new_prefetch_count == self.max_prefetch_count
if already_restored is False and self._maximum_prefetch_restored is True:
logger.info(
"Resuming normal operations following a restart.\n"
f"Prefetch count has been restored to the maximum of {self.max_prefetch_count}"
)
@property
def max_prefetch_count(self):
return self.pool.num_processes * self.prefetch_multiplier
@property
def _new_prefetch_count(self):
return self.qos.value + self.prefetch_multiplier
def __repr__(self):
"""``repr(self)``."""
return '<Consumer: {self.hostname} ({state})>'.format(
self=self, state=self.blueprint.human_state(),
)
def cancel_active_requests(self):
"""Cancel active requests during shutdown.
Cancels all active requests that either do not require late acknowledgments or,
if they do, have not been acknowledged yet.
Does not cancel successful tasks, even if they have not been acknowledged yet.
"""
def should_cancel(request):
if not request.task.acks_late:
# Task does not require late acknowledgment, cancel it.
return True
if not request.acknowledged:
# Task is late acknowledged, but it has not been acknowledged yet, cancel it.
if request.id in successful_requests:
# Unless it was successful, in which case we don't want to cancel it.
return False
return True
# Task is late acknowledged, but it has already been acknowledged.
return False # Do not cancel and allow it to gracefully finish as it has already been acknowledged.
requests_to_cancel = tuple(filter(should_cancel, active_requests))
if requests_to_cancel:
for request in requests_to_cancel:
# For acks_late tasks, don't emit RETRY signal since broker will handle redelivery
# For non-acks_late tasks, emit RETRY signal as usual
emit_retry = not request.task.acks_late
request.cancel(self.pool, emit_retry=emit_retry)
| Consumer |
python | mitmproxy__pdoc | test/testdata/visibility.py | {
"start": 127,
"end": 859
} | class ____:
# Not shown because no docstring.
pass
def public_func_marked_private():
"""
This is a public method that's not shown because it's marked as @private.
"""
def _protected_func():
"""
This is a protected method that's not shown because its name starts with _.
"""
def __private_func():
"""
This is a private method that's not shown because its name starts with __.
"""
def __private_func_explicitly_public():
"""@public
This is a private method that's shown because it is explicitly marked
as public.
"""
def public_func():
"""
This is another public method that's shown. It should show without additional
whitespace above.
"""
| Undocumented |
python | encode__django-rest-framework | tests/test_middleware.py | {
"start": 2691,
"end": 3946
} | class ____(APITestCase):
@override_settings(MIDDLEWARE=('tests.test_middleware.RequestUserMiddleware',))
def test_middleware_can_access_user_when_processing_response(self):
user = User.objects.create_user('john', 'john@example.com', 'password')
key = 'abcd1234'
Token.objects.create(key=key, user=user)
self.client.get('/auth', HTTP_AUTHORIZATION='Token %s' % key)
@override_settings(MIDDLEWARE=('tests.test_middleware.RequestPOSTMiddleware',))
def test_middleware_can_access_request_post_when_processing_response(self):
response = self.client.post('/post', {'foo': 'bar'})
assert response.status_code == 200
response = self.client.post('/post', {'foo': 'bar'}, format='json')
assert response.status_code == 200
@unittest.skipUnless(django.VERSION >= (5, 1), 'Only for Django 5.1+')
@override_settings(
ROOT_URLCONF='tests.test_middleware',
MIDDLEWARE=(
# Needed for AuthenticationMiddleware
'django.contrib.sessions.middleware.SessionMiddleware',
# Needed for LoginRequiredMiddleware
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.LoginRequiredMiddleware',
),
)
| TestMiddleware |
python | walkccc__LeetCode | solutions/1827. Minimum Operations to Make the Array Increasing/1827.py | {
"start": 0,
"end": 198
} | class ____:
def minOperations(self, nums: list[int]) -> int:
ans = 0
last = 0
for num in nums:
ans += max(0, last - num + 1)
last = max(num, last + 1)
return ans
| Solution |
python | huggingface__transformers | tests/utils/test_modeling_utils.py | {
"start": 125497,
"end": 130968
} | class ____(unittest.TestCase):
@unittest.skip("Just a bit annoying")
def test_error_no_sdpa_available(self):
with self.assertRaises(ValueError) as cm:
_ = AutoModel.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel", attn_implementation="sdpa")
self.assertTrue(
"does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention"
in str(cm.exception)
)
_ = AutoModel.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel")
# TODO (ydshieh): use another model
@unittest.skip("model deleted")
def test_error_no_flash_available(self):
with self.assertRaises(ValueError) as cm:
_ = AutoModel.from_pretrained(
"hf-tiny-model-private/tiny-random-MCTCTModel", attn_implementation="flash_attention_2"
)
self.assertTrue("does not support Flash Attention 2.0" in str(cm.exception))
# TODO (ydshieh): use another model
@unittest.skip("model deleted")
def test_error_no_flash_available_with_config(self):
with self.assertRaises(ValueError) as cm:
config = AutoConfig.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel")
_ = AutoModel.from_pretrained(
"hf-tiny-model-private/tiny-random-MCTCTModel", config=config, attn_implementation="flash_attention_2"
)
self.assertTrue("does not support Flash Attention 2.0" in str(cm.exception))
# TODO (ydshieh): use another model
@unittest.skip("model deleted")
def test_error_wrong_attn_implementation(self):
with self.assertRaises(ValueError) as cm:
_ = AutoModel.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel", attn_implementation="foo")
self.assertTrue('The only possible arguments are `attn_implementation="eager"' in str(cm.exception))
def test_not_available_flash(self):
if is_flash_attn_2_available():
self.skipTest(reason="Please uninstall flash-attn package to run test_not_available_flash")
if is_torch_npu_available():
self.skipTest(
reason="FlashAttention2 is supported on Ascend NPU without using package `flash-attn`, ignore this test case."
)
if is_kernels_available():
self.skipTest(reason="Please uninstall `kernels` package to run `test_not_available_flash`")
with self.assertRaises(ImportError) as cm:
_ = AutoModel.from_pretrained(
"hf-internal-testing/tiny-random-GPTBigCodeModel", attn_implementation="flash_attention_2"
)
self.assertTrue("the package flash_attn seems to be not installed" in str(cm.exception))
def test_not_available_flash_with_config(self):
if is_flash_attn_2_available():
self.skipTest(reason="Please uninstall flash-attn package to run test_not_available_flash")
if is_torch_npu_available():
self.skipTest(
reason="FlashAttention2 is supported on Ascend NPU without using package `flash-attn`, ignore this test case."
)
if is_kernels_available():
self.skipTest(reason="Please uninstall `kernels` package to run `test_not_available_flash_with_config`")
config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-GPTBigCodeModel")
with self.assertRaises(ImportError) as cm:
_ = AutoModel.from_pretrained(
"hf-internal-testing/tiny-random-GPTBigCodeModel",
config=config,
attn_implementation="flash_attention_2",
)
self.assertTrue("the package flash_attn seems to be not installed" in str(cm.exception))
def test_kernels_fallback(self):
if not is_kernels_available():
self.skipTest(reason="Please install `kernels` package to run `test_kernels_fallback`")
if is_flash_attn_2_available():
self.skipTest(reason="Please uninstall flash-attn package to run test_kernels_fallback")
if is_torch_npu_available():
self.skipTest(
reason="FlashAttention2 is supported on Ascend NPU without using package `flash-attn`, ignore this test case."
)
logger = logging.get_logger("transformers.modeling_utils")
with LoggingLevel(logging.WARNING):
with CaptureLogger(logger) as cl:
_ = AutoModel.from_pretrained(
"hf-internal-testing/tiny-random-GPTBigCodeModel", attn_implementation="flash_attention_2"
)
self.assertTrue(
"You do not have `flash_attn` installed, using `kernels-community/flash-attn2` from the `kernels` library instead!"
in cl.out
)
# TODO (ydshieh): use another model
@unittest.skip("model deleted")
def test_not_available_kernels(self):
if is_kernels_available():
self.skipTest(reason="Please uninstall `kernels` package to run `test_not_available_kernels`")
with self.assertRaises(ImportError) as cm:
_ = AutoModel.from_pretrained(
"hf-tiny-model-private/tiny-random-MCTCTModel", attn_implementation="kernels-community/flash-attn2"
)
self.assertTrue("`kernels` is either not installed or uses an incompatible version." in str(cm.exception))
@require_torch
| TestAttentionImplementation |
python | jazzband__django-pipeline | tests/tests/test_compiler.py | {
"start": 1668,
"end": 1978
} | class ____(SubProcessCompiler):
output_extension = "junk"
def match_file(self, path):
return path.endswith(".coffee")
def compile_file(self, infile, outfile, outdated=False, force=False):
command = ("cp", infile, outfile)
return self.execute_command(command)
| CopyingCompiler |
python | xlwings__xlwings | xlwings/_xlwindows.py | {
"start": 55288,
"end": 55334
} | class ____(Collection):
_wrap = Shape
| Shapes |
python | pydantic__pydantic | tests/test_type_adapter.py | {
"start": 850,
"end": 924
} | class ____(BaseModel, Generic[T]):
x: NestedList[T]
| GenericPydanticModel |
python | PyCQA__flake8 | src/flake8/formatting/default.py | {
"start": 1996,
"end": 2128
} | class ____(SimpleFormatter):
"""Pylint formatter for Flake8."""
error_format = "%(path)s:%(row)d: [%(code)s] %(text)s"
| Pylint |
python | Textualize__textual | docs/examples/widgets/static.py | {
"start": 80,
"end": 245
} | class ____(App):
def compose(self) -> ComposeResult:
yield Static("Hello, world!")
if __name__ == "__main__":
app = StaticApp()
app.run()
| StaticApp |
python | pyca__cryptography | src/cryptography/hazmat/primitives/asymmetric/ec.py | {
"start": 6959,
"end": 7115
} | class ____(EllipticCurve):
name = "sect233r1"
key_size = 233
group_order = 0x1000000000000000000000000000013E974E72F8A6922031D2603CFE0D7
| SECT233R1 |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 821480,
"end": 822262
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for OrganizationAuditEntry."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("OrganizationAuditEntryEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("OrganizationAuditEntry"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| OrganizationAuditEntryConnection |
python | rapidsai__cudf | python/cudf/cudf/core/indexed_frame.py | {
"start": 7531,
"end": 220652
} | class ____(Frame):
"""A frame containing an index.
This class encodes the common behaviors for core user-facing classes like
DataFrame and Series that consist of a sequence of columns along with a
special set of index columns.
Parameters
----------
data : dict
An dict mapping column names to Columns
index : Table
A Frame representing the (optional) index columns.
"""
# mypy can't handle bound type variables as class members
_loc_indexer_type: type[_LocIndexerClass] # type: ignore[valid-type]
_iloc_indexer_type: type[_IlocIndexerClass] # type: ignore[valid-type]
_groupby = GroupBy
_resampler = _Resampler
_VALID_SCANS = {
"cumsum",
"cumprod",
"cummin",
"cummax",
}
# Necessary because the function names don't directly map to the docs.
_SCAN_DOCSTRINGS = {
"cumsum": {"op_name": "cumulative sum"},
"cumprod": {"op_name": "cumulative product"},
"cummin": {"op_name": "cumulative min"},
"cummax": {"op_name": "cumulative max"},
}
def __init__(
self,
data: ColumnAccessor | MutableMapping[Any, ColumnBase],
index: Index,
attrs: dict[Hashable, Any] | None = None,
):
super().__init__(data=data)
if not isinstance(index, Index):
raise ValueError(
f"index must be a cudf index not {type(index).__name__}"
)
elif self._data.nrows > 0 and self._data.nrows != len(index):
raise ValueError(
f"Length of values ({self._data.nrows}) does not "
f"match length of index ({len(index)})"
)
self._index = index
if attrs is None:
self._attrs = {}
else:
self._attrs = attrs
@property
def _num_rows(self) -> int:
# Important to use the index because the data may be empty.
return len(self.index)
@property
def _index_names(self) -> tuple[Any, ...]: # TODO: Tuple[str]?
return self.index._column_names
@property
def attrs(self) -> dict[Hashable, Any]:
"""
Dictionary of global attributes of this dataset.
Notes
-----
Many operations that create new datasets will copy ``attrs``. Copies
are always deep so that changing ``attrs`` will only affect the
present dataset. ``cudf.concat`` copies ``attrs`` only if all input
datasets have the same ``attrs``.
Examples
--------
For Series:
>>> import cudf
>>> ser = cudf.Series([1, 2, 3])
>>> ser.attrs = {"A": [10, 20, 30]}
>>> ser.attrs
{'A': [10, 20, 30]}
For DataFrame:
>>> df = cudf.DataFrame({'A': [1, 2], 'B': [3, 4]})
>>> df.attrs = {"A": [10, 20, 30]}
>>> df.attrs
{'A': [10, 20, 30]}
"""
return self._attrs
@attrs.setter
def attrs(self, value: Mapping[Hashable, Any]) -> None:
self._attrs = dict(value)
@classmethod
def _from_data(
cls,
data: MutableMapping,
index: Index | None = None,
attrs: dict | None = None,
):
out = super()._from_data(data)
if index is None:
# out._num_rows requires .index to be defined
index = RangeIndex(out._data.nrows)
elif not isinstance(index, Index):
raise ValueError(
f"index must be a cudf.Index not {type(index).__name__}"
)
out._index = index
out._attrs = {} if attrs is None else attrs
return out
@_performance_tracking
def _get_columns_by_label(self, labels) -> Self:
"""
Returns columns of the Frame specified by `labels`.
Akin to cudf.DataFrame(...).loc[:, labels]
"""
return self._from_data(
self._data.select_by_label(labels),
index=self.index,
attrs=self.attrs,
)
@_performance_tracking
def _from_data_like_self(self, data: MutableMapping):
out = super()._from_data_like_self(data)
out.index = self.index
out._attrs = copy.deepcopy(self._attrs)
return out
@_performance_tracking
def _from_columns_like_self(
self,
columns: list[ColumnBase],
column_names: Iterable[str] | None = None,
index_names: list[str] | None = None,
) -> Self:
"""Construct a `Frame` from a list of columns with metadata from self.
If `index_names` is set, the first `len(index_names)` columns are
used to construct the index of the frame.
"""
if column_names is None:
column_names = self._column_names
data_columns = columns
index = None
if index_names is not None:
n_index_columns = len(index_names)
data_columns = columns[n_index_columns:]
index = _index_from_data(
dict(enumerate(columns[:n_index_columns]))
)
index = index._copy_type_metadata(self.index)
# TODO: Should this if statement be handled in Index._copy_type_metadata?
if (
isinstance(self.index, cudf.CategoricalIndex)
and not isinstance(index, cudf.CategoricalIndex)
) or (
isinstance(self.index, cudf.MultiIndex)
and not isinstance(index, cudf.MultiIndex)
):
index = type(self.index)._from_data(index._data)
if isinstance(index, cudf.MultiIndex):
index.names = index_names
else:
index.name = index_names[0]
data = dict(zip(column_names, data_columns, strict=True))
frame = type(self)._from_data(data, index, attrs=self.attrs)
return frame._copy_type_metadata(self)
def __round__(self, digits=0):
# Shouldn't be added to BinaryOperand
# because pandas Index doesn't implement
# this method.
return self.round(decimals=digits)
def _mimic_inplace(
self, result: Self, inplace: bool = False
) -> Self | None:
if inplace:
self._index = result.index
self._attrs = result._attrs
return super()._mimic_inplace(result, inplace)
@_performance_tracking
def _scan(
self,
op: str,
axis: Axis | None = None,
skipna: bool = True,
*args,
**kwargs,
) -> Self:
"""
Return {op_name} of the {cls}.
Parameters
----------
axis: {{index (0), columns(1)}}
Axis for the function to be applied on.
skipna: bool, default True
Exclude NA/null values. If an entire row/column is NA,
the result will be NA.
Returns
-------
{cls}
Examples
--------
**Series**
>>> import cudf
>>> ser = cudf.Series([1, 5, 2, 4, 3])
>>> ser.cumsum()
0 1
1 6
2 8
3 12
4 15
**DataFrame**
>>> import cudf
>>> df = cudf.DataFrame({{'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]}})
>>> s.cumsum()
a b
0 1 7
1 3 15
2 6 24
3 10 34
"""
if "numeric_only" in kwargs:
raise TypeError(
"got an unexpected keyword argument 'numeric_only'"
)
cast_to_int = op in ("cumsum", "cumprod")
skipna = True if skipna is None else skipna
results = []
for col in self._columns:
if skipna:
result_col = col.nans_to_nulls()
else:
if col.has_nulls(include_nan=True):
first_index = col.isnull().find_first_value(True)
result_col = col.copy()
result_col[first_index:] = None
else:
result_col = col
if cast_to_int and result_col.dtype.kind in "uib":
# For reductions that accumulate a value (e.g. sum, not max)
# pandas returns an int64 dtype for all int or bool dtypes.
if cudf.get_option("mode.pandas_compatible"):
dtype = get_dtype_of_same_kind(
result_col.dtype, np.dtype(np.int64)
)
else:
dtype = np.dtype(np.int64)
result_col = result_col.astype(dtype)
results.append(getattr(result_col, op)())
return self._from_data_like_self(
self._data._from_columns_like_self(results)
)
@_performance_tracking
@ioutils.doc_to_json()
def to_json(self, path_or_buf=None, *args, **kwargs):
"""{docstring}"""
return cudf.io.json.to_json(
self, path_or_buf=path_or_buf, *args, **kwargs
)
@_performance_tracking
@ioutils.doc_to_hdf()
def to_hdf(self, path_or_buf, key, *args, **kwargs):
"""{docstring}"""
cudf.io.hdf.to_hdf(path_or_buf, key, self, *args, **kwargs)
@_performance_tracking
def to_string(self):
r"""
Convert to string
cuDF uses Pandas internals for efficient string formatting.
Set formatting options using pandas string formatting options and
cuDF objects will print identically to Pandas objects.
cuDF supports `null/None` as a value in any column type, which
is transparently supported during this output process.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame()
>>> df['key'] = [0, 1, 2]
>>> df['val'] = [float(i + 10) for i in range(3)]
>>> df.to_string()
' key val\n0 0 10.0\n1 1 11.0\n2 2 12.0'
"""
return str(self)
def copy(self, deep: bool = True) -> Self:
"""Make a copy of this object's indices and data.
When ``deep=True`` (default), a new object will be created with a
copy of the calling object's data and indices. Modifications to
the data or indices of the copy will not be reflected in the
original object (see notes below).
When ``deep=False``, a new object will be created without copying
the calling object's data or index (only references to the data
and index are copied). Any changes to the data of the original
will be reflected in the shallow copy (and vice versa).
Parameters
----------
deep : bool, default True
Make a deep copy, including a copy of the data and the indices.
With ``deep=False`` neither the indices nor the data are copied.
Returns
-------
copy : Series or DataFrame
Object type matches caller.
Examples
--------
>>> s = cudf.Series([1, 2], index=["a", "b"])
>>> s
a 1
b 2
dtype: int64
>>> s_copy = s.copy()
>>> s_copy
a 1
b 2
dtype: int64
**Shallow copy versus default (deep) copy:**
>>> s = cudf.Series([1, 2], index=["a", "b"])
>>> deep = s.copy()
>>> shallow = s.copy(deep=False)
Updates to the data shared by shallow copy and original is reflected
in both; deep copy remains unchanged.
>>> s['a'] = 3
>>> shallow['b'] = 4
>>> s
a 3
b 4
dtype: int64
>>> shallow
a 3
b 4
dtype: int64
>>> deep
a 1
b 2
dtype: int64
"""
return self._from_data(
self._data.copy(deep=deep),
self.index.copy(deep=deep),
attrs=copy.deepcopy(self.attrs) if deep else self._attrs,
)
@_performance_tracking
def equals(self, other) -> bool:
return super().equals(other) and self.index.equals(other.index)
@property
def index(self):
"""Get the labels for the rows."""
return self._index
@index.setter
def index(self, value):
old_length = len(self)
new_length = len(value)
# A DataFrame with 0 columns can have an index of arbitrary length.
if self._num_columns > 0 and new_length != old_length:
raise ValueError(
f"Length mismatch: Expected axis has {old_length} elements, "
f"new values have {len(value)} elements"
)
# avoid unnecessary cast to Index
value = ensure_index(value)
self._index = value
@_performance_tracking
def replace(
self,
to_replace=None,
value=no_default,
inplace: bool = False,
limit=None,
regex: bool = False,
method=no_default,
) -> Self | None:
"""Replace values given in ``to_replace`` with ``value``.
Parameters
----------
to_replace : numeric, str or list-like
Value(s) to replace.
* numeric or str:
- values equal to ``to_replace`` will be replaced
with ``value``
* list of numeric or str:
- If ``value`` is also list-like, ``to_replace`` and
``value`` must be of same length.
* dict:
- Dicts can be used to specify different replacement values
for different existing values. For example, {'a': 'b',
'y': 'z'} replaces the value 'a' with 'b' and
'y' with 'z'.
To use a dict in this way the ``value`` parameter should
be ``None``.
value : scalar, dict, list-like, str, default None
Value to replace any values matching ``to_replace`` with.
inplace : bool, default False
If True, in place.
See Also
--------
Series.fillna
Raises
------
TypeError
- If ``to_replace`` is not a scalar, array-like, dict, or None
- If ``to_replace`` is a dict and value is not a list, dict,
or Series
ValueError
- If a list is passed to ``to_replace`` and ``value`` but they
are not the same length.
Returns
-------
result : Series
Series after replacement. The mask and index are preserved.
Examples
--------
**Series**
Scalar ``to_replace`` and ``value``
>>> import cudf
>>> s = cudf.Series([0, 1, 2, 3, 4])
>>> s
0 0
1 1
2 2
3 3
4 4
dtype: int64
>>> s.replace(0, 5)
0 5
1 1
2 2
3 3
4 4
dtype: int64
List-like ``to_replace``
>>> s.replace([1, 2], 10)
0 0
1 10
2 10
3 3
4 4
dtype: int64
dict-like ``to_replace``
>>> s.replace({1:5, 3:50})
0 0
1 5
2 2
3 50
4 4
dtype: int64
>>> s = cudf.Series(['b', 'a', 'a', 'b', 'a'])
>>> s
0 b
1 a
2 a
3 b
4 a
dtype: object
>>> s.replace({'a': None})
0 b
1 <NA>
2 <NA>
3 b
4 <NA>
dtype: object
If there is a mismatch in types of the values in
``to_replace`` & ``value`` with the actual series, then
cudf exhibits different behavior with respect to pandas
and the pairs are ignored silently:
>>> s = cudf.Series(['b', 'a', 'a', 'b', 'a'])
>>> s
0 b
1 a
2 a
3 b
4 a
dtype: object
>>> s.replace('a', 1)
0 b
1 a
2 a
3 b
4 a
dtype: object
>>> s.replace(['a', 'c'], [1, 2])
0 b
1 a
2 a
3 b
4 a
dtype: object
**DataFrame**
Scalar ``to_replace`` and ``value``
>>> import cudf
>>> df = cudf.DataFrame({'A': [0, 1, 2, 3, 4],
... 'B': [5, 6, 7, 8, 9],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> df
A B C
0 0 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace(0, 5)
A B C
0 5 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
List-like ``to_replace``
>>> df.replace([0, 1, 2, 3], 4)
A B C
0 4 5 a
1 4 6 b
2 4 7 c
3 4 8 d
4 4 9 e
>>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])
A B C
0 4 5 a
1 3 6 b
2 2 7 c
3 1 8 d
4 4 9 e
dict-like ``to_replace``
>>> df.replace({0: 10, 1: 100})
A B C
0 10 5 a
1 100 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': 0, 'B': 5}, 100)
A B C
0 100 100 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
.. pandas-compat::
:meth:`pandas.DataFrame.replace`, :meth:`pandas.Series.replace`
Parameters that are currently not supported are: `limit`, `regex`,
`method`
"""
if limit is not None:
raise NotImplementedError("limit parameter is not implemented yet")
if regex:
raise NotImplementedError("regex parameter is not implemented yet")
if method is not no_default:
warnings.warn(
"The 'method' keyword in "
f"{type(self).__name__}.replace is deprecated and "
"will be removed in a future version.",
FutureWarning,
)
elif method not in {"pad", None, no_default}:
raise NotImplementedError("method parameter is not implemented")
if (
value is no_default
and method is no_default
and not is_dict_like(to_replace)
and regex is False
):
warnings.warn(
f"{type(self).__name__}.replace without 'value' and with "
"non-dict-like 'to_replace' is deprecated "
"and will raise in a future version. "
"Explicitly specify the new values instead.",
FutureWarning,
)
if not (to_replace is None and value is no_default):
(
all_na_per_column,
to_replace_per_column,
replacements_per_column,
) = _get_replacement_values_for_columns(
to_replace=to_replace,
value=value,
columns_dtype_map=dict(self._dtypes),
)
copy_data = []
for name, col in self._column_labels_and_values:
try:
replaced = col.find_and_replace(
to_replace_per_column[name],
replacements_per_column[name],
all_na_per_column[name],
)
except (KeyError, OverflowError):
# We need to create a deep copy if:
# i. `find_and_replace` was not successful or any of
# `to_replace_per_column`, `replacements_per_column`,
# `all_na_per_column` don't contain the `name`
# that exists in `copy_data`.
# ii. There is an OverflowError while trying to cast
# `to_replace_per_column` to `replacements_per_column`.
replaced = col.copy(deep=True)
copy_data.append(replaced)
result = self._from_data_like_self(
self._data._from_columns_like_self(copy_data)
)
else:
result = self.copy()
return self._mimic_inplace(result, inplace=inplace)
@_performance_tracking
def clip(self, lower=None, upper=None, axis=1, inplace=False):
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values.
Thresholds can be singular values or array like,
and in the latter case the clipping is performed
element-wise in the specified axis. Currently only
`axis=1` is supported.
Parameters
----------
lower : scalar or array_like, default None
Minimum threshold value. All values below this
threshold will be set to it. If it is None,
there will be no clipping based on lower.
In case of Series/Index, lower is expected to be
a scalar or an array of size 1.
upper : scalar or array_like, default None
Maximum threshold value. All values below this
threshold will be set to it. If it is None,
there will be no clipping based on upper.
In case of Series, upper is expected to be
a scalar or an array of size 1.
inplace : bool, default False
Returns
-------
Clipped DataFrame/Series/Index/MultiIndex
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({"a":[1, 2, 3, 4], "b":['a', 'b', 'c', 'd']})
>>> df.clip(lower=[2, 'b'], upper=[3, 'c'])
a b
0 2 b
1 2 b
2 3 c
3 3 c
>>> df.clip(lower=None, upper=[3, 'c'])
a b
0 1 a
1 2 b
2 3 c
3 3 c
>>> df.clip(lower=[2, 'b'], upper=None)
a b
0 2 b
1 2 b
2 3 c
3 4 d
>>> df.clip(lower=2, upper=3, inplace=True)
>>> df
a b
0 2 2
1 2 3
2 3 3
3 3 3
>>> import cudf
>>> sr = cudf.Series([1, 2, 3, 4])
>>> sr.clip(lower=2, upper=3)
0 2
1 2
2 3
3 3
dtype: int64
>>> sr.clip(lower=None, upper=3)
0 1
1 2
2 3
3 3
dtype: int64
>>> sr.clip(lower=2, upper=None, inplace=True)
>>> sr
0 2
1 2
2 3
3 4
dtype: int64
"""
if axis != 1:
raise NotImplementedError("`axis is not yet supported in clip`")
if lower is None and upper is None:
return None if inplace is True else self.copy(deep=True)
if is_scalar(lower):
lower = np.full(self._num_columns, lower)
if is_scalar(upper):
upper = np.full(self._num_columns, upper)
if len(lower) != len(upper):
raise ValueError("Length of lower and upper should be equal")
if len(lower) != self._num_columns:
raise ValueError(
"Length of lower/upper should be equal to number of columns"
)
if self.ndim == 1:
# In case of series and Index,
# swap lower and upper if lower > upper
if (
lower[0] is not None
and upper[0] is not None
and (lower[0] > upper[0])
):
lower[0], upper[0] = upper[0], lower[0]
data = (
col.clip(low, high)
for col, low, high in zip(self._columns, lower, upper, strict=True)
)
output = self._from_data_like_self(
self._data._from_columns_like_self(data)
)
return self._mimic_inplace(output, inplace=inplace)
@_performance_tracking
def abs(self):
"""
Return a Series/DataFrame with absolute numeric value of each element.
This function only applies to elements that are all numeric.
Returns
-------
DataFrame/Series
Absolute value of each element.
Examples
--------
Absolute numeric values in a Series
>>> s = cudf.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
"""
return self._unaryop("abs")
@_performance_tracking
def dot(self, other, reflect=False):
"""
Get dot product of frame and other, (binary operator `dot`).
Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`,
`dot`) to arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`,
`@`.
Parameters
----------
other : Sequence, Series, or DataFrame
Any multiple element data structure, or list-like object.
reflect : bool, default False
If ``True``, swap the order of the operands. See
https://docs.python.org/3/reference/datamodel.html#object.__ror__
for more information on when this is necessary.
Returns
-------
scalar, Series, or DataFrame
The result of the operation.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame([[1, 2, 3, 4],
... [5, 6, 7, 8]])
>>> df @ df.T
0 1
0 30 70
1 70 174
>>> s = cudf.Series([1, 1, 1, 1])
>>> df @ s
0 10
1 26
dtype: int64
>>> [1, 2, 3, 4] @ s
10
"""
# TODO: This function does not currently support nulls.
lhs = self.values
result_index = None
result_cols = None
if isinstance(self, cudf.Series) and isinstance(
other, (cudf.Series, cudf.DataFrame)
):
common = self.index.union(other.index)
if len(common) > max(len(self), len(other)):
raise ValueError("matrices are not aligned")
lhs = self.reindex(index=common, copy=False).values
rhs = other.reindex(index=common, copy=False).values
if isinstance(other, cudf.DataFrame):
result_index = other._data.to_pandas_index
elif isinstance(self, cudf.DataFrame) and isinstance(
other, (cudf.Series, cudf.DataFrame)
):
common = self._data.to_pandas_index.union(other.index.to_pandas())
if len(common) > max(self._num_columns, len(other)):
raise ValueError("matrices are not aligned")
lhs = self.reindex(columns=common, copy=False)
result_index = lhs.index
rhs = other.reindex(index=common, copy=False).values
lhs = lhs.values
if isinstance(other, cudf.DataFrame):
result_cols = other._data.to_pandas_index
elif isinstance(
other, (cp.ndarray, np.ndarray)
) or can_convert_to_column(other):
rhs = cp.asarray(other)
else:
# TODO: This should raise an exception, not return NotImplemented,
# but __matmul__ relies on the current behavior. We should either
# move this implementation to __matmul__ and call it from here
# (checking for NotImplemented and raising NotImplementedError if
# that's what's returned), or __matmul__ should catch a
# NotImplementedError from here and return NotImplemented. The
# latter feels cleaner (putting the implementation in this method
# rather than in the operator) but will be slower in the (highly
# unlikely) case that we're multiplying a cudf object with another
# type of object that somehow supports this behavior.
return NotImplemented
if reflect:
lhs, rhs = rhs, lhs
result = lhs.dot(rhs)
if result.ndim == 1:
return cudf.Series(
result,
index=self.index if result_index is None else result_index,
)
if result.ndim == 2:
return cudf.DataFrame(
result,
index=self.index if result_index is None else result_index,
columns=result_cols,
)
return result.item()
@_performance_tracking
def __matmul__(self, other):
return self.dot(other)
@_performance_tracking
def __rmatmul__(self, other):
return self.dot(other, reflect=True)
@_performance_tracking
def head(self, n=5):
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
For negative values of `n`, this function returns all rows except
the last `n` rows, equivalent to ``df[:-n]``.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
DataFrame or Series
The first `n` rows of the caller object.
Examples
--------
**Series**
>>> ser = cudf.Series(['alligator', 'bee', 'falcon',
... 'lion', 'monkey', 'parrot', 'shark', 'whale', 'zebra'])
>>> ser
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
dtype: object
Viewing the first 5 lines
>>> ser.head()
0 alligator
1 bee
2 falcon
3 lion
4 monkey
dtype: object
Viewing the first `n` lines (three in this case)
>>> ser.head(3)
0 alligator
1 bee
2 falcon
dtype: object
For negative values of `n`
>>> ser.head(-3)
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
dtype: object
**DataFrame**
>>> df = cudf.DataFrame()
>>> df['key'] = [0, 1, 2, 3, 4]
>>> df['val'] = [float(i + 10) for i in range(5)] # insert column
>>> df.head(2)
key val
0 0 10.0
1 1 11.0
"""
return self.iloc[:n]
@_performance_tracking
def tail(self, n=5):
"""
Returns the last n rows as a new DataFrame or Series
Examples
--------
**DataFrame**
>>> import cudf
>>> df = cudf.DataFrame()
>>> df['key'] = [0, 1, 2, 3, 4]
>>> df['val'] = [float(i + 10) for i in range(5)] # insert column
>>> df.tail(2)
key val
3 3 13.0
4 4 14.0
**Series**
>>> import cudf
>>> ser = cudf.Series([4, 3, 2, 1, 0])
>>> ser.tail(2)
3 1
4 0
"""
if n == 0:
return self.iloc[0:0]
return self.iloc[-n:]
@_performance_tracking
def pipe(self, func, *args, **kwargs):
"""
Apply ``func(self, *args, **kwargs)``.
Parameters
----------
func : function
Function to apply to the Series/DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the Series/DataFrame.
args : iterable, optional
Positional arguments passed into ``func``.
kwargs : mapping, optional
A dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Examples
--------
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. Instead of writing
>>> func(g(h(df), arg1=a), arg2=b, arg3=c)
You can write
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe(func, arg2=b, arg3=c)
... )
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``arg2``:
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe((func, 'arg2'), arg1=a, arg3=c)
... )
"""
return pipe(self, func, *args, **kwargs)
@_performance_tracking
def sum(
self,
axis=no_default,
skipna: bool = True,
numeric_only: bool = False,
min_count: int = 0,
**kwargs,
):
"""
Return sum of the values in the DataFrame.
Parameters
----------
axis: {index (0), columns(1)}
Axis for the function to be applied on.
skipna: bool, default True
Exclude NA/null values when computing the result.
numeric_only : bool, default False
If True, includes only float, int, boolean columns.
If False, will raise error in-case there are
non-numeric columns.
min_count: int, default 0
The required number of valid values to perform the operation.
If fewer than min_count non-NA values are present the result
will be NA.
The default being 0. This means the sum of an all-NA or empty
Series is 0, and the product of an all-NA or empty Series is 1.
Returns
-------
Series
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.sum()
a 10
b 34
dtype: int64
"""
return self._reduce(
"sum",
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
**kwargs,
)
@_performance_tracking
def product(
self,
axis=no_default,
skipna=True,
numeric_only=False,
min_count=0,
**kwargs,
):
"""
Return product of the values in the DataFrame.
Parameters
----------
axis: {index (0), columns(1)}
Axis for the function to be applied on.
skipna: bool, default True
Exclude NA/null values when computing the result.
numeric_only : bool, default False
If True, includes only float, int, boolean columns.
If False, will raise error in-case there are
non-numeric columns.
min_count: int, default 0
The required number of valid values to perform the operation.
If fewer than min_count non-NA values are present the result
will be NA.
The default being 0. This means the sum of an all-NA or empty
Series is 0, and the product of an all-NA or empty Series is 1.
Returns
-------
Series
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.product()
a 24
b 5040
dtype: int64
"""
return self._reduce(
# cuDF columns use "product" as the op name, but cupy uses "prod"
# and we need cupy if axis == 1.
"prod" if axis in {1, "columns"} else "product",
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
**kwargs,
)
# Alias for pandas compatibility.
prod = product
@_performance_tracking
def mean(
self,
axis: Axis = 0,
skipna: bool = True,
numeric_only: bool = False,
**kwargs,
):
"""
Return the mean of the values for the requested axis.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}
Axis for the function to be applied on.
skipna : bool, default True
Exclude NA/null values when computing the result.
numeric_only : bool, default False
If True, includes only float, int, boolean columns.
If False, will raise error in-case there are
non-numeric columns.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
mean : Series or DataFrame (if level specified)
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.mean()
a 2.5
b 8.5
dtype: float64
"""
return self._reduce(
"mean",
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
**kwargs,
)
@_performance_tracking
def median(
    self,
    axis=no_default,
    skipna: bool = True,
    numeric_only: bool = False,
    **kwargs,
):
    """
    Return the median of the values for the requested axis.

    Parameters
    ----------
    axis : {index (0), columns (1)}
        Axis for the function to be applied on. For Series this
        parameter is unused and defaults to 0.
    skipna : bool, default True
        Exclude NA/null values when computing the result.
    numeric_only : bool, default False
        If True, includes only float, int, boolean columns.
        If False, will raise error in-case there are
        non-numeric columns.
    **kwargs
        Additional keyword arguments to be passed to the function.

    Returns
    -------
    scalar

    Examples
    --------
    >>> import cudf
    >>> ser = cudf.Series([10, 25, 3, 25, 24, 6])
    >>> ser
    0    10
    1    25
    2     3
    3    25
    4    24
    5     6
    dtype: int64
    >>> ser.median()
    17.0
    """
    # NOTE: decorator added for consistency — every sibling reduction
    # (mean/std/var/kurtosis/skew) is wrapped in @_performance_tracking.
    return self._reduce(
        "median",
        axis=axis,
        skipna=skipna,
        numeric_only=numeric_only,
        **kwargs,
    )
@_performance_tracking
def std(
    self,
    axis=no_default,
    skipna: bool = True,
    ddof: int = 1,
    numeric_only: bool = False,
    **kwargs,
):
    """
    Return sample standard deviation of the DataFrame.

    Normalized by N-1 by default; this can be changed with the
    ``ddof`` argument.

    Parameters
    ----------
    axis : {index (0), columns (1)}
        Axis for the function to be applied on.
    skipna : bool, default True
        Exclude NA/null values. If an entire row/column is NA, the
        result will be NA.
    ddof : int, default 1
        Delta Degrees of Freedom. The divisor used in calculations
        is N - ddof, where N represents the number of elements.
    numeric_only : bool, default False
        If True, include only float, int, and boolean columns.
        If False, raise an error when non-numeric columns are present.

    Returns
    -------
    Series

    Examples
    --------
    >>> import cudf
    >>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
    >>> df.std()
    a    1.290994
    b    1.290994
    dtype: float64
    """
    # Delegate to the shared reduction dispatcher.
    return self._reduce(
        "std", axis=axis, skipna=skipna, ddof=ddof,
        numeric_only=numeric_only, **kwargs
    )
@_performance_tracking
def var(
    self,
    axis=no_default,
    skipna: bool = True,
    ddof: int = 1,
    numeric_only: bool = False,
    **kwargs,
):
    """
    Return unbiased variance of the DataFrame.

    Normalized by N-1 by default; this can be changed with the
    ``ddof`` argument.

    Parameters
    ----------
    axis : {index (0), columns (1)}
        Axis for the function to be applied on.
    skipna : bool, default True
        Exclude NA/null values. If an entire row/column is NA, the
        result will be NA.
    ddof : int, default 1
        Delta Degrees of Freedom. The divisor used in calculations is
        N - ddof, where N represents the number of elements.
    numeric_only : bool, default False
        If True, include only float, int, and boolean columns.
        If False, raise an error when non-numeric columns are present.

    Returns
    -------
    scalar

    Examples
    --------
    >>> import cudf
    >>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
    >>> df.var()
    a    1.666667
    b    1.666667
    dtype: float64
    """
    # Delegate to the shared reduction dispatcher.
    return self._reduce(
        "var", axis=axis, skipna=skipna, ddof=ddof,
        numeric_only=numeric_only, **kwargs
    )
@_performance_tracking
def kurtosis(
    self,
    axis: Axis = 0,
    skipna: bool = True,
    numeric_only: bool = False,
    **kwargs,
):
    """
    Return Fisher's unbiased kurtosis of a sample.

    Kurtosis obtained using Fisher's definition of
    kurtosis (kurtosis of normal == 0.0). Normalized by N-1.

    Parameters
    ----------
    axis : {index (0), columns (1)}
        Axis for the function to be applied on.
    skipna : bool, default True
        Exclude NA/null values when computing the result.
    numeric_only : bool, default False
        If True, include only float, int, and boolean columns.
        If False, raise an error when non-numeric columns are present.

    Returns
    -------
    Series or scalar

    Examples
    --------
    **Series**

    >>> import cudf
    >>> series = cudf.Series([1, 2, 3, 4])
    >>> series.kurtosis()
    -1.1999999999999904

    **DataFrame**

    >>> import cudf
    >>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
    >>> df.kurt()
    a   -1.2
    b   -1.2
    dtype: float64
    """
    # Row-wise kurtosis is unimplemented; accept only the spellings
    # that mean "column-wise".
    if axis not in {0, "index", None, no_default}:
        raise NotImplementedError("Only axis=0 is currently supported.")
    return self._reduce(
        "kurtosis", axis=axis, skipna=skipna,
        numeric_only=numeric_only, **kwargs
    )
# `kurt` is an alias of `kurtosis`, matching the pandas API
# (pandas exposes both `DataFrame.kurt` and `DataFrame.kurtosis`).
kurt = kurtosis
@_performance_tracking
def skew(
    self,
    axis: Axis = 0,
    skipna: bool = True,
    numeric_only: bool = False,
    **kwargs,
):
    """
    Return unbiased Fisher-Pearson skew of a sample.

    Parameters
    ----------
    skipna : bool, default True
        Exclude NA/null values when computing the result.
    numeric_only : bool, default False
        If True, include only float, int, and boolean columns.
        If False, raise an error when non-numeric columns are present.

    Returns
    -------
    Series

    Examples
    --------
    **Series**

    >>> import cudf
    >>> series = cudf.Series([1, 2, 3, 4, 5, 6, 6])
    >>> series
    0    1
    1    2
    2    3
    3    4
    4    5
    5    6
    6    6
    dtype: int64

    **DataFrame**

    >>> import cudf
    >>> df = cudf.DataFrame({'a': [3, 2, 3, 4], 'b': [7, 8, 10, 10]})
    >>> df.skew()
    a    0.00000
    b   -0.37037
    dtype: float64

    .. pandas-compat::
        :meth:`pandas.DataFrame.skew`, :meth:`pandas.Series.skew`

        The `axis` parameter is not currently supported.
    """
    # Row-wise skew is unimplemented; accept only the spellings that
    # mean "column-wise".
    if axis not in {0, "index", None, no_default}:
        raise NotImplementedError("Only axis=0 is currently supported.")
    return self._reduce(
        "skew", axis=axis, skipna=skipna,
        numeric_only=numeric_only, **kwargs
    )
@_performance_tracking
def mask(
    self,
    cond,
    other=None,
    inplace: bool = False,
    axis=None,
    level=None,
) -> Self | None:
    """
    Replace values where the condition is True.

    Parameters
    ----------
    cond : bool Series/DataFrame, array-like
        Where cond is False, keep the original value.
        Where True, replace with corresponding value from other.
        Callables are not supported.
    other : scalar, list of scalars, Series/DataFrame
        Entries where cond is True are replaced with the
        corresponding value from other. Callables are not
        supported. Default is None.
        DataFrame expects only a scalar, an array-like of scalars, or
        a dataframe with the same dimension as self.
        Series expects only a scalar or a series-like of equal length.
    inplace : bool, default False
        Whether to perform the operation in place on the data.

    Returns
    -------
    Same type as caller

    Examples
    --------
    >>> import cudf
    >>> df = cudf.DataFrame({"A":[1, 4, 5], "B":[3, 5, 8]})
    >>> df.mask(df % 2 == 0, [-1, -1])
       A  B
    0  1  3
    1 -1  5
    2  5 -1

    >>> ser = cudf.Series([4, 3, 2, 1, 0])
    >>> ser.mask(ser > 2, 10)
    0    10
    1    10
    2     2
    3     1
    4     0
    dtype: int64
    >>> ser.mask(ser > 2)
    0    <NA>
    1    <NA>
    2       2
    3       1
    4       0
    dtype: int64
    """
    if axis is not None:
        raise NotImplementedError("axis is not supported.")
    if level is not None:
        raise NotImplementedError("level is not supported.")
    # ``mask`` is implemented as ``where`` on the inverted condition,
    # so the condition must support the ``~`` operator; anything that
    # doesn't is first coerced to a cupy array.
    if hasattr(cond, "__invert__"):
        inverted = ~cond
    else:
        inverted = ~cp.asarray(cond)
    return self.where(cond=inverted, other=other, inplace=inplace)
@_performance_tracking
@copy_docstring(Rolling)
def rolling(
    self,
    window,
    min_periods=None,
    center: bool = False,
    win_type: str | None = None,
    on=None,
    axis=0,
    closed: str | None = None,
    step: int | None = None,
    method: str = "single",
):
    # Thin constructor wrapper: all window validation happens inside
    # the Rolling object itself.
    return Rolling(
        self,  # type: ignore[arg-type]
        window,
        min_periods=min_periods,
        center=center,
        win_type=win_type,
        on=on,
        axis=axis,
        closed=closed,
        step=step,
        method=method,
    )
@copy_docstring(ExponentialMovingWindow)
def ewm(
    self,
    com: float | None = None,
    span: float | None = None,
    halflife: float | None = None,
    alpha: float | None = None,
    min_periods: int | None = 0,
    adjust: bool = True,
    ignore_na: bool = False,
    axis: int = 0,
    times: str | np.ndarray | None = None,
    method: Literal["single", "table"] = "single",
):
    # Thin constructor wrapper: decay-parameter validation (exactly
    # one of com/span/halflife/alpha) is handled by the window object.
    return ExponentialMovingWindow(
        self,
        com=com,
        span=span,
        halflife=halflife,
        alpha=alpha,
        min_periods=min_periods,
        adjust=adjust,
        ignore_na=ignore_na,
        axis=axis,
        times=times,
        method=method,
    )
@_performance_tracking
def nans_to_nulls(self) -> Self:
    """
    Convert NaN values (if any) to nulls.

    Returns
    -------
    DataFrame or Series
        The same type as the caller, with every floating-point NaN
        replaced by a null (``<NA>``).

    Examples
    --------
    **Series**

    >>> import cudf, numpy as np
    >>> series = cudf.Series([1, 2, np.nan, None, 10], nan_as_null=False)
    >>> series
    0     1.0
    1     2.0
    2     NaN
    3    <NA>
    4    10.0
    dtype: float64
    >>> series.nans_to_nulls()
    0     1.0
    1     2.0
    2    <NA>
    3    <NA>
    4    10.0
    dtype: float64

    **DataFrame**

    >>> df = cudf.DataFrame()
    >>> df['a'] = cudf.Series([1, None, np.nan], nan_as_null=False)
    >>> df['b'] = cudf.Series([None, 3.14, np.nan], nan_as_null=False)
    >>> df
          a     b
    0   1.0  <NA>
    1  <NA>  3.14
    2   NaN   NaN
    >>> df.nans_to_nulls()
          a     b
    0   1.0  <NA>
    1  <NA>  3.14
    2  <NA>  <NA>
    """
    # The conversion itself lives on the shared base class; this
    # override only contributes the user-facing documentation.
    return super().nans_to_nulls()
@_performance_tracking
def interpolate(
    self,
    method="linear",
    axis=0,
    limit=None,
    inplace: bool = False,
    limit_direction=None,
    limit_area=None,
    downcast=None,
    **kwargs,
):
    """
    Interpolate data values between some points.

    Parameters
    ----------
    method : str, default 'linear'
        Interpolation technique to use. Currently,
        only 'linear` is supported.
        * 'linear': Ignore the index and treat the values as
        equally spaced. This is the only method supported on MultiIndexes.
        * 'index', 'values': linearly interpolate using the index as
        an x-axis. Unsorted indices can lead to erroneous results.
    axis : int, default 0
        Axis to interpolate along. Currently,
        only 'axis=0' is supported.
    inplace : bool, default False
        Update the data in place if possible.

    Returns
    -------
    Series or DataFrame
        Returns the same object type as the caller, interpolated at
        some or all ``NaN`` values
    """
    # Forward-fill methods only make sense with a forward direction
    # (and backward-fill with backward); reject contradictory args.
    if method in {"pad", "ffill"} and limit_direction != "forward":
        raise ValueError(
            f"`limit_direction` must be 'forward' for method `{method}`"
        )
    if method in {"backfill", "bfill"} and limit_direction != "backward":
        raise ValueError(
            f"`limit_direction` must be 'backward' for method `{method}`"
        )
    if method.lower() in {"ffill", "bfill", "pad", "backfill"}:
        # Fill-style "interpolation" is deprecated in favor of the
        # dedicated ffill()/bfill() methods.
        warnings.warn(
            f"{type(self).__name__}.interpolate with method={method} is "
            "deprecated and will raise in a future version. "
            "Use obj.ffill() or obj.bfill() instead.",
            FutureWarning,
        )
    elif method not in {"linear", "values", "index"}:
        raise ValueError(f"Interpolation method `{method}` not found")
    if not isinstance(inplace, bool):
        raise ValueError("inplace must be a boolean")
    elif inplace is True:
        raise NotImplementedError("inplace is not supported")
    data = self
    if limit is not None:
        raise NotImplementedError("limit is not supported")
    if limit_direction is not None:
        raise NotImplementedError("limit_direction is not supported")
    if limit_area is not None:
        raise NotImplementedError("limit_area is not supported")
    if downcast is not None:
        raise NotImplementedError("downcast is not supported")
    # Column-level interpolation needs a sorted x-axis: sort rows by
    # index first and undo the permutation at the end.  A RangeIndex
    # is already sorted, so it skips this step.
    if not isinstance(data.index, cudf.RangeIndex):
        perm_sort = data.index.argsort()
        data = data._gather(
            GatherMap.from_column_unchecked(
                as_column(perm_sort),  # type: ignore[arg-type]
                len(data),
                nullify=False,
            )
        )
    if method == "linear":
        # 'linear' ignores the real index and treats rows as equally
        # spaced positions 0..n-1.
        interp_index = RangeIndex(self._num_rows)
    else:
        interp_index = data.index
    columns = []
    for col in data._columns:
        if col.dtype == CUDF_STRING_DTYPE:
            warnings.warn(
                f"{type(self).__name__}.interpolate with object dtype is "
                "deprecated and will raise in a future version.",
                FutureWarning,
            )
        if col.nullable:
            # Column interpolation operates on NaNs, not nulls, so
            # nullable columns are cast to float64 with nulls -> NaN.
            col = col.astype(np.dtype(np.float64)).fillna(np.nan)
        columns.append(col.interpolate(index=interp_index))
    result = self._from_data_like_self(
        self._data._from_columns_like_self(columns)
    )
    result.index = data.index
    # NOTE: ``perm_sort`` is only bound on the non-RangeIndex path,
    # which is exactly the branch where it is used below.
    return (
        result
        if isinstance(data.index, cudf.RangeIndex)
        # TODO: This should be a scatter, avoiding an argsort.
        else result._gather(
            GatherMap.from_column_unchecked(
                as_column(perm_sort.argsort()),  # type: ignore[arg-type]
                len(result),
                nullify=False,
            )
        )
    )
# Maps every accepted spelling of the ``axis`` argument to its integer
# form.  Only the row axis (0 / "index") is supported here; subclasses
# presumably extend this mapping for a column axis — verify there.
_SUPPORT_AXIS_LOOKUP = {0: 0, "index": 0}
@classmethod
@_performance_tracking
def _get_axis_from_axis_arg(cls, axis: int | str) -> int:
    """Normalize an ``axis`` argument to its integer form.

    Parameters
    ----------
    axis : int or str
        One of the spellings accepted by ``cls._SUPPORT_AXIS_LOOKUP``
        (here 0 or "index").

    Returns
    -------
    int
        The canonical integer axis number.

    Raises
    ------
    ValueError
        If ``axis`` is not a supported spelling for this type.
    """
    try:
        return cls._SUPPORT_AXIS_LOOKUP[axis]
    except KeyError:
        # ``from None`` suppresses the internal KeyError so callers
        # see a clean ValueError rather than a chained traceback.
        raise ValueError(
            f"No axis named {axis} for object type {cls}"
        ) from None
@_performance_tracking
def shift(
    self,
    periods=1,
    freq=None,
    axis=0,
    fill_value=None,
    suffix: str | None = None,
):
    """Shift values by `periods` positions."""
    # Validate/normalize the axis first; only row shifts are supported.
    if self._get_axis_from_axis_arg(axis) != 0:
        raise NotImplementedError("Only axis=0 is supported.")
    if freq is not None:
        raise NotImplementedError(
            "The freq argument is not yet supported."
        )
    if suffix is not None:
        raise NotImplementedError(
            "The suffix argument is not yet supported."
        )
    # Shift each column independently; nulls introduced at the edges
    # are filled with ``fill_value``.
    shifted = (col.shift(periods, fill_value) for col in self._columns)
    return self._from_data_like_self(
        self._data._from_columns_like_self(shifted)
    )._copy_type_metadata(self)
@_performance_tracking
def truncate(self, before=None, after=None, axis=0, copy=True):
    """
    Truncate a Series or DataFrame before and after some index value.

    This is a useful shorthand for boolean indexing based on index
    values above or below certain thresholds.

    Parameters
    ----------
    before : date, str, int
        Truncate all rows before this index value.
    after : date, str, int
        Truncate all rows after this index value.
    axis : {0 or 'index', 1 or 'columns'}, optional
        Axis to truncate. Truncates the index (rows) by default.
    copy : bool, default is True,
        Return a copy of the truncated section.

    Returns
    -------
    The truncated Series or DataFrame.

    Notes
    -----
    If the index being truncated contains only datetime values,
    `before` and `after` may be specified as strings instead of
    Timestamps.

    Examples
    --------
    **Series**

    >>> import cudf
    >>> cs1 = cudf.Series([1, 2, 3, 4])
    >>> cs1
    0    1
    1    2
    2    3
    3    4
    dtype: int64
    >>> cs1.truncate(before=1, after=2)
    1    2
    2    3
    dtype: int64

    >>> import cudf
    >>> dates = cudf.date_range(
    ...     '2021-01-01 23:45:00', '2021-01-01 23:46:00', freq='s'
    ... )
    >>> cs2 = cudf.Series(range(len(dates)), index=dates)
    >>> cs2
    2021-01-01 23:45:00     0
    2021-01-01 23:45:01     1
    2021-01-01 23:45:02     2
    2021-01-01 23:45:03     3
    2021-01-01 23:45:04     4
    2021-01-01 23:45:05     5
    2021-01-01 23:45:06     6
    2021-01-01 23:45:07     7
    2021-01-01 23:45:08     8
    2021-01-01 23:45:09     9
    2021-01-01 23:45:10    10
    2021-01-01 23:45:11    11
    2021-01-01 23:45:12    12
    2021-01-01 23:45:13    13
    2021-01-01 23:45:14    14
    2021-01-01 23:45:15    15
    2021-01-01 23:45:16    16
    2021-01-01 23:45:17    17
    2021-01-01 23:45:18    18
    2021-01-01 23:45:19    19
    2021-01-01 23:45:20    20
    2021-01-01 23:45:21    21
    2021-01-01 23:45:22    22
    2021-01-01 23:45:23    23
    2021-01-01 23:45:24    24
    ...
    2021-01-01 23:45:56    56
    2021-01-01 23:45:57    57
    2021-01-01 23:45:58    58
    2021-01-01 23:45:59    59
    dtype: int64
    >>> cs2.truncate(
    ...     before="2021-01-01 23:45:18", after="2021-01-01 23:45:27"
    ... )
    2021-01-01 23:45:18    18
    2021-01-01 23:45:19    19
    2021-01-01 23:45:20    20
    2021-01-01 23:45:21    21
    2021-01-01 23:45:22    22
    2021-01-01 23:45:23    23
    2021-01-01 23:45:24    24
    2021-01-01 23:45:25    25
    2021-01-01 23:45:26    26
    2021-01-01 23:45:27    27
    dtype: int64

    >>> cs3 = cudf.Series({'A': 1, 'B': 2, 'C': 3, 'D': 4})
    >>> cs3
    A    1
    B    2
    C    3
    D    4
    dtype: int64
    >>> cs3.truncate(before='B', after='C')
    B    2
    C    3
    dtype: int64

    **DataFrame**

    >>> df = cudf.DataFrame({
    ...     'A': ['a', 'b', 'c', 'd', 'e'],
    ...     'B': ['f', 'g', 'h', 'i', 'j'],
    ...     'C': ['k', 'l', 'm', 'n', 'o']
    ... }, index=[1, 2, 3, 4, 5])
    >>> df
       A  B  C
    1  a  f  k
    2  b  g  l
    3  c  h  m
    4  d  i  n
    5  e  j  o
    >>> df.truncate(before=2, after=4)
       A  B  C
    2  b  g  l
    3  c  h  m
    4  d  i  n
    >>> df.truncate(before="A", after="B", axis="columns")
       A  B
    1  a  f
    2  b  g
    3  c  h
    4  d  i
    5  e  j

    >>> import cudf
    >>> dates = cudf.date_range(
    ...     '2021-01-01 23:45:00', '2021-01-01 23:46:00', freq='s'
    ... )
    >>> df2 = cudf.DataFrame(data={'A': 1, 'B': 2}, index=dates)
    >>> df2.head()
                         A  B
    2021-01-01 23:45:00  1  2
    2021-01-01 23:45:01  1  2
    2021-01-01 23:45:02  1  2
    2021-01-01 23:45:03  1  2
    2021-01-01 23:45:04  1  2
    >>> df2.truncate(
    ...     before="2021-01-01 23:45:18", after="2021-01-01 23:45:27"
    ... )
                         A  B
    2021-01-01 23:45:18  1  2
    2021-01-01 23:45:19  1  2
    2021-01-01 23:45:20  1  2
    2021-01-01 23:45:21  1  2
    2021-01-01 23:45:22  1  2
    2021-01-01 23:45:23  1  2
    2021-01-01 23:45:24  1  2
    2021-01-01 23:45:25  1  2
    2021-01-01 23:45:26  1  2
    2021-01-01 23:45:27  1  2

    .. pandas-compat::
        :meth:`pandas.DataFrame.truncate`, :meth:`pandas.Series.truncate`

        The ``copy`` parameter is only present for API compatibility, but
        ``copy=False`` is not supported. This method always generates a
        copy.
    """
    if not copy:
        raise ValueError("Truncating with copy=False is not supported.")
    axis = self._get_axis_from_axis_arg(axis)
    # The labels to slice come from the row index for axis=0, and from
    # the column labels otherwise.
    ax = self.index if axis == 0 else self._data.to_pandas_index
    # Label-based truncation is only well-defined on a sorted axis.
    if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:
        raise ValueError("truncate requires a sorted index")
    if isinstance(ax, cudf.DatetimeIndex):
        # Allow string bounds (e.g. "2021-01-01") on datetime axes.
        before = pd.to_datetime(before)
        after = pd.to_datetime(after)
    if before is not None and after is not None and before > after:
        raise ValueError(f"Truncate: {after} must be after {before}")
    # On a strictly descending axis, label slicing runs high -> low,
    # so the bounds must be swapped.
    if len(ax) > 1 and ax.is_monotonic_decreasing and ax.nunique() > 1:
        before, after = after, before
    slicer = [slice(None, None)] * self.ndim
    slicer[axis] = slice(before, after)
    return self.loc[tuple(slicer)].copy()
@property
def loc(self):
    """Select rows and columns by label or boolean mask.

    Examples
    --------
    **Series**

    >>> import cudf
    >>> series = cudf.Series([10, 11, 12], index=['a', 'b', 'c'])
    >>> series
    a    10
    b    11
    c    12
    dtype: int64
    >>> series.loc['b']
    11

    **DataFrame**

    DataFrame with string index.

    >>> df = cudf.DataFrame(
    ...     {'a': range(5), 'b': range(5, 10)},
    ...     index=['a', 'b', 'c', 'd', 'e'])
    >>> df
       a  b
    a  0  5
    b  1  6
    c  2  7
    d  3  8
    e  4  9

    Select a single row by label.

    >>> df.loc['a']
    a    0
    b    5
    Name: a, dtype: int64

    Select multiple rows and a single column.

    >>> df.loc[['a', 'c', 'e'], 'b']
    a    5
    c    7
    e    9
    Name: b, dtype: int64

    Selection by boolean mask.

    >>> df.loc[df.a > 2]
       a  b
    d  3  8
    e  4  9

    Setting values using loc.

    >>> df.loc[['a', 'c', 'e'], 'a'] = 0
    >>> df
       a  b
    a  0  5
    b  1  6
    c  0  7
    d  3  8
    e  0  9
    """
    # Each access returns a fresh label-based indexer bound to self;
    # the concrete indexer class is supplied by the subclass.
    return self._loc_indexer_type(self)
@property
def iloc(self):
    """Select values by position.

    Examples
    --------
    **Series**

    >>> import cudf
    >>> s = cudf.Series([10, 20, 30])
    >>> s
    0    10
    1    20
    2    30
    dtype: int64
    >>> s.iloc[2]
    30

    **DataFrame**

    Selecting rows and column by position.

    >>> df = cudf.DataFrame({'a': range(20),
    ...                      'b': range(20),
    ...                      'c': range(20)})

    Select a single row using an integer index.

    >>> df.iloc[1]
    a    1
    b    1
    c    1
    Name: 1, dtype: int64

    Select multiple rows using a list of integers.

    >>> df.iloc[[0, 2, 9, 18]]
          a    b    c
     0    0    0    0
     2    2    2    2
     9    9    9    9
    18   18   18   18

    Select rows using a slice.

    >>> df.iloc[3:10:2]
         a    b    c
    3    3    3    3
    5    5    5    5
    7    7    7    7
    9    9    9    9

    Select both rows and columns.

    >>> df.iloc[[1, 3, 5, 7], 2]
    1    1
    3    3
    5    5
    7    7
    Name: c, dtype: int64

    Setting values in a column using iloc.

    >>> df.iloc[:4] = 0
    >>> df
       a  b  c
    0  0  0  0
    1  0  0  0
    2  0  0  0
    3  0  0  0
    4  4  4  4
    5  5  5  5
    6  6  6  6
    7  7  7  7
    8  8  8  8
    9  9  9  9
    [10 more rows]
    """
    # Each access returns a fresh positional indexer bound to self;
    # the concrete indexer class is supplied by the subclass.
    return self._iloc_indexer_type(self)
@property
@_performance_tracking
def axes(self):
    """
    Return a list representing the axes of the Series.

    Series.axes returns a list containing the row index.

    Examples
    --------
    >>> import cudf
    >>> csf1 = cudf.Series([1, 2, 3, 4])
    >>> csf1.axes
    [RangeIndex(start=0, stop=4, step=1)]
    """
    # Only the row index here; two-dimensional subclasses presumably
    # override this to also report the column axis — verify there.
    return [self.index]
def squeeze(self, axis: Literal["index", "columns", 0, 1, None] = None):
    """
    Squeeze 1 dimensional axis objects into scalars.

    Series or DataFrames with a single element are squeezed to a
    scalar. DataFrames with a single column or a single row are
    squeezed to a Series. Otherwise the object is unchanged.

    This method is most useful when you don't know if your object is a
    Series or DataFrame, but you do know it has just a single column.
    In that case you can safely call `squeeze` to ensure you have a
    Series.

    Parameters
    ----------
    axis : {0 or 'index', 1 or 'columns', None}, default None
        A specific axis to squeeze. By default, all length-1 axes are
        squeezed. For `Series` this parameter is unused and defaults
        to `None`.

    Returns
    -------
    DataFrame, Series, or scalar
        The projection after squeezing `axis` or all the axes.

    See Also
    --------
    Series.iloc : Integer-location based indexing for selecting scalars.
    DataFrame.iloc : Integer-location based indexing for selecting Series.
    Series.to_frame : Inverse of DataFrame.squeeze for a
        single-column DataFrame.

    Examples
    --------
    >>> primes = cudf.Series([2, 3, 5, 7])
    >>> even_primes = primes[primes % 2 == 0]
    >>> even_primes
    0    2
    dtype: int64
    >>> even_primes.squeeze()
    2

    Squeezing objects with more than one value in every axis does
    nothing:

    >>> odd_primes = primes[primes % 2 == 1]
    >>> odd_primes.squeeze()
    1    3
    2    5
    3    7
    dtype: int64

    >>> df = cudf.DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
    >>> df[["a"]].squeeze("columns")
    0    1
    1    3
    Name: a, dtype: int64
    >>> df.loc[df.index < 1, ["a"]].squeeze()
    1
    """
    # Decide which dimensions are candidates for squeezing.
    if axis is None:
        candidates = frozenset(range(self.ndim))
    else:
        candidates = frozenset((self._get_axis_from_axis_arg(axis),))
    # Build a positional indexer: a scalar 0 collapses a candidate
    # length-1 dimension; every other dimension keeps a full slice.
    selection = []
    for dim, labels in enumerate(self.axes):
        if dim in candidates and len(labels) == 1:
            selection.append(0)
        else:
            selection.append(slice(None))
    return self.iloc[tuple(selection)]
@_performance_tracking
def scale(self):
    """
    Scale values to [0, 1] in float64

    Returns
    -------
    DataFrame or Series
        Values scaled to [0, 1].

    Examples
    --------
    >>> import cudf
    >>> series = cudf.Series([10, 11, 12, 0.5, 1])
    >>> series
    0    10.0
    1    11.0
    2    12.0
    3     0.5
    4     1.0
    dtype: float64
    >>> series.scale()
    0    0.826087
    1    0.913043
    2    1.000000
    3    0.000000
    4    0.043478
    dtype: float64
    """
    # Min-max normalization: (x - min) / (max - min).
    lo = self.min()
    hi = self.max()
    result = (self - lo) / (hi - lo)
    # Preserve the original index (shallow copy is sufficient).
    result.index = self.index.copy(deep=False)
    return result
# Typing overloads: ``sort_index`` returns ``Self`` when
# ``inplace=False`` (the default) and ``None`` when ``inplace=True``.
@overload
def sort_index(
    self,
    axis: Axis = ...,
    level=...,
    ascending: bool | Iterable[bool] = ...,
    inplace: Literal[False] = ...,
    kind: str = ...,
    na_position: Literal["first", "last"] = ...,
    sort_remaining: bool = ...,
    ignore_index: bool = ...,
    key=...,
) -> Self: ...

@overload
def sort_index(
    self,
    axis: Axis = ...,
    level=...,
    ascending: bool | Iterable[bool] = ...,
    inplace: Literal[True] = ...,
    kind: str = ...,
    na_position: Literal["first", "last"] = ...,
    sort_remaining: bool = ...,
    ignore_index: bool = ...,
    key=...,
) -> None: ...
@_performance_tracking
def sort_index(
    self,
    axis: Axis = 0,
    level=None,
    ascending: bool | Iterable[bool] = True,
    inplace: bool = False,
    kind: str = "quicksort",
    na_position: Literal["first", "last"] = "last",
    sort_remaining: bool = True,
    ignore_index: bool = False,
    key=None,
) -> Self | None:
    """Sort object by labels (along an axis).

    Parameters
    ----------
    axis : {0 or 'index', 1 or 'columns'}, default 0
        The axis along which to sort. The value 0 identifies the rows,
        and 1 identifies the columns.
    level : int or level name or list of ints or list of level names
        If not None, sort on values in specified index level(s).
        This is only useful in the case of MultiIndex.
    ascending : bool, default True
        Sort ascending vs. descending.
    inplace : bool, default False
        If True, perform operation in-place.
    kind : sorting method such as `quick sort` and others.
        Not yet supported.
    na_position : {'first', 'last'}, default 'last'
        Puts NaNs at the beginning if first; last puts NaNs at the end.
    sort_remaining : bool, default True
        When sorting a multiindex on a subset of its levels,
        should entries be lexsorted by the remaining
        (non-specified) levels as well?
    ignore_index : bool, default False
        if True, index will be replaced with RangeIndex.
    key : callable, optional
        If not None, apply the key function to the index values before
        sorting. This is similar to the key argument in the builtin
        sorted() function, with the notable difference that this key
        function should be vectorized. It should expect an Index and return
        an Index of the same shape. For MultiIndex inputs, the key is
        applied per level.

    Returns
    -------
    Frame or None

    Examples
    --------
    **Series**

    >>> import cudf
    >>> series = cudf.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4])
    >>> series
    3    a
    2    b
    1    c
    4    d
    dtype: object
    >>> series.sort_index()
    1    c
    2    b
    3    a
    4    d
    dtype: object

    Sort Descending

    >>> series.sort_index(ascending=False)
    4    d
    3    a
    2    b
    1    c
    dtype: object

    **DataFrame**

    >>> df = cudf.DataFrame(
    ...     {"b":[3, 2, 1], "a":[2, 1, 3]}, index=[1, 3, 2])
    >>> df.sort_index(axis=0)
       b  a
    1  3  2
    2  1  3
    3  2  1
    >>> df.sort_index(axis=1)
       a  b
    1  2  3
    3  1  2
    2  3  1

    .. pandas-compat::
        :meth:`pandas.DataFrame.sort_index`, :meth:`pandas.Series.sort_index`

        * Not supporting: kind, sort_remaining=False
    """
    if kind != "quicksort":
        raise NotImplementedError("kind is not yet supported")
    if key is not None:
        raise NotImplementedError("key is not yet supported.")
    if na_position not in {"first", "last"}:
        raise ValueError(f"invalid na_position: {na_position}")
    if axis in (0, "index"):
        # Row-axis sort: order rows by index labels.
        idx = self.index
        if isinstance(idx, MultiIndex):
            if level is not None:
                # Sort primarily on the requested level(s) ...
                if not is_list_like(level):
                    level = [level]
                by = list(map(idx._get_level_label, level))
                if sort_remaining:
                    # ... then lexsort by the remaining levels.
                    handled = set(by)
                    by.extend(
                        filter(
                            lambda n: n not in handled,
                            self.index._column_names,
                        )
                    )
            else:
                by = list(idx._column_names)
            inds = idx._get_sorted_inds(
                by=by, ascending=ascending, na_position=na_position
            )
            out = self._gather(
                GatherMap.from_column_unchecked(
                    inds, len(self), nullify=False
                )
            )
            # TODO: frame factory function should handle multilevel column
            # names
            if isinstance(self, cudf.DataFrame) and self._data.multiindex:
                out._set_columns_like(self._data)
        elif (ascending and idx.is_monotonic_increasing) or (
            not ascending and idx.is_monotonic_decreasing
        ):
            # Fast path: already sorted in the requested order.
            out = self.copy()
        else:
            inds = idx.argsort(
                ascending=ascending, na_position=na_position
            )
            out = self._gather(
                GatherMap.from_column_unchecked(
                    as_column(inds),  # type: ignore[arg-type]
                    len(self),
                    nullify=False,
                )
            )
            if isinstance(self, cudf.DataFrame) and self._data.multiindex:
                out._set_columns_like(self._data)
        if ignore_index:
            out = out.reset_index(drop=True)
    else:
        # Column-axis sort: reorder columns by their labels.
        labels = sorted(self._column_names, reverse=not ascending)
        result_columns = (self._data[label] for label in labels)
        if ignore_index:
            ca = ColumnAccessor(
                dict(enumerate(result_columns)),
                rangeindex=True,
                verify=False,
            )
        else:
            ca = ColumnAccessor(
                dict(zip(labels, result_columns, strict=True)),
                rangeindex=self._data.rangeindex,
                multiindex=self._data.multiindex,
                level_names=self._data.level_names,
                label_dtype=self._data.label_dtype,
                verify=False,
            )
        out = self._from_data_like_self(ca)
    return self._mimic_inplace(out, inplace=inplace)
def hash_values(
    self,
    # NOTE: "xxhash32" was missing from this Literal even though the
    # docstring documents it and the dispatch below implements it;
    # type checkers would reject valid calls.
    method: Literal[
        "murmur3",
        "xxhash32",
        "xxhash64",
        "md5",
        "sha1",
        "sha224",
        "sha256",
        "sha384",
        "sha512",
    ] = "murmur3",
    seed: int | None = None,
) -> Series:
    """Compute the hash of values in this column.

    Parameters
    ----------
    method : {'murmur3', 'xxhash32', 'xxhash64', 'md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'}, default 'murmur3'
        Hash function to use:

        * murmur3: MurmurHash3 hash function
        * xxhash32: xxHash32 hash function
        * xxhash64: xxHash64 hash function
        * md5: MD5 hash function
        * sha1: SHA-1 hash function
        * sha224: SHA-224 hash function
        * sha256: SHA-256 hash function
        * sha384: SHA-384 hash function
        * sha512: SHA-512 hash function

    seed : int, optional
        Seed value to use for the hash function. This parameter is only
        supported for 'murmur3', 'xxhash32', and 'xxhash64'.

    Returns
    -------
    Series
        A Series with hash values.

    Examples
    --------
    **Series**

    >>> import cudf
    >>> series = cudf.Series([10, 120, 30])
    >>> series
    0     10
    1    120
    2     30
    dtype: int64
    >>> series.hash_values(method="murmur3")
    0   -1930516747
    1     422619251
    2    -941520876
    dtype: int32
    >>> series.hash_values(method="md5")
    0    7be4bbacbfdb05fb3044e36c22b41e8b
    1    947ca8d2c5f0f27437f156cfbfab0969
    2    d0580ef52d27c043c8e341fd5039b166
    dtype: object
    >>> series.hash_values(method="murmur3", seed=42)
    0    2364453205
    1     422621911
    2    3353449140
    dtype: uint32

    **DataFrame**

    >>> import cudf
    >>> df = cudf.DataFrame({"a": [10, 120, 30], "b": [0.0, 0.25, 0.50]})
    >>> df
         a     b
    0   10  0.00
    1  120  0.25
    2   30  0.50
    >>> df.hash_values(method="murmur3")
    0    -330519225
    1    -397962448
    2   -1345834934
    dtype: int32
    >>> df.hash_values(method="md5")
    0    57ce879751b5169c525907d5c563fae1
    1    948d6221a7c4963d4be411bcead7e32b
    2    fe061786ea286a515b772d91b0dfcd70
    dtype: object
    """
    seed_hash_methods = {"murmur3", "xxhash32", "xxhash64"}
    if seed is None:
        seed = 0
    elif method not in seed_hash_methods:
        # Cryptographic digests ignore the seed; warn rather than fail.
        warnings.warn(
            "Provided seed value has no effect for the hash method "
            f"`{method}`. Only {seed_hash_methods} support seeds."
        )
    with acquire_spill_lock():
        plc_table = plc.Table(
            [c.to_pylibcudf(mode="read") for c in self._columns]
        )
        # Dispatch to the matching pylibcudf hashing routine; only the
        # non-cryptographic hashes take a seed argument.
        if method == "murmur3":
            plc_column = plc.hashing.murmurhash3_x86_32(plc_table, seed)
        elif method == "xxhash32":
            plc_column = plc.hashing.xxhash_32(plc_table, seed)
        elif method == "xxhash64":
            plc_column = plc.hashing.xxhash_64(plc_table, seed)
        elif method == "md5":
            plc_column = plc.hashing.md5(plc_table)
        elif method == "sha1":
            plc_column = plc.hashing.sha1(plc_table)
        elif method == "sha224":
            plc_column = plc.hashing.sha224(plc_table)
        elif method == "sha256":
            plc_column = plc.hashing.sha256(plc_table)
        elif method == "sha384":
            plc_column = plc.hashing.sha384(plc_table)
        elif method == "sha512":
            plc_column = plc.hashing.sha512(plc_table)
        else:
            raise ValueError(f"Unsupported hashing algorithm {method}.")
        result = ColumnBase.from_pylibcudf(plc_column)
    # One hash per row, aligned with the original index.
    return cudf.Series._from_column(
        result,
        index=self.index,
    )
def _gather(
    self,
    gather_map: GatherMap,
    keep_index=True,
):
    """Gather rows of frame specified by indices in `gather_map`.

    The index is preserved when ``keep_index`` is True.  No expensive
    bounds checking is performed here beyond verifying that the
    frame's row count matches the gather map's validated row count.
    """
    # A non-nullifying gather must have been validated against a frame
    # of exactly this length.
    if not gather_map.nullify and gather_map.nrows != len(self):
        raise IndexError("Gather map is out of bounds")
    if keep_index:
        source_columns = itertools.chain(
            self.index._columns, self._columns
        )
        index_names = self.index.names
    else:
        source_columns = self._columns
        index_names = None
    gathered = copying.gather(
        source_columns,
        gather_map.column,
        nullify=gather_map.nullify,
    )
    return self._from_columns_like_self(
        [ColumnBase.from_pylibcudf(col) for col in gathered],
        self._column_names,
        index_names,
    )
def _slice(self, arg: slice, keep_index: bool = True) -> Self:
    """Slice a frame.

    Parameters
    ----------
    arg
        The slice
    keep_index
        Preserve the index when slicing?

    Returns
    -------
    Sliced frame

    Notes
    -----
    This slicing has normal python semantics.
    """
    if arg.step == 0:
        raise ValueError("slice step cannot be zero")
    num_rows = len(self)
    if num_rows == 0:
        return self
    # slice.indices clamps the bounds to [0, num_rows] and fills in
    # defaults, following standard Python slice semantics.
    start, stop, stride = arg.indices(num_rows)
    index = self.index
    has_range_index = isinstance(index, RangeIndex)
    if len(range(start, stop, stride)) == 0:
        # Avoid materialising the range index column
        result = self._empty_like(
            keep_index=keep_index and not has_range_index
        )
        if keep_index and has_range_index:
            # Derive the (empty) RangeIndex arithmetically instead of
            # materialising any data.
            lo = index.start + start * index.step
            hi = index.start + stop * index.step
            step = index.step * stride
            result.index = RangeIndex(
                start=lo, stop=hi, step=step, name=index.name
            )
        return result
    if start < 0:
        start = start + num_rows
    # At this point, we have converted slice arguments into
    # indices that no longer wrap around.
    # For example slice(4, None, -1) will produce the
    # start, stop, stride tuple (4, -1, -1)
    # This check makes sure -1 is not wrapped (again) to
    # produce -1 + num_rows.
    if stop < 0 and not (stride < 0 and stop == -1):
        stop = stop + num_rows
    stride = 1 if stride is None else stride
    if (stop - start) * stride <= 0:
        return self._empty_like(keep_index=True)
    start = min(start, num_rows)
    stop = min(stop, num_rows)
    if stride != 1:
        # Strided slices are implemented as a gather over the
        # materialised positions.
        return self._gather(
            GatherMap.from_column_unchecked(
                cast(
                    NumericalColumn,
                    as_column(
                        range(start, stop, stride),
                        dtype=SIZE_TYPE_DTYPE,
                    ),
                ),
                len(self),
                nullify=False,
            ),
            keep_index=keep_index,
        )
    # Contiguous (stride == 1) slices go through libcudf's zero-copy
    # slice; a RangeIndex is sliced arithmetically afterwards instead
    # of being materialised here.
    columns_to_slice = (
        itertools.chain(self.index._columns, self._columns)
        if keep_index and not has_range_index
        else self._columns
    )
    with acquire_spill_lock():
        plc_tables = plc.copying.slice(
            plc.Table(
                [col.to_pylibcudf(mode="read") for col in columns_to_slice]
            ),
            [start, stop],
        )
        sliced = [
            ColumnBase.from_pylibcudf(col)
            for col in plc_tables[0].columns()
        ]
    result = self._from_columns_like_self(
        sliced,
        self._column_names,
        None if has_range_index or not keep_index else self.index.names,
    )
    if keep_index and has_range_index:
        result.index = self.index[start:stop]
    return result
def _positions_from_column_names(
self,
column_names: set[Hashable],
offset_by_index_columns: bool = True,
) -> list[int]:
"""Map each column name into their positions in the frame.
Return positions of the provided column names, offset by the number of
index columns if `offset_by_index_columns` is True. The order of
indices returned corresponds to the column order in this Frame.
"""
start = self.index.nlevels if offset_by_index_columns else 0
return [
i
for i, name in enumerate(self._column_names, start=start)
if name in column_names
]
def drop_duplicates(
self,
subset=None,
keep: Literal["first", "last", False] = "first",
nulls_are_equal: bool = True,
ignore_index: bool = False,
):
"""
Drop duplicate rows in frame.
subset : list, optional
List of columns to consider when dropping rows.
keep : ["first", "last", False]
"first" will keep the first duplicate entry, "last" will keep the
last duplicate entry, and False will drop all duplicates.
nulls_are_equal: bool, default True
Null elements are considered equal to other null elements.
ignore_index: bool, default False
If True, the resulting axis will be labeled 0, 1, ..., n - 1.
"""
if not isinstance(ignore_index, (np.bool_, bool)):
raise ValueError(
f"{ignore_index=} must be bool, "
f"not {type(ignore_index).__name__}"
)
subset = self._preprocess_subset(subset)
subset_cols = [name for name in self._column_names if name in subset]
if len(subset_cols) == 0:
return self.copy(deep=True)
keys = self._positions_from_column_names(
subset, offset_by_index_columns=not ignore_index
)
return self._from_columns_like_self(
[
ColumnBase.from_pylibcudf(col)
for col in stream_compaction.drop_duplicates(
list(self._columns)
if ignore_index
else list(self.index._columns + self._columns),
keys=keys,
keep=keep,
nulls_are_equal=nulls_are_equal,
)
],
self._column_names,
self.index.names if not ignore_index else None,
)
    @_performance_tracking
    def duplicated(
        self, subset=None, keep: Literal["first", "last", False] = "first"
    ) -> Series:
        """
        Return boolean Series denoting duplicate rows.
        Considering certain columns is optional.
        Parameters
        ----------
        subset : column label or sequence of labels, optional
            Only consider certain columns for identifying duplicates, by
            default use all of the columns.
        keep : {'first', 'last', False}, default 'first'
            Determines which duplicates (if any) to mark.
            - ``'first'`` : Mark duplicates as ``True`` except for the first
              occurrence.
            - ``'last'`` : Mark duplicates as ``True`` except for the last
              occurrence.
            - ``False`` : Mark all duplicates as ``True``.
        Returns
        -------
        Series
            Boolean series indicating duplicated rows.
        See Also
        --------
        Index.duplicated : Equivalent method on index.
        Series.duplicated : Equivalent method on Series.
        Series.drop_duplicates : Remove duplicate values from Series.
        DataFrame.drop_duplicates : Remove duplicate values from DataFrame.
        Examples
        --------
        Consider a dataset containing ramen product ratings.
        >>> import cudf
        >>> df = cudf.DataFrame({
        ...     'brand': ['Yum Yum', 'Yum Yum', 'Maggie', 'Maggie', 'Maggie'],
        ...     'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
        ...     'rating': [4, 4, 3.5, 15, 5]
        ... })
        >>> df
             brand style  rating
        0  Yum Yum   cup     4.0
        1  Yum Yum   cup     4.0
        2   Maggie   cup     3.5
        3   Maggie  pack    15.0
        4   Maggie  pack     5.0
        By default, for each set of duplicated values, the first occurrence
        is set to False and all others to True.
        >>> df.duplicated()
        0    False
        1     True
        2    False
        3    False
        4    False
        dtype: bool
        By using 'last', the last occurrence of each set of duplicated values
        is set to False and all others to True.
        >>> df.duplicated(keep='last')
        0     True
        1    False
        2    False
        3    False
        4    False
        dtype: bool
        By setting ``keep`` to False, all duplicates are True.
        >>> df.duplicated(keep=False)
        0     True
        1     True
        2    False
        3    False
        4    False
        dtype: bool
        To find duplicates on specific column(s), use ``subset``.
        >>> df.duplicated(subset=['brand'])
        0    False
        1     True
        2    False
        3     True
        4     True
        dtype: bool
        """
        subset = self._preprocess_subset(subset)
        name = None
        if isinstance(self, cudf.Series):
            # Series: the single data column is the only duplicate key.
            columns = [self._column]
            name = self.name
        else:
            # DataFrame: only the requested subset of columns participates.
            columns = [self._data[n] for n in subset]
        # Map the pandas-style `keep` argument onto libcudf's
        # duplicate-keep options.
        _keep_options = {
            "first": plc.stream_compaction.DuplicateKeepOption.KEEP_FIRST,
            "last": plc.stream_compaction.DuplicateKeepOption.KEEP_LAST,
            False: plc.stream_compaction.DuplicateKeepOption.KEEP_NONE,
        }
        if (keep_option := _keep_options.get(keep)) is None:
            raise ValueError('keep must be either "first", "last" or False')
        # distinct_indices yields the row positions that are NOT considered
        # duplicates under `keep_option`; scatter False into an all-True
        # mask at those positions to obtain the duplicated flags.
        with acquire_spill_lock():
            plc_column = plc.stream_compaction.distinct_indices(
                plc.Table([col.to_pylibcudf(mode="read") for col in columns]),
                keep_option,
                plc.types.NullEquality.EQUAL,
                plc.types.NanEquality.ALL_EQUAL,
            )
            distinct = ColumnBase.from_pylibcudf(plc_column)
        result = as_column(
            True, length=len(self), dtype=bool
        )._scatter_by_column(
            distinct,  # type: ignore[arg-type]
            pa_scalar_to_plc_scalar(pa.scalar(False)),
            bounds_check=False,
        )
        return cudf.Series._from_column(
            result, index=self.index, name=name, attrs=self.attrs
        )
@_performance_tracking
def _empty_like(self, keep_index: bool = True) -> Self:
with acquire_spill_lock():
plc_table = plc.copying.empty_like(
plc.Table(
[
col.to_pylibcudf(mode="read")
for col in (
itertools.chain(self.index._columns, self._columns)
if keep_index
else self._columns
)
]
)
)
columns = [
ColumnBase.from_pylibcudf(col) for col in plc_table.columns()
]
result = self._from_columns_like_self(
columns,
self._column_names,
self.index.names if keep_index else None,
)
result._data.label_dtype = self._data.label_dtype
result._data.rangeindex = self._data.rangeindex
return result
def _split(self, splits: list[int], keep_index: bool = True) -> list[Self]:
if self._num_rows == 0:
return []
columns_split = copying.columns_split(
itertools.chain(self.index._columns, self._columns)
if keep_index
else self._columns,
splits,
)
@acquire_spill_lock()
def split_from_pylibcudf(split: list[plc.Column]) -> list[ColumnBase]:
return [ColumnBase.from_pylibcudf(col) for col in split]
return [
self._from_columns_like_self(
split_from_pylibcudf(split),
self._column_names,
self.index.names if keep_index else None,
)
for split in columns_split
]
@_performance_tracking
def bfill(
self,
value=None,
axis=None,
inplace: bool = False,
limit=None,
limit_area=None,
) -> Self | None:
"""
Synonym for :meth:`Series.fillna` with ``method='bfill'``.
Returns
-------
Object with missing values filled or None if ``inplace=True``.
"""
if limit_area is not None:
raise NotImplementedError("limit_area is currently not supported.")
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
return self.fillna(
method="bfill",
value=value,
axis=axis,
inplace=inplace,
limit=limit,
)
@_performance_tracking
def backfill(
self, value=None, axis=None, inplace: bool = False, limit=None
) -> Self | None:
"""
Synonym for :meth:`Series.fillna` with ``method='bfill'``.
.. deprecated:: 23.06
Use `DataFrame.bfill/Series.bfill` instead.
Returns
-------
Object with missing values filled or None if ``inplace=True``.
"""
# Do not remove until pandas removes this.
warnings.warn(
"DataFrame.backfill/Series.backfill is deprecated. Use "
"DataFrame.bfill/Series.bfill instead",
FutureWarning,
)
return self.bfill(value=value, axis=axis, inplace=inplace, limit=limit)
@_performance_tracking
def ffill(
self,
value=None,
axis=None,
inplace: bool = False,
limit=None,
limit_area: Literal["inside", "outside", None] = None,
):
"""
Synonym for :meth:`Series.fillna` with ``method='ffill'``.
Returns
-------
Object with missing values filled or None if ``inplace=True``.
"""
if limit_area is not None:
raise NotImplementedError("limit_area is currently not supported.")
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
return self.fillna(
method="ffill",
value=value,
axis=axis,
inplace=inplace,
limit=limit,
)
@_performance_tracking
def pad(self, value=None, axis=None, inplace: bool = False, limit=None):
"""
Synonym for :meth:`Series.fillna` with ``method='ffill'``.
.. deprecated:: 23.06
Use `DataFrame.ffill/Series.ffill` instead.
Returns
-------
Object with missing values filled or None if ``inplace=True``.
"""
# Do not remove until pandas removes this.
warnings.warn(
"DataFrame.pad/Series.pad is deprecated. Use "
"DataFrame.ffill/Series.ffill instead",
FutureWarning,
)
return self.ffill(value=value, axis=axis, inplace=inplace, limit=limit)
def add_prefix(self, prefix, axis=None):
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
Series or DataFrame
New Series with updated labels or DataFrame with updated labels.
See Also
--------
Series.add_suffix: Suffix row labels with string 'suffix'.
DataFrame.add_suffix: Suffix column labels with string 'suffix'.
Examples
--------
**Series**
>>> s = cudf.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
dtype: int64
**DataFrame**
>>> df = cudf.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
raise NotImplementedError(
"`IndexedFrame.add_prefix` not currently implemented. \
Use `Series.add_prefix` or `DataFrame.add_prefix`"
)
def add_suffix(self, suffix, axis=None):
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
prefix : str
The string to add after each label.
Returns
-------
Series or DataFrame
New Series with updated labels or DataFrame with updated labels.
See Also
--------
Series.add_prefix: prefix row labels with string 'prefix'.
DataFrame.add_prefix: Prefix column labels with string 'prefix'.
Examples
--------
**Series**
>>> s = cudf.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
**DataFrame**
>>> df = cudf.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
raise NotImplementedError
    @acquire_spill_lock()
    @_performance_tracking
    def _apply(self, func, kernel_class, *args, **kwargs):
        """Apply `func` across the rows of the frame."""
        if kwargs:
            raise ValueError("UDFs using **kwargs are not yet supported.")
        try:
            # Compile `func` into a row-wise kernel via `kernel_class`;
            # `retty` is the inferred scalar return dtype of the UDF.
            kr = kernel_class(self, func, args)
            kernel, retty = kr.get_kernel()
        except Exception as e:
            raise ValueError(
                "user defined function compilation failed."
            ) from e
        # Mask and data column preallocated
        ans_col = _return_arr_from_dtype(retty, len(self))
        ans_mask = as_column(True, length=len(self), dtype=np.dtype("bool"))
        # Launch arguments: (output data, output mask), row count, then the
        # flattened input columns, then the user's extra scalar args.
        output_args = [(ans_col, ans_mask), len(self)]
        input_args = _get_input_args_from_frame(self)
        launch_args = output_args + input_args + list(args)
        try:
            with _CUDFNumbaConfig():
                kernel.forall(len(self))(*launch_args)
        except Exception as e:
            raise RuntimeError("UDF kernel execution failed.") from e
        if retty == CUDF_STRING_DTYPE:
            # String results: copy the managed UDF strings into a real
            # column, then launch a second kernel to free the managed
            # device-side string allocations.
            col = ColumnBase.from_pylibcudf(
                strings_udf.column_from_managed_udf_string_array(ans_col)
            )
            free_kernel = _make_free_string_kernel()
            with _CUDFNumbaConfig():
                free_kernel.forall(len(col))(ans_col, len(col))
        else:
            col = as_column(ans_col, retty)
        # Null out rows the UDF marked invalid in the preallocated mask.
        col = col.set_mask(ans_mask.as_mask())
        result = cudf.Series._from_column(
            col, index=self.index, attrs=self.attrs
        )
        return result
    def sort_values(
        self,
        by,
        axis: Axis = 0,
        ascending: bool | Iterable[bool] = True,
        inplace: bool = False,
        kind: str = "quicksort",
        na_position: Literal["first", "last"] = "last",
        ignore_index: bool = False,
        key=None,
    ):
        """Sort by the values along either axis.
        Parameters
        ----------
        by : str or list of str
            Name or list of names to sort by.
        ascending : bool or list of bool, default True
            Sort ascending vs. descending. Specify list for multiple sort
            orders. If this is a list of bools, must match the length of the
            by.
        na_position : {'first', 'last'}, default 'last'
            'first' puts nulls at the beginning, 'last' puts nulls at the end
        ignore_index : bool, default False
            If True, index will not be sorted.
        key : callable, optional
            Apply the key function to the values
            before sorting. This is similar to the ``key`` argument in the
            builtin ``sorted`` function, with the notable difference that
            this ``key`` function should be *vectorized*. It should expect a
            ``Series`` and return a Series with the same shape as the input.
            It will be applied to each column in `by` independently.
            Currently not supported.
        Returns
        -------
        Frame : Frame with sorted values.
        Examples
        --------
        >>> import cudf
        >>> df = cudf.DataFrame()
        >>> df['a'] = [0, 1, 2]
        >>> df['b'] = [-3, 2, 0]
        >>> df.sort_values('b')
           a  b
        0  0 -3
        2  2  0
        1  1  2
        .. pandas-compat::
            :meth:`pandas.DataFrame.sort_values`, :meth:`pandas.Series.sort_values`
            * Support axis='index' only.
            * Not supporting: inplace, kind
        """
        if na_position not in {"first", "last"}:
            raise ValueError(f"invalid na_position: {na_position}")
        if inplace:
            raise NotImplementedError("`inplace` not currently implemented.")
        if kind != "quicksort":
            # Other valid pandas sort kinds are accepted (with a warning)
            # but the GPU sort implementation is used regardless.
            if kind not in {"mergesort", "heapsort", "stable"}:
                raise AttributeError(
                    f"{kind} is not a valid sorting algorithm for "
                    f"'DataFrame' object"
                )
            warnings.warn(
                f"GPU-accelerated {kind} is currently not supported, "
                f"defaulting to quicksort."
            )
        if axis != 0:
            raise NotImplementedError("`axis` not currently implemented.")
        if key is not None:
            raise NotImplementedError("key is not currently supported.")
        if len(self) == 0:
            return self
        # Resolve `by` against the frame's columns first, then against the
        # index levels. Matching both is ambiguous; matching neither is a
        # KeyError.
        try:
            by_in_columns = self._get_columns_by_label(by)
        except KeyError:
            by_in_columns = None
        if self.ndim == 1:
            # For Series case, we're never selecting an index level.
            by_in_index = None
        else:
            try:
                by_in_index = self.index._get_columns_by_label(by)
            except KeyError:
                by_in_index = None
        if by_in_columns is not None and by_in_index is not None:
            raise ValueError(
                f"{by=} appears in the {type(self).__name__} columns "
                "and as an index level which is ambiguous."
            )
        elif by_in_columns is not None:
            by_columns = by_in_columns
        elif by_in_index is not None:
            by_columns = by_in_index
        else:
            raise KeyError(by)
        if cudf.get_option("mode.pandas_compatible"):
            # In pandas-compatible mode, convert NaNs to nulls so they are
            # ordered by `na_position` like nulls.
            by_columns = by_columns.nans_to_nulls()
        # argsort the `by` column
        out = self._gather(
            GatherMap.from_column_unchecked(
                by_columns._get_sorted_inds(
                    ascending=ascending, na_position=na_position
                ),
                len(self),
                nullify=False,
            ),
            keep_index=not ignore_index,
        )
        return out
    def _n_largest_or_smallest(
        self, largest: bool, n: int, columns, keep: Literal["first", "last"]
    ) -> Self:
        """Shared implementation of ``nlargest``/``nsmallest``.

        Sorts by ``columns`` and gathers ``n`` rows; ``keep`` selects
        whether ties keep their first or last occurrence.
        """
        # Get column to operate on
        if isinstance(columns, str):
            columns = [columns]
        method = "nlargest" if largest else "nsmallest"
        # Reject string-typed key columns up front, with a DataFrame- or
        # Series-specific message.
        for col in columns:
            if self._data[col].dtype == CUDF_STRING_DTYPE:
                if isinstance(self, cudf.DataFrame):
                    error_msg = (
                        f"Column '{col}' has dtype {self._data[col].dtype}, "
                        f"cannot use method '{method}' with this dtype"
                    )
                else:
                    error_msg = (
                        f"Cannot use method '{method}' with "
                        f"dtype {self._data[col].dtype}"
                    )
                raise TypeError(error_msg)
        if len(self) == 0:
            return self
        if keep == "first":
            if n < 0:
                n = 0
            # argsort the `by` column
            # slice(None, n).indices(len(self)) clamps n into [0, len(self)]
            # and yields a (0, stop, 1) prefix of the sorted order.
            return self._gather(
                GatherMap.from_column_unchecked(
                    self._get_columns_by_label(columns)
                    ._get_sorted_inds(ascending=not largest)
                    .slice(*slice(None, n).indices(len(self))),
                    len(self),
                    nullify=False,
                ),
                keep_index=True,
            )
        elif keep == "last":
            # Sort in the opposite direction, then take n entries from the
            # end with a negative-stride slice so that among equal values
            # the last occurrence wins.
            indices = self._get_columns_by_label(columns)._get_sorted_inds(
                ascending=largest
            )
            if n <= 0:
                # Empty slice.
                indices = indices.slice(0, 0)
            else:
                indices = indices.slice(
                    *slice(None, -n - 1, -1).indices(len(self))
                )
            return self._gather(
                GatherMap.from_column_unchecked(
                    indices, len(self), nullify=False
                ),
                keep_index=True,
            )
        else:
            raise ValueError('keep must be either "first", "last"')
def _align_to_index(
self,
index: ColumnLike,
how: str = "outer",
sort: bool = True,
allow_non_unique: bool = False,
) -> Self:
index = ensure_index(index)
if self.index.equals(index):
return self
if not allow_non_unique:
if not self.index.is_unique or not index.is_unique:
raise ValueError("Cannot align indices with non-unique values")
lhs = cudf.DataFrame._from_data(self._data, index=self.index)
rhs = cudf.DataFrame._from_data({}, index=index)
# create a temporary column that we will later sort by
# to recover ordering after index alignment.
sort_col_id = str(uuid4())
if how == "left":
lhs[sort_col_id] = as_column(range(len(lhs)))
elif how == "right":
rhs[sort_col_id] = as_column(range(len(rhs)))
result = lhs.join(rhs, how=how, sort=sort)
if how in ("left", "right"):
result = result.sort_values(sort_col_id)
del result[sort_col_id]
out = self._from_data(
self._data._from_columns_like_self(result._columns)
)
out.index = result.index
out.index.names = self.index.names
return out
    @_performance_tracking
    def _reindex(
        self,
        column_names,
        dtypes=None,
        deep=False,
        index=None,
        inplace=False,
        fill_value=NA,
        level=None,
        method=None,
        limit=None,
        tolerance=None,
    ):
        """
        Helper for `.reindex`
        Parameters
        ----------
        column_names : array-like
            array-like of columns to select from the Frame,
            if ``columns`` is a superset of ``Frame.columns`` new
            columns are created.
        dtypes : dict
            Mapping of dtypes for the empty columns being created.
        deep : boolean, optional, default False
            Whether to make deep copy or shallow copy of the columns.
        index : Index or array-like, default None
            The ``index`` to be used to reindex the Frame with.
        inplace : bool, default False
            Whether to perform the operation in place on the data.
        fill_value : value with which to replace nulls in the result
        Returns
        -------
        Series or DataFrame
        """
        if method is not None:
            raise NotImplementedError("method is not currently supported.")
        if level is not None:
            raise NotImplementedError("level is not currently supported.")
        if limit is not None:
            raise NotImplementedError("limit is not currently supported.")
        if tolerance is not None:
            raise NotImplementedError("tolerance is not currently supported.")
        if dtypes is None:
            dtypes = {}
        df = self
        if index is not None:
            if not df.index.is_unique:
                raise ValueError(
                    "cannot reindex on an axis with duplicate labels"
                )
            index = cudf.Index(
                index, name=getattr(index, "name", self.index.name)
            )
            # The existing index only participates in the alignment join if
            # it has the same number of levels and matching dtypes as the
            # target index.
            idx_dtype_match = (df.index.nlevels == index.nlevels) and all(
                _is_same_dtype(left_dtype, right_dtype)
                for left_dtype, right_dtype in zip(
                    (dtype for _, dtype in df.index._dtypes),
                    (dtype for _, dtype in index._dtypes),
                    strict=True,
                )
            )
            if not idx_dtype_match:
                # Incompatible index dtypes: no rows can align, so start
                # from an empty frame and let the column-fill logic below
                # create all-null columns.
                column_names = (
                    column_names
                    if column_names is not None
                    else list(df._column_names)
                )
                df = cudf.DataFrame()
            else:
                lhs = cudf.DataFrame._from_data({}, index=index)
                rhs = cudf.DataFrame._from_data(
                    {
                        # bookkeeping workaround for unnamed series
                        (name or 0)
                        if isinstance(self, cudf.Series)
                        else name: col
                        for name, col in df._column_labels_and_values
                    },
                    index=df.index,
                )
                # Labels present in the target index but not in the
                # current one; these rows come out of the join as nulls
                # and may be filled with `fill_value`.
                diff = index.difference(df.index)
                df = lhs.join(rhs, how="left", sort=True)
                if fill_value is not NA and len(diff) > 0:
                    df.loc[diff] = fill_value
                # double-argsort to map back from sorted to unsorted positions
                df = df.take(index.argsort(ascending=True).argsort())
        index = index if index is not None else df.index
        # Work out the column labels and column-accessor metadata
        # (multiindex/rangeindex/level names) for the result.
        if column_names is None:
            names = list(df._column_names)
            level_names = self._data.level_names
            multiindex = self._data.multiindex
            rangeindex = self._data.rangeindex
        elif isinstance(column_names, (pd.Index, cudf.Index)):
            if isinstance(column_names, (pd.MultiIndex, cudf.MultiIndex)):
                multiindex = True
                if isinstance(column_names, cudf.MultiIndex):
                    names = list(iter(column_names.to_pandas()))
                else:
                    names = list(iter(column_names))
                rangeindex = False
            else:
                multiindex = False
                names = column_names
                if isinstance(names, cudf.Index):
                    names = names.to_pandas()
                rangeindex = isinstance(
                    column_names, (pd.RangeIndex, cudf.RangeIndex)
                )
            level_names = tuple(column_names.names)
        else:
            names = column_names
            level_names = None
            multiindex = False
            rangeindex = False
        # Reuse existing columns where present; otherwise create empty
        # (all-null) columns of the requested dtype, filled with
        # `fill_value` when one was given.
        cols = {
            name: (
                df._data[name].copy(deep=deep)
                if name in df._data
                else (
                    column_empty(
                        dtype=dtypes.get(name, np.dtype(np.float64)),
                        row_count=len(index),
                    ).fillna(fill_value)
                    if fill_value is not NA
                    else column_empty(
                        dtype=dtypes.get(name, np.dtype(np.float64)),
                        row_count=len(index),
                    )
                )
            )
            for name in names
        }
        result = self.__class__._from_data(
            data=ColumnAccessor(
                cols,
                multiindex=multiindex,
                level_names=level_names,
                rangeindex=rangeindex,
            ),
            index=index,
            attrs=self.attrs,
        )
        return self._mimic_inplace(result, inplace=inplace)
def round(self, decimals=0, how="half_even"):
"""
Round to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. This parameter
must be an int for a Series. For a DataFrame, a dict or a Series
are also valid inputs. If an int is given, round each column to the
same number of places. Otherwise dict and Series round to variable
numbers of places. Column names should be in the keys if
`decimals` is a dict-like, or in the index if `decimals` is a
Series. Any columns not included in `decimals` will be left as is.
Elements of `decimals` which are not columns of the input will be
ignored.
how : str, optional
Type of rounding. Can be either "half_even" (default)
or "half_up" rounding.
Returns
-------
Series or DataFrame
A Series or DataFrame with the affected columns rounded to the
specified number of decimal places.
Examples
--------
**Series**
>>> s = cudf.Series([0.1, 1.4, 2.9])
>>> s.round()
0 0.0
1 1.0
2 3.0
dtype: float64
**DataFrame**
>>> df = cudf.DataFrame(
... [(.21, .32), (.01, .67), (.66, .03), (.21, .18)],
... columns=['dogs', 'cats'],
... )
>>> df
dogs cats
0 0.21 0.32
1 0.01 0.67
2 0.66 0.03
3 0.21 0.18
By providing an integer each column is rounded to the same number
of decimal places.
>>> df.round(1)
dogs cats
0 0.2 0.3
1 0.0 0.7
2 0.7 0.0
3 0.2 0.2
With a dict, the number of places for specific columns can be
specified with the column names as keys and the number of decimal
places as values.
>>> df.round({'dogs': 1, 'cats': 0})
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
Using a Series, the number of places for specific columns can be
specified with the column names as the index and the number of
decimal places as the values.
>>> decimals = cudf.Series([0, 1], index=['cats', 'dogs'])
>>> df.round(decimals)
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
"""
if isinstance(decimals, cudf.Series):
decimals = decimals.to_pandas()
if isinstance(decimals, pd.Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
decimals = decimals.to_dict()
elif isinstance(decimals, int):
decimals = {name: decimals for name in self._column_names}
elif not isinstance(decimals, Mapping):
raise TypeError(
"decimals must be an integer, a dict-like or a Series"
)
cols = (
col.round(decimals[name], how=how)
if name in decimals
and (
col.dtype.kind in "fiu"
or isinstance(
col.dtype, (cudf.Decimal32Dtype, cudf.Decimal64Dtype)
)
)
else col.copy(deep=True)
for name, col in self._column_labels_and_values
)
return self._from_data_like_self(
self._data._from_columns_like_self(cols)
)
def resample(
self,
rule,
axis=0,
closed: Literal["right", "left"] | None = None,
label: Literal["right", "left"] | None = None,
convention: Literal["start", "end", "s", "e"] = "start",
kind=None,
on=None,
level=None,
origin="start_day",
offset=None,
group_keys: bool = False,
):
"""
Convert the frequency of ("resample") the given time series data.
Parameters
----------
rule: str
The offset string representing the frequency to use.
Note that DateOffset objects are not yet supported.
closed: {"right", "left"}, default None
Which side of bin interval is closed. The default is
"left" for all frequency offsets except for "M" and "W",
which have a default of "right".
label: {"right", "left"}, default None
Which bin edge label to label bucket with. The default is
"left" for all frequency offsets except for "M" and "W",
which have a default of "right".
on: str, optional
For a DataFrame, column to use instead of the index for
resampling. Column must be a datetime-like.
level: str or int, optional
For a MultiIndex, level to use instead of the index for
resampling. The level must be a datetime-like.
Returns
-------
A Resampler object
Examples
--------
First, we create a time series with 1 minute intervals:
>>> index = cudf.date_range(start="2001-01-01", periods=10, freq="1T")
>>> sr = cudf.Series(range(10), index=index)
>>> sr
2001-01-01 00:00:00 0
2001-01-01 00:01:00 1
2001-01-01 00:02:00 2
2001-01-01 00:03:00 3
2001-01-01 00:04:00 4
2001-01-01 00:05:00 5
2001-01-01 00:06:00 6
2001-01-01 00:07:00 7
2001-01-01 00:08:00 8
2001-01-01 00:09:00 9
dtype: int64
Downsampling to 3 minute intervals, followed by a "sum" aggregation:
>>> sr.resample("3T").sum()
2001-01-01 00:00:00 3
2001-01-01 00:03:00 12
2001-01-01 00:06:00 21
2001-01-01 00:09:00 9
dtype: int64
Use the right side of each interval to label the bins:
>>> sr.resample("3T", label="right").sum()
2001-01-01 00:03:00 3
2001-01-01 00:06:00 12
2001-01-01 00:09:00 21
2001-01-01 00:12:00 9
dtype: int64
Close the right side of the interval instead of the left:
>>> sr.resample("3T", closed="right").sum()
2000-12-31 23:57:00 0
2001-01-01 00:00:00 6
2001-01-01 00:03:00 15
2001-01-01 00:06:00 24
dtype: int64
Upsampling to 30 second intervals:
>>> sr.resample("30s").asfreq()[:5] # show the first 5 rows
2001-01-01 00:00:00 0
2001-01-01 00:00:30 <NA>
2001-01-01 00:01:00 1
2001-01-01 00:01:30 <NA>
2001-01-01 00:02:00 2
dtype: int64
Upsample and fill nulls using the "bfill" method:
>>> sr.resample("30s").bfill()[:5]
2001-01-01 00:00:00 0
2001-01-01 00:00:30 1
2001-01-01 00:01:00 1
2001-01-01 00:01:30 2
2001-01-01 00:02:00 2
dtype: int64
Resampling by a specified column of a Dataframe:
>>> df = cudf.DataFrame({
... "price": [10, 11, 9, 13, 14, 18, 17, 19],
... "volume": [50, 60, 40, 100, 50, 100, 40, 50],
... "week_starting": cudf.date_range(
... "2018-01-01", periods=8, freq="7D"
... )
... })
>>> df
price volume week_starting
0 10 50 2018-01-01
1 11 60 2018-01-08
2 9 40 2018-01-15
3 13 100 2018-01-22
4 14 50 2018-01-29
5 18 100 2018-02-05
6 17 40 2018-02-12
7 19 50 2018-02-19
>>> df.resample("M", on="week_starting").mean()
price volume
week_starting
2018-01-31 11.4 60.000000
2018-02-28 18.0 63.333333
.. pandas-compat::
:meth:`pandas.DataFrame.resample`, :meth:`pandas.Series.resample`
Note that the dtype of the index (or the 'on' column if using
'on=') in the result will be of a frequency closest to the
resampled frequency. For example, if resampling from
nanoseconds to milliseconds, the index will be of dtype
'datetime64[ms]'.
"""
from cudf.core.resample import DataFrameResampler, SeriesResampler
if kind is not None:
warnings.warn(
"The 'kind' keyword in is "
"deprecated and will be removed in a future version. ",
FutureWarning,
)
raise NotImplementedError("kind is currently not supported.")
if axis != 0:
warnings.warn(
"The 'axis' keyword in is "
"deprecated and will be removed in a future version. ",
FutureWarning,
)
raise NotImplementedError("axis is currently not supported.")
if convention != "start":
warnings.warn(
"The 'convention' keyword in is "
"deprecated and will be removed in a future version. ",
FutureWarning,
)
raise NotImplementedError("convention is currently not supported.")
if origin != "start_day":
raise NotImplementedError("origin is currently not supported.")
if offset is not None:
raise NotImplementedError("offset is currently not supported.")
if group_keys is not False:
raise NotImplementedError("group_keys is currently not supported.")
by = cudf.Grouper(
key=on, freq=rule, closed=closed, label=label, level=level
)
return (
SeriesResampler(self, by=by)
if isinstance(self, cudf.Series)
else DataFrameResampler(self, by=by)
)
    def dropna(
        self,
        axis=0,
        how="any",
        thresh=None,
        subset=None,
        inplace=False,
        ignore_index: bool = False,
    ):
        """
        Drop rows (or columns) containing nulls from a Column.
        Parameters
        ----------
        axis : {0, 1}, optional
            Whether to drop rows (axis=0, default) or columns (axis=1)
            containing nulls.
        how : {"any", "all"}, optional
            Specifies how to decide whether to drop a row (or column).
            any (default) drops rows (or columns) containing at least
            one null value. all drops only rows (or columns) containing
            *all* null values.
        thresh: int, optional
            If specified, then drops every row (or column) containing
            less than `thresh` non-null values
        subset : list, optional
            List of columns to consider when dropping rows (all columns
            are considered by default). Alternatively, when dropping
            columns, subset is a list of rows to consider.
        inplace : bool, default False
            If True, do operation inplace and return None.
        ignore_index : bool, default ``False``
            If ``True``, the resulting axis will be labeled 0, 1, …, n - 1.
        Returns
        -------
        Copy of the DataFrame with rows/columns containing nulls dropped.
        See Also
        --------
        cudf.DataFrame.isna
            Indicate null values.
        cudf.DataFrame.notna
            Indicate non-null values.
        cudf.DataFrame.fillna
            Replace null values.
        cudf.Series.dropna
            Drop null values.
        cudf.Index.dropna
            Drop null indices.
        Examples
        --------
        >>> import cudf
        >>> df = cudf.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
        ...                    "toy": ['Batmobile', None, 'Bullwhip'],
        ...                    "born": [np.datetime64("1940-04-25"),
        ...                             np.datetime64("NaT"),
        ...                             np.datetime64("NaT")]})
        >>> df
               name        toy                 born
        0    Alfred  Batmobile  1940-04-25 00:00:00
        1    Batman       <NA>                 <NA>
        2  Catwoman   Bullwhip                 <NA>
        Drop the rows where at least one element is null.
        >>> df.dropna()
             name        toy       born
        0  Alfred  Batmobile 1940-04-25
        Drop the columns where at least one element is null.
        >>> df.dropna(axis='columns')
               name
        0    Alfred
        1    Batman
        2  Catwoman
        Drop the rows where all elements are null.
        >>> df.dropna(how='all')
               name        toy                 born
        0    Alfred  Batmobile  1940-04-25 00:00:00
        1    Batman       <NA>                 <NA>
        2  Catwoman   Bullwhip                 <NA>
        Keep only the rows with at least 2 non-null values.
        >>> df.dropna(thresh=2)
               name        toy                 born
        0    Alfred  Batmobile  1940-04-25 00:00:00
        2  Catwoman   Bullwhip                 <NA>
        Define in which columns to look for null values.
        >>> df.dropna(subset=['name', 'born'])
             name        toy       born
        0  Alfred  Batmobile 1940-04-25
        Keep the DataFrame with valid entries in the same variable.
        >>> df.dropna(inplace=True)
        >>> df
             name        toy       born
        0  Alfred  Batmobile 1940-04-25
        """
        # Dispatch to the row- or column-wise helper based on `axis`.
        if axis in [0, "index"]:
            result = self._drop_na_rows(how=how, subset=subset, thresh=thresh)
            if ignore_index:
                result.index = RangeIndex(len(result))
        else:
            # Column axis: `ignore_index` is not applied here since only
            # columns, not row labels, are removed.
            result = self._drop_na_columns(
                how=how, subset=subset, thresh=thresh
            )
        return self._mimic_inplace(result, inplace=inplace)
@_performance_tracking
def _drop_na_columns(self, how="any", subset=None, thresh=None):
"""
Drop columns containing nulls
"""
out_cols = []
if subset is None:
df = self
else:
df = self.take(subset)
if thresh is None:
if how == "all":
thresh = 1
else:
thresh = len(df)
for name, col in df._column_labels_and_values:
check_col = col.nans_to_nulls()
no_threshold_valid_count = (
len(col) - check_col.null_count
) < thresh
if no_threshold_valid_count:
continue
out_cols.append(name)
return self[out_cols]
def _drop_na_rows(self, how="any", subset=None, thresh=None):
"""
Drop null rows from `self`.
how : {"any", "all"}, optional
Specifies how to decide whether to drop a row.
any (default) drops rows containing at least
one null value. all drops only rows containing
*all* null values.
subset : list, optional
List of columns to consider when dropping rows.
thresh : int, optional
If specified, then drops every row containing
less than `thresh` non-null values.
"""
subset = self._preprocess_subset(subset)
if len(subset) == 0:
return self.copy(deep=True)
data_columns = [col.nans_to_nulls() for col in self._columns]
return self._from_columns_like_self(
[
ColumnBase.from_pylibcudf(col)
for col in stream_compaction.drop_nulls(
[*self.index._columns, *data_columns],
how=how,
keys=self._positions_from_column_names(subset),
thresh=thresh,
)
],
self._column_names,
self.index.names,
)
    def _apply_boolean_mask(self, boolean_mask: BooleanMask, keep_index=True):
        """Apply boolean mask to each row of `self`.

        Rows corresponding to `False` is dropped.

        If keep_index is False, the index is not preserved.
        """
        # The mask must align one-to-one with the rows of this frame.
        if len(boolean_mask.column) != len(self):
            raise IndexError(
                "Boolean mask has wrong length: "
                f"{len(boolean_mask.column)} not {len(self)}"
            )
        return self._from_columns_like_self(
            [
                ColumnBase.from_pylibcudf(col)
                for col in stream_compaction.apply_boolean_mask(
                    # Filter index columns together with the data columns
                    # so surviving rows keep their original index labels.
                    list(self.index._columns + self._columns)
                    if keep_index
                    else list(self._columns),
                    boolean_mask.column,
                )
            ],
            column_names=self._column_names,
            index_names=self.index.names if keep_index else None,
        )
    def _pandas_repr_compatible(self) -> Self:
        """Return Self but with columns prepared for a pandas-like repr."""
        result = super()._pandas_repr_compatible()
        # The base implementation handles only the data columns; the index
        # needs the same preparation for display.
        result.index = self.index._pandas_repr_compatible()
        return result
    def take(self, indices, axis=0):
        """Return a new frame containing the rows specified by *indices*.

        Parameters
        ----------
        indices : array-like
            Array of ints indicating which positions to take.
        axis : Unsupported

        Returns
        -------
        out : Series or DataFrame
            New object with desired subset of rows.

        Examples
        --------
        **Series**

        >>> s = cudf.Series(['a', 'b', 'c', 'd', 'e'])
        >>> s.take([2, 0, 4, 3])
        2    c
        0    a
        4    e
        3    d
        dtype: object

        **DataFrame**

        >>> a = cudf.DataFrame({'a': [1.0, 2.0, 3.0],
        ...                    'b': cudf.Series(['a', 'b', 'c'])})
        >>> a.take([0, 2, 2])
             a  b
        0  1.0  a
        2  3.0  c
        2  3.0  c
        >>> a.take([True, False, True])
             a  b
        0  1.0  a
        2  3.0  c
        """
        if self._get_axis_from_axis_arg(axis) != 0:
            raise NotImplementedError("Only axis=0 is supported.")
        # GatherMap validates that the indices are in bounds for this frame.
        return self._gather(GatherMap(indices, len(self), nullify=False))
    def _reset_index(
        self,
        level,
        drop,
        col_level=0,
        col_fill="",
        allow_duplicates: bool = False,
        names: Hashable | Sequence[Hashable] | None = None,
    ):
        """Shared path for DataFrame.reset_index and Series.reset_index.

        Returns a ``(column_data, index)`` pair rather than a frame; the
        public ``reset_index`` wrappers build the result object from it.
        """
        if allow_duplicates is not False:
            raise NotImplementedError(
                "allow_duplicates is not currently supported."
            )
        elif names is not None:
            raise NotImplementedError("names is not currently supported.")
        if level is not None:
            if (
                isinstance(level, int)
                and level > 0
                and not isinstance(self.index, MultiIndex)
            ):
                raise IndexError(
                    f"Too many levels: Index has only 1 level, not {level + 1}"
                )
            # Normalize a scalar level to a 1-tuple for uniform handling.
            if not isinstance(level, (tuple, list)):
                level = (level,)
            _check_duplicate_level_names(level, self.index.names)

        index = self.index._new_index_for_reset_index(level, self.index.name)
        if index is None:
            # All levels were consumed; fall back to a fresh default index.
            index = RangeIndex(len(self))
        if drop:
            # Dropping the index means the data columns are unchanged.
            return self._data, index

        new_column_data = {}
        for name, col in self.index._columns_for_reset_index(level):
            # Avoid clobbering an existing "index" data column (pandas
            # renames the incoming index column to "level_0" in that case).
            if name == "index" and "index" in self._data:
                name = "level_0"
            # For MultiIndex columns, place the name at `col_level` and pad
            # the remaining levels with `col_fill`.
            name = (
                tuple(
                    name if i == col_level else col_fill
                    for i in range(self._data.nlevels)
                )
                if self._data.multiindex
                else name
            )
            new_column_data[name] = col
        # This is to match pandas where the new data columns are always
        # inserted to the left of existing data columns.
        return (
            ColumnAccessor(
                {**new_column_data, **self._data},
                self._data.multiindex,
                self._data._level_names,
            ),
            index,
        )
    def _first_or_last(
        self, offset, idx: int, op: Callable, side: str, slice_func: Callable
    ) -> "IndexedFrame":
        """Shared code path for ``first`` and ``last``.

        `idx` selects the anchor element of the index (0 for ``first``,
        -1 for ``last``), `op` shifts it by `offset` (add for ``first``,
        subtract for ``last``), and `slice_func` turns the resulting
        search position into the final slice.
        """
        if not isinstance(self.index, cudf.DatetimeIndex):
            # NOTE(review): message says 'first' even when called from
            # 'last' — matches current behavior; confirm before changing.
            raise TypeError("'first' only supports a DatetimeIndex index.")
        if not isinstance(offset, str):
            raise NotImplementedError(
                f"Unsupported offset type {type(offset)}."
            )

        if len(self) == 0:
            return self.copy()

        pd_offset = pd.tseries.frequencies.to_offset(offset)
        to_search = op(
            pd.Timestamp(self.index._column.element_indexing(idx)), pd_offset
        )
        if (
            idx == 0
            and not isinstance(pd_offset, pd.tseries.offsets.Tick)
            and pd_offset.is_on_offset(pd.Timestamp(self.index[0]))
        ):
            # Special handle is required when the start time of the index
            # is on the end of the offset. See pandas gh29623 for detail.
            to_search = to_search - pd_offset.base
            return self.loc[:to_search]
        # Locate the cut point via binary search on the index column.
        needle = as_column(to_search, dtype=self.index.dtype)
        end_point = int(
            self.index._column.searchsorted(
                needle, side=side
            ).element_indexing(0)
        )
        return slice_func(end_point)
def first(self, offset):
"""Select initial periods of time series data based on a date offset.
When having a DataFrame with **sorted** dates as index, this function
can select the first few rows based on a date offset.
Parameters
----------
offset: str
The offset length of the data that will be selected. For instance,
'1M' will display all rows having their index within the first
month.
Returns
-------
Series or DataFrame
A subset of the caller.
Raises
------
TypeError
If the index is not a ``DatetimeIndex``
Examples
--------
>>> i = cudf.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = cudf.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
>>> ts.first('3D')
A
2018-04-09 1
2018-04-11 2
"""
# Do not remove until pandas 3.0 support is added.
assert PANDAS_LT_300, "Need to drop after pandas-3.0 support is added."
warnings.warn(
"first is deprecated and will be removed in a future version. "
"Please create a mask and filter using `.loc` instead",
FutureWarning,
)
return self._first_or_last(
offset,
idx=0,
op=operator.__add__,
side="left",
slice_func=lambda i: self.iloc[:i],
)
def last(self, offset):
"""Select final periods of time series data based on a date offset.
When having a DataFrame with **sorted** dates as index, this function
can select the last few rows based on a date offset.
Parameters
----------
offset: str
The offset length of the data that will be selected. For instance,
'3D' will display all rows having their index within the last 3
days.
Returns
-------
Series or DataFrame
A subset of the caller.
Raises
------
TypeError
If the index is not a ``DatetimeIndex``
Examples
--------
>>> i = cudf.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = cudf.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
>>> ts.last('3D')
A
2018-04-13 3
2018-04-15 4
"""
# Do not remove until pandas 3.0 support is added.
assert PANDAS_LT_300, "Need to drop after pandas-3.0 support is added."
warnings.warn(
"last is deprecated and will be removed in a future version. "
"Please create a mask and filter using `.loc` instead",
FutureWarning,
)
return self._first_or_last(
offset,
idx=-1,
op=operator.__sub__,
side="right",
slice_func=lambda i: self.iloc[i:],
)
@_performance_tracking
def sample(
self,
n=None,
frac=None,
replace=False,
weights=None,
random_state=None,
axis=None,
ignore_index=False,
):
"""Return a random sample of items from an axis of object.
If reproducible results are required, a random number generator may be
provided via the `random_state` parameter. This function will always
produce the same sample given an identical `random_state`.
Parameters
----------
n : int, optional
Number of items from axis to return. Cannot be used with `frac`.
Default = 1 if frac = None.
frac : float, optional
Fraction of axis items to return. Cannot be used with n.
replace : bool, default False
Allow or disallow sampling of the same row more than once.
`replace == True` is not supported for axis = 1/"columns".
`replace == False` is not supported for axis = 0/"index" given
`random_state` is `None` or a cupy random state, and `weights` is
specified.
weights : ndarray-like, optional
Default `None` for uniform probability distribution over rows to
sample from. If `ndarray` is passed, the length of `weights` should
equal to the number of rows to sample from, and will be normalized
to have a sum of 1. Unlike pandas, index alignment is not currently
not performed.
random_state : int, numpy/cupy RandomState, or None, default None
If None, default cupy random state is chosen.
If int, the seed for the default cupy random state.
If RandomState, rows-to-sample are generated from the RandomState.
axis : {0 or `index`, 1 or `columns`, None}, default None
Axis to sample. Accepts axis number or name.
Default is stat axis for given data type
(0 for Series and DataFrames). Series doesn't support axis=1.
ignore_index : bool, default False
If True, the resulting index will be labeled 0, 1, …, n - 1.
Returns
-------
Series or DataFrame
A new object of same type as caller containing n items
randomly sampled from the caller object.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({"a":{1, 2, 3, 4, 5}})
>>> df.sample(3)
a
1 2
3 4
0 1
>>> sr = cudf.Series([1, 2, 3, 4, 5])
>>> sr.sample(10, replace=True)
1 4
3 1
2 4
0 5
0 1
4 5
4 1
0 2
0 3
3 2
dtype: int64
>>> df = cudf.DataFrame(
... {"a": [1, 2], "b": [2, 3], "c": [3, 4], "d": [4, 5]}
... )
>>> df.sample(2, axis=1)
a c
0 1 3
1 2 4
.. pandas-compat::
:meth:`pandas.DataFrame.sample`, :meth:`pandas.Series.sample`
When sampling from ``axis=0/'index'``, ``random_state`` can be
either a numpy random state (``numpy.random.RandomState``)
or a cupy random state (``cupy.random.RandomState``). When a numpy
random state is used, the output is guaranteed to match the output
of the corresponding pandas method call, but generating the sample
maybe slow. If exact pandas equivalence is not required, using a
cupy random state will achieve better performance,
especially when sampling large number of
items. It's advised to use the matching `ndarray` type to
the random state for the `weights` array.
"""
axis = 0 if axis is None else self._get_axis_from_axis_arg(axis)
size = self.shape[axis]
# Compute `n` from parameter `frac`.
if frac is None:
n = 1 if n is None else n
else:
if frac > 1 and not replace:
raise ValueError(
"Replace has to be set to `True` when upsampling the "
"population `frac` > 1."
)
if n is not None:
raise ValueError(
"Please enter a value for `frac` OR `n`, not both."
)
n = int(round(size * frac))
if n > 0 and size == 0:
raise ValueError(
"Cannot take a sample larger than 0 when axis is empty."
)
if isinstance(random_state, cp.random.RandomState):
lib = cp
elif isinstance(random_state, np.random.RandomState):
lib = np
else:
# Construct random state if `random_state` parameter is None or a
# seed. By default, cupy random state is used to sample rows
# and numpy is used to sample columns. This is because row data
# is stored on device, and the column objects are stored on host.
lib = cp if axis == 0 else np
random_state = lib.random.RandomState(seed=random_state)
# Normalize `weights` array.
if weights is not None:
if isinstance(weights, str):
raise NotImplementedError(
"Weights specified by string is unsupported yet."
)
if size != len(weights):
raise ValueError(
"Weights and axis to be sampled must be of same length."
)
weights = lib.asarray(weights)
weights = weights / weights.sum()
if axis == 0:
return self._sample_axis_0(
n, weights, replace, random_state, ignore_index
)
else:
if isinstance(random_state, cp.random.RandomState):
raise ValueError(
"Sampling from `axis=1`/`columns` with cupy random state"
"isn't supported."
)
return self._sample_axis_1(
n, weights, replace, random_state, ignore_index
)
    def _sample_axis_0(
        self,
        n: int,
        weights: ColumnLike | None,
        replace: bool,
        random_state: np.random.RandomState | cp.random.RandomState,
        ignore_index: bool,
    ):
        """Sample `n` rows of `self` via a gather of randomly chosen
        positions drawn from `random_state`."""
        try:
            gather_map = GatherMap.from_column_unchecked(
                cast(
                    NumericalColumn,
                    as_column(
                        random_state.choice(
                            len(self), size=n, replace=replace, p=weights
                        )
                    ),
                ),
                len(self),
                nullify=False,
            )
        except NotImplementedError as e:
            # cupy's `choice` does not support every combination of
            # replace/weights that numpy does; surface that clearly.
            raise NotImplementedError(
                "Random sampling with cupy does not support these inputs."
            ) from e

        return self._gather(gather_map, keep_index=not ignore_index)
    def _sample_axis_1(
        self,
        n: int,
        weights: ColumnLike | None,
        replace: bool,
        random_state: np.random.RandomState,
        ignore_index: bool,
    ):
        """Sample `n` columns of `self`.

        Only meaningful for two-dimensional frames; subclasses that
        support it override this method.
        """
        raise NotImplementedError(
            f"Sampling from axis 1 is not implemented for {self.__class__}."
        )
    def _binaryop(
        self,
        other: Any,
        op: str,
        fill_value: Any = None,
        can_reindex: bool = False,
        *args,
        **kwargs,
    ):
        """Perform the binary operation `op` between `self` and `other`.

        Alignment semantics are delegated to the subclass hook
        ``_make_operands_and_index_for_binop``.
        """
        # Presumably splits a reflected op name (e.g. "__radd__") into a
        # swap flag and the forward op name — confirm in _check_reflected_op.
        reflect, op = self._check_reflected_op(op)
        (
            operands,
            out_index,
            ca_attributes,
        ) = self._make_operands_and_index_for_binop(
            other, op, fill_value, reflect, can_reindex
        )
        if operands is NotImplemented:
            # Let Python fall back to the other operand's implementation.
            return NotImplemented
        return self._from_data(
            ColumnAccessor(
                type(self)._colwise_binop(operands, op),
                **ca_attributes,
            ),
            index=out_index,
            attrs=self.attrs,
        )
    def _make_operands_and_index_for_binop(
        self,
        other: Any,
        fn: str,
        fill_value: Any = None,
        reflect: bool = False,
        can_reindex: bool = False,
    ) -> tuple[
        dict[str | None, tuple[ColumnBase, Any, bool, Any]]
        | NotImplementedType,
        cudf.Index | None,
        dict[str, Any],
    ]:
        """Build per-column operands, the output index, and ColumnAccessor
        attributes for a binary op.

        Subclass hook for ``_binaryop``; returning ``NotImplemented`` in
        the first slot defers the operation to the other operand.
        """
        raise NotImplementedError(
            f"Binary operations are not supported for {self.__class__}"
        )
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
ret = super().__array_ufunc__(ufunc, method, *inputs, **kwargs)
fname = ufunc.__name__
if ret is not None:
return ret
# Attempt to dispatch all other functions to cupy.
cupy_func = getattr(cp, fname)
if cupy_func:
if ufunc.nin == 2:
other = inputs[self is inputs[0]]
inputs, index, _ = self._make_operands_and_index_for_binop(
other, fname
)
else:
# This works for Index too
inputs = {
name: (col, None, False, None)
for name, col in self._column_labels_and_values
}
index = self.index
data = self._apply_cupy_ufunc_to_operands(
ufunc, cupy_func, inputs, **kwargs
)
out = tuple(self._from_data(out, index=index) for out in data)
return out[0] if ufunc.nout == 1 else out
return NotImplemented
    @_performance_tracking
    def repeat(self, repeats, axis=None):
        """Repeats elements consecutively.

        Returns a new object of caller type(DataFrame/Series) where each
        element of the current object is repeated consecutively a given
        number of times.

        Parameters
        ----------
        repeats : int, or array of ints
            The number of repetitions for each element. This should
            be a non-negative integer. Repeating 0 times will return
            an empty object.

        Returns
        -------
        Series/DataFrame
            A newly created object of same type as caller
            with repeated elements.

        Examples
        --------
        >>> import cudf
        >>> df = cudf.DataFrame({'a': [1, 2, 3], 'b': [10, 20, 30]})
        >>> df
           a   b
        0  1  10
        1  2  20
        2  3  30
        >>> df.repeat(3)
           a   b
        0  1  10
        0  1  10
        0  1  10
        1  2  20
        1  2  20
        1  2  20
        2  3  30
        2  3  30
        2  3  30

        Repeat on Series

        >>> s = cudf.Series([0, 2])
        >>> s
        0    0
        1    2
        dtype: int64
        >>> s.repeat([3, 4])
        0    0
        0    0
        0    0
        1    2
        1    2
        1    2
        1    2
        dtype: int64
        >>> s.repeat(2)
        0    0
        0    0
        1    2
        1    2
        dtype: int64
        """
        # Index columns are repeated together with the data columns so
        # each repeated row keeps its original label.
        res = self._from_columns_like_self(
            self._repeat(
                [*self.index._columns, *self._columns], repeats, axis
            ),
            self._column_names,
            self._index_names,
        )
        if isinstance(res.index, cudf.DatetimeIndex):
            # Repetition breaks any fixed frequency the index carried.
            res.index._freq = None
        return res
    def astype(
        self,
        dtype: Dtype | dict[Hashable, Dtype],
        copy: bool | None = None,
        errors: Literal["raise", "ignore"] = "raise",
    ) -> Self:
        """Cast the object to the given dtype.

        Parameters
        ----------
        dtype : data type, or dict of column name -> data type
            Use a :class:`numpy.dtype` or Python type to cast entire DataFrame
            object to the same type. Alternatively, use ``{col: dtype, ...}``,
            where col is a column label and dtype is a :class:`numpy.dtype`
            or Python type to cast one or more of the DataFrame's columns to
            column-specific types.
        copy : bool, default False
            Return a deep-copy when ``copy=True``. Note by default
            ``copy=False`` setting is used and hence changes to
            values then may propagate to other cudf objects.
        errors : {'raise', 'ignore'}, default 'raise'
            Control raising of exceptions on invalid data for provided dtype.

            - ``raise`` : allow exceptions to be raised
            - ``ignore`` : suppress exceptions. On error return original
              object.

        Returns
        -------
        DataFrame/Series

        Examples
        --------
        **DataFrame**

        >>> import cudf
        >>> df = cudf.DataFrame({'a': [10, 20, 30], 'b': [1, 2, 3]})
        >>> df
            a  b
        0  10  1
        1  20  2
        2  30  3
        >>> df.dtypes
        a    int64
        b    int64
        dtype: object

        Cast all columns to `int32`:

        >>> df.astype('int32').dtypes
        a    int32
        b    int32
        dtype: object

        Cast `a` to `float32` using a dictionary:

        >>> df.astype({'a': 'float32'}).dtypes
        a    float32
        b      int64
        dtype: object
        >>> df.astype({'a': 'float32'})
              a  b
        0  10.0  1
        1  20.0  2
        2  30.0  3

        **Series**

        >>> import cudf
        >>> series = cudf.Series([1, 2], dtype='int32')
        >>> series
        0    1
        1    2
        dtype: int32
        >>> series.astype('int64')
        0    1
        1    2
        dtype: int64

        Convert to categorical type:

        >>> series.astype('category')
        0    1
        1    2
        dtype: category
        Categories (2, int64): [1, 2]

        Convert to ordered categorical type with custom ordering:

        >>> cat_dtype = cudf.CategoricalDtype(categories=[2, 1], ordered=True)
        >>> series.astype(cat_dtype)
        0    1
        1    2
        dtype: category
        Categories (2, int64): [2 < 1]

        Note that using ``copy=False`` (enabled by default)
        and changing data on a new Series will
        propagate changes:

        >>> s1 = cudf.Series([1, 2])
        >>> s1
        0    1
        1    2
        dtype: int64
        >>> s2 = s1.astype('int64', copy=False)
        >>> s2[0] = 10
        >>> s1
        0    10
        1     2
        dtype: int64
        """
        if errors not in ("ignore", "raise"):
            raise ValueError("invalid error value specified")
        try:
            return super().astype(dtype, copy)
        except Exception as e:
            if errors == "raise":
                raise e
            # errors == "ignore": return the original object unchanged.
            return self
@_performance_tracking
def _drop_column(
self, name: Hashable, errors: Literal["ignore", "raise"] = "raise"
) -> None:
"""Drop a column by *name* inplace."""
try:
del self._data[name]
except KeyError as err:
if errors != "ignore":
raise KeyError(f"column '{name}' does not exist") from err
    @_performance_tracking
    def drop(
        self,
        labels=None,
        axis=0,
        index=None,
        columns=None,
        level=None,
        inplace: bool = False,
        errors: Literal["ignore", "raise"] = "raise",
    ) -> Self | None:
        """Drop specified labels from rows or columns.

        Remove rows or columns by specifying label names and corresponding
        axis, or by specifying directly index or column names. When using a
        multi-index, labels on different levels can be removed by specifying
        the level.

        Parameters
        ----------
        labels : single label or list-like
            Index or column labels to drop.
        axis : {0 or 'index', 1 or 'columns'}, default 0
            Whether to drop labels from the index (0 or 'index') or
            columns (1 or 'columns').
        index : single label or list-like
            Alternative to specifying axis (``labels, axis=0``
            is equivalent to ``index=labels``).
        columns : single label or list-like
            Alternative to specifying axis (``labels, axis=1``
            is equivalent to ``columns=labels``).
        level : int or level name, optional
            For MultiIndex, level from which the labels will be removed.
        inplace : bool, default False
            If False, return a copy. Otherwise, do operation
            inplace and return None.
        errors : {'ignore', 'raise'}, default 'raise'
            If 'ignore', suppress error and only existing labels are
            dropped.

        Returns
        -------
        DataFrame or Series
            DataFrame or Series without the removed index or column labels.

        Raises
        ------
        KeyError
            If any of the labels is not found in the selected axis.

        See Also
        --------
        DataFrame.loc : Label-location based indexer for selection by label.
        DataFrame.dropna : Return DataFrame with labels on given axis omitted
            where (all or any) data are missing.
        DataFrame.drop_duplicates : Return DataFrame with duplicate rows
            removed, optionally only considering certain columns.
        Series.reindex
            Return only specified index labels of Series
        Series.dropna
            Return series without null values
        Series.drop_duplicates
            Return series with duplicate values removed

        Examples
        --------
        **Series**

        >>> s = cudf.Series([1,2,3], index=['x', 'y', 'z'])
        >>> s
        x    1
        y    2
        z    3
        dtype: int64

        Drop labels x and z

        >>> s.drop(labels=['x', 'z'])
        y    2
        dtype: int64

        Drop a label from the second level in MultiIndex Series.

        >>> midx = cudf.MultiIndex.from_product([[0, 1, 2], ['x', 'y']])
        >>> s = cudf.Series(range(6), index=midx)
        >>> s
        0  x    0
           y    1
        1  x    2
           y    3
        2  x    4
           y    5
        dtype: int64
        >>> s.drop(labels='y', level=1)
        0  x    0
        1  x    2
        2  x    4
        Name: 2, dtype: int64

        **DataFrame**

        >>> import cudf
        >>> df = cudf.DataFrame({"A": [1, 2, 3, 4],
        ...                      "B": [5, 6, 7, 8],
        ...                      "C": [10, 11, 12, 13],
        ...                      "D": [20, 30, 40, 50]})
        >>> df
           A  B   C   D
        0  1  5  10  20
        1  2  6  11  30
        2  3  7  12  40
        3  4  8  13  50

        Drop columns

        >>> df.drop(['B', 'C'], axis=1)
           A   D
        0  1  20
        1  2  30
        2  3  40
        3  4  50
        >>> df.drop(columns=['B', 'C'])
           A   D
        0  1  20
        1  2  30
        2  3  40
        3  4  50

        Drop a row by index

        >>> df.drop([0, 1])
           A  B   C   D
        2  3  7  12  40
        3  4  8  13  50

        Drop columns and/or rows of MultiIndex DataFrame

        >>> midx = cudf.MultiIndex(levels=[['lama', 'cow', 'falcon'],
        ...                                ['speed', 'weight', 'length']],
        ...                        codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
        ...                               [0, 1, 2, 0, 1, 2, 0, 1, 2]])
        >>> df = cudf.DataFrame(index=midx, columns=['big', 'small'],
        ...                     data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
        ...                           [250, 150], [1.5, 0.8], [320, 250],
        ...                           [1, 0.8], [0.3, 0.2]])
        >>> df
                         big  small
        lama   speed    45.0   30.0
               weight  200.0  100.0
               length    1.5    1.0
        cow    speed    30.0   20.0
               weight  250.0  150.0
               length    1.5    0.8
        falcon speed   320.0  250.0
               weight    1.0    0.8
               length    0.3    0.2
        >>> df.drop(index='cow', columns='small')
                         big
        lama   speed    45.0
               weight  200.0
               length    1.5
        falcon speed   320.0
               weight    1.0
               length    0.3
        >>> df.drop(index='length', level=1)
                         big  small
        lama   speed    45.0   30.0
               weight  200.0  100.0
        cow    speed    30.0   20.0
               weight  250.0  150.0
        falcon speed   320.0  250.0
               weight    1.0    0.8
        """
        # Resolve the three mutually-interacting ways of specifying what to
        # drop (labels+axis, index, columns) into a single target+axis pair.
        if labels is not None:
            if index is not None or columns is not None:
                raise ValueError(
                    "Cannot specify both 'labels' and 'index'/'columns'"
                )
            target = labels
        elif index is not None:
            target = index
            axis = 0
        elif columns is not None:
            target = columns
            axis = 1
        else:
            raise ValueError(
                "Need to specify at least one of 'labels', "
                "'index' or 'columns'"
            )

        if not isinstance(inplace, bool):
            raise ValueError("inplace must be a boolean")
        elif inplace:
            out = self
        else:
            out = self.copy()

        if axis in (1, "columns"):
            for label in _get_unique_drop_labels(target):
                out._drop_column(label, errors=errors)
        elif axis in (0, "index"):
            dropped = _drop_rows_by_labels(out, target, level, errors)

            # `index=` and `columns=` may be combined in one call; handle
            # the column part after the rows have been dropped.
            if columns is not None:
                for label in _get_unique_drop_labels(columns):
                    dropped._drop_column(label, errors=errors)

            out._mimic_inplace(dropped, inplace=True)

        if not inplace:
            return out
        return None
    @_performance_tracking
    def _explode(self, explode_column: Any, ignore_index: bool):
        # Helper function for `explode` in `Series` and `Dataframe`, explodes a
        # specified nested column. Other columns' corresponding rows are
        # duplicated. If ignore_index is set, the original index is not
        # exploded and will be replaced with a `RangeIndex`.
        if not isinstance(self._data[explode_column].dtype, ListDtype):
            # Nothing nested to explode; return a (possibly reindexed) copy.
            result = self.copy()
            if ignore_index:
                result.index = RangeIndex(len(result))
            return result

        column_index = self._column_names.index(explode_column)
        if not ignore_index:
            idx_cols = self.index._columns
        else:
            idx_cols = ()

        with acquire_spill_lock():
            # The index columns (if kept) are prepended, so the explode
            # target's position must be shifted by their count.
            plc_table = plc.lists.explode_outer(
                plc.Table(
                    [
                        col.to_pylibcudf(mode="read")
                        for col in itertools.chain(idx_cols, self._columns)
                    ]
                ),
                column_index + len(idx_cols),
            )
            exploded = [
                ColumnBase.from_pylibcudf(col) for col in plc_table.columns()
            ]
        # We must copy inner datatype of the exploded list column to
        # maintain struct dtype key names
        element_type = cast(
            ListDtype, self._columns[column_index].dtype
        ).element_type

        column_index += len(idx_cols)
        # Reattach dtype metadata: the exploded column gets its element
        # type, every other column keeps its original dtype.
        exploded = [
            new_column._with_type_metadata(
                element_type,
            )
            if i == column_index
            else new_column._with_type_metadata(old_column.dtype)
            for i, (new_column, old_column) in enumerate(
                zip(
                    exploded,
                    itertools.chain(idx_cols, self._columns),
                    strict=True,
                )
            )
        ]

        data = type(self._data)(
            dict(
                zip(self._column_names, exploded[len(idx_cols) :], strict=True)
            ),
            multiindex=self._data.multiindex,
            level_names=self._data.level_names,
            rangeindex=self._data.rangeindex,
            label_dtype=self._data.label_dtype,
            verify=False,
        )

        if len(idx_cols):
            # Rebuild the index from its exploded columns, restoring the
            # original index type, names, and metadata.
            index = _index_from_data(
                dict(enumerate(exploded[: len(idx_cols)]))
            )._copy_type_metadata(self.index)
            if (
                isinstance(self.index, cudf.CategoricalIndex)
                and not isinstance(index, cudf.CategoricalIndex)
            ) or (
                isinstance(self.index, cudf.MultiIndex)
                and not isinstance(index, cudf.MultiIndex)
            ):
                index = type(self.index)._from_data(index._data)
            if isinstance(self.index, cudf.MultiIndex):
                index.names = self.index.names
            else:
                index.name = self.index.name
        else:
            index = None

        result = type(self)._from_data(data, index)
        return result
    @_performance_tracking
    def tile(self, count: int):
        """Repeats the rows `count` times to form a new Frame.

        Parameters
        ----------
        self : input Table containing columns to interleave.
        count : Number of times to tile "rows". Must be non-negative.

        Examples
        --------
        >>> import cudf
        >>> df = cudf.DataFrame([[8, 4, 7], [5, 2, 3]])
        >>> df.tile(2)
           0  1  2
        0  8  4  7
        1  5  2  3
        0  8  4  7
        1  5  2  3

        Returns
        -------
        The indexed frame containing the tiled "rows".
        """
        # Index columns are tiled together with the data columns so
        # repeated rows keep their original labels.
        with acquire_spill_lock():
            plc_table = plc.reshape.tile(
                plc.Table(
                    [
                        col.to_pylibcudf(mode="read")
                        for col in itertools.chain(
                            self.index._columns, self._columns
                        )
                    ]
                ),
                count,
            )
            tiled = [
                ColumnBase.from_pylibcudf(plc) for plc in plc_table.columns()
            ]
        return self._from_columns_like_self(
            tiled,
            column_names=self._column_names,
            index_names=self._index_names,
        )
@_performance_tracking
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=no_default,
group_keys=False,
observed=True,
dropna=True,
):
if sort is no_default:
sort = cudf.get_option("mode.pandas_compatible")
if axis not in (0, "index"):
raise NotImplementedError("axis parameter is not yet implemented")
if not observed:
raise NotImplementedError(
"observed parameter is not yet implemented"
)
if by is None and level is None:
raise TypeError(
"groupby() requires either by or level to be specified."
)
if group_keys is None:
group_keys = False
return (
self.__class__._resampler(self, by=by)
if isinstance(by, cudf.Grouper) and by.freq
else self.__class__._groupby(
self,
by=by,
level=level,
as_index=as_index,
dropna=dropna,
sort=sort,
group_keys=group_keys,
)
)
    @_performance_tracking
    @docutils.doc_apply(
        doc_binop_template.format(
            operation="Addition",
            op_name="add",
            equivalent_op="frame + other",
            df_op_example=textwrap.dedent(
                """
                >>> df.add(1)
                angles degrees
                circle 1 361
                triangle 4 181
                rectangle 5 361
                """,
            ),
            ser_op_example=textwrap.dedent(
                """
                >>> a.add(b)
                a 2
                b <NA>
                c <NA>
                d <NA>
                e <NA>
                dtype: int64
                >>> a.add(b, fill_value=0)
                a 2
                b 1
                c 1
                d 1
                e <NA>
                dtype: int64
                """
            ),
        )
    )
    # Public docstring is generated from ``doc_binop_template`` above.
    def add(self, other, axis, level=None, fill_value=None):
        if level is not None:
            raise NotImplementedError("level parameter is not supported yet.")

        return self._binaryop(other, "__add__", fill_value)
    @_performance_tracking
    @docutils.doc_apply(
        doc_binop_template.format(
            operation="Addition",
            op_name="radd",
            equivalent_op="other + frame",
            df_op_example=textwrap.dedent(
                """
                >>> df.radd(1)
                angles degrees
                circle 1 361
                triangle 4 181
                rectangle 5 361
                """
            ),
            ser_op_example=textwrap.dedent(
                """
                >>> a.radd(b)
                a 2
                b <NA>
                c <NA>
                d <NA>
                e <NA>
                dtype: int64
                >>> a.radd(b, fill_value=0)
                a 2
                b 1
                c 1
                d 1
                e <NA>
                dtype: int64
                """
            ),
        )
    )
    # Reflected counterpart of ``add``; docstring comes from the template.
    def radd(self, other, axis, level=None, fill_value=None):
        if level is not None:
            raise NotImplementedError("level parameter is not supported yet.")

        return self._binaryop(other, "__radd__", fill_value)
    @_performance_tracking
    @docutils.doc_apply(
        doc_binop_template.format(
            operation="Subtraction",
            op_name="sub",
            equivalent_op="frame - other",
            df_op_example=textwrap.dedent(
                """
                >>> df.sub(1)
                angles degrees
                circle -1 359
                triangle 2 179
                rectangle 3 359
                """
            ),
            ser_op_example=textwrap.dedent(
                """
                >>> a.sub(b)
                a 0
                b <NA>
                c <NA>
                d <NA>
                e <NA>
                dtype: int64
                >>> a.sub(b, fill_value=0)
                a 2
                b 1
                c 1
                d -1
                e <NA>
                dtype: int64
                """
            ),
        )
    )
    # Public docstring is generated from ``doc_binop_template`` above.
    def subtract(self, other, axis, level=None, fill_value=None):
        if level is not None:
            raise NotImplementedError("level parameter is not supported yet.")

        return self._binaryop(other, "__sub__", fill_value)

    # Alias for ``subtract``, matching the pandas spelling.
    sub = subtract
    @_performance_tracking
    @docutils.doc_apply(
        doc_binop_template.format(
            operation="Subtraction",
            op_name="rsub",
            equivalent_op="other - frame",
            df_op_example=textwrap.dedent(
                """
                >>> df.rsub(1)
                angles degrees
                circle 1 -359
                triangle -2 -179
                rectangle -3 -359
                """
            ),
            ser_op_example=textwrap.dedent(
                """
                >>> a.rsub(b)
                a 0
                b <NA>
                c <NA>
                d <NA>
                e <NA>
                dtype: int64
                >>> a.rsub(b, fill_value=0)
                a 0
                b -1
                c -1
                d 1
                e <NA>
                dtype: int64
                """
            ),
        )
    )
    # Reflected counterpart of ``subtract``; docstring from the template.
    def rsub(self, other, axis, level=None, fill_value=None):
        if level is not None:
            raise NotImplementedError("level parameter is not supported yet.")

        return self._binaryop(other, "__rsub__", fill_value)
    @_performance_tracking
    @docutils.doc_apply(
        doc_binop_template.format(
            operation="Multiplication",
            op_name="mul",
            equivalent_op="frame * other",
            df_op_example=textwrap.dedent(
                """
                >>> df.multiply(1)
                angles degrees
                circle 0 360
                triangle 3 180
                rectangle 4 360
                """
            ),
            ser_op_example=textwrap.dedent(
                """
                >>> a.multiply(b)
                a 1
                b <NA>
                c <NA>
                d <NA>
                e <NA>
                dtype: int64
                >>> a.multiply(b, fill_value=0)
                a 1
                b 0
                c 0
                d 0
                e <NA>
                dtype: int64
                """
            ),
        )
    )
    # Public docstring is generated from ``doc_binop_template`` above.
    def multiply(self, other, axis, level=None, fill_value=None):
        if level is not None:
            raise NotImplementedError("level parameter is not supported yet.")

        return self._binaryop(other, "__mul__", fill_value)

    # Alias for ``multiply``, matching the pandas spelling.
    mul = multiply
    @_performance_tracking
    @docutils.doc_apply(
        doc_binop_template.format(
            operation="Multiplication",
            op_name="rmul",
            equivalent_op="other * frame",
            df_op_example=textwrap.dedent(
                """
                >>> df.rmul(1)
                angles degrees
                circle 0 360
                triangle 3 180
                rectangle 4 360
                """
            ),
            ser_op_example=textwrap.dedent(
                """
                >>> a.rmul(b)
                a 1
                b <NA>
                c <NA>
                d <NA>
                e <NA>
                dtype: int64
                >>> a.rmul(b, fill_value=0)
                a 1
                b 0
                c 0
                d 0
                e <NA>
                dtype: int64
                """
            ),
        )
    )
    # Reflected counterpart of ``multiply``; docstring from the template.
    def rmul(self, other, axis, level=None, fill_value=None):
        if level is not None:
            raise NotImplementedError("level parameter is not supported yet.")

        return self._binaryop(other, "__rmul__", fill_value)
    @_performance_tracking
    @docutils.doc_apply(
        doc_binop_template.format(
            operation="Modulo",
            op_name="mod",
            equivalent_op="frame % other",
            df_op_example=textwrap.dedent(
                """
                >>> df.mod(1)
                angles degrees
                circle 0 0
                triangle 0 0
                rectangle 0 0
                """
            ),
            ser_op_example=textwrap.dedent(
                """
                >>> a.mod(b)
                a 0
                b <NA>
                c <NA>
                d <NA>
                e <NA>
                dtype: int64
                >>> a.mod(b, fill_value=0)
                a 0
                b 4294967295
                c 4294967295
                d 0
                e <NA>
                dtype: int64
                """
            ),
        )
    )
    # Public docstring is generated from ``doc_binop_template`` above.
    def mod(self, other, axis, level=None, fill_value=None):
        if level is not None:
            raise NotImplementedError("level parameter is not supported yet.")

        return self._binaryop(other, "__mod__", fill_value)
    @_performance_tracking
    @docutils.doc_apply(
        doc_binop_template.format(
            operation="Modulo",
            op_name="rmod",
            equivalent_op="other % frame",
            df_op_example=textwrap.dedent(
                """
                >>> df.rmod(1)
                angles degrees
                circle 4294967295 1
                triangle 1 1
                rectangle 1 1
                """
            ),
            ser_op_example=textwrap.dedent(
                """
                >>> a.rmod(b)
                a 0
                b <NA>
                c <NA>
                d <NA>
                e <NA>
                dtype: int64
                >>> a.rmod(b, fill_value=0)
                a 0
                b 0
                c 0
                d 4294967295
                e <NA>
                dtype: int64
                """
            ),
        )
    )
    # Reflected counterpart of ``mod``; docstring from the template.
    def rmod(self, other, axis, level=None, fill_value=None):
        if level is not None:
            raise NotImplementedError("level parameter is not supported yet.")

        return self._binaryop(other, "__rmod__", fill_value)
    @_performance_tracking
    @docutils.doc_apply(
        doc_binop_template.format(
            operation="Exponential",
            op_name="pow",
            equivalent_op="frame ** other",
            df_op_example=textwrap.dedent(
                """
                >>> df.pow(1)
                angles degrees
                circle 0 360
                triangle 2 180
                rectangle 4 360
                """
            ),
            ser_op_example=textwrap.dedent(
                """
                >>> a.pow(b)
                a 1
                b <NA>
                c <NA>
                d <NA>
                e <NA>
                dtype: int64
                >>> a.pow(b, fill_value=0)
                a 1
                b 1
                c 1
                d 0
                e <NA>
                dtype: int64
                """
            ),
        )
    )
    # Public docstring is generated from ``doc_binop_template`` above.
    def pow(self, other, axis, level=None, fill_value=None):
        if level is not None:
            raise NotImplementedError("level parameter is not supported yet.")

        return self._binaryop(other, "__pow__", fill_value)
    @_performance_tracking
    @docutils.doc_apply(
        doc_binop_template.format(
            operation="Exponential",
            op_name="rpow",
            equivalent_op="other ** frame",
            df_op_example=textwrap.dedent(
                """
                >>> df.rpow(1)
                angles degrees
                circle 1 1
                triangle 1 1
                rectangle 1 1
                """
            ),
            ser_op_example=textwrap.dedent(
                """
                >>> a.rpow(b)
                a 1
                b <NA>
                c <NA>
                d <NA>
                e <NA>
                dtype: int64
                >>> a.rpow(b, fill_value=0)
                a 1
                b 0
                c 0
                d 1
                e <NA>
                dtype: int64
                """
            ),
        )
    )
    # Reflected counterpart of ``pow``; docstring from the template.
    def rpow(self, other, axis, level=None, fill_value=None):
        if level is not None:
            raise NotImplementedError("level parameter is not supported yet.")

        return self._binaryop(other, "__rpow__", fill_value)
    @_performance_tracking
    @docutils.doc_apply(
        doc_binop_template.format(
            operation="Integer division",
            op_name="floordiv",
            equivalent_op="frame // other",
            df_op_example=textwrap.dedent(
                """
                >>> df.floordiv(1)
                angles degrees
                circle 0 360
                triangle 3 180
                rectangle 4 360
                """
            ),
            ser_op_example=textwrap.dedent(
                """
                >>> a.floordiv(b)
                a 1
                b <NA>
                c <NA>
                d <NA>
                e <NA>
                dtype: int64
                >>> a.floordiv(b, fill_value=0)
                a 1
                b 9223372036854775807
                c 9223372036854775807
                d 0
                e <NA>
                dtype: int64
                """
            ),
        )
    )
    # Public docstring is generated from ``doc_binop_template`` above.
    def floordiv(self, other, axis, level=None, fill_value=None):
        if level is not None:
            raise NotImplementedError("level parameter is not supported yet.")

        return self._binaryop(other, "__floordiv__", fill_value)
@_performance_tracking
@docutils.doc_apply(
doc_binop_template.format(
operation="Integer division",
op_name="rfloordiv",
equivalent_op="other // frame",
df_op_example=textwrap.dedent(
"""
>>> df.rfloordiv(1)
angles degrees
circle 9223372036854775807 0
triangle 0 0
rectangle 0 0
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.rfloordiv(b)
a 1
b <NA>
c <NA>
d <NA>
e <NA>
dtype: int64
>>> a.rfloordiv(b, fill_value=0)
a 1
b 0
c 0
d 9223372036854775807
e <NA>
dtype: int64
"""
),
)
)
def rfloordiv(self, other, axis, level=None, fill_value=None):
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._binaryop(other, "__rfloordiv__", fill_value)
@_performance_tracking
@docutils.doc_apply(
doc_binop_template.format(
operation="Floating division",
op_name="truediv",
equivalent_op="frame / other",
df_op_example=textwrap.dedent(
"""
>>> df.truediv(1)
angles degrees
circle 0.0 360.0
triangle 3.0 180.0
rectangle 4.0 360.0
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.truediv(b)
a 1.0
b <NA>
c <NA>
d <NA>
e <NA>
dtype: float64
>>> a.truediv(b, fill_value=0)
a 1.0
b Inf
c Inf
d 0.0
e <NA>
dtype: float64
"""
),
)
)
def truediv(self, other, axis, level=None, fill_value=None):
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._binaryop(other, "__truediv__", fill_value)
# Alias for truediv
div = truediv
divide = truediv
@_performance_tracking
@docutils.doc_apply(
doc_binop_template.format(
operation="Floating division",
op_name="rtruediv",
equivalent_op="other / frame",
df_op_example=textwrap.dedent(
"""
>>> df.rtruediv(1)
angles degrees
circle inf 0.002778
triangle 0.333333 0.005556
rectangle 0.250000 0.002778
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.rtruediv(b)
a 1.0
b <NA>
c <NA>
d <NA>
e <NA>
dtype: float64
>>> a.rtruediv(b, fill_value=0)
a 1.0
b 0.0
c 0.0
d Inf
e <NA>
dtype: float64
"""
),
)
)
def rtruediv(self, other, axis, level=None, fill_value=None):
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._binaryop(other, "__rtruediv__", fill_value)
# Alias for rtruediv
rdiv = rtruediv
@_performance_tracking
@docutils.doc_apply(
doc_binop_template.format(
operation="Equal to",
op_name="eq",
equivalent_op="frame == other",
df_op_example=textwrap.dedent(
"""
>>> df.eq(1)
angles degrees
circle False False
triangle False False
rectangle False False
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.eq(b)
a True
b <NA>
c <NA>
d <NA>
e <NA>
dtype: bool
>>> a.eq(b, fill_value=0)
a True
b False
c False
d False
e <NA>
dtype: bool
"""
),
)
)
def eq(self, other, axis="columns", level=None, fill_value=None):
return self._binaryop(
other=other, op="__eq__", fill_value=fill_value, can_reindex=True
)
@_performance_tracking
@docutils.doc_apply(
doc_binop_template.format(
operation="Not equal to",
op_name="ne",
equivalent_op="frame != other",
df_op_example=textwrap.dedent(
"""
>>> df.ne(1)
angles degrees
circle True True
triangle True True
rectangle True True
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.ne(b)
a False
b <NA>
c <NA>
d <NA>
e <NA>
dtype: bool
>>> a.ne(b, fill_value=0)
a False
b True
c True
d True
e <NA>
dtype: bool
"""
),
)
)
def ne(self, other, axis="columns", level=None, fill_value=None):
return self._binaryop(
other=other, op="__ne__", fill_value=fill_value, can_reindex=True
)
@_performance_tracking
@docutils.doc_apply(
doc_binop_template.format(
operation="Less than",
op_name="lt",
equivalent_op="frame < other",
df_op_example=textwrap.dedent(
"""
>>> df.lt(1)
angles degrees
circle True False
triangle False False
rectangle False False
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.lt(b)
a False
b <NA>
c <NA>
d <NA>
e <NA>
dtype: bool
>>> a.lt(b, fill_value=0)
a False
b False
c False
d True
e <NA>
dtype: bool
"""
),
)
)
def lt(self, other, axis="columns", level=None, fill_value=None):
return self._binaryop(
other=other, op="__lt__", fill_value=fill_value, can_reindex=True
)
@_performance_tracking
@docutils.doc_apply(
doc_binop_template.format(
operation="Less than or equal to",
op_name="le",
equivalent_op="frame <= other",
df_op_example=textwrap.dedent(
"""
>>> df.le(1)
angles degrees
circle True False
triangle False False
rectangle False False
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.le(b)
a True
b <NA>
c <NA>
d <NA>
e <NA>
dtype: bool
>>> a.le(b, fill_value=0)
a True
b False
c False
d True
e <NA>
dtype: bool
"""
),
)
)
def le(self, other, axis="columns", level=None, fill_value=None):
return self._binaryop(
other=other, op="__le__", fill_value=fill_value, can_reindex=True
)
@_performance_tracking
@docutils.doc_apply(
doc_binop_template.format(
operation="Greater than",
op_name="gt",
equivalent_op="frame > other",
df_op_example=textwrap.dedent(
"""
>>> df.gt(1)
angles degrees
circle False True
triangle True True
rectangle True True
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.gt(b)
a False
b <NA>
c <NA>
d <NA>
e <NA>
dtype: bool
>>> a.gt(b, fill_value=0)
a False
b True
c True
d False
e <NA>
dtype: bool
"""
),
)
)
def gt(self, other, axis="columns", level=None, fill_value=None):
return self._binaryop(
other=other, op="__gt__", fill_value=fill_value, can_reindex=True
)
@_performance_tracking
@docutils.doc_apply(
doc_binop_template.format(
operation="Greater than or equal to",
op_name="ge",
equivalent_op="frame >= other",
df_op_example=textwrap.dedent(
"""
>>> df.ge(1)
angles degrees
circle False True
triangle True True
rectangle True True
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.ge(b)
a True
b <NA>
c <NA>
d <NA>
e <NA>
dtype: bool
>>> a.ge(b, fill_value=0)
a True
b True
c True
d False
e <NA>
dtype: bool
"""
),
)
)
def ge(self, other, axis="columns", level=None, fill_value=None):
return self._binaryop(
other=other, op="__ge__", fill_value=fill_value, can_reindex=True
)
def _preprocess_subset(self, subset) -> set[Hashable]:
if subset is None:
subset = self._column_names
elif (
is_scalar(subset)
or isinstance(subset, tuple)
and subset in self._column_names
):
subset = (subset,)
diff = set(subset) - set(self._column_names)
if len(diff) != 0:
raise KeyError(f"columns {diff} do not exist")
return subset
@_performance_tracking
def rank(
self,
axis: Literal[0, "index"] = 0,
method: Literal["average", "min", "max", "first", "dense"] = "average",
numeric_only: bool = False,
na_option: Literal["keep", "top", "bottom"] = "keep",
ascending: bool = True,
pct: bool = False,
) -> Self:
"""
Compute numerical data ranks (1 through n) along axis.
By default, equal values are assigned a rank that is the average of the
ranks of those values.
Parameters
----------
axis : {0 or 'index'}, default 0
Index to direct ranking.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
How to rank the group of records that have the same value
(i.e. ties):
* average: average rank of the group
* min: lowest rank in the group
* max: highest rank in the group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups.
numeric_only : bool, default False
For DataFrame objects, rank only numeric columns if set to True.
na_option : {'keep', 'top', 'bottom'}, default 'keep'
How to rank NaN values:
* keep: assign NaN rank to NaN values
* top: assign smallest rank to NaN values if ascending
* bottom: assign highest rank to NaN values if ascending.
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
pct : bool, default False
Whether or not to display the returned rankings in percentile
form.
Returns
-------
same type as caller
Return a Series or DataFrame with data ranks as values.
"""
if method not in {"average", "min", "max", "first", "dense"}:
raise KeyError(method)
method_enum = plc.aggregation.RankMethod[method.upper()]
if na_option not in {"keep", "top", "bottom"}:
raise ValueError(
"na_option must be one of 'keep', 'top', or 'bottom'"
)
if axis not in (0, "index"):
raise NotImplementedError(
f"axis must be `0`/`index`, "
f"axis={axis} is not yet supported in rank"
)
num_cols = self._num_columns
dropped_cols = False
source = self
if numeric_only:
if isinstance(source, cudf.Series) and not is_dtype_obj_numeric(
source.dtype, include_decimal=False
):
raise TypeError(
"Series.rank does not allow numeric_only=True with "
"non-numeric dtype."
)
numeric_cols = (
name
for name, dtype in self._dtypes
if is_dtype_obj_numeric(dtype, include_decimal=False)
)
source = self._get_columns_by_label(numeric_cols)
if source.empty:
return source.astype(np.dtype(np.float64))
elif source._num_columns != num_cols:
dropped_cols = True
column_order = (
plc.types.Order.ASCENDING
if ascending
else plc.types.Order.DESCENDING
)
# ascending
# #top = na_is_smallest
# #bottom = na_is_largest
# #keep = na_is_largest
# descending
# #top = na_is_largest
# #bottom = na_is_smallest
# #keep = na_is_smallest
if ascending:
if na_option == "top":
null_precedence = plc.types.NullOrder.BEFORE
else:
null_precedence = plc.types.NullOrder.AFTER
else:
if na_option == "top":
null_precedence = plc.types.NullOrder.AFTER
else:
null_precedence = plc.types.NullOrder.BEFORE
c_null_handling = (
plc.types.NullPolicy.EXCLUDE
if na_option == "keep"
else plc.types.NullPolicy.INCLUDE
)
if cudf.get_option("mode.pandas_compatible"):
source = source.nans_to_nulls()
result_columns = [
col.rank(
method=method_enum,
column_order=column_order,
null_handling=c_null_handling,
null_precedence=null_precedence,
pct=pct,
)
for col in source._columns
]
if dropped_cols:
result = type(source)._from_data(
ColumnAccessor(
dict(
zip(source._column_names, result_columns, strict=True)
),
multiindex=source._data.multiindex,
level_names=source._data.level_names,
label_dtype=source._data.label_dtype,
verify=False,
),
)
else:
result = source._from_data_like_self(
self._data._from_columns_like_self(result_columns)
)
result.index = source.index
return result.astype(np.float64)
def convert_dtypes(
self,
infer_objects: bool = True,
convert_string: bool = True,
convert_integer: bool = True,
convert_boolean: bool = True,
convert_floating: bool = True,
dtype_backend=None,
) -> Self:
"""
Convert columns to the best possible nullable dtypes.
If the dtype is numeric, and consists of all integers, convert
to an appropriate integer extension type. Otherwise, convert
to an appropriate floating type.
All other dtypes are always returned as-is as all dtypes in
cudf are nullable.
"""
if not (convert_floating and convert_integer):
return self.copy()
else:
if (
cudf.get_option("mode.pandas_compatible")
and dtype_backend is None
):
raise NotImplementedError(
"The `dtype_backend` argument is not supported in "
"pandas_compatible mode."
)
cols = []
for col in self._columns:
if col.dtype.kind == "f":
col = col.fillna(0)
as_int = col.astype(np.dtype(np.int64))
if cp.allclose(col, as_int):
cols.append(as_int)
continue
cols.append(col)
return self._from_data_like_self(
self._data._from_columns_like_self(cols, verify=False)
)
@_performance_tracking
def serialize(self):
header, frames = super().serialize()
header["index"], index_frames = self.index.device_serialize()
header["index_frame_count"] = len(index_frames)
# For backwards compatibility with older versions of cuDF, index
# columns are placed before data columns.
frames = index_frames + frames
return header, frames
@classmethod
@_performance_tracking
def deserialize(cls, header, frames):
index_nframes = header["index_frame_count"]
obj = super().deserialize(
header, frames[header["index_frame_count"] :]
)
index = cls.device_deserialize(header["index"], frames[:index_nframes])
obj.index = index
return obj
@_warn_no_dask_cudf
def __dask_tokenize__(self):
from dask.base import normalize_token
return [
type(self),
str(dict(self._dtypes)),
*[
normalize_token(col.dtype.categories)
for col in self._columns
if col.dtype == "category"
],
normalize_token(self.index),
normalize_token(self.hash_values().values_host),
]
def _check_duplicate_level_names(specified, level_names):
"""Raise if any of `specified` has duplicates in `level_names`."""
if specified is None:
return
if len(set(level_names)) == len(level_names):
return
duplicates = {key for key, val in Counter(level_names).items() if val > 1}
duplicates_specified = [spec for spec in specified if spec in duplicates]
if not len(duplicates_specified) == 0:
# Note: pandas raises first encountered duplicates, cuDF raises all.
raise ValueError(
f"The names {duplicates_specified} occurs multiple times, use a"
" level number"
)
@_performance_tracking
def _get_replacement_values_for_columns(
to_replace: Any, value: Any, columns_dtype_map: dict[Any, DtypeObj]
) -> tuple[
dict[Any, bool], dict[Any, ColumnBase | list], dict[Any, ColumnBase | list]
]:
"""
Returns a per column mapping for the values to be replaced, new
values to be replaced with and if all the values are empty.
Parameters
----------
to_replace : numeric, str, list-like or dict
Contains the values to be replaced.
value : numeric, str, list-like, or dict
Contains the values to replace `to_replace` with.
columns_dtype_map : dict
A column to dtype mapping representing dtype of columns.
Returns
-------
all_na_columns : dict
A dict mapping of all columns if they contain all na values
to_replace_columns : dict
A dict mapping of all columns and the existing values that
have to be replaced.
values_columns : dict
A dict mapping of all columns and the corresponding values
to be replaced with.
"""
to_replace_columns: dict[Any, ColumnBase | list] = {}
values_columns: dict[Any, ColumnBase | list] = {}
all_na_columns: dict[Any, bool] = {}
if is_scalar(to_replace) and is_scalar(value):
to_replace_columns = {col: [to_replace] for col in columns_dtype_map}
values_columns = {col: [value] for col in columns_dtype_map}
elif is_list_like(to_replace) or isinstance(
to_replace, (ColumnBase, Index)
):
if is_scalar(value):
to_replace_columns = {col: to_replace for col in columns_dtype_map}
values_columns = {
col: [value]
if is_dtype_obj_numeric(dtype, include_decimal=False)
else as_column(
value,
length=len(to_replace),
dtype=cudf.dtype(type(value)),
)
for col, dtype in columns_dtype_map.items()
}
elif is_list_like(value) or is_column_like(value):
if len(to_replace) != len(value):
raise ValueError(
f"Replacement lists must be "
f"of same length."
f" Expected {len(to_replace)}, got {len(value)}."
)
else:
to_replace_columns = {
col: to_replace for col in columns_dtype_map
}
values_columns = {col: value for col in columns_dtype_map}
else:
raise TypeError(
"value argument must be scalar, list-like or Series"
)
elif _is_series(to_replace):
if value is None or value is no_default:
to_replace_columns = {
col: as_column(to_replace.index) for col in columns_dtype_map
}
values_columns = {col: to_replace for col in columns_dtype_map}
elif is_dict_like(value):
to_replace_columns = {
col: to_replace[col]
for col in columns_dtype_map
if col in to_replace
}
values_columns = {
col: value[col] for col in to_replace_columns if col in value
}
elif is_scalar(value) or _is_series(value):
to_replace_columns = {
col: to_replace[col]
for col in columns_dtype_map
if col in to_replace
}
values_columns = {
col: [value] if is_scalar(value) else value[col]
for col in to_replace_columns
if col in value
}
else:
raise ValueError(
"Series.replace cannot use dict-like to_replace and non-None "
"value"
)
elif is_dict_like(to_replace):
if value is None or value is no_default:
to_replace_columns = {
col: list(to_replace.keys()) for col in columns_dtype_map
}
values_columns = {
col: list(to_replace.values()) for col in columns_dtype_map
}
elif is_dict_like(value):
to_replace_columns = {
col: to_replace[col]
for col in columns_dtype_map
if col in to_replace
}
values_columns = {
col: value[col] for col in columns_dtype_map if col in value
}
elif is_scalar(value) or _is_series(value):
to_replace_columns = {
col: to_replace[col]
for col in columns_dtype_map
if col in to_replace
}
values_columns = {
col: [value] if is_scalar(value) else value
for col in columns_dtype_map
if col in to_replace
}
else:
raise TypeError("value argument must be scalar, dict, or Series")
else:
raise TypeError(
"Expecting 'to_replace' to be either a scalar, array-like, "
"dict or None, got invalid type "
f"'{type(to_replace).__name__}'"
)
to_replace_columns = {
key: [value]
if is_scalar(value)
else (value if isinstance(value, list) else as_column(value))
for key, value in to_replace_columns.items()
}
values_columns = {
key: [value]
if is_scalar(value)
else (value if isinstance(value, list) else as_column(value))
for key, value in values_columns.items()
}
for i in to_replace_columns:
if i in values_columns:
if isinstance(values_columns[i], list):
val_col = cast(list, values_columns[i])
all_na = any(val is None for val in val_col)
else:
all_na = False
all_na_columns[i] = all_na
return all_na_columns, to_replace_columns, values_columns
def _is_series(obj: Any) -> bool:
"""
Checks if the `obj` is of type `cudf.Series`
instead of checking for isinstance(obj, cudf.Series)
to avoid circular imports.
"""
return isinstance(obj, IndexedFrame) and obj.ndim == 1
@_performance_tracking
def _drop_rows_by_labels(
obj: DataFrameOrSeries,
labels: ColumnLike | Iterable | str,
level: int | str,
errors: str,
) -> DataFrameOrSeries:
"""Remove rows specified by `labels`.
If `errors="raise"`, an error is raised if some items in `labels` do not
exist in `obj.index`.
Will raise if level(int) is greater or equal to index nlevels.
"""
if isinstance(level, int) and level >= obj.index.nlevels:
raise ValueError("Param level out of bounds.")
if not isinstance(labels, (cudf.Series, cudf.Index)):
labels = as_column(labels)
if isinstance(obj.index, cudf.MultiIndex):
if level is None:
level = 0
levels_index = obj.index.get_level_values(level)
if errors == "raise" and not labels.isin(levels_index).all():
raise KeyError("One or more values not found in axis")
if isinstance(level, int):
ilevel = level
else:
ilevel = obj.index.names.index(level)
# 1. Merge Index df and data df along column axis:
# | id | .index df | data column(s) |
idx_nlv = obj.index.nlevels
working_df = obj.index.to_frame(index=False)
working_df.columns = list(range(idx_nlv))
for i, col in enumerate(obj._data):
working_df[idx_nlv + i] = obj._data[col]
# 2. Set `level` as common index:
# | level | .index df w/o level | data column(s) |
working_df = working_df.set_index(level)
# 3. Use "leftanti" join to drop
# TODO: use internal API with "leftanti" and specify left and right
# join keys to bypass logic check
if isinstance(labels, ColumnBase):
join_index = cudf.Index._from_column(labels, name=level)
else:
join_index = cudf.Index(labels, name=level)
to_join = cudf.DataFrame(index=join_index)
join_res = working_df.join(to_join, how="leftanti")
# 4. Reconstruct original layout, and rename
join_res._insert(
ilevel, name=join_res.index.name, value=join_res.index
)
midx = cudf.MultiIndex.from_frame(
join_res.iloc[:, 0:idx_nlv], names=obj.index.names
)
if isinstance(obj, cudf.Series):
return obj.__class__._from_data(
join_res.iloc[:, idx_nlv:]._data,
index=midx,
name=obj.name,
attrs=obj.attrs,
)
else:
return obj.__class__._from_data(
join_res.iloc[:, idx_nlv:]._data,
index=midx,
columns=obj._data.to_pandas_index,
attrs=obj.attrs,
)
else:
orig_index_type = obj.index.dtype
if errors == "raise" and not labels.isin(obj.index).all():
raise KeyError("One or more values not found in axis")
if isinstance(labels, ColumnBase):
idx = cudf.Index._from_column(labels, name=obj.index.name)
else:
idx = cudf.Index(labels, name=labels.name)
key_df = cudf.DataFrame._from_data(data={}, index=idx)
if isinstance(obj, cudf.DataFrame):
res = obj.join(key_df, how="leftanti")
else:
res = obj.to_frame(name="tmp").join(key_df, how="leftanti")["tmp"]
res.name = obj.name
# Join changes the index to common type,
# but we need to preserve the type of
# index being returned, Hence this type-cast.
res.index = res.index.astype(orig_index_type)
res._attrs = obj.attrs
return res
def _is_same_dtype(lhs_dtype, rhs_dtype):
# Utility specific to `_reindex` to check
# for matching column dtype.
if lhs_dtype == rhs_dtype:
return True
elif (
isinstance(lhs_dtype, cudf.CategoricalDtype)
and isinstance(rhs_dtype, cudf.CategoricalDtype)
and lhs_dtype.categories.dtype == rhs_dtype.categories.dtype
):
# OK if categories are not all the same
return True
elif (
isinstance(lhs_dtype, cudf.CategoricalDtype)
and not isinstance(rhs_dtype, cudf.CategoricalDtype)
and lhs_dtype.categories.dtype == rhs_dtype
):
return True
elif (
isinstance(rhs_dtype, cudf.CategoricalDtype)
and not isinstance(lhs_dtype, cudf.CategoricalDtype)
and rhs_dtype.categories.dtype == lhs_dtype
):
return True
else:
return False
def _append_new_row_inplace(col: ColumnBase, value: ScalarLike) -> None:
"""Append a scalar `value` to the end of `col` inplace.
Cast to common type if possible
"""
val_col = as_column(
value,
dtype=col.dtype
if (
cudf.utils.utils._is_null_host_scalar(value)
or value in {None, np.nan}
)
else None,
)
if val_col.dtype.kind != "f" and val_col.can_cast_safely(col.dtype):
# If the value can be cast to the column dtype, do so
val_col = val_col.astype(col.dtype)
to_type = col.dtype
else:
if (
cudf.get_option("mode.pandas_compatible")
and is_pandas_nullable_extension_dtype(col.dtype)
and val_col.dtype.kind == "f"
):
# If the column is a pandas nullable extension type, we need to
# convert the nans to a nullable type as well.
val_col = val_col.nans_to_nulls()
if len(val_col) == val_col.null_count:
# If the column is all nulls, we can use the column dtype
# to avoid unnecessary casting.
val_col = val_col.astype(col.dtype)
to_type = find_common_type([val_col.dtype, col.dtype])
if (
cudf.get_option("mode.pandas_compatible")
and is_string_dtype(to_type)
and is_mixed_with_object_dtype(val_col, col)
):
raise MixedTypeError("Cannot append mixed types")
if cudf.get_option(
"mode.pandas_compatible"
) and val_col.can_cast_safely(col.dtype):
to_type = col.dtype
val_col = val_col.astype(to_type)
old_col = col.astype(to_type)
res_col = concat_columns([old_col, val_col])._with_type_metadata(to_type)
if (
cudf.get_option("mode.pandas_compatible")
and res_col.dtype != col.dtype
and isinstance(col, CategoricalColumn)
):
raise MixedTypeError(
"Cannot append mixed types: "
f"Column dtype {col.dtype} is not compatible with {res_col.dtype}"
)
col._mimic_inplace(res_col, inplace=True)
| IndexedFrame |
python | python-poetry__poetry | src/poetry/factory.py | {
"start": 1348,
"end": 12367
} | class ____(BaseFactory):
"""
Factory class to create various elements needed by Poetry.
"""
def _ensure_valid_poetry_version(self, cwd: Path | None) -> None:
poetry_file = self.locate(cwd)
pyproject = PyProjectTOML(path=poetry_file)
poetry_config = pyproject.data.get("tool", {}).get("poetry", {})
if version_str := poetry_config.get("requires-poetry"):
version_constraint = parse_constraint(version_str)
version = Version.parse(__version__)
if not version_constraint.allows(version):
raise PoetryError(
f"This project requires Poetry {version_constraint},"
f" but you are using Poetry {version}"
)
def create_poetry(
self,
cwd: Path | None = None,
with_groups: bool = True,
io: IO | None = None,
disable_plugins: bool = False,
disable_cache: bool = False,
) -> Poetry:
if io is None:
io = NullIO()
self._ensure_valid_poetry_version(cwd)
base_poetry = super().create_poetry(cwd=cwd, with_groups=with_groups)
poetry_file = base_poetry.pyproject_path
locker = Locker(poetry_file.parent / "poetry.lock", base_poetry.pyproject.data)
# Loading global configuration
config = Config.create()
# Loading local configuration
local_config_file = TOMLFile(poetry_file.parent / "poetry.toml")
if local_config_file.exists():
if io.is_debug():
io.write_line(f"Loading configuration file {local_config_file.path}")
config.merge(local_config_file.read())
# Load local sources
repositories = {}
existing_repositories = config.get("repositories", {})
for source in base_poetry.local_config.get("source", []):
name = source.get("name")
url = source.get("url")
if name and url and name not in existing_repositories:
repositories[name] = {"url": url}
config.merge({"repositories": repositories})
poetry = Poetry(
poetry_file,
base_poetry.local_config,
base_poetry.package,
locker,
config,
disable_cache,
)
poetry.set_pool(
self.create_pool(
config,
poetry.local_config.get("source", []),
io,
disable_cache=disable_cache,
)
)
if not disable_plugins:
plugin_manager = PluginManager(Plugin.group)
plugin_manager.load_plugins()
plugin_manager.activate(poetry, io)
return poetry
@classmethod
def create_pool(
cls,
config: Config,
sources: Iterable[dict[str, Any]] = (),
io: IO | None = None,
disable_cache: bool = False,
) -> RepositoryPool:
from poetry.repositories import RepositoryPool
from poetry.repositories.repository_pool import Priority
if io is None:
io = NullIO()
if disable_cache:
logger.debug("Disabling source caches")
pool = RepositoryPool(config=config)
explicit_pypi = False
for source in sources:
repository = cls.create_package_source(
source, config, disable_cache=disable_cache
)
priority = Priority[source.get("priority", Priority.PRIMARY.name).upper()]
if io.is_debug():
io.write_line(
f"Adding repository {repository.name} ({repository.url})"
f" and setting it as {priority.name.lower()}"
)
pool.add_repository(repository, priority=priority)
if repository.name.lower() == "pypi":
explicit_pypi = True
# Only add PyPI if no primary repository is configured
if not explicit_pypi:
if pool.has_primary_repositories():
if io.is_debug():
io.write_line("Deactivating the PyPI repository")
else:
pool.add_repository(
cls.create_package_source(
{"name": "pypi"}, config, disable_cache=disable_cache
),
priority=Priority.PRIMARY,
)
if not pool.repositories:
raise PoetryError(
"At least one source must not be configured as 'explicit'."
)
return pool
@classmethod
def create_package_source(
cls, source: dict[str, str], config: Config, disable_cache: bool = False
) -> HTTPRepository:
from poetry.repositories.exceptions import InvalidSourceError
from poetry.repositories.legacy_repository import LegacyRepository
from poetry.repositories.pypi_repository import PyPiRepository
from poetry.repositories.single_page_repository import SinglePageRepository
try:
name = source["name"]
except KeyError:
raise InvalidSourceError("Missing [name] in source.")
pool_size = config.installer_max_workers
if name.lower() == "pypi":
if "url" in source:
raise InvalidSourceError(
"The PyPI repository cannot be configured with a custom url."
)
return PyPiRepository(
config=config,
disable_cache=disable_cache,
pool_size=pool_size,
)
try:
url = source["url"]
except KeyError:
raise InvalidSourceError(f"Missing [url] in source {name!r}.")
repository_class = LegacyRepository
if re.match(r".*\.(htm|html)$", url):
repository_class = SinglePageRepository
return repository_class(
name,
url,
config=config,
disable_cache=disable_cache,
pool_size=pool_size,
)
@classmethod
def create_legacy_pyproject_from_package(cls, package: Package) -> TOMLDocument:
import tomlkit
from poetry.utils.dependency_specification import dependency_to_specification
pyproject: dict[str, Any] = tomlkit.document()
pyproject["tool"] = tomlkit.table(is_super_table=True)
content: dict[str, Any] = tomlkit.table()
pyproject["tool"]["poetry"] = content
content["name"] = package.name
content["version"] = package.version.text
content["description"] = package.description
content["authors"] = package.authors
content["license"] = package.license.id if package.license else ""
if package.classifiers:
content["classifiers"] = package.classifiers
if package.documentation_url:
content["documentation"] = package.documentation_url
if package.repository_url:
content["repository"] = package.repository_url
if package.homepage:
content["homepage"] = package.homepage
if package.maintainers:
content["maintainers"] = package.maintainers
if package.keywords:
content["keywords"] = package.keywords
readmes = []
for readme in package.readmes:
readme_posix_path = readme.as_posix()
with contextlib.suppress(ValueError):
if package.root_dir:
readme_posix_path = readme.relative_to(package.root_dir).as_posix()
readmes.append(readme_posix_path)
if readmes:
content["readme"] = readmes
optional_dependencies = set()
extras_section = None
if package.extras:
extras_section = tomlkit.table()
for extra in package.extras:
_dependencies = []
for dependency in package.extras[extra]:
_dependencies.append(dependency.name)
optional_dependencies.add(dependency.name)
extras_section[extra] = _dependencies
optional_dependencies = set(optional_dependencies)
dependency_section = content["dependencies"] = tomlkit.table()
dependency_section["python"] = package.python_versions
for dep in package.all_requires:
constraint: DependencySpec | str = dependency_to_specification(
dep, tomlkit.inline_table()
)
if not isinstance(constraint, str):
if dep.name in optional_dependencies:
constraint["optional"] = True
if len(constraint) == 1 and "version" in constraint:
assert isinstance(constraint["version"], str)
constraint = constraint["version"]
elif not constraint:
constraint = "*"
for group in dep.groups:
if group == MAIN_GROUP:
dependency_section[dep.name] = constraint
else:
if "group" not in content:
content["group"] = tomlkit.table(is_super_table=True)
if group not in content["group"]:
content["group"][group] = tomlkit.table(is_super_table=True)
if "dependencies" not in content["group"][group]:
content["group"][group]["dependencies"] = tomlkit.table()
content["group"][group]["dependencies"][dep.name] = constraint
if extras_section:
content["extras"] = extras_section
pyproject = cast("TOMLDocument", pyproject)
return pyproject
@classmethod
def validate(
cls, toml_data: dict[str, Any], strict: bool = False
) -> dict[str, list[str]]:
results = super().validate(toml_data, strict)
poetry_config = toml_data["tool"]["poetry"]
results["errors"].extend(
[e.replace("data.", "tool.poetry.") for e in validate_object(poetry_config)]
)
# A project should not depend on itself.
# TODO: consider [project.dependencies] and [project.optional-dependencies]
dependencies = set(poetry_config.get("dependencies", {}).keys())
dependencies.update(poetry_config.get("dev-dependencies", {}).keys())
groups = poetry_config.get("group", {}).values()
for group in groups:
dependencies.update(group.get("dependencies", {}).keys())
dependencies = {canonicalize_name(d) for d in dependencies}
project_name = toml_data.get("project", {}).get("name") or poetry_config.get(
"name"
)
if project_name is not None and canonicalize_name(project_name) in dependencies:
results["errors"].append(
f"Project name ({project_name}) is same as one of its dependencies"
)
return results
| Factory |
python | numba__llvmlite | llvmlite/tests/customize.py | {
"start": 8097,
"end": 8442
} | class ____(runner.TextTestRunner):
resultclass = RefleakTestResult
def _flatten_suite(test):
"""Expand suite into list of tests
"""
if isinstance(test, unittest.TestSuite):
tests = []
for x in test:
tests.extend(_flatten_suite(x))
return tests
else:
return [test]
| RefleakTestRunner |
python | plotly__plotly.py | plotly/graph_objs/histogram2dcontour/_line.py | {
"start": 233,
"end": 5274
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "histogram2dcontour"
_path_str = "histogram2dcontour.line"
_valid_props = {"color", "dash", "smoothing", "width"}
@property
def color(self):
"""
Sets the color of the contour level. Has no effect if
`contours.coloring` is set to "lines".
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def dash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'dash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
"""
return self["dash"]
@dash.setter
def dash(self, val):
self["dash"] = val
@property
def smoothing(self):
"""
Sets the amount of smoothing for the contour lines, where 0
corresponds to no smoothing.
The 'smoothing' property is a number and may be specified as:
- An int or float in the interval [0, 1.3]
Returns
-------
int|float
"""
return self["smoothing"]
@smoothing.setter
def smoothing(self, val):
self["smoothing"] = val
@property
def width(self):
"""
Sets the contour line width in (in px)
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the color of the contour level. Has no effect if
`contours.coloring` is set to "lines".
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
smoothing
Sets the amount of smoothing for the contour lines,
where 0 corresponds to no smoothing.
width
Sets the contour line width in (in px)
"""
def __init__(
self, arg=None, color=None, dash=None, smoothing=None, width=None, **kwargs
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram2dcontour.Line`
color
Sets the color of the contour level. Has no effect if
`contours.coloring` is set to "lines".
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
smoothing
Sets the amount of smoothing for the contour lines,
where 0 corresponds to no smoothing.
width
Sets the contour line width in (in px)
Returns
-------
Line
"""
super().__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.histogram2dcontour.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram2dcontour.Line`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("dash", arg, dash)
self._set_property("smoothing", arg, smoothing)
self._set_property("width", arg, width)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Line |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/tasks.py | {
"start": 26494,
"end": 29756
} | class ____(GoogleCloudBaseOperator):
"""
Resumes a queue in Cloud Tasks.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudTasksQueueResumeOperator`
:param location: The location name in which the queue will be resumed.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"queue_name",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudTasksQueueLink(),)
def __init__(
self,
*,
location: str,
queue_name: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.queue_name = queue_name
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudTasksHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
queue = hook.resume_queue(
location=self.location,
queue_name=self.queue_name,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
CloudTasksQueueLink.persist(
context=context,
queue_name=queue.name,
)
return Queue.to_dict(queue)
| CloudTasksQueueResumeOperator |
python | pypa__setuptools | setuptools/_vendor/jaraco/context.py | {
"start": 8550,
"end": 9552
} | class ____(contextlib.ContextDecorator):
"""
Replace a KeyboardInterrupt with SystemExit(1)
>>> def do_interrupt():
... raise KeyboardInterrupt()
>>> on_interrupt('error')(do_interrupt)()
Traceback (most recent call last):
...
SystemExit: 1
>>> on_interrupt('error', code=255)(do_interrupt)()
Traceback (most recent call last):
...
SystemExit: 255
>>> on_interrupt('suppress')(do_interrupt)()
>>> with __import__('pytest').raises(KeyboardInterrupt):
... on_interrupt('ignore')(do_interrupt)()
"""
def __init__(self, action='error', /, code=1):
self.action = action
self.code = code
def __enter__(self):
return self
def __exit__(self, exctype, excinst, exctb):
if exctype is not KeyboardInterrupt or self.action == 'ignore':
return
elif self.action == 'error':
raise SystemExit(self.code) from excinst
return self.action == 'suppress'
| on_interrupt |
python | tiangolo__fastapi | tests/test_filter_pydantic_sub_model/app_pv1.py | {
"start": 127,
"end": 172
} | class ____(BaseModel):
username: str
| ModelB |
python | bokeh__bokeh | src/bokeh/models/layouts.py | {
"start": 14539,
"end": 15237
} | class ____(LayoutDOM, GridCommon):
""" A CSS grid-based grid container. """
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
children = List(Either(
Tuple(Instance(UIElement), Int, Int),
Tuple(Instance(UIElement), Int, Int, Int, Int)), default=[], help="""
A list of children with their associated position in the grid (row, column).
""")
@error(REPEATED_LAYOUT_CHILD)
def _check_repeated_layout_children(self):
children = [ child[0] for child in self.children ]
if len(children) != len(set(children)):
return str(self)
| GridBox |
python | streamlit__streamlit | lib/streamlit/runtime/caching/storage/local_disk_cache_storage.py | {
"start": 3518,
"end": 4531
} | class ____(CacheStorageManager):
def create(self, context: CacheStorageContext) -> CacheStorage:
"""Creates a new cache storage instance wrapped with in-memory cache layer."""
persist_storage = LocalDiskCacheStorage(context)
return InMemoryCacheStorageWrapper(
persist_storage=persist_storage, context=context
)
def clear_all(self) -> None:
cache_path = get_cache_folder_path()
if os.path.isdir(cache_path):
shutil.rmtree(cache_path)
def check_context(self, context: CacheStorageContext) -> None:
if (
context.persist == "disk"
and context.ttl_seconds is not None
and not math.isinf(context.ttl_seconds)
):
_LOGGER.warning(
"The cached function '%s' has a TTL that will be ignored. "
"Persistent cached functions currently don't support TTL.",
context.function_display_name,
)
| LocalDiskCacheStorageManager |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP046_0.py | {
"start": 1457,
"end": 1499
} | class ____(A[int], Generic[T]):
var: T
| D |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/completion/filesystem.py | {
"start": 270,
"end": 3533
} | class ____(Completer):
"""
Complete for Path variables.
:param get_paths: Callable which returns a list of directories to look into
when the user enters a relative path.
:param file_filter: Callable which takes a filename and returns whether
this file should show up in the completion. ``None``
when no filtering has to be done.
:param min_input_len: Don't do autocompletion when the input string is shorter.
"""
def __init__(
self,
only_directories: bool = False,
get_paths: Callable[[], list[str]] | None = None,
file_filter: Callable[[str], bool] | None = None,
min_input_len: int = 0,
expanduser: bool = False,
) -> None:
self.only_directories = only_directories
self.get_paths = get_paths or (lambda: ["."])
self.file_filter = file_filter or (lambda _: True)
self.min_input_len = min_input_len
self.expanduser = expanduser
def get_completions(
self, document: Document, complete_event: CompleteEvent
) -> Iterable[Completion]:
text = document.text_before_cursor
# Complete only when we have at least the minimal input length,
# otherwise, we can too many results and autocompletion will become too
# heavy.
if len(text) < self.min_input_len:
return
try:
# Do tilde expansion.
if self.expanduser:
text = os.path.expanduser(text)
# Directories where to look.
dirname = os.path.dirname(text)
if dirname:
directories = [
os.path.dirname(os.path.join(p, text)) for p in self.get_paths()
]
else:
directories = self.get_paths()
# Start of current file.
prefix = os.path.basename(text)
# Get all filenames.
filenames = []
for directory in directories:
# Look for matches in this directory.
if os.path.isdir(directory):
for filename in os.listdir(directory):
if filename.startswith(prefix):
filenames.append((directory, filename))
# Sort
filenames = sorted(filenames, key=lambda k: k[1])
# Yield them.
for directory, filename in filenames:
completion = filename[len(prefix) :]
full_name = os.path.join(directory, filename)
if os.path.isdir(full_name):
# For directories, add a slash to the filename.
# (We don't add them to the `completion`. Users can type it
# to trigger the autocompletion themselves.)
filename += "/"
elif self.only_directories:
continue
if not self.file_filter(full_name):
continue
yield Completion(
text=completion,
start_position=0,
display=filename,
)
except OSError:
pass
| PathCompleter |
python | cython__cython | Cython/Compiler/Code.py | {
"start": 50405,
"end": 50879
} | class ____:
"""Global info about a Python string constant held by GlobalState.
"""
# cname string
# encoding string
# intern boolean
# is_unicode boolean
def __init__(self, cname, encoding, intern=False, is_unicode=False):
self.cname = cname
self.encoding = encoding
self.is_unicode = is_unicode
self.intern = intern
def __lt__(self, other):
return self.cname < other.cname
| PyStringConst |
python | spyder-ide__spyder | spyder/utils/snippets/nodes.py | {
"start": 766,
"end": 961
} | class ____:
TABSTOP = 'tabstop'
PLACEHOLDER = 'placeholder'
CHOICE = 'choice'
VARIABLE = 'variable'
VARIABLE_PLACEHOLDER = 'variable_placeholder'
REGEX = 'regex'
| SnippetKind |
python | apache__airflow | devel-common/src/sphinx_exts/docroles.py | {
"start": 1009,
"end": 3512
} | class ____(Exception):
"""Exception for roles extension"""
def get_template_field(env, fullname) -> list[str]:
"""
Gets template fields for specific operator class.
:param env: env config
:param fullname: Full path to operator class.
For example: ``airflow.providers.google.cloud.operators.vision.CloudVisionCreateProductSetOperator``
:return: List of template field
"""
modname, classname = fullname.rsplit(".", 1)
with mock(env.config.autodoc_mock_imports):
mod = import_module(modname)
clazz = getattr(mod, classname)
if not clazz:
raise RoleException(f"Error finding {classname} class in {modname} module.")
template_fields = getattr(clazz, "template_fields")
if not template_fields:
raise RoleException(f"Could not find the template fields for {classname} class in {modname} module.")
return list(template_fields)
def template_field_role(
app,
typ,
rawtext,
text,
lineno,
inliner,
options=None,
content=None,
):
"""
A role that allows you to include a list of template fields in the middle of the text. This is especially
useful when writing guides describing how to use the operator.
The result is a list of fields where each field is shorted in the literal block.
Sample usage::
:template-fields:`airflow.operators.bash.BashOperator`
For further information look at:
* [http://docutils.sourceforge.net/docs/howto/rst-roles.html](Creating reStructuredText Interpreted
Text Roles)
"""
if options is None:
options = {}
if content is None:
content = []
text = utils.unescape(text)
try:
template_fields = get_template_field(app.env, text)
except RoleException as e:
msg = inliner.reporter.error(
f"invalid class name {text} \n{e}",
line=lineno,
)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
node = nodes.inline(rawtext=rawtext)
for i, field in enumerate(template_fields):
if i != 0:
node += nodes.Text(", ")
node += nodes.literal(field, "", nodes.Text(field))
return [node], []
def setup(app):
"""Sets the extension up"""
from docutils.parsers.rst import roles
roles.register_local_role("template-fields", partial(template_field_role, app))
return {"version": "builtin", "parallel_read_safe": True, "parallel_write_safe": True}
| RoleException |
python | chroma-core__chroma | chromadb/api/types.py | {
"start": 21059,
"end": 21261
} | class ____(TypedDict):
ids: IDs
embeddings: Embeddings
metadatas: Optional[Metadatas]
documents: Optional[Documents]
uris: Optional[URIs]
# Upsert result doesn't exist.
| UpsertRequest |
python | django-haystack__django-haystack | test_haystack/elasticsearch5_tests/test_inputs.py | {
"start": 77,
"end": 3508
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.query_obj = connections["elasticsearch"].get_query()
def test_raw_init(self):
raw = inputs.Raw("hello OR there, :you")
self.assertEqual(raw.query_string, "hello OR there, :you")
self.assertEqual(raw.kwargs, {})
self.assertEqual(raw.post_process, False)
raw = inputs.Raw("hello OR there, :you", test="really")
self.assertEqual(raw.query_string, "hello OR there, :you")
self.assertEqual(raw.kwargs, {"test": "really"})
self.assertEqual(raw.post_process, False)
def test_raw_prepare(self):
raw = inputs.Raw("hello OR there, :you")
self.assertEqual(raw.prepare(self.query_obj), "hello OR there, :you")
def test_clean_init(self):
clean = inputs.Clean("hello OR there, :you")
self.assertEqual(clean.query_string, "hello OR there, :you")
self.assertEqual(clean.post_process, True)
def test_clean_prepare(self):
clean = inputs.Clean("hello OR there, :you")
self.assertEqual(clean.prepare(self.query_obj), "hello or there, \\:you")
def test_exact_init(self):
exact = inputs.Exact("hello OR there, :you")
self.assertEqual(exact.query_string, "hello OR there, :you")
self.assertEqual(exact.post_process, True)
def test_exact_prepare(self):
exact = inputs.Exact("hello OR there, :you")
self.assertEqual(exact.prepare(self.query_obj), '"hello OR there, :you"')
exact = inputs.Exact("hello OR there, :you", clean=True)
self.assertEqual(exact.prepare(self.query_obj), '"hello or there, \\:you"')
def test_not_init(self):
not_it = inputs.Not("hello OR there, :you")
self.assertEqual(not_it.query_string, "hello OR there, :you")
self.assertEqual(not_it.post_process, True)
def test_not_prepare(self):
not_it = inputs.Not("hello OR there, :you")
self.assertEqual(not_it.prepare(self.query_obj), "NOT (hello or there, \\:you)")
def test_autoquery_init(self):
autoquery = inputs.AutoQuery('panic -don\'t "froody dude"')
self.assertEqual(autoquery.query_string, 'panic -don\'t "froody dude"')
self.assertEqual(autoquery.post_process, False)
def test_autoquery_prepare(self):
autoquery = inputs.AutoQuery('panic -don\'t "froody dude"')
self.assertEqual(
autoquery.prepare(self.query_obj), 'panic NOT don\'t "froody dude"'
)
def test_altparser_init(self):
altparser = inputs.AltParser("dismax")
self.assertEqual(altparser.parser_name, "dismax")
self.assertEqual(altparser.query_string, "")
self.assertEqual(altparser.kwargs, {})
self.assertEqual(altparser.post_process, False)
altparser = inputs.AltParser("dismax", "douglas adams", qf="author", mm=1)
self.assertEqual(altparser.parser_name, "dismax")
self.assertEqual(altparser.query_string, "douglas adams")
self.assertEqual(altparser.kwargs, {"mm": 1, "qf": "author"})
self.assertEqual(altparser.post_process, False)
def test_altparser_prepare(self):
altparser = inputs.AltParser("dismax", "douglas adams", qf="author", mm=1)
self.assertEqual(
altparser.prepare(self.query_obj),
"""{!dismax mm=1 qf=author v='douglas adams'}""",
)
| Elasticsearch5InputTestCase |
python | huggingface__transformers | src/transformers/models/ernie4_5/modular_ernie4_5.py | {
"start": 4896,
"end": 5630
} | class ____(LlamaForCausalLM):
@can_return_tuple
@auto_docstring
def forward(self, **super_kwargs):
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
"""
super().forward(**super_kwargs)
__all__ = [
"Ernie4_5ForCausalLM",
"Ernie4_5Model", # noqa: F822
"Ernie4_5PreTrainedModel", # noqa: F822
]
| Ernie4_5ForCausalLM |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/errors.py | {
"start": 23907,
"end": 24034
} | class ____(DagsterUserCodeExecutionError):
"""Error raised during the execution of a user-defined hook."""
| HookExecutionError |
python | oauthlib__oauthlib | tests/oauth1/rfc5849/endpoints/test_signature_only.py | {
"start": 237,
"end": 1957
} | class ____(TestCase):
def setUp(self):
self.validator = MagicMock(wraps=RequestValidator())
self.validator.check_client_key.return_value = True
self.validator.allowed_signature_methods = ['HMAC-SHA1']
self.validator.get_client_secret.return_value = 'bar'
self.validator.timestamp_lifetime = 600
self.validator.validate_client_key.return_value = True
self.validator.validate_timestamp_and_nonce.return_value = True
self.validator.dummy_client = 'dummy'
self.validator.dummy_secret = 'dummy'
self.endpoint = SignatureOnlyEndpoint(self.validator)
self.client = Client('foo', client_secret='bar')
self.uri, self.headers, self.body = self.client.sign(
'https://i.b/protected_resource')
def test_missing_parameters(self):
v, r = self.endpoint.validate_request(
self.uri)
self.assertFalse(v)
def test_validate_client_key(self):
self.validator.validate_client_key.return_value = False
v, r = self.endpoint.validate_request(
self.uri, headers=self.headers)
self.assertFalse(v)
def test_validate_signature(self):
client = Client('foo')
_, headers, _ = client.sign(self.uri + '/extra')
v, r = self.endpoint.validate_request(
self.uri, headers=headers)
self.assertFalse(v)
def test_valid_request(self):
v, r = self.endpoint.validate_request(
self.uri, headers=self.headers)
self.assertTrue(v)
self.validator.validate_timestamp_and_nonce.assert_called_once_with(
self.client.client_key, ANY, ANY, ANY)
| SignatureOnlyEndpointTest |
python | marshmallow-code__marshmallow | performance/benchmark.py | {
"start": 1609,
"end": 3859
} | class ____:
def __init__(
self,
id,
author,
content,
posted_at,
book_name,
page_number,
line_number,
col_number,
):
self.id = id
self.author = author
self.content = content
self.posted_at = posted_at
self.book_name = book_name
self.page_number = page_number
self.line_number = line_number
self.col_number = col_number
def run_timeit(quotes, iterations, repeat, *, profile=False):
quotes_schema = QuoteSchema(many=True)
if profile:
profile = cProfile.Profile()
profile.enable()
gc.collect()
best = min(
timeit.repeat(
lambda: quotes_schema.dump(quotes),
"gc.enable()",
number=iterations,
repeat=repeat,
)
)
if profile:
profile.disable()
profile.dump_stats("marshmallow.pprof")
return best * 1e6 / iterations / len(quotes)
def main():
parser = argparse.ArgumentParser(description="Runs a benchmark of Marshmallow.")
parser.add_argument(
"--iterations",
type=int,
default=1000,
help="Number of iterations to run per test.",
)
parser.add_argument(
"--repeat",
type=int,
default=5,
help="Number of times to repeat the performance test. The minimum will "
"be used.",
)
parser.add_argument(
"--object-count", type=int, default=20, help="Number of objects to dump."
)
parser.add_argument(
"--profile",
action="store_true",
help="Whether or not to profile marshmallow while running the benchmark.",
)
args = parser.parse_args()
quotes = [
Quote(
i,
Author(i, "Foo", "Bar", 42, 66, "123 Fake St"),
"Hello World",
datetime.datetime(2019, 7, 4, tzinfo=datetime.timezone.utc),
"The World",
34,
3,
70,
)
for i in range(args.object_count)
]
print(
f"Benchmark Result: {run_timeit(quotes, args.iterations, args.repeat, profile=args.profile):.2f} usec/dump"
)
if __name__ == "__main__":
main()
| Quote |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/event/base.py | {
"start": 13914,
"end": 14905
} | class ____(Generic[_ET]):
"""Descriptor used by target classes to
deliver the _Dispatch class at the class level
and produce new _Dispatch instances for target
instances.
"""
def __init__(self, events: Type[_HasEventsDispatch[_ET]]):
self.dispatch = events.dispatch
self.events = events
@overload
def __get__(
self, obj: Literal[None], cls: Type[Any]
) -> Type[_Dispatch[_ET]]: ...
@overload
def __get__(self, obj: Any, cls: Type[Any]) -> _DispatchCommon[_ET]: ...
def __get__(self, obj: Any, cls: Type[Any]) -> Any:
if obj is None:
return self.dispatch
disp = self.dispatch._for_instance(obj)
try:
obj.__dict__["dispatch"] = disp
except AttributeError as ae:
raise TypeError(
"target %r doesn't have __dict__, should it be "
"defining _slots_dispatch?" % (obj,)
) from ae
return disp
| dispatcher |
python | allegroai__clearml | clearml/automation/trigger.py | {
"start": 3379,
"end": 4201
} | class ____(BaseTrigger):
_task_param = "${model.id}"
_key = "models"
_update_field = "last_update"
_change_field = "last_change"
on_publish = attrib(type=bool, default=None)
on_archive = attrib(type=bool, default=None)
def build_query(self, ref_time: datetime, client: Optional[APIClient] = None) -> dict:
query = super(ModelTrigger, self).build_query(ref_time, client)
if self.on_publish:
query.update({"ready": True})
if self.on_archive:
system_tags = list(set(query.get("system_tags", []) + ["archived"]))
query.update({"system_tags": system_tags})
return query
@property
def _only_fields(self) -> Set[str]:
return {"id", "name", "ready", "tags", self._update_field, self._change_field}
@attrs
| ModelTrigger |
python | jazzband__django-oauth-toolkit | oauth2_provider/models.py | {
"start": 16970,
"end": 17111
} | class ____(AbstractAccessToken):
class Meta(AbstractAccessToken.Meta):
swappable = "OAUTH2_PROVIDER_ACCESS_TOKEN_MODEL"
| AccessToken |
python | pytorch__pytorch | test/inductor/test_utils.py | {
"start": 459,
"end": 8078
} | class ____(TestCase):
def test_zip_schema(self):
def foo(x: torch.Tensor) -> None:
pass
result = torch.library.custom_op("mylib::foo", foo, mutates_args={"x"})
schema = result._opoverload._schema
g = torch.tensor([11, 2])
found = False
for arg, val in torch._library.utils.zip_schema(schema, [], {"x": g}):
if arg.name == "x":
found = True
self.assertTrue(found)
found = False
for arg, val in torch._library.utils.zip_schema(schema, [g], {}):
if arg.name == "x":
found = True
self.assertTrue(found)
def testSympySubs(self):
# integer and nonnegetaive attributes are preserved.
expr = Symbol("x")
result = sympy_subs(expr, {expr: "y"})
self.assertEqual(result.name, "y")
self.assertEqual(result.is_integer, None)
self.assertEqual(result.is_nonnegative, None)
expr = Symbol("x", integer=True, nonnegative=False)
result = sympy_subs(expr, {expr: "y"})
self.assertEqual(result.name, "y")
self.assertEqual(result.is_integer, True)
self.assertEqual(result.is_nonnegative, False)
# invalid replacement.
expr = Symbol("x", integer=True)
result = sympy_subs(expr, {Symbol("x"): Symbol("y")})
self.assertEqual(result.name, "x")
# valid replacement since properties match.
expr = Symbol("x", integer=True)
result = sympy_subs(expr, {Symbol("x", integer=True): Symbol("y")})
self.assertEqual(result.name, "y")
# invalid replacement.
expr = Symbol("x", integer=None)
result = sympy_subs(expr, {Symbol("x", integer=False): Symbol("y")})
self.assertEqual(result.name, "x")
# replaced can't be string
self.assertRaises(AssertionError, sympy_subs, expr, {"x": "y"})
# replaced can be an expression
expr = Symbol("x")
expr = abs(expr)
self.assertEqual(expr.is_integer, None)
self.assertEqual(expr.is_nonnegative, None)
# replace abs(x) with y
# propagate abs(x) sympy properties.
result = sympy_subs(expr, {expr: Symbol("y")})
self.assertEqual(result.name, "y")
self.assertEqual(result.is_integer, None)
self.assertEqual(result.is_nonnegative, None)
def test_sympy_str(self):
self.assertEqual(sympy_str(sympify("a+b+c")), "a + b + c")
self.assertEqual(sympy_str(sympify("a*b+c")), "c + a * b")
self.assertEqual(sympy_str(sympify("a+b*(c+d)")), "a + b * (c + d)")
self.assertEqual(sympy_str(sympify("(a+b)*(c+d)")), "(a + b) * (c + d)")
self.assertEqual(sympy_str(sympify("-a")), "-a")
self.assertEqual(sympy_str(sympify("a-b")), "a - b")
self.assertEqual(sympy_str(sympify("a+-b")), "a - b")
def test_flops_fx(self):
def create_fx_node(
aten, op_overload: torch._ops.OpOverload, args, kwargs
) -> tuple[torch.fx.Node, torch.fx.Node]:
node1 = torch.fx.Node(
graph=torch.fx.Graph(),
name="",
op="call_function",
target=aten,
args=args,
kwargs=kwargs,
)
# name: str = aten.overloads()[0]
# if aten == torch.ops.aten.addmm:
# name = "default"
# print(aten)
# print(aten.overloads())
# print(name)
# op_overload: torch._ops.OpOverload = getattr(aten, name)
node2 = torch.fx.Node(
graph=torch.fx.Graph(),
name="",
op="call_function",
target=op_overload,
args=args,
kwargs=kwargs,
)
return node1, node2
with V.set_fake_mode(
torch._subclasses.FakeTensorMode(allow_non_fake_inputs=True)
):
trues = [
(
torch.ops.aten.addmm,
torch.ops.aten.addmm.default,
(torch.Tensor(4, 4), torch.Tensor(4, 5), torch.Tensor(5, 4)),
{},
),
(
torch.ops.aten.bmm,
torch.ops.aten.bmm.default,
(torch.Tensor(10, 4, 5), torch.Tensor(10, 5, 4)),
{},
),
(
torch.ops.aten.mm,
torch.ops.aten.mm.default,
(torch.Tensor(2, 3), torch.Tensor(3, 2)),
{},
),
(
torch.ops.aten.convolution,
torch.ops.aten.convolution.default,
(
torch.Tensor(2, 2, 3),
torch.Tensor(2, 2, 2),
torch.Tensor(2),
(1,),
(0,),
(1,),
True,
(0,),
1,
),
{},
),
(
torch.ops.aten._convolution,
torch.ops.aten._convolution.deprecated,
(
torch.Tensor(2, 2, 2),
torch.Tensor(2, 2, 2),
torch.Tensor(2),
(1,),
(0,),
(1,),
True,
(0,),
1,
False,
True,
False,
),
{},
),
]
# we don't support pointwise ops
falses = [
(
torch.ops.aten.add,
torch.ops.aten.add.Tensor,
(torch.Tensor(1, 2, 3), torch.Tensor(1, 2, 3)),
{},
),
(
torch.ops.aten.mul,
torch.ops.aten.mul.Tensor,
(torch.Tensor(1, 2, 3), torch.Tensor(1, 2, 3)),
{},
),
]
for t, t2, args, kwargs in trues:
fx_node_1, fx_node_2 = create_fx_node(t, t2, args, kwargs)
self.assertTrue(
countable_fx(fx_node_1), f"Expected true {t}: {fx_node_1}"
)
self.assertTrue(
countable_fx(fx_node_2), f"Expected true {t}: {fx_node_2}"
)
self.assertNotEqual(count_flops_fx(fx_node_1), None)
self.assertNotEqual(count_flops_fx(fx_node_2), None)
for f, f2, args, kwargs in falses:
fx_node_1, fx_node_2 = create_fx_node(f, f2, args, kwargs)
self.assertFalse(
countable_fx(fx_node_1), f"Expected false {f}: {fx_node_1}"
)
self.assertFalse(
countable_fx(fx_node_2), f"Expected false {f}: {fx_node_2}"
)
@unittest.skipIf(not torch.cuda.is_available(), "skip if no device")
@dtypes(torch.float16, torch.bfloat16, torch.float32)
def test_get_device_tflops(self, dtype):
ret = get_device_tflops(dtype)
self.assertTrue(type(ret) is float)
instantiate_device_type_tests(TestUtils, globals(), allow_xpu=True)
if __name__ == "__main__":
run_tests()
| TestUtils |
python | facebook__pyre-check | client/language_server/protocol.py | {
"start": 8850,
"end": 8998
} | class ____(json_mixins.CamlCaseAndExcludeJsonMixin):
did_save: bool = False
@dataclasses.dataclass(frozen=True)
| TextDocumentSyncClientCapabilities |
python | huggingface__transformers | src/transformers/models/hiera/modeling_hiera.py | {
"start": 21348,
"end": 24716
} | class ____(GradientCheckpointingLayer):
def __init__(
self,
config,
depth: int,
hidden_size: int,
hidden_size_output: int,
num_heads: int,
drop_path: list[float],
query_stride: list[int],
window_size: int,
use_mask_unit_attn: bool,
stage_num: Optional[int] = None,
) -> None:
super().__init__()
# we need to know if the previous stage used masked attention
# mask unit or global attention.
# lag by 1 layer, so that global attention,
# applied post pooling on lower resolution
previous_stage_used_masked_attention = False
if stage_num is not None:
previous_stage_used_masked_attention = config.masked_unit_attention[stage_num - 1 if stage_num > 0 else 0]
self.layers = nn.ModuleList(
[
HieraLayer(
config=config,
hidden_size=hidden_size if i == 0 else hidden_size_output,
hidden_size_output=hidden_size_output,
num_heads=num_heads,
drop_path=drop_path[i],
query_stride=query_stride[i],
window_size=window_size,
use_mask_unit_attn=use_mask_unit_attn or (previous_stage_used_masked_attention and i == 0),
)
for i in range(depth)
]
)
def forward(
self, hidden_states: torch.Tensor, output_attentions: bool = False
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
for i, layer_module in enumerate(self.layers):
(hidden_states, attn_weights) = layer_module(hidden_states, output_attentions=output_attentions)
return hidden_states, attn_weights
def undo_windowing(hidden_states: torch.Tensor, shape: list[int], mask_unit_shape: list[int]) -> torch.Tensor:
"""
Restore spatial organization by undoing windowed organization of mask units.
Args:
hidden_states (`torch.Tensor`): The hidden states tensor of shape `[batch_size, num_mask_unit_height*num_mask_unit_width, hidden_size]`.
shape (`list[int]`): The original shape of the hidden states tensor before windowing.
mask_unit_shape (`list[int]`): The shape of the mask units used for windowing.
Returns:
torch.Tensor: The restored hidden states tensor of shape [batch_size, num_mask_unit_height*mask_unit_height, num_mask_unit_width*mask_unit_width, hidden_size].
"""
batch_size, hidden_size = hidden_states.shape[0], hidden_states.shape[-1]
# From: [batch_size, num_mask_unit_height*num_mask_unit_width, hidden_size]
# To: [batch_size, num_mask_unit_height, num_mask_unit_width, mask_unit_height, mask_unit_width, hidden_size]
num_mask_units = [s // mu for s, mu in zip(shape, mask_unit_shape)]
hidden_states = hidden_states.view(batch_size, *num_mask_units, *mask_unit_shape, hidden_size)
# From: [batch_size, num_mask_unit_height, num_mask_unit_width, mask_unit_height, mask_unit_width, hidden_size]
# To: [batch_size, num_mask_unit_height*mask_unit_height, num_mask_unit_width*mask_unit_width, hidden_size]
hidden_states = hidden_states.permute(0, 1, 3, 2, 4, 5)
hidden_states = hidden_states.reshape(batch_size, *shape, hidden_size)
return hidden_states
| HieraStage |
python | ansible__ansible | test/units/_internal/templating/fixtures/valid_collection/ansible_collections/valid/also_valid/plugins/filter/correct.py | {
"start": 203,
"end": 316
} | class ____:
@property
def accept_args_markers(self) -> t.NoReturn:
raise NotImplementedError()
| Bomb |
python | pytorch__pytorch | torch/utils/benchmark/examples/compare.py | {
"start": 202,
"end": 2931
} | class ____:
"""Emulate different versions of pytorch.
In normal circumstances this would be done with multiple processes
writing serialized measurements, but this simplifies that model to
make the example clearer.
"""
def __init__(self, real_torch, extra_ns_per_element) -> None:
self._real_torch = real_torch
self._extra_ns_per_element = extra_ns_per_element
def extra_overhead(self, result):
# time.sleep has a ~65 us overhead, so only fake a
# per-element overhead if numel is large enough.
numel = int(result.numel())
if numel > 5000:
time.sleep(numel * self._extra_ns_per_element * 1e-9)
return result
def add(self, *args, **kwargs):
return self.extra_overhead(self._real_torch.add(*args, **kwargs))
def mul(self, *args, **kwargs):
return self.extra_overhead(self._real_torch.mul(*args, **kwargs))
def cat(self, *args, **kwargs):
return self.extra_overhead(self._real_torch.cat(*args, **kwargs))
def matmul(self, *args, **kwargs):
return self.extra_overhead(self._real_torch.matmul(*args, **kwargs))
def main() -> None:
tasks = [
("add", "add", "torch.add(x, y)"),
("add", "add (extra +0)", "torch.add(x, y + zero)"),
]
serialized_results = []
repeats = 2
timers = [
benchmark_utils.Timer(
stmt=stmt,
globals={
"torch": torch if branch == "master" else FauxTorch(torch, overhead_ns),
"x": torch.ones((size, 4)),
"y": torch.ones((1, 4)),
"zero": torch.zeros(()),
},
label=label,
sub_label=sub_label,
description=f"size: {size}",
env=branch,
num_threads=num_threads,
)
for branch, overhead_ns in [("master", None), ("my_branch", 1), ("severe_regression", 5)]
for label, sub_label, stmt in tasks
for size in [1, 10, 100, 1000, 10000, 50000]
for num_threads in [1, 4]
]
for i, timer in enumerate(timers * repeats):
serialized_results.append(pickle.dumps(
timer.blocked_autorange(min_run_time=0.05)
))
print(f"\r{i + 1} / {len(timers) * repeats}", end="")
sys.stdout.flush()
print()
comparison = benchmark_utils.Compare([
pickle.loads(i) for i in serialized_results
])
print("== Unformatted " + "=" * 80 + "\n" + "/" * 95 + "\n")
comparison.print()
print("== Formatted " + "=" * 80 + "\n" + "/" * 93 + "\n")
comparison.trim_significant_figures()
comparison.colorize()
comparison.print()
if __name__ == "__main__":
main()
| FauxTorch |
python | ray-project__ray | python/ray/data/_internal/logical/rules/configure_map_task_memory.py | {
"start": 349,
"end": 2591
} | class ____(Rule, abc.ABC):
def apply(self, plan: PhysicalPlan) -> PhysicalPlan:
for op in plan.dag.post_order_iter():
if not isinstance(op, MapOperator):
continue
def ray_remote_args_fn(
op: MapOperator = op, original_ray_remote_args_fn=op._ray_remote_args_fn
) -> Dict[str, Any]:
assert isinstance(op, MapOperator), op
static_ray_remote_args = copy.deepcopy(op._ray_remote_args)
dynamic_ray_remote_args = {}
if original_ray_remote_args_fn is not None:
dynamic_ray_remote_args = original_ray_remote_args_fn()
if (
"memory" not in static_ray_remote_args
and "memory" not in dynamic_ray_remote_args
# If this rule configures memory but the user hasn't specified
# memory in the placement group, then Ray won't be able to
# schedule tasks.
and not any(
isinstance(
scheduling_strategy, PlacementGroupSchedulingStrategy
)
for scheduling_strategy in (
static_ray_remote_args.get("scheduling_strategy"),
dynamic_ray_remote_args.get("scheduling_strategy"),
op.data_context.scheduling_strategy,
op.data_context.scheduling_strategy_large_args,
)
)
):
memory = self.estimate_per_task_memory_requirement(op)
if memory is not None:
dynamic_ray_remote_args["memory"] = memory
return dynamic_ray_remote_args
op._ray_remote_args_fn = ray_remote_args_fn
return plan
@abc.abstractmethod
def estimate_per_task_memory_requirement(self, op: MapOperator) -> Optional[int]:
"""Estimate the per-task memory requirement for the given map operator.
This is used to configure the `memory` argument in `ray.remote`.
"""
...
| ConfigureMapTaskMemoryRule |
python | huggingface__transformers | src/transformers/models/sam2/modeling_sam2.py | {
"start": 40893,
"end": 42986
} | class ____(nn.Module):
def __init__(self, config: Sam2MaskDecoderConfig):
super().__init__()
self.config = config
self.num_hidden_layers = config.num_hidden_layers
self.layers = nn.ModuleList()
for i in range(self.num_hidden_layers):
self.layers.append(Sam2TwoWayAttentionBlock(config, skip_first_layer_pe=(i == 0)))
self.final_attn_token_to_image = Sam2Attention(config)
self.layer_norm_final_attn = nn.LayerNorm(config.hidden_size)
def forward(
self,
point_embeddings: Tensor,
image_embeddings: Tensor,
image_positional_embeddings: Tensor,
attention_similarity: Tensor,
target_embedding=None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, BaseModelOutput]:
if image_embeddings is None:
raise ValueError("You have to specify an image_embedding")
image_embeddings = image_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1)
image_positional_embeddings = image_positional_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1)
# Prepare queries
queries = point_embeddings
keys = image_embeddings
# Apply transformer blocks and final layernorm
for layer in self.layers:
if target_embedding is not None:
queries += target_embedding
queries, keys, _ = layer(
queries=queries,
keys=keys,
query_point_embedding=point_embeddings,
key_point_embedding=image_positional_embeddings,
attention_similarity=attention_similarity,
**kwargs,
)
# Apply the final attention layer from the points to the image
query = queries + point_embeddings
key = keys + image_positional_embeddings
attn_out, _ = self.final_attn_token_to_image(query=query, key=key, value=keys)
queries = queries + attn_out
queries = self.layer_norm_final_attn(queries)
return queries, keys
| Sam2TwoWayTransformer |
python | django__django | django/contrib/admin/widgets.py | {
"start": 643,
"end": 1651
} | class ____(forms.SelectMultiple):
"""
A SelectMultiple with a JavaScript filter interface.
Note that the resulting JavaScript assumes that the jsi18n
catalog has been loaded in the page
"""
class Media:
js = [
"admin/js/core.js",
"admin/js/SelectBox.js",
"admin/js/SelectFilter2.js",
]
def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
self.verbose_name = verbose_name
self.is_stacked = is_stacked
super().__init__(attrs, choices)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context["widget"]["attrs"]["class"] = "selectfilter"
if self.is_stacked:
context["widget"]["attrs"]["class"] += "stacked"
context["widget"]["attrs"]["data-field-name"] = self.verbose_name
context["widget"]["attrs"]["data-is-stacked"] = int(self.is_stacked)
return context
| FilteredSelectMultiple |
python | encode__django-rest-framework | tests/test_permissions.py | {
"start": 11331,
"end": 11530
} | class ____(serializers.ModelSerializer):
class Meta:
model = BasicPermModel
fields = '__all__'
# Custom object-level permission, that includes 'view' permissions
| BasicPermSerializer |
python | facelessuser__soupsieve | tests/test_level3/test_enabled.py | {
"start": 52,
"end": 4203
} | class ____(util.TestCase):
"""Test enabled selectors."""
MARKUP = """
<body>
<form action="#">
<fieldset id='a' disabled>
<legend>
Simple fieldset <input type="radio" id="1" checked>
<fieldset id='b' disabled>
<legend>Simple fieldset <input type="radio" id="2" checked></legend>
<input type="radio" id="3" checked>
<label for="radio">radio</label>
</fieldset>
</legend>
<fieldset id='c' disabled>
<legend>Simple fieldset <input type="radio" id="4" checked></legend>
<input type="radio" id="5" checked>
<label for="radio">radio</label>
</fieldset>
<input type="radio" id="6" checked>
<label for="radio">radio</label>
</fieldset>
<optgroup id="opt-enable">
<option id="7" disabled>option</option>
</optgroup>
<optgroup id="8" disabled>
<option id="9">option</option>
</optgroup>
<a href="" id="link">text</a>
</form>
</body>
"""
MAKRUP_NESTED = """
<body>
<form action="#">
<fieldset id='a' disabled>
<legend>
Simple fieldset <input type="radio" id="1" checked>
<fieldset id='b'>
<legend>Simple fieldset <input type="radio" id="2" checked></legend>
<input type="radio" id="3" checked>
<label for="radio">radio</label>
</fieldset>
</legend>
<fieldset id='c' disabled>
<legend>Simple fieldset <input type="radio" id="4" checked></legend>
<input type="radio" id="5" checked>
<label for="radio">radio</label>
</fieldset>
<input type="radio" id="6" checked>
<label for="radio">radio</label>
</fieldset>
<optgroup id="opt-enable">
<option id="7" disabled>option</option>
</optgroup>
<optgroup id="8" disabled>
<option id="9">option</option>
</optgroup>
<a href="" id="link">text</a>
</form>
</body>
"""
def test_enable_html5(self):
"""
Test enable in the HTML5 parser.
`:any-link`
Form elements that have `disabled`.
Form elements that are children of a disabled `fieldset`, but not it's `legend`.
"""
self.assert_selector(
self.MARKUP,
":enabled",
['1', '2', 'opt-enable'],
flags=util.HTML5
)
def test_enable_lxml(self):
"""
Test enable in the `lxml` HTML parser.
`:any-link`
Form elements that have `disabled`.
Form elements that are children of a disabled `fieldset`, but not it's `legend`.
"""
self.assert_selector(
self.MARKUP,
":enabled",
['1', 'opt-enable'],
flags=util.LXML_HTML
)
def test_enable_python(self):
"""
Test enable in the built-in HTML parser.
`:any-link`
Form elements that have `disabled`.
Form elements that are children of a disabled `fieldset`, but not it's `legend`.
"""
self.assert_selector(
self.MARKUP,
":enabled",
['1', '2', 'opt-enable'],
flags=util.PYHTML
)
def test_enable_with_nested_disabled_form_html5(self):
"""Test enable in the HTML5 parser."""
self.assert_selector(
self.MAKRUP_NESTED,
":enabled",
['1', '2', 'opt-enable', 'b', '3'],
flags=util.HTML5
)
def test_enable_with_nested_disabled_form_lxml(self):
"""Test enable in the `lxml` HTML parser."""
self.assert_selector(
self.MAKRUP_NESTED,
":enabled",
['1', 'opt-enable'],
flags=util.LXML_HTML
)
def test_enable_with_nested_disabled_form_python(self):
"""Test enable in the built-in HTML parser."""
self.assert_selector(
self.MAKRUP_NESTED,
":enabled",
['1', '2', 'opt-enable', 'b', '3'],
flags=util.PYHTML
)
| TestEnabled |
python | pytorch__pytorch | torch/_inductor/tiling_utils.py | {
"start": 8768,
"end": 21600
} | class ____:
"""
Finds a Pointwise, Reduction Split that compatible with all nodes in a SchedulerNode.
"""
def __init__(
self,
node: Union["FusedSchedulerNode", "SchedulerNode"],
):
self.node = node
self.pointwise_numel: sympy.Expr = node.group[1][0]
self.red_numel: sympy.Expr = node.group[1][1]
self.pw_split_options: dict[int, OrderedSet[Split]] = defaultdict(OrderedSet)
self.reduction_split: Split = ()
self.all_node_sizes: OrderedSet[tuple[Split, Split]] = OrderedSet()
fused_group = node.group[1]
for n in reversed(node.get_nodes()):
if not isinstance(n, torch._inductor.scheduler.SchedulerNode):
continue
# if we can't split the pw ranges into a (pw, red) split,
# dont add as a split option, but do make sure we check that this size
# is splittable
maybe_splits = get_pw_red_splits(
n, self.pointwise_numel, self.red_numel, none_if_not_divisible=True
)
if maybe_splits is None:
self.all_node_sizes.add(n._body.sizes)
continue
(_, n_pw_splits), (_, n_red_splits) = maybe_splits
# fill in reduction size
n_pw_splits, n_red_splits = (
torch._inductor.codegen.simd.SIMDKernel.prepare_split_iteration_lengths(
fused_group, (n_pw_splits, n_red_splits), self.red_numel
)
)
self.pw_split_options[len(n_pw_splits)].add(tuple(n_pw_splits))
# initially, we are just going to do a single reduction split since
# reduction tiling is off by default. even if we miss a reduction split,
# we can recover it in the split var analysis.
# TODO: an earlier version for this code tried to iteratively try the maximum number
# of split vars, by iterating over both pointwise and reduction. but not worth
# the complexity yet.
if n_red_splits != ():
self.reduction_split = (sympy_product(n_red_splits),)
n_size = (tuple(n_pw_splits), tuple(n_red_splits))
self.all_node_sizes.add(n_size)
self.seen_pw_splits: OrderedSet[Split] = OrderedSet()
def get_node_splits(self) -> tuple[Split, Split]:
"""
Get a compatible pointwise, reduction split of the node
"""
if len(self.all_node_sizes) == 1:
return next(iter(self.all_node_sizes))
max_pw_split = max(self.pw_split_options.keys())
for pw_split_len in range(max_pw_split, 0, -1):
for pw_split in self.pw_split_options[pw_split_len]:
if out := self.try_split(pw_split, self.reduction_split):
return out
# combine dims for next round
for pw_split in self.pw_split_options[pw_split_len]:
for i in range(len(pw_split) - 1):
new_split = tuple(
pw_split[0:i]
+ (sympy_product(pw_split[i : i + 2]),)
+ pw_split[i + 2 :]
)
self.pw_split_options[len(new_split)].add(new_split)
# if for whatever reason we couldn't split above, return default split
return ((self.pointwise_numel,), (self.red_numel,))
def try_split(self, pw: Split, red: Split) -> Optional[tuple[Split, Split]]:
"""
See if this split is compatible, and potentially returning a longer split
than the input.
"""
from torch._inductor.codegen.simd import CantSplit, SIMDKernel
if pw in self.seen_pw_splits:
return None
self.seen_pw_splits.add(pw)
for n_pw, n_red in self.all_node_sizes:
try:
groups = pw + red
lengths = (n_pw, n_red)
splits, getters = SIMDKernel._split_iteration_ranges(groups, lengths)
except CantSplit:
return None
assert len(getters) == 2
pw_group_splits = splits[: len(pw)]
# if we had to divide a variable into two to do this split,
# then lets try the larger, induced split.
# e.g. splitting (12, 2) into (2, 12) will split the first var into:
# (2, 6) and produce an overall split of (2, 6, 2)
flattened_pw_splits = tuple(itertools.chain.from_iterable(pw_group_splits))
if flattened_pw_splits != pw:
if out := self.try_split(flattened_pw_splits, red):
return out
return pw, red
def apply_var_mapping(
iter_vars: list[sympy.Symbol],
red_vars: list[sympy.Symbol],
norm_pw_vars: list[sympy.Symbol],
norm_red_vars: list[sympy.Symbol],
new_ranges: list[list[sympy.Expr]],
return_getters_groups: list[list[Callable[[list[sympy.Expr]], sympy.Expr]]],
) -> dict[sympy.Symbol, sympy.Expr]:
"""Maps original variables to expressions using normalized variables."""
# the output of split_iteration_range is a new_ranges, return_getters_groups
# new_ranges is a flattened list of ranges corresponding to the new pw and red vars
# for example, taking in pw vars of range (6, 6) to normalized range [36],
# new_ranges would be [[6, 6]]
# There is a return_getter callable for each input iter_var and red_vars.
# if you flatten out all of the ranges, and create a variable for each index,
# then applying the flattening vars to the callables in return_getters_groups
# gives you the mapping from input vars -> flattened vars.
# From there, we can compute the output, normalized variables.
# For instance [6, 6] corresponding to flat vars v0, v1 will be
# v0 + 6 * v1
# Create flattened iteration variables
num_vars = sum(len(s) for s in new_ranges)
flat_vars = sympy.symbols(f"v_0:{num_vars}")
count = 0
if len(iter_vars) == 0 and len(red_vars) == 0:
return {}
assert len(new_ranges) == len(norm_pw_vars + norm_red_vars)
apply_groups = []
for group in return_getters_groups:
apply_groups.append([g(flat_vars) for g in group])
iter_vars_to_flat_vars = {}
for i, (group, var_group) in enumerate(
zip(apply_groups, (iter_vars, red_vars), strict=True)
):
# if the node has sizes (p0, 1) and the fused node is (p0, r0)
# the reduction var gets filled in for split_iteration_range
if len(group) != len(var_group):
assert i == 1
assert len(var_group) == 0
continue
iter_vars_to_flat_vars.update({v: g for g, v in zip(group, var_group)})
count = 0
flat_vars_to_new_vars = {}
for new_range, new_var in zip(
new_ranges, norm_pw_vars + norm_red_vars, strict=True
):
range_vars = []
for _ in range(len(new_range)):
range_vars.append(flat_vars[count])
count += 1
prod = 1
for i in range(len(new_range) - 1, -1, -1):
flat_vars_to_new_vars[range_vars[i]] = new_var * prod
prod = new_range[i] * prod
return {
k: sympy_subs(v, flat_vars_to_new_vars)
for k, v in iter_vars_to_flat_vars.items()
}
def extract_normalized_read_writes(
node: Union["FusedSchedulerNode", "SchedulerNode"],
) -> Optional[FusedNormalizedReadsWrites]:
"""Extracts index variables, reduce variables, read/write expressions, and variable ranges from a fused node."""
reads: dict[sympy.Expr, OrderedSet[str]] = defaultdict(OrderedSet)
writes: dict[sympy.Expr, OrderedSet[str]] = defaultdict(OrderedSet)
all_output_names = node.get_buffer_names()
op_names = node.get_operation_names()
outputs: OrderedSet[str] = OrderedSet()
removed_buffers: OrderedSet[str] = OrderedSet()
for buf_name in all_output_names:
if V.graph.scheduler.can_buffer_be_removed_through_fusion(buf_name, op_names):
removed_buffers.add(buf_name)
else:
outputs.add(buf_name)
inputs = OrderedSet(
dep.name for dep in node.read_writes.reads if dep.name not in removed_buffers
)
pointwise_numel: sympy.Expr = node.group[1][0]
red_numel: sympy.Expr = node.group[1][1]
# TODO - a few dynamic shapes issues to resolve
if any(
(isinstance(var, sympy.Expr) and not var.is_constant())
for var in (pointwise_numel, red_numel)
):
return None
pw_splits, red_splits = NodeSplitGetter(node).get_node_splits()
# lets use different prefix (`n`) to distinguish
(norm_pw_vars, norm_red_vars), ranges = index_vars_no_squeeze(
pw_splits, red_splits, prefix="n"
)
for n in list(node.get_nodes()):
if not isinstance(n, torch._inductor.scheduler.SchedulerNode):
continue
body = n._body
# TODO - not handled well. indirect loads will not be coalesced,
# need to account for that in analysis.
if body.indirect_vars:
return None
n_reads: dict[sympy.Expr, OrderedSet[str]] = defaultdict(OrderedSet)
n_writes: dict[sympy.Expr, OrderedSet[str]] = defaultdict(OrderedSet)
# TODO - will the names for all the inputs/outputs accurately
# reflect mutation, or do I need to remap with mutation_real_name
for inp in inputs:
for expr in body.get_all_read_expr(inp):
n_reads[expr].add(inp)
for out in outputs:
for expr in body.get_all_write_expr(out):
n_writes[expr].add(out)
if not n_reads and not n_writes:
continue
(iter_vars, n_pw_splits), (red_vars, n_red_splits) = get_pw_red_splits(
n, pointwise_numel, red_numel
)
groups = pw_splits + red_splits
lengths = (n_pw_splits, (n_red_splits))
lengths = (
torch._inductor.codegen.simd.SIMDKernel.prepare_split_iteration_lengths(
groups, lengths, red_numel
)
)
new_ranges, return_getters_groups = (
torch._inductor.codegen.simd.SIMDKernel._split_iteration_ranges(
groups, lengths
)
)
var_map = apply_var_mapping(
iter_vars,
red_vars,
norm_pw_vars,
norm_red_vars,
new_ranges,
return_getters_groups,
)
# We create Identity sympy.Functions to prevent expansion to int64,
# unwrap for tiling analysis.
def remove_identity(expr: sympy.Expr) -> sympy.Expr:
return expr.replace(Identity, lambda x: x)
n_reads_new = {
sympy_subs(remove_identity(read), var_map): v for read, v in n_reads.items()
}
n_writes_new = {
sympy_subs(remove_identity(write), var_map): v
for write, v in n_writes.items()
}
for expr, buf_names in n_reads_new.items():
reads[expr] |= buf_names
for expr, buf_names in n_writes_new.items():
writes[expr] |= buf_names
reads = {
V.graph.sizevars.simplify_with_ranges(r, ranges): v for r, v in reads.items()
}
writes = {
V.graph.sizevars.simplify_with_ranges(w, ranges): v for w, v in writes.items()
}
fused_out = FusedNormalizedReadsWrites(
norm_pw_vars, # type: ignore[arg-type]
norm_red_vars, # type: ignore[arg-type]
reads,
writes,
ranges,
)
loop_tiling_log.info("Normalized Fused reads: %s", fused_out)
return fused_out
def get_score(
addr: sympy.Expr, var_ranges: dict[sympy.Symbol, int], buf_names: OrderedSet[str]
) -> int:
"""
Score addr according to its approximate size.
"""
# TODO - deduplicate with candidate_tilings
var_sizes = []
for v in addr.free_symbols:
v_size = var_ranges.get(v)
# TODO - reason about indirect vars
if not symbol_is_type(v, SymT.INDIRECT) and v_size is not None:
var_sizes.append(v_size)
from .virtualized import V
return V.graph.sizevars.atomically_apply_size_hint(
sympy_product(var_sizes), fallback=config.unbacked_symint_fallback
)
def try_get_buf_size(buf_name: str) -> Optional[int]:
buf = V.graph.try_get_buffer(buf_name)
if not buf:
return None
return V.graph.sizevars.atomically_apply_size_hint(
sympy_product(buf.get_size()), fallback=config.unbacked_symint_fallback
)
def get_hint(v: Union[sympy.Expr, int]) -> int:
if isinstance(v, int):
return v
else:
return V.graph.sizevars.size_hint(v, fallback=config.unbacked_symint_fallback)
@dataclasses.dataclass(frozen=True)
| NodeSplitGetter |
python | getsentry__sentry | tests/sentry/new_migrations/monkey/test_executor.py | {
"start": 636,
"end": 10249
} | class ____:
@pytest.fixture(autouse=True)
def _mock_getsentry_if_not_registered(self) -> Generator[None]:
if "getsentry" in settings.INSTALLED_APPS:
yield
return
with (
patch.dict(apps.app_configs, {"getsentry": DummyGetsentryAppConfig("getsentry", None)}),
patch.object(settings, "INSTALLED_APPS", new=settings.INSTALLED_APPS + ("getsentry",)),
):
yield
def test_check_db_routing_pass(self) -> None:
class TestMigration(migrations.Migration):
operations = [
migrations.CreateModel(
name="Test",
fields=[
(
"id",
models.IntegerField(serialize=False, primary_key=True),
),
(
"type",
models.IntegerField(
choices=[
(1, "set_resolved"),
]
),
),
],
options={"db_table": "sentry_test"},
),
migrations.AlterUniqueTogether(
name="test", unique_together={("project_id", "key", "value")}
),
migrations.AddField(
model_name="release",
name="projects",
field=models.ManyToManyField(related_name="releases", to="sentry.Project"),
),
SafeRunSQL(
"TEST SQL",
hints={"tables": ["sentry_savedsearch"]},
),
migrations.RunPython(
migrations.RunPython.noop,
migrations.RunPython.noop,
hints={"tables": ["sentry_test"]},
),
]
SentryMigrationExecutor._check_db_routing(TestMigration(name="test", app_label="sentry"))
SentryMigrationExecutor._check_db_routing(TestMigration(name="test", app_label="uptime"))
def test_check_db_routing_pass_2(self) -> None:
class TestMigration(migrations.Migration):
operations = [
migrations.SeparateDatabaseAndState(
state_operations=[],
database_operations=[
migrations.CreateModel(
name="Test",
fields=[
(
"id",
models.IntegerField(serialize=False, primary_key=True),
),
(
"type",
models.IntegerField(
choices=[
(1, "set_resolved"),
]
),
),
],
options={"db_table": "sentry_test"},
),
migrations.AlterUniqueTogether(
name="test", unique_together={("project_id", "key", "value")}
),
migrations.AddField(
model_name="release",
name="projects",
field=models.ManyToManyField(
related_name="releases", to="sentry.Project"
),
),
SafeRunSQL(
"TEST SQL",
hints={"tables": ["sentry_savedsearch"]},
),
migrations.RunPython(
migrations.RunPython.noop,
migrations.RunPython.noop,
hints={"tables": ["sentry_test"]},
),
],
),
]
SentryMigrationExecutor._check_db_routing(TestMigration(name="test", app_label="sentry"))
SentryMigrationExecutor._check_db_routing(TestMigration(name="test", app_label="uptime"))
def test_check_db_routing_missing_hints(self) -> None:
class TestMigration(migrations.Migration):
operations = [
migrations.SeparateDatabaseAndState(
state_operations=[],
database_operations=[
migrations.AlterUniqueTogether(
name="test", unique_together={("project_id", "key", "value")}
),
migrations.AddField(
model_name="release",
name="projects",
field=models.ManyToManyField(
related_name="releases", to="sentry.Project"
),
),
SafeRunSQL("TEST SQL"),
migrations.RunPython(
migrations.RunPython.noop,
migrations.RunPython.noop,
hints={"tables": ["sentry_test"]},
),
],
),
]
with pytest.raises(MissingDatabaseRoutingInfo):
SentryMigrationExecutor._check_db_routing(
TestMigration(name="test", app_label="sentry")
)
with pytest.raises(MissingDatabaseRoutingInfo):
SentryMigrationExecutor._check_db_routing(
TestMigration(name="test", app_label="uptime")
)
def test_check_db_routing_missing_hints_2(self) -> None:
class TestMigration(migrations.Migration):
operations = [
SafeRunSQL("TEST SQL"),
]
with pytest.raises(MissingDatabaseRoutingInfo):
SentryMigrationExecutor._check_db_routing(
TestMigration(name="test", app_label="getsentry")
)
with pytest.raises(MissingDatabaseRoutingInfo):
SentryMigrationExecutor._check_db_routing(
TestMigration(name="test", app_label="uptime")
)
def test_check_db_routing_missing_hints_3(self) -> None:
class TestMigration(migrations.Migration):
operations = [
migrations.RunPython(
migrations.RunPython.noop,
migrations.RunPython.noop,
),
]
with pytest.raises(MissingDatabaseRoutingInfo):
SentryMigrationExecutor._check_db_routing(
TestMigration(name="test", app_label="getsentry")
)
with pytest.raises(MissingDatabaseRoutingInfo):
SentryMigrationExecutor._check_db_routing(
TestMigration(name="test", app_label="uptime")
)
def test_check_db_routing_dont_run_for_3rd_party(self) -> None:
class TestMigration(migrations.Migration):
operations = [
SafeRunSQL("TEST SQL"),
]
SentryMigrationExecutor._check_db_routing(TestMigration(name="test", app_label="auth"))
def test_check_db_routing_extensions(self) -> None:
class TestMigration(migrations.Migration):
operations = [BtreeGistExtension()]
SentryMigrationExecutor._check_db_routing(TestMigration(name="test", app_label="sentry"))
SentryMigrationExecutor._check_db_routing(TestMigration(name="test", app_label="uptime"))
@pytest.mark.parametrize(
("before", "after"),
(
pytest.param(["a", "b", "c"], ["a", "b", "c"], id="noop"),
pytest.param(["a", "b", "c"], ["a", "b", "c", "d"], id="append"),
),
)
def test_check_bitfield_flags_ok(before: list[str], after: list[str]) -> None:
_check_bitfield_flags("001_migration", before, after)
def test_check_bitfield_flags_deletion() -> None:
expected = """\
migration `001_migration` alters a BitField in an unsafe way!
the following flags were removed: b
unused flags must remain to preserve padding for future flags
""".rstrip()
with pytest.raises(ValueError) as excinfo:
_check_bitfield_flags("001_migration", ["a", "b", "c"], ["a", "c"])
(msg,) = excinfo.value.args
assert msg == expected
def test_check_bitfield_flags_insertion() -> None:
expected = """\
migration `001_migration` alters a BitField in an unsafe way!
the following flags were inserted between old flags: d
new flags must be added at the end or flags will change meaning
""".rstrip()
with pytest.raises(ValueError) as excinfo:
_check_bitfield_flags("001_migration", ["a", "b", "c"], ["a", "d", "b", "c"])
(msg,) = excinfo.value.args
assert msg == expected
def test_check_bitfield_flags_reorder() -> None:
expected = """\
migration `001_migration` alters a BitField in an unsafe way!
the following old flags were reordered:
--- old
+++ new
@@ -1,3 +1,3 @@
+b
a
-b
c
flags must retain historical order or flags will change meaning
""".rstrip()
with pytest.raises(ValueError) as excinfo:
_check_bitfield_flags("001_migration", ["a", "b", "c"], ["b", "a", "c"])
(msg,) = excinfo.value.args
assert msg == expected
| TestSentryMigrationExecutor |
python | Netflix__metaflow | metaflow/plugins/cards/card_modules/test_cards.py | {
"start": 1649,
"end": 1915
} | class ____(MetaflowCard):
type = "test_mock_card"
def __init__(self, options={"key": "dummy_key"}, **kwargs):
self._key = options["key"]
def render(self, task):
task_data = task[self._key].data
return "%s" % task_data
| TestMockCard |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1055683,
"end": 1056322
} | class ____(sgqlc.types.Type, Node):
"""Represents a 'added_to_project' event on a given issue or pull
request.
"""
__schema__ = github_schema
__field_names__ = ("actor", "created_at", "database_id")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
"""Identifies the actor who performed the event."""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
"""Identifies the primary key from the database."""
| AddedToProjectEvent |
python | pandas-dev__pandas | pandas/tests/arrays/categorical/test_operators.py | {
"start": 183,
"end": 4553
} | class ____:
def test_categories_none_comparisons(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"], ordered=True)
tm.assert_categorical_equal(factor, factor)
def test_comparisons(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"], ordered=True)
result = factor[factor == "a"]
expected = factor[np.asarray(factor) == "a"]
tm.assert_categorical_equal(result, expected)
result = factor[factor != "a"]
expected = factor[np.asarray(factor) != "a"]
tm.assert_categorical_equal(result, expected)
result = factor[factor < "c"]
expected = factor[np.asarray(factor) < "c"]
tm.assert_categorical_equal(result, expected)
result = factor[factor > "a"]
expected = factor[np.asarray(factor) > "a"]
tm.assert_categorical_equal(result, expected)
result = factor[factor >= "b"]
expected = factor[np.asarray(factor) >= "b"]
tm.assert_categorical_equal(result, expected)
result = factor[factor <= "b"]
expected = factor[np.asarray(factor) <= "b"]
tm.assert_categorical_equal(result, expected)
n = len(factor)
other = factor[np.random.default_rng(2).permutation(n)]
result = factor == other
expected = np.asarray(factor) == np.asarray(other)
tm.assert_numpy_array_equal(result, expected)
result = factor == "d"
expected = np.zeros(len(factor), dtype=bool)
tm.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = Categorical(["a", "b", "c"], categories=["c", "b", "a"], ordered=True)
cat_rev_base = Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True
)
cat = Categorical(["a", "b", "c"], ordered=True)
cat_base = Categorical(["b", "b", "b"], categories=cat.categories, ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
tm.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
tm.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
tm.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
msg = "Categoricals can only be compared if 'categories' are the same"
with pytest.raises(TypeError, match=msg):
cat > cat_rev
cat_rev_base2 = Categorical(["b", "b", "b"], categories=["c", "b", "a", "d"])
with pytest.raises(TypeError, match=msg):
cat_rev > cat_rev_base2
# Only categories with same ordering information can be compared
cat_unordered = cat.set_ordered(False)
assert not (cat > cat).any()
with pytest.raises(TypeError, match=msg):
cat > cat_unordered
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"], dtype=object)
msg = (
"Cannot compare a Categorical for op __gt__ with type "
r"<class 'numpy\.ndarray'>"
)
with pytest.raises(TypeError, match=msg):
cat > s
with pytest.raises(TypeError, match=msg):
cat_rev > s
with pytest.raises(TypeError, match=msg):
s < cat
with pytest.raises(TypeError, match=msg):
s < cat_rev
# comparison with numpy.array will raise in both direction, but only on
# newer numpy versions
a = np.array(["b", "b", "b"], dtype=object)
with pytest.raises(TypeError, match=msg):
cat > a
with pytest.raises(TypeError, match=msg):
cat_rev > a
# Make sure that unequal comparison take the categories order in
# account
cat_rev = Categorical(list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
tm.assert_numpy_array_equal(res, exp)
# check that zero-dim array gets unboxed
res = cat_rev > np.array("b")
tm.assert_numpy_array_equal(res, exp)
| TestCategoricalOpsWithFactor |
python | lepture__authlib | authlib/integrations/django_oauth2/authorization_server.py | {
"start": 531,
"end": 4463
} | class ____(_AuthorizationServer):
"""Django implementation of :class:`authlib.oauth2.rfc6749.AuthorizationServer`.
Initialize it with client model and token model::
from authlib.integrations.django_oauth2 import AuthorizationServer
from your_project.models import OAuth2Client, OAuth2Token
server = AuthorizationServer(OAuth2Client, OAuth2Token)
"""
def __init__(self, client_model, token_model):
super().__init__()
self.client_model = client_model
self.token_model = token_model
self.load_config(getattr(settings, "AUTHLIB_OAUTH2_PROVIDER", {}))
def load_config(self, config):
self.config = config
scopes_supported = self.config.get("scopes_supported")
self.scopes_supported = scopes_supported
# add default token generator
self.register_token_generator("default", self.create_bearer_token_generator())
def query_client(self, client_id):
"""Default method for ``AuthorizationServer.query_client``. Developers MAY
rewrite this function to meet their own needs.
"""
try:
return self.client_model.objects.get(client_id=client_id)
except self.client_model.DoesNotExist:
return None
def save_token(self, token, request):
"""Default method for ``AuthorizationServer.save_token``. Developers MAY
rewrite this function to meet their own needs.
"""
client = request.client
if request.user:
user_id = request.user.pk
else:
user_id = client.user_id
item = self.token_model(client_id=client.client_id, user_id=user_id, **token)
item.save()
return item
def create_oauth2_request(self, request):
return DjangoOAuth2Request(request)
def create_json_request(self, request):
return DjangoJsonRequest(request)
def handle_response(self, status_code, payload, headers):
if isinstance(payload, dict):
payload = json_dumps(payload)
resp = HttpResponse(payload, status=status_code)
for k, v in headers:
resp[k] = v
return resp
def send_signal(self, name, *args, **kwargs):
if name == "after_authenticate_client":
client_authenticated.send(*args, sender=self.__class__, **kwargs)
elif name == "after_revoke_token":
token_revoked.send(*args, sender=self.__class__, **kwargs)
def create_bearer_token_generator(self):
"""Default method to create BearerToken generator."""
conf = self.config.get("access_token_generator", True)
access_token_generator = create_token_generator(conf, 42)
conf = self.config.get("refresh_token_generator", False)
refresh_token_generator = create_token_generator(conf, 48)
conf = self.config.get("token_expires_in")
expires_generator = create_token_expires_in_generator(conf)
return BearerTokenGenerator(
access_token_generator=access_token_generator,
refresh_token_generator=refresh_token_generator,
expires_generator=expires_generator,
)
def create_token_generator(token_generator_conf, length=42):
if callable(token_generator_conf):
return token_generator_conf
if isinstance(token_generator_conf, str):
return import_string(token_generator_conf)
elif token_generator_conf is True:
def token_generator(*args, **kwargs):
return _generate_token(length)
return token_generator
def create_token_expires_in_generator(expires_in_conf=None):
data = {}
data.update(BearerTokenGenerator.GRANT_TYPES_EXPIRES_IN)
if expires_in_conf:
data.update(expires_in_conf)
def expires_in(client, grant_type):
return data.get(grant_type, BearerTokenGenerator.DEFAULT_EXPIRES_IN)
return expires_in
| AuthorizationServer |
python | zostera__django-bootstrap4 | src/bootstrap4/renderers.py | {
"start": 7170,
"end": 20928
} | class ____(BaseRenderer):
"""Default field renderer."""
# These widgets will not be wrapped in a form-control class
WIDGETS_NO_FORM_CONTROL = (CheckboxInput, RadioSelect, CheckboxSelectMultiple, FileInput)
def __init__(self, field, *args, **kwargs):
if not isinstance(field, BoundField):
raise BootstrapError('Parameter "field" should contain a valid Django BoundField.')
self.field = field
super().__init__(*args, **kwargs)
self.widget = field.field.widget
self.is_multi_widget = isinstance(field.field.widget, MultiWidget)
self.initial_attrs = self.widget.attrs.copy()
self.field_help = text_value(mark_safe(field.help_text)) if self.show_help and field.help_text else ""
self.field_errors = [conditional_escape(text_value(error)) for error in field.errors]
self.form_check_class = kwargs.get("form_check_class", "form-check")
if "placeholder" in kwargs:
# Find the placeholder in kwargs, even if it's empty
self.placeholder = kwargs["placeholder"]
elif get_bootstrap_setting("set_placeholder"):
# If not found, see if we set the label
self.placeholder = field.label
else:
# Or just set it to empty
self.placeholder = ""
if self.placeholder:
self.placeholder = text_value(self.placeholder)
self.addon_before = kwargs.get("addon_before", self.widget.attrs.pop("addon_before", ""))
self.addon_after = kwargs.get("addon_after", self.widget.attrs.pop("addon_after", ""))
self.addon_before_class = kwargs.get(
"addon_before_class", self.widget.attrs.pop("addon_before_class", "input-group-text")
)
self.addon_after_class = kwargs.get(
"addon_after_class", self.widget.attrs.pop("addon_after_class", "input-group-text")
)
# These are set in Django or in the global BOOTSTRAP4 settings, and
# they can be overwritten in the template
error_css_class = kwargs.get("error_css_class", None)
required_css_class = kwargs.get("required_css_class", None)
bound_css_class = kwargs.get("bound_css_class", None)
if error_css_class is not None:
self.error_css_class = error_css_class
else:
self.error_css_class = getattr(field.form, "error_css_class", get_bootstrap_setting("error_css_class"))
if required_css_class is not None:
self.required_css_class = required_css_class
else:
self.required_css_class = getattr(
field.form, "required_css_class", get_bootstrap_setting("required_css_class")
)
if bound_css_class is not None:
self.success_css_class = bound_css_class
else:
self.success_css_class = getattr(field.form, "bound_css_class", get_bootstrap_setting("success_css_class"))
# If the form is marked as form.empty_permitted, do not set required class
if self.field.form.empty_permitted:
self.required_css_class = ""
def restore_widget_attrs(self):
self.widget.attrs = self.initial_attrs.copy()
def add_class_attrs(self, widget=None):
if widget is None:
widget = self.widget
classes = widget.attrs.get("class", "")
if ReadOnlyPasswordHashWidget is not None and isinstance(widget, ReadOnlyPasswordHashWidget):
# Render this is a static control
classes = add_css_class(classes, "form-control-static", prepend=True)
elif not isinstance(widget, self.WIDGETS_NO_FORM_CONTROL):
classes = add_css_class(classes, "form-control", prepend=True)
# For these widget types, add the size class here
classes = add_css_class(classes, self.get_size_class())
elif isinstance(widget, CheckboxInput):
classes = add_css_class(classes, "form-check-input", prepend=True)
elif isinstance(widget, FileInput):
classes = add_css_class(classes, "form-control-file", prepend=True)
if self.field.errors:
if self.error_css_class:
classes = add_css_class(classes, self.error_css_class)
else:
if self.field.form.is_bound:
classes = add_css_class(classes, self.success_css_class)
widget.attrs["class"] = classes
def add_placeholder_attrs(self, widget=None):
if widget is None:
widget = self.widget
placeholder = widget.attrs.get("placeholder", self.placeholder)
if placeholder and self.set_placeholder and is_widget_with_placeholder(widget):
# TODO: Should this be stripped and/or escaped?
widget.attrs["placeholder"] = placeholder
def add_help_attrs(self, widget=None):
if widget is None:
widget = self.widget
if not isinstance(widget, CheckboxInput):
widget.attrs["title"] = widget.attrs.get("title", escape(strip_tags(self.field_help)))
def add_widget_attrs(self):
if self.is_multi_widget:
widgets = self.widget.widgets
else:
widgets = [self.widget]
for widget in widgets:
self.add_class_attrs(widget)
self.add_placeholder_attrs(widget)
self.add_help_attrs(widget)
def list_to_class(self, html, klass):
classes = add_css_class(klass, self.get_size_class())
soup = BeautifulSoup(html, features="html.parser")
enclosing_div = soup.find("div")
enclosing_div.attrs["class"] = classes
for inner_div in enclosing_div.find_all("div"):
inner_div.attrs["class"] = inner_div.attrs.get("class", []) + [self.form_check_class]
# Apply bootstrap4 classes to labels and inputs.
# A simple 'replace' isn't enough as we don't want to have several 'class' attr definition, which would happen
# if we tried to 'html.replace("input", "input class=...")'
enclosing_div = soup.find("div", {"class": classes})
if enclosing_div:
for label in enclosing_div.find_all("label"):
label.attrs["class"] = label.attrs.get("class", []) + ["form-check-label"]
try:
label.input.attrs["class"] = label.input.attrs.get("class", []) + ["form-check-input"]
except AttributeError:
pass
return str(soup)
def add_checkbox_label(self, html):
return html + render_label(
content=self.field.label,
label_for=self.field.id_for_label,
label_title=escape(strip_tags(self.field_help)),
label_class="form-check-label",
)
def fix_date_select_input(self, html):
div1 = '<div class="col-4">'
div2 = "</div>"
html = html.replace("<select", div1 + "<select")
html = html.replace("</select>", "</select>" + div2)
return f'<div class="row bootstrap4-multi-input">{html}</div>'
def fix_file_input_label(self, html):
if self.layout != "horizontal":
html = "<br>" + html
return html
def post_widget_render(self, html):
if isinstance(self.widget, CheckboxSelectMultiple):
html = self.list_to_class(html, "checkbox")
elif isinstance(self.widget, RadioSelect):
html = self.list_to_class(html, "radio radio-success")
elif isinstance(self.widget, SelectDateWidget):
html = self.fix_date_select_input(html)
elif isinstance(self.widget, CheckboxInput):
html = self.add_checkbox_label(html)
elif isinstance(self.widget, FileInput):
html = self.fix_file_input_label(html)
return html
def wrap_widget(self, html):
if isinstance(self.widget, CheckboxInput):
# Wrap checkboxes
# Note checkboxes do not get size classes, see #318
html = f'<div class="form-check">{html}</div>'
return html
def make_input_group_addon(self, inner_class, outer_class, content):
if not content:
return ""
if inner_class:
content = f'<span class="{inner_class}">{content}</span>'
return f'<div class="{outer_class}">{content}</div>'
@property
def is_input_group(self):
allowed_widget_types = (TextInput, PasswordInput, DateInput, NumberInput, Select, EmailInput, URLInput)
return (self.addon_before or self.addon_after) and isinstance(self.widget, allowed_widget_types)
def make_input_group(self, html):
if self.is_input_group:
before = self.make_input_group_addon(self.addon_before_class, "input-group-prepend", self.addon_before)
after = self.make_input_group_addon(self.addon_after_class, "input-group-append", self.addon_after)
html = self.append_errors(f"{before}{html}{after}")
html = f'<div class="input-group">{html}</div>'
return html
def append_help(self, html):
field_help = self.field_help or None
if field_help:
help_html = render_template_file(
"bootstrap4/field_help_text.html",
context={
"field": self.field,
"field_help": field_help,
"layout": self.layout,
"show_help": self.show_help,
},
)
html += help_html
return html
def append_errors(self, html):
field_errors = self.field_errors
if field_errors:
errors_html = render_template_file(
"bootstrap4/field_errors.html",
context={
"field": self.field,
"field_errors": field_errors,
"layout": self.layout,
"show_help": self.show_help,
},
)
html += errors_html
return html
def append_to_field(self, html):
if isinstance(self.widget, CheckboxInput):
# we have already appended errors and help to checkboxes
# in append_to_checkbox_field
return html
if not self.is_input_group:
# we already appended errors for input groups in make_input_group
html = self.append_errors(html)
return self.append_help(html)
def append_to_checkbox_field(self, html):
if not isinstance(self.widget, CheckboxInput):
# we will append errors and help to normal fields later in append_to_field
return html
html = self.append_errors(html)
return self.append_help(html)
def get_field_class(self):
field_class = self.field_class
if not field_class and self.layout == "horizontal":
field_class = self.horizontal_field_class
return field_class
def wrap_field(self, html):
field_class = self.get_field_class()
if field_class:
html = f'<div class="{field_class}">{html}</div>'
return html
def get_label_class(self):
label_class = self.label_class
if not label_class and self.layout == "horizontal":
label_class = self.horizontal_label_class
label_class = add_css_class(label_class, "col-form-label")
label_class = text_value(label_class)
if not self.show_label or self.show_label == "sr-only":
label_class = add_css_class(label_class, "sr-only")
return label_class
def get_label(self):
if self.show_label == "skip":
return None
elif isinstance(self.widget, CheckboxInput):
label = None
else:
label = self.field.label
if self.layout == "horizontal" and not label:
return mark_safe(" ")
return label
def add_label(self, html):
label = self.get_label()
if label:
html = render_label(label, label_for=self.field.id_for_label, label_class=self.get_label_class()) + html
return html
def get_form_group_class(self):
form_group_class = self.form_group_class
if self.field.errors:
if self.error_css_class:
form_group_class = add_css_class(form_group_class, self.error_css_class)
else:
if self.field.form.is_bound:
form_group_class = add_css_class(form_group_class, self.success_css_class)
if self.field.field.required and self.required_css_class:
form_group_class = add_css_class(form_group_class, self.required_css_class)
if self.layout == "horizontal":
form_group_class = add_css_class(form_group_class, "row")
return form_group_class
def wrap_label_and_field(self, html):
return render_form_group(html, self.get_form_group_class())
def _render(self):
# See if we're not excluded
if self.field.name in self.exclude.replace(" ", "").split(","):
return ""
# Hidden input requires no special treatment
if self.field.is_hidden:
return text_value(self.field)
# Render the widget
self.add_widget_attrs()
html = self.field.as_widget(attrs=self.widget.attrs)
self.restore_widget_attrs()
# Start post render
html = self.post_widget_render(html)
html = self.append_to_checkbox_field(html)
html = self.wrap_widget(html)
html = self.make_input_group(html)
html = self.append_to_field(html)
html = self.wrap_field(html)
html = self.add_label(html)
html = self.wrap_label_and_field(html)
return html
| FieldRenderer |
python | cython__cython | Cython/Compiler/ParseTreeTransforms.py | {
"start": 130233,
"end": 132149
} | class ____(CythonTransform):
"""
This class takes the signatures from a .pxd file and applies them to
the def methods in a .py file.
"""
def visit_ModuleNode(self, node):
self.scope = node.scope
self.visitchildren(node)
return node
def visit_PyClassDefNode(self, node):
pxd_def = self.scope.lookup(node.name)
if pxd_def:
if pxd_def.is_cclass:
return self.visit_CClassDefNode(node.as_cclass(), pxd_def)
elif not pxd_def.scope or not pxd_def.scope.is_builtin_scope:
error(node.pos, "'%s' redeclared" % node.name)
if pxd_def.pos:
error(pxd_def.pos, "previous declaration here")
return None
return node
def visit_CClassDefNode(self, node, pxd_def=None):
if pxd_def is None:
pxd_def = self.scope.lookup(node.class_name)
if pxd_def:
if not pxd_def.defined_in_pxd:
return node
outer_scope = self.scope
self.scope = pxd_def.type.scope
self.visitchildren(node)
if pxd_def:
self.scope = outer_scope
return node
def visit_DefNode(self, node):
pxd_def = self.scope.lookup(node.name)
if pxd_def and (not pxd_def.scope or not pxd_def.scope.is_builtin_scope):
if not pxd_def.is_cfunction:
error(node.pos, "'%s' redeclared" % node.name)
if pxd_def.pos:
error(pxd_def.pos, "previous declaration here")
return None
node = node.as_cfunction(pxd_def)
# Enable this when nested cdef functions are allowed.
# self.visitchildren(node)
return node
def visit_ExprNode(self, node):
# ignore lambdas and everything else that appears in expressions
return node
| AlignFunctionDefinitions |
python | wandb__wandb | wandb/errors/errors.py | {
"start": 688,
"end": 782
} | class ____(Error):
"""Raised when an invalid usage of the SDK API is detected."""
| UsageError |
python | pypa__setuptools | setuptools/_distutils/errors.py | {
"start": 602,
"end": 689
} | class ____(Exception):
"""The root of all Distutils evil."""
pass
| DistutilsError |
python | giampaolo__psutil | psutil/_psbsd.py | {
"start": 18463,
"end": 29275
} | class ____:
"""Wrapper class around underlying C implementation."""
__slots__ = ["_cache", "_name", "_ppid", "pid"]
def __init__(self, pid):
self.pid = pid
self._name = None
self._ppid = None
def _assert_alive(self):
"""Raise NSP if the process disappeared on us."""
# For those C function who do not raise NSP, possibly returning
# incorrect or incomplete result.
cext.proc_name(self.pid)
@wrap_exceptions
@memoize_when_activated
def oneshot(self):
"""Retrieves multiple process info in one shot as a raw tuple."""
ret = cext.proc_oneshot_info(self.pid)
assert len(ret) == len(kinfo_proc_map)
return ret
def oneshot_enter(self):
self.oneshot.cache_activate(self)
def oneshot_exit(self):
self.oneshot.cache_deactivate(self)
@wrap_exceptions
def name(self):
name = self.oneshot()[kinfo_proc_map['name']]
return name if name is not None else cext.proc_name(self.pid)
@wrap_exceptions
def exe(self):
if FREEBSD:
if self.pid == 0:
return '' # else NSP
return cext.proc_exe(self.pid)
elif NETBSD:
if self.pid == 0:
# /proc/0 dir exists but /proc/0/exe doesn't
return ""
with wrap_exceptions_procfs(self):
return os.readlink(f"/proc/{self.pid}/exe")
else:
# OpenBSD: exe cannot be determined; references:
# https://chromium.googlesource.com/chromium/src/base/+/
# master/base_paths_posix.cc
# We try our best guess by using which against the first
# cmdline arg (may return None).
import shutil
cmdline = self.cmdline()
if cmdline:
return shutil.which(cmdline[0]) or ""
else:
return ""
@wrap_exceptions
def cmdline(self):
if OPENBSD and self.pid == 0:
return [] # ...else it crashes
elif NETBSD:
# XXX - most of the times the underlying sysctl() call on
# NetBSD and OpenBSD returns a truncated string. Also
# /proc/pid/cmdline behaves the same so it looks like this
# is a kernel bug.
try:
return cext.proc_cmdline(self.pid)
except OSError as err:
if err.errno == errno.EINVAL:
pid, name, ppid = self.pid, self._name, self._ppid
if cext.proc_is_zombie(self.pid):
raise ZombieProcess(pid, name, ppid) from err
if not pid_exists(self.pid):
raise NoSuchProcess(pid, name, ppid) from err
# XXX: this happens with unicode tests. It means the C
# routine is unable to decode invalid unicode chars.
debug(f"ignoring {err!r} and returning an empty list")
return []
else:
raise
else:
return cext.proc_cmdline(self.pid)
@wrap_exceptions
def environ(self):
return cext.proc_environ(self.pid)
@wrap_exceptions
def terminal(self):
tty_nr = self.oneshot()[kinfo_proc_map['ttynr']]
tmap = _psposix.get_terminal_map()
try:
return tmap[tty_nr]
except KeyError:
return None
@wrap_exceptions
def ppid(self):
self._ppid = self.oneshot()[kinfo_proc_map['ppid']]
return self._ppid
@wrap_exceptions
def uids(self):
rawtuple = self.oneshot()
return ntp.puids(
rawtuple[kinfo_proc_map['real_uid']],
rawtuple[kinfo_proc_map['effective_uid']],
rawtuple[kinfo_proc_map['saved_uid']],
)
@wrap_exceptions
def gids(self):
rawtuple = self.oneshot()
return ntp.pgids(
rawtuple[kinfo_proc_map['real_gid']],
rawtuple[kinfo_proc_map['effective_gid']],
rawtuple[kinfo_proc_map['saved_gid']],
)
@wrap_exceptions
def cpu_times(self):
rawtuple = self.oneshot()
return ntp.pcputimes(
rawtuple[kinfo_proc_map['user_time']],
rawtuple[kinfo_proc_map['sys_time']],
rawtuple[kinfo_proc_map['ch_user_time']],
rawtuple[kinfo_proc_map['ch_sys_time']],
)
if FREEBSD:
@wrap_exceptions
def cpu_num(self):
return self.oneshot()[kinfo_proc_map['cpunum']]
@wrap_exceptions
def memory_info(self):
rawtuple = self.oneshot()
return ntp.pmem(
rawtuple[kinfo_proc_map['rss']],
rawtuple[kinfo_proc_map['vms']],
rawtuple[kinfo_proc_map['memtext']],
rawtuple[kinfo_proc_map['memdata']],
rawtuple[kinfo_proc_map['memstack']],
)
memory_full_info = memory_info
@wrap_exceptions
def create_time(self, monotonic=False):
ctime = self.oneshot()[kinfo_proc_map['create_time']]
if NETBSD and not monotonic:
# NetBSD: ctime subject to system clock updates.
ctime = adjust_proc_create_time(ctime)
return ctime
@wrap_exceptions
def num_threads(self):
if HAS_PROC_NUM_THREADS:
# FreeBSD / NetBSD
return cext.proc_num_threads(self.pid)
else:
return len(self.threads())
@wrap_exceptions
def num_ctx_switches(self):
rawtuple = self.oneshot()
return ntp.pctxsw(
rawtuple[kinfo_proc_map['ctx_switches_vol']],
rawtuple[kinfo_proc_map['ctx_switches_unvol']],
)
@wrap_exceptions
def threads(self):
# Note: on OpenSBD this (/dev/mem) requires root access.
rawlist = cext.proc_threads(self.pid)
retlist = []
for thread_id, utime, stime in rawlist:
ntuple = ntp.pthread(thread_id, utime, stime)
retlist.append(ntuple)
if OPENBSD:
self._assert_alive()
return retlist
@wrap_exceptions
def net_connections(self, kind='inet'):
families, types = conn_tmap[kind]
ret = []
if NETBSD:
rawlist = cext.net_connections(self.pid, kind)
elif OPENBSD:
rawlist = cext.net_connections(self.pid, families, types)
else:
rawlist = cext.proc_net_connections(self.pid, families, types)
for item in rawlist:
fd, fam, type, laddr, raddr, status = item[:6]
if FREEBSD:
if (fam not in families) or (type not in types):
continue
nt = conn_to_ntuple(
fd, fam, type, laddr, raddr, status, TCP_STATUSES
)
ret.append(nt)
self._assert_alive()
return ret
@wrap_exceptions
def wait(self, timeout=None):
return _psposix.wait_pid(self.pid, timeout, self._name)
@wrap_exceptions
def nice_get(self):
return cext.proc_priority_get(self.pid)
@wrap_exceptions
def nice_set(self, value):
return cext.proc_priority_set(self.pid, value)
@wrap_exceptions
def status(self):
code = self.oneshot()[kinfo_proc_map['status']]
# XXX is '?' legit? (we're not supposed to return it anyway)
return PROC_STATUSES.get(code, '?')
@wrap_exceptions
def io_counters(self):
rawtuple = self.oneshot()
return ntp.pio(
rawtuple[kinfo_proc_map['read_io_count']],
rawtuple[kinfo_proc_map['write_io_count']],
-1,
-1,
)
@wrap_exceptions
def cwd(self):
"""Return process current working directory."""
# sometimes we get an empty string, in which case we turn
# it into None
if OPENBSD and self.pid == 0:
return "" # ...else it would raise EINVAL
return cext.proc_cwd(self.pid)
nt_mmap_grouped = namedtuple(
'mmap', 'path rss, private, ref_count, shadow_count'
)
nt_mmap_ext = namedtuple(
'mmap', 'addr, perms path rss, private, ref_count, shadow_count'
)
@wrap_exceptions
def open_files(self):
"""Return files opened by process as a list of namedtuples."""
rawlist = cext.proc_open_files(self.pid)
return [ntp.popenfile(path, fd) for path, fd in rawlist]
@wrap_exceptions
def num_fds(self):
"""Return the number of file descriptors opened by this process."""
ret = cext.proc_num_fds(self.pid)
if NETBSD:
self._assert_alive()
return ret
# --- FreeBSD only APIs
if FREEBSD:
@wrap_exceptions
def cpu_affinity_get(self):
return cext.proc_cpu_affinity_get(self.pid)
@wrap_exceptions
def cpu_affinity_set(self, cpus):
# Pre-emptively check if CPUs are valid because the C
# function has a weird behavior in case of invalid CPUs,
# see: https://github.com/giampaolo/psutil/issues/586
allcpus = set(range(len(per_cpu_times())))
for cpu in cpus:
if cpu not in allcpus:
msg = f"invalid CPU {cpu!r} (choose between {allcpus})"
raise ValueError(msg)
try:
cext.proc_cpu_affinity_set(self.pid, cpus)
except OSError as err:
# 'man cpuset_setaffinity' about EDEADLK:
# <<the call would leave a thread without a valid CPU to run
# on because the set does not overlap with the thread's
# anonymous mask>>
if err.errno in {errno.EINVAL, errno.EDEADLK}:
for cpu in cpus:
if cpu not in allcpus:
msg = (
f"invalid CPU {cpu!r} (choose between"
f" {allcpus})"
)
raise ValueError(msg) from err
raise
@wrap_exceptions
def memory_maps(self):
return cext.proc_memory_maps(self.pid)
@wrap_exceptions
def rlimit(self, resource, limits=None):
if limits is None:
return cext.proc_getrlimit(self.pid, resource)
else:
if len(limits) != 2:
msg = (
"second argument must be a (soft, hard) tuple, got"
f" {limits!r}"
)
raise ValueError(msg)
soft, hard = limits
return cext.proc_setrlimit(self.pid, resource, soft, hard)
| Process |
python | pola-rs__polars | py-polars/src/polars/io/database/_arrow_registry.py | {
"start": 67,
"end": 3015
} | class ____(TypedDict):
# name of the method that fetches all arrow data; tuple form
# calls the fetch_all method with the given chunk size (int)
fetch_all: str
# name of the method that fetches arrow data in batches
fetch_batches: str | None
# indicate whether the given batch size is respected exactly
exact_batch_size: bool | None
# repeat batch calls (if False, the batch call is a generator)
repeat_batch_calls: bool
# if arrow/polars functionality requires a minimum module version
minimum_version: str | None
# arrow driver properties should be specified from highest `minimum_version` to lowest
ARROW_DRIVER_REGISTRY: dict[str, list[ArrowDriverProperties]] = {
# In version 1.6.0, ADBC released `Cursor.fetch_arrow`, returning an object
# implementing the Arrow PyCapsule interface (not requiring PyArrow). This should be
# used if the version permits.
"adbc": [
{
"fetch_all": "fetch_arrow",
"fetch_batches": "fetch_record_batch",
"exact_batch_size": False,
"repeat_batch_calls": False,
"minimum_version": "1.6.0",
},
{
"fetch_all": "fetch_arrow_table",
"fetch_batches": "fetch_record_batch",
"exact_batch_size": False,
"repeat_batch_calls": False,
"minimum_version": None,
},
],
"arrow_odbc_proxy": [
{
"fetch_all": "fetch_arrow_table",
"fetch_batches": "fetch_record_batches",
"exact_batch_size": True,
"repeat_batch_calls": False,
"minimum_version": None,
}
],
"databricks": [
{
"fetch_all": "fetchall_arrow",
"fetch_batches": "fetchmany_arrow",
"exact_batch_size": True,
"repeat_batch_calls": True,
"minimum_version": None,
}
],
"duckdb": [
{
"fetch_all": "fetch_arrow_table",
"fetch_batches": "fetch_record_batch",
"exact_batch_size": True,
"repeat_batch_calls": False,
"minimum_version": None,
}
],
"kuzu": [
{
"fetch_all": "get_as_pl",
"fetch_batches": None,
"exact_batch_size": None,
"repeat_batch_calls": False,
"minimum_version": "0.3.2",
}
],
"snowflake": [
{
"fetch_all": "fetch_arrow_all",
"fetch_batches": "fetch_arrow_batches",
"exact_batch_size": False,
"repeat_batch_calls": False,
"minimum_version": None,
}
],
"turbodbc": [
{
"fetch_all": "fetchallarrow",
"fetch_batches": "fetcharrowbatches",
"exact_batch_size": False,
"repeat_batch_calls": False,
"minimum_version": None,
}
],
}
| ArrowDriverProperties |
python | cython__cython | tests/run/py_classbody.py | {
"start": 423,
"end": 820
} | class ____(object):
"""
>>> TestCdefAttr.cdefvar # doctest: +ELLIPSIS
Traceback (most recent call last):
AttributeError: ...TestCdefAttr...has no attribute 'cdefvar'...
>>> TestCdefAttr.cdefval1
11
>>> #TestCdefAttr.cdefval2
"""
cdefvar = 11
cdefval1 = cdefvar
del cdefvar
# cdefval2 = cdefvar # FIXME: doesn't currently work ...
| TestCdefAttr |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.