language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | scikit-learn__scikit-learn | examples/miscellaneous/plot_metadata_routing.py | {
"start": 5499,
"end": 13510
} | class ____(MetaEstimatorMixin, ClassifierMixin, BaseEstimator):
def __init__(self, estimator):
self.estimator = estimator
def get_metadata_routing(self):
# This method defines the routing for this meta-estimator.
# In order to do so, a `MetadataRouter` instance is created, and the
# routing is added to it. More explanations follow below.
router = MetadataRouter(owner=self).add(
estimator=self.estimator,
method_mapping=MethodMapping()
.add(caller="fit", callee="fit")
.add(caller="predict", callee="predict")
.add(caller="score", callee="score"),
)
return router
def fit(self, X, y, **fit_params):
# `get_routing_for_object` returns a copy of the `MetadataRouter`
# constructed by the above `get_metadata_routing` method, that is
# internally called.
request_router = get_routing_for_object(self)
# Meta-estimators are responsible for validating the given metadata.
# `method` refers to the parent's method, i.e. `fit` in this example.
request_router.validate_metadata(params=fit_params, method="fit")
# `MetadataRouter.route_params` maps the given metadata to the metadata
# required by the underlying estimator based on the routing information
# defined by the MetadataRouter. The output of type `Bunch` has a key
# for each consuming object and those hold keys for their consuming
# methods, which then contain key for the metadata which should be
# routed to them.
routed_params = request_router.route_params(params=fit_params, caller="fit")
# A sub-estimator is fitted and its classes are attributed to the
# meta-estimator.
self.estimator_ = clone(self.estimator).fit(X, y, **routed_params.estimator.fit)
self.classes_ = self.estimator_.classes_
return self
def predict(self, X, **predict_params):
check_is_fitted(self)
# As in `fit`, we get a copy of the object's MetadataRouter,
request_router = get_routing_for_object(self)
# then we validate the given metadata,
request_router.validate_metadata(params=predict_params, method="predict")
# and then prepare the input to the underlying `predict` method.
routed_params = request_router.route_params(
params=predict_params, caller="predict"
)
return self.estimator_.predict(X, **routed_params.estimator.predict)
# %%
# Let's break down different parts of the above code.
#
# First, the :meth:`~utils.metadata_routing.get_routing_for_object` takes our
# meta-estimator (``self``) and returns a
# :class:`~utils.metadata_routing.MetadataRouter` or, a
# :class:`~utils.metadata_routing.MetadataRequest` if the object is a consumer,
# based on the output of the estimator's ``get_metadata_routing`` method.
#
# Then in each method, we use the ``route_params`` method to construct a
# dictionary of the form ``{"object_name": {"method_name": {"metadata":
# value}}}`` to pass to the underlying estimator's method. The ``object_name``
# (``estimator`` in the above ``routed_params.estimator.fit`` example) is the
# same as the one added in the ``get_metadata_routing``. ``validate_metadata``
# makes sure all given metadata are requested to avoid silent bugs.
#
# Next, we illustrate the different behaviors and notably the type of errors
# raised.
meta_est = MetaClassifier(
estimator=ExampleClassifier().set_fit_request(sample_weight=True)
)
meta_est.fit(X, y, sample_weight=my_weights)
# %%
# Note that the above example is calling our utility function
# `check_metadata()` via the `ExampleClassifier`. It checks that
# ``sample_weight`` is correctly passed to it. If it is not, like in the
# following example, it would print that ``sample_weight`` is ``None``:
meta_est.fit(X, y)
# %%
# If we pass an unknown metadata, an error is raised:
try:
meta_est.fit(X, y, test=my_weights)
except TypeError as e:
print(e)
# %%
# And if we pass a metadata which is not explicitly requested:
try:
meta_est.fit(X, y, sample_weight=my_weights).predict(X, groups=my_groups)
except ValueError as e:
print(e)
# %%
# Also, if we explicitly set it as not requested, but it is provided:
meta_est = MetaClassifier(
estimator=ExampleClassifier()
.set_fit_request(sample_weight=True)
.set_predict_request(groups=False)
)
try:
meta_est.fit(X, y, sample_weight=my_weights).predict(X[:3, :], groups=my_groups)
except TypeError as e:
print(e)
# %%
# Another concept to introduce is **aliased metadata**. This is when an
# estimator requests a metadata with a different variable name than the default
# variable name. For instance, in a setting where there are two estimators in a
# pipeline, one could request ``sample_weight1`` and the other
# ``sample_weight2``. Note that this doesn't change what the estimator expects,
# it only tells the meta-estimator how to map the provided metadata to what is
# required. Here's an example, where we pass ``aliased_sample_weight`` to the
# meta-estimator, but the meta-estimator understands that
# ``aliased_sample_weight`` is an alias for ``sample_weight``, and passes it as
# ``sample_weight`` to the underlying estimator:
meta_est = MetaClassifier(
estimator=ExampleClassifier().set_fit_request(sample_weight="aliased_sample_weight")
)
meta_est.fit(X, y, aliased_sample_weight=my_weights)
# %%
# Passing ``sample_weight`` here will fail since it is requested with an
# alias and ``sample_weight`` with that name is not requested:
try:
meta_est.fit(X, y, sample_weight=my_weights)
except TypeError as e:
print(e)
# %%
# This leads us to the ``get_metadata_routing``. The way routing works in
# scikit-learn is that consumers request what they need, and routers pass that
# along. Additionally, a router exposes what it requires itself so that it can
# be used inside another router, e.g. a pipeline inside a grid search object.
# The output of the ``get_metadata_routing`` which is a dictionary
# representation of a :class:`~utils.metadata_routing.MetadataRouter`, includes
# the complete tree of requested metadata by all nested objects and their
# corresponding method routings, i.e. which method of a sub-estimator is used
# in which method of a meta-estimator:
print_routing(meta_est)
# %%
# As you can see, the only metadata requested for method ``fit`` is
# ``"sample_weight"`` with ``"aliased_sample_weight"`` as the alias. The
# ``~utils.metadata_routing.MetadataRouter`` class enables us to easily create
# the routing object which would create the output we need for our
# ``get_metadata_routing``.
#
# In order to understand how aliases work in meta-estimators, imagine our
# meta-estimator inside another one:
meta_meta_est = MetaClassifier(estimator=meta_est).fit(
X, y, aliased_sample_weight=my_weights
)
# %%
# In the above example, this is how the ``fit`` method of `meta_meta_est`
# will call their sub-estimator's ``fit`` methods::
#
# # user feeds `my_weights` as `aliased_sample_weight` into `meta_meta_est`:
# meta_meta_est.fit(X, y, aliased_sample_weight=my_weights):
# ...
#
# # the first sub-estimator (`meta_est`) expects `aliased_sample_weight`
# self.estimator_.fit(X, y, aliased_sample_weight=aliased_sample_weight):
# ...
#
# # the second sub-estimator (`est`) expects `sample_weight`
# self.estimator_.fit(X, y, sample_weight=aliased_sample_weight):
# ...
# %%
# Consuming and routing Meta-Estimator
# ------------------------------------
# For a slightly more complex example, consider a meta-estimator that routes
# metadata to an underlying estimator as before, but it also uses some metadata
# in its own methods. This meta-estimator is a consumer and a router at the
# same time. Implementing one is very similar to what we had before, but with a
# few tweaks.
| MetaClassifier |
python | getsentry__sentry | tests/sentry/relocation/tasks/test_process.py | {
"start": 3679,
"end": 8599
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
# Create a collision with the org slug we'll be requesting.
self.requested_org_slug = "testing"
self.existing_org_owner = self.create_user(
email="existing_org_owner@example.com",
is_superuser=False,
is_staff=False,
is_active=True,
)
self.existing_org = self.create_organization(
name=self.requested_org_slug, owner=self.existing_org_owner
)
self.owner = self.create_user(
email="owner@example.com", is_superuser=False, is_staff=False, is_active=True
)
self.superuser = self.create_user(
email="superuser@example.com", is_superuser=True, is_staff=True, is_active=True
)
self.login_as(user=self.superuser, superuser=True)
self.relocation: Relocation = Relocation.objects.create(
creator_id=self.superuser.id,
owner_id=self.owner.id,
want_org_slugs=[self.requested_org_slug],
step=Relocation.Step.UPLOADING.value,
)
self.relocation_file = RelocationFile.objects.create(
relocation=self.relocation,
file=self.file,
kind=RelocationFile.Kind.RAW_USER_DATA.value,
)
self.uuid = str(self.relocation.uuid)
@cached_property
def file(self):
with TemporaryDirectory() as tmp_dir:
(priv_key_pem, pub_key_pem) = generate_rsa_key_pair()
tmp_priv_key_path = Path(tmp_dir).joinpath("key")
self.priv_key_pem = priv_key_pem
with open(tmp_priv_key_path, "wb") as f:
f.write(priv_key_pem)
tmp_pub_key_path = Path(tmp_dir).joinpath("key.pub")
self.pub_key_pem = pub_key_pem
with open(tmp_pub_key_path, "wb") as f:
f.write(pub_key_pem)
with open(IMPORT_JSON_FILE_PATH, "rb") as f:
data = json.load(f)
with open(tmp_pub_key_path, "rb") as p:
file = File.objects.create(name="export.tar", type=RELOCATION_FILE_TYPE)
self.tarball = create_encrypted_export_tarball(
data, LocalFileEncryptor(p)
).getvalue()
file.putfile(BytesIO(self.tarball))
return file
def swap_relocation_file_with_data_from_fixture(
self, file: File, fixture_name: str, blob_size: int = RELOCATION_BLOB_SIZE
) -> None:
with open(get_fixture_path("backup", fixture_name), "rb") as fp:
return self.swap_relocation_file(file, BytesIO(fp.read()), blob_size)
def swap_relocation_file(
self, file: File, contents: BytesIO, blob_size: int = RELOCATION_BLOB_SIZE
) -> None:
with TemporaryDirectory() as tmp_dir:
tmp_priv_key_path = Path(tmp_dir).joinpath("key")
tmp_pub_key_path = Path(tmp_dir).joinpath("key.pub")
with open(tmp_priv_key_path, "wb") as f:
f.write(self.priv_key_pem)
with open(tmp_pub_key_path, "wb") as f:
f.write(self.pub_key_pem)
data = json.load(contents)
with open(tmp_pub_key_path, "rb") as p:
self.tarball = create_encrypted_export_tarball(
data, LocalFileEncryptor(p)
).getvalue()
file.putfile(BytesIO(self.tarball), blob_size=blob_size)
def mock_kms_client(self, fake_kms_client: Mock):
if not hasattr(self, "tarball"):
_ = self.file
unwrapped = unwrap_encrypted_export_tarball(BytesIO(self.tarball))
plaintext_dek = LocalFileDecryptor.from_bytes(
self.priv_key_pem
).decrypt_data_encryption_key(unwrapped)
fake_kms_client.return_value.asymmetric_decrypt.return_value = SimpleNamespace(
plaintext=plaintext_dek,
plaintext_crc32c=crc32c(plaintext_dek),
)
fake_kms_client.return_value.get_public_key.return_value = SimpleNamespace(
pem=self.pub_key_pem.decode()
)
def mock_cloudbuild_client(self, fake_cloudbuild_client: Mock, status: Build.Status):
fake_cloudbuild_client.return_value.create_build.return_value = SimpleNamespace(
metadata=SimpleNamespace(build=SimpleNamespace(id=uuid4().hex))
)
fake_cloudbuild_client.return_value.get_build.return_value = SimpleNamespace(status=status)
def mock_message_builder(self, fake_message_builder: Mock):
fake_message_builder.return_value.send_async.return_value = Mock()
@patch("sentry.backup.crypto.KeyManagementServiceClient")
@patch("sentry.relocation.utils.MessageBuilder")
@patch("sentry.relocation.tasks.process.uploading_complete.apply_async")
@region_silo_test(regions=SAAS_TO_SAAS_TEST_REGIONS)
| RelocationTaskTestCase |
python | pytorch__pytorch | torch/_dynamo/variables/functions.py | {
"start": 75370,
"end": 76374
} | class ____(SkipFunctionVariable):
def __init__(
self,
wrapped: VariableTracker,
context: "ContextWrappingVariable",
**kwargs: Any,
) -> None:
kwargs.pop("value", None)
kwargs.pop("reason", None)
super().__init__(wrapped.value, reason=wrapped.reason, **kwargs) # type: ignore[attr-defined]
self.wrapped = wrapped
self.context = context
def call_function(
self,
tx: "InstructionTranslator",
args: Sequence[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
self.context.enter(tx)
result = super().call_function(tx, args, kwargs)
self.context.exit(tx)
return result
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen.add_push_null(lambda: codegen(self.context)) # type: ignore[arg-type]
codegen(self.wrapped)
codegen.extend_output(create_call_function(1, False))
| WrappedSkipFunctionVariable |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/sensors/test_ecs.py | {
"start": 2783,
"end": 4242
} | class ____(EcsBaseTestCase):
@pytest.mark.parametrize("aws_conn_id", [None, NOTSET, "aws_test_conn"])
@pytest.mark.parametrize("region_name", [None, NOTSET, "ca-central-1"])
def test_initialise_operator(self, aws_conn_id, region_name):
"""Test sensor initialize."""
op_kw = {"aws_conn_id": aws_conn_id, "region_name": region_name}
op_kw = {k: v for k, v in op_kw.items() if v is not NOTSET}
op = EcsBaseSensor(task_id="test_ecs_base", **op_kw)
assert op.aws_conn_id == (aws_conn_id if aws_conn_id is not NOTSET else "aws_default")
assert op.region_name == (region_name if region_name is not NOTSET else None)
@pytest.mark.parametrize("aws_conn_id", [None, NOTSET, "aws_test_conn"])
@pytest.mark.parametrize("region_name", [None, NOTSET, "ca-central-1"])
def test_hook_and_client(self, aws_conn_id, region_name):
"""Test initialize ``EcsHook`` and ``boto3.client``."""
op_kw = {"aws_conn_id": aws_conn_id, "region_name": region_name}
op_kw = {k: v for k, v in op_kw.items() if v is not NOTSET}
op = EcsBaseSensor(task_id="test_ecs_base_hook_client", **op_kw)
hook = op.hook
assert op.hook is hook, "'hook' property should be cached."
assert isinstance(op.hook, EcsHook)
client = op.client
assert op.client is client, "'client' property should be cached."
assert client is self.fake_client
| TestEcsBaseSensor |
python | pytorch__pytorch | torch/_dynamo/trace_rules.py | {
"start": 127316,
"end": 144877
} | class ____:
"""
Track a set of `id()`s of objects which are either allowed or not
allowed to go into the generated FX graph. Use to test for torch.*,
numpy.*, builtins.*, etc.
Support user modification to permit customization of what can be
added to the graph and what will cause a graph break.
"""
function_ids: Optional[set[int]] = None
function_names: Optional[dict[int, str]] = None
def __init__(
self, lazy_initializer: Callable[[], Union[dict[int, str], set[int]]]
) -> None:
self.lazy_initializer = lazy_initializer
def __call__(self) -> set[int]:
if self.function_ids is None:
value = self.lazy_initializer()
if isinstance(value, dict):
self.function_ids = set(value.keys())
self.function_names = value
else:
assert isinstance(value, set)
self.function_ids = value
return self.function_ids
def get_name(self, idx: int, default: str) -> str:
self() # lazy init
assert self.function_names is not None
return self.function_names.get(idx, default)
def add(self, idx: int) -> None:
function_ids = self() # lazy init
function_ids.add(idx)
def remove(self, idx: int) -> None:
function_ids = self()
if idx in function_ids:
function_ids.remove(idx)
def __contains__(self, idx: int) -> bool:
return idx in self()
@FunctionIdSet
def _allowed_callable_ids() -> dict[int, str]:
rv: dict[int, str] = {}
return rv
@FunctionIdSet
def _disallowed_callable_ids() -> dict[int, str]:
rv: dict[int, str] = {}
return rv
@FunctionIdSet
def _nonstrict_trace_callable_ids() -> dict[int, str]:
rv: dict[int, str] = {}
return rv
@FunctionIdSet
def _builtin_function_ids() -> dict[int, str]:
# See also torch/_dynamo/polyfills/loader.py, which removes items in _builtin_function_ids
rv = {
id(v): f"builtins.{k}"
for k, v in builtins.__dict__.items()
if not k.startswith("_") and callable(v)
}
rv.update(
{
id(v): f"operator.{k}"
for k, v in operator.__dict__.items()
if not k.startswith("_") and callable(v)
}
)
rv.update(
{
id(cast): "typing.cast",
id(copy.deepcopy): "copy.deepcopy",
}
)
return rv
@FunctionIdSet
def _polyfilled_function_ids() -> set[int]:
# See also @torch._dynamo.decorators.substitute_in_graph(...), which adds items in _polyfilled_function_ids
return set()
@FunctionIdSet
def _numpy_function_ids() -> dict[int, str]:
unsupported_funcs = {
"seed",
"ranf",
"get_bit_generator",
"RandomState",
"set_bit_generator",
"sample",
}
def is_supported(k: str, v: Any, mod: Any) -> bool:
if not callable(v):
return False
if not getattr(v, "__module__", None):
return True
if v.__module__ == mod.__name__:
return True
if (
v.__module__ == "numpy.random.mtrand"
and mod.__name__ == "numpy.random"
and k not in unsupported_funcs
):
return True
return False
rv = {}
for mod in NP_SUPPORTED_MODULES:
for k, v in mod.__dict__.items():
if is_supported(k, v, mod):
rv[id(v)] = f"{mod.__name__}.{k}"
return rv
@FunctionIdSet
def _builtin_constant_ids() -> dict[int, str]:
"""
Collects constant builtins by eliminating callable items.
"""
rv = {
id(v): f"builtins.{k}"
for k, v in builtins.__dict__.items()
if not k.startswith("_") and not callable(v)
}
return rv
_lazy_module_init: dict[str, list[Callable[[], None]]] = defaultdict(list)
def add_module_init_func(name: str, init_func: Callable[[], None]) -> None:
"""Register a module without eagerly importing it"""
# If the module is already imported, eagerly run init
assert "." not in name, f"Expected a root module name, but got {name}"
assert name not in _lazy_module_init
_lazy_module_init[name].append(init_func)
def _maybe_init_lazy_module(obj: object) -> None:
module = getattr(obj, "__module__", None)
if module is None:
return
base_module = module.split(".")[0]
init_funcs = _lazy_module_init.pop(base_module, None)
if init_funcs is not None:
for fn in init_funcs:
fn()
def is_callable_allowed(obj: Any) -> bool:
_maybe_init_lazy_module(obj)
return id(obj) in _allowed_callable_ids
def is_nonstrict_trace_callable(obj: Any) -> bool:
_maybe_init_lazy_module(obj)
return id(obj) in _nonstrict_trace_callable_ids
def is_callable_disallowed(obj: Any) -> bool:
_maybe_init_lazy_module(obj)
return id(obj) in _disallowed_callable_ids
def is_forbidden(obj: Any) -> bool:
_maybe_init_lazy_module(obj)
return inspect.getattr_static(obj, "_dynamo_forbidden", False)
def is_builtin_callable(obj: Any) -> bool:
# See also torch/_dynamo/polyfills/loader.py, which removes items in _builtin_function_ids
return id(obj) in _builtin_function_ids
def is_builtin_constant(obj: Any) -> bool:
return id(obj) in _builtin_constant_ids
def is_polyfilled_callable(obj: Any) -> bool:
# See also @torch._dynamo.decorators.substitute_in_graph(...), which adds items in _polyfilled_function_ids
return id(obj) in _polyfilled_function_ids
def is_numpy(obj: Any) -> bool:
if np is None:
return False
return isinstance(obj, (np.ndarray, np.generic)) or id(obj) in _numpy_function_ids
def is_numpy_dtype(obj: Any) -> bool:
if np is None:
return False
return isinstance(obj, np.dtype)
def is_numpy_type_info(obj: Any) -> bool:
if np is None:
return False
return isinstance(obj, (np.finfo, np.iinfo))
BUILTIN_SKIPLIST = (
abc,
copy,
random,
traceback,
linecache,
)
# third party libraries skiplist is defined by str, because users may not use these libraries.
# we should use lazy import & skip in the future.
THIRDPARTY_SKIPLIST = (
"fx2trt_oss",
"hypothesis",
"networkx",
"numpy",
"onnx",
"onnxruntime",
"onnx_tf",
"pandas",
"sklearn",
"tabulate",
"tensorflow",
"tensorrt",
"torch2trt",
"tqdm",
"tree",
"tvm",
"xarray",
)
def _as_posix_path(path: str) -> str:
posix_path = Path(os.path.normpath(path)).as_posix()
# os.path.normpath and pathlib.Path remove trailing slash, so we need to add it back
if path.endswith((os.path.sep, "/")):
posix_path += "/"
return posix_path
def _strip_init_py(s: str) -> str:
suffix = "__init__.py"
s = s.removesuffix(suffix)
return _as_posix_path(s)
def _module_dir(m: types.ModuleType) -> Optional[str]:
# Protect against a module not exporting __file__ - this can happen for
# frozen modules, for example.
file = getattr(m, "__file__", None)
return file and _strip_init_py(file)
# These are legacy workarounds, don't add new modules to this list.
# Please use the MOD_INLINELIST instead to force inline functions under particular modules.
#
# NB: The only thing that is different about MOD_INLINELIST and LEGACY_MOD_INLINELIST
# is the behavior of a function f2 in the module when called by a function f1
# in a module in MOD_SKIPLIST (see MOD_SKIPLIST for more details)
#
# LEGACY_MOD_INLINELIST is the same thing as Dynamo's behavior on a module that
# is not in any *_INLINELIST or *_SKIPLIST.
# That being said, we prefer people to add things to MOD_INLINELIST over
# LEGACY_MOD_INLINELIST because it is less likely to break existing tests.
LEGACY_MOD_INLINELIST = {
"torch._dynamo.external_utils",
"torch._export.db.examples",
"torch._export.wrappers",
"torch._functorch.apis",
"torch._functorch.deprecated",
"torch.nn.attention.flex_attention",
"torch.ao.quantization.stubs",
"torch.ao.quantization.pt2e.export_utils",
"torch.ao.quantization.pt2e.qat_utils",
"torch.ao.quantization.pt2e.representation.rewrite",
"torch.ao.quantization.pt2e.utils",
"torch.ao.quantization.quantizer.xnnpack_quantizer",
"torch.export.unflatten",
}
if torch.distributed.is_available():
LEGACY_MOD_INLINELIST |= {
"torch.distributed.tensor._api",
"torch.distributed.tensor.device_mesh",
"torch.distributed.device_mesh",
"torch.distributed.algorithms._checkpoint.checkpoint_wrapper",
"torch.distributed.tensor.parallel._data_parallel_utils",
"torch.distributed.tensor.parallel._utils",
"torch.distributed.tensor.parallel.style",
# we have to add replicate to LEGACY_MOD_INLINELIST to ensure
# the forward_hook won't be ignored.
"torch.distributed._composable.replicate",
}
if not config.skip_fsdp_hooks:
LEGACY_MOD_INLINELIST.add("torch.distributed.fsdp._fully_shard")
# Force inline functions under these modules, even they are in *_SKIPLIST.
# We are using python module name instead of file or directory object to avoid circular dependency.
# Please keep this sorted alphabetically.
#
# Btw, it is not "ideal" for something to be in MOD_INLINELIST. If Dynamo
# fully supports a module, then the ideal case is that it is not in
# any *_INLINELIST or *_SKIPLIST: then, the behavior of Dynamo is that
# it will always inline into functions in the module.
MOD_INLINELIST = [
"torch._decomp",
"torch._dynamo._trace_wrapped_higher_order_op",
"torch._dynamo.compiled_autograd",
"torch._dynamo.comptime",
"torch._dynamo.polyfills",
"torch._dynamo.test_case",
"torch._export.non_strict_utils",
"torch._functorch._aot_autograd.subclass_parametrization",
"torch._functorch.autograd_function",
"torch._functorch.eager_transforms",
"torch._functorch.functional_call",
"torch._functorch.pyfunctorch",
"torch._functorch.vmap",
"torch._inductor.test_operators",
"torch._library.autograd",
"torch._library.custom_ops",
"torch._ops",
"torch._prims",
"torch._refs",
"torch._tensor",
"torch.amp.autocast_mode",
"torch.ao.nn",
"torch.autograd.function",
"torch.backends.cuda",
"torch.cuda.amp.autocast_mode",
"torch.distributions",
"torch.export._tree_utils",
"torch.export._unlift",
"torch.export._wrapper_utils",
"torch.fx._pytree",
"torch.fx._symbolic_trace",
"torch.fx.experimental.proxy_tensor",
"torch.fx.passes.shape_prop",
"torch.fx.traceback",
"torch.nn",
"torch.overrides",
"torch.random",
"torch.return_types",
"torch.sparse",
"torch.testing",
"torch.utils._content_store",
"torch.utils._contextlib",
"torch.utils._cxx_pytree",
"torch.utils._device",
"torch.utils._foreach_utils",
"torch.utils._python_dispatch",
"torch.utils._pytree",
"torch.utils.hooks",
]
assert sorted(set(MOD_INLINELIST)) == MOD_INLINELIST
MOD_INLINELIST = set(MOD_INLINELIST)
if torch.distributed.is_available():
MOD_INLINELIST.add("torch.distributed")
if not config.skip_fsdp_hooks:
MOD_INLINELIST.add("torch.distributed.fsdp._fully_shard")
# By default, all functions under these modules are skipped.
# All the other knobs
# (torch_name_rule_map, MOD_INLINELIST, LEGACY_MOD_INLINELIST)
# take precedence over this list; e.g. if a function is in
# MOD_INLINELIST and MOD_SKIPLIST, then it will be inlined.
# See "A note on skip/inline rules" for more details.
#
# The skip is NOT recursive. If a function f1 in a module in MOD_SKIPLIST
# calls out to another function f2 in some other module, then Dynamo's
# behavior (skip/inline) depends on what we've marked f2 as:
# - if f2 is a function in a module in MOD_SKIPLIST, then we skip f2
# - if f2 is a function in a module in MOD_INLINELIST, then we skip f2
# - if f2 is a function in a module in LEGACY_MOD_INLINELIST, then we inline f2
# - if f2 is a function in a module not in any *_LIST, then we inline f2
MOD_SKIPLIST = [
"torch._VF",
"torch.__future__",
"torch.__init__",
"torch._awaits",
"torch._classes",
"torch._compile",
"torch._custom_op",
"torch._custom_ops",
"torch._decomp",
"torch._dispatch",
"torch._dynamo",
"torch._export",
"torch._functorch",
"torch._guards",
"torch._higher_order_ops.effects",
"torch._higher_order_ops.torchbind",
"torch._higher_order_ops.wrap",
"torch._inductor",
"torch._jit_internal",
"torch._lazy",
"torch._library",
"torch._linalg_utils",
"torch._lobpcg",
"torch._logging",
"torch._lowrank",
"torch._meta_registrations",
"torch._namedtensor_internals",
"torch._numpy",
"torch._ops",
"torch._prims",
"torch._prims_common",
"torch._python_dispatcher",
"torch._refs",
"torch._strobelight",
"torch._subclasses",
"torch._tensor",
"torch._tensor_str",
"torch._thread_safe_fork",
"torch._utils",
"torch._utils_internal",
"torch._vmap_internals",
"torch._weights_only_unpickler",
"torch.accelerator",
"torch.amp",
"torch.ao",
"torch.autograd",
"torch.backends",
"torch.compiler",
"torch.contrib",
"torch.cpu",
"torch.cuda",
"torch.distributed",
"torch.distributions",
"torch.export",
"torch.fb",
"torch.fft",
"torch.functional",
"torch.futures",
"torch.fx",
"torch.hub",
"torch.jit",
"torch.library",
"torch.linalg",
"torch.masked",
"torch.monitor",
"torch.mps",
"torch.mtia",
"torch.multiprocessing",
"torch.nested",
"torch.nn",
"torch.onnx",
"torch.overrides",
"torch.package",
"torch.profiler",
"torch.quantization",
"torch.quasirandom",
"torch.random",
"torch.serialization",
"torch.signal",
"torch.sparse",
"torch.special",
"torch.storage",
"torch.testing",
"torch.types",
"torch.utils",
"torch.xpu",
]
assert sorted(set(MOD_SKIPLIST)) == MOD_SKIPLIST
MOD_SKIPLIST = set(MOD_SKIPLIST)
@functools.cache
def get_legacy_mod_inlinelist() -> set[str]:
torch_dir = _module_dir(torch)
if torch_dir is None:
return set()
inlinelist = {
_as_posix_path(torch_dir + m[len("torch.") :].replace(".", "/"))
for m in LEGACY_MOD_INLINELIST
}
return inlinelist
@functools.cache
def get_mod_inlinelist() -> set[str]:
torch_dir = _module_dir(torch)
if torch_dir is None:
return set()
inlinelist = {
_as_posix_path(torch_dir + m[len("torch.") :].replace(".", "/"))
for m in MOD_INLINELIST
}
return inlinelist
@functools.cache
def get_mod_skiplist() -> set[str]:
torch_dir = _module_dir(torch)
if torch_dir is None:
return set()
skiplist = {
_as_posix_path(torch_dir + m[len("torch.") :].replace(".", "/"))
for m in MOD_SKIPLIST
}
return skiplist
# skip some standard python builtin libs
SKIP_DIRS = [
"<frozen importlib",
"<frozen abc",
"<__array_function__ internals>",
_as_posix_path(_config_module.__file__),
"triton/backends",
]
SKIP_DIRS.extend(map(_as_posix_path, filter(None, map(_module_dir, BUILTIN_SKIPLIST))))
SKIP_DIRS_RE = re.compile(r"match nothing^")
# Skip fbcode paths(including torch.package paths) containing
# one of the following strings.
FBCODE_SKIP_DIRS: set[str] = set()
FBCODE_SKIP_DIRS_RE = re.compile(f".*({'|'.join(map(re.escape, FBCODE_SKIP_DIRS))})")
# Remove this after fbcode is fully migrated to tracing through torchrec.
FBCODE_SKIP_TORCHREC_DIRS = {
"torchrec/distributed",
"torchrec/fb/distributed",
"caffe2/torch/fb/sparsenn/pooled_embeddings_modules.py",
}
FBCODE_SKIP_TORCHREC_DIRS_RE = re.compile(
f".*({'|'.join(re.escape(_as_posix_path(d)) for d in FBCODE_SKIP_TORCHREC_DIRS)})"
)
# TODO(yanboliang, anijain2305) - There are a few concerns that we should
# resolve
# 1) Audit if torchrec/distributed is even required in FBCODE_SKIPS_DIR
# 2) To inline just one file but skip others in a directory, we could use
# manual_torch_name_rule_map but this one is hard because FBCODE can add unusual
# names like torch_package.
# So, this is a stop gap solution till then.
FBCODE_INLINE_FILES_IN_SKIPPED_DIRS = {
"torchrec/distributed/types.py",
}
FBCODE_INLINE_FILES_IN_SKIPPED_DIRS_RE = re.compile(
f".*({'|'.join(re.escape(_as_posix_path(d)) for d in FBCODE_INLINE_FILES_IN_SKIPPED_DIRS)})"
)
# torch.optim is a special case,
# we usually want to inline it, but the directory
# structure does not match the module structure
# and we want to skip the functions in optim/lr_scheduler.py
# this has precedence over all other rules in check_file
FORCE_SKIP_FILES = {f"{_module_dir(torch)}optim/lr_scheduler.py"}
def _recompile_re() -> None:
global SKIP_DIRS_RE
SKIP_DIRS_RE = re.compile(
rf"^[^\s<]*({'|'.join(re.escape(_as_posix_path(d)) for d in SKIP_DIRS)})"
)
def add(import_name: str) -> None:
if isinstance(import_name, types.ModuleType):
return add(import_name.__name__)
assert isinstance(import_name, str)
from importlib.util import find_spec
module_spec = find_spec(import_name)
if not module_spec:
return
origin = module_spec.origin
if origin is None:
return
SKIP_DIRS.append(_strip_init_py(origin))
_recompile_re()
@dataclasses.dataclass
| FunctionIdSet |
python | ray-project__ray | python/ray/_private/worker.py | {
"start": 4085,
"end": 4343
} | class ____(HasOptions, Generic[R]):
def __init__(self, function: Callable[[], R]) -> None:
pass
def remote(
self,
) -> "ObjectRef[R]":
...
def bind(
self,
) -> "DAGNode[R]":
...
| RemoteFunctionNoArgs |
python | getsentry__sentry | src/sentry/backup/sanitize.py | {
"start": 1102,
"end": 1203
} | class ____(Exception):
"""
A catch-all class for sanitization errors.
"""
| SanitizationError |
python | ray-project__ray | python/ray/cloudpickle/cloudpickle.py | {
"start": 19053,
"end": 39896
} | class ____:
"""Sentinel for empty closures."""
@classmethod
def __reduce__(cls):
return cls.__name__
def _make_function(code, globals, name, argdefs, closure):
# Setting __builtins__ in globals is needed for nogil CPython.
globals["__builtins__"] = __builtins__
return types.FunctionType(code, globals, name, argdefs, closure)
def _make_empty_cell():
if False:
# trick the compiler into creating an empty cell in our lambda
cell = None
raise AssertionError("this route should not be executed")
return (lambda: cell).__closure__[0]
def _make_cell(value=_empty_cell_value):
cell = _make_empty_cell()
if value is not _empty_cell_value:
cell.cell_contents = value
return cell
def _make_skeleton_class(
type_constructor, name, bases, type_kwargs, class_tracker_id, extra
):
"""Build dynamic class with an empty __dict__ to be filled once memoized
If class_tracker_id is not None, try to lookup an existing class definition
matching that id. If none is found, track a newly reconstructed class
definition under that id so that other instances stemming from the same
class id will also reuse this class definition.
The "extra" variable is meant to be a dict (or None) that can be used for
forward compatibility shall the need arise.
"""
skeleton_class = types.new_class(
name, bases, {"metaclass": type_constructor}, lambda ns: ns.update(type_kwargs)
)
return _lookup_class_or_track(class_tracker_id, skeleton_class)
def _make_skeleton_enum(
bases, name, qualname, members, module, class_tracker_id, extra
):
"""Build dynamic enum with an empty __dict__ to be filled once memoized
The creation of the enum class is inspired by the code of
EnumMeta._create_.
If class_tracker_id is not None, try to lookup an existing enum definition
matching that id. If none is found, track a newly reconstructed enum
definition under that id so that other instances stemming from the same
class id will also reuse this enum definition.
The "extra" variable is meant to be a dict (or None) that can be used for
forward compatibility shall the need arise.
"""
# enums always inherit from their base Enum class at the last position in
# the list of base classes:
enum_base = bases[-1]
metacls = enum_base.__class__
classdict = metacls.__prepare__(name, bases)
for member_name, member_value in members.items():
classdict[member_name] = member_value
enum_class = metacls.__new__(metacls, name, bases, classdict)
enum_class.__module__ = module
enum_class.__qualname__ = qualname
return _lookup_class_or_track(class_tracker_id, enum_class)
def _make_typevar(name, bound, constraints, covariant, contravariant, class_tracker_id):
tv = typing.TypeVar(
name,
*constraints,
bound=bound,
covariant=covariant,
contravariant=contravariant,
)
return _lookup_class_or_track(class_tracker_id, tv)
def _decompose_typevar(obj):
return (
obj.__name__,
obj.__bound__,
obj.__constraints__,
obj.__covariant__,
obj.__contravariant__,
_get_or_create_tracker_id(obj),
)
def _typevar_reduce(obj):
# TypeVar instances require the module information hence why we
# are not using the _should_pickle_by_reference directly
module_and_name = _lookup_module_and_qualname(obj, name=obj.__name__)
if module_and_name is None:
return (_make_typevar, _decompose_typevar(obj))
elif _is_registered_pickle_by_value(module_and_name[0]):
return (_make_typevar, _decompose_typevar(obj))
return (getattr, module_and_name)
def _get_bases(typ):
if "__orig_bases__" in getattr(typ, "__dict__", {}):
# For generic types (see PEP 560)
# Note that simply checking `hasattr(typ, '__orig_bases__')` is not
# correct. Subclasses of a fully-parameterized generic class does not
# have `__orig_bases__` defined, but `hasattr(typ, '__orig_bases__')`
# will return True because it's defined in the base class.
bases_attr = "__orig_bases__"
else:
# For regular class objects
bases_attr = "__bases__"
return getattr(typ, bases_attr)
def _make_dict_keys(obj, is_ordered=False):
if is_ordered:
return OrderedDict.fromkeys(obj).keys()
else:
return dict.fromkeys(obj).keys()
def _make_dict_values(obj, is_ordered=False):
if is_ordered:
return OrderedDict((i, _) for i, _ in enumerate(obj)).values()
else:
return {i: _ for i, _ in enumerate(obj)}.values()
def _make_dict_items(obj, is_ordered=False):
if is_ordered:
return OrderedDict(obj).items()
else:
return obj.items()
# COLLECTION OF OBJECTS __getnewargs__-LIKE METHODS
# -------------------------------------------------
def _class_getnewargs(obj):
type_kwargs = {}
if "__module__" in obj.__dict__:
type_kwargs["__module__"] = obj.__module__
__dict__ = obj.__dict__.get("__dict__", None)
if isinstance(__dict__, property):
type_kwargs["__dict__"] = __dict__
return (
type(obj),
obj.__name__,
_get_bases(obj),
type_kwargs,
_get_or_create_tracker_id(obj),
None,
)
def _enum_getnewargs(obj):
members = {e.name: e.value for e in obj}
return (
obj.__bases__,
obj.__name__,
obj.__qualname__,
members,
obj.__module__,
_get_or_create_tracker_id(obj),
None,
)
# COLLECTION OF OBJECTS RECONSTRUCTORS
# ------------------------------------
def _file_reconstructor(retval):
return retval
# COLLECTION OF OBJECTS STATE GETTERS
# -----------------------------------
def _function_getstate(func):
# - Put func's dynamic attributes (stored in func.__dict__) in state. These
# attributes will be restored at unpickling time using
# f.__dict__.update(state)
# - Put func's members into slotstate. Such attributes will be restored at
# unpickling time by iterating over slotstate and calling setattr(func,
# slotname, slotvalue)
slotstate = {
"__name__": func.__name__,
"__qualname__": func.__qualname__,
"__annotations__": func.__annotations__,
"__kwdefaults__": func.__kwdefaults__,
"__defaults__": func.__defaults__,
"__module__": func.__module__,
"__doc__": func.__doc__,
"__closure__": func.__closure__,
}
f_globals_ref = _extract_code_globals(func.__code__)
f_globals = {k: func.__globals__[k] for k in f_globals_ref if k in func.__globals__}
if func.__closure__ is not None:
closure_values = list(map(_get_cell_contents, func.__closure__))
else:
closure_values = ()
# Extract currently-imported submodules used by func. Storing these modules
# in a smoke _cloudpickle_subimports attribute of the object's state will
# trigger the side effect of importing these modules at unpickling time
# (which is necessary for func to work correctly once depickled)
slotstate["_cloudpickle_submodules"] = _find_imported_submodules(
func.__code__, itertools.chain(f_globals.values(), closure_values)
)
slotstate["__globals__"] = f_globals
state = func.__dict__
return state, slotstate
def _class_getstate(obj):
clsdict = _extract_class_dict(obj)
clsdict.pop("__weakref__", None)
if issubclass(type(obj), abc.ABCMeta):
# If obj is an instance of an ABCMeta subclass, don't pickle the
# cache/negative caches populated during isinstance/issubclass
# checks, but pickle the list of registered subclasses of obj.
clsdict.pop("_abc_cache", None)
clsdict.pop("_abc_negative_cache", None)
clsdict.pop("_abc_negative_cache_version", None)
registry = clsdict.pop("_abc_registry", None)
if registry is None:
# The abc caches and registered subclasses of a
# class are bundled into the single _abc_impl attribute
clsdict.pop("_abc_impl", None)
(registry, _, _, _) = abc._get_dump(obj)
clsdict["_abc_impl"] = [subclass_weakref() for subclass_weakref in registry]
else:
# In the above if clause, registry is a set of weakrefs -- in
# this case, registry is a WeakSet
clsdict["_abc_impl"] = [type_ for type_ in registry]
if "__slots__" in clsdict:
# pickle string length optimization: member descriptors of obj are
# created automatically from obj's __slots__ attribute, no need to
# save them in obj's state
if isinstance(obj.__slots__, str):
clsdict.pop(obj.__slots__)
else:
for k in obj.__slots__:
clsdict.pop(k, None)
clsdict.pop("__dict__", None) # unpicklable property object
return (clsdict, {})
def _enum_getstate(obj):
clsdict, slotstate = _class_getstate(obj)
members = {e.name: e.value for e in obj}
# Cleanup the clsdict that will be passed to _make_skeleton_enum:
# Those attributes are already handled by the metaclass.
for attrname in [
"_generate_next_value_",
"_member_names_",
"_member_map_",
"_member_type_",
"_value2member_map_",
]:
clsdict.pop(attrname, None)
for member in members:
clsdict.pop(member)
# Special handling of Enum subclasses
return clsdict, slotstate
# COLLECTIONS OF OBJECTS REDUCERS
# -------------------------------
# A reducer is a function taking a single argument (obj), and that returns a
# tuple with all the necessary data to re-construct obj. Apart from a few
# exceptions (list, dict, bytes, int, etc.), a reducer is necessary to
# correctly pickle an object.
# While many built-in objects (Exceptions objects, instances of the "object"
# class, etc), are shipped with their own built-in reducer (invoked using
# obj.__reduce__), some do not. The following methods were created to "fill
# these holes".
def _code_reduce(obj):
"""code object reducer."""
# If you are not sure about the order of arguments, take a look at help
# of the specific type from types, for example:
# >>> from types import CodeType
# >>> help(CodeType)
if hasattr(obj, "co_exceptiontable"):
# Python 3.11 and later: there are some new attributes
# related to the enhanced exceptions.
args = (
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
obj.co_filename,
obj.co_name,
obj.co_qualname,
obj.co_firstlineno,
obj.co_linetable,
obj.co_exceptiontable,
obj.co_freevars,
obj.co_cellvars,
)
elif hasattr(obj, "co_linetable"):
# Python 3.10 and later: obj.co_lnotab is deprecated and constructor
# expects obj.co_linetable instead.
args = (
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
obj.co_filename,
obj.co_name,
obj.co_firstlineno,
obj.co_linetable,
obj.co_freevars,
obj.co_cellvars,
)
elif hasattr(obj, "co_nmeta"): # pragma: no cover
# "nogil" Python: modified attributes from 3.9
args = (
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_framesize,
obj.co_ndefaultargs,
obj.co_nmeta,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_varnames,
obj.co_filename,
obj.co_name,
obj.co_firstlineno,
obj.co_lnotab,
obj.co_exc_handlers,
obj.co_jump_table,
obj.co_freevars,
obj.co_cellvars,
obj.co_free2reg,
obj.co_cell2reg,
)
else:
# Backward compat for 3.8 and 3.9
args = (
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
obj.co_filename,
obj.co_name,
obj.co_firstlineno,
obj.co_lnotab,
obj.co_freevars,
obj.co_cellvars,
)
return types.CodeType, args
def _cell_reduce(obj):
"""Cell (containing values of a function's free variables) reducer."""
try:
obj.cell_contents
except ValueError: # cell is empty
return _make_empty_cell, ()
else:
return _make_cell, (obj.cell_contents,)
def _classmethod_reduce(obj):
orig_func = obj.__func__
return type(obj), (orig_func,)
def _file_reduce(obj):
"""Save a file."""
import io
if not hasattr(obj, "name") or not hasattr(obj, "mode"):
raise pickle.PicklingError(
"Cannot pickle files that do not map to an actual file"
)
if obj is sys.stdout:
return getattr, (sys, "stdout")
if obj is sys.stderr:
return getattr, (sys, "stderr")
if obj is sys.stdin:
raise pickle.PicklingError("Cannot pickle standard input")
if obj.closed:
raise pickle.PicklingError("Cannot pickle closed files")
if hasattr(obj, "isatty") and obj.isatty():
raise pickle.PicklingError("Cannot pickle files that map to tty objects")
if "r" not in obj.mode and "+" not in obj.mode:
raise pickle.PicklingError(
"Cannot pickle files that are not opened for reading: %s" % obj.mode
)
name = obj.name
retval = io.StringIO()
try:
# Read the whole file
curloc = obj.tell()
obj.seek(0)
contents = obj.read()
obj.seek(curloc)
except OSError as e:
raise pickle.PicklingError(
"Cannot pickle file %s as it cannot be read" % name
) from e
retval.write(contents)
retval.seek(curloc)
retval.name = name
return _file_reconstructor, (retval,)
def _getset_descriptor_reduce(obj):
return getattr, (obj.__objclass__, obj.__name__)
def _mappingproxy_reduce(obj):
return types.MappingProxyType, (dict(obj),)
def _memoryview_reduce(obj):
return bytes, (obj.tobytes(),)
def _module_reduce(obj):
if _should_pickle_by_reference(obj):
return subimport, (obj.__name__,)
else:
# Some external libraries can populate the "__builtins__" entry of a
# module's `__dict__` with unpicklable objects (see #316). For that
# reason, we do not attempt to pickle the "__builtins__" entry, and
# restore a default value for it at unpickling time.
state = obj.__dict__.copy()
state.pop("__builtins__", None)
return dynamic_subimport, (obj.__name__, state)
def _method_reduce(obj):
return (types.MethodType, (obj.__func__, obj.__self__))
def _logger_reduce(obj):
return logging.getLogger, (obj.name,)
def _root_logger_reduce(obj):
return logging.getLogger, ()
def _property_reduce(obj):
return property, (obj.fget, obj.fset, obj.fdel, obj.__doc__)
def _weakset_reduce(obj):
return weakref.WeakSet, (list(obj),)
def _dynamic_class_reduce(obj):
"""Save a class that can't be referenced as a module attribute.
This method is used to serialize classes that are defined inside
functions, or that otherwise can't be serialized as attribute lookups
from importable modules.
"""
if Enum is not None and issubclass(obj, Enum):
return (
_make_skeleton_enum,
_enum_getnewargs(obj),
_enum_getstate(obj),
None,
None,
_class_setstate,
)
else:
return (
_make_skeleton_class,
_class_getnewargs(obj),
_class_getstate(obj),
None,
None,
_class_setstate,
)
def _class_reduce(obj):
"""Select the reducer depending on the dynamic nature of the class obj."""
if obj is type(None): # noqa
return type, (None,)
elif obj is type(Ellipsis):
return type, (Ellipsis,)
elif obj is type(NotImplemented):
return type, (NotImplemented,)
elif obj in _BUILTIN_TYPE_NAMES:
return _builtin_type, (_BUILTIN_TYPE_NAMES[obj],)
elif not _should_pickle_by_reference(obj):
return _dynamic_class_reduce(obj)
return NotImplemented
def _dict_keys_reduce(obj):
# Safer not to ship the full dict as sending the rest might
# be unintended and could potentially cause leaking of
# sensitive information
return _make_dict_keys, (list(obj),)
def _dict_values_reduce(obj):
# Safer not to ship the full dict as sending the rest might
# be unintended and could potentially cause leaking of
# sensitive information
return _make_dict_values, (list(obj),)
def _dict_items_reduce(obj):
return _make_dict_items, (dict(obj),)
def _odict_keys_reduce(obj):
# Safer not to ship the full dict as sending the rest might
# be unintended and could potentially cause leaking of
# sensitive information
return _make_dict_keys, (list(obj), True)
def _odict_values_reduce(obj):
# Safer not to ship the full dict as sending the rest might
# be unintended and could potentially cause leaking of
# sensitive information
return _make_dict_values, (list(obj), True)
def _odict_items_reduce(obj):
return _make_dict_items, (dict(obj), True)
def _dataclass_field_base_reduce(obj):
return _get_dataclass_field_type_sentinel, (obj.name,)
# COLLECTIONS OF OBJECTS STATE SETTERS
# ------------------------------------
# state setters are called at unpickling time, once the object is created and
# it has to be updated to how it was at unpickling time.
def _function_setstate(obj, state):
"""Update the state of a dynamic function.
As __closure__ and __globals__ are readonly attributes of a function, we
cannot rely on the native setstate routine of pickle.load_build, that calls
setattr on items of the slotstate. Instead, we have to modify them inplace.
"""
state, slotstate = state
obj.__dict__.update(state)
obj_globals = slotstate.pop("__globals__")
obj_closure = slotstate.pop("__closure__")
# _cloudpickle_subimports is a set of submodules that must be loaded for
# the pickled function to work correctly at unpickling time. Now that these
# submodules are depickled (hence imported), they can be removed from the
# object's state (the object state only served as a reference holder to
# these submodules)
slotstate.pop("_cloudpickle_submodules")
obj.__globals__.update(obj_globals)
obj.__globals__["__builtins__"] = __builtins__
if obj_closure is not None:
for i, cell in enumerate(obj_closure):
try:
value = cell.cell_contents
except ValueError: # cell is empty
continue
obj.__closure__[i].cell_contents = value
for k, v in slotstate.items():
setattr(obj, k, v)
def _class_setstate(obj, state):
state, slotstate = state
registry = None
for attrname, attr in state.items():
if attrname == "_abc_impl":
registry = attr
else:
setattr(obj, attrname, attr)
if registry is not None:
for subclass in registry:
obj.register(subclass)
return obj
# COLLECTION OF DATACLASS UTILITIES
# ---------------------------------
# There are some internal sentinel values whose identity must be preserved when
# unpickling dataclass fields. Each sentinel value has a unique name that we can
# use to retrieve its identity at unpickling time.
_DATACLASSE_FIELD_TYPE_SENTINELS = {
dataclasses._FIELD.name: dataclasses._FIELD,
dataclasses._FIELD_CLASSVAR.name: dataclasses._FIELD_CLASSVAR,
dataclasses._FIELD_INITVAR.name: dataclasses._FIELD_INITVAR,
}
def _get_dataclass_field_type_sentinel(name):
return _DATACLASSE_FIELD_TYPE_SENTINELS[name]
| _empty_cell_value |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/dependency.py | {
"start": 23871,
"end": 31936
} | class ____(_DependencyProcessor):
def __init__(self, prop):
_DependencyProcessor.__init__(self, prop)
for mapper in self.mapper.self_and_descendants:
mapper._dependency_processors.append(_DetectKeySwitch(prop))
def per_property_dependencies(
self,
uow,
parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete,
):
if self.post_update:
parent_post_updates = unitofwork._PostUpdateAll(
uow, self.parent.primary_base_mapper, False
)
parent_pre_updates = unitofwork._PostUpdateAll(
uow, self.parent.primary_base_mapper, True
)
uow.dependencies.update(
[
(child_saves, after_save),
(parent_saves, after_save),
(after_save, parent_post_updates),
(after_save, parent_pre_updates),
(before_delete, parent_pre_updates),
(parent_pre_updates, child_deletes),
(parent_pre_updates, parent_deletes),
]
)
else:
uow.dependencies.update(
[
(child_saves, after_save),
(after_save, parent_saves),
(parent_saves, child_deletes),
(parent_deletes, child_deletes),
]
)
def per_state_dependencies(
self,
uow,
save_parent,
delete_parent,
child_action,
after_save,
before_delete,
isdelete,
childisdelete,
):
if self.post_update:
if not isdelete:
parent_post_updates = unitofwork._PostUpdateAll(
uow, self.parent.primary_base_mapper, False
)
if childisdelete:
uow.dependencies.update(
[
(after_save, parent_post_updates),
(parent_post_updates, child_action),
]
)
else:
uow.dependencies.update(
[
(save_parent, after_save),
(child_action, after_save),
(after_save, parent_post_updates),
]
)
else:
parent_pre_updates = unitofwork._PostUpdateAll(
uow, self.parent.primary_base_mapper, True
)
uow.dependencies.update(
[
(before_delete, parent_pre_updates),
(parent_pre_updates, delete_parent),
(parent_pre_updates, child_action),
]
)
elif not isdelete:
if not childisdelete:
uow.dependencies.update(
[(child_action, after_save), (after_save, save_parent)]
)
else:
uow.dependencies.update([(after_save, save_parent)])
else:
if childisdelete:
uow.dependencies.update([(delete_parent, child_action)])
def presort_deletes(self, uowcommit, states):
if self.cascade.delete or self.cascade.delete_orphan:
for state in states:
history = uowcommit.get_attribute_history(
state, self.key, self._passive_delete_flag
)
if history:
if self.cascade.delete_orphan:
todelete = history.sum()
else:
todelete = history.non_deleted()
for child in todelete:
if child is None:
continue
uowcommit.register_object(
child,
isdelete=True,
operation="delete",
prop=self.prop,
)
t = self.mapper.cascade_iterator("delete", child)
for c, m, st_, dct_ in t:
uowcommit.register_object(st_, isdelete=True)
def presort_saves(self, uowcommit, states):
for state in states:
uowcommit.register_object(state, operation="add", prop=self.prop)
if self.cascade.delete_orphan:
history = uowcommit.get_attribute_history(
state, self.key, self._passive_delete_flag
)
if history:
for child in history.deleted:
if self.hasparent(child) is False:
uowcommit.register_object(
child,
isdelete=True,
operation="delete",
prop=self.prop,
)
t = self.mapper.cascade_iterator("delete", child)
for c, m, st_, dct_ in t:
uowcommit.register_object(st_, isdelete=True)
def process_deletes(self, uowcommit, states):
if (
self.post_update
and not self.cascade.delete_orphan
and not self.passive_deletes == "all"
):
# post_update means we have to update our
# row to not reference the child object
# before we can DELETE the row
for state in states:
self._synchronize(state, None, None, True, uowcommit)
if state and self.post_update:
history = uowcommit.get_attribute_history(
state, self.key, self._passive_delete_flag
)
if history:
self._post_update(
state, uowcommit, history.sum(), is_m2o_delete=True
)
def process_saves(self, uowcommit, states):
for state in states:
history = uowcommit.get_attribute_history(
state, self.key, attributes.PASSIVE_NO_INITIALIZE
)
if history:
if history.added:
for child in history.added:
self._synchronize(
state, child, None, False, uowcommit, "add"
)
elif history.deleted:
self._synchronize(
state, None, None, True, uowcommit, "delete"
)
if self.post_update:
self._post_update(state, uowcommit, history.sum())
def _synchronize(
self,
state,
child,
associationrow,
clearkeys,
uowcommit,
operation=None,
):
if state is None or (
not self.post_update and uowcommit.is_deleted(state)
):
return
if (
operation is not None
and child is not None
and not uowcommit.session._contains_state(child)
):
util.warn(
"Object of type %s not in session, %s "
"operation along '%s' won't proceed"
% (mapperutil.state_class_str(child), operation, self.prop)
)
return
if clearkeys or child is None:
sync._clear(state, self.parent, self.prop.synchronize_pairs)
else:
self._verify_canload(child)
sync._populate(
child,
self.mapper,
state,
self.parent,
self.prop.synchronize_pairs,
uowcommit,
False,
)
| _ManyToOneDP |
python | FactoryBoy__factory_boy | tests/test_alchemy.py | {
"start": 774,
"end": 951
} | class ____(SQLAlchemyModelFactory):
class Meta:
model = models.StandardModel
sqlalchemy_session = None
id = factory.Sequence(lambda n: n)
| NoSessionFactory |
python | PrefectHQ__prefect | src/prefect/server/exceptions.py | {
"start": 296,
"end": 407
} | class ____(PrefectException):
"""An error raised while orchestrating a state transition"""
| OrchestrationError |
python | readthedocs__readthedocs.org | readthedocs/doc_builder/environments.py | {
"start": 12165,
"end": 17163
} | class ____(BuildCommand):
"""
Create a docker container and run a command inside the container.
Build command to execute in docker container
"""
bash_escape_re = re.compile(
r"([\s\!\"\#\$\&\'\(\)\*\:\;\<\>\?\@\[\\\]\^\`\{\|\}\~])" # noqa
)
def __init__(self, *args, escape_command=True, **kwargs):
"""
Override default to extend behavior.
:param escape_command: whether escape special chars the command before
executing it in the container. This should only be disabled on
trusted or internal commands.
:type escape_command: bool
"""
self.escape_command = escape_command
super().__init__(*args, **kwargs)
def run(self):
"""Execute command in existing Docker container."""
log.info(
"Running build command in container.",
container_id=self.build_env.container_id,
command=self.get_command(),
cwd=self.cwd,
)
self.start_time = datetime.utcnow()
client = self.build_env.get_client()
try:
exec_cmd = client.exec_create(
container=self.build_env.container_id,
cmd=self.get_wrapped_command(),
environment=self._environment,
user=self.user,
workdir=self.cwd,
stdout=True,
stderr=True,
)
out = client.exec_start(exec_id=exec_cmd["Id"], stream=False, demux=self.demux)
cmd_stdout = ""
cmd_stderr = ""
if self.demux:
cmd_stdout, cmd_stderr = out
else:
cmd_stdout = out
self.output = self.decode_output(cmd_stdout)
self.error = self.decode_output(cmd_stderr)
cmd_ret = client.exec_inspect(exec_id=exec_cmd["Id"])
self.exit_code = cmd_ret["ExitCode"]
# Docker will exit with a special exit code to signify the command
# was killed due to memory usage. We try to make the error code
# nicer here. However, sometimes the kernel kills the command and
# Docker does not use the specific exit code, so we check if the
# word `Killed` is in the last 15 lines of the command's output.
#
# NOTE: the work `Killed` could appear in the output because the
# command was killed by OOM or timeout so we put a generic message here.
killed_in_output = "Killed" in "\n".join(
self.output.splitlines()[-15:],
)
if self.exit_code == DOCKER_OOM_EXIT_CODE or (self.exit_code == 1 and killed_in_output):
self.output += str(
_(
"\n\nCommand killed due to timeout or excessive memory consumption\n",
),
)
except DockerAPIError:
self.exit_code = -1
if self.output is None or not self.output:
self.output = _("Command exited abnormally")
finally:
self.end_time = datetime.utcnow()
def get_wrapped_command(self):
"""
Wrap command in a shell and optionally escape special bash characters.
In order to set the current working path inside a docker container, we
need to wrap the command in a shell call manually.
Some characters will be interpreted as shell characters without
escaping, such as: ``pip install requests<0.8``. When passing
``escape_command=True`` in the init method this escapes a good majority
of those characters.
"""
prefix = ""
if self.bin_path:
bin_path = self._escape_command(self.bin_path)
prefix += f"PATH={bin_path}:$PATH "
command = " ".join(
self._escape_command(part) if self.escape_command else part for part in self.command
)
if prefix:
# Using `;` or `\n` to separate the `prefix` where we define the
# variables with the `command` itself, have the same effect.
# However, using `;` is more explicit.
# See https://github.com/readthedocs/readthedocs.org/pull/10334
return f"/bin/sh -c '{prefix}; {command}'"
return f"/bin/sh -c '{command}'"
def _escape_command(self, cmd):
r"""Escape the command by prefixing suspicious chars with `\`."""
command = self.bash_escape_re.sub(r"\\\1", cmd)
# HACK: avoid escaping variables that we need to use in the commands
not_escape_variables = (
"READTHEDOCS_OUTPUT",
"READTHEDOCS_VIRTUALENV_PATH",
"READTHEDOCS_GIT_CLONE_TOKEN",
"CONDA_ENVS_PATH",
"CONDA_DEFAULT_ENV",
)
for variable in not_escape_variables:
command = command.replace(f"\\${variable}", f"${variable}")
return command
| DockerBuildCommand |
python | readthedocs__readthedocs.org | readthedocs/doc_builder/backends/sphinx.py | {
"start": 7200,
"end": 8832
} | class ____(BaseSphinx):
sphinx_builder = "singlehtml"
relative_output_dir = "htmlzip"
def _post_build(self):
"""Internal post build to create the ZIP file from the HTML output."""
target_file = os.path.join(
self.absolute_container_output_dir,
# TODO: shouldn't this name include the name of the version as well?
# It seems we were using the project's slug previously.
# So, keeping it like that for now until we decide make that adjustment.
f"{self.project.slug}.zip",
)
# **SECURITY CRITICAL: Advisory GHSA-hqwg-gjqw-h5wg**
# Move the directory into a temporal directory,
# so we can rename the directory for zip to use
# that prefix when zipping the files (arcname).
mktemp = self.run("mktemp", "--directory", record=False)
tmp_dir = Path(mktemp.output.strip())
dirname = f"{self.project.slug}-{self.version.slug}"
self.run(
"mv",
self.absolute_container_output_dir,
str(tmp_dir / dirname),
cwd=self.project_path,
record=False,
)
self.run(
"mkdir",
"--parents",
self.absolute_container_output_dir,
cwd=self.project_path,
record=False,
)
self.run(
"zip",
"--recurse-paths", # Include all files and directories.
"--symlinks", # Don't resolve symlinks.
target_file,
dirname,
cwd=str(tmp_dir),
record=False,
)
| LocalMediaBuilder |
python | doocs__leetcode | solution/0100-0199/0145.Binary Tree Postorder Traversal/Solution3.py | {
"start": 192,
"end": 842
} | class ____:
def postorderTraversal(self, root: Optional[TreeNode]) -> List[int]:
ans = []
while root:
if root.right is None:
ans.append(root.val)
root = root.left
else:
next = root.right
while next.left and next.left != root:
next = next.left
if next.left != root:
ans.append(root.val)
next.left = root
root = root.right
else:
next.left = None
root = root.left
return ans[::-1]
| Solution |
python | tox-dev__tox | src/tox/config/cli/ini.py | {
"start": 466,
"end": 3172
} | class ____:
TOX_CONFIG_FILE_ENV_VAR = "TOX_USER_CONFIG_FILE"
STATE: ClassVar[dict[bool | None, str]] = {None: "failed to parse", True: "active", False: "missing"}
def __init__(self) -> None:
config_file = os.environ.get(self.TOX_CONFIG_FILE_ENV_VAR, None)
self.is_env_var = config_file is not None
self.config_file = Path(config_file if config_file is not None else DEFAULT_CONFIG_FILE)
self._cache: dict[tuple[str, type[Any]], Any] = {}
self.has_config_file: bool | None = self.config_file.exists()
self.ini: IniLoader | None = None
if self.has_config_file:
self.config_file = self.config_file.absolute()
try:
parser = ConfigParser(interpolation=None)
with self.config_file.open() as file_handler:
parser.read_file(file_handler)
self.has_tox_section = parser.has_section(CORE.key)
if self.has_tox_section:
self.ini = IniLoader(CORE, parser, overrides=[], core_section=CORE)
except Exception as exception: # noqa: BLE001
logging.error("failed to read config file %s because %r", self.config_file, exception) # noqa: TRY400
self.has_config_file = None
def get(self, key: str, of_type: type[Any]) -> Any:
cache_key = key, of_type
if cache_key in self._cache:
result = self._cache[cache_key]
else:
try:
if self.ini is None: # pragma: no cover # this can only happen if we don't call __bool__ firsts
result = None
else:
source = "file"
args = ConfigLoadArgs(chain=[key], name=CORE.prefix, env_name=None)
value = self.ini.load(key, of_type=of_type, conf=None, factory=None, args=args)
result = value, source
except KeyError: # just not found
result = None
except Exception as exception: # noqa: BLE001
logging.warning("%s key %s as type %r failed with %r", self.config_file, key, of_type, exception)
result = None
self._cache[cache_key] = result
return result
def __bool__(self) -> bool:
return bool(self.has_config_file) and bool(self.has_tox_section)
@property
def epilog(self) -> str:
# text to show within the parsers epilog
return (
f"{os.linesep}config file {str(self.config_file)!r} {self.STATE[self.has_config_file]} "
f"(change{'d' if self.is_env_var else ''} via env var {self.TOX_CONFIG_FILE_ENV_VAR})"
)
| IniConfig |
python | pandas-dev__pandas | pandas/core/dtypes/base.py | {
"start": 890,
"end": 13654
} | class ____:
"""
A custom data type, to be paired with an ExtensionArray.
This enables support for third-party and custom dtypes within the
pandas ecosystem. By implementing this interface and pairing it with a custom
`ExtensionArray`, users can create rich data types that integrate cleanly
with pandas operations, such as grouping, joining, or aggregation.
See Also
--------
extensions.register_extension_dtype: Register an ExtensionType
with pandas as class decorator.
extensions.ExtensionArray: Abstract base class for custom 1-D array types.
Notes
-----
The interface includes the following abstract methods that must
be implemented by subclasses:
* type
* name
* construct_array_type
The following attributes and methods influence the behavior of the dtype in
pandas operations
* _is_numeric
* _is_boolean
* _get_common_dtype
The `na_value` class attribute can be used to set the default NA value
for this type. :attr:`numpy.nan` is used by default.
ExtensionDtypes are required to be hashable. The base class provides
a default implementation, which relies on the ``_metadata`` class
attribute. ``_metadata`` should be a tuple containing the strings
that define your data type. For example, with ``PeriodDtype`` that's
the ``freq`` attribute.
**If you have a parametrized dtype you should set the ``_metadata``
class property**.
Ideally, the attributes in ``_metadata`` will match the
parameters to your ``ExtensionDtype.__init__`` (if any). If any of
the attributes in ``_metadata`` don't implement the standard
``__eq__`` or ``__hash__``, the default implementations here will not
work.
Examples
--------
For interaction with Apache Arrow (pyarrow), a ``__from_arrow__`` method
can be implemented: this method receives a pyarrow Array or ChunkedArray
as only argument and is expected to return the appropriate pandas
ExtensionArray for this dtype and the passed values:
>>> import pyarrow
>>> from pandas.api.extensions import ExtensionArray
>>> class ExtensionDtype:
... def __from_arrow__(
... self, array: pyarrow.Array | pyarrow.ChunkedArray
... ) -> ExtensionArray: ...
This class does not inherit from 'abc.ABCMeta' for performance reasons.
Methods and properties required by the interface raise
``pandas.errors.AbstractMethodError`` and no ``register`` method is
provided for registering virtual subclasses.
"""
_metadata: tuple[str, ...] = ()
def __str__(self) -> str:
return self.name
def __eq__(self, other: object) -> bool:
"""
Check whether 'other' is equal to self.
By default, 'other' is considered equal if either
* it's a string matching 'self.name'.
* it's an instance of this type and all of the attributes
in ``self._metadata`` are equal between `self` and `other`.
Parameters
----------
other : Any
Returns
-------
bool
"""
if isinstance(other, str):
try:
other = self.construct_from_string(other)
except TypeError:
return False
if isinstance(other, type(self)):
return all(
getattr(self, attr) == getattr(other, attr) for attr in self._metadata
)
return False
def __hash__(self) -> int:
# different nan objects have different hashes
# we need to avoid that and thus use hash function with old behavior
return object_hash(tuple(getattr(self, attr) for attr in self._metadata))
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
@property
def na_value(self) -> object:
"""
Default NA value to use for this type.
This is used in e.g. ExtensionArray.take. This should be the
user-facing "boxed" version of the NA value, not the physical NA value
for storage. e.g. for JSONArray, this is an empty dictionary.
"""
return np.nan
@property
def type(self) -> type_t[Any]:
"""
The scalar type for the array, e.g. ``int``
It's expected ``ExtensionArray[item]`` returns an instance
of ``ExtensionDtype.type`` for scalar ``item``, assuming
that value is valid (not NA). NA values do not need to be
instances of `type`.
"""
raise AbstractMethodError(self)
@property
def kind(self) -> str:
"""
A character code (one of 'biufcmMOSUV'), default 'O'
This should match the NumPy dtype used when the array is
converted to an ndarray, which is probably 'O' for object if
the extension type cannot be represented as a built-in NumPy
type.
See Also
--------
numpy.dtype.kind
"""
return "O"
@property
def name(self) -> str:
"""
A string identifying the data type.
Will be used for display in, e.g. ``Series.dtype``
"""
raise AbstractMethodError(self)
@property
def names(self) -> list[str] | None:
"""
Ordered list of field names, or None if there are no fields.
This is for compatibility with NumPy arrays, and may be removed in the
future.
"""
return None
def construct_array_type(self) -> type_t[ExtensionArray]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
raise AbstractMethodError(self)
def empty(self, shape: Shape) -> ExtensionArray:
"""
Construct an ExtensionArray of this dtype with the given shape.
Analogous to numpy.empty.
Parameters
----------
shape : int or tuple[int]
Returns
-------
ExtensionArray
"""
cls = self.construct_array_type()
return cls._empty(shape, dtype=self)
@classmethod
def construct_from_string(cls, string: str) -> Self:
r"""
Construct this type from a string.
This is useful mainly for data types that accept parameters.
For example, a period dtype accepts a frequency parameter that
can be set as ``period[h]`` (where H means hourly frequency).
By default, in the abstract class, just the name of the type is
expected. But subclasses can overwrite this method to accept
parameters.
Parameters
----------
string : str
The name of the type, for example ``category``.
Returns
-------
ExtensionDtype
Instance of the dtype.
Raises
------
TypeError
If a class cannot be constructed from this 'string'.
Examples
--------
For extension dtypes with arguments the following may be an
adequate implementation.
>>> import re
>>> @classmethod
... def construct_from_string(cls, string):
... pattern = re.compile(r"^my_type\[(?P<arg_name>.+)\]$")
... match = pattern.match(string)
... if match:
... return cls(**match.groupdict())
... else:
... raise TypeError(
... f"Cannot construct a '{cls.__name__}' from '{string}'"
... )
"""
if not isinstance(string, str):
raise TypeError(
f"'construct_from_string' expects a string, got {type(string)}"
)
# error: Non-overlapping equality check (left operand type: "str", right
# operand type: "Callable[[ExtensionDtype], str]") [comparison-overlap]
assert isinstance(cls.name, str), (cls, type(cls.name))
if string != cls.name:
raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
return cls()
@classmethod
def is_dtype(cls, dtype: object) -> bool:
"""
Check if we match 'dtype'.
Parameters
----------
dtype : object
The object to check.
Returns
-------
bool
Notes
-----
The default implementation is True if
1. ``cls.construct_from_string(dtype)`` is an instance
of ``cls``.
2. ``dtype`` is an object and is an instance of ``cls``
3. ``dtype`` has a ``dtype`` attribute, and any of the above
conditions is true for ``dtype.dtype``.
"""
dtype = getattr(dtype, "dtype", dtype)
if isinstance(dtype, (ABCSeries, ABCIndex, ABCDataFrame, np.dtype)):
# https://github.com/pandas-dev/pandas/issues/22960
# avoid passing data to `construct_from_string`. This could
# cause a FutureWarning from numpy about failing elementwise
# comparison from, e.g., comparing DataFrame == 'category'.
return False
elif dtype is None:
return False
elif isinstance(dtype, cls):
return True
if isinstance(dtype, str):
try:
return cls.construct_from_string(dtype) is not None
except TypeError:
return False
return False
@property
def _is_numeric(self) -> bool:
"""
Whether columns with this dtype should be considered numeric.
By default ExtensionDtypes are assumed to be non-numeric.
They'll be excluded from operations that exclude non-numeric
columns, like (groupby) reductions, plotting, etc.
"""
return False
@property
def _is_boolean(self) -> bool:
"""
Whether this dtype should be considered boolean.
By default, ExtensionDtypes are assumed to be non-numeric.
Setting this to True will affect the behavior of several places,
e.g.
* is_bool
* boolean indexing
Returns
-------
bool
"""
return False
def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
"""
Return the common dtype, if one exists.
Used in `find_common_type` implementation. This is for example used
to determine the resulting dtype in a concat operation.
If no common dtype exists, return None (which gives the other dtypes
the chance to determine a common dtype). If all dtypes in the list
return None, then the common dtype will be "object" dtype (this means
it is never needed to return "object" dtype from this method itself).
Parameters
----------
dtypes : list of dtypes
The dtypes for which to determine a common dtype. This is a list
of np.dtype or ExtensionDtype instances.
Returns
-------
Common dtype (np.dtype or ExtensionDtype) or None
"""
if len(set(dtypes)) == 1:
# only itself
return self
else:
return None
@property
def _can_hold_na(self) -> bool:
"""
Can arrays of this dtype hold NA values?
"""
return True
@property
def _is_immutable(self) -> bool:
"""
Can arrays with this dtype be modified with __setitem__? If not, return
True.
Immutable arrays are expected to raise TypeError on __setitem__ calls.
"""
return False
@cache_readonly
def index_class(self) -> type_t[Index]:
"""
The Index subclass to return from Index.__new__ when this dtype is
encountered.
"""
from pandas import Index
return Index
@property
def _supports_2d(self) -> bool:
"""
Do ExtensionArrays with this dtype support 2D arrays?
Historically ExtensionArrays were limited to 1D. By returning True here,
authors can indicate that their arrays support 2D instances. This can
improve performance in some cases, particularly operations with `axis=1`.
Arrays that support 2D values should:
- implement Array.reshape
- subclass the Dim2CompatTests in tests.extension.base
- _concat_same_type should support `axis` keyword
- _reduce and reductions should support `axis` keyword
"""
return False
@property
def _can_fast_transpose(self) -> bool:
"""
Is transposing an array with this dtype zero-copy?
Only relevant for cases where _supports_2d is True.
"""
return False
| ExtensionDtype |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-solr/llama_index/readers/solr/base.py | {
"start": 257,
"end": 3359
} | class ____(BasePydanticReader):
"""
Read documents from a Solr index.
These documents can then be used in a downstream Llama Index data structure.
"""
endpoint: str = Field(description="Full endpoint, including collection info.")
_client: Any = PrivateAttr()
def __init__(
self,
endpoint: str,
):
"""Initialize with parameters."""
super().__init__(endpoint=endpoint)
self._client = pysolr.Solr(endpoint)
def load_data(
self,
query: dict[str, Any],
field: str,
id_field: str = "id",
metadata_fields: Optional[list[str]] = None,
embedding: Optional[str] = None,
) -> list[Document]:
r"""
Read data from the Solr index. At least one field argument must be specified.
Args:
query (dict): The Solr query parameters.
- "q" is required.
- "rows" should be specified or will default to 10 by Solr.
- If "fl" is provided, it is respected exactly as given.
If "fl" is NOT provided, a default `fl` is constructed from
{id_field, field, embedding?, metadata_fields?}.
field (str): Field in Solr to retrieve as document text.
id_field (str): Field in Solr to retrieve as the document identifier. Defaults to "id".
metadata_fields (list[str], optional): Fields to include as metadata. Defaults to None.
embedding (str, optional): Field to use for embeddings. Defaults to None.
Raises:
ValueError: If the HTTP call to Solr fails.
Returns:
list[Document]: A list of retrieved documents where field is populated.
"""
if "q" not in query:
raise ValueError("Query parameters must include a 'q' field for the query.")
fl_default = {}
if "fl" not in query:
fields = [id_field, field]
if embedding:
fields.append(embedding)
if metadata_fields:
fields.extend(metadata_fields)
fl_default = {"fl": ",".join(fields)}
try:
query_params = {
**query,
**fl_default,
}
results = self._client.search(**query_params)
except Exception as e: # pragma: no cover
raise ValueError(f"Failed to query Solr endpoint: {e!s}") from e
documents: list[Document] = []
for doc in results.docs:
if field not in doc:
continue
doc_kwargs: dict[str, Any] = {
"id_": str(doc[id_field]),
"text": doc[field],
**({"embedding": doc.get(embedding)} if embedding else {}),
"metadata": {
metadata_field: doc[metadata_field]
for metadata_field in (metadata_fields or [])
if metadata_field in doc
},
}
documents.append(Document(**doc_kwargs))
return documents
| SolrReader |
python | wandb__wandb | tools/bench/_timing.py | {
"start": 137,
"end": 1364
} | class ____:
function_name: str
runtime_seconds: float
def timeit(
timings: List[FunctionTiming],
):
"""Timing decorator.
Args:
timings: list of FunctionTiming to append for each function call
"""
def timing_func(func):
def wrapper(*args, **kwargs):
t1 = time.time()
result = func(*args, **kwargs)
t2 = time.time()
timings.append(FunctionTiming(func.__name__, (t2 - t1)))
return result
return wrapper
return timing_func
def write(
fname: str,
timings: List[FunctionTiming],
prefix_list: Optional[List] = None,
):
"""Appends timing data to the file.
Args:
fname: The name of the timing data output file which is appended
timings: The timings data to append to the file, one row per timing.
This list is cleared at the end.
prefix_list: An optional prefix for each timing line written.
"""
prefix_list = prefix_list or []
with open(fname, "a") as csvfile:
writer = csv.writer(csvfile)
for item in timings:
writer.writerow(prefix_list + [item.function_name, item.runtime_seconds])
timings.clear()
| FunctionTiming |
python | spack__spack | lib/spack/spack/vendor/macholib/mach_o.py | {
"start": 16404,
"end": 18148
} | class ____(Structure):
_fields_ = (
("segname", p_str16),
("vmaddr", p_uint64),
("vmsize", p_uint64),
("fileoff", p_uint64),
("filesize", p_uint64),
("maxprot", vm_prot_t),
("initprot", vm_prot_t),
("nsects", p_uint32), # read the section structures ?
("flags", p_uint32),
)
def describe(self):
s = {}
s["segname"] = self.segname.rstrip("\x00")
s["vmaddr"] = int(self.vmaddr)
s["vmsize"] = int(self.vmsize)
s["fileoff"] = int(self.fileoff)
s["filesize"] = int(self.filesize)
s["initprot"] = self.get_initial_virtual_memory_protections()
s["initprot_raw"] = int(self.initprot)
s["maxprot"] = self.get_max_virtual_memory_protections()
s["maxprot_raw"] = int(self.maxprot)
s["nsects"] = int(self.nsects)
s["flags"] = self.flags
return s
def get_initial_virtual_memory_protections(self):
vm = []
if self.initprot == 0:
vm.append("VM_PROT_NONE")
if self.initprot & 1:
vm.append("VM_PROT_READ")
if self.initprot & 2:
vm.append("VM_PROT_WRITE")
if self.initprot & 4:
vm.append("VM_PROT_EXECUTE")
return vm
def get_max_virtual_memory_protections(self):
vm = []
if self.maxprot == 0:
vm.append("VM_PROT_NONE")
if self.maxprot & 1:
vm.append("VM_PROT_READ")
if self.maxprot & 2:
vm.append("VM_PROT_WRITE")
if self.maxprot & 4:
vm.append("VM_PROT_EXECUTE")
return vm
SG_HIGHVM = 0x1
SG_FVMLIB = 0x2
SG_NORELOC = 0x4
SG_PROTECTED_VERSION_1 = 0x8
| segment_command_64 |
python | run-llama__llama_index | llama-index-integrations/retrievers/llama-index-retrievers-vertexai-search/llama_index/retrievers/vertexai_search/base.py | {
"start": 900,
"end": 15227
} | class ____(BaseRetriever):
"""
`Vertex AI Search` retrieval.
For a detailed explanation of the Vertex AI Search concepts
and configuration parameters, refer to the product documentation.
https://cloud.google.com/generative-ai-app-builder/docs/enterprise-search-introduction
Args:
project_id: str
#Google Cloud Project ID
data_store_id: str
#Vertex AI Search data store ID.
location_id: str = "global"
#Vertex AI Search data store location.
serving_config_id: str = "default_config"
#Vertex AI Search serving config ID
credentials: Any = None
The default custom credentials (google.auth.credentials.Credentials) to use
when making API calls. If not provided, credentials will be ascertained from
the environment
engine_data_type: int = 0
Defines the Vertex AI Search data type
0 - Unstructured data
1 - Structured data
2 - Website data
Example:
retriever = VertexAISearchRetriever(
project_id=PROJECT_ID,
data_store_id=DATA_STORE_ID,
location_id=LOCATION_ID,
engine_data_type=0
)
"""
"""
The following parameter explanation can be found here:
https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#contentsearchspec
"""
filter: Optional[str] = None
"""Filter expression."""
get_extractive_answers: bool = False
"""If True return Extractive Answers, otherwise return Extractive Segments or Snippets."""
max_documents: int = 5
"""The maximum number of documents to return."""
max_extractive_answer_count: int = 1
"""The maximum number of extractive answers returned in each search result.
At most 5 answers will be returned for each SearchResult.
"""
max_extractive_segment_count: int = 1
"""The maximum number of extractive segments returned in each search result.
Currently one segment will be returned for each SearchResult.
"""
query_expansion_condition: int = 1
"""Specification to determine under which conditions query expansion should occur.
0 - Unspecified query expansion condition. In this case, server behavior defaults
to disabled
1 - Disabled query expansion. Only the exact search query is used, even if
SearchResponse.total_size is zero.
2 - Automatic query expansion built by the Search API.
"""
spell_correction_mode: int = 1
"""Specification to determine under which conditions query expansion should occur.
0 - Unspecified spell correction mode. In this case, server behavior defaults
to auto.
1 - Suggestion only. Search API will try to find a spell suggestion if there is any
and put in the `SearchResponse.corrected_query`.
The spell suggestion will not be used as the search query.
2 - Automatic spell correction built by the Search API.
Search will be based on the corrected query if found.
"""
boost_spec: Optional[Dict[Any, Any]] = None
"""BoostSpec for boosting search results. A protobuf should be provided.
https://cloud.google.com/generative-ai-app-builder/docs/boost-search-results
https://cloud.google.com/generative-ai-app-builder/docs/reference/rest/v1beta/BoostSpec
"""
return_extractive_segment_score: bool = True
"""
Specifies whether to return the confidence score from the extractive segments in each search result.
This feature is available only for new or allowlisted data stores.
"""
_client: SearchServiceClient
_serving_config: str
def __init__(
self,
project_id: str,
data_store_id: str,
location_id: str = "global",
serving_config_id: str = "default_config",
credentials: Any = None,
engine_data_type: int = 0,
max_documents: int = 5,
user_agent: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Initializes private fields."""
self.project_id = project_id
self.location_id = location_id
self.data_store_id = data_store_id
self.serving_config_id = serving_config_id
self.engine_data_type = engine_data_type
self.credentials = credentials
self.max_documents = max_documents
self._user_agent = user_agent or "llama-index/0.0.0"
self.client_options = ClientOptions(
api_endpoint=(
f"{self.location_id}-discoveryengine.googleapis.com"
if self.location_id != "global"
else None
)
)
try:
from google.cloud.discoveryengine_v1beta import SearchServiceClient
except ImportError as exc:
raise ImportError(
"Could not import google-cloud-discoveryengine python package. "
"Please, install vertexaisearch dependency group: "
) from exc
try:
super().__init__(**kwargs)
except ValueError as e:
print(f"Error initializing GoogleVertexAISearchRetriever: {e!s}")
raise
# For more information, refer to:
# https://cloud.google.com/generative-ai-app-builder/docs/locations#specify_a_multi-region_for_your_data_store
self._client = SearchServiceClient(
credentials=self.credentials,
client_options=self.client_options,
client_info=get_client_info(module="vertex-ai-search"),
)
self._serving_config = self._client.serving_config_path(
project=self.project_id,
location=self.location_id,
data_store=self.data_store_id,
serving_config=self.serving_config_id,
)
def _get_content_spec_kwargs(self) -> Optional[Dict[str, Any]]:
"""Prepares a ContentSpec object."""
from google.cloud.discoveryengine_v1beta import SearchRequest
if self.engine_data_type == 0:
if self.get_extractive_answers:
extractive_content_spec = SearchRequest.ContentSearchSpec.ExtractiveContentSpec(
max_extractive_answer_count=self.max_extractive_answer_count,
return_extractive_segment_score=self.return_extractive_segment_score,
)
else:
extractive_content_spec = SearchRequest.ContentSearchSpec.ExtractiveContentSpec(
max_extractive_segment_count=self.max_extractive_segment_count,
return_extractive_segment_score=self.return_extractive_segment_score,
)
content_search_spec = {"extractive_content_spec": extractive_content_spec}
elif self.engine_data_type == 1:
content_search_spec = None
elif self.engine_data_type == 2:
content_search_spec = {
"extractive_content_spec": SearchRequest.ContentSearchSpec.ExtractiveContentSpec(
max_extractive_segment_count=self.max_extractive_segment_count,
max_extractive_answer_count=self.max_extractive_answer_count,
return_extractive_segment_score=self.return_extractive_segment_score,
),
"snippet_spec": SearchRequest.ContentSearchSpec.SnippetSpec(
return_snippet=True
),
}
else:
raise NotImplementedError(
"Only data store type 0 (Unstructured), 1 (Structured),"
"or 2 (Website) are supported currently."
+ f" Got {self.engine_data_type}"
)
return content_search_spec
def _create_search_request(self, query: str) -> SearchRequest:
"""Prepares a SearchRequest object."""
from google.cloud.discoveryengine_v1beta import SearchRequest
query_expansion_spec = SearchRequest.QueryExpansionSpec(
condition=self.query_expansion_condition,
)
spell_correction_spec = SearchRequest.SpellCorrectionSpec(
mode=self.spell_correction_mode
)
content_search_spec_kwargs = self._get_content_spec_kwargs()
if content_search_spec_kwargs is not None:
content_search_spec = SearchRequest.ContentSearchSpec(
**content_search_spec_kwargs
)
else:
content_search_spec = None
return SearchRequest(
query=query,
filter=self.filter,
serving_config=self._serving_config,
page_size=self.max_documents,
content_search_spec=content_search_spec,
query_expansion_spec=query_expansion_spec,
spell_correction_spec=spell_correction_spec,
boost_spec=SearchRequest.BoostSpec(**self.boost_spec)
if self.boost_spec
else None,
)
def _convert_structured_datastore_response(
self, results: Sequence[SearchResult]
) -> List[NodeWithScore]:
"""Converts a sequence of search results to a list of Llamaindex note_with_score."""
note_with_score: List[NodeWithScore] = []
for i, result in enumerate(results):
# Structured datastore does not have relevance score. The results are ranked
# in order. score is calculated by below. Index 0 has the highest score
score = (len(results) - i) / len(results)
document_dict = MessageToDict(
result.document._pb, preserving_proto_field_name=True
)
note_with_score.append(
NodeWithScore(
node=TextNode(
text=json.dumps(document_dict.get("struct_data", {}))
),
score=score,
)
)
return note_with_score
def _convert_unstructured_datastore_response(
self, results: Sequence[SearchResult], chunk_type: str
) -> List[NodeWithScore]:
"""Converts a sequence of search results to a list of LLamaindex note_with_score."""
note_with_score: List[NodeWithScore] = []
for result in results:
document_dict = MessageToDict(
result.document._pb, preserving_proto_field_name=True
)
derived_struct_data = document_dict.get("derived_struct_data")
if not derived_struct_data:
continue
if chunk_type not in derived_struct_data:
continue
for chunk in derived_struct_data[chunk_type]:
score = chunk.get("relevanceScore", 0)
note_with_score.append(
NodeWithScore(
node=TextNode(text=chunk.get("content", "")),
score=score,
)
)
return note_with_score
def _convert_website_datastore_response(
self, results: Sequence[SearchResult], chunk_type: str
) -> List[NodeWithScore]:
"""Converts a sequence of search results to a list of LLamaindex note_with_score."""
note_with_score: List[NodeWithScore] = []
for result in results:
document_dict = MessageToDict(
result.document._pb, preserving_proto_field_name=True
)
derived_struct_data = document_dict.get("derived_struct_data")
if not derived_struct_data:
continue
if chunk_type not in derived_struct_data:
continue
text_field = "snippet" if chunk_type == "snippets" else "content"
for chunk in derived_struct_data[chunk_type]:
score = chunk.get("relevanceScore", 0)
note_with_score.append(
NodeWithScore(
node=TextNode(text=chunk.get(text_field, "")),
score=score,
)
)
if not note_with_score:
print(f"No {chunk_type} could be found.")
if chunk_type == "extractive_answers":
print(
"Make sure that your data store is using Advanced Website "
"Indexing.\n"
"https://cloud.google.com/generative-ai-app-builder/docs/about-advanced-features#advanced-website-indexing"
)
return note_with_score
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve from the platform."""
"""Get note_with_score relevant for a query."""
search_request = self._create_search_request(query_bundle.query_str)
try:
response = self._client.search(search_request)
except InvalidArgument as exc:
raise type(exc)(
exc.message
+ " This might be due to engine_data_type not set correctly."
)
if self.engine_data_type == 0:
chunk_type = (
"extractive_answers"
if self.get_extractive_answers
else "extractive_segments"
)
note_with_score = self._convert_unstructured_datastore_response(
response.results, chunk_type
)
elif self.engine_data_type == 1:
note_with_score = self._convert_structured_datastore_response(
response.results
)
elif self.engine_data_type == 2:
chunk_type = (
"extractive_answers"
if self.get_extractive_answers
else "extractive_segments"
)
note_with_score = self._convert_website_datastore_response(
response.results, chunk_type
)
else:
raise NotImplementedError(
"Only data store type 0 (Unstructured), 1 (Structured),"
"or 2 (Website) are supported currently."
+ f" Got {self.engine_data_type}"
)
return note_with_score
async def _aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Asynchronously retrieve from the platform."""
return self._retrieve(query_bundle=query_bundle)
| VertexAISearchRetriever |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/nocover/test_sampled_from.py | {
"start": 2472,
"end": 2640
} | class ____(enum.Flag):
a = enum.auto()
b = enum.auto()
c = enum.auto()
LargeFlag = enum.Flag("LargeFlag", {f"bit{i}": enum.auto() for i in range(64)})
| AFlag |
python | pytorch__pytorch | test/ao/sparsity/test_structured_sparsifier.py | {
"start": 1084,
"end": 1494
} | class ____(BaseStructuredSparsifier):
def update_mask(self, module, tensor_name, **kwargs):
"""Prunes 1/3 of the weight output channels, so resulting module has 33.3% pruning"""
num_rows = len(module.parametrizations[tensor_name][0].mask)
prune = random.sample(list(range(num_rows)), num_rows // 3)
module.parametrizations[tensor_name][0].mask[prune] = False
| ImplementedPruner |
python | pandas-dev__pandas | asv_bench/benchmarks/inference.py | {
"start": 2677,
"end": 3691
} | class ____:
def setup(self):
self.ts_sec = Series(range(1521080307, 1521685107), dtype="int64")
self.ts_sec_uint = Series(range(1521080307, 1521685107), dtype="uint64")
self.ts_sec_float = self.ts_sec.astype("float64")
self.ts_nanosec = 1_000_000 * self.ts_sec
self.ts_nanosec_uint = 1_000_000 * self.ts_sec_uint
self.ts_nanosec_float = self.ts_nanosec.astype("float64")
# speed of int64, uint64 and float64 paths should be comparable
def time_nanosec_int64(self):
to_datetime(self.ts_nanosec, unit="ns")
def time_nanosec_uint64(self):
to_datetime(self.ts_nanosec_uint, unit="ns")
def time_nanosec_float64(self):
to_datetime(self.ts_nanosec_float, unit="ns")
def time_sec_uint64(self):
to_datetime(self.ts_sec_uint, unit="s")
def time_sec_int64(self):
to_datetime(self.ts_sec, unit="s")
def time_sec_float64(self):
to_datetime(self.ts_sec_float, unit="s")
| ToDatetimeFromIntsFloats |
python | crytic__slither | slither/vyper_parsing/ast/types.py | {
"start": 2335,
"end": 2387
} | class ____(ASTNode):
exc: ASTNode
@dataclass
| Raise |
python | huggingface__transformers | tests/models/dpt/test_image_processing_dpt.py | {
"start": 3537,
"end": 16254
} | class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = DPTImageProcessor if is_vision_available() else None
fast_image_processing_class = DPTImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = DPTImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "do_rescale"))
self.assertTrue(hasattr(image_processing, "rescale_factor"))
self.assertTrue(hasattr(image_processing, "do_pad"))
self.assertTrue(hasattr(image_processing, "size_divisor"))
self.assertTrue(hasattr(image_processing, "do_reduce_labels"))
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processing_class = image_processing_class(**self.image_processor_dict)
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 18, "width": 18})
image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42)
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
def test_padding(self):
for image_processing_class in self.image_processor_list:
if image_processing_class == DPTImageProcessorFast:
image = torch.arange(0, 366777, 1, dtype=torch.uint8).reshape(3, 249, 491)
image_processor = image_processing_class(**self.image_processor_dict)
padded_image = image_processor.pad_image(image, size_divisor=4)
self.assertTrue(padded_image.shape[1] % 4 == 0)
self.assertTrue(padded_image.shape[2] % 4 == 0)
pixel_values = image_processor.preprocess(
image, do_rescale=False, do_resize=False, do_pad=True, size_divisor=4, return_tensors="pt"
).pixel_values
self.assertTrue(pixel_values.shape[2] % 4 == 0)
self.assertTrue(pixel_values.shape[3] % 4 == 0)
else:
image_processor = image_processing_class(**self.image_processor_dict)
image = np.random.randn(3, 249, 491)
image = image_processor.pad_image(image, size_divisor=4)
self.assertTrue(image.shape[1] % 4 == 0)
self.assertTrue(image.shape[2] % 4 == 0)
pixel_values = image_processor.preprocess(
image, do_rescale=False, do_resize=False, do_pad=True, size_divisor=4, return_tensors="pt"
).pixel_values
self.assertTrue(pixel_values.shape[2] % 4 == 0)
self.assertTrue(pixel_values.shape[3] % 4 == 0)
def test_keep_aspect_ratio(self):
size = {"height": 512, "width": 512}
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(size=size, keep_aspect_ratio=True, ensure_multiple_of=32)
image = np.zeros((489, 640, 3))
pixel_values = image_processor(image, return_tensors="pt").pixel_values
self.assertEqual(list(pixel_values.shape), [1, 3, 512, 672])
# Copied from transformers.tests.models.beit.test_image_processing_beit.BeitImageProcessingTest.test_call_segmentation_maps
def test_call_segmentation_maps(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processor
image_processor = image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
maps = []
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
maps.append(torch.zeros(image.shape[-2:]).long())
# Test not batched input
encoding = image_processor(image_inputs[0], maps[0], return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape,
(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
),
)
self.assertEqual(
encoding["labels"].shape,
(
1,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
),
)
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
# Test batched
encoding = image_processor(image_inputs, maps, return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape,
(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
),
)
self.assertEqual(
encoding["labels"].shape,
(
self.image_processor_tester.batch_size,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
),
)
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
# Test not batched input (PIL images)
image, segmentation_map = prepare_semantic_single_inputs()
encoding = image_processor(image, segmentation_map, return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape,
(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
),
)
self.assertEqual(
encoding["labels"].shape,
(
1,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
),
)
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
# Test batched input (PIL images)
images, segmentation_maps = prepare_semantic_batch_inputs()
encoding = image_processor(images, segmentation_maps, return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape,
(
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
),
)
self.assertEqual(
encoding["labels"].shape,
(
2,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
),
)
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
def test_reduce_labels(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(**self.image_processor_dict)
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
image, map = prepare_semantic_single_inputs()
encoding = image_processor(image, map, return_tensors="pt")
labels_no_reduce = encoding["labels"].clone()
self.assertTrue(labels_no_reduce.min().item() >= 0)
self.assertTrue(labels_no_reduce.max().item() <= 150)
# Get the first non-zero label coords and value, for comparison when do_reduce_labels is True
non_zero_positions = (labels_no_reduce > 0).nonzero()
first_non_zero_coords = tuple(non_zero_positions[0].tolist())
first_non_zero_value = labels_no_reduce[first_non_zero_coords].item()
image_processor.do_reduce_labels = True
encoding = image_processor(image, map, return_tensors="pt")
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
# Compare with non-reduced label to see if it's reduced by 1
self.assertEqual(encoding["labels"][first_non_zero_coords].item(), first_non_zero_value - 1)
def test_slow_fast_equivalence(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
dummy_image, dummy_map = prepare_semantic_single_inputs()
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
image_encoding_slow = image_processor_slow(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")
image_encoding_fast = image_processor_fast(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")
self.assertTrue(torch.allclose(image_encoding_slow.pixel_values, image_encoding_fast.pixel_values, atol=1e-1))
self.assertLessEqual(
torch.mean(torch.abs(image_encoding_slow.pixel_values - image_encoding_fast.pixel_values)).item(), 1e-3
)
self.assertTrue(torch.allclose(image_encoding_slow.labels, image_encoding_fast.labels, atol=1e-1))
def test_slow_fast_equivalence_batched(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
self.skipTest(
reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
)
dummy_images, dummy_maps = prepare_semantic_batch_inputs()
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt")
self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))
self.assertLessEqual(
torch.mean(torch.abs(encoding_slow.pixel_values - encoding_fast.pixel_values)).item(), 1e-3
)
| DPTImageProcessingTest |
python | bokeh__bokeh | src/bokeh/events.py | {
"start": 20445,
"end": 20841
} | class ____(PointEvent):
''' Announce the start of a pan event on a Bokeh plot.
Attributes:
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
'''
event_name = 'panstart'
| PanStart |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 17010,
"end": 17141
} | class ____(models.Model):
history = HistoricalRecords(inherit=True)
class Meta:
abstract = True
| TrackedAbstractBaseA |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/multi_asset_sensor_definition.py | {
"start": 2223,
"end": 4998
} | class ____(
NamedTuple(
"_MultiAssetSensorAssetCursorComponent",
[
("latest_consumed_event_partition", Optional[str]),
("latest_consumed_event_id", Optional[int]),
("trailing_unconsumed_partitioned_event_ids", dict[str, int]),
],
)
):
"""A cursor component that is used to track the cursor for a particular asset in a multi-asset
sensor.
Here's an illustration to help explain how this representation works:
partition_1 ---|----------a----
partition_2 -t-----|-x---------
partition_3 ----t------|---a---
The "|", "a", "t", and "x" characters represent materialization events.
The x-axis is storage_id, which is basically time. The cursor has been advanced to the "|" event
for each partition. latest_evaluated_event_partition would be "partition_3", and
"latest_evaluated_event_id" would be the storage_id of the "|" event for partition_3.
The "t" events aren't directly represented in the cursor, because they trail the event that the
the cursor for their partition has advanced to. The "a" events aren't directly represented
in the cursor, because they occurred after the "latest_evaluated_event_id". The "x" event is
included in "unevaluated_partitioned_event_ids", because it's after the event that the cursor
for its partition has advanced to, but trails "latest_evaluated_event_id".
Args:
latest_consumed_event_partition (Optional[str]): The partition of the latest consumed event
for this asset.
latest_consumed_event_id (Optional[int]): The event ID of the latest consumed event for
this asset.
trailing_unconsumed_partitioned_event_ids (Dict[str, int]): A mapping containing
the partition key mapped to the latest unconsumed materialization event for this
partition with an ID less than latest_consumed_event_id.
"""
def __new__(
cls,
latest_consumed_event_partition,
latest_consumed_event_id,
trailing_unconsumed_partitioned_event_ids,
):
return super().__new__(
cls,
latest_consumed_event_partition=check.opt_str_param(
latest_consumed_event_partition, "latest_consumed_event_partition"
),
latest_consumed_event_id=check.opt_int_param(
latest_consumed_event_id, "latest_consumed_event_id"
),
trailing_unconsumed_partitioned_event_ids=check.dict_param(
trailing_unconsumed_partitioned_event_ids,
"trailing_unconsumed_partitioned_event_ids",
key_type=str,
value_type=int,
),
)
| MultiAssetSensorAssetCursorComponent |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/math_ops/confusion_matrix_test.py | {
"start": 8541,
"end": 18437
} | class ____(test.TestCase):
@test_util.run_deprecated_v1
def testBothScalarShape(self):
label_values = 1.0
prediction_values = 0.0
static_labels, static_predictions = (
confusion_matrix.remove_squeezable_dimensions(
label_values, prediction_values))
labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)
predictions_placeholder = array_ops.placeholder(dtype=dtypes.float32)
dynamic_labels, dynamic_predictions = (
confusion_matrix.remove_squeezable_dimensions(
labels_placeholder, predictions_placeholder))
with self.cached_session():
self.assertAllEqual(label_values, self.evaluate(static_labels))
self.assertAllEqual(prediction_values, self.evaluate(static_predictions))
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
}
self.assertAllEqual(
label_values, dynamic_labels.eval(feed_dict=feed_dict))
self.assertAllEqual(
prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
@test_util.run_deprecated_v1
def testSameShape(self):
label_values = np.ones(shape=(2, 3, 1))
prediction_values = np.zeros_like(label_values)
static_labels, static_predictions = (
confusion_matrix.remove_squeezable_dimensions(
label_values, prediction_values))
labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
dynamic_labels, dynamic_predictions = (
confusion_matrix.remove_squeezable_dimensions(
labels_placeholder, predictions_placeholder))
with self.cached_session():
self.assertAllEqual(label_values, self.evaluate(static_labels))
self.assertAllEqual(prediction_values, self.evaluate(static_predictions))
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
}
self.assertAllEqual(
label_values, dynamic_labels.eval(feed_dict=feed_dict))
self.assertAllEqual(
prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
@test_util.run_deprecated_v1
def testSameShapeExpectedRankDiff0(self):
label_values = np.ones(shape=(2, 3, 1))
prediction_values = np.zeros_like(label_values)
static_labels, static_predictions = (
confusion_matrix.remove_squeezable_dimensions(
label_values, prediction_values, expected_rank_diff=0))
labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
dynamic_labels, dynamic_predictions = (
confusion_matrix.remove_squeezable_dimensions(
labels_placeholder, predictions_placeholder, expected_rank_diff=0))
with self.cached_session():
self.assertAllEqual(label_values, self.evaluate(static_labels))
self.assertAllEqual(prediction_values, self.evaluate(static_predictions))
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
}
self.assertAllEqual(
label_values, dynamic_labels.eval(feed_dict=feed_dict))
self.assertAllEqual(
prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
@test_util.run_deprecated_v1
def testSqueezableLabels(self):
label_values = np.ones(shape=(2, 3, 1))
prediction_values = np.zeros(shape=(2, 3))
static_labels, static_predictions = (
confusion_matrix.remove_squeezable_dimensions(
label_values, prediction_values))
labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
dynamic_labels, dynamic_predictions = (
confusion_matrix.remove_squeezable_dimensions(
labels_placeholder, predictions_placeholder))
expected_label_values = np.reshape(label_values, newshape=(2, 3))
with self.cached_session():
self.assertAllEqual(expected_label_values, self.evaluate(static_labels))
self.assertAllEqual(prediction_values, self.evaluate(static_predictions))
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
}
self.assertAllEqual(
expected_label_values, dynamic_labels.eval(feed_dict=feed_dict))
self.assertAllEqual(
prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
@test_util.run_deprecated_v1
def testSqueezableLabelsExpectedRankDiffPlus1(self):
label_values = np.ones(shape=(2, 3, 1))
prediction_values = np.zeros(shape=(2, 3, 5))
static_labels, static_predictions = (
confusion_matrix.remove_squeezable_dimensions(
label_values, prediction_values, expected_rank_diff=1))
labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
dynamic_labels, dynamic_predictions = (
confusion_matrix.remove_squeezable_dimensions(
labels_placeholder, predictions_placeholder, expected_rank_diff=1))
expected_label_values = np.reshape(label_values, newshape=(2, 3))
with self.cached_session():
self.assertAllEqual(expected_label_values, self.evaluate(static_labels))
self.assertAllEqual(prediction_values, self.evaluate(static_predictions))
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
}
self.assertAllEqual(
expected_label_values, dynamic_labels.eval(feed_dict=feed_dict))
self.assertAllEqual(
prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
@test_util.run_deprecated_v1
def testSqueezablePredictions(self):
label_values = np.ones(shape=(2, 3))
prediction_values = np.zeros(shape=(2, 3, 1))
static_labels, static_predictions = (
confusion_matrix.remove_squeezable_dimensions(
label_values, prediction_values))
labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
dynamic_labels, dynamic_predictions = (
confusion_matrix.remove_squeezable_dimensions(
labels_placeholder, predictions_placeholder))
expected_prediction_values = np.reshape(prediction_values, newshape=(2, 3))
with self.cached_session():
self.assertAllEqual(label_values, self.evaluate(static_labels))
self.assertAllEqual(expected_prediction_values,
self.evaluate(static_predictions))
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
}
self.assertAllEqual(
label_values, dynamic_labels.eval(feed_dict=feed_dict))
self.assertAllEqual(
expected_prediction_values,
dynamic_predictions.eval(feed_dict=feed_dict))
@test_util.run_deprecated_v1
def testSqueezablePredictionsExpectedRankDiffMinus1(self):
label_values = np.ones(shape=(2, 3, 5))
prediction_values = np.zeros(shape=(2, 3, 1))
static_labels, static_predictions = (
confusion_matrix.remove_squeezable_dimensions(
label_values, prediction_values, expected_rank_diff=-1))
labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
dynamic_labels, dynamic_predictions = (
confusion_matrix.remove_squeezable_dimensions(
labels_placeholder, predictions_placeholder, expected_rank_diff=-1))
expected_prediction_values = np.reshape(prediction_values, newshape=(2, 3))
with self.cached_session():
self.assertAllEqual(label_values, self.evaluate(static_labels))
self.assertAllEqual(expected_prediction_values,
self.evaluate(static_predictions))
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
}
self.assertAllEqual(
label_values, dynamic_labels.eval(feed_dict=feed_dict))
self.assertAllEqual(
expected_prediction_values,
dynamic_predictions.eval(feed_dict=feed_dict))
@test_util.run_deprecated_v1
def testUnsqueezableLabels(self):
label_values = np.ones(shape=(2, 3, 2))
prediction_values = np.zeros(shape=(2, 3))
labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
_, dynamic_predictions = (
confusion_matrix.remove_squeezable_dimensions(labels_placeholder,
predictions_placeholder))
with self.cached_session():
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
}
self.assertAllEqual(
prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
@test_util.run_deprecated_v1
def testUnsqueezablePredictions(self):
label_values = np.ones(shape=(2, 3))
prediction_values = np.zeros(shape=(2, 3, 2))
labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
dynamic_labels, _ = (
confusion_matrix.remove_squeezable_dimensions(labels_placeholder,
predictions_placeholder))
with self.cached_session():
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
}
self.assertAllEqual(
label_values, dynamic_labels.eval(feed_dict=feed_dict))
if __name__ == "__main__":
test.main()
| RemoveSqueezableDimensionsTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramSpec51.py | {
"start": 304,
"end": 1149
} | class ____:
@overload
def method1(
self,
cb: Callable[Concatenate[Self, P], None],
*args: P.args,
**kwargs: P.kwargs,
) -> None: ...
@overload
def method1(
self, cb: tuple[Callable[..., None], str], *args: Any, **kwargs: Any
) -> None: ...
def method1(self, cb, *args, **kwargs) -> None:
if isinstance(cb, tuple):
cb[0](self, *args, **kwargs)
else:
cb(self, *args, **kwargs)
def func1(fo: A, x: int) -> None: ...
def func2(fo: A, x: int, /, y: str) -> None: ...
def func3(fo: A, x: int, /, y: str, *, z: tuple[int, int]) -> None: ...
a = A()
a.method1(func1, 1)
a.method1(func2, 3, "f1")
a.method1(func3, 6, "f2", z=(0, 1))
a.method1((func1, "f1"), 1)
a.method1((func2, "f2"), 2, "a")
a.method1((func3, "f3"), 3, "b", z=(0, 1))
| A |
python | pytorch__pytorch | test/dynamo/test_sets.py | {
"start": 2161,
"end": 8451
} | class ____(LoggingTestCase):
def test_set_with_function(self):
s = {
torch._C._set_grad_enabled,
"hello",
torch.amp._exit_autocast,
}
cnts = CompileCounter()
@torch.compile(backend=cnts, fullgraph=True)
def fn(x, s):
if torch.amp._exit_autocast in s:
return x.sin()
return x.cos()
x = torch.randn(2)
y = fn(x, s)
self.assertEqual(y, x.sin())
self.assertEqual(cnts.frame_count, 1)
s.remove(torch.amp._exit_autocast)
s.add(torch._C._set_fwd_grad_enabled)
y = fn(x, s)
self.assertEqual(y, x.cos())
self.assertEqual(cnts.frame_count, 2)
@make_logging_test(recompiles=True)
def test_in_guard(self, records):
s = {
"Dynamo",
"Inductor",
"PyTorch",
torch.sin,
}
cnts = CompileCounter()
@torch.compile(backend=cnts, fullgraph=True)
def fn(x, s):
if "PyTorch" in s:
return x.sin()
return x.cos()
x = torch.randn(2)
y = fn(x, s)
self.assertEqual(y, x.sin())
self.assertEqual(cnts.frame_count, 1)
s.remove("PyTorch")
s.add("Cuda")
y = fn(x, s)
self.assertEqual(y, x.cos())
self.assertEqual(cnts.frame_count, 2)
self.assertGreater(len(records), 0)
record = self.getRecord(records, "set.__contains__")
self.assertIn(
"""set.__contains__(s, 'PyTorch')""",
munge_exc(record.getMessage()),
)
def test_set_with_tensors(self):
s = {
torch.ones(1),
torch.tensor([1.0]),
torch.zeros(1),
}
cnts = CompileCounter()
@torch.compile(backend=cnts, fullgraph=True)
def fn(x, s):
z = torch.zeros(1)
for i in s:
z += i
return x + z
x = torch.tensor([1.0])
self.assertExpectedInlineMunged(
Unsupported,
lambda: fn(x, s),
"""\
Attempted to wrap a set with tensors
Explanation: Dynamo cannot trace sets of tensors. To get a stable ordering, Dynamo needs to convert the set into a list and the order might not be stable if the set contains tensors.
Hint: Use a dictionary where the keys are tensors.
Hint: It may be possible to write Dynamo tracing rules for this code. Please report an issue to PyTorch if you encounter this graph break often and it is causing performance issues.
Developer debug context: Python set containing torch.Tensor elements
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0222.html
from user code:
File "test_sets.py", line N, in fn
for i in s:""", # noqa: B950
)
def test_set_multiple_types(self):
s = {
"PyTorch",
3.3,
1j,
math.nan,
}
cnts = CompileCounter()
@torch.compile(backend=cnts, fullgraph=True)
def fn(x, s):
if "PyTorch" in s:
return x.sin()
return x.cos()
x = torch.tensor(1.0)
y = fn(x, s)
self.assertEqual(y, x.sin())
self.assertEqual(cnts.frame_count, 1)
s.remove("PyTorch")
y = fn(x, s)
self.assertEqual(y, x.cos())
self.assertEqual(cnts.frame_count, 2)
def test_set_recompile_on_key_pop(self):
s = {
torch._C._set_grad_enabled,
torch.amp._enter_autocast,
torch.amp._exit_autocast,
}
cnts = CompileCounter()
def fn(x, s):
if torch.amp._exit_autocast in s:
return x.sin()
return x.cos()
x = torch.randn(4)
opt_fn = torch.compile(fn, backend=cnts, fullgraph=True)
res = opt_fn(x, s)
opt_fn(x, s)
self.assertEqual(res, fn(x, s))
# No recompilation
self.assertEqual(cnts.frame_count, 1)
# Pop a value
s.remove(torch.amp._exit_autocast)
res = opt_fn(x, s)
# Check recompilation
self.assertEqual(cnts.frame_count, 2)
self.assertEqual(res, fn(x, s))
def test_set_recompile_on_key_change(self):
s = {
torch._C._set_grad_enabled,
torch.amp._enter_autocast,
torch.amp._exit_autocast,
}
cnts = CompileCounter()
def fn(x, s):
if torch.amp._exit_autocast in s:
return x.sin()
return x.cos()
x = torch.randn(4)
opt_fn = torch.compile(fn, backend=cnts, fullgraph=True)
res = opt_fn(x, s)
opt_fn(x, s)
self.assertEqual(res, fn(x, s))
# No recompilation
self.assertEqual(cnts.frame_count, 1)
# Pop a value
s.remove(torch.amp._exit_autocast)
# Add a different value
s.add(torch._C._set_autograd_fallback_mode)
res = opt_fn(x, s)
# Check recompilation
self.assertEqual(cnts.frame_count, 2)
self.assertEqual(res, fn(x, s))
@unittest.skip("random failures on Python 3.9")
def test_set_guard_on_keys_change(self):
# This test guarantee that we're not triggering any of the dict guards
# on sets
s = {
torch._C._set_grad_enabled,
torch.amp._enter_autocast,
torch.amp._exit_autocast,
}
cnts = CompileCounter()
def fn(x, s):
for e in s:
x = x * len(str(e))
return x
opt_fn = torch.compile(fn, backend=cnts, fullgraph=True)
opt_fn(torch.randn(4), s)
opt_fn(torch.randn(4), s)
# No recompilation
self.assertEqual(cnts.frame_count, 1)
# pop and add the same item
s.remove(torch.amp._exit_autocast)
# It is not guaranteed that _exit_autocast will be in a specific order
s.add(torch.amp._exit_autocast)
x = torch.randn(4)
res = opt_fn(x, s)
# Check Dynamo don't recompile
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(res, fn(x, s))
| TestSetGuards |
python | davidhalter__parso | parso/python/tokenize.py | {
"start": 8830,
"end": 25795
} | class ____:
def __init__(self, quote):
self.quote = quote
self.parentheses_count = 0
self.previous_lines = ''
self.last_string_start_pos = None
# In the syntax there can be multiple format_spec's nested:
# {x:{y:3}}
self.format_spec_count = 0
def open_parentheses(self, character):
self.parentheses_count += 1
def close_parentheses(self, character):
self.parentheses_count -= 1
if self.parentheses_count == 0:
# No parentheses means that the format spec is also finished.
self.format_spec_count = 0
def allow_multiline(self):
return len(self.quote) == 3
def is_in_expr(self):
return self.parentheses_count > self.format_spec_count
def is_in_format_spec(self):
return not self.is_in_expr() and self.format_spec_count
def _close_fstring_if_necessary(fstring_stack, string, line_nr, column, additional_prefix):
for fstring_stack_index, node in enumerate(fstring_stack):
lstripped_string = string.lstrip()
len_lstrip = len(string) - len(lstripped_string)
if lstripped_string.startswith(node.quote):
token = PythonToken(
FSTRING_END,
node.quote,
(line_nr, column + len_lstrip),
prefix=additional_prefix+string[:len_lstrip],
)
additional_prefix = ''
assert not node.previous_lines
del fstring_stack[fstring_stack_index:]
return token, '', len(node.quote) + len_lstrip
return None, additional_prefix, 0
def _find_fstring_string(endpats, fstring_stack, line, lnum, pos):
tos = fstring_stack[-1]
allow_multiline = tos.allow_multiline()
if tos.is_in_format_spec():
if allow_multiline:
regex = fstring_format_spec_multi_line
else:
regex = fstring_format_spec_single_line
else:
if allow_multiline:
regex = fstring_string_multi_line
else:
regex = fstring_string_single_line
match = regex.match(line, pos)
if match is None:
return tos.previous_lines, pos
if not tos.previous_lines:
tos.last_string_start_pos = (lnum, pos)
string = match.group(0)
for fstring_stack_node in fstring_stack:
end_match = endpats[fstring_stack_node.quote].match(string)
if end_match is not None:
string = end_match.group(0)[:-len(fstring_stack_node.quote)]
new_pos = pos
new_pos += len(string)
# even if allow_multiline is False, we still need to check for trailing
# newlines, because a single-line f-string can contain line continuations
if string.endswith('\n') or string.endswith('\r'):
tos.previous_lines += string
string = ''
else:
string = tos.previous_lines + string
return string, new_pos
def tokenize(
code: str, *, version_info: PythonVersionInfo, start_pos: Tuple[int, int] = (1, 0)
) -> Iterator[PythonToken]:
"""Generate tokens from a the source code (string)."""
lines = split_lines(code, keepends=True)
return tokenize_lines(lines, version_info=version_info, start_pos=start_pos)
def _print_tokens(func):
"""
A small helper function to help debug the tokenize_lines function.
"""
def wrapper(*args, **kwargs):
for token in func(*args, **kwargs):
print(token) # This print is intentional for debugging!
yield token
return wrapper
# @_print_tokens
def tokenize_lines(
lines: Iterable[str],
*,
version_info: PythonVersionInfo,
indents: List[int] = None,
start_pos: Tuple[int, int] = (1, 0),
is_first_token=True,
) -> Iterator[PythonToken]:
"""
A heavily modified Python standard library tokenizer.
Additionally to the default information, yields also the prefix of each
token. This idea comes from lib2to3. The prefix contains all information
that is irrelevant for the parser like newlines in parentheses or comments.
"""
def dedent_if_necessary(start):
while start < indents[-1]:
if start > indents[-2]:
yield PythonToken(ERROR_DEDENT, '', (lnum, start), '')
indents[-1] = start
break
indents.pop()
yield PythonToken(DEDENT, '', spos, '')
pseudo_token, single_quoted, triple_quoted, endpats, whitespace, \
fstring_pattern_map, always_break_tokens, = \
_get_token_collection(version_info)
paren_level = 0 # count parentheses
if indents is None:
indents = [0]
max_ = 0
numchars = '0123456789'
contstr = ''
contline: str
contstr_start: Tuple[int, int]
endprog: Pattern
# We start with a newline. This makes indent at the first position
# possible. It's not valid Python, but still better than an INDENT in the
# second line (and not in the first). This makes quite a few things in
# Jedi's fast parser possible.
new_line = True
prefix = '' # Should never be required, but here for safety
additional_prefix = ''
lnum = start_pos[0] - 1
fstring_stack: List[FStringNode] = []
for line in lines: # loop over lines in stream
lnum += 1
pos = 0
max_ = len(line)
if is_first_token:
if line.startswith(BOM_UTF8_STRING):
additional_prefix = BOM_UTF8_STRING
line = line[1:]
max_ = len(line)
# Fake that the part before was already parsed.
line = '^' * start_pos[1] + line
pos = start_pos[1]
max_ += start_pos[1]
is_first_token = False
if contstr: # continued string
endmatch = endprog.match(line) # noqa: F821
if endmatch:
pos = endmatch.end(0)
yield PythonToken(
STRING, contstr + line[:pos],
contstr_start, prefix) # noqa: F821
contstr = ''
contline = ''
else:
contstr = contstr + line
contline = contline + line
continue
while pos < max_:
if fstring_stack:
tos = fstring_stack[-1]
if not tos.is_in_expr():
string, pos = _find_fstring_string(endpats, fstring_stack, line, lnum, pos)
if string:
yield PythonToken(
FSTRING_STRING, string,
tos.last_string_start_pos,
# Never has a prefix because it can start anywhere and
# include whitespace.
prefix=''
)
tos.previous_lines = ''
continue
if pos == max_:
break
rest = line[pos:]
fstring_end_token, additional_prefix, quote_length = _close_fstring_if_necessary(
fstring_stack,
rest,
lnum,
pos,
additional_prefix,
)
pos += quote_length
if fstring_end_token is not None:
yield fstring_end_token
continue
# in an f-string, match until the end of the string
if fstring_stack:
string_line = line
for fstring_stack_node in fstring_stack:
quote = fstring_stack_node.quote
end_match = endpats[quote].match(line, pos)
if end_match is not None:
end_match_string = end_match.group(0)
if len(end_match_string) - len(quote) + pos < len(string_line):
string_line = line[:pos] + end_match_string[:-len(quote)]
pseudomatch = pseudo_token.match(string_line, pos)
else:
pseudomatch = pseudo_token.match(line, pos)
if pseudomatch:
prefix = additional_prefix + pseudomatch.group(1)
additional_prefix = ''
start, pos = pseudomatch.span(2)
spos = (lnum, start)
token = pseudomatch.group(2)
if token == '':
assert prefix
additional_prefix = prefix
# This means that we have a line with whitespace/comments at
# the end, which just results in an endmarker.
break
initial = token[0]
else:
match = whitespace.match(line, pos)
initial = line[match.end()]
start = match.end()
spos = (lnum, start)
if new_line and initial not in '\r\n#' and (initial != '\\' or pseudomatch is None):
new_line = False
if paren_level == 0 and not fstring_stack:
indent_start = start
if indent_start > indents[-1]:
yield PythonToken(INDENT, '', spos, '')
indents.append(indent_start)
yield from dedent_if_necessary(indent_start)
if not pseudomatch: # scan for tokens
match = whitespace.match(line, pos)
if new_line and paren_level == 0 and not fstring_stack:
yield from dedent_if_necessary(match.end())
pos = match.end()
new_line = False
yield PythonToken(
ERRORTOKEN, line[pos], (lnum, pos),
additional_prefix + match.group(0)
)
additional_prefix = ''
pos += 1
continue
if (initial in numchars # ordinary number
or (initial == '.' and token != '.' and token != '...')):
yield PythonToken(NUMBER, token, spos, prefix)
elif pseudomatch.group(3) is not None: # ordinary name
if token in always_break_tokens and (fstring_stack or paren_level):
fstring_stack[:] = []
paren_level = 0
# We only want to dedent if the token is on a new line.
m = re.match(r'[ \f\t]*$', line[:start])
if m is not None:
yield from dedent_if_necessary(m.end())
if token.isidentifier():
yield PythonToken(NAME, token, spos, prefix)
else:
yield from _split_illegal_unicode_name(token, spos, prefix)
elif initial in '\r\n':
if any(not f.allow_multiline() for f in fstring_stack):
fstring_stack.clear()
if not new_line and paren_level == 0 and not fstring_stack:
yield PythonToken(NEWLINE, token, spos, prefix)
else:
additional_prefix = prefix + token
new_line = True
elif initial == '#': # Comments
assert not token.endswith("\n") and not token.endswith("\r")
if fstring_stack and fstring_stack[-1].is_in_expr():
# `#` is not allowed in f-string expressions
yield PythonToken(ERRORTOKEN, initial, spos, prefix)
pos = start + 1
else:
additional_prefix = prefix + token
elif token in triple_quoted:
endprog = endpats[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield PythonToken(STRING, token, spos, prefix)
else:
contstr_start = spos # multiple lines
contstr = line[start:]
contline = line
break
# Check up to the first 3 chars of the token to see if
# they're in the single_quoted set. If so, they start
# a string.
# We're using the first 3, because we're looking for
# "rb'" (for example) at the start of the token. If
# we switch to longer prefixes, this needs to be
# adjusted.
# Note that initial == token[:1].
# Also note that single quote checking must come after
# triple quote checking (above).
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] in '\r\n': # continued string
# This means that a single quoted string ends with a
# backslash and is continued.
contstr_start = lnum, start
endprog = (endpats.get(initial) or endpats.get(token[1])
or endpats.get(token[2]))
contstr = line[start:]
contline = line
break
else: # ordinary string
yield PythonToken(STRING, token, spos, prefix)
elif token in fstring_pattern_map: # The start of an fstring.
fstring_stack.append(FStringNode(fstring_pattern_map[token]))
yield PythonToken(FSTRING_START, token, spos, prefix)
elif initial == '\\' and line[start:] in ('\\\n', '\\\r\n', '\\\r'): # continued stmt
additional_prefix += prefix + line[start:]
break
else:
if token in '([{':
if fstring_stack:
fstring_stack[-1].open_parentheses(token)
else:
paren_level += 1
elif token in ')]}':
if fstring_stack:
fstring_stack[-1].close_parentheses(token)
else:
if paren_level:
paren_level -= 1
elif token.startswith(':') and fstring_stack \
and fstring_stack[-1].parentheses_count \
- fstring_stack[-1].format_spec_count == 1:
# `:` and `:=` both count
fstring_stack[-1].format_spec_count += 1
token = ':'
pos = start + 1
yield PythonToken(OP, token, spos, prefix)
if contstr:
yield PythonToken(ERRORTOKEN, contstr, contstr_start, prefix)
if contstr.endswith('\n') or contstr.endswith('\r'):
new_line = True
if fstring_stack:
tos = fstring_stack[-1]
if tos.previous_lines:
yield PythonToken(
FSTRING_STRING, tos.previous_lines,
tos.last_string_start_pos,
# Never has a prefix because it can start anywhere and
# include whitespace.
prefix=''
)
end_pos = lnum, max_
# As the last position we just take the maximally possible position. We
# remove -1 for the last new line.
for indent in indents[1:]:
indents.pop()
yield PythonToken(DEDENT, '', end_pos, '')
yield PythonToken(ENDMARKER, '', end_pos, additional_prefix)
def _split_illegal_unicode_name(token, start_pos, prefix):
def create_token():
return PythonToken(ERRORTOKEN if is_illegal else NAME, found, pos, prefix)
found = ''
is_illegal = False
pos = start_pos
for i, char in enumerate(token):
if is_illegal:
if char.isidentifier():
yield create_token()
found = char
is_illegal = False
prefix = ''
pos = start_pos[0], start_pos[1] + i
else:
found += char
else:
new_found = found + char
if new_found.isidentifier():
found = new_found
else:
if found:
yield create_token()
prefix = ''
pos = start_pos[0], start_pos[1] + i
found = char
is_illegal = True
if found:
yield create_token()
if __name__ == "__main__":
path = sys.argv[1]
with open(path) as f:
code = f.read()
for token in tokenize(code, version_info=parse_version_string('3.10')):
print(token)
| FStringNode |
python | pytorch__pytorch | test/distributed/test_store.py | {
"start": 37316,
"end": 38718
} | class ____(TestCase):
"""
This test shows how to use the legacy TCPStore (non-libuv) backend since libuv is now
the default backend.
"""
def tearDown(self):
super().tearDown()
os.environ.pop("USE_LIBUV", None)
os.environ.pop("MASTER_ADDR", None)
os.environ.pop("MASTER_PORT", None)
def test_with_url_param(self):
port = common.find_free_port()
dist.init_process_group(
"gloo",
rank=0,
world_size=1,
init_method=f"tcp://{DEFAULT_HOSTNAME}:{port}?use_libuv=0",
)
self._run_test()
def test_with_env_var(self):
port = common.find_free_port()
os.environ["USE_LIBUV"] = "0"
os.environ["MASTER_ADDR"] = DEFAULT_HOSTNAME
os.environ["MASTER_PORT"] = str(port)
dist.init_process_group("gloo", rank=0, world_size=1, init_method="env://")
self._run_test()
def _run_test(self):
pg = dist.group.WORLD
store = c10d._get_process_group_store(pg)
self.assertTrue(isinstance(store, dist.PrefixStore))
# c10d does multiple levels of wrapping
while isinstance(store, dist.PrefixStore):
store = store.underlying_store
self.assertTrue(isinstance(store, dist.TCPStore))
self.assertFalse(store.libuvBackend)
dist.destroy_process_group()
| InitPgWithNonUvStore |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 258459,
"end": 259494
} | class ____(GeneratedAirbyteSource):
@public
def __init__(
self,
name: str,
email: str,
password: str,
start_date: str,
logs_batch_size: Optional[int] = None,
):
"""Airbyte Source for My Hours.
Documentation can be found at https://docs.airbyte.com/integrations/sources/my-hours
Args:
name (str): The name of the destination.
email (str): Your My Hours username
password (str): The password associated to the username
start_date (str): Start date for collecting time logs
logs_batch_size (Optional[int]): Pagination size used for retrieving logs in days
"""
self.email = check.str_param(email, "email")
self.password = check.str_param(password, "password")
self.start_date = check.str_param(start_date, "start_date")
self.logs_batch_size = check.opt_int_param(logs_batch_size, "logs_batch_size")
super().__init__("My Hours", name)
| MyHoursSource |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_trace_item_attributes.py | {
"start": 36721,
"end": 67753
} | class ____(
OrganizationTraceItemAttributeValuesEndpointBaseTest, BaseSpansTestCase, SpanTestCase
):
feature_flags = {"organizations:visibility-explore-view": True}
item_type = SupportedTraceItemType.SPANS
def test_no_feature(self) -> None:
response = self.do_request(features={})
assert response.status_code == 404, response.content
def test_invalid_item_type(self) -> None:
response = self.do_request(query={"itemType": "invalid"})
assert response.status_code == 400, response.content
assert response.data == {
"itemType": [
ErrorDetail(string='"invalid" is not a valid choice.', code="invalid_choice")
],
}
def test_no_projects(self) -> None:
response = self.do_request()
assert response.status_code == 200, response.content
assert response.data == []
def test_tags_keys(self) -> None:
timestamp = before_now(days=0, minutes=10).replace(microsecond=0)
for tag in ["foo", "bar", "baz"]:
self.store_segment(
self.project.id,
uuid4().hex,
uuid4().hex,
span_id=uuid4().hex[:16],
organization_id=self.organization.id,
parent_span_id=None,
timestamp=timestamp,
transaction="foo",
duration=100,
exclusive_time=100,
tags={"tag": tag},
is_eap=True,
)
response = self.do_request(key="tag")
assert response.status_code == 200, response.data
assert response.data == [
{
"count": mock.ANY,
"key": "tag",
"value": "bar",
"name": "bar",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
{
"count": mock.ANY,
"key": "tag",
"value": "baz",
"name": "baz",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
{
"count": mock.ANY,
"key": "tag",
"value": "foo",
"name": "foo",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
]
def test_transaction_keys_autocomplete(self) -> None:
timestamp = before_now(days=0, minutes=10).replace(microsecond=0)
for transaction in ["foo", "*bar", "*baz"]:
self.store_segment(
self.project.id,
uuid4().hex,
uuid4().hex,
span_id=uuid4().hex[:16],
organization_id=self.organization.id,
parent_span_id=None,
timestamp=timestamp,
transaction=transaction,
duration=100,
exclusive_time=100,
is_eap=True,
)
key = "transaction"
response = self.do_request(key=key)
assert response.status_code == 200, response.data
assert response.data == [
{
"count": mock.ANY,
"key": key,
"value": "*bar",
"name": "*bar",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
{
"count": mock.ANY,
"key": key,
"value": "*baz",
"name": "*baz",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
{
"count": mock.ANY,
"key": key,
"value": "foo",
"name": "foo",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
]
def test_transaction_keys_autocomplete_substring(self) -> None:
timestamp = before_now(days=0, minutes=10).replace(microsecond=0)
for transaction in ["foo", "*bar", "*baz"]:
self.store_segment(
self.project.id,
uuid4().hex,
uuid4().hex,
span_id=uuid4().hex[:16],
organization_id=self.organization.id,
parent_span_id=None,
timestamp=timestamp,
transaction=transaction,
duration=100,
exclusive_time=100,
is_eap=True,
)
key = "transaction"
response = self.do_request(query={"substringMatch": "b"}, key=key)
assert response.status_code == 200, response.data
assert response.data == [
{
"count": mock.ANY,
"key": key,
"value": "*bar",
"name": "*bar",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
{
"count": mock.ANY,
"key": key,
"value": "*baz",
"name": "*baz",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
]
def test_transaction_keys_autocomplete_substring_with_asterisk(self) -> None:
timestamp = before_now(days=0, minutes=10).replace(microsecond=0)
for transaction in ["foo", "*bar", "*baz"]:
self.store_segment(
self.project.id,
uuid4().hex,
uuid4().hex,
span_id=uuid4().hex[:16],
organization_id=self.organization.id,
parent_span_id=None,
timestamp=timestamp,
transaction=transaction,
duration=100,
exclusive_time=100,
is_eap=True,
)
key = "transaction"
response = self.do_request(query={"substringMatch": r"\*b"}, key=key)
assert response.status_code == 200, response.data
assert response.data == [
{
"count": mock.ANY,
"key": key,
"value": "*bar",
"name": "*bar",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
{
"count": mock.ANY,
"key": key,
"value": "*baz",
"name": "*baz",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
]
def test_tags_keys_autocomplete(self) -> None:
timestamp = before_now(days=0, minutes=10).replace(microsecond=0)
for tag in ["foo", "*bar", "*baz"]:
self.store_segment(
self.project.id,
uuid4().hex,
uuid4().hex,
span_id=uuid4().hex[:16],
organization_id=self.organization.id,
parent_span_id=None,
timestamp=timestamp,
transaction="transaction",
duration=100,
exclusive_time=100,
tags={"tag": tag},
is_eap=True,
)
key = "tag"
response = self.do_request(key=key)
assert response.status_code == 200, response.data
assert response.data == [
{
"count": mock.ANY,
"key": key,
"value": "*bar",
"name": "*bar",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
{
"count": mock.ANY,
"key": key,
"value": "*baz",
"name": "*baz",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
{
"count": mock.ANY,
"key": key,
"value": "foo",
"name": "foo",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
]
def test_tags_keys_autocomplete_substring(self) -> None:
timestamp = before_now(days=0, minutes=10).replace(microsecond=0)
for tag in ["foo", "*bar", "*baz"]:
self.store_segment(
self.project.id,
uuid4().hex,
uuid4().hex,
span_id=uuid4().hex[:16],
organization_id=self.organization.id,
parent_span_id=None,
timestamp=timestamp,
transaction="transaction",
duration=100,
exclusive_time=100,
tags={"tag": tag},
is_eap=True,
)
key = "tag"
response = self.do_request(query={"substringMatch": "b"}, key=key)
assert response.status_code == 200, response.data
assert response.data == [
{
"count": mock.ANY,
"key": key,
"value": "*bar",
"name": "*bar",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
{
"count": mock.ANY,
"key": key,
"value": "*baz",
"name": "*baz",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
]
def test_tags_keys_autocomplete_substring_with_asterisks(self) -> None:
timestamp = before_now(days=0, minutes=10).replace(microsecond=0)
for tag in ["foo", "*bar", "*baz"]:
self.store_segment(
self.project.id,
uuid4().hex,
uuid4().hex,
span_id=uuid4().hex[:16],
organization_id=self.organization.id,
parent_span_id=None,
timestamp=timestamp,
transaction="transaction",
duration=100,
exclusive_time=100,
tags={"tag": tag},
is_eap=True,
)
key = "tag"
response = self.do_request(query={"substringMatch": r"\*b"}, key=key)
assert response.status_code == 200, response.data
assert response.data == [
{
"count": mock.ANY,
"key": key,
"value": "*bar",
"name": "*bar",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
{
"count": mock.ANY,
"key": key,
"value": "*baz",
"name": "*baz",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
]
def test_tags_keys_autocomplete_noop(self) -> None:
timestamp = before_now(days=0, minutes=10).replace(microsecond=0)
for tag in ["foo", "bar", "baz"]:
self.store_segment(
self.project.id,
uuid4().hex,
uuid4().hex,
span_id=uuid4().hex[:16],
organization_id=self.organization.id,
parent_span_id=None,
timestamp=timestamp,
transaction=tag,
duration=100,
exclusive_time=100,
tags={"tag": tag},
is_eap=True,
)
for key in [
"span.duration",
"span.self_time",
"timestamp",
"id",
"span_id",
"parent_span",
"parent_span_id",
"trace",
"trace_id",
"transaction.id",
"transaction_id",
"segment.id",
"segment_id",
"profile.id",
"profile_id",
"replay.id",
"replay_id",
]:
response = self.do_request(key=key)
assert response.status_code == 200, response.data
assert response.data == [], key
def test_tags_keys_autocomplete_project(self) -> None:
base_id = 9223372036854775000
self.create_project(id=base_id + 100, name="foo")
self.create_project(id=base_id + 299, name="bar")
self.create_project(id=base_id + 399, name="baz")
for key in ["project", "project.name"]:
response = self.do_request(key=key)
assert response.status_code == 200, response.data
assert sorted(response.data, key=lambda v: v["value"]) == [
{
"count": mock.ANY,
"key": key,
"value": "bar",
"name": "bar",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
{
"count": mock.ANY,
"key": key,
"value": "baz",
"name": "baz",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
{
"count": mock.ANY,
"key": key,
"value": "foo",
"name": "foo",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
]
response = self.do_request(query={"substringMatch": "ba"}, key=key)
assert response.status_code == 200, response.data
assert sorted(response.data, key=lambda v: v["value"]) == [
{
"count": mock.ANY,
"key": key,
"value": "bar",
"name": "bar",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
{
"count": mock.ANY,
"key": key,
"value": "baz",
"name": "baz",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
]
key = "project.id"
response = self.do_request(key=key)
assert response.status_code == 200, response.data
assert sorted(response.data, key=lambda v: v["value"]) == [
{
"count": mock.ANY,
"key": key,
"value": "9223372036854775100",
"name": "9223372036854775100",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
{
"count": mock.ANY,
"key": key,
"value": "9223372036854775299",
"name": "9223372036854775299",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
{
"count": mock.ANY,
"key": key,
"value": "9223372036854775399",
"name": "9223372036854775399",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
]
response = self.do_request(query={"substringMatch": "99"}, key=key)
assert response.status_code == 200, response.data
assert sorted(response.data, key=lambda v: v["value"]) == [
{
"count": mock.ANY,
"key": key,
"value": "9223372036854775299",
"name": "9223372036854775299",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
{
"count": mock.ANY,
"key": key,
"value": "9223372036854775399",
"name": "9223372036854775399",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
]
def test_tags_keys_autocomplete_span_status(self) -> None:
timestamp = before_now(days=0, minutes=10).replace(microsecond=0)
for status in ["ok", "internal_error", "invalid_argument"]:
self.store_segment(
self.project.id,
uuid4().hex,
uuid4().hex,
span_id=uuid4().hex[:16],
organization_id=self.organization.id,
parent_span_id=None,
timestamp=timestamp,
transaction="foo",
status=status,
is_eap=True,
)
response = self.do_request(key="span.status")
assert response.status_code == 200, response.data
assert response.data == [
{
"count": mock.ANY,
"key": "span.status",
"value": "internal_error",
"name": "internal_error",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
{
"count": mock.ANY,
"key": "span.status",
"value": "invalid_argument",
"name": "invalid_argument",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
{
"count": mock.ANY,
"key": "span.status",
"value": "ok",
"name": "ok",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
]
response = self.do_request(query={"substringMatch": "in"}, key="span.status")
assert response.status_code == 200, response.data
assert response.data == [
{
"count": mock.ANY,
"key": "span.status",
"value": "internal_error",
"name": "internal_error",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
{
"count": mock.ANY,
"key": "span.status",
"value": "invalid_argument",
"name": "invalid_argument",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
]
def test_measurements_autocomplete(self) -> None:
keys = [
"measurements.app_start_cold",
"measurements.app_start_warm",
"measurements.frames_frozen",
"measurements.frames_frozen_rate",
"measurements.frames_slow",
"measurements.frames_slow_rate",
"measurements.frames_total",
"measurements.time_to_initial_display",
"measurements.time_to_full_display",
"measurements.stall_count",
"measurements.stall_percentage",
"measurements.stall_stall_longest_time",
"measurements.stall_stall_total_time",
"measurements.cls",
"measurements.fcp",
"measurements.fid",
"measurements.fp",
"measurements.inp",
"measurements.lcp",
"measurements.ttfb",
"measurements.ttfb.requesttime",
"measurements.score.cls",
"measurements.score.fcp",
"measurements.score.fid",
"measurements.score.inp",
"measurements.score.lcp",
"measurements.score.ttfb",
"measurements.score.total",
"measurements.score.weight.cls",
"measurements.score.weight.fcp",
"measurements.score.weight.fid",
"measurements.score.weight.inp",
"measurements.score.weight.lcp",
"measurements.score.weight.ttfb",
"measurements.cache.item_size",
"measurements.messaging.message.body.size",
"measurements.messaging.message.receive.latency",
"measurements.messaging.message.retry.count",
"measurements.http.response_content_length",
]
self.project
for key in keys:
response = self.do_request(key=key)
assert response.status_code == 200, response.data
assert response.data == []
def test_boolean_autocomplete(self) -> None:
keys = ["is_transaction"]
self.project
for key in keys:
response = self.do_request(key=key)
assert response.status_code == 200, response.data
assert response.data == [
{
"count": mock.ANY,
"key": key,
"value": "false",
"name": "false",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
{
"count": mock.ANY,
"key": key,
"value": "true",
"name": "true",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
]
@mock.patch(
"sentry.api.endpoints.organization_trace_item_attributes.TraceItemAttributeValuesAutocompletionExecutor.execute",
side_effect=InvalidSearchQuery,
)
def test_invalid_query(self, mock_executor_2: mock.MagicMock) -> None:
timestamp = before_now(days=0, minutes=10).replace(microsecond=0)
self.store_segment(
self.project.id,
uuid4().hex,
uuid4().hex,
span_id=uuid4().hex[:16],
organization_id=self.organization.id,
parent_span_id=None,
timestamp=timestamp,
transaction="foo",
duration=100,
exclusive_time=100,
tags={"tag": "foo"},
is_eap=True,
)
response = self.do_request(key="tag")
assert response.status_code == 400, response.data
@override_options({"explore.trace-items.values.max": 2})
def test_pagination(self) -> None:
timestamp = before_now(days=0, minutes=10).replace(microsecond=0)
for tag in ["foo", "bar", "baz", "qux"]:
self.store_segment(
self.project.id,
uuid4().hex,
uuid4().hex,
span_id=uuid4().hex[:16],
organization_id=self.organization.id,
parent_span_id=None,
timestamp=timestamp,
transaction="foo",
duration=100,
exclusive_time=100,
tags={"tag": tag},
is_eap=True,
)
response = self.do_request(key="tag")
assert response.status_code == 200, response.data
assert response.data == [
{
"count": mock.ANY,
"key": "tag",
"value": "bar",
"name": "bar",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
{
"count": mock.ANY,
"key": "tag",
"value": "baz",
"name": "baz",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
]
links = {}
for url, attrs in parse_link_header(response["Link"]).items():
links[attrs["rel"]] = attrs
attrs["href"] = url
assert links["previous"]["results"] == "false"
assert links["next"]["results"] == "true"
assert links["next"]["href"] is not None
with self.feature(self.feature_flags):
response = self.client.get(links["next"]["href"], format="json")
assert response.status_code == 200, response.content
assert response.data == [
{
"count": mock.ANY,
"key": "tag",
"value": "foo",
"name": "foo",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
{
"count": mock.ANY,
"key": "tag",
"value": "qux",
"name": "qux",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
]
links = {}
for url, attrs in parse_link_header(response["Link"]).items():
links[attrs["rel"]] = attrs
attrs["href"] = url
assert links["previous"]["results"] == "true"
assert links["next"]["results"] == "false"
assert links["previous"]["href"] is not None
with self.feature(self.feature_flags):
response = self.client.get(links["previous"]["href"], format="json")
assert response.status_code == 200, response.content
assert response.data == [
{
"count": mock.ANY,
"key": "tag",
"value": "bar",
"name": "bar",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
{
"count": mock.ANY,
"key": "tag",
"value": "baz",
"name": "baz",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
},
]
def test_autocomplete_release_semver_attributes(self) -> None:
release_1 = self.create_release(version="foo@1.2.3+121")
release_2 = self.create_release(version="qux@2.2.4+122")
self.store_spans(
[
self.create_span(
{"sentry_tags": {"release": release_1.version}},
start_ts=before_now(days=0, minutes=10),
),
self.create_span(
{"sentry_tags": {"release": release_2.version}},
start_ts=before_now(days=0, minutes=10),
),
],
is_eap=True,
)
response = self.do_request(key="release")
assert response.status_code == 200
assert response.data == [
{
"count": mock.ANY,
"key": "release",
"value": release,
"name": release,
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
}
for release in ["foo@1.2.3+121", "qux@2.2.4+122"]
]
response = self.do_request(key="release", query={"substringMatch": "121"})
assert response.status_code == 200
assert response.data == [
{
"count": mock.ANY,
"key": "release",
"value": "foo@1.2.3+121",
"name": "foo@1.2.3+121",
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
}
]
response = self.do_request(key="release.stage")
assert response.status_code == 200
assert response.data == [
{
"count": mock.ANY,
"key": "release.stage",
"value": stage,
"name": stage,
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
}
for stage in ["adopted", "low_adoption", "replaced"]
]
response = self.do_request(key="release.stage", query={"substringMatch": "adopt"})
assert response.status_code == 200
assert response.data == [
{
"count": mock.ANY,
"key": "release.stage",
"value": stage,
"name": stage,
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
}
for stage in ["adopted", "low_adoption"]
]
response = self.do_request(key="release.version")
assert response.status_code == 200
assert response.data == [
{
"count": mock.ANY,
"key": "release.version",
"value": version,
"name": version,
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
}
for version in ["1.2.3", "2.2.4"]
]
response = self.do_request(key="release.version", query={"substringMatch": "2"})
assert response.status_code == 200
assert response.data == [
{
"count": mock.ANY,
"key": "release.version",
"value": version,
"name": version,
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
}
for version in ["2.2.4"]
]
response = self.do_request(key="release.package")
assert response.status_code == 200
assert response.data == [
{
"count": mock.ANY,
"key": "release.package",
"value": version,
"name": version,
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
}
for version in ["foo", "qux"]
]
response = self.do_request(key="release.package", query={"substringMatch": "q"})
assert response.status_code == 200
assert response.data == [
{
"count": mock.ANY,
"key": "release.package",
"value": version,
"name": version,
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
}
for version in ["qux"]
]
response = self.do_request(key="release.build")
assert response.status_code == 200
assert response.data == [
{
"count": mock.ANY,
"key": "release.build",
"value": version,
"name": version,
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
}
for version in ["121", "122"]
]
def test_autocomplete_timestamp(self) -> None:
self.store_spans(
[self.create_span(start_ts=before_now(days=0, minutes=10))],
is_eap=True,
)
response = self.do_request(key="timestamp", query={"substringMatch": "20"})
assert response.status_code == 200
assert response.data == []
def test_autocomplete_device_class(self) -> None:
self.store_spans(
[
self.create_span({"sentry_tags": {"device.class": "3"}}),
self.create_span({"sentry_tags": {"device.class": "2"}}),
self.create_span({"sentry_tags": {"device.class": "1"}}),
self.create_span({"sentry_tags": {"device.class": ""}}),
self.create_span({}),
],
is_eap=True,
)
response = self.do_request(key="device.class")
assert response.data == [
{
"count": mock.ANY,
"key": "device.class",
"value": device_class,
"name": device_class,
"firstSeen": mock.ANY,
"lastSeen": mock.ANY,
}
for device_class in sorted(["low", "medium", "high", "Unknown"])
]
| OrganizationTraceItemAttributeValuesEndpointSpansTest |
python | pytorch__pytorch | torch/_inductor/codegen/simd_kernel_features.py | {
"start": 15312,
"end": 17818
} | class ____:
"""Memory usage stats for a block dimension in the generated kernel (different from user dimensions)"""
# the number of load/store ops
count_per_thread_contiguous: int = 0
count_per_thread_broadcast: int = 0
count_per_thread_non_contiguous: int = 0 # excludes broadcast
# total bytes in each load/store op for a single element
bytes_per_thread_contiguous: int = 0
bytes_per_thread_broadcast: int = 0
bytes_per_thread_non_contiguous: int = 0 # excludes broadcast
# total bytes read by entire kernel
bytes_contiguous_or_broadcast: sympy.Expr = sympy.S.Zero
bytes_non_contiguous: sympy.Expr = sympy.S.Zero
def __add__(self, other: typing.Self) -> StatsForDim:
return StatsForDim(
count_per_thread_contiguous=self.count_per_thread_contiguous
+ other.count_per_thread_contiguous,
count_per_thread_broadcast=self.count_per_thread_broadcast
+ other.count_per_thread_broadcast,
count_per_thread_non_contiguous=self.count_per_thread_non_contiguous
+ other.count_per_thread_non_contiguous,
bytes_per_thread_contiguous=self.bytes_per_thread_contiguous
+ other.bytes_per_thread_contiguous,
bytes_per_thread_broadcast=self.bytes_per_thread_broadcast
+ other.bytes_per_thread_broadcast,
bytes_per_thread_non_contiguous=self.bytes_per_thread_non_contiguous
+ other.bytes_per_thread_non_contiguous,
bytes_contiguous_or_broadcast=self.bytes_contiguous_or_broadcast
+ other.bytes_contiguous_or_broadcast,
bytes_non_contiguous=self.bytes_non_contiguous + other.bytes_non_contiguous,
)
@property
def count_per_thread(self) -> int:
return (
self.count_per_thread_contiguous
+ self.count_per_thread_broadcast
+ self.count_per_thread_non_contiguous
)
@property
def bytes_per_thread(self) -> int:
return (
self.bytes_per_thread_contiguous
+ self.bytes_per_thread_broadcast
+ self.bytes_per_thread_non_contiguous
)
@property
def bytes(self) -> sympy.Expr:
return self.bytes_contiguous_or_broadcast + self.bytes_non_contiguous
@property
def contiguous_score(self) -> float:
return 1.0 - self.count_per_thread_non_contiguous / max(
self.count_per_thread, 1
)
@dataclasses.dataclass
| StatsForDim |
python | altair-viz__altair | altair/vegalite/v6/api.py | {
"start": 23562,
"end": 24159
} | class ____(TypedDict, closed=True, total=False): # type: ignore[call-arg]
# https://peps.python.org/pep-0728/
# Parameter {"param", "value", "empty"}
# Predicate {"test", "value"}
empty: Optional[bool]
param: Parameter | str
test: _TestPredicateType
value: Any
_Conditions: TypeAlias = list[_ConditionClosed]
"""
Chainable conditions produced by ``.when()`` and ``Then.when()``.
All must be a `Conditional Value`_.
.. _Conditional Value:
https://vega.github.io/vega-lite/docs/condition.html#value
"""
_C = TypeVar("_C", _Conditions, _Condition)
| _ConditionClosed |
python | pydantic__pydantic | pydantic/networks.py | {
"start": 22234,
"end": 22439
} | class ____(AnyUrl):
"""A type that will accept any ws or wss URL.
* TLD not required
* Host not required
"""
_constraints = UrlConstraints(allowed_schemes=['ws', 'wss'])
| AnyWebsocketUrl |
python | python-markdown__markdown | markdown/extensions/footnotes.py | {
"start": 1050,
"end": 9175
} | class ____(Extension):
""" Footnote Extension. """
def __init__(self, **kwargs):
""" Setup configs. """
self.config = {
'PLACE_MARKER': [
'///Footnotes Go Here///', 'The text string that marks where the footnotes go'
],
'UNIQUE_IDS': [
False, 'Avoid name collisions across multiple calls to `reset()`.'
],
'BACKLINK_TEXT': [
'↩', "The text string that links from the footnote to the reader's place."
],
'SUPERSCRIPT_TEXT': [
'{}', "The text string that links from the reader's place to the footnote."
],
'BACKLINK_TITLE': [
'Jump back to footnote %d in the text',
'The text string used for the title HTML attribute of the backlink. '
'%d will be replaced by the footnote number.'
],
'SEPARATOR': [
':', 'Footnote separator.'
],
'USE_DEFINITION_ORDER': [
True,
'Order footnote labels by definition order (True) or by document order (False). '
'Default: True.'
]
}
""" Default configuration options. """
super().__init__(**kwargs)
# In multiple invocations, emit links that don't get tangled.
self.unique_prefix = 0
self.found_refs: dict[str, int] = {}
self.used_refs: set[str] = set()
# Backward compatibility with old '%d' placeholder
self.setConfig('BACKLINK_TITLE', self.getConfig("BACKLINK_TITLE").replace("%d", "{}"))
self.reset()
def extendMarkdown(self, md):
""" Add pieces to Markdown. """
md.registerExtension(self)
self.parser = md.parser
self.md = md
# Insert a `blockprocessor` before `ReferencePreprocessor`
md.parser.blockprocessors.register(FootnoteBlockProcessor(self), 'footnote', 17)
# Insert an inline pattern before `ImageReferencePattern`
FOOTNOTE_RE = r'\[\^([^\]]*)\]' # blah blah [^1] blah
md.inlinePatterns.register(FootnoteInlineProcessor(FOOTNOTE_RE, self), 'footnote', 175)
# Insert a tree-processor that would actually add the footnote div
# This must be before all other tree-processors (i.e., `inline` and
# `codehilite`) so they can run on the the contents of the div.
md.treeprocessors.register(FootnoteTreeprocessor(self), 'footnote', 50)
# Insert a tree-processor to reorder the footnotes if necessary. This must be after
# `inline` tree-processor so it can access the footnote reference order
# (`self.footnote_order`) that gets populated by the `FootnoteInlineProcessor`.
if not self.getConfig("USE_DEFINITION_ORDER"):
md.treeprocessors.register(FootnoteReorderingProcessor(self), 'footnote-reorder', 19)
# Insert a tree-processor that will run after inline is done.
# In this tree-processor we want to check our duplicate footnote tracker
# And add additional `backrefs` to the footnote pointing back to the
# duplicated references.
md.treeprocessors.register(FootnotePostTreeprocessor(self), 'footnote-duplicate', 15)
# Insert a postprocessor after amp_substitute processor
md.postprocessors.register(FootnotePostprocessor(self), 'footnote', 25)
def reset(self) -> None:
""" Clear footnotes on reset, and prepare for distinct document. """
self.footnote_order: list[str] = []
self.footnotes: OrderedDict[str, str] = OrderedDict()
self.unique_prefix += 1
self.found_refs = {}
self.used_refs = set()
def unique_ref(self, reference: str, found: bool = False) -> str:
""" Get a unique reference if there are duplicates. """
if not found:
return reference
original_ref = reference
while reference in self.used_refs:
ref, rest = reference.split(self.get_separator(), 1)
m = RE_REF_ID.match(ref)
if m:
reference = '%s%d%s%s' % (m.group(1), int(m.group(2))+1, self.get_separator(), rest)
else:
reference = '%s%d%s%s' % (ref, 2, self.get_separator(), rest)
self.used_refs.add(reference)
if original_ref in self.found_refs:
self.found_refs[original_ref] += 1
else:
self.found_refs[original_ref] = 1
return reference
def findFootnotesPlaceholder(
self, root: etree.Element
) -> tuple[etree.Element, etree.Element, bool] | None:
""" Return ElementTree Element that contains Footnote placeholder. """
def finder(element):
for child in element:
if child.text:
if child.text.find(self.getConfig("PLACE_MARKER")) > -1:
return child, element, True
if child.tail:
if child.tail.find(self.getConfig("PLACE_MARKER")) > -1:
return child, element, False
child_res = finder(child)
if child_res is not None:
return child_res
return None
res = finder(root)
return res
def setFootnote(self, id: str, text: str) -> None:
""" Store a footnote for later retrieval. """
self.footnotes[id] = text
def addFootnoteRef(self, id: str) -> None:
""" Store a footnote reference id in order of appearance. """
if id not in self.footnote_order:
self.footnote_order.append(id)
def get_separator(self) -> str:
""" Get the footnote separator. """
return self.getConfig("SEPARATOR")
def makeFootnoteId(self, id: str) -> str:
""" Return footnote link id. """
if self.getConfig("UNIQUE_IDS"):
return 'fn%s%d-%s' % (self.get_separator(), self.unique_prefix, id)
else:
return 'fn{}{}'.format(self.get_separator(), id)
def makeFootnoteRefId(self, id: str, found: bool = False) -> str:
""" Return footnote back-link id. """
if self.getConfig("UNIQUE_IDS"):
return self.unique_ref('fnref%s%d-%s' % (self.get_separator(), self.unique_prefix, id), found)
else:
return self.unique_ref('fnref{}{}'.format(self.get_separator(), id), found)
def makeFootnotesDiv(self, root: etree.Element) -> etree.Element | None:
""" Return `div` of footnotes as `etree` Element. """
if not list(self.footnotes.keys()):
return None
div = etree.Element("div")
div.set('class', 'footnote')
etree.SubElement(div, "hr")
ol = etree.SubElement(div, "ol")
surrogate_parent = etree.Element("div")
for index, id in enumerate(self.footnotes.keys(), start=1):
li = etree.SubElement(ol, "li")
li.set("id", self.makeFootnoteId(id))
# Parse footnote with surrogate parent as `li` cannot be used.
# List block handlers have special logic to deal with `li`.
# When we are done parsing, we will copy everything over to `li`.
self.parser.parseChunk(surrogate_parent, self.footnotes[id])
for el in list(surrogate_parent):
li.append(el)
surrogate_parent.remove(el)
backlink = etree.Element("a")
backlink.set("href", "#" + self.makeFootnoteRefId(id))
backlink.set("class", "footnote-backref")
backlink.set(
"title",
self.getConfig('BACKLINK_TITLE').format(index)
)
backlink.text = FN_BACKLINK_TEXT
if len(li):
node = li[-1]
if node.tag == "p":
node.text = node.text + NBSP_PLACEHOLDER
node.append(backlink)
else:
p = etree.SubElement(li, "p")
p.append(backlink)
return div
| FootnoteExtension |
python | wandb__wandb | wandb/sdk/artifacts/_generated/fetch_registries.py | {
"start": 365,
"end": 517
} | class ____(GQLResult):
org_entity: Optional[FetchRegistriesOrganizationOrgEntity] = Field(
alias="orgEntity"
)
| FetchRegistriesOrganization |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-salesforce/source_salesforce/exceptions.py | {
"start": 342,
"end": 483
} | class ____(SalesforceException):
"""
We use this exception for unknown input data types for Salesforce.
"""
| TypeSalesforceException |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 14949,
"end": 15099
} | class ____(models.Model):
manager = models.OneToOneField("Employee", null=True, on_delete=models.CASCADE)
history = HistoricalRecords()
| Employee |
python | arrow-py__arrow | arrow/locales.py | {
"start": 105378,
"end": 106933
} | class ____(Locale):
names = [
"sw",
"sw-ke",
"sw-tz",
]
past = "{0} iliyopita"
future = "muda wa {0}"
and_word = "na"
timeframes = {
"now": "sasa hivi",
"second": "sekunde",
"seconds": "sekunde {0}",
"minute": "dakika moja",
"minutes": "dakika {0}",
"hour": "saa moja",
"hours": "saa {0}",
"day": "siku moja",
"days": "siku {0}",
"week": "wiki moja",
"weeks": "wiki {0}",
"month": "mwezi moja",
"months": "miezi {0}",
"year": "mwaka moja",
"years": "miaka {0}",
}
meridians = {"am": "asu", "pm": "mch", "AM": "ASU", "PM": "MCH"}
month_names = [
"",
"Januari",
"Februari",
"Machi",
"Aprili",
"Mei",
"Juni",
"Julai",
"Agosti",
"Septemba",
"Oktoba",
"Novemba",
"Desemba",
]
month_abbreviations = [
"",
"Jan",
"Feb",
"Mac",
"Apr",
"Mei",
"Jun",
"Jul",
"Ago",
"Sep",
"Okt",
"Nov",
"Des",
]
day_names = [
"",
"Jumatatu",
"Jumanne",
"Jumatano",
"Alhamisi",
"Ijumaa",
"Jumamosi",
"Jumapili",
]
day_abbreviations = [
"",
"Jumatatu",
"Jumanne",
"Jumatano",
"Alhamisi",
"Ijumaa",
"Jumamosi",
"Jumapili",
]
| SwahiliLocale |
python | pytorch__pytorch | test/ao/sparsity/test_data_scheduler.py | {
"start": 332,
"end": 836
} | class ____(BaseDataScheduler):
def __init__(self, sparsifier, sparsifier_hyperparam, last_epoch=-1, verbose=False):
super().__init__(sparsifier, sparsifier_hyperparam, last_epoch, verbose)
def get_schedule_param(self):
if self.last_epoch > 0:
return {
name: config["sparsity_level"] * 0.5
for name, config in self.data_sparsifier.data_groups.items()
}
else:
return self.base_param
| ImplementedDataScheduler |
python | pytorch__pytorch | tools/code_coverage/package/util/setting.py | {
"start": 1323,
"end": 1405
} | class ____(Enum):
FBCODE = "fbcode"
OSS = "oss"
# compiler type
| TestPlatform |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-faker/source_faker/purchase_generator.py | {
"start": 379,
"end": 4591
} | class ____:
def __init__(self, stream_name: str, seed: int) -> None:
self.stream_name = stream_name
self.seed = seed
def prepare(self):
"""
Note: the instances of the mimesis generators need to be global.
Yes, they *should* be able to be instance variables on this class, which should only instantiated once-per-worker, but that's not quite the case:
* relying only on prepare as a pool initializer fails because we are calling the parent process's method, not the fork
* Calling prepare() as part of generate() (perhaps checking if self.person is set) and then `print(self, current_process()._identity, current_process().pid)` reveals multiple object IDs in the same process, resetting the internal random counters
"""
seed_with_offset = self.seed
if self.seed is not None and len(current_process()._identity) > 0:
seed_with_offset = self.seed + current_process()._identity[0]
global dt
global numeric
dt = Datetime(seed=seed_with_offset)
numeric = Numeric(seed=seed_with_offset)
def random_date_in_range(
self, start_date: datetime.datetime, end_date: datetime.datetime = datetime.datetime.now()
) -> datetime.datetime:
time_between_dates = end_date - start_date
days_between_dates = time_between_dates.days
if days_between_dates < 2:
days_between_dates = 2
random_number_of_days = numeric.integer_number(0, days_between_dates)
random_date = start_date + datetime.timedelta(days=random_number_of_days)
return random_date
def generate(self, user_id: int) -> List[Dict]:
"""
Because we are doing this work in parallel processes, we need a deterministic way to know what a purchase's ID should be given on the input of a user_id.
tldr; Every 10 user_ids produce 10 purchases. User ID x5 has no purchases, User ID mod x7 has 2, and everyone else has 1
"""
purchases: List[Dict] = []
last_user_id_digit = int(repr(user_id)[-1])
purchase_count = 1
id_offset = 0
if last_user_id_digit - 1 == 5:
purchase_count = 0
elif last_user_id_digit - 1 == 6:
id_offset = 1
elif last_user_id_digit - 1 == 7:
id_offset = 1
purchase_count = 2
total_products = 100
i = 0
while purchase_count > 0:
id = user_id + i + 1 - id_offset
time_a = dt.datetime()
time_b = dt.datetime()
updated_at = format_airbyte_time(datetime.datetime.now())
created_at = time_a if time_a <= time_b else time_b
product_id = numeric.integer_number(1, total_products)
added_to_cart_at = self.random_date_in_range(created_at)
purchased_at = (
self.random_date_in_range(added_to_cart_at)
if added_to_cart_at is not None and numeric.integer_number(1, 100) <= 70
else None
) # 70% likely to purchase the item in the cart
returned_at = (
self.random_date_in_range(purchased_at) if purchased_at is not None and numeric.integer_number(1, 100) <= 15 else None
) # 15% likely to return the item
purchase = {
"id": id,
"product_id": product_id,
"user_id": user_id + 1,
"created_at": created_at,
"updated_at": updated_at,
"added_to_cart_at": format_airbyte_time(added_to_cart_at) if added_to_cart_at is not None else None,
"purchased_at": format_airbyte_time(purchased_at) if purchased_at is not None else None,
"returned_at": format_airbyte_time(returned_at) if returned_at is not None else None,
}
record = AirbyteRecordMessage(stream=self.stream_name, data=purchase, emitted_at=now_millis())
message = AirbyteMessageWithCachedJSON(type=Type.RECORD, record=record)
purchases.append(message)
purchase_count = purchase_count - 1
i += 1
return purchases
| PurchaseGenerator |
python | ansible__ansible | test/units/module_utils/facts/test_collectors.py | {
"start": 8124,
"end": 8339
} | class ____(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'network']
valid_subsets = ['network']
fact_namespace = 'ansible_network'
collector_class = NetworkCollector
| TestNetworkCollector |
python | wandb__wandb | wandb/sdk/artifacts/_generated/add_aliases.py | {
"start": 250,
"end": 332
} | class ____(GQLResult):
success: bool
AddAliases.model_rebuild()
| AddAliasesResult |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/pool_test.py | {
"start": 2151,
"end": 3583
} | class ____(op_bench.TorchBenchmarkBase):
def init(self, kernel, stride, N, C, H, W, device, op_func):
self.inputs = {"input": torch.rand(N, C, H, W, device=device)}
self.op_func = op_func(kernel, stride=stride)
def forward(self, input):
return self.op_func(input)
op_bench.generate_pt_tests_from_op_list(
pool_2d_ops_list, pool_2d_configs_short + pool_2d_configs_long, Pool2dBenchmark
)
"""
Microbenchmarks for MaxPool3d and AvgPool3d operators.
"""
# Configs for pool-3d ops
pool_3d_configs_short = op_bench.config_list(
attr_names=["kernel", "stride", "N", "C", "D", "H", "W"],
attrs=[
[[3, 1, 3], [2, 1, 2], 1, 16, 16, 32, 32],
],
cross_product_configs={
"device": ["cpu", "cuda"],
},
tags=["short"],
)
pool_3d_configs_long = op_bench.cross_product_configs(
kernel=[[3, 2, 3], [3, 3, 3]],
stride=[[2, 2, 2]],
N=[8, 16],
C=[32],
D=[32],
H=[32, 64],
W=[32, 64],
device=["cpu", "cuda"],
tags=["long"],
)
pool_3d_ops_list = op_bench.op_list(
attr_names=["op_name", "op_func"],
attrs=[
["MaxPool3d", nn.MaxPool3d],
["AvgPool3d", nn.AvgPool3d],
["AdaptiveMaxPool3d", lambda kernel, stride: nn.AdaptiveMaxPool3d(kernel)],
[
"FractionalMaxPool3d",
lambda kernel, stride: nn.FractionalMaxPool3d(kernel, output_size=2),
],
],
)
| Pool2dBenchmark |
python | getsentry__sentry | src/sentry/integrations/messaging/commands.py | {
"start": 1019,
"end": 1220
} | class ____(Exception):
def __init__(self, message: str, unmatched_input: CommandInput) -> None:
super().__init__(message)
self.unmatched_input = unmatched_input
| CommandNotMatchedError |
python | django__django | tests/backends/tests.py | {
"start": 38140,
"end": 39329
} | class ____(TestCase):
def test_can_reference_existent(self):
obj = Object.objects.create()
ref = ObjectReference.objects.create(obj=obj)
self.assertEqual(ref.obj, obj)
ref = ObjectReference.objects.get(obj=obj)
self.assertEqual(ref.obj, obj)
def test_can_reference_non_existent(self):
self.assertFalse(Object.objects.filter(id=12345).exists())
ref = ObjectReference.objects.create(obj_id=12345)
ref_new = ObjectReference.objects.get(obj_id=12345)
self.assertEqual(ref, ref_new)
with self.assertRaises(Object.DoesNotExist):
ref.obj
def test_many_to_many(self):
obj = Object.objects.create()
obj.related_objects.create()
self.assertEqual(Object.objects.count(), 2)
self.assertEqual(obj.related_objects.count(), 1)
intermediary_model = Object._meta.get_field(
"related_objects"
).remote_field.through
intermediary_model.objects.create(from_object_id=obj.id, to_object_id=12345)
self.assertEqual(obj.related_objects.count(), 1)
self.assertEqual(intermediary_model.objects.count(), 2)
| DBConstraintTestCase |
python | getsentry__sentry | src/sentry/relay/config/__init__.py | {
"start": 13190,
"end": 13292
} | class ____(TypedDict):
method: Literal["replace"]
substitution: str
| TransactionNameRuleRedaction |
python | cython__cython | Tools/dataclass_test_data/test_dataclasses.py | {
"start": 101921,
"end": 106562
} | class ____(unittest.TestCase):
def test_set_name(self):
# See bpo-33141.
# Create a descriptor.
class D:
def __set_name__(self, owner, name):
self.name = name + 'x'
def __get__(self, instance, owner):
if instance is not None:
return 1
return self
# This is the case of just normal descriptor behavior, no
# dataclass code is involved in initializing the descriptor.
@dataclass
class C:
c: int=D()
self.assertEqual(C.c.name, 'cx')
# Now test with a default value and init=False, which is the
# only time this is really meaningful. If not using
# init=False, then the descriptor will be overwritten, anyway.
@dataclass
class C:
c: int=field(default=D(), init=False)
self.assertEqual(C.c.name, 'cx')
self.assertEqual(C().c, 1)
def test_non_descriptor(self):
# PEP 487 says __set_name__ should work on non-descriptors.
# Create a descriptor.
class D:
def __set_name__(self, owner, name):
self.name = name + 'x'
@dataclass
class C:
c: int=field(default=D(), init=False)
self.assertEqual(C.c.name, 'cx')
def test_lookup_on_instance(self):
# See bpo-33175.
class D:
pass
d = D()
# Create an attribute on the instance, not type.
d.__set_name__ = Mock()
# Make sure d.__set_name__ is not called.
@dataclass
class C:
i: int=field(default=d, init=False)
self.assertEqual(d.__set_name__.call_count, 0)
def test_lookup_on_class(self):
# See bpo-33175.
class D:
pass
D.__set_name__ = Mock()
# Make sure D.__set_name__ is called.
@dataclass
class C:
i: int=field(default=D(), init=False)
self.assertEqual(D.__set_name__.call_count, 1)
def test_init_calls_set(self):
class D:
pass
D.__set__ = Mock()
@dataclass
class C:
i: D = D()
# Make sure D.__set__ is called.
D.__set__.reset_mock()
c = C(5)
self.assertEqual(D.__set__.call_count, 1)
def test_getting_field_calls_get(self):
class D:
pass
D.__set__ = Mock()
D.__get__ = Mock()
@dataclass
class C:
i: D = D()
c = C(5)
# Make sure D.__get__ is called.
D.__get__.reset_mock()
value = c.i
self.assertEqual(D.__get__.call_count, 1)
def test_setting_field_calls_set(self):
class D:
pass
D.__set__ = Mock()
@dataclass
class C:
i: D = D()
c = C(5)
# Make sure D.__set__ is called.
D.__set__.reset_mock()
c.i = 10
self.assertEqual(D.__set__.call_count, 1)
def test_setting_uninitialized_descriptor_field(self):
class D:
pass
D.__set__ = Mock()
@dataclass
class C:
i: D
# D.__set__ is not called because there's no D instance to call it on
D.__set__.reset_mock()
c = C(5)
self.assertEqual(D.__set__.call_count, 0)
# D.__set__ still isn't called after setting i to an instance of D
# because descriptors don't behave like that when stored as instance vars
c.i = D()
c.i = 5
self.assertEqual(D.__set__.call_count, 0)
def test_default_value(self):
class D:
def __get__(self, instance: Any, owner: object) -> int:
if instance is None:
return 100
return instance._x
def __set__(self, instance: Any, value: int) -> None:
instance._x = value
@dataclass
class C:
i: D = D()
c = C()
self.assertEqual(c.i, 100)
c = C(5)
self.assertEqual(c.i, 5)
def test_no_default_value(self):
class D:
def __get__(self, instance: Any, owner: object) -> int:
if instance is None:
raise AttributeError()
return instance._x
def __set__(self, instance: Any, value: int) -> None:
instance._x = value
@dataclass
class C:
i: D = D()
with self.assertRaisesRegex(TypeError, 'missing 1 required positional argument'):
c = C()
| TestDescriptors |
python | fluentpython__example-code | 14-it-generator/sentence_gen2.py | {
"start": 122,
"end": 403
} | class ____:
def __init__(self, text):
self.text = text # <1>
def __repr__(self):
return 'Sentence(%s)' % reprlib.repr(self.text)
def __iter__(self):
for match in RE_WORD.finditer(self.text): # <2>
yield match.group() # <3>
| Sentence |
python | huggingface__transformers | tests/models/olmo/test_modeling_olmo.py | {
"start": 6826,
"end": 15708
} | class ____(unittest.TestCase):
@slow
def test_model_1b_logits(self):
input_ids = [[1, 306, 4658, 278, 6593, 310, 2834, 338]]
model = OlmoForCausalLM.from_pretrained("allenai/OLMo-1B-hf", device_map="auto")
out = model(torch.tensor(input_ids)).logits.float()
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[2.2869, 0.3315, 0.9876, 1.4146, 1.8804, 2.0430, 1.7055, 1.2065]])
torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2)
# slicing logits[0, 0, 0:30]
EXPECTED_SLICE = torch.tensor([2.5551, -1.1230, 11.0510, 12.4977, 7.9651, 7.2342, 6.1885, 7.8340, 9.9847, 12.6695, 12.2345, 10.7970, 8.4749, 14.2483, 12.9588, 13.9233, 11.0496, 5.5749, 7.4466, 7.7914, 6.8440, 5.8951, 4.8180, 4.1935, 4.5216, 4.7256, 3.9553, 12.2870, 12.4990, 8.1591]) # fmt: skip
torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-2, atol=1e-2)
@slow
def test_model_7b_logits(self):
input_ids = [[1, 306, 4658, 278, 6593, 310, 2834, 338]]
model = OlmoForCausalLM.from_pretrained("allenai/OLMo-7B-hf", device_map="auto")
out = model(torch.tensor(input_ids)).logits.float()
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[0.0271, 0.0249, -0.0578, -0.0870, 0.0167, 0.0710, 0.1002, 0.0677]])
torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2)
# slicing logits[0, 0, 0:30]
EXPECTED_SLICE = torch.tensor([-1.7433, -1.6685, 7.4941, 6.1506, 0.1364, -0.1127, 1.3224, 4.5458, 4.2068, 5.8296, 7.4723, 2.7925, 3.1245, 10.8872, 10.0758, 10.6717, 7.0945, 1.2398, 3.6766, 4.2365, 2.5655, 2.2222, 1.7418, 0.5223, 0.7753, 1.0938, 0.6723, 6.2522, 6.2264, 1.8105]) # fmt: skip
torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-2, atol=1e-2)
@slow
def test_model_7b_twin_2t_logits(self):
input_ids = [[1, 306, 4658, 278, 6593, 310, 2834, 338]]
model = OlmoForCausalLM.from_pretrained("allenai/OLMo-7B-Twin-2T-hf", device_map="auto")
out = model(torch.tensor(input_ids)).logits.float()
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[-0.3636, -0.3825, -0.4800, -0.3696, -0.8388, -0.9737, -0.9849, -0.8356]])
torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2)
# slicing logits[0, 0, 0:30]
EXPECTED_SLICE = torch.tensor([-2.0833, -1.9234, 8.7312, 7.8049, 1.0372, 0.8941, 3.1548, 1.8502, 5.5511, 5.5793, 8.1166, 4.5906, 1.8691, 11.6377, 8.9858, 11.6447, 7.4549, 1.4725, 2.8399, 2.7568, 1.4011, 1.6958, 0.5572, 0.5231, 0.3068, 0.5364, 0.6769, 7.9636, 8.2379, 1.7950]) # fmt: skip
torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-2, atol=1e-2)
@slow
def test_model_7b_greedy_generation(self):
EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that \nthe speed of light is the same for all observers.\n\nThe theory of relativity is a theory of physics that describes the \nmovement of objects in space and time.\n\nThe theory of relativity is a theory of physics that describes the \nmovement of objects in space and time.\n\n"""
prompt = "Simply put, the theory of relativity states that "
tokenizer = AutoTokenizer.from_pretrained("allenai/OLMo-7B-hf", device_map="auto")
input_ids = tokenizer.encode(prompt, return_tensors="pt")
model = OlmoForCausalLM.from_pretrained("allenai/OLMo-7B-hf", device_map="auto")
# greedy generation outputs
generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
@require_tokenizers
def test_fast_special_tokens(self):
fast_tokenizer = GPTNeoXTokenizerFast.from_pretrained("allenai/OLMo-1B-hf")
original_add_eos_token = fast_tokenizer.add_eos_token
fast_tokenizer.add_eos_token = False
fast = fast_tokenizer.encode("A sample test")
self.assertEqual(fast, [34, 3410, 1071])
fast_tokenizer.add_eos_token = True
fast = fast_tokenizer.encode("A sample test")
self.assertEqual(fast, [34, 3410, 1071, 50279])
fast_tokenizer.add_eos_token = original_add_eos_token
@require_tokenizers
def test_simple_encode_decode(self):
rust_tokenizer = GPTNeoXTokenizerFast.from_pretrained("allenai/OLMo-1B-hf")
self.assertEqual(rust_tokenizer.encode("This is a test"), [1552, 310, 247, 1071])
self.assertEqual(rust_tokenizer.decode([1552, 310, 247, 1071], skip_special_tokens=True), "This is a test")
# bytefallback showcase
self.assertEqual(rust_tokenizer.encode("生活的真谛是"), [20025, 46549, 5225, 48561, 33656, 238, 12105]) # fmt: skip
self.assertEqual(
rust_tokenizer.decode([20025, 46549, 5225, 48561, 33656, 238, 12105], skip_special_tokens=True),
"生活的真谛是",
)
# Inner spaces showcase
self.assertEqual(rust_tokenizer.encode("Hi Hello"), [12764, 50276, 12092])
self.assertEqual(rust_tokenizer.decode([12764, 50276, 12092], skip_special_tokens=True), "Hi Hello")
self.assertEqual(rust_tokenizer.encode("Hi Hello"), [12764, 50275, 12092])
self.assertEqual(rust_tokenizer.decode([12764, 50275, 12092], skip_special_tokens=True), "Hi Hello")
self.assertEqual(rust_tokenizer.encode(""), [])
self.assertEqual(rust_tokenizer.encode(" "), [209])
self.assertEqual(rust_tokenizer.encode(" "), [50276])
self.assertEqual(rust_tokenizer.encode(" Hello"), [24387])
@pytest.mark.torch_export_test
@slow
def test_export_static_cache(self):
if version.parse(torch.__version__) < version.parse("2.4.0"):
self.skipTest(reason="This test requires torch >= 2.4 to run.")
from transformers.integrations.executorch import (
TorchExportableModuleWithStaticCache,
)
olmo_model = "allenai/OLMo-1B-hf"
tokenizer = AutoTokenizer.from_pretrained(olmo_model, pad_token="</s>", padding_side="right")
EXPECTED_TEXT_COMPLETION = [
"Simply put, the theory of relativity states that \nthe speed of light is the same in all reference frames.\n\nThe speed of light",
]
max_generation_length = tokenizer(EXPECTED_TEXT_COMPLETION, return_tensors="pt", padding=True)[
"input_ids"
].shape[-1]
# Load model
device = "cpu" # TODO (joao / export experts): should be on `torch_device`, but causes GPU OOM
dtype = torch.bfloat16
cache_implementation = "static"
attn_implementation = "sdpa"
batch_size = 1
model = OlmoForCausalLM.from_pretrained(
olmo_model,
device_map=device,
dtype=dtype,
attn_implementation=attn_implementation,
generation_config=GenerationConfig(
use_cache=True,
cache_implementation=cache_implementation,
max_length=max_generation_length,
cache_config={
"batch_size": batch_size,
"max_cache_len": max_generation_length,
},
),
)
prompts = ["Simply put, the theory of relativity states that "]
prompt_tokens = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device)
prompt_token_ids = prompt_tokens["input_ids"]
max_new_tokens = max_generation_length - prompt_token_ids.shape[-1]
# Static Cache + eager
eager_generated_ids = model.generate(
**prompt_tokens, max_new_tokens=max_new_tokens, do_sample=False, cache_implementation=cache_implementation
)
eager_generated_text = tokenizer.batch_decode(eager_generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, eager_generated_text)
# Static Cache + export
from transformers.integrations.executorch import TorchExportableModuleForDecoderOnlyLM
exportable_module = TorchExportableModuleForDecoderOnlyLM(model)
exported_program = exportable_module.export(
input_ids=torch.tensor([[1]], dtype=torch.long, device=model.device),
cache_position=torch.tensor([0], dtype=torch.long, device=model.device),
)
ep_generated_ids = TorchExportableModuleWithStaticCache.generate(
exported_program=exported_program, prompt_token_ids=prompt_token_ids, max_new_tokens=max_new_tokens
)
ep_generated_text = tokenizer.batch_decode(ep_generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, ep_generated_text)
| OlmoIntegrationTest |
python | Textualize__textual | tests/test_widget_mounting.py | {
"start": 184,
"end": 4855
} | class ____(Widget):
"""Test a widget that tries to own itself."""
def __init__(self) -> None:
super().__init__(self)
async def test_mount_via_app() -> None:
"""Perform mount tests via the app."""
# Make a background set of widgets.
widgets = [Static(id=f"starter-{n}") for n in range(10)]
async with App[None]().run_test() as pilot:
with pytest.raises(WidgetError):
await pilot.app.mount(SelfOwn())
async with App().run_test() as pilot:
# Mount the first one and make sure it's there.
await pilot.app.mount(widgets[0])
assert len(pilot.app.screen._nodes) == 1
assert pilot.app.screen._nodes[0] == widgets[0]
# Mount the next 2 widgets via mount.
await pilot.app.mount(*widgets[1:3])
assert list(pilot.app.screen._nodes) == widgets[0:3]
# Finally mount the rest of the widgets via mount_all.
await pilot.app.mount_all(widgets[3:])
assert list(pilot.app.screen._nodes) == widgets
async with App().run_test() as pilot:
# Mount a widget before -1, which is "before the end".
penultimate = Static(id="penultimate")
await pilot.app.mount_all(widgets)
await pilot.app.mount(penultimate, before=-1)
assert pilot.app.screen._nodes[-2] == penultimate
async with App().run_test() as pilot:
# Mount a widget after -1, which is "at the end".
ultimate = Static(id="ultimate")
await pilot.app.mount_all(widgets)
await pilot.app.mount(ultimate, after=-1)
assert pilot.app.screen._nodes[-1] == ultimate
async with App().run_test() as pilot:
# Mount a widget before -2, which is "before the penultimate".
penpenultimate = Static(id="penpenultimate")
await pilot.app.mount_all(widgets)
await pilot.app.mount(penpenultimate, before=-2)
assert pilot.app.screen._nodes[-3] == penpenultimate
async with App().run_test() as pilot:
# Mount a widget after -2, which is "before the end".
penultimate = Static(id="penultimate")
await pilot.app.mount_all(widgets)
await pilot.app.mount(penultimate, after=-2)
assert pilot.app.screen._nodes[-2] == penultimate
async with App().run_test() as pilot:
# Mount a widget before 0, which is "at the start".
start = Static(id="start")
await pilot.app.mount_all(widgets)
await pilot.app.mount(start, before=0)
assert pilot.app.screen._nodes[0] == start
async with App().run_test() as pilot:
# Mount a widget after 0. You get the idea...
second = Static(id="second")
await pilot.app.mount_all(widgets)
await pilot.app.mount(second, after=0)
assert pilot.app.screen._nodes[1] == second
async with App().run_test() as pilot:
# Mount a widget relative to another via query.
queue_jumper = Static(id="queue-jumper")
await pilot.app.mount_all(widgets)
await pilot.app.mount(queue_jumper, after="#starter-5")
assert pilot.app.screen._nodes[6] == queue_jumper
async with App().run_test() as pilot:
# Mount a widget relative to another via query.
queue_jumper = Static(id="queue-jumper")
await pilot.app.mount_all(widgets)
await pilot.app.mount(queue_jumper, after=widgets[5])
assert pilot.app.screen._nodes[6] == queue_jumper
async with App().run_test() as pilot:
# Make sure we get told off for trying to before and after.
await pilot.app.mount_all(widgets)
with pytest.raises(MountError):
await pilot.app.mount(Static(), before=2, after=2)
async with App().run_test() as pilot:
# Make sure we get told off trying to mount relative to something
# that isn't actually in the DOM.
await pilot.app.mount_all(widgets)
with pytest.raises(MountError):
await pilot.app.mount(Static(), before=Static())
with pytest.raises(MountError):
await pilot.app.mount(Static(), after=Static())
async with App().run_test() as pilot:
# Make sure we get an error if we try and mount with a selector that
# results in more than one hit.
await pilot.app.mount_all(widgets)
with pytest.raises(TooManyMatches):
await pilot.app.mount(Static(), before="Static")
async def test_mount_error() -> None:
"""Mounting a widget on an un-mounted widget should raise an error."""
app = App()
async with app.run_test():
with pytest.raises(MountError):
widget = Widget()
widget.mount(Static())
| SelfOwn |
python | facelessuser__soupsieve | tests/test_level4/test_nth_child.py | {
"start": 56,
"end": 1202
} | class ____(util.TestCase):
"""Test `nth` child selectors."""
MARKUP = """
<p id="0"></p>
<p id="1"></p>
<span id="2" class="test"></span>
<span id="3"></span>
<span id="4" class="test"></span>
<span id="5"></span>
<span id="6" class="test"></span>
<p id="7"></p>
<p id="8" class="test"></p>
<p id="9"></p>
<p id="10" class="test"></p>
<span id="11"></span>
"""
def test_nth_child_of_s_simple(self):
"""Test `nth` child with selector (simple)."""
self.assert_selector(
self.MARKUP,
":nth-child(-n+3 of p)",
['0', '1', '7'],
flags=util.HTML
)
def test_nth_child_of_s_complex(self):
"""Test `nth` child with selector (complex)."""
self.assert_selector(
self.MARKUP,
":nth-child(2n + 1 of :is(p, span).test)",
['2', '6', '10'],
flags=util.HTML
)
self.assert_selector(
self.MARKUP,
":nth-child(2n + 1 OF :is(p, span).test)",
['2', '6', '10'],
flags=util.HTML
)
| TestNthChild |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_M.py | {
"start": 1197,
"end": 2352
} | class ____(Benchmark):
r"""
McCormick objective function.
This class defines the McCormick [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{McCormick}}(x) = - x_{1} + 2 x_{2} + \left(x_{1}
- x_{2}\right)^{2} + \sin\left(x_{1} + x_{2}\right) + 1
with :math:`x_1 \in [-1.5, 4]`, :math:`x_2 \in [-3, 4]`.
*Global optimum*: :math:`f(x) = -1.913222954981037` for
:math:`x = [-0.5471975602214493, -1.547197559268372]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = [(-1.5, 4.0), (-3.0, 3.0)]
self.global_optimum = [[-0.5471975602214493, -1.547197559268372]]
self.fglob = -1.913222954981037
def fun(self, x, *args):
self.nfev += 1
return (sin(x[0] + x[1]) + (x[0] - x[1]) ** 2 - 1.5 * x[0]
+ 2.5 * x[1] + 1)
| McCormick |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/rnn_cell_test.py | {
"start": 54649,
"end": 67833
} | class ____(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def _createBidirectionalRNN(self, use_shape, use_sequence_length, scope=None):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
sequence_length = array_ops.placeholder(
dtypes.int64) if use_sequence_length else None
cell_fw = rnn_cell.LSTMCell(
num_units, input_size, initializer=initializer, state_is_tuple=False)
cell_bw = rnn_cell.LSTMCell(
num_units, input_size, initializer=initializer, state_is_tuple=False)
inputs = max_length * [
array_ops.placeholder(
dtypes.float32,
shape=(batch_size, input_size) if use_shape else (None, input_size))
]
outputs, state_fw, state_bw = rnn.static_bidirectional_rnn(
cell_fw,
cell_bw,
inputs,
dtype=dtypes.float32,
sequence_length=sequence_length,
scope=scope)
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertEqual(out.get_shape().as_list(),
[batch_size if use_shape else None, 2 * num_units])
input_value = np.random.randn(batch_size, input_size)
outputs = array_ops_stack.stack(outputs)
return input_value, inputs, outputs, state_fw, state_bw, sequence_length
def _testBidirectionalRNN(self, use_shape):
with self.session(graph=ops.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
self._createBidirectionalRNN(use_shape, True))
variables_lib.global_variables_initializer().run()
# Run with pre-specified sequence length of 2, 3
out, s_fw, s_bw = sess.run(
[outputs, state_fw, state_bw],
feed_dict={
inputs[0]: input_value,
sequence_length: [2, 3]
})
# Since the forward and backward LSTM cells were initialized with the
# same parameters, the forward and backward output has to be the same,
# but reversed in time. The format is output[time][batch][depth], and
# due to depth concatenation (as num_units=3 for both RNNs):
# - forward output: out[][][depth] for 0 <= depth < 3
# - backward output: out[][][depth] for 4 <= depth < 6
#
# First sequence in batch is length=2
# Check that the time=0 forward output is equal to time=1 backward output
self.assertAllClose(out[0][0][0], out[1][0][3])
self.assertAllClose(out[0][0][1], out[1][0][4])
self.assertAllClose(out[0][0][2], out[1][0][5])
# Check that the time=1 forward output is equal to time=0 backward output
self.assertAllClose(out[1][0][0], out[0][0][3])
self.assertAllClose(out[1][0][1], out[0][0][4])
self.assertAllClose(out[1][0][2], out[0][0][5])
# Second sequence in batch is length=3
# Check that the time=0 forward output is equal to time=2 backward output
self.assertAllClose(out[0][1][0], out[2][1][3])
self.assertAllClose(out[0][1][1], out[2][1][4])
self.assertAllClose(out[0][1][2], out[2][1][5])
# Check that the time=1 forward output is equal to time=1 backward output
self.assertAllClose(out[1][1][0], out[1][1][3])
self.assertAllClose(out[1][1][1], out[1][1][4])
self.assertAllClose(out[1][1][2], out[1][1][5])
# Check that the time=2 forward output is equal to time=0 backward output
self.assertAllClose(out[2][1][0], out[0][1][3])
self.assertAllClose(out[2][1][1], out[0][1][4])
self.assertAllClose(out[2][1][2], out[0][1][5])
# Via the reasoning above, the forward and backward final state should be
# exactly the same
self.assertAllClose(s_fw, s_bw)
def _testBidirectionalRNNWithoutSequenceLength(self, use_shape):
with self.session(graph=ops.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, _ = (
self._createBidirectionalRNN(use_shape, False))
variables_lib.global_variables_initializer().run()
out, s_fw, s_bw = sess.run(
[outputs, state_fw, state_bw], feed_dict={
inputs[0]: input_value
})
# Since the forward and backward LSTM cells were initialized with the
# same parameters, the forward and backward output has to be the same,
# but reversed in time. The format is output[time][batch][depth], and
# due to depth concatenation (as num_units=3 for both RNNs):
# - forward output: out[][][depth] for 0 <= depth < 3
# - backward output: out[][][depth] for 4 <= depth < 6
#
# Both sequences in batch are length=8. Check that the time=i
# forward output is equal to time=8-1-i backward output
for i in range(8):
self.assertAllClose(out[i][0][0:3], out[8 - 1 - i][0][3:6])
self.assertAllClose(out[i][1][0:3], out[8 - 1 - i][1][3:6])
# Via the reasoning above, the forward and backward final state should be
# exactly the same
self.assertAllClose(s_fw, s_bw)
@test_util.run_v1_only("b/124229375")
def testBidirectionalRNN(self):
self._testBidirectionalRNN(use_shape=False)
self._testBidirectionalRNN(use_shape=True)
@test_util.run_v1_only("b/124229375")
def testBidirectionalRNNWithoutSequenceLength(self):
self._testBidirectionalRNNWithoutSequenceLength(use_shape=False)
self._testBidirectionalRNNWithoutSequenceLength(use_shape=True)
def _createBidirectionalDynamicRNN(self,
use_shape,
use_state_tuple,
use_time_major,
use_sequence_length,
scope=None):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
sequence_length = (
array_ops.placeholder(dtypes.int64) if use_sequence_length else None)
cell_fw = rnn_cell.LSTMCell(
num_units, initializer=initializer, state_is_tuple=use_state_tuple)
cell_bw = rnn_cell.LSTMCell(
num_units, initializer=initializer, state_is_tuple=use_state_tuple)
inputs = max_length * [
array_ops.placeholder(
dtypes.float32,
shape=(batch_size if use_shape else None, input_size))
]
inputs_c = array_ops_stack.stack(inputs)
if not use_time_major:
inputs_c = array_ops.transpose(inputs_c, [1, 0, 2])
outputs, states = rnn.bidirectional_dynamic_rnn(
cell_fw,
cell_bw,
inputs_c,
sequence_length,
dtype=dtypes.float32,
time_major=use_time_major,
scope=scope)
outputs = array_ops.concat(outputs, 2)
state_fw, state_bw = states
outputs_shape = [None, max_length, 2 * num_units]
if use_shape:
outputs_shape[0] = batch_size
if use_time_major:
outputs_shape[0], outputs_shape[1] = outputs_shape[1], outputs_shape[0]
self.assertEqual(outputs.get_shape().as_list(), outputs_shape)
input_value = np.random.randn(batch_size, input_size)
return input_value, inputs, outputs, state_fw, state_bw, sequence_length
def _testBidirectionalDynamicRNN(self, use_shape, use_state_tuple,
use_time_major, use_sequence_length):
with self.session(graph=ops.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
self._createBidirectionalDynamicRNN(
use_shape, use_state_tuple, use_time_major, use_sequence_length))
variables_lib.global_variables_initializer().run()
# Run with pre-specified sequence length of 2, 3
feed_dict = ({sequence_length: [2, 3]} if use_sequence_length else {})
feed_dict.update({inputs[0]: input_value})
if use_state_tuple:
out, c_fw, m_fw, c_bw, m_bw = sess.run(
[outputs, state_fw[0], state_fw[1], state_bw[0], state_bw[1]],
feed_dict=feed_dict)
s_fw = (c_fw, m_fw)
s_bw = (c_bw, m_bw)
else:
feed_dict.update({inputs[0]: input_value})
out, s_fw, s_bw = sess.run(
[outputs, state_fw, state_bw], feed_dict=feed_dict)
# Since the forward and backward LSTM cells were initialized with the
# same parameters, the forward and backward output has to be the same,
# but reversed in time. The format is output[time][batch][depth], and
# due to depth concatenation (as num_units=3 for both RNNs):
# - forward output: out[][][depth] for 0 <= depth < 3
# - backward output: out[][][depth] for 4 <= depth < 6
#
if not use_time_major:
out = np.swapaxes(out, 0, 1)
if use_sequence_length:
# First sequence in batch is length=2
# Check that the t=0 forward output is equal to t=1 backward output
self.assertEqual(out[0][0][0], out[1][0][3])
self.assertEqual(out[0][0][1], out[1][0][4])
self.assertEqual(out[0][0][2], out[1][0][5])
# Check that the t=1 forward output is equal to t=0 backward output
self.assertEqual(out[1][0][0], out[0][0][3])
self.assertEqual(out[1][0][1], out[0][0][4])
self.assertEqual(out[1][0][2], out[0][0][5])
# Second sequence in batch is length=3
# Check that the t=0 forward output is equal to t=2 backward output
self.assertEqual(out[0][1][0], out[2][1][3])
self.assertEqual(out[0][1][1], out[2][1][4])
self.assertEqual(out[0][1][2], out[2][1][5])
# Check that the t=1 forward output is equal to t=1 backward output
self.assertEqual(out[1][1][0], out[1][1][3])
self.assertEqual(out[1][1][1], out[1][1][4])
self.assertEqual(out[1][1][2], out[1][1][5])
# Check that the t=2 forward output is equal to t=0 backward output
self.assertEqual(out[2][1][0], out[0][1][3])
self.assertEqual(out[2][1][1], out[0][1][4])
self.assertEqual(out[2][1][2], out[0][1][5])
# Via the reasoning above, the forward and backward final state should
# be exactly the same
self.assertAllClose(s_fw, s_bw)
else: # not use_sequence_length
max_length = 8 # from createBidirectionalDynamicRNN
for t in range(max_length):
self.assertAllEqual(out[t, :, 0:3], out[max_length - t - 1, :, 3:6])
self.assertAllClose(s_fw, s_bw)
@test_util.run_v1_only("b/124229375")
def testBidirectionalDynamicRNN(self):
# Generate 2^5 option values
# from [True, True, True, True, True] to [False, False, False, False, False]
options = itertools.product([True, False], repeat=4)
for option in options:
self._testBidirectionalDynamicRNN(
use_shape=option[0],
use_state_tuple=option[1],
use_time_major=option[2],
use_sequence_length=option[3])
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
# REMARKS: factory(scope) is a function accepting a scope
# as an argument, such scope can be None, a string
# or a VariableScope instance.
with self.session(graph=ops.Graph()):
if use_outer_scope:
with variable_scope.variable_scope(prefix) as scope:
factory(scope)
else:
factory(prefix)
# check that all the variables names starts
# with the proper scope.
variables_lib.global_variables_initializer()
all_vars = variables_lib.global_variables()
prefix = prefix or "bidirectional_rnn"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
tf_logging.info("BiRNN with scope: %s (%s)" %
(prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
tf_logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
@test_util.run_v1_only("b/124229375")
def testBidirectionalRNNScope(self):
def factory(scope):
return self._createBidirectionalRNN(
use_shape=True, use_sequence_length=True, scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
@test_util.run_v1_only("b/124229375")
def testBidirectionalDynamicRNNScope(self):
def get_factory(use_time_major):
def factory(scope):
return self._createBidirectionalDynamicRNN(
use_shape=True,
use_state_tuple=True,
use_sequence_length=True,
use_time_major=use_time_major,
scope=scope)
return factory
self._testScope(get_factory(True), use_outer_scope=True)
self._testScope(get_factory(True), use_outer_scope=False)
self._testScope(get_factory(True), prefix=None, use_outer_scope=False)
self._testScope(get_factory(False), use_outer_scope=True)
self._testScope(get_factory(False), use_outer_scope=False)
self._testScope(get_factory(False), prefix=None, use_outer_scope=False)
| BidirectionalRNNTest |
python | tensorflow__tensorflow | tensorflow/python/ops/parallel_for/control_flow_ops_test.py | {
"start": 57332,
"end": 66343
} | class ____(PForTestCase):
def setUp(self):
self._enabled = control_flow_v2_toggles.control_flow_v2_enabled()
control_flow_v2_toggles.disable_control_flow_v2()
super(WhileV1Test, self).setUp()
def tearDown(self):
if self._enabled:
control_flow_v2_toggles.enable_control_flow_v2()
super(WhileV1Test, self).tearDown()
def test_while_outside_loop(self):
x = while_loop.while_loop(lambda j: j < 4, lambda j: j + 1, [0])
def loop_fn(i):
return x + i
self._test_loop_fn(loop_fn, 3)
@test_util.run_v1_only("b/122612051")
def test_invariant_while(self):
def loop_fn(_):
return while_loop.while_loop(lambda j: j < 4, lambda j: j + 1, [0])
self._test_loop_fn(loop_fn, 3)
@test_util.run_v1_only("b/122612051")
def test_invariant_while_with_control_dependency(self):
def loop_fn(i):
with ops.control_dependencies([i]):
return while_loop.while_loop(lambda j: j < 4, lambda j: j + 1, [0])
self._test_loop_fn(loop_fn, 3)
@test_util.run_v1_only("b/122612051")
def test_while_with_stateful_ops(self):
def loop_fn(_):
return while_loop.while_loop(
lambda j, x: j < 4, lambda j, x:
(j + 1, x + random_ops.random_uniform([])), [0, 0.])[0]
self._test_loop_fn(loop_fn, 3)
@test_util.run_v1_only("b/122612051")
def test_while_unstacked_condition(self):
def loop_fn(i):
return while_loop.while_loop(lambda j, x: j < 4, lambda j, x:
(j + 1, x + i), [0, 0])
self._test_loop_fn(loop_fn, 3)
@test_util.run_v1_only("b/122612051")
def test_while(self):
x = random_ops.random_uniform([3, 5])
lengths = constant_op.constant([4, 0, 2])
def loop_fn(i):
x_i = array_ops.gather(x, i)
lengths_i = array_ops.gather(lengths, i)
_, total = while_loop.while_loop(
lambda j, _: j < lengths_i, lambda j, t:
(j + 1, t + array_ops.gather(x_i, j)), [0, 0.])
return total
self._test_loop_fn(loop_fn, 3)
@test_util.run_v1_only("b/122612051")
def test_while_jacobian(self):
x = random_ops.random_uniform([1, 3])
y = random_ops.random_uniform([3, 3])
# out = x @ y @ y @ y @ y, where @ is matmul operator.
_, out = while_loop.while_loop(
lambda i, _: i < 4, lambda i, out: (i + 1, math_ops.matmul(out, y)),
[0, x])
def loop_fn(i):
out_i = array_ops.gather(out, i, axis=1)
return array_ops.reshape(gradient_ops.gradients(out_i, x)[0], [-1])
out = pfor_control_flow_ops.pfor(loop_fn, iters=3)
# The above code does not work with tf.while_loop instead of pfor. So we
# manually compute the expected output here.
# Note that gradient of output w.r.t is (y @ y @ y @ y)^T.
expected_output = y
for _ in range(3):
expected_output = math_ops.matmul(expected_output, y)
expected_output = array_ops.transpose(expected_output, [1, 0])
with session.Session() as sess:
out, expected = sess.run([out, expected_output])
self.assertAllClose(expected, out)
@test_util.run_v1_only("b/122612051")
def test_tensor_array_as_loop_variable(self):
def loop_fn(i):
def body(j, ta):
ta = ta.write(j, i + j * j)
return j + 1, ta
_, ta = while_loop.while_loop(
lambda j, _: j < 4, body,
(0, tensor_array_ops.TensorArray(dtypes.int32, size=4)))
return ta.stack()
self._test_loop_fn(loop_fn, 3)
@test_util.run_v1_only("b/122612051")
def test_read_tensor_array_partitioned_indices(self):
# Note that tensor array values are pfor loop dependent, and the while loop
# termination condition is also dependent on pfor iteration.
def loop_fn(i):
ta = tensor_array_ops.TensorArray(dtypes.int32, size=6)
ta = ta.unstack(i + list(range(5)))
def body(j, s):
return j + 1, s + ta.read(j)
_, s = while_loop.while_loop(lambda j, _: j < i, body, (0, 0))
return s
self._test_loop_fn(loop_fn, 3)
@test_util.run_v1_only("b/122612051")
def test_external_while_loop_grad(self):
# Here we test that external while_loops that are extended from inside pfor
# (due to gradient calls) are not actually converted. If the below was
# converted all pfor iterations would write to the same tensor array
# indices.
x = constant_op.constant(1.)
def body(j, ta):
ta = ta.write(j, x)
return j + 1, ta
_, ta = while_loop.while_loop(
lambda j, _: j < 4, body,
(0, tensor_array_ops.TensorArray(dtypes.float32, size=4)))
out = ta.stack()
def loop_fn(i):
out_i = array_ops.gather(out, i)
return gradient_ops.gradients(out_i, x)[0]
with session.Session() as sess:
# out is [x, x, x]. Hence the gradients should be [1, 1, 1].
self.assertAllEqual([1, 1, 1],
sess.run(pfor_control_flow_ops.pfor(loop_fn, 3)))
@test_util.run_v1_only("b/122612051")
def test_tensor_array_grad(self):
inp = constant_op.constant(np.random.rand(3, 4, 2), dtype=dtypes.float32)
ta = tensor_array_ops.TensorArray(dtypes.float32, size=3)
ta = ta.unstack(inp)
def loop_fn(i):
def body(j, x):
value = ta.gather([j])
value = array_ops.gather(array_ops.reshape(value, [4, 2]), i)
return j + 1, x + value
_, out = while_loop.while_loop(lambda j, _: j < 3, body,
(0, array_ops.zeros([2])))
out = math_ops.reduce_prod(out)
return out, gradient_ops.gradients(out, inp)[0]
pfor_out, pfor_out_grad = pfor_control_flow_ops.pfor(loop_fn, 4)
# Note that tf.while_loop does not work in the setup above. So we manually
# construct the equivalent computation of the above loops here.
real_out = math_ops.reduce_sum(inp, axis=[0])
real_out = math_ops.reduce_prod(real_out, axis=[1])
# Note that gradients of real_out will accumulate the gradients across the
# output value. Hence we do the same aggregation on pfor_out_grad.
real_out_grad = gradient_ops.gradients(real_out, inp)[0]
sum_pfor_out_grad = math_ops.reduce_sum(pfor_out_grad, axis=[0])
with session.Session() as sess:
v1, v2, v1_grad, v2_grad = sess.run(
[pfor_out, real_out, sum_pfor_out_grad, real_out_grad])
self.assertAllClose(v1, v2)
self.assertAllClose(v1_grad, v2_grad)
def dynamic_lstm_input_fn(batch_size, state_size, max_steps):
# We make inputs and sequence_length constant so that multiple session.run
# calls produce the same result.
inputs = constant_op.constant(
np.random.rand(batch_size, max_steps, state_size), dtype=dtypes.float32)
sequence_length = np.random.randint(0, size=[batch_size], high=max_steps + 1)
sequence_length = constant_op.constant(sequence_length, dtype=dtypes.int32)
return inputs, sequence_length
def create_dynamic_lstm(cell_fn, batch_size, state_size, max_steps):
cell = cell_fn(state_size)
inputs, sequence_length = dynamic_lstm_input_fn(batch_size, state_size,
max_steps)
inputs_ta = tensor_array_ops.TensorArray(
dtypes.float32, size=max_steps, element_shape=[batch_size, state_size])
inputs_time_major = array_ops.transpose(inputs, [1, 0, 2])
inputs_ta = inputs_ta.unstack(inputs_time_major)
zeros = array_ops.zeros([state_size])
def loop_fn(i):
sequence_length_i = array_ops.gather(sequence_length, i)
def body_fn(t, state, ta):
inputs_t = array_ops.expand_dims(
array_ops.gather(inputs_ta.read(t), i), 0)
output, new_state = cell(inputs_t, state)
output = array_ops.reshape(output, [-1])
# TODO(agarwal): one optimization that dynamic_rnn uses is to avoid the
# array_ops.where when t < min(sequence_length). Doing that requires
# supporting tf.cond pfor conversion.
done = t >= sequence_length_i
output = array_ops.where(done, zeros, output)
ta = ta.write(t, output)
new_state = [
array_ops.where(done, s, ns)
for s, ns in zip(nest.flatten(state), nest.flatten(new_state))
]
new_state = nest.pack_sequence_as(state, new_state)
return t + 1, new_state, ta
def condition_fn(t, _, unused):
del unused
return t < max_steps
initial_state = cell.zero_state(1, dtypes.float32)
_, state, ta = while_loop.while_loop(condition_fn, body_fn, [
0, initial_state,
tensor_array_ops.TensorArray(dtypes.float32, max_steps)
])
new_state = [array_ops.reshape(x, [-1]) for x in nest.flatten(state)]
new_state = nest.pack_sequence_as(initial_state, new_state)
return ta.stack(), new_state
pfor_output = pfor_control_flow_ops.pfor(loop_fn, batch_size)
tf_output = rnn.dynamic_rnn(
cell,
inputs,
sequence_length=sequence_length,
initial_state=cell.zero_state(batch_size, dtypes.float32))
return pfor_output, tf_output
@test_util.run_all_in_graph_and_eager_modes
| WhileV1Test |
python | python-openxml__python-docx | tests/image/test_jpeg.py | {
"start": 3399,
"end": 7550
} | class ____:
def it_can_construct_from_a_jfif_stream(
self, stream_, _MarkerParser_, _JfifMarkers__init_, soi_, app0_, sof_, sos_
):
marker_lst = [soi_, app0_, sof_, sos_]
jfif_markers = _JfifMarkers.from_stream(stream_)
_MarkerParser_.from_stream.assert_called_once_with(stream_)
_JfifMarkers__init_.assert_called_once_with(ANY, marker_lst)
assert isinstance(jfif_markers, _JfifMarkers)
def it_can_find_the_APP0_marker(self, app0_fixture):
jfif_markers, app0_ = app0_fixture
app0 = jfif_markers.app0
assert app0 is app0_
def it_can_find_the_APP1_marker(self, app1_fixture):
jfif_markers, app1_ = app1_fixture
app1 = jfif_markers.app1
assert app1 is app1_
def it_raises_if_it_cant_find_the_APP0_marker(self, no_app0_fixture):
jfif_markers = no_app0_fixture
with pytest.raises(KeyError):
jfif_markers.app0
def it_raises_if_it_cant_find_the_APP1_marker(self, no_app1_fixture):
jfif_markers = no_app1_fixture
with pytest.raises(KeyError):
jfif_markers.app1
def it_can_find_the_SOF_marker(self, sof_fixture):
jfif_markers, sof_ = sof_fixture
sof = jfif_markers.sof
assert sof is sof_
def it_raises_if_it_cant_find_the_SOF_marker(self, no_sof_fixture):
jfif_markers = no_sof_fixture
with pytest.raises(KeyError):
jfif_markers.sof
# fixtures -------------------------------------------------------
@pytest.fixture
def app0_(self, request):
return instance_mock(request, _App0Marker, marker_code=JPEG_MARKER_CODE.APP0)
@pytest.fixture
def app1_(self, request):
return instance_mock(request, _App1Marker, marker_code=JPEG_MARKER_CODE.APP1)
@pytest.fixture
def app0_fixture(self, soi_, app0_, eoi_):
markers = (soi_, app0_, eoi_)
jfif_markers = _JfifMarkers(markers)
return jfif_markers, app0_
@pytest.fixture
def app1_fixture(self, soi_, app1_, eoi_):
markers = (soi_, app1_, eoi_)
jfif_markers = _JfifMarkers(markers)
return jfif_markers, app1_
@pytest.fixture
def eoi_(self, request):
return instance_mock(request, _SofMarker, marker_code=JPEG_MARKER_CODE.EOI)
@pytest.fixture
def _JfifMarkers__init_(self, request):
return initializer_mock(request, _JfifMarkers)
@pytest.fixture
def marker_parser_(self, request, markers_all_):
marker_parser_ = instance_mock(request, _MarkerParser)
marker_parser_.iter_markers.return_value = markers_all_
return marker_parser_
@pytest.fixture
def _MarkerParser_(self, request, marker_parser_):
_MarkerParser_ = class_mock(request, "docx.image.jpeg._MarkerParser")
_MarkerParser_.from_stream.return_value = marker_parser_
return _MarkerParser_
@pytest.fixture
def markers_all_(self, request, soi_, app0_, sof_, sos_, eoi_):
return [soi_, app0_, sof_, sos_, eoi_]
@pytest.fixture
def no_app0_fixture(self, soi_, eoi_):
markers = (soi_, eoi_)
return _JfifMarkers(markers)
@pytest.fixture
def no_app1_fixture(self, soi_, eoi_):
markers = (soi_, eoi_)
return _JfifMarkers(markers)
@pytest.fixture
def no_sof_fixture(self, soi_, eoi_):
markers = (soi_, eoi_)
return _JfifMarkers(markers)
@pytest.fixture
def sof_(self, request):
return instance_mock(request, _SofMarker, marker_code=JPEG_MARKER_CODE.SOF0)
@pytest.fixture
def sof_fixture(self, soi_, sof_, eoi_):
markers = (soi_, sof_, eoi_)
jfif_markers = _JfifMarkers(markers)
return jfif_markers, sof_
@pytest.fixture
def soi_(self, request):
return instance_mock(request, _Marker, marker_code=JPEG_MARKER_CODE.SOI)
@pytest.fixture
def sos_(self, request):
return instance_mock(request, _Marker, marker_code=JPEG_MARKER_CODE.SOS)
@pytest.fixture
def stream_(self, request):
return instance_mock(request, io.BytesIO)
| Describe_JfifMarkers |
python | getsentry__sentry | src/sentry/snuba/entity_subscription.py | {
"start": 5226,
"end": 5308
} | class ____:
query_type: SnubaQuery.Type
dataset: Dataset
| _EntitySubscription |
python | python-openxml__python-docx | tests/test_enum.py | {
"start": 703,
"end": 2188
} | class ____:
"""Unit-test suite for `docx.enum.base.BaseXmlEnum`."""
def it_is_an_instance_of_EnumMeta_just_like_a_regular_Enum(self):
assert type(SomeXmlAttr) is enum.EnumMeta
def it_has_the_same_repr_as_a_regular_Enum(self):
assert repr(SomeXmlAttr) == "<enum 'SomeXmlAttr'>"
def it_has_an_MRO_that_goes_through_the_base_class_int_and_Enum(self):
assert SomeXmlAttr.__mro__ == (
SomeXmlAttr,
BaseXmlEnum,
int,
enum.Enum,
object,
), f"got: {SomeXmlAttr.__mro__}"
def it_knows_the_XML_value_for_each_member_by_the_member_instance(self):
assert SomeXmlAttr.to_xml(SomeXmlAttr.FOO) == "foo"
def it_knows_the_XML_value_for_each_member_by_the_member_value(self):
assert SomeXmlAttr.to_xml(2) == "bar"
def but_it_raises_when_there_is_no_such_member(self):
with pytest.raises(ValueError, match="42 is not a valid SomeXmlAttr"):
SomeXmlAttr.to_xml(42)
def it_can_find_the_member_from_the_XML_attr_value(self):
assert SomeXmlAttr.from_xml("bar") == SomeXmlAttr.BAR
def and_it_can_find_the_member_from_None_when_a_member_maps_that(self):
assert SomeXmlAttr.from_xml(None) == SomeXmlAttr.BAZ
def but_it_raises_when_there_is_no_such_mapped_XML_value(self):
with pytest.raises(ValueError, match="SomeXmlAttr has no XML mapping for 'baz'"):
SomeXmlAttr.from_xml("baz")
| DescribeBaseXmlEnum |
python | scikit-learn__scikit-learn | sklearn/kernel_approximation.py | {
"start": 30408,
"end": 39735
} | class ____(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
.. versionadded:: 0.13
Parameters
----------
kernel : str or callable, default='rbf'
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as `kernel_params`, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
coef0 : float, default=None
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
degree : float, default=None
Degree of the polynomial kernel. Ignored by other kernels.
kernel_params : dict, default=None
Additional parameters (keyword arguments) for kernel function passed
as callable object.
n_components : int, default=100
Number of features to construct.
How many data points will be used to construct the mapping.
random_state : int, RandomState instance or None, default=None
Pseudo-random number generator to control the uniform sampling without
replacement of `n_components` of the training data to construct the
basis kernel.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by breaking
down the kernel matrix into `n_jobs` even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 0.24
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : ndarray of shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : ndarray of shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel.
PolynomialCountSketch : Polynomial kernel approximation via Tensor Sketch.
RBFSampler : Approximate a RBF kernel feature map using random Fourier
features.
SkewedChi2Sampler : Approximate feature map for "skewed chi-squared" kernel.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
Examples
--------
>>> from sklearn import datasets, svm
>>> from sklearn.kernel_approximation import Nystroem
>>> X, y = datasets.load_digits(n_class=9, return_X_y=True)
>>> data = X / 16.
>>> clf = svm.LinearSVC()
>>> feature_map_nystroem = Nystroem(gamma=.2,
... random_state=1,
... n_components=300)
>>> data_transformed = feature_map_nystroem.fit_transform(data)
>>> clf.fit(data_transformed, y)
LinearSVC()
>>> clf.score(data_transformed, y)
0.9987...
"""
_parameter_constraints: dict = {
"kernel": [
StrOptions(set(PAIRWISE_KERNEL_FUNCTIONS.keys()) | {"precomputed"}),
callable,
],
"gamma": [Interval(Real, 0, None, closed="left"), None],
"coef0": [Interval(Real, None, None, closed="neither"), None],
"degree": [Interval(Real, 1, None, closed="left"), None],
"kernel_params": [dict, None],
"n_components": [Interval(Integral, 1, None, closed="left")],
"random_state": ["random_state"],
"n_jobs": [Integral, None],
}
def __init__(
self,
kernel="rbf",
*,
gamma=None,
coef0=None,
degree=None,
kernel_params=None,
n_components=100,
random_state=None,
n_jobs=None,
):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
self.n_jobs = n_jobs
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
default=None
Target values (None for unsupervised transformations).
Returns
-------
self : object
Returns the instance itself.
"""
X = validate_data(self, X, accept_sparse="csr")
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn(
"n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel."
)
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(
basis,
metric=self.kernel,
filter_params=True,
n_jobs=self.n_jobs,
**self._get_kernel_params(),
)
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = basis_inds
self._n_features_out = n_components
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : ndarray of shape (n_samples, n_components)
Transformed data.
"""
check_is_fitted(self)
X = validate_data(self, X, accept_sparse="csr", reset=False)
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(
X,
self.components_,
metric=self.kernel,
filter_params=True,
n_jobs=self.n_jobs,
**kernel_params,
)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel) and self.kernel != "precomputed":
for param in KERNEL_PARAMS[self.kernel]:
if getattr(self, param) is not None:
params[param] = getattr(self, param)
else:
if (
self.gamma is not None
or self.coef0 is not None
or self.degree is not None
):
raise ValueError(
"Don't pass gamma, coef0 or degree to "
"Nystroem if using a callable "
"or precomputed kernel"
)
return params
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = True
tags.transformer_tags.preserves_dtype = ["float64", "float32"]
return tags
| Nystroem |
python | django-crispy-forms__django-crispy-forms | crispy_forms/bootstrap.py | {
"start": 16490,
"end": 18665
} | class ____(TemplateNameMixin):
"""
Layout object for rendering an HTML button in a ``<button>`` tag.
Attributes
----------
template: str
The default template which this Layout Object will be rendered
with.
field_classes : str
The CSS classes to be applied to the button. By default "btn".
Parameters
----------
content : str
The content of the button. This content is context aware, to bring
this to life see the examples section.
css_id : str, optional
A custom DOM id for the layout object which will be added to the
``<button>`` if provided. By default None.
css_class : str, optional
Additional CSS classes to be applied to the ``<button>``. By default
None.
template : str, optional
Overrides the default template, if provided. By default None.
**kwargs : dict, optional
Additional attributes are passed to `flatatt` and converted into
key="value", pairs. These attributes are added to the ``<button>``.
Examples
--------
In your ``Layout``::
StrictButton("button content", css_class="extra")
The content of the button is context aware, so you can do things like::
StrictButton("Button for {{ user.username }}")
"""
template = "%s/layout/button.html"
field_classes = "btn"
def __init__(self, content, css_id=None, css_class=None, template=None, **kwargs):
self.content = content
self.template = template or self.template
kwargs.setdefault("type", "button")
# We turn css_id and css_class into id and class
if css_id:
kwargs["id"] = css_id
kwargs["class"] = self.field_classes
if css_class:
kwargs["class"] += f" {css_class}"
self.flat_attrs = flatatt(kwargs)
def render(self, form, context, template_pack=TEMPLATE_PACK, **kwargs):
self.content = Template(str(self.content)).render(context)
template = self.get_template_name(template_pack)
context.update({"button": self})
return render_to_string(template, context.flatten())
| StrictButton |
python | conda__conda | tests/core/test_index.py | {
"start": 7788,
"end": 14524
} | class ____:
@pytest.fixture(params=[False, True])
def index(
self,
request: FixtureRequest,
test_recipes_channel: Path,
tmp_env: Path,
) -> Iterable[Index]:
with tmp_env("dependent=2.0") as prefix:
_index = Index(prefix=prefix, use_cache=True, use_system=True)
if request.param:
_index.data
yield _index
@pytest.fixture
def reduced_index(self, index: Index) -> ReducedIndex:
return index.get_reduced_index((MatchSpec("dependent=2.0"),))
@pytest.fixture
def valid_channel_entry(self, test_recipes_channel: Path) -> PackageRecord:
return PackageRecord(
channel=Channel(str(test_recipes_channel)),
name="dependent",
subdir="noarch",
version="1.0",
build_number=0,
build="0",
fn="dependent-1.0-0.tar.bz2",
)
@pytest.fixture
def invalid_channel_entry(self, test_recipes_channel: Path) -> PackageRecord:
return PackageRecord(
channel=Channel(str(test_recipes_channel)),
name="dependent-non-existent",
subdir="noarch",
version="1.0",
build_number=0,
build="0",
fn="dependent-1.0-0.tar.bz2",
)
@pytest.fixture
def valid_prefix_entry(self, test_recipes_channel: Path) -> PackageRecord:
return PackageRecord(
channel=Channel(str(test_recipes_channel)),
name="dependent",
subdir="noarch",
version="2.0",
build_number=0,
build="0",
fn="dependent-2.0-0.tar.bz2",
)
@pytest.fixture
def valid_cache_entry(self):
return PackageRecord(
name="python",
subdir="linux-64",
version="3.12.4",
channel="defaults",
build_number=1,
build="h5148396_1",
fn="python-3.12.4-h5148396_1.conda",
)
@pytest.fixture
def valid_feature(self):
return PackageRecord.feature("test_feature")
@pytest.fixture
def invalid_feature(self):
return PackageRecord.feature("test_feature_non_existent")
@pytest.fixture
def valid_system_package(self):
return PackageRecord.virtual_package("__conda", conda.__version__)
@pytest.fixture
def invalid_system_package(self):
return PackageRecord.virtual_package("__conda_invalid", conda.__version__)
def test_init_use_local(self):
index = Index(use_local=True, prepend=False)
assert len(index.channels) == 1
assert "local" in index.channels.keys()
def test_init_conflicting_subdirs(self, mocker):
log = mocker.patch("conda.core.index.log")
platform = "linux-64"
subdirs = ("linux-64",)
_ = Index(platform=platform, subdirs=subdirs)
assert len(log.method_calls) == 1
log_call = log.method_calls[0]
assert log_call.args == (
"subdirs is %s, ignoring platform %s",
subdirs,
platform,
)
def test_init_prefix_path(self, tmp_path: Path):
index = Index(prefix=tmp_path)
assert index.prefix_data
assert index.prefix_data.prefix_path == tmp_path
def test_init_prefix_data(self, tmp_path: Path):
index = Index(prefix=PrefixData(tmp_path))
assert index.prefix_data
assert index.prefix_data.prefix_path == tmp_path
def test_cache_entries(self, index, pkg_cache_entries):
cache_entries = index.cache_entries
assert cache_entries == pkg_cache_entries
def test_getitem_channel(self, index, valid_channel_entry):
package_record = index[valid_channel_entry]
assert type(package_record) is PackageRecord
assert package_record == valid_channel_entry
def test_getitem_channel_invalid(self, index, invalid_channel_entry):
with pytest.raises(KeyError):
_ = index[invalid_channel_entry]
def test_getitem_prefix(self, index, valid_prefix_entry):
prefix_record = index[valid_prefix_entry]
assert type(prefix_record) is PrefixRecord
assert prefix_record == valid_prefix_entry
def test_getitem_cache(self, index, valid_cache_entry):
cache_record = index[valid_cache_entry]
assert type(cache_record) is PackageCacheRecord
assert cache_record == valid_cache_entry
def test_getitem_feature(self, index, valid_feature):
feature_record = index[valid_feature]
assert type(feature_record) is PackageRecord
assert feature_record == valid_feature
def test_getitem_feature_non_existent(self, index, invalid_feature):
with pytest.raises(KeyError):
_ = index[invalid_feature]
def test_getitem_system_package_valid(self, index, valid_system_package):
system_record = index[valid_system_package]
assert system_record == valid_system_package
assert type(system_record) is PackageRecord
assert system_record.package_type == PackageType.VIRTUAL_SYSTEM
def test_getitem_system_package_invalid(self, index, invalid_system_package):
with pytest.raises(KeyError):
_ = index[invalid_system_package]
def test_contains_valid(self, index, valid_cache_entry):
assert valid_cache_entry in index
def test_contains_invalid(self, index, invalid_feature):
assert invalid_feature not in index
def test_copy(self, index):
index_copy = copy.copy(index)
assert index_copy == index
def test_reduced_index(self, reduced_index):
assert len(reduced_index) == (
# tests/data/pkg_cache/miniconda.json has 75 packages, see patch_pkg_cache
75
# we have 1 feature, see patch_pkg_cache
+ 1
# only 4 packages are loaded from tests/data/test-recipes/noarch/repodata.json
+ 4
# each OS has different virtual packages
+ len(context.plugin_manager.get_virtual_package_records())
)
def test_check_allowlist_deprecation_warning():
"""
Ensure a deprecation warning is raised for ``check_allowlist``.
Also used to ensure coverage on this code path
"""
with pytest.deprecated_call():
check_allowlist(("defaults",))
@pytest.mark.parametrize(
"function,raises",
[
("calculate_channel_urls", None),
],
)
def test_deprecations(function: str, raises: type[Exception] | None) -> None:
raises_context = pytest.raises(raises) if raises else nullcontext()
with pytest.deprecated_call(), raises_context:
getattr(index, function)()
| TestIndex |
python | readthedocs__readthedocs.org | readthedocs/doc_builder/exceptions.py | {
"start": 1112,
"end": 2320
} | class ____(BuildBaseException):
GENERIC = "build:user:generic"
BUILD_COMMANDS_WITHOUT_OUTPUT = "build:user:output:no-html"
BUILD_OUTPUT_IS_NOT_A_DIRECTORY = "build:user:output:is-no-a-directory"
BUILD_OUTPUT_HAS_0_FILES = "build:user:output:has-0-files"
BUILD_OUTPUT_HAS_NO_PDF_FILES = "build:user:output:has-no-pdf-files"
BUILD_OUTPUT_HAS_MULTIPLE_FILES = "build:user:output:has-multiple-files"
BUILD_OUTPUT_HTML_NO_INDEX_FILE = "build:user:output:html-no-index-file"
BUILD_OUTPUT_OLD_DIRECTORY_USED = "build:user:output:old-directory-used"
FILE_TOO_LARGE = "build:user:output:file-too-large"
TEX_FILE_NOT_FOUND = "build:user:tex-file-not-found"
NO_CONFIG_FILE_DEPRECATED = "build:user:config:no-config-file"
BUILD_IMAGE_CONFIG_KEY_DEPRECATED = "build:user:config:build-image-deprecated"
BUILD_OS_REQUIRED = "build:user:config:build-os-required"
BUILD_COMMANDS_IN_BETA = "build:user:build-commands-config-key-in-beta"
BUILD_TIME_OUT = "build:user:time-out"
BUILD_EXCESSIVE_MEMORY = "build:user:excessive-memory"
VCS_DEPRECATED = "build:vcs:deprecated"
SSH_KEY_WITH_WRITE_ACCESS = "build:user:ssh-key-with-write-access"
| BuildUserError |
python | keras-team__keras | keras/src/metrics/reduction_metrics_test.py | {
"start": 4290,
"end": 7418
} | class ____(testing.TestCase):
def test_config(self):
mse_obj = reduction_metrics.MeanMetricWrapper(
fn=mse, name="mse", dtype="float32"
)
self.assertEqual(mse_obj.name, "mse")
self.assertEqual(len(mse_obj.variables), 2)
self.assertEqual(mse_obj._dtype, "float32")
# Check save and restore config
mse_obj2 = reduction_metrics.MeanMetricWrapper.from_config(
mse_obj.get_config()
)
self.assertEqual(mse_obj2.name, "mse")
self.assertEqual(len(mse_obj2.variables), 2)
self.assertEqual(mse_obj2._dtype, "float32")
self.assertTrue("fn" in mse_obj2.get_config())
def test_unweighted(self):
mse_obj = reduction_metrics.MeanMetricWrapper(
fn=mse, name="mse", dtype="float32"
)
y_true = np.array(
[[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
)
y_pred = np.array(
[[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
)
mse_obj.update_state(y_true, y_pred)
result = mse_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
mse_obj = reduction_metrics.MeanMetricWrapper(
fn=mse, name="mse", dtype="float32"
)
y_true = np.array(
[[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
)
y_pred = np.array(
[[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
)
sample_weight = np.array([1.0, 1.5, 2.0, 2.5])
result = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.54285, result, atol=1e-5)
def test_weighted_broadcast(self):
mse_obj = reduction_metrics.MeanMetricWrapper(
fn=mse, name="mse", dtype="float32"
)
y_true = np.array(
[[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
)
y_pred = np.array(
[[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
)
sample_weight = np.array([[1.0, 0.0, 0.5, 0.0, 1.0]])
result = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.45, result, atol=1e-5)
def test_weighted_dynamic_shape(self):
mse_obj = reduction_metrics.MeanMetricWrapper(
fn=mse, name="mse", dtype="float32"
)
result = backend.compute_output_spec(
mse_obj,
KerasTensor((None, 5)),
KerasTensor((None, 5)),
KerasTensor((None, 5)),
)
self.assertAllEqual(result.shape, ())
def test_binary_accuracy_with_boolean_inputs(self):
inp = layers.Input(shape=(1,))
out = inp > 0.5
model = models.Model(inputs=inp, outputs=out)
x = np.random.rand(32, 1)
y = x > 0.5
res = model.predict(x)
metric = metrics.BinaryAccuracy()
metric.update_state(y, res)
result = metric.result()
assert result == 1.0
| MetricWrapperTest |
python | FactoryBoy__factory_boy | tests/test_docs_internals.py | {
"start": 3654,
"end": 4270
} | class ____(unittest.TestCase):
def test_simple_usage(self):
user = UserFactory()
# Default user should be active, not super
self.assertTrue(user.is_active)
self.assertFalse(user.is_superuser)
self.assertFalse(user.is_staff)
# We should have one log
self.assertEqual(1, len(user.logs))
# And it should be a 'create' action linked to the user's creation_date
self.assertEqual('create', user.logs[0].action)
self.assertEqual(user, user.logs[0].user)
self.assertEqual(user.creation_date, user.logs[0].timestamp)
| DocsInternalsTests |
python | scipy__scipy | scipy/sparse/linalg/_isolve/tests/test_iterative.py | {
"start": 1146,
"end": 1619
} | class ____:
def __init__(self, name, A, b=None, skip=None, nonconvergence=None):
self.name = name
self.A = A
if b is None:
self.b = arange(A.shape[0], dtype=float)
else:
self.b = b
if skip is None:
self.skip = []
else:
self.skip = skip
if nonconvergence is None:
self.nonconvergence = []
else:
self.nonconvergence = nonconvergence
| Case |
python | allegroai__clearml | clearml/utilities/gpu/pynvml.py | {
"start": 64865,
"end": 162263
} | class ____(Structure):
_fields_ = [("max", c_uint),
("high", c_uint),
("partial", c_uint),
("low", c_uint),
("none", c_uint)
]
## string/bytes conversion for ease of use
def convertStrBytes(func):
'''
In python 3, strings are unicode instead of bytes, and need to be converted for ctypes
Args from caller: (1, 'string', <__main__.c_nvmlDevice_t at 0xFFFFFFFF>)
Args passed to function: (1, b'string', <__main__.c_nvmlDevice_t at 0xFFFFFFFF)>
----
Returned from function: b'returned string'
Returned to caller: 'returned string'
'''
@wraps(func)
def wrapper(*args, **kwargs):
# encoding a str returns bytes in python 2 and 3
args = [arg.encode() if isinstance(arg, str) else arg for arg in args]
res = func(*args, **kwargs)
# In python 2, str and bytes are the same
# In python 3, str is unicode and should be decoded.
# Ctypes handles most conversions, this only effects c_char and char arrays.
if isinstance(res, bytes):
if isinstance(res, str):
return res
return res.decode()
return res
if sys.version_info >= (3,):
return wrapper
return func
## C function wrappers ##
def nvmlInitWithFlags(flags):
_LoadNvmlLibrary()
#
# Initialize the library
#
fn = _nvmlGetFunctionPointer("nvmlInitWithFlags")
ret = fn(flags)
_nvmlCheckReturn(ret)
# Atomically update refcount
global _nvmlLib_refcount
libLoadLock.acquire()
_nvmlLib_refcount += 1
libLoadLock.release()
return None
def nvmlInit():
nvmlInitWithFlags(0)
return None
def _LoadNvmlLibrary():
'''
Load the library if it isn't loaded already
'''
global nvmlLib
if nvmlLib is None:
# lock to ensure only one caller loads the library
libLoadLock.acquire()
try:
# ensure the library still isn't loaded
if nvmlLib is None:
try:
if (sys.platform[:3] == "win"):
# cdecl calling convention
try:
# Check for nvml.dll in System32 first for DCH drivers
nvmlLib = CDLL(os.path.join(os.getenv("WINDIR", "C:/Windows"), "System32/nvml.dll"))
except OSError as ose:
# If nvml.dll is not found in System32, it should be in ProgramFiles
# load nvml.dll from %ProgramFiles%/NVIDIA Corporation/NVSMI/nvml.dll
nvmlLib = CDLL(os.path.join(os.getenv("ProgramFiles", "C:/Program Files"),
"NVIDIA Corporation/NVSMI/nvml.dll"))
else:
# assume linux
nvmlLib = CDLL("libnvidia-ml.so.1")
except OSError as ose:
_nvmlCheckReturn(NVML_ERROR_LIBRARY_NOT_FOUND)
if nvmlLib is None:
_nvmlCheckReturn(NVML_ERROR_LIBRARY_NOT_FOUND)
finally:
# lock is always freed
libLoadLock.release()
def nvmlShutdown():
#
# Leave the library loaded, but shutdown the interface
#
fn = _nvmlGetFunctionPointer("nvmlShutdown")
ret = fn()
_nvmlCheckReturn(ret)
# Atomically update refcount
global _nvmlLib_refcount
libLoadLock.acquire()
if (0 < _nvmlLib_refcount):
_nvmlLib_refcount -= 1
libLoadLock.release()
return None
# Added in 2.285
@convertStrBytes
def nvmlErrorString(result):
fn = _nvmlGetFunctionPointer("nvmlErrorString")
fn.restype = c_char_p # otherwise return is an int
ret = fn(result)
return ret
# Added in 2.285
@convertStrBytes
def nvmlSystemGetNVMLVersion():
c_version = create_string_buffer(NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlSystemGetNVMLVersion")
ret = fn(c_version, c_uint(NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_version.value
def nvmlSystemGetCudaDriverVersion():
c_cuda_version = c_int()
fn = _nvmlGetFunctionPointer("nvmlSystemGetCudaDriverVersion")
ret = fn(byref(c_cuda_version))
_nvmlCheckReturn(ret)
return c_cuda_version.value
def nvmlSystemGetCudaDriverVersion_v2():
c_cuda_version = c_int()
fn = _nvmlGetFunctionPointer("nvmlSystemGetCudaDriverVersion_v2")
ret = fn(byref(c_cuda_version))
_nvmlCheckReturn(ret)
return c_cuda_version.value
# Added in 2.285
@convertStrBytes
def nvmlSystemGetProcessName(pid):
c_name = create_string_buffer(1024)
fn = _nvmlGetFunctionPointer("nvmlSystemGetProcessName")
ret = fn(c_uint(pid), c_name, c_uint(1024))
_nvmlCheckReturn(ret)
return c_name.value
@convertStrBytes
def nvmlSystemGetDriverVersion():
c_version = create_string_buffer(NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlSystemGetDriverVersion")
ret = fn(c_version, c_uint(NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_version.value
# Added in 2.285
def nvmlSystemGetHicVersion():
c_count = c_uint(0)
hics = None
fn = _nvmlGetFunctionPointer("nvmlSystemGetHicVersion")
# get the count
ret = fn(byref(c_count), None)
# this should only fail with insufficient size
if ((ret != NVML_SUCCESS) and
(ret != NVML_ERROR_INSUFFICIENT_SIZE)):
raise NVMLError(ret)
# If there are no hics
if (c_count.value == 0):
return []
hic_array = c_nvmlHwbcEntry_t * c_count.value
hics = hic_array()
ret = fn(byref(c_count), hics)
_nvmlCheckReturn(ret)
return hics
## Unit get functions
def nvmlUnitGetCount():
c_count = c_uint()
fn = _nvmlGetFunctionPointer("nvmlUnitGetCount")
ret = fn(byref(c_count))
_nvmlCheckReturn(ret)
return c_count.value
def nvmlUnitGetHandleByIndex(index):
c_index = c_uint(index)
unit = c_nvmlUnit_t()
fn = _nvmlGetFunctionPointer("nvmlUnitGetHandleByIndex")
ret = fn(c_index, byref(unit))
_nvmlCheckReturn(ret)
return unit
def nvmlUnitGetUnitInfo(unit):
c_info = c_nvmlUnitInfo_t()
fn = _nvmlGetFunctionPointer("nvmlUnitGetUnitInfo")
ret = fn(unit, byref(c_info))
_nvmlCheckReturn(ret)
return c_info
def nvmlUnitGetLedState(unit):
c_state = c_nvmlLedState_t()
fn = _nvmlGetFunctionPointer("nvmlUnitGetLedState")
ret = fn(unit, byref(c_state))
_nvmlCheckReturn(ret)
return c_state
def nvmlUnitGetPsuInfo(unit):
c_info = c_nvmlPSUInfo_t()
fn = _nvmlGetFunctionPointer("nvmlUnitGetPsuInfo")
ret = fn(unit, byref(c_info))
_nvmlCheckReturn(ret)
return c_info
def nvmlUnitGetTemperature(unit, type):
c_temp = c_uint()
fn = _nvmlGetFunctionPointer("nvmlUnitGetTemperature")
ret = fn(unit, c_uint(type), byref(c_temp))
_nvmlCheckReturn(ret)
return c_temp.value
def nvmlUnitGetFanSpeedInfo(unit):
c_speeds = c_nvmlUnitFanSpeeds_t()
fn = _nvmlGetFunctionPointer("nvmlUnitGetFanSpeedInfo")
ret = fn(unit, byref(c_speeds))
_nvmlCheckReturn(ret)
return c_speeds
# added to API
def nvmlUnitGetDeviceCount(unit):
c_count = c_uint(0)
# query the unit to determine device count
fn = _nvmlGetFunctionPointer("nvmlUnitGetDevices")
ret = fn(unit, byref(c_count), None)
if (ret == NVML_ERROR_INSUFFICIENT_SIZE):
ret = NVML_SUCCESS
_nvmlCheckReturn(ret)
return c_count.value
def nvmlUnitGetDevices(unit):
c_count = c_uint(nvmlUnitGetDeviceCount(unit))
device_array = c_nvmlDevice_t * c_count.value
c_devices = device_array()
fn = _nvmlGetFunctionPointer("nvmlUnitGetDevices")
ret = fn(unit, byref(c_count), c_devices)
_nvmlCheckReturn(ret)
return c_devices
## Device get functions
def nvmlDeviceGetCount():
c_count = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetCount_v2")
ret = fn(byref(c_count))
_nvmlCheckReturn(ret)
return c_count.value
def nvmlDeviceGetHandleByIndex(index):
c_index = c_uint(index)
device = c_nvmlDevice_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetHandleByIndex_v2")
ret = fn(c_index, byref(device))
_nvmlCheckReturn(ret)
return device
@convertStrBytes
def nvmlDeviceGetHandleBySerial(serial):
c_serial = c_char_p(serial)
device = c_nvmlDevice_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetHandleBySerial")
ret = fn(c_serial, byref(device))
_nvmlCheckReturn(ret)
return device
@convertStrBytes
def nvmlDeviceGetHandleByUUID(uuid):
c_uuid = c_char_p(uuid)
device = c_nvmlDevice_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetHandleByUUID")
ret = fn(c_uuid, byref(device))
_nvmlCheckReturn(ret)
return device
@convertStrBytes
def nvmlDeviceGetHandleByPciBusId(pciBusId):
c_busId = c_char_p(pciBusId)
device = c_nvmlDevice_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetHandleByPciBusId_v2")
ret = fn(c_busId, byref(device))
_nvmlCheckReturn(ret)
return device
@convertStrBytes
def nvmlDeviceGetName(handle):
c_name = create_string_buffer(NVML_DEVICE_NAME_V2_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetName")
ret = fn(handle, c_name, c_uint(NVML_DEVICE_NAME_V2_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_name.value
def nvmlDeviceGetBoardId(handle):
c_id = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetBoardId")
ret = fn(handle, byref(c_id))
_nvmlCheckReturn(ret)
return c_id.value
def nvmlDeviceGetMultiGpuBoard(handle):
c_multiGpu = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMultiGpuBoard")
ret = fn(handle, byref(c_multiGpu))
_nvmlCheckReturn(ret)
return c_multiGpu.value
def nvmlDeviceGetBrand(handle):
c_type = _nvmlBrandType_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetBrand")
ret = fn(handle, byref(c_type))
_nvmlCheckReturn(ret)
return c_type.value
@convertStrBytes
def nvmlDeviceGetBoardPartNumber(handle):
c_part_number = create_string_buffer(NVML_DEVICE_PART_NUMBER_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetBoardPartNumber")
ret = fn(handle, c_part_number, c_uint(NVML_DEVICE_PART_NUMBER_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_part_number.value
@convertStrBytes
def nvmlDeviceGetSerial(handle):
c_serial = create_string_buffer(NVML_DEVICE_SERIAL_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetSerial")
ret = fn(handle, c_serial, c_uint(NVML_DEVICE_SERIAL_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_serial.value
def nvmlDeviceGetMemoryAffinity(handle, nodeSetSize, scope):
affinity_array = c_ulonglong * nodeSetSize
c_affinity = affinity_array()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMemoryAffinity")
ret = fn(handle, nodeSetSize, byref(c_affinity), _nvmlAffinityScope_t(scope))
_nvmlCheckReturn(ret)
return c_affinity
def nvmlDeviceGetCpuAffinityWithinScope(handle, cpuSetSize, scope):
affinity_array = c_ulonglong * cpuSetSize
c_affinity = affinity_array()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetCpuAffinityWithinScope")
ret = fn(handle, cpuSetSize, byref(c_affinity), _nvmlAffinityScope_t(scope))
_nvmlCheckReturn(ret)
return c_affinity
def nvmlDeviceGetCpuAffinity(handle, cpuSetSize):
affinity_array = c_ulonglong * cpuSetSize
c_affinity = affinity_array()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetCpuAffinity")
ret = fn(handle, cpuSetSize, byref(c_affinity))
_nvmlCheckReturn(ret)
return c_affinity
def nvmlDeviceSetCpuAffinity(handle):
fn = _nvmlGetFunctionPointer("nvmlDeviceSetCpuAffinity")
ret = fn(handle)
_nvmlCheckReturn(ret)
return None
def nvmlDeviceClearCpuAffinity(handle):
fn = _nvmlGetFunctionPointer("nvmlDeviceClearCpuAffinity")
ret = fn(handle)
_nvmlCheckReturn(ret)
return None
def nvmlDeviceGetMinorNumber(handle):
c_minor_number = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMinorNumber")
ret = fn(handle, byref(c_minor_number))
_nvmlCheckReturn(ret)
return c_minor_number.value
@convertStrBytes
def nvmlDeviceGetUUID(handle):
c_uuid = create_string_buffer(NVML_DEVICE_UUID_V2_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetUUID")
ret = fn(handle, c_uuid, c_uint(NVML_DEVICE_UUID_V2_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_uuid.value
@convertStrBytes
def nvmlDeviceGetInforomVersion(handle, infoRomObject):
c_version = create_string_buffer(NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetInforomVersion")
ret = fn(handle, _nvmlInforomObject_t(infoRomObject),
c_version, c_uint(NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_version.value
# Added in 4.304
@convertStrBytes
def nvmlDeviceGetInforomImageVersion(handle):
c_version = create_string_buffer(NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetInforomImageVersion")
ret = fn(handle, c_version, c_uint(NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_version.value
# Added in 4.304
def nvmlDeviceGetInforomConfigurationChecksum(handle):
c_checksum = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetInforomConfigurationChecksum")
ret = fn(handle, byref(c_checksum))
_nvmlCheckReturn(ret)
return c_checksum.value
# Added in 4.304
def nvmlDeviceValidateInforom(handle):
fn = _nvmlGetFunctionPointer("nvmlDeviceValidateInforom")
ret = fn(handle)
_nvmlCheckReturn(ret)
return None
def nvmlDeviceGetDisplayMode(handle):
c_mode = _nvmlEnableState_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetDisplayMode")
ret = fn(handle, byref(c_mode))
_nvmlCheckReturn(ret)
return c_mode.value
def nvmlDeviceGetDisplayActive(handle):
c_mode = _nvmlEnableState_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetDisplayActive")
ret = fn(handle, byref(c_mode))
_nvmlCheckReturn(ret)
return c_mode.value
def nvmlDeviceGetPersistenceMode(handle):
c_state = _nvmlEnableState_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPersistenceMode")
ret = fn(handle, byref(c_state))
_nvmlCheckReturn(ret)
return c_state.value
def nvmlDeviceGetPciInfo_v3(handle):
c_info = nvmlPciInfo_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPciInfo_v3")
ret = fn(handle, byref(c_info))
_nvmlCheckReturn(ret)
return c_info
def nvmlDeviceGetPciInfo(handle):
return nvmlDeviceGetPciInfo_v3(handle)
def nvmlDeviceGetClockInfo(handle, type):
c_clock = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetClockInfo")
ret = fn(handle, _nvmlClockType_t(type), byref(c_clock))
_nvmlCheckReturn(ret)
return c_clock.value
# Added in 2.285
def nvmlDeviceGetMaxClockInfo(handle, type):
c_clock = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMaxClockInfo")
ret = fn(handle, _nvmlClockType_t(type), byref(c_clock))
_nvmlCheckReturn(ret)
return c_clock.value
# Added in 4.304
def nvmlDeviceGetApplicationsClock(handle, type):
c_clock = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetApplicationsClock")
ret = fn(handle, _nvmlClockType_t(type), byref(c_clock))
_nvmlCheckReturn(ret)
return c_clock.value
def nvmlDeviceGetMaxCustomerBoostClock(handle, type):
c_clock = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMaxCustomerBoostClock")
ret = fn(handle, _nvmlClockType_t(type), byref(c_clock))
_nvmlCheckReturn(ret)
return c_clock.value
def nvmlDeviceGetClock(handle, type, id):
c_clock = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetClock")
ret = fn(handle, _nvmlClockType_t(type), _nvmlClockId_t(id), byref(c_clock))
_nvmlCheckReturn(ret)
return c_clock.value
# Added in 5.319
def nvmlDeviceGetDefaultApplicationsClock(handle, type):
c_clock = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetDefaultApplicationsClock")
ret = fn(handle, _nvmlClockType_t(type), byref(c_clock))
_nvmlCheckReturn(ret)
return c_clock.value
# Added in 4.304
def nvmlDeviceGetSupportedMemoryClocks(handle):
# first call to get the size
c_count = c_uint(0)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedMemoryClocks")
ret = fn(handle, byref(c_count), None)
if (ret == NVML_SUCCESS):
# special case, no clocks
return []
elif (ret == NVML_ERROR_INSUFFICIENT_SIZE):
# typical case
clocks_array = c_uint * c_count.value
c_clocks = clocks_array()
# make the call again
ret = fn(handle, byref(c_count), c_clocks)
_nvmlCheckReturn(ret)
procs = []
for i in range(c_count.value):
procs.append(c_clocks[i])
return procs
else:
# error case
raise NVMLError(ret)
# Added in 4.304
def nvmlDeviceGetSupportedGraphicsClocks(handle, memoryClockMHz):
# first call to get the size
c_count = c_uint(0)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedGraphicsClocks")
ret = fn(handle, c_uint(memoryClockMHz), byref(c_count), None)
if (ret == NVML_SUCCESS):
# special case, no clocks
return []
elif (ret == NVML_ERROR_INSUFFICIENT_SIZE):
# typical case
clocks_array = c_uint * c_count.value
c_clocks = clocks_array()
# make the call again
ret = fn(handle, c_uint(memoryClockMHz), byref(c_count), c_clocks)
_nvmlCheckReturn(ret)
procs = []
for i in range(c_count.value):
procs.append(c_clocks[i])
return procs
else:
# error case
raise NVMLError(ret)
def nvmlDeviceGetFanSpeed(handle):
c_speed = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetFanSpeed")
ret = fn(handle, byref(c_speed))
_nvmlCheckReturn(ret)
return c_speed.value
def nvmlDeviceGetFanSpeed_v2(handle, fan):
c_speed = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetFanSpeed_v2")
ret = fn(handle, fan, byref(c_speed))
_nvmlCheckReturn(ret)
return c_speed.value
def nvmlDeviceGetTargetFanSpeed(handle, fan):
c_speed = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetTargetFanSpeed")
ret = fn(handle, fan, byref(c_speed))
_nvmlCheckReturn(ret)
return c_speed.value
def nvmlDeviceGetNumFans(device):
c_numFans = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetNumFans")
ret = fn(device, byref(c_numFans))
_nvmlCheckReturn(ret)
return c_numFans.value
def nvmlDeviceSetDefaultFanSpeed_v2(handle, index):
fn = _nvmlGetFunctionPointer("nvmlDeviceSetDefaultFanSpeed_v2")
ret = fn(handle, index)
_nvmlCheckReturn(ret)
return ret
def nvmlDeviceGetMinMaxFanSpeed(handle, minSpeed, maxSpeed):
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMinMaxFanSpeed")
ret = fn(handle, minSpeed, maxSpeed)
_nvmlCheckReturn(ret)
return ret
def nvmlDeviceGetFanControlPolicy_v2(handle, fan, fanControlPolicy):
fn = _nvmlGetFunctionPointer("nvmlDeviceGetFanControlPolicy_v2")
ret = fn(handle, fan, fanControlPolicy)
_nvmlCheckReturn(ret)
return ret
def nvmlDeviceSetFanControlPolicy(handle, fan, fanControlPolicy):
fn = _nvmlGetFunctionPointer("nvmlDeviceSetFanControlPolicy")
ret = fn(handle, fan, _nvmlFanControlPolicy_t(fanControlPolicy))
_nvmlCheckReturn(ret)
return ret
def nvmlDeviceGetTemperature(handle, sensor):
c_temp = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetTemperature")
ret = fn(handle, _nvmlTemperatureSensors_t(sensor), byref(c_temp))
_nvmlCheckReturn(ret)
return c_temp.value
def nvmlDeviceGetTemperatureThreshold(handle, threshold):
c_temp = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetTemperatureThreshold")
ret = fn(handle, _nvmlTemperatureThresholds_t(threshold), byref(c_temp))
_nvmlCheckReturn(ret)
return c_temp.value
def nvmlDeviceSetTemperatureThreshold(handle, threshold, temp):
c_temp = c_uint()
c_temp.value = temp
fn = _nvmlGetFunctionPointer("nvmlDeviceSetTemperatureThreshold")
ret = fn(handle, _nvmlTemperatureThresholds_t(threshold), byref(c_temp))
_nvmlCheckReturn(ret)
return None
# DEPRECATED use nvmlDeviceGetPerformanceState
def nvmlDeviceGetPowerState(handle):
c_pstate = _nvmlPstates_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPowerState")
ret = fn(handle, byref(c_pstate))
_nvmlCheckReturn(ret)
return c_pstate.value
def nvmlDeviceGetPerformanceState(handle):
c_pstate = _nvmlPstates_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPerformanceState")
ret = fn(handle, byref(c_pstate))
_nvmlCheckReturn(ret)
return c_pstate.value
def nvmlDeviceGetPowerManagementMode(handle):
c_pcapMode = _nvmlEnableState_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPowerManagementMode")
ret = fn(handle, byref(c_pcapMode))
_nvmlCheckReturn(ret)
return c_pcapMode.value
def nvmlDeviceGetPowerManagementLimit(handle):
c_limit = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPowerManagementLimit")
ret = fn(handle, byref(c_limit))
_nvmlCheckReturn(ret)
return c_limit.value
# Added in 4.304
def nvmlDeviceGetPowerManagementLimitConstraints(handle):
c_minLimit = c_uint()
c_maxLimit = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPowerManagementLimitConstraints")
ret = fn(handle, byref(c_minLimit), byref(c_maxLimit))
_nvmlCheckReturn(ret)
return [c_minLimit.value, c_maxLimit.value]
# Added in 4.304
def nvmlDeviceGetPowerManagementDefaultLimit(handle):
c_limit = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPowerManagementDefaultLimit")
ret = fn(handle, byref(c_limit))
_nvmlCheckReturn(ret)
return c_limit.value
# Added in 331
def nvmlDeviceGetEnforcedPowerLimit(handle):
c_limit = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetEnforcedPowerLimit")
ret = fn(handle, byref(c_limit))
_nvmlCheckReturn(ret)
return c_limit.value
def nvmlDeviceGetPowerUsage(handle):
c_watts = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPowerUsage")
ret = fn(handle, byref(c_watts))
_nvmlCheckReturn(ret)
return c_watts.value
def nvmlDeviceGetTotalEnergyConsumption(handle):
c_millijoules = c_uint64()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetTotalEnergyConsumption")
ret = fn(handle, byref(c_millijoules))
_nvmlCheckReturn(ret)
return c_millijoules.value
# Added in 4.304
def nvmlDeviceGetGpuOperationMode(handle):
c_currState = _nvmlGpuOperationMode_t()
c_pendingState = _nvmlGpuOperationMode_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpuOperationMode")
ret = fn(handle, byref(c_currState), byref(c_pendingState))
_nvmlCheckReturn(ret)
return [c_currState.value, c_pendingState.value]
# Added in 4.304
def nvmlDeviceGetCurrentGpuOperationMode(handle):
return nvmlDeviceGetGpuOperationMode(handle)[0]
# Added in 4.304
def nvmlDeviceGetPendingGpuOperationMode(handle):
return nvmlDeviceGetGpuOperationMode(handle)[1]
def nvmlDeviceGetMemoryInfo(handle, version=None):
if not version:
c_memory = c_nvmlMemory_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMemoryInfo")
else:
c_memory = c_nvmlMemory_v2_t()
c_memory.version = version
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMemoryInfo_v2")
ret = fn(handle, byref(c_memory))
_nvmlCheckReturn(ret)
return c_memory
def nvmlDeviceGetBAR1MemoryInfo(handle):
c_bar1_memory = c_nvmlBAR1Memory_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetBAR1MemoryInfo")
ret = fn(handle, byref(c_bar1_memory))
_nvmlCheckReturn(ret)
return c_bar1_memory
def nvmlDeviceGetComputeMode(handle):
c_mode = _nvmlComputeMode_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetComputeMode")
ret = fn(handle, byref(c_mode))
_nvmlCheckReturn(ret)
return c_mode.value
def nvmlDeviceGetCudaComputeCapability(handle):
c_major = c_int()
c_minor = c_int()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetCudaComputeCapability")
ret = fn(handle, byref(c_major), byref(c_minor))
_nvmlCheckReturn(ret)
return (c_major.value, c_minor.value)
def nvmlDeviceGetEccMode(handle):
c_currState = _nvmlEnableState_t()
c_pendingState = _nvmlEnableState_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetEccMode")
ret = fn(handle, byref(c_currState), byref(c_pendingState))
_nvmlCheckReturn(ret)
return [c_currState.value, c_pendingState.value]
# added to API
def nvmlDeviceGetCurrentEccMode(handle):
return nvmlDeviceGetEccMode(handle)[0]
# added to API
def nvmlDeviceGetPendingEccMode(handle):
return nvmlDeviceGetEccMode(handle)[1]
def nvmlDeviceGetDefaultEccMode(handle):
c_defaultState = _nvmlEnableState_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetDefaultEccMode")
ret = fn(handle, byref(c_defaultState))
_nvmlCheckReturn(ret)
return [c_defaultState.value]
def nvmlDeviceGetTotalEccErrors(handle, errorType, counterType):
    """Return the aggregate ECC error count for the given error/counter type."""
    count = c_ulonglong()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetTotalEccErrors")
    _nvmlCheckReturn(query(handle, _nvmlMemoryErrorType_t(errorType),
                           _nvmlEccCounterType_t(counterType), byref(count)))
    return count.value
# This is deprecated, instead use nvmlDeviceGetMemoryErrorCounter
def nvmlDeviceGetDetailedEccErrors(handle, errorType, counterType):
    """Return the detailed ECC error counts struct (deprecated)."""
    counts = c_nvmlEccErrorCounts_t()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetDetailedEccErrors")
    _nvmlCheckReturn(query(handle, _nvmlMemoryErrorType_t(errorType),
                           _nvmlEccCounterType_t(counterType), byref(counts)))
    return counts
# Added in 4.304
def nvmlDeviceGetMemoryErrorCounter(handle, errorType, counterType, locationType):
    """Return the ECC error count for one memory location type."""
    count = c_ulonglong()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetMemoryErrorCounter")
    _nvmlCheckReturn(query(handle,
                           _nvmlMemoryErrorType_t(errorType),
                           _nvmlEccCounterType_t(counterType),
                           _nvmlMemoryLocation_t(locationType),
                           byref(count)))
    return count.value
def nvmlDeviceGetUtilizationRates(handle):
    """Return the GPU/memory utilization struct."""
    util = c_nvmlUtilization_t()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetUtilizationRates")
    _nvmlCheckReturn(query(handle, byref(util)))
    return util
def nvmlDeviceGetEncoderUtilization(handle):
    """Return [encoder utilization, sampling period] as reported by NVML."""
    util, period = c_uint(), c_uint()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetEncoderUtilization")
    _nvmlCheckReturn(query(handle, byref(util), byref(period)))
    return [util.value, period.value]
def nvmlDeviceGetDecoderUtilization(handle):
    """Return [decoder utilization, sampling period] as reported by NVML."""
    util, period = c_uint(), c_uint()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetDecoderUtilization")
    _nvmlCheckReturn(query(handle, byref(util), byref(period)))
    return [util.value, period.value]
def nvmlDeviceGetPcieReplayCounter(handle):
    """Return the PCIe replay counter value."""
    replays = c_uint()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetPcieReplayCounter")
    _nvmlCheckReturn(query(handle, byref(replays)))
    return replays.value
def nvmlDeviceGetDriverModel(handle):
    """Return [current, pending] driver model values."""
    current, pending = _nvmlDriverModel_t(), _nvmlDriverModel_t()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetDriverModel")
    _nvmlCheckReturn(query(handle, byref(current), byref(pending)))
    return [current.value, pending.value]
# added to API
def nvmlDeviceGetCurrentDriverModel(handle):
    """Return only the current driver model."""
    current, _pending = nvmlDeviceGetDriverModel(handle)
    return current
# added to API
def nvmlDeviceGetPendingDriverModel(handle):
    """Return only the pending driver model."""
    _current, pending = nvmlDeviceGetDriverModel(handle)
    return pending
# Added in 2.285
@convertStrBytes
def nvmlDeviceGetVbiosVersion(handle):
    """Return the VBIOS version string of the device."""
    version = create_string_buffer(NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE)
    query = _nvmlGetFunctionPointer("nvmlDeviceGetVbiosVersion")
    _nvmlCheckReturn(query(handle, version, c_uint(NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE)))
    return version.value
# Added in 2.285
def nvmlDeviceGetComputeRunningProcesses_v3(handle):
    """Return friendly process-info objects for compute apps on the GPU.

    Uses the standard NVML two-call pattern: query the required count
    first, then allocate an (oversized) array and query again.
    """
    # first call to get the size
    c_count = c_uint(0)
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetComputeRunningProcesses_v3")
    ret = fn(handle, byref(c_count), None)
    if (ret == NVML_SUCCESS):
        # special case, no running processes
        return []
    elif (ret == NVML_ERROR_INSUFFICIENT_SIZE):
        # typical case
        # oversize the array incase more processes are created
        c_count.value = c_count.value * 2 + 5
        proc_array = c_nvmlProcessInfo_t * c_count.value
        c_procs = proc_array()
        # make the call again
        ret = fn(handle, byref(c_count), c_procs)
        _nvmlCheckReturn(ret)
        procs = []
        for i in range(c_count.value):
            # use an alternative struct for this object
            obj = nvmlStructToFriendlyObject(c_procs[i])
            if (obj.usedGpuMemory == NVML_VALUE_NOT_AVAILABLE_ulonglong.value):
                # special case for WDDM on Windows, see comment above
                obj.usedGpuMemory = None
            procs.append(obj)
        return procs
    else:
        # error case
        raise NVMLError(ret)
def nvmlDeviceGetComputeRunningProcesses(handle):
    # Unversioned alias that forwards to the newest (_v3) implementation.
    return nvmlDeviceGetComputeRunningProcesses_v3(handle)
def nvmlDeviceGetGraphicsRunningProcesses_v3(handle):
    """Return friendly process-info objects for graphics apps on the GPU.

    Same two-call count/allocate protocol as the compute variant.
    """
    # first call to get the size
    c_count = c_uint(0)
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetGraphicsRunningProcesses_v3")
    ret = fn(handle, byref(c_count), None)
    if (ret == NVML_SUCCESS):
        # special case, no running processes
        return []
    elif (ret == NVML_ERROR_INSUFFICIENT_SIZE):
        # typical case
        # oversize the array incase more processes are created
        c_count.value = c_count.value * 2 + 5
        proc_array = c_nvmlProcessInfo_t * c_count.value
        c_procs = proc_array()
        # make the call again
        ret = fn(handle, byref(c_count), c_procs)
        _nvmlCheckReturn(ret)
        procs = []
        for i in range(c_count.value):
            # use an alternative struct for this object
            obj = nvmlStructToFriendlyObject(c_procs[i])
            if (obj.usedGpuMemory == NVML_VALUE_NOT_AVAILABLE_ulonglong.value):
                # special case for WDDM on Windows, see comment above
                obj.usedGpuMemory = None
            procs.append(obj)
        return procs
    else:
        # error case
        raise NVMLError(ret)
def nvmlDeviceGetGraphicsRunningProcesses(handle):
    # Unversioned alias that forwards to the newest (_v3) implementation.
    return nvmlDeviceGetGraphicsRunningProcesses_v3(handle)
def nvmlDeviceGetMPSComputeRunningProcesses(handle):
    # Unversioned alias; the _v3 function is defined just below.
    return nvmlDeviceGetMPSComputeRunningProcesses_v3(handle)
def nvmlDeviceGetMPSComputeRunningProcesses_v3(handle):
    """Return friendly process-info objects for MPS compute apps on the GPU.

    Same two-call count/allocate protocol as the compute variant.
    """
    # first call to get the size
    c_count = c_uint(0)
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetMPSComputeRunningProcesses_v3")
    ret = fn(handle, byref(c_count), None)
    if (ret == NVML_SUCCESS):
        # special case, no running processes
        return []
    elif (ret == NVML_ERROR_INSUFFICIENT_SIZE):
        # typical case
        # oversize the array incase more processes are created
        c_count.value = c_count.value * 2 + 5
        proc_array = c_nvmlProcessInfo_t * c_count.value
        c_procs = proc_array()
        # make the call again
        ret = fn(handle, byref(c_count), c_procs)
        _nvmlCheckReturn(ret)
        procs = []
        for i in range(c_count.value):
            # use an alternative struct for this object
            obj = nvmlStructToFriendlyObject(c_procs[i])
            if (obj.usedGpuMemory == NVML_VALUE_NOT_AVAILABLE_ulonglong.value):
                # special case for WDDM on Windows, see comment above
                obj.usedGpuMemory = None
            procs.append(obj)
        return procs
    else:
        # error case
        raise NVMLError(ret)
def nvmlDeviceGetAutoBoostedClocksEnabled(handle):
    """Return [isEnabled, defaultIsEnabled] for auto boosted clocks."""
    c_isEnabled = _nvmlEnableState_t()
    c_defaultIsEnabled = _nvmlEnableState_t()
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetAutoBoostedClocksEnabled")
    ret = fn(handle, byref(c_isEnabled), byref(c_defaultIsEnabled))
    _nvmlCheckReturn(ret)
    return [c_isEnabled.value, c_defaultIsEnabled.value]
# Throws NVML_ERROR_NOT_SUPPORTED if hardware doesn't support setting auto boosted clocks
## Set functions
def nvmlUnitSetLedState(unit, color):
    """Set the LED color on the given unit."""
    setter = _nvmlGetFunctionPointer("nvmlUnitSetLedState")
    _nvmlCheckReturn(setter(unit, _nvmlLedColor_t(color)))
    return None
def nvmlDeviceSetPersistenceMode(handle, mode):
    """Enable or disable persistence mode."""
    setter = _nvmlGetFunctionPointer("nvmlDeviceSetPersistenceMode")
    _nvmlCheckReturn(setter(handle, _nvmlEnableState_t(mode)))
    return None
def nvmlDeviceSetComputeMode(handle, mode):
    """Set the device compute mode."""
    setter = _nvmlGetFunctionPointer("nvmlDeviceSetComputeMode")
    _nvmlCheckReturn(setter(handle, _nvmlComputeMode_t(mode)))
    return None
def nvmlDeviceSetEccMode(handle, mode):
    """Set the ECC mode."""
    setter = _nvmlGetFunctionPointer("nvmlDeviceSetEccMode")
    _nvmlCheckReturn(setter(handle, _nvmlEnableState_t(mode)))
    return None
def nvmlDeviceClearEccErrorCounts(handle, counterType):
    """Clear the given ECC error counter."""
    setter = _nvmlGetFunctionPointer("nvmlDeviceClearEccErrorCounts")
    _nvmlCheckReturn(setter(handle, _nvmlEccCounterType_t(counterType)))
    return None
def nvmlDeviceSetDriverModel(handle, model):
    """Set the driver model."""
    setter = _nvmlGetFunctionPointer("nvmlDeviceSetDriverModel")
    _nvmlCheckReturn(setter(handle, _nvmlDriverModel_t(model)))
    return None
def nvmlDeviceSetAutoBoostedClocksEnabled(handle, enabled):
    """Enable or disable auto boosted clocks."""
    setter = _nvmlGetFunctionPointer("nvmlDeviceSetAutoBoostedClocksEnabled")
    _nvmlCheckReturn(setter(handle, _nvmlEnableState_t(enabled)))
    return None
# Throws NVML_ERROR_NOT_SUPPORTED if hardware doesn't support setting auto boosted clocks
def nvmlDeviceSetDefaultAutoBoostedClocksEnabled(handle, enabled, flags):
    """Set the default auto-boosted-clocks behavior."""
    setter = _nvmlGetFunctionPointer("nvmlDeviceSetDefaultAutoBoostedClocksEnabled")
    _nvmlCheckReturn(setter(handle, _nvmlEnableState_t(enabled), c_uint(flags)))
    return None
# Throws NVML_ERROR_NOT_SUPPORTED if hardware doesn't support setting auto boosted clocks
def nvmlDeviceSetGpuLockedClocks(handle, minGpuClockMHz, maxGpuClockMHz):
    """Lock GPU clocks to the given MHz range."""
    setter = _nvmlGetFunctionPointer("nvmlDeviceSetGpuLockedClocks")
    _nvmlCheckReturn(setter(handle, c_uint(minGpuClockMHz), c_uint(maxGpuClockMHz)))
    return None
def nvmlDeviceResetGpuLockedClocks(handle):
    """Remove any GPU clock lock."""
    setter = _nvmlGetFunctionPointer("nvmlDeviceResetGpuLockedClocks")
    _nvmlCheckReturn(setter(handle))
    return None
def nvmlDeviceSetMemoryLockedClocks(handle, minMemClockMHz, maxMemClockMHz):
    """Lock memory clocks to the given MHz range."""
    setter = _nvmlGetFunctionPointer("nvmlDeviceSetMemoryLockedClocks")
    _nvmlCheckReturn(setter(handle, c_uint(minMemClockMHz), c_uint(maxMemClockMHz)))
    return None
def nvmlDeviceResetMemoryLockedClocks(handle):
    """Remove any memory clock lock."""
    setter = _nvmlGetFunctionPointer("nvmlDeviceResetMemoryLockedClocks")
    _nvmlCheckReturn(setter(handle))
    return None
def nvmlDeviceGetClkMonStatus(handle, c_clkMonInfo):
    """Fill *c_clkMonInfo* with clock monitor status.

    NOTE: unlike most wrappers here, this returns the raw NVML status
    code unchecked instead of raising on error.
    """
    query = _nvmlGetFunctionPointer("nvmlDeviceGetClkMonStatus")
    return query(handle, c_clkMonInfo)
# Added in 4.304
def nvmlDeviceSetApplicationsClocks(handle, maxMemClockMHz, maxGraphicsClockMHz):
    """Set applications clocks (memory and graphics, in MHz)."""
    setter = _nvmlGetFunctionPointer("nvmlDeviceSetApplicationsClocks")
    _nvmlCheckReturn(setter(handle, c_uint(maxMemClockMHz), c_uint(maxGraphicsClockMHz)))
    return None
# Added in 4.304
def nvmlDeviceResetApplicationsClocks(handle):
    """Reset applications clocks to their defaults."""
    setter = _nvmlGetFunctionPointer("nvmlDeviceResetApplicationsClocks")
    _nvmlCheckReturn(setter(handle))
    return None
# Added in 4.304
def nvmlDeviceSetPowerManagementLimit(handle, limit):
    """Set the power management limit."""
    setter = _nvmlGetFunctionPointer("nvmlDeviceSetPowerManagementLimit")
    _nvmlCheckReturn(setter(handle, c_uint(limit)))
    return None
# Added in 4.304
def nvmlDeviceSetGpuOperationMode(handle, mode):
    """Set the GPU operation mode."""
    setter = _nvmlGetFunctionPointer("nvmlDeviceSetGpuOperationMode")
    _nvmlCheckReturn(setter(handle, _nvmlGpuOperationMode_t(mode)))
    return None
# Added in 2.285
def nvmlEventSetCreate():
    """Create and return a new empty event set handle."""
    event_set = c_nvmlEventSet_t()
    create = _nvmlGetFunctionPointer("nvmlEventSetCreate")
    _nvmlCheckReturn(create(byref(event_set)))
    return event_set
# Added in 2.285
def nvmlDeviceRegisterEvents(handle, eventTypes, eventSet):
    """Subscribe *eventSet* to the given event-type bitmask on the device."""
    register = _nvmlGetFunctionPointer("nvmlDeviceRegisterEvents")
    _nvmlCheckReturn(register(handle, c_ulonglong(eventTypes), eventSet))
    return None
# Added in 2.285
def nvmlDeviceGetSupportedEventTypes(handle):
    """Return the bitmask of event types the device supports."""
    event_types = c_ulonglong()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedEventTypes")
    _nvmlCheckReturn(query(handle, byref(event_types)))
    return event_types.value
# raises NVML_ERROR_TIMEOUT exception on timeout
def nvmlEventSetWait_v2(eventSet, timeoutms):
    """Block up to *timeoutms* milliseconds for an event; return its data."""
    wait = _nvmlGetFunctionPointer("nvmlEventSetWait_v2")
    event_data = c_nvmlEventData_t()
    _nvmlCheckReturn(wait(eventSet, byref(event_data), c_uint(timeoutms)))
    return event_data
def nvmlEventSetWait(eventSet, timeoutms):
    """Unversioned alias for nvmlEventSetWait_v2."""
    return nvmlEventSetWait_v2(eventSet, timeoutms)
# Added in 2.285
def nvmlEventSetFree(eventSet):
    """Release an event set handle."""
    release = _nvmlGetFunctionPointer("nvmlEventSetFree")
    _nvmlCheckReturn(release(eventSet))
    return None
# Added in 3.295
def nvmlDeviceOnSameBoard(handle1, handle2):
    """Return True when the two device handles are on the same board."""
    same_board = c_int()
    query = _nvmlGetFunctionPointer("nvmlDeviceOnSameBoard")
    _nvmlCheckReturn(query(handle1, handle2, byref(same_board)))
    return (same_board.value != 0)
# Added in 3.295
def nvmlDeviceGetCurrPcieLinkGeneration(handle):
    """Return the current PCIe link generation."""
    generation = c_uint()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetCurrPcieLinkGeneration")
    _nvmlCheckReturn(query(handle, byref(generation)))
    return generation.value
# Added in 3.295
def nvmlDeviceGetMaxPcieLinkGeneration(handle):
    """Return the maximum PCIe link generation."""
    generation = c_uint()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetMaxPcieLinkGeneration")
    _nvmlCheckReturn(query(handle, byref(generation)))
    return generation.value
# Added in 3.295
def nvmlDeviceGetCurrPcieLinkWidth(handle):
    """Return the current PCIe link width."""
    link_width = c_uint()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetCurrPcieLinkWidth")
    _nvmlCheckReturn(query(handle, byref(link_width)))
    return link_width.value
# Added in 3.295
def nvmlDeviceGetMaxPcieLinkWidth(handle):
    """Return the maximum PCIe link width."""
    link_width = c_uint()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetMaxPcieLinkWidth")
    _nvmlCheckReturn(query(handle, byref(link_width)))
    return link_width.value
def nvmlDeviceGetGpuMaxPcieLinkGeneration(handle):
    """Return the maximum PCIe link generation supported by the GPU itself."""
    generation = c_uint()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetGpuMaxPcieLinkGeneration")
    _nvmlCheckReturn(query(handle, byref(generation)))
    return generation.value
# Added in 4.304
def nvmlDeviceGetSupportedClocksThrottleReasons(handle):
    """Return the bitmask of throttle reasons the device can report."""
    reasons = c_ulonglong()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedClocksThrottleReasons")
    _nvmlCheckReturn(query(handle, byref(reasons)))
    return reasons.value
# Added in 4.304
def nvmlDeviceGetCurrentClocksThrottleReasons(handle):
    """Return the bitmask of currently active throttle reasons."""
    reasons = c_ulonglong()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetCurrentClocksThrottleReasons")
    _nvmlCheckReturn(query(handle, byref(reasons)))
    return reasons.value
# Added in 5.319
def nvmlDeviceGetIndex(handle):
    """Return the NVML index of the device."""
    index = c_uint()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetIndex")
    _nvmlCheckReturn(query(handle, byref(index)))
    return index.value
# Added in 5.319
def nvmlDeviceGetAccountingMode(handle):
    """Return the accounting-mode enable state."""
    mode = _nvmlEnableState_t()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetAccountingMode")
    _nvmlCheckReturn(query(handle, byref(mode)))
    return mode.value
def nvmlDeviceSetAccountingMode(handle, mode):
    """Enable or disable per-process accounting."""
    setter = _nvmlGetFunctionPointer("nvmlDeviceSetAccountingMode")
    _nvmlCheckReturn(setter(handle, _nvmlEnableState_t(mode)))
    return None
def nvmlDeviceClearAccountingPids(handle):
    """Clear all accounted process entries."""
    clear = _nvmlGetFunctionPointer("nvmlDeviceClearAccountingPids")
    _nvmlCheckReturn(clear(handle))
    return None
def nvmlDeviceGetAccountingStats(handle, pid):
    """Return the accounting stats struct for *pid*."""
    stats = c_nvmlAccountingStats_t()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetAccountingStats")
    _nvmlCheckReturn(query(handle, c_uint(pid), byref(stats)))
    if (stats.maxMemoryUsage == NVML_VALUE_NOT_AVAILABLE_ulonglong.value):
        # special case for WDDM on Windows, see comment above
        stats.maxMemoryUsage = None
    return stats
def nvmlDeviceGetAccountingPids(handle):
    """Return the list of PIDs that have accounting data."""
    count = c_uint(nvmlDeviceGetAccountingBufferSize(handle))
    pid_buffer = (c_uint * count.value)()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetAccountingPids")
    _nvmlCheckReturn(query(handle, byref(count), pid_buffer))
    return list(map(int, pid_buffer[0:count.value]))
def nvmlDeviceGetAccountingBufferSize(handle):
    """Return the size of the accounting PID buffer."""
    buffer_size = c_uint()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetAccountingBufferSize")
    _nvmlCheckReturn(query(handle, byref(buffer_size)))
    return int(buffer_size.value)
def nvmlDeviceGetRetiredPages(device, sourceFilter):
    """Return the list of retired page addresses for the given cause."""
    c_source = _nvmlPageRetirementCause_t(sourceFilter)
    c_count = c_uint(0)
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetRetiredPages")
    # First call will get the size
    ret = fn(device, c_source, byref(c_count), None)
    # this should only fail with insufficient size
    if ((ret != NVML_SUCCESS) and
        (ret != NVML_ERROR_INSUFFICIENT_SIZE)):
        raise NVMLError(ret)
    # call again with a buffer
    # oversize the array for the rare cases where additional pages
    # are retired between NVML calls
    c_count.value = c_count.value * 2 + 5
    page_array = c_ulonglong * c_count.value
    c_pages = page_array()
    ret = fn(device, c_source, byref(c_count), c_pages)
    _nvmlCheckReturn(ret)
    return list(map(int, c_pages[0:c_count.value]))
def nvmlDeviceGetRetiredPages_v2(device, sourceFilter):
    """Like nvmlDeviceGetRetiredPages but also returns timestamps.

    Returns a list of {'address': int, 'timestamp': int} dicts.
    """
    c_source = _nvmlPageRetirementCause_t(sourceFilter)
    c_count = c_uint(0)
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetRetiredPages_v2")
    # First call will get the size
    ret = fn(device, c_source, byref(c_count), None)
    # this should only fail with insufficient size
    if ((ret != NVML_SUCCESS) and
        (ret != NVML_ERROR_INSUFFICIENT_SIZE)):
        raise NVMLError(ret)
    # call again with a buffer
    # oversize the array for the rare cases where additional pages
    # are retired between NVML calls
    c_count.value = c_count.value * 2 + 5
    page_array = c_ulonglong * c_count.value
    c_pages = page_array()
    times_array = c_ulonglong * c_count.value
    c_times = times_array()
    ret = fn(device, c_source, byref(c_count), c_pages, c_times)
    _nvmlCheckReturn(ret)
    return [{'address': int(c_pages[i]), 'timestamp': int(c_times[i])} for i in range(c_count.value)]
def nvmlDeviceGetRetiredPagesPendingStatus(device):
    """Return the pending status flag for retired pages as an int."""
    c_pending = _nvmlEnableState_t()
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetRetiredPagesPendingStatus")
    ret = fn(device, byref(c_pending))
    _nvmlCheckReturn(ret)
    return int(c_pending.value)
def nvmlDeviceGetAPIRestriction(device, apiType):
    """Return the restriction state for the given restricted-API group."""
    c_permission = _nvmlEnableState_t()
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetAPIRestriction")
    ret = fn(device, _nvmlRestrictedAPI_t(apiType), byref(c_permission))
    _nvmlCheckReturn(ret)
    return int(c_permission.value)
def nvmlDeviceSetAPIRestriction(handle, apiType, isRestricted):
    """Set the restriction state for the given restricted-API group."""
    fn = _nvmlGetFunctionPointer("nvmlDeviceSetAPIRestriction")
    ret = fn(handle, _nvmlRestrictedAPI_t(apiType), _nvmlEnableState_t(isRestricted))
    _nvmlCheckReturn(ret)
    return None
def nvmlDeviceGetBridgeChipInfo(handle):
    """Return the bridge chip hierarchy struct for the device."""
    bridgeHierarchy = c_nvmlBridgeChipHierarchy_t()
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetBridgeChipInfo")
    ret = fn(handle, byref(bridgeHierarchy))
    _nvmlCheckReturn(ret)
    return bridgeHierarchy
def nvmlDeviceGetSamples(device, sampling_type, timeStamp):
    """Return (valueType, samples) recorded since *timeStamp*."""
    c_sampling_type = _nvmlSamplingType_t(sampling_type)
    c_time_stamp = c_ulonglong(timeStamp)
    c_sample_count = c_uint(0)
    c_sample_value_type = _nvmlValueType_t()
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetSamples")
    ## First Call gets the size
    ret = fn(device, c_sampling_type, c_time_stamp, byref(c_sample_value_type), byref(c_sample_count), None)
    # Stop if this fails
    if (ret != NVML_SUCCESS):
        raise NVMLError(ret)
    sampleArray = c_sample_count.value * c_nvmlSample_t
    c_samples = sampleArray()
    ret = fn(device, c_sampling_type, c_time_stamp, byref(c_sample_value_type), byref(c_sample_count), c_samples)
    _nvmlCheckReturn(ret)
    return (c_sample_value_type.value, c_samples[0:c_sample_count.value])
def nvmlDeviceGetViolationStatus(device, perfPolicyType):
    """Return the violation-time struct for the given perf policy."""
    c_perfPolicy_type = _nvmlPerfPolicyType_t(perfPolicyType)
    c_violTime = c_nvmlViolationTime_t()
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetViolationStatus")
    ## Invoke the method to get violation time
    ret = fn(device, c_perfPolicy_type, byref(c_violTime))
    _nvmlCheckReturn(ret)
    return c_violTime
def nvmlDeviceGetPcieThroughput(device, counter):
    """Return the PCIe throughput value for the given counter."""
    c_util = c_uint()
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetPcieThroughput")
    ret = fn(device, _nvmlPcieUtilCounter_t(counter), byref(c_util))
    _nvmlCheckReturn(ret)
    return c_util.value
def nvmlSystemGetTopologyGpuSet(cpuNumber):
    """Return the device handles in the topology set of *cpuNumber*."""
    c_count = c_uint(0)
    fn = _nvmlGetFunctionPointer("nvmlSystemGetTopologyGpuSet")
    # First call will get the size
    ret = fn(cpuNumber, byref(c_count), None)
    if ret != NVML_SUCCESS:
        raise NVMLError(ret)
    # call again with a buffer
    device_array = c_nvmlDevice_t * c_count.value
    c_devices = device_array()
    ret = fn(cpuNumber, byref(c_count), c_devices)
    _nvmlCheckReturn(ret)
    return list(c_devices[0:c_count.value])
def nvmlDeviceGetTopologyNearestGpus(device, level):
    """Return the device handles at topology *level* relative to *device*."""
    c_count = c_uint(0)
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetTopologyNearestGpus")
    # First call will get the size
    ret = fn(device, level, byref(c_count), None)
    if ret != NVML_SUCCESS:
        raise NVMLError(ret)
    # call again with a buffer
    device_array = c_nvmlDevice_t * c_count.value
    c_devices = device_array()
    ret = fn(device, level, byref(c_count), c_devices)
    _nvmlCheckReturn(ret)
    return list(c_devices[0:c_count.value])
def nvmlDeviceGetTopologyCommonAncestor(device1, device2):
    """Return the topology level of the common ancestor of two devices."""
    c_level = _nvmlGpuTopologyLevel_t()
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetTopologyCommonAncestor")
    ret = fn(device1, device2, byref(c_level))
    _nvmlCheckReturn(ret)
    return c_level.value
def nvmlDeviceGetNvLinkUtilizationCounter(device, link, counter):
    """Return (rx, tx) utilization counter values for an NVLink."""
    rx_counter = c_ulonglong()
    tx_counter = c_ulonglong()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetNvLinkUtilizationCounter")
    _nvmlCheckReturn(query(device, link, counter, byref(rx_counter), byref(tx_counter)))
    return (rx_counter.value, tx_counter.value)
def nvmlDeviceFreezeNvLinkUtilizationCounter(device, link, counter, freeze):
    """Freeze or unfreeze an NVLink utilization counter."""
    action = _nvmlGetFunctionPointer("nvmlDeviceFreezeNvLinkUtilizationCounter")
    _nvmlCheckReturn(action(device, link, counter, freeze))
    return None
def nvmlDeviceResetNvLinkUtilizationCounter(device, link, counter):
    """Reset an NVLink utilization counter."""
    action = _nvmlGetFunctionPointer("nvmlDeviceResetNvLinkUtilizationCounter")
    _nvmlCheckReturn(action(device, link, counter))
    return None
def nvmlDeviceSetNvLinkUtilizationControl(device, link, counter, control, reset):
    """Apply *control* settings to an NVLink utilization counter."""
    action = _nvmlGetFunctionPointer("nvmlDeviceSetNvLinkUtilizationControl")
    _nvmlCheckReturn(action(device, link, counter, byref(control), reset))
    return None
def nvmlDeviceGetNvLinkUtilizationControl(device, link, counter):
    """Return the control settings struct of an NVLink utilization counter."""
    control = nvmlNvLinkUtilizationControl_t()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetNvLinkUtilizationControl")
    _nvmlCheckReturn(query(device, link, counter, byref(control)))
    return control
def nvmlDeviceGetNvLinkCapability(device, link, capability):
    """Return the value of an NVLink capability."""
    cap_result = c_uint()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetNvLinkCapability")
    _nvmlCheckReturn(query(device, link, capability, byref(cap_result)))
    return cap_result.value
def nvmlDeviceGetNvLinkErrorCounter(device, link, counter):
    """Return an NVLink error counter value."""
    error_count = c_ulonglong()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetNvLinkErrorCounter")
    _nvmlCheckReturn(query(device, link, counter, byref(error_count)))
    return error_count.value
def nvmlDeviceResetNvLinkErrorCounters(device, link):
    """Reset the error counters on an NVLink."""
    action = _nvmlGetFunctionPointer("nvmlDeviceResetNvLinkErrorCounters")
    _nvmlCheckReturn(action(device, link))
    return None
def nvmlDeviceGetNvLinkRemotePciInfo(device, link):
    """Return PCI info of the device at the remote end of an NVLink.

    Note: intentionally dispatches to the _v2 entry point.
    """
    pci_info = nvmlPciInfo_t()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetNvLinkRemotePciInfo_v2")
    _nvmlCheckReturn(query(device, link, byref(pci_info)))
    return pci_info
def nvmlDeviceGetNvLinkRemoteDeviceType(handle, link):
    """Return the device type at the remote end of an NVLink."""
    remote_type = _nvmlNvLinkDeviceType_t()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetNvLinkRemoteDeviceType")
    _nvmlCheckReturn(query(handle, link, byref(remote_type)))
    return remote_type.value
def nvmlDeviceGetNvLinkState(device, link):
    """Return the active state of an NVLink."""
    is_active = c_uint()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetNvLinkState")
    _nvmlCheckReturn(query(device, link, byref(is_active)))
    return is_active.value
def nvmlDeviceGetNvLinkVersion(device, link):
    """Return the NVLink version of a link."""
    link_version = c_uint()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetNvLinkVersion")
    _nvmlCheckReturn(query(device, link, byref(link_version)))
    return link_version.value
def nvmlDeviceModifyDrainState(pciInfo, newState):
    """Modify the drain state of the GPU identified by *pciInfo*."""
    action = _nvmlGetFunctionPointer("nvmlDeviceModifyDrainState")
    _nvmlCheckReturn(action(pointer(pciInfo), newState))
    return None
def nvmlDeviceQueryDrainState(pciInfo):
    """Return the drain state of the GPU identified by *pciInfo*."""
    drain_state = c_uint()
    query = _nvmlGetFunctionPointer("nvmlDeviceQueryDrainState")
    _nvmlCheckReturn(query(pointer(pciInfo), byref(drain_state)))
    return drain_state.value
def nvmlDeviceRemoveGpu(pciInfo):
    """Remove the GPU identified by *pciInfo*."""
    action = _nvmlGetFunctionPointer("nvmlDeviceRemoveGpu")
    _nvmlCheckReturn(action(pointer(pciInfo)))
    return None
def nvmlDeviceDiscoverGpus(pciInfo):
    """Trigger GPU discovery for the bus identified by *pciInfo*."""
    action = _nvmlGetFunctionPointer("nvmlDeviceDiscoverGpus")
    _nvmlCheckReturn(action(pointer(pciInfo)))
    return None
def nvmlDeviceGetFieldValues(handle, fieldIds):
    """Query a batch of field values.

    Each entry of *fieldIds* is either a bare fieldId or a
    (fieldId, scopeId) pair.
    """
    values_arr = c_nvmlFieldValue_t * len(fieldIds)
    values = values_arr()
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetFieldValues")
    for i, fieldId in enumerate(fieldIds):
        try:
            # (fieldId, scopeId) pair
            (values[i].fieldId, values[i].scopeId) = fieldId
        except TypeError:
            # bare fieldId (not unpackable)
            values[i].fieldId = fieldId
    ret = fn(handle, c_int32(len(fieldIds)), byref(values))
    _nvmlCheckReturn(ret)
    return values
def nvmlDeviceClearFieldValues(handle, fieldIds):
    """Clear a batch of field values; *fieldIds* as in nvmlDeviceGetFieldValues."""
    values_arr = c_nvmlFieldValue_t * len(fieldIds)
    values = values_arr()
    fn = _nvmlGetFunctionPointer("nvmlDeviceClearFieldValues")
    for i, fieldId in enumerate(fieldIds):
        try:
            # (fieldId, scopeId) pair
            (values[i].fieldId, values[i].scopeId) = fieldId
        except TypeError:
            # bare fieldId (not unpackable)
            values[i].fieldId = fieldId
    ret = fn(handle, c_int32(len(fieldIds)), byref(values))
    _nvmlCheckReturn(ret)
    return values
def nvmlDeviceGetVirtualizationMode(handle):
    """Return the device virtualization mode as a raw value."""
    virt_mode = c_ulonglong()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetVirtualizationMode")
    _nvmlCheckReturn(query(handle, byref(virt_mode)))
    return virt_mode.value
def nvmlDeviceSetVirtualizationMode(handle, virtualization_mode):
    """Set the virtualization mode.

    NOTE: unlike most wrappers here, this returns the raw NVML status
    code unchecked instead of raising on error.
    """
    setter = _nvmlGetFunctionPointer("nvmlDeviceSetVirtualizationMode")
    return setter(handle, virtualization_mode)
def nvmlGetVgpuDriverCapabilities(capability):
    """Return the value of a vGPU driver capability."""
    cap_result = c_uint()
    query = _nvmlGetFunctionPointer("nvmlGetVgpuDriverCapabilities")
    _nvmlCheckReturn(query(_nvmlVgpuDriverCapability_t(capability), byref(cap_result)))
    return cap_result.value
def nvmlDeviceGetVgpuCapabilities(handle, capability):
    """Return the value of a device vGPU capability."""
    cap_result = c_uint()
    query = _nvmlGetFunctionPointer("nvmlDeviceGetVgpuCapabilities")
    _nvmlCheckReturn(query(handle, _nvmlDeviceVgpuCapability_t(capability), byref(cap_result)))
    return cap_result.value
def nvmlDeviceGetSupportedVgpus(handle):
    """Return the list of vGPU type ids supported by the device."""
    # first call to get the size
    c_vgpu_count = c_uint(0)
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedVgpus")
    ret = fn(handle, byref(c_vgpu_count), None)
    if (ret == NVML_SUCCESS):
        # special case, no supported vGPUs
        return []
    elif (ret == NVML_ERROR_INSUFFICIENT_SIZE):
        # typical case
        vgpu_type_ids_array = _nvmlVgpuTypeId_t * c_vgpu_count.value
        c_vgpu_type_ids = vgpu_type_ids_array()
        # make the call again
        ret = fn(handle, byref(c_vgpu_count), c_vgpu_type_ids)
        _nvmlCheckReturn(ret)
        vgpus = []
        for i in range(c_vgpu_count.value):
            vgpus.append(c_vgpu_type_ids[i])
        return vgpus
    else:
        # error case
        raise NVMLError(ret)
def nvmlDeviceGetCreatableVgpus(handle):
    """Return the list of vGPU type ids currently creatable on the device."""
    # first call to get the size
    c_vgpu_count = c_uint(0)
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetCreatableVgpus")
    ret = fn(handle, byref(c_vgpu_count), None)
    if (ret == NVML_SUCCESS):
        # special case, no supported vGPUs
        return []
    elif (ret == NVML_ERROR_INSUFFICIENT_SIZE):
        # typical case
        vgpu_type_ids_array = _nvmlVgpuTypeId_t * c_vgpu_count.value
        c_vgpu_type_ids = vgpu_type_ids_array()
        # make the call again
        ret = fn(handle, byref(c_vgpu_count), c_vgpu_type_ids)
        _nvmlCheckReturn(ret)
        vgpus = []
        for i in range(c_vgpu_count.value):
            vgpus.append(c_vgpu_type_ids[i])
        return vgpus
    else:
        # error case
        raise NVMLError(ret)
def nvmlVgpuTypeGetGpuInstanceProfileId(vgpuTypeId):
    """Return the GPU-instance profile id associated with this vGPU type."""
    profile_id = c_uint(0)
    query = _nvmlGetFunctionPointer("nvmlVgpuTypeGetGpuInstanceProfileId")
    _nvmlCheckReturn(query(vgpuTypeId, byref(profile_id)))
    return (profile_id.value)
@convertStrBytes
def nvmlVgpuTypeGetClass(vgpuTypeId):
    """Return the class string of the vGPU type."""
    class_buf = create_string_buffer(NVML_DEVICE_NAME_BUFFER_SIZE)
    buf_size = c_uint(NVML_DEVICE_NAME_BUFFER_SIZE)
    query = _nvmlGetFunctionPointer("nvmlVgpuTypeGetClass")
    _nvmlCheckReturn(query(vgpuTypeId, class_buf, byref(buf_size)))
    return class_buf.value
@convertStrBytes
def nvmlVgpuTypeGetName(vgpuTypeId):
    """Return the name string of the vGPU type."""
    name_buf = create_string_buffer(NVML_DEVICE_NAME_BUFFER_SIZE)
    buf_size = c_uint(NVML_DEVICE_NAME_BUFFER_SIZE)
    query = _nvmlGetFunctionPointer("nvmlVgpuTypeGetName")
    _nvmlCheckReturn(query(vgpuTypeId, name_buf, byref(buf_size)))
    return name_buf.value
def nvmlVgpuTypeGetDeviceID(vgpuTypeId):
    """Return (deviceId, subsystemId) for the vGPU type."""
    device_id = c_ulonglong(0)
    subsystem_id = c_ulonglong(0)
    query = _nvmlGetFunctionPointer("nvmlVgpuTypeGetDeviceID")
    _nvmlCheckReturn(query(vgpuTypeId, byref(device_id), byref(subsystem_id)))
    return (device_id.value, subsystem_id.value)
def nvmlVgpuTypeGetFramebufferSize(vgpuTypeId):
    """Return the framebuffer size of the vGPU type."""
    fb_size = c_ulonglong(0)
    query = _nvmlGetFunctionPointer("nvmlVgpuTypeGetFramebufferSize")
    _nvmlCheckReturn(query(vgpuTypeId, byref(fb_size)))
    return fb_size.value
def nvmlVgpuTypeGetNumDisplayHeads(vgpuTypeId):
    """Return the number of display heads of the vGPU type."""
    num_heads = c_uint(0)
    query = _nvmlGetFunctionPointer("nvmlVgpuTypeGetNumDisplayHeads")
    _nvmlCheckReturn(query(vgpuTypeId, byref(num_heads)))
    return num_heads.value
def nvmlVgpuTypeGetResolution(vgpuTypeId):
    """Return (x, y) resolution for the vGPU type (queries head index 0)."""
    xdim = c_uint(0)
    ydim = c_uint(0)
    query = _nvmlGetFunctionPointer("nvmlVgpuTypeGetResolution")
    _nvmlCheckReturn(query(vgpuTypeId, 0, byref(xdim), byref(ydim)))
    return (xdim.value, ydim.value)
@convertStrBytes
def nvmlVgpuTypeGetLicense(vgpuTypeId):
    """Return the license string of the vGPU type."""
    license_buf = create_string_buffer(NVML_GRID_LICENSE_BUFFER_SIZE)
    buf_size = c_uint(NVML_GRID_LICENSE_BUFFER_SIZE)
    query = _nvmlGetFunctionPointer("nvmlVgpuTypeGetLicense")
    _nvmlCheckReturn(query(vgpuTypeId, license_buf, buf_size))
    return license_buf.value
def nvmlVgpuTypeGetFrameRateLimit(vgpuTypeId):
    """Return the frame rate limit of the vGPU type."""
    frl = c_uint(0)
    query = _nvmlGetFunctionPointer("nvmlVgpuTypeGetFrameRateLimit")
    _nvmlCheckReturn(query(vgpuTypeId, byref(frl)))
    return frl.value
def nvmlVgpuTypeGetMaxInstances(handle, vgpuTypeId):
    """Return the maximum vGPU instances of this type on the device."""
    max_instances = c_uint(0)
    query = _nvmlGetFunctionPointer("nvmlVgpuTypeGetMaxInstances")
    _nvmlCheckReturn(query(handle, vgpuTypeId, byref(max_instances)))
    return max_instances.value
def nvmlVgpuTypeGetMaxInstancesPerVm(vgpuTypeId):
    """Return the maximum vGPU instances of this type per VM."""
    max_per_vm = c_uint(0)
    query = _nvmlGetFunctionPointer("nvmlVgpuTypeGetMaxInstancesPerVm")
    _nvmlCheckReturn(query(vgpuTypeId, byref(max_per_vm)))
    return max_per_vm.value
def nvmlDeviceGetActiveVgpus(handle):
    """Return the list of active vGPU instance handles on the device."""
    # first call to get the size
    c_vgpu_count = c_uint(0)
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetActiveVgpus")
    ret = fn(handle, byref(c_vgpu_count), None)
    if (ret == NVML_SUCCESS):
        # special case, no active vGPUs
        return []
    elif (ret == NVML_ERROR_INSUFFICIENT_SIZE):
        # typical case
        vgpu_instance_array = _nvmlVgpuInstance_t * c_vgpu_count.value
        c_vgpu_instances = vgpu_instance_array()
        # make the call again
        ret = fn(handle, byref(c_vgpu_count), c_vgpu_instances)
        _nvmlCheckReturn(ret)
        vgpus = []
        for i in range(c_vgpu_count.value):
            vgpus.append(c_vgpu_instances[i])
        return vgpus
    else:
        # error case
        raise NVMLError(ret)
@convertStrBytes
def nvmlVgpuInstanceGetVmID(vgpuInstance):
    """Return (vmId, vmIdType) for the VM running the given vGPU instance."""
    c_vm_id = create_string_buffer(NVML_DEVICE_UUID_BUFFER_SIZE)
    # Advertise the size that was actually allocated. The previous code
    # passed NVML_GRID_LICENSE_BUFFER_SIZE while allocating only
    # NVML_DEVICE_UUID_BUFFER_SIZE bytes; if the license size is larger,
    # the driver could write past the end of the buffer.
    c_buffer_size = c_uint(NVML_DEVICE_UUID_BUFFER_SIZE)
    c_vm_id_type = c_uint(0)
    fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetVmID")
    ret = fn(vgpuInstance, byref(c_vm_id), c_buffer_size, byref(c_vm_id_type))
    _nvmlCheckReturn(ret)
    return (c_vm_id.value, c_vm_id_type.value)
@convertStrBytes
def nvmlVgpuInstanceGetUUID(vgpuInstance):
    """Return the UUID string of the vGPU instance."""
    uuid_buf = create_string_buffer(NVML_DEVICE_UUID_BUFFER_SIZE)
    buf_size = c_uint(NVML_DEVICE_UUID_BUFFER_SIZE)
    query = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetUUID")
    _nvmlCheckReturn(query(vgpuInstance, byref(uuid_buf), buf_size))
    return uuid_buf.value
@convertStrBytes
def nvmlVgpuInstanceGetMdevUUID(vgpuInstance):
    """Return the mdev UUID string of the vGPU instance."""
    uuid_buf = create_string_buffer(NVML_DEVICE_UUID_BUFFER_SIZE)
    buf_size = c_uint(NVML_DEVICE_UUID_BUFFER_SIZE)
    query = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetMdevUUID")
    _nvmlCheckReturn(query(vgpuInstance, byref(uuid_buf), buf_size))
    return uuid_buf.value
@convertStrBytes
def nvmlVgpuInstanceGetVmDriverVersion(vgpuInstance):
    """Return the guest VM driver version string."""
    version_buf = create_string_buffer(NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE)
    buf_size = c_uint(NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE)
    query = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetVmDriverVersion")
    _nvmlCheckReturn(query(vgpuInstance, byref(version_buf), buf_size))
    return version_buf.value
def nvmlVgpuInstanceGetLicenseStatus(vgpuInstance):
    """Return the license status value of the vGPU instance."""
    status = c_uint(0)
    query = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetLicenseStatus")
    _nvmlCheckReturn(query(vgpuInstance, byref(status)))
    return status.value
def nvmlVgpuInstanceGetLicenseInfo_v2(vgpuInstance):
    """Return the license info struct of the vGPU instance."""
    query = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetLicenseInfo_v2")
    license_info = c_nvmlVgpuLicenseInfo_t()
    _nvmlCheckReturn(query(vgpuInstance, byref(license_info)))
    return license_info
def nvmlVgpuInstanceGetLicenseInfo(vgpuInstance):
    """Unversioned alias for nvmlVgpuInstanceGetLicenseInfo_v2."""
    return nvmlVgpuInstanceGetLicenseInfo_v2(vgpuInstance)
def nvmlVgpuInstanceGetFrameRateLimit(vgpuInstance):
    """Return the frame rate limit of the vGPU instance."""
    frl = c_uint(0)
    query = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetFrameRateLimit")
    _nvmlCheckReturn(query(vgpuInstance, byref(frl)))
    return frl.value
def nvmlVgpuInstanceGetEccMode(vgpuInstance):
    """Return the ECC enable state of the vGPU instance."""
    mode = _nvmlEnableState_t()
    query = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetEccMode")
    _nvmlCheckReturn(query(vgpuInstance, byref(mode)))
    return mode.value
def nvmlVgpuInstanceGetType(vgpuInstance):
    """Return the vGPU type id of the instance."""
    vgpu_type = c_uint(0)
    query = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetType")
    _nvmlCheckReturn(query(vgpuInstance, byref(vgpu_type)))
    return vgpu_type.value
def nvmlVgpuInstanceGetEncoderCapacity(vgpuInstance):
    """Return the encoder capacity of the vGPU instance."""
    capacity = c_ulonglong(0)
    query = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetEncoderCapacity")
    _nvmlCheckReturn(query(vgpuInstance, byref(capacity)))
    return capacity.value
def nvmlVgpuInstanceSetEncoderCapacity(vgpuInstance, encoder_capacity):
    """Set the encoder capacity.

    NOTE: unlike most wrappers here, this returns the raw NVML status
    code unchecked instead of raising on error.
    """
    setter = _nvmlGetFunctionPointer("nvmlVgpuInstanceSetEncoderCapacity")
    return setter(vgpuInstance, encoder_capacity)
def nvmlVgpuInstanceGetFbUsage(vgpuInstance):
    """Return the framebuffer usage of the vGPU instance."""
    fb_usage = c_uint(0)
    query = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetFbUsage")
    _nvmlCheckReturn(query(vgpuInstance, byref(fb_usage)))
    return fb_usage.value
def nvmlVgpuTypeGetCapabilities(vgpuTypeId, capability):
    """Return the value of a vGPU-type capability."""
    cap_result = c_uint(0)
    query = _nvmlGetFunctionPointer("nvmlVgpuTypeGetCapabilities")
    _nvmlCheckReturn(query(vgpuTypeId, _nvmlVgpuCapability_t(capability), byref(cap_result)))
    return (cap_result.value)
def nvmlVgpuInstanceGetGpuInstanceId(vgpuInstance):
    """Return the GPU-instance id associated with the vGPU instance."""
    gi_id = c_uint(0)
    query = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetGpuInstanceId")
    _nvmlCheckReturn(query(vgpuInstance, byref(gi_id)))
    return (gi_id.value)
@convertStrBytes
def nvmlVgpuInstanceGetGpuPciId(vgpuInstance):
    """Return the PCI id string of the GPU hosting the vGPU instance."""
    pci_id_buf = create_string_buffer(NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE)
    query = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetGpuPciId")
    _nvmlCheckReturn(query(vgpuInstance, pci_id_buf, byref(c_uint(NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE))))
    return pci_id_buf.value
def nvmlDeviceGetVgpuUtilization(handle, timeStamp):
    """Return per-vGPU utilization samples newer than *timeStamp*.

    Uses NVML's two-call sizing protocol: the first call passes a NULL
    buffer so the driver reports the number of active vGPU instances,
    then the buffer is allocated and the call repeated.

    Returns a list of ``c_nvmlVgpuInstanceUtilizationSample_t`` (empty if
    no vGPUs are active).  Raises ``NVMLError`` on any other failure.
    """
    # first call to get the size
    c_vgpu_count = c_uint(0)
    c_time_stamp = c_ulonglong(timeStamp)
    c_sample_value_type = _nvmlValueType_t()
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetVgpuUtilization")
    ret = fn(handle, c_time_stamp, byref(c_sample_value_type), byref(c_vgpu_count), None)
    if (ret == NVML_SUCCESS):
        # special case, no active vGPUs
        return []
    elif (ret == NVML_ERROR_INSUFFICIENT_SIZE):
        # typical case
        sampleArray = c_vgpu_count.value * c_nvmlVgpuInstanceUtilizationSample_t
        c_samples = sampleArray()
        # make the call again
        ret = fn(handle, c_time_stamp, byref(c_sample_value_type), byref(c_vgpu_count), c_samples)
        _nvmlCheckReturn(ret)
        # slice to the count actually filled (it may have shrunk between calls)
        return c_samples[0:c_vgpu_count.value]
    else:
        # error case
        raise NVMLError(ret)
def nvmlDeviceGetP2PStatus(device1, device2, p2pIndex):
c_p2pstatus = _nvmlGpuP2PStatus_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetP2PStatus")
ret = fn(device1, device2, p2pIndex, byref(c_p2pstatus))
_nvmlCheckReturn(ret)
return c_p2pstatus.value
def nvmlDeviceGetGridLicensableFeatures_v4(handle):
c_get_grid_licensable_features = c_nvmlGridLicensableFeatures_v4_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetGridLicensableFeatures_v4")
ret = fn(handle, byref(c_get_grid_licensable_features))
_nvmlCheckReturn(ret)
return (c_get_grid_licensable_features)
def nvmlDeviceGetGridLicensableFeatures(handle):
return nvmlDeviceGetGridLicensableFeatures_v4(handle)
def nvmlDeviceGetGspFirmwareVersion(handle, version):
fn = _nvmlGetFunctionPointer("nvmlDeviceGetGspFirmwareVersion")
ret = fn(handle, version)
_nvmlCheckReturn(ret)
return ret
def nvmlDeviceGetGspFirmwareMode(handle, isEnabled, defaultMode):
fn = _nvmlGetFunctionPointer("nvmlDeviceGetGspFirmwareMode")
ret = fn(handle, isEnabled, defaultMode)
_nvmlCheckReturn(ret)
return ret
def nvmlDeviceGetEncoderCapacity(handle, encoderQueryType):
c_encoder_capacity = c_ulonglong(0)
c_encoderQuery_type = _nvmlEncoderQueryType_t(encoderQueryType)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetEncoderCapacity")
ret = fn(handle, c_encoderQuery_type, byref(c_encoder_capacity))
_nvmlCheckReturn(ret)
return c_encoder_capacity.value
def nvmlDeviceGetVgpuProcessUtilization(handle, timeStamp):
    """Return per-process utilization samples for vGPUs newer than *timeStamp*.

    Same two-call sizing protocol as ``nvmlDeviceGetVgpuUtilization``.
    Returns a list of ``c_nvmlVgpuProcessUtilizationSample_t`` (empty if no
    vGPUs are active).  Raises ``NVMLError`` on any other failure.
    """
    # first call to get the size
    c_vgpu_count = c_uint(0)
    c_time_stamp = c_ulonglong(timeStamp)
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetVgpuProcessUtilization")
    ret = fn(handle, c_time_stamp, byref(c_vgpu_count), None)
    if (ret == NVML_SUCCESS):
        # special case, no active vGPUs
        return []
    elif (ret == NVML_ERROR_INSUFFICIENT_SIZE):
        # typical case
        sampleArray = c_vgpu_count.value * c_nvmlVgpuProcessUtilizationSample_t
        c_samples = sampleArray()
        # make the call again
        ret = fn(handle, c_time_stamp, byref(c_vgpu_count), c_samples)
        _nvmlCheckReturn(ret)
        return c_samples[0:c_vgpu_count.value]
    else:
        # error case
        raise NVMLError(ret)
def nvmlDeviceGetEncoderStats(handle):
c_encoderCount = c_ulonglong(0)
c_encodeFps = c_ulonglong(0)
c_encoderLatency = c_ulonglong(0)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetEncoderStats")
ret = fn(handle, byref(c_encoderCount), byref(c_encodeFps), byref(c_encoderLatency))
_nvmlCheckReturn(ret)
return (c_encoderCount.value, c_encodeFps.value, c_encoderLatency.value)
def nvmlDeviceGetEncoderSessions(handle):
    """Return a list of ``c_nvmlEncoderSession_t`` for the device's active encoder sessions."""
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetEncoderSessions")
    # Probe with a NULL buffer to learn how many sessions are active.
    count = c_uint(0)
    ret = fn(handle, byref(count), None)
    if ret != NVML_SUCCESS:
        raise NVMLError(ret)
    if count.value == 0:
        # no active sessions
        return []
    # Allocate the array and fetch the session records.
    buf = (c_nvmlEncoderSession_t * count.value)()
    _nvmlCheckReturn(fn(handle, byref(count), buf))
    # Slicing a ctypes array yields a plain Python list.
    return buf[0:count.value]
def nvmlDeviceGetFBCStats(handle):
c_fbcStats = c_nvmlFBCStats_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetFBCStats")
ret = fn(handle, byref(c_fbcStats))
_nvmlCheckReturn(ret)
return c_fbcStats
def nvmlDeviceGetFBCSessions(handle):
    """Return a list of ``c_nvmlFBCSession_t`` for the device's active FBC sessions."""
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetFBCSessions")
    # Probe with a NULL buffer to learn how many sessions are active.
    count = c_uint(0)
    ret = fn(handle, byref(count), None)
    if ret != NVML_SUCCESS:
        raise NVMLError(ret)
    if count.value == 0:
        # no active sessions
        return []
    # Allocate the array and fetch the session records.
    buf = (c_nvmlFBCSession_t * count.value)()
    _nvmlCheckReturn(fn(handle, byref(count), buf))
    # Slicing a ctypes array yields a plain Python list.
    return buf[0:count.value]
def nvmlVgpuInstanceGetEncoderStats(vgpuInstance):
c_encoderCount = c_ulonglong(0)
c_encodeFps = c_ulonglong(0)
c_encoderLatency = c_ulonglong(0)
fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetEncoderStats")
ret = fn(vgpuInstance, byref(c_encoderCount), byref(c_encodeFps), byref(c_encoderLatency))
_nvmlCheckReturn(ret)
return (c_encoderCount.value, c_encodeFps.value, c_encoderLatency.value)
def nvmlVgpuInstanceGetEncoderSessions(vgpuInstance):
# first call to get the size
c_session_count = c_uint(0)
fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetEncoderSessions")
ret = fn(vgpuInstance, byref(c_session_count), None)
if (ret == NVML_SUCCESS):
if (c_session_count.value != 0):
# typical case
session_array = c_nvmlEncoderSession_t * c_session_count.value
c_sessions = session_array()
# make the call again
ret = fn(vgpuInstance, byref(c_session_count), c_sessions)
_nvmlCheckReturn(ret)
sessions = []
for i in range(c_session_count.value):
sessions.append(c_sessions[i])
return sessions
else:
return [] # no active sessions
else:
# error case
raise NVMLError(ret)
def nvmlVgpuInstanceGetFBCStats(vgpuInstance):
c_fbcStats = c_nvmlFBCStats_t()
fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetFBCStats")
ret = fn(vgpuInstance, byref(c_fbcStats))
_nvmlCheckReturn(ret)
return c_fbcStats
def nvmlVgpuInstanceGetFBCSessions(vgpuInstance):
# first call to get the size
c_session_count = c_uint(0)
fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetFBCSessions")
ret = fn(vgpuInstance, byref(c_session_count), None)
if (ret == NVML_SUCCESS):
if (c_session_count.value != 0):
# typical case
session_array = c_nvmlFBCSession_t * c_session_count.value
c_sessions = session_array()
# make the call again
ret = fn(vgpuInstance, byref(c_session_count), c_sessions)
_nvmlCheckReturn(ret)
sessions = []
for i in range(c_session_count.value):
sessions.append(c_sessions[i])
return sessions
else:
return [] # no active sessions
else:
# error case
raise NVMLError(ret)
def nvmlDeviceGetProcessUtilization(handle, timeStamp):
    """Return per-process utilization samples newer than *timeStamp*.

    Uses NVML's two-call sizing protocol.  Returns a list of
    ``c_nvmlProcessUtilizationSample_t`` (empty when no processes have
    samples).  Raises ``NVMLError`` on any other failure.
    """
    # first call to get the size
    c_count = c_uint(0)
    c_time_stamp = c_ulonglong(timeStamp)
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetProcessUtilization")
    ret = fn(handle, None, byref(c_count), c_time_stamp)
    if (ret == NVML_SUCCESS):
        # special case, no process samples in the requested window.
        # (Previously this fell into the error branch and raised
        # NVMLError(NVML_SUCCESS); siblings such as
        # nvmlDeviceGetVgpuUtilization return [] here.)
        return []
    elif (ret == NVML_ERROR_INSUFFICIENT_SIZE):
        # typical case
        sampleArray = c_count.value * c_nvmlProcessUtilizationSample_t
        c_samples = sampleArray()
        # make the call again
        ret = fn(handle, c_samples, byref(c_count), c_time_stamp)
        _nvmlCheckReturn(ret)
        return c_samples[0:c_count.value]
    else:
        # error case
        raise NVMLError(ret)
def nvmlVgpuInstanceGetMetadata(vgpuInstance):
    """Return the opaque vGPU metadata as ``c_nvmlVgpuMetadata_t``.

    The first call is made with ``bufferSize == 0`` so the driver reports
    the required size; the (fixed-size) struct is then filled by a second
    call.  Raises ``NVMLError`` on failure.
    """
    fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetMetadata")
    c_vgpuMetadata = c_nvmlVgpuMetadata_t()
    c_bufferSize = c_uint(0)
    ret = fn(vgpuInstance, byref(c_vgpuMetadata), byref(c_bufferSize))
    if (ret == NVML_ERROR_INSUFFICIENT_SIZE):
        # Driver filled in c_bufferSize; retry with the now-known size.
        ret = fn(vgpuInstance, byref(c_vgpuMetadata), byref(c_bufferSize))
    # Any remaining non-success code (including from the retry) raises here.
    # Previously a first-call NVML_SUCCESS incorrectly raised
    # NVMLError(NVML_SUCCESS) via an else branch.
    _nvmlCheckReturn(ret)
    return c_vgpuMetadata
def nvmlDeviceGetVgpuMetadata(handle):
    """Return the opaque pGPU metadata as ``c_nvmlVgpuPgpuMetadata_t``.

    Same two-call sizing protocol as ``nvmlVgpuInstanceGetMetadata``.
    Raises ``NVMLError`` on failure.
    """
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetVgpuMetadata")
    c_vgpuPgpuMetadata = c_nvmlVgpuPgpuMetadata_t()
    c_bufferSize = c_uint(0)
    ret = fn(handle, byref(c_vgpuPgpuMetadata), byref(c_bufferSize))
    if (ret == NVML_ERROR_INSUFFICIENT_SIZE):
        # Driver filled in c_bufferSize; retry with the now-known size.
        ret = fn(handle, byref(c_vgpuPgpuMetadata), byref(c_bufferSize))
    # Any remaining non-success code raises here.  Previously a first-call
    # NVML_SUCCESS incorrectly raised NVMLError(NVML_SUCCESS).
    _nvmlCheckReturn(ret)
    return c_vgpuPgpuMetadata
def nvmlGetVgpuCompatibility(vgpuMetadata, pgpuMetadata):
fn = _nvmlGetFunctionPointer("nvmlGetVgpuCompatibility")
c_vgpuPgpuCompatibility = c_nvmlVgpuPgpuCompatibility_t()
ret = fn(byref(vgpuMetadata), byref(pgpuMetadata), byref(c_vgpuPgpuCompatibility))
_nvmlCheckReturn(ret)
return c_vgpuPgpuCompatibility
@convertStrBytes
def nvmlDeviceGetPgpuMetadataString(handle):
    """Return ``(metadata_string, buffer_size)`` for the physical GPU.

    Same two-call sizing protocol as ``nvmlVgpuInstanceGetMetadata``; the
    buffer is pre-allocated at the maximum opaque-data size.  The string is
    decoded by ``@convertStrBytes``.  Raises ``NVMLError`` on failure.
    """
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetPgpuMetadataString")
    c_pgpuMetadata = create_string_buffer(NVML_VGPU_PGPU_METADATA_OPAQUE_DATA_SIZE)
    c_bufferSize = c_uint(0)
    ret = fn(handle, byref(c_pgpuMetadata), byref(c_bufferSize))
    if (ret == NVML_ERROR_INSUFFICIENT_SIZE):
        # Driver filled in c_bufferSize; retry with the now-known size.
        ret = fn(handle, byref(c_pgpuMetadata), byref(c_bufferSize))
    # Any remaining non-success code raises here.  Previously a first-call
    # NVML_SUCCESS incorrectly raised NVMLError(NVML_SUCCESS).
    _nvmlCheckReturn(ret)
    return (c_pgpuMetadata.value, c_bufferSize.value)
def nvmlDeviceGetVgpuSchedulerLog(handle):
c_vgpu_sched_log = c_nvmlVgpuSchedulerLog_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetVgpuSchedulerLog")
ret = fn(handle, byref(c_vgpu_sched_log))
_nvmlCheckReturn(ret)
return c_vgpu_sched_log
def nvmlDeviceGetVgpuSchedulerState(handle):
c_vgpu_sched_state = c_nvmlVgpuSchedulerGetState_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetVgpuSchedulerState")
ret = fn(handle, byref(c_vgpu_sched_state))
_nvmlCheckReturn(ret)
return c_vgpu_sched_state
def nvmlDeviceGetVgpuSchedulerCapabilities(handle):
c_vgpu_sched_caps = c_nvmlVgpuSchedulerCapabilities_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetVgpuSchedulerCapabilities")
ret = fn(handle, byref(c_vgpu_sched_caps))
_nvmlCheckReturn(ret)
return c_vgpu_sched_caps
def nvmlSetVgpuVersion(vgpuVersion):
fn = _nvmlGetFunctionPointer("nvmlSetVgpuVersion")
ret = fn(byref(vgpuVersion))
_nvmlCheckReturn(ret)
return ret
def nvmlGetVgpuVersion(supported, current):
fn = _nvmlGetFunctionPointer("nvmlGetVgpuVersion")
ret = fn(byref(supported), byref(current))
_nvmlCheckReturn(ret)
return ret
def nvmlVgpuInstanceGetAccountingMode(vgpuInstance):
c_mode = _nvmlEnableState_t()
fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetAccountingMode")
ret = fn(vgpuInstance, byref(c_mode))
_nvmlCheckReturn(ret)
return c_mode.value
def nvmlVgpuInstanceGetAccountingPids(vgpuInstance):
    """Return ``(c_pidCount, c_pidArray)`` of PIDs with accounting stats.

    Both elements are raw ctypes objects (count as ``c_uint``, PIDs as a
    ``c_uint`` array), matching the historical return convention of this
    wrapper.  Raises ``NVMLError`` on failure.
    """
    c_pidCount = c_uint()
    fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetAccountingPids")
    ret = fn(vgpuInstance, byref(c_pidCount), None)
    if (ret == NVML_SUCCESS):
        # No PIDs being accounted.  Previously this path raised
        # NVMLError(NVML_SUCCESS) and left c_pidArray unbound.
        c_pidArray = (c_uint * 0)()
    elif (ret == NVML_ERROR_INSUFFICIENT_SIZE):
        sampleArray = c_pidCount.value * c_uint
        c_pidArray = sampleArray()
        ret = fn(vgpuInstance, byref(c_pidCount), byref(c_pidArray))
        _nvmlCheckReturn(ret)
    else:
        raise NVMLError(ret)
    return (c_pidCount, c_pidArray)
def nvmlVgpuInstanceGetAccountingStats(vgpuInstance, pid):
c_accountingStats = c_nvmlAccountingStats_t()
fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetAccountingStats")
ret = fn(vgpuInstance, pid, byref(c_accountingStats))
_nvmlCheckReturn(ret)
return c_accountingStats
def nvmlVgpuInstanceClearAccountingPids(vgpuInstance):
fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceClearAccountingPids")
ret = fn(vgpuInstance)
_nvmlCheckReturn(ret)
return ret
def nvmlGetExcludedDeviceCount():
c_count = c_uint()
fn = _nvmlGetFunctionPointer("nvmlGetExcludedDeviceCount")
ret = fn(byref(c_count))
_nvmlCheckReturn(ret)
return c_count.value
def nvmlGetExcludedDeviceInfoByIndex(index):
c_index = c_uint(index)
info = c_nvmlExcludedDeviceInfo_t()
fn = _nvmlGetFunctionPointer("nvmlGetExcludedDeviceInfoByIndex")
ret = fn(c_index, byref(info))
_nvmlCheckReturn(ret)
return info
def nvmlDeviceGetHostVgpuMode(handle):
c_host_vgpu_mode = _nvmlHostVgpuMode_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetHostVgpuMode")
ret = fn(handle, byref(c_host_vgpu_mode))
_nvmlCheckReturn(ret)
return c_host_vgpu_mode.value
def nvmlDeviceSetMigMode(device, mode):
c_activationStatus = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceSetMigMode")
ret = fn(device, mode, byref(c_activationStatus))
_nvmlCheckReturn(ret)
return c_activationStatus.value
def nvmlDeviceGetMigMode(device):
c_currentMode = c_uint()
c_pendingMode = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMigMode")
ret = fn(device, byref(c_currentMode), byref(c_pendingMode))
_nvmlCheckReturn(ret)
return [c_currentMode.value, c_pendingMode.value]
def nvmlDeviceGetGpuInstanceProfileInfo(device, profile, version=2):
    """Return GPU-instance profile info for *profile* on *device*.

    ``version`` selects the struct/entry-point pair: 2 (default) uses the
    versioned ``nvmlDeviceGetGpuInstanceProfileInfoV`` API with
    ``c_nvmlGpuInstanceProfileInfo_v2_t``; 1 uses the legacy API.  Any
    other value raises ``NVMLError(NVML_ERROR_FUNCTION_NOT_FOUND)``.
    """
    if version == 2:
        c_info = c_nvmlGpuInstanceProfileInfo_v2_t()
        fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpuInstanceProfileInfoV")
    elif version == 1:
        c_info = c_nvmlGpuInstanceProfileInfo_t()
        fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpuInstanceProfileInfo")
    else:
        raise NVMLError(NVML_ERROR_FUNCTION_NOT_FOUND)
    ret = fn(device, profile, byref(c_info))
    _nvmlCheckReturn(ret)
    return c_info
# Define function alias for the API exposed by NVML
nvmlDeviceGetGpuInstanceProfileInfoV = nvmlDeviceGetGpuInstanceProfileInfo
def nvmlDeviceGetGpuInstanceRemainingCapacity(device, profileId):
c_count = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpuInstanceRemainingCapacity")
ret = fn(device, profileId, byref(c_count))
_nvmlCheckReturn(ret)
return c_count.value
def nvmlDeviceGetGpuInstancePossiblePlacements(device, profileId, placementsRef, countRef):
fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpuInstancePossiblePlacements_v2")
ret = fn(device, profileId, placementsRef, countRef)
_nvmlCheckReturn(ret)
return ret
def nvmlDeviceCreateGpuInstance(device, profileId):
c_instance = c_nvmlGpuInstance_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceCreateGpuInstance")
ret = fn(device, profileId, byref(c_instance))
_nvmlCheckReturn(ret)
return c_instance
def nvmlDeviceCreateGpuInstanceWithPlacement(device, profileId, placement):
c_instance = c_nvmlGpuInstance_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceCreateGpuInstanceWithPlacement")
ret = fn(device, profileId, placement, byref(c_instance))
_nvmlCheckReturn(ret)
return c_instance
def nvmlGpuInstanceDestroy(gpuInstance):
fn = _nvmlGetFunctionPointer("nvmlGpuInstanceDestroy")
ret = fn(gpuInstance)
_nvmlCheckReturn(ret)
return ret
def nvmlDeviceGetGpuInstances(device, profileId, gpuInstancesRef, countRef):
fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpuInstances")
ret = fn(device, profileId, gpuInstancesRef, countRef)
_nvmlCheckReturn(ret)
return ret
def nvmlDeviceGetGpuInstanceById(device, gpuInstanceId):
c_instance = c_nvmlGpuInstance_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpuInstanceById")
ret = fn(device, gpuInstanceId, byref(c_instance))
_nvmlCheckReturn(ret)
return c_instance
def nvmlGpuInstanceGetInfo(gpuInstance):
c_info = c_nvmlGpuInstanceInfo_t()
fn = _nvmlGetFunctionPointer("nvmlGpuInstanceGetInfo")
ret = fn(gpuInstance, byref(c_info))
_nvmlCheckReturn(ret)
return c_info
def nvmlGpuInstanceGetComputeInstanceProfileInfo(device, profile, engProfile, version=2):
    """Return compute-instance profile info for *profile*/*engProfile*.

    NOTE(review): despite the name ``device``, the first argument is passed
    straight to a GPU-instance entry point and presumably is a GPU-instance
    handle; the name is kept for keyword-compatibility with existing callers.
    ``version`` selects the struct/entry-point pair (2 = versioned API,
    1 = legacy); any other value raises
    ``NVMLError(NVML_ERROR_FUNCTION_NOT_FOUND)``.
    """
    if version == 2:
        c_info = c_nvmlComputeInstanceProfileInfo_v2_t()
        fn = _nvmlGetFunctionPointer("nvmlGpuInstanceGetComputeInstanceProfileInfoV")
    elif version == 1:
        c_info = c_nvmlComputeInstanceProfileInfo_t()
        fn = _nvmlGetFunctionPointer("nvmlGpuInstanceGetComputeInstanceProfileInfo")
    else:
        raise NVMLError(NVML_ERROR_FUNCTION_NOT_FOUND)
    ret = fn(device, profile, engProfile, byref(c_info))
    _nvmlCheckReturn(ret)
    return c_info
# Define function alias for the API exposed by NVML
nvmlGpuInstanceGetComputeInstanceProfileInfoV = nvmlGpuInstanceGetComputeInstanceProfileInfo
def nvmlGpuInstanceGetComputeInstanceRemainingCapacity(gpuInstance, profileId):
c_count = c_uint()
fn = _nvmlGetFunctionPointer("nvmlGpuInstanceGetComputeInstanceRemainingCapacity")
ret = fn(gpuInstance, profileId, byref(c_count))
_nvmlCheckReturn(ret)
return c_count.value
def nvmlGpuInstanceGetComputeInstancePossiblePlacements(gpuInstance, profileId, placementsRef, countRef):
fn = _nvmlGetFunctionPointer("nvmlGpuInstanceGetComputeInstancePossiblePlacements")
ret = fn(gpuInstance, profileId, placementsRef, countRef)
_nvmlCheckReturn(ret)
return ret
def nvmlGpuInstanceCreateComputeInstance(gpuInstance, profileId):
c_instance = c_nvmlComputeInstance_t()
fn = _nvmlGetFunctionPointer("nvmlGpuInstanceCreateComputeInstance")
ret = fn(gpuInstance, profileId, byref(c_instance))
_nvmlCheckReturn(ret)
return c_instance
def nvmlGpuInstanceCreateComputeInstanceWithPlacement(gpuInstance, profileId, placement):
c_instance = c_nvmlComputeInstance_t()
fn = _nvmlGetFunctionPointer("nvmlGpuInstanceCreateComputeInstanceWithPlacement")
ret = fn(gpuInstance, profileId, placement, byref(c_instance))
_nvmlCheckReturn(ret)
return c_instance
def nvmlComputeInstanceDestroy(computeInstance):
fn = _nvmlGetFunctionPointer("nvmlComputeInstanceDestroy")
ret = fn(computeInstance)
_nvmlCheckReturn(ret)
return ret
def nvmlGpuInstanceGetComputeInstances(gpuInstance, profileId, computeInstancesRef, countRef):
fn = _nvmlGetFunctionPointer("nvmlGpuInstanceGetComputeInstances")
ret = fn(gpuInstance, profileId, computeInstancesRef, countRef)
_nvmlCheckReturn(ret)
return ret
def nvmlGpuInstanceGetComputeInstanceById(gpuInstance, computeInstanceId):
c_instance = c_nvmlComputeInstance_t()
fn = _nvmlGetFunctionPointer("nvmlGpuInstanceGetComputeInstanceById")
ret = fn(gpuInstance, computeInstanceId, byref(c_instance))
_nvmlCheckReturn(ret)
return c_instance
def nvmlComputeInstanceGetInfo_v2(computeInstance):
c_info = c_nvmlComputeInstanceInfo_t()
fn = _nvmlGetFunctionPointer("nvmlComputeInstanceGetInfo_v2")
ret = fn(computeInstance, byref(c_info))
_nvmlCheckReturn(ret)
return c_info
def nvmlComputeInstanceGetInfo(computeInstance):
return nvmlComputeInstanceGetInfo_v2(computeInstance)
def nvmlDeviceIsMigDeviceHandle(device):
c_isMigDevice = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceIsMigDeviceHandle")
ret = fn(device, byref(c_isMigDevice))
_nvmlCheckReturn(ret)
return c_isMigDevice
def nvmlDeviceGetGpuInstanceId(device):
c_gpuInstanceId = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpuInstanceId")
ret = fn(device, byref(c_gpuInstanceId))
_nvmlCheckReturn(ret)
return c_gpuInstanceId.value
def nvmlDeviceGetComputeInstanceId(device):
c_computeInstanceId = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetComputeInstanceId")
ret = fn(device, byref(c_computeInstanceId))
_nvmlCheckReturn(ret)
return c_computeInstanceId.value
def nvmlDeviceGetMaxMigDeviceCount(device):
c_count = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMaxMigDeviceCount")
ret = fn(device, byref(c_count))
_nvmlCheckReturn(ret)
return c_count.value
def nvmlDeviceGetMigDeviceHandleByIndex(device, index):
c_index = c_uint(index)
migDevice = c_nvmlDevice_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMigDeviceHandleByIndex")
ret = fn(device, c_index, byref(migDevice))
_nvmlCheckReturn(ret)
return migDevice
def nvmlDeviceGetDeviceHandleFromMigDeviceHandle(migDevice):
device = c_nvmlDevice_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetDeviceHandleFromMigDeviceHandle")
ret = fn(migDevice, byref(device))
_nvmlCheckReturn(ret)
return device
def nvmlDeviceGetAttributes_v2(device):
c_attrs = c_nvmlDeviceAttributes()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetAttributes_v2")
ret = fn(device, byref(c_attrs))
_nvmlCheckReturn(ret)
return c_attrs
def nvmlDeviceGetAttributes(device):
return nvmlDeviceGetAttributes_v2(device)
def nvmlDeviceGetRemappedRows(device):
fn = _nvmlGetFunctionPointer("nvmlDeviceGetRemappedRows")
c_corr = c_uint()
c_unc = c_uint()
c_bpending = c_uint()
c_bfailure = c_uint()
ret = fn(device, byref(c_corr), byref(c_unc), byref(c_bpending), byref(c_bfailure))
_nvmlCheckReturn(ret)
return (c_corr.value, c_unc.value, c_bpending.value, c_bfailure.value)
def nvmlDeviceGetRowRemapperHistogram(device):
c_vals = c_nvmlRowRemapperHistogramValues()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetRowRemapperHistogram")
ret = fn(device, byref(c_vals))
_nvmlCheckReturn(ret)
return c_vals
def nvmlDeviceGetArchitecture(device):
arch = _nvmlDeviceArchitecture_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetArchitecture")
ret = fn(device, byref(arch))
_nvmlCheckReturn(ret)
return arch.value
def nvmlDeviceGetBusType(device):
c_busType = _nvmlBusType_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetBusType")
ret = fn(device, byref(c_busType))
_nvmlCheckReturn(ret)
return c_busType.value
def nvmlDeviceGetIrqNum(device):
c_irqNum = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetIrqNum")
ret = fn(device, byref(c_irqNum))
_nvmlCheckReturn(ret)
return c_irqNum.value
def nvmlDeviceGetNumGpuCores(device):
c_numCores = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetNumGpuCores")
ret = fn(device, byref(c_numCores))
_nvmlCheckReturn(ret)
return c_numCores.value
def nvmlDeviceGetPowerSource(device):
c_powerSource = _nvmlPowerSource_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPowerSource")
ret = fn(device, byref(c_powerSource))
_nvmlCheckReturn(ret)
return c_powerSource.value
def nvmlDeviceGetMemoryBusWidth(device):
c_memBusWidth = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMemoryBusWidth")
ret = fn(device, byref(c_memBusWidth))
_nvmlCheckReturn(ret)
return c_memBusWidth.value
def nvmlDeviceGetPcieLinkMaxSpeed(device):
c_speed = _nvmlPcieLinkMaxSpeed_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPcieLinkMaxSpeed")
ret = fn(device, byref(c_speed))
_nvmlCheckReturn(ret)
return c_speed.value
def nvmlDeviceGetAdaptiveClockInfoStatus(device):
c_adaptiveClockInfoStatus = _nvmlAdaptiveClockInfoStatus_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetAdaptiveClockInfoStatus")
ret = fn(device, byref(c_adaptiveClockInfoStatus))
_nvmlCheckReturn(ret)
return c_adaptiveClockInfoStatus.value
def nvmlDeviceGetPcieSpeed(device):
c_speed = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPcieSpeed")
ret = fn(device, byref(c_speed))
_nvmlCheckReturn(ret)
return c_speed.value
def nvmlDeviceGetDynamicPstatesInfo(device, c_dynamicpstatesinfo):
fn = _nvmlGetFunctionPointer("nvmlDeviceGetDynamicPstatesInfo")
ret = fn(device, c_dynamicpstatesinfo)
_nvmlCheckReturn(ret)
return ret
def nvmlDeviceSetFanSpeed_v2(handle, index, speed):
fn = _nvmlGetFunctionPointer("nvmlDeviceSetFanSpeed_v2")
ret = fn(handle, index, speed)
_nvmlCheckReturn(ret)
return ret
def nvmlDeviceGetThermalSettings(device, sensorindex, c_thermalsettings):
fn = _nvmlGetFunctionPointer("nvmlDeviceGetThermalSettings")
ret = fn(device, sensorindex, c_thermalsettings)
_nvmlCheckReturn(ret)
return ret
def nvmlDeviceGetMinMaxClockOfPState(device, type, pstate, minClockMHz, maxClockMHz):
    """Query the min/max clock (MHz) of clock *type* at performance state *pstate*.

    ``minClockMHz``/``maxClockMHz`` are ctypes out-parameters filled by the
    driver.  Raises ``NVMLError`` on failure.
    """
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetMinMaxClockOfPState")
    # `pstate` is an nvmlPstates_t enum value, not a clock type; the original
    # wrapped it in _nvmlClockType_t, which only worked because both enums are
    # c_uint under the hood.
    ret = fn(device, _nvmlClockType_t(type), _nvmlPstates_t(pstate), minClockMHz, maxClockMHz)
    _nvmlCheckReturn(ret)
    return ret
def nvmlDeviceGetSupportedPerformanceStates(device):
    """Return the list of performance states (P-states) the device supports.

    The driver fills a fixed-size array of NVML_MAX_GPU_PERF_PSTATES entries
    and pads unused slots with NVML_PSTATE_UNKNOWN, which are filtered out.
    Raises ``NVMLError`` on failure.
    """
    pstates = []
    c_count = c_uint(NVML_MAX_GPU_PERF_PSTATES)
    # Size is passed in bytes, hence sizeof(c_uint) per entry.
    c_size = sizeof(c_uint) * c_count.value
    # NOTE: use 'c_uint' to represent the size of the nvmlPstate_t enumeration.
    pstates_array = _nvmlPstates_t * c_count.value
    c_pstates = pstates_array()
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedPerformanceStates")
    ret = fn(device, c_pstates, c_size)
    _nvmlCheckReturn(ret)
    for value in c_pstates:
        if value != NVML_PSTATE_UNKNOWN:
            pstates.append(value)
    return pstates
def nvmlDeviceGetGpcClkVfOffset(device):
offset = c_int32()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpcClkVfOffset")
ret = fn(device, byref(offset))
_nvmlCheckReturn(ret)
return offset.value
def nvmlDeviceSetGpcClkVfOffset(device, offset):
c_offset = c_int32(offset)
fn = _nvmlGetFunctionPointer("nvmlDeviceSetGpcClkVfOffset")
ret = fn(device, c_offset)
_nvmlCheckReturn(ret)
return ret
def nvmlDeviceGetGpcClkMinMaxVfOffset(device, minOffset, maxOffset):
fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpcClkMinMaxVfOffset")
ret = fn(device, minOffset, maxOffset)
_nvmlCheckReturn(ret)
return ret
def nvmlDeviceGetMemClkVfOffset(device):
offset = c_int32()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMemClkVfOffset")
ret = fn(device, byref(offset))
_nvmlCheckReturn(ret)
return offset.value
def nvmlDeviceSetMemClkVfOffset(device, offset):
c_offset = c_int32(offset)
fn = _nvmlGetFunctionPointer("nvmlDeviceSetMemClkVfOffset")
ret = fn(device, c_offset)
_nvmlCheckReturn(ret)
return ret
def nvmlDeviceGetMemClkMinMaxVfOffset(device, minOffset, maxOffset):
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMemClkMinMaxVfOffset")
ret = fn(device, minOffset, maxOffset)
_nvmlCheckReturn(ret)
return ret
## GPM ##
#########
## Enums/defines
#### GPM Metric Identifiers
NVML_GPM_METRIC_GRAPHICS_UTIL = 1 # Percentage of time any compute/graphics app was active on the GPU. 0.0 - 100.0
NVML_GPM_METRIC_SM_UTIL = 2 # Percentage of SMs that were busy. 0.0 - 100.0
NVML_GPM_METRIC_SM_OCCUPANCY = 3 # Percentage of warps that were active vs theoretical maximum. 0.0 - 100.0
NVML_GPM_METRIC_INTEGER_UTIL = 4 # Percentage of time the GPU's SMs were doing integer operations. 0.0 - 100.0
NVML_GPM_METRIC_ANY_TENSOR_UTIL = 5 # Percentage of time the GPU's SMs were doing ANY tensor operations. 0.0 - 100.0
NVML_GPM_METRIC_DFMA_TENSOR_UTIL = 6 # Percentage of time the GPU's SMs were doing DFMA tensor operations. 0.0 - 100.0
NVML_GPM_METRIC_HMMA_TENSOR_UTIL = 7 # Percentage of time the GPU's SMs were doing HMMA tensor operations. 0.0 - 100.0
NVML_GPM_METRIC_IMMA_TENSOR_UTIL = 9 # Percentage of time the GPU's SMs were doing IMMA tensor operations. 0.0 - 100.0
NVML_GPM_METRIC_DRAM_BW_UTIL = 10 # Percentage of DRAM bw used vs theoretical maximum. 0.0 - 100.0
NVML_GPM_METRIC_FP64_UTIL = 11 # Percentage of time the GPU's SMs were doing non-tensor FP64 math. 0.0 - 100.0
NVML_GPM_METRIC_FP32_UTIL = 12 # Percentage of time the GPU's SMs were doing non-tensor FP32 math. 0.0 - 100.0
NVML_GPM_METRIC_FP16_UTIL = 13 # Percentage of time the GPU's SMs were doing non-tensor FP16 math. 0.0 - 100.0
NVML_GPM_METRIC_PCIE_TX_PER_SEC = 20 # PCIe traffic from this GPU in MiB/sec
NVML_GPM_METRIC_PCIE_RX_PER_SEC = 21 # PCIe traffic to this GPU in MiB/sec
NVML_GPM_METRIC_NVDEC_0_UTIL = 30 # Percent utilization of NVDEC 0. 0.0 - 100.0
NVML_GPM_METRIC_NVDEC_1_UTIL = 31 # Percent utilization of NVDEC 1. 0.0 - 100.0
NVML_GPM_METRIC_NVDEC_2_UTIL = 32 # Percent utilization of NVDEC 2. 0.0 - 100.0
NVML_GPM_METRIC_NVDEC_3_UTIL = 33 # Percent utilization of NVDEC 3. 0.0 - 100.0
NVML_GPM_METRIC_NVDEC_4_UTIL = 34 # Percent utilization of NVDEC 4. 0.0 - 100.0
NVML_GPM_METRIC_NVDEC_5_UTIL = 35 # Percent utilization of NVDEC 5. 0.0 - 100.0
NVML_GPM_METRIC_NVDEC_6_UTIL = 36 # Percent utilization of NVDEC 6. 0.0 - 100.0
NVML_GPM_METRIC_NVDEC_7_UTIL = 37 # Percent utilization of NVDEC 7. 0.0 - 100.0
NVML_GPM_METRIC_NVJPG_0_UTIL = 40 # Percent utilization of NVJPG 0. 0.0 - 100.0
NVML_GPM_METRIC_NVJPG_1_UTIL = 41 # Percent utilization of NVJPG 1. 0.0 - 100.0
NVML_GPM_METRIC_NVJPG_2_UTIL = 42 # Percent utilization of NVJPG 2. 0.0 - 100.0
NVML_GPM_METRIC_NVJPG_3_UTIL = 43 # Percent utilization of NVJPG 3. 0.0 - 100.0
NVML_GPM_METRIC_NVJPG_4_UTIL = 44 # Percent utilization of NVJPG 4. 0.0 - 100.0
NVML_GPM_METRIC_NVJPG_5_UTIL = 45 # Percent utilization of NVJPG 5. 0.0 - 100.0
NVML_GPM_METRIC_NVJPG_6_UTIL = 46 # Percent utilization of NVJPG 6. 0.0 - 100.0
NVML_GPM_METRIC_NVJPG_7_UTIL = 47 # Percent utilization of NVJPG 7. 0.0 - 100.0
NVML_GPM_METRIC_NVOFA_0_UTIL = 50 # Percent utilization of NVOFA 0. 0.0 - 100.0
NVML_GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC = 60 # NvLink read bandwidth for all links in MiB/sec
NVML_GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC = 61 # NvLink write bandwidth for all links in MiB/sec
NVML_GPM_METRIC_NVLINK_L0_RX_PER_SEC = 62 # NvLink read bandwidth for link 0 in MiB/sec
NVML_GPM_METRIC_NVLINK_L0_TX_PER_SEC = 63 # NvLink write bandwidth for link 0 in MiB/sec
NVML_GPM_METRIC_NVLINK_L1_RX_PER_SEC = 64 # NvLink read bandwidth for link 1 in MiB/sec
NVML_GPM_METRIC_NVLINK_L1_TX_PER_SEC = 65 # NvLink write bandwidth for link 1 in MiB/sec
NVML_GPM_METRIC_NVLINK_L2_RX_PER_SEC = 66 # NvLink read bandwidth for link 2 in MiB/sec
NVML_GPM_METRIC_NVLINK_L2_TX_PER_SEC = 67 # NvLink write bandwidth for link 2 in MiB/sec
NVML_GPM_METRIC_NVLINK_L3_RX_PER_SEC = 68 # NvLink read bandwidth for link 3 in MiB/sec
NVML_GPM_METRIC_NVLINK_L3_TX_PER_SEC = 69 # NvLink write bandwidth for link 3 in MiB/sec
NVML_GPM_METRIC_NVLINK_L4_RX_PER_SEC = 70 # NvLink read bandwidth for link 4 in MiB/sec
NVML_GPM_METRIC_NVLINK_L4_TX_PER_SEC = 71 # NvLink write bandwidth for link 4 in MiB/sec
NVML_GPM_METRIC_NVLINK_L5_RX_PER_SEC = 72 # NvLink read bandwidth for link 5 in MiB/sec
NVML_GPM_METRIC_NVLINK_L5_TX_PER_SEC = 73 # NvLink write bandwidth for link 5 in MiB/sec
NVML_GPM_METRIC_NVLINK_L6_RX_PER_SEC = 74 # NvLink read bandwidth for link 6 in MiB/sec
NVML_GPM_METRIC_NVLINK_L6_TX_PER_SEC = 75 # NvLink write bandwidth for link 6 in MiB/sec
NVML_GPM_METRIC_NVLINK_L7_RX_PER_SEC = 76 # NvLink read bandwidth for link 7 in MiB/sec
NVML_GPM_METRIC_NVLINK_L7_TX_PER_SEC = 77 # NvLink write bandwidth for link 7 in MiB/sec
NVML_GPM_METRIC_NVLINK_L8_RX_PER_SEC = 78 # NvLink read bandwidth for link 8 in MiB/sec
NVML_GPM_METRIC_NVLINK_L8_TX_PER_SEC = 79 # NvLink write bandwidth for link 8 in MiB/sec
NVML_GPM_METRIC_NVLINK_L9_RX_PER_SEC = 80 # NvLink read bandwidth for link 9 in MiB/sec
NVML_GPM_METRIC_NVLINK_L9_TX_PER_SEC = 81 # NvLink write bandwidth for link 9 in MiB/sec
NVML_GPM_METRIC_NVLINK_L10_RX_PER_SEC = 82 # NvLink read bandwidth for link 10 in MiB/sec
NVML_GPM_METRIC_NVLINK_L10_TX_PER_SEC = 83 # NvLink write bandwidth for link 10 in MiB/sec
NVML_GPM_METRIC_NVLINK_L11_RX_PER_SEC = 84 # NvLink read bandwidth for link 11 in MiB/sec
NVML_GPM_METRIC_NVLINK_L11_TX_PER_SEC = 85 # NvLink write bandwidth for link 11 in MiB/sec
NVML_GPM_METRIC_NVLINK_L12_RX_PER_SEC = 86 # NvLink read bandwidth for link 12 in MiB/sec
NVML_GPM_METRIC_NVLINK_L12_TX_PER_SEC = 87 # NvLink write bandwidth for link 12 in MiB/sec
NVML_GPM_METRIC_NVLINK_L13_RX_PER_SEC = 88 # NvLink read bandwidth for link 13 in MiB/sec
NVML_GPM_METRIC_NVLINK_L13_TX_PER_SEC = 89 # NvLink write bandwidth for link 13 in MiB/sec
NVML_GPM_METRIC_NVLINK_L14_RX_PER_SEC = 90 # NvLink read bandwidth for link 14 in MiB/sec
NVML_GPM_METRIC_NVLINK_L14_TX_PER_SEC = 91 # NvLink write bandwidth for link 14 in MiB/sec
NVML_GPM_METRIC_NVLINK_L15_RX_PER_SEC = 92 # NvLink read bandwidth for link 15 in MiB/sec
NVML_GPM_METRIC_NVLINK_L15_TX_PER_SEC = 93 # NvLink write bandwidth for link 15 in MiB/sec
NVML_GPM_METRIC_NVLINK_L16_RX_PER_SEC = 94 # NvLink read bandwidth for link 16 in MiB/sec
NVML_GPM_METRIC_NVLINK_L16_TX_PER_SEC = 95 # NvLink write bandwidth for link 16 in MiB/sec
NVML_GPM_METRIC_NVLINK_L17_RX_PER_SEC = 96 # NvLink read bandwidth for link 17 in MiB/sec
NVML_GPM_METRIC_NVLINK_L17_TX_PER_SEC = 97 # NvLink write bandwidth for link 17 in MiB/sec
NVML_GPM_METRIC_MAX = 98
## Structs
| c_nvmlRowRemapperHistogramValues |
python | pydata__xarray | xarray/core/types.py | {
"start": 9720,
"end": 10328
class ____(Protocol[_T_co]):
    """Structural type for arbitrarily nested sequences.

    Matches any object that looks like a reversible sequence whose items are
    either ``_T_co`` values or further nested sequences of ``_T_co`` (e.g.
    lists of lists).  All members are protocol stubs only — there is no
    runtime behavior here.
    """

    def __len__(self, /) -> int: ...
    # Integer indexing may yield either a leaf value or a nested sequence;
    # slice indexing always yields a nested sequence.
    @overload
    def __getitem__(self, index: int, /) -> _T_co | NestedSequence[_T_co]: ...
    @overload
    def __getitem__(self, index: slice, /) -> NestedSequence[_T_co]: ...
    def __iter__(self, /) -> Iterator[_T_co | NestedSequence[_T_co]]: ...
    def __reversed__(self, /) -> Iterator[_T_co | NestedSequence[_T_co]]: ...
_T = TypeVar("_T")
NestedDict = dict[str, "NestedDict[_T] | _T"]
AnyStr_co = TypeVar("AnyStr_co", str, bytes, covariant=True)
# this is shamelessly stolen from pandas._typing
@runtime_checkable
| NestedSequence |
python | run-llama__llama_index | llama-index-integrations/voice_agents/llama-index-voice-agents-openai/llama_index/voice_agents/openai/types.py | {
"start": 638,
"end": 802
class ____(BaseModel):
    """JSON-schema style parameter block for an OpenAI tool definition.

    Mirrors the ``parameters`` object of a function tool: always an
    ``object`` schema with per-property definitions plus the list of
    property names that are required.
    """

    # Discriminator: function-tool parameter schemas are always "object".
    type: Literal["object"] = Field(default="object")
    # Parameter name -> its schema definition.
    properties: Dict[str, ParamPropertyDefinition]
    # Names of the parameters callers must supply.
    required: List[str]
| ToolParameters |
python | doocs__leetcode | solution/1200-1299/1293.Shortest Path in a Grid with Obstacles Elimination/Solution.py | {
"start": 0,
"end": 977
class ____:
    def shortestPath(self, grid: List[List[int]], k: int) -> int:
        """Shortest path length from (0, 0) to (m-1, n-1) moving in four
        directions, when up to ``k`` obstacles may be eliminated.

        Returns -1 when no such path exists.  BFS over states
        (row, col, remaining eliminations).
        """
        rows, cols = len(grid), len(grid[0])
        # With this many eliminations any Manhattan-length path is feasible.
        if k >= rows + cols - 3:
            return rows + cols - 2
        start = (0, 0, k)
        frontier = deque([start])
        seen = {start}
        steps = 0
        while frontier:
            steps += 1
            for _ in range(len(frontier)):
                r, c, quota = frontier.popleft()
                for nr, nc in ((r, c - 1), (r, c + 1), (r + 1, c), (r - 1, c)):
                    if not (0 <= nr < rows and 0 <= nc < cols):
                        continue
                    if nr == rows - 1 and nc == cols - 1:
                        return steps
                    if grid[nr][nc] == 0 and (nr, nc, quota) not in seen:
                        seen.add((nr, nc, quota))
                        frontier.append((nr, nc, quota))
                    elif grid[nr][nc] == 1 and quota and (nr, nc, quota - 1) not in seen:
                        seen.add((nr, nc, quota - 1))
                        frontier.append((nr, nc, quota - 1))
        return -1
| Solution |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/triggers/redshift_cluster.py | {
"start": 4060,
"end": 5497
class ____(AwsBaseWaiterTrigger):
    """
    Trigger for RedshiftCreateClusterSnapshotOperator.

    The trigger will asynchronously poll the boto3 API and wait for the
    Redshift cluster snapshot to be in the `available` state.

    :param cluster_identifier: A unique identifier for the cluster.
    :param waiter_delay: The amount of time in seconds to wait between attempts.
    :param waiter_max_attempts: The maximum number of attempts to be made.
    :param aws_conn_id: The Airflow connection used for AWS credentials.
    """

    def __init__(
        self,
        cluster_identifier: str,
        aws_conn_id: str | None = "aws_default",
        waiter_delay: int = 15,
        waiter_max_attempts: int = 999999,
    ):
        # `serialized_fields` carries everything specific to this trigger so
        # it can be re-created on the triggerer process; the remaining kwargs
        # configure the generic boto3-waiter polling in the base class.
        super().__init__(
            serialized_fields={"cluster_identifier": cluster_identifier},
            waiter_name="snapshot_available",
            waiter_args={"ClusterIdentifier": cluster_identifier},
            failure_message="Create Cluster Snapshot Failed",
            status_message="Redshift Cluster Snapshot in progress",
            # NOTE(review): this queries *cluster* status while waiting on a
            # snapshot waiter — confirm `Clusters[].ClusterStatus` is the
            # intended progress query here.
            status_queries=["Clusters[].ClusterStatus"],
            return_value=None,
            waiter_delay=waiter_delay,
            waiter_max_attempts=waiter_max_attempts,
            aws_conn_id=aws_conn_id,
        )

    def hook(self) -> AwsGenericHook:
        """Return the Redshift hook whose client runs the waiter."""
        return RedshiftHook(aws_conn_id=self.aws_conn_id)
| RedshiftCreateClusterSnapshotTrigger |
python | optuna__optuna | optuna/pruners/_base.py | {
"start": 28,
"end": 910
class ____(abc.ABC):
    """Base class for pruners."""

    @abc.abstractmethod
    def prune(self, study: "optuna.study.Study", trial: "optuna.trial.FrozenTrial") -> bool:
        """Judge whether the trial should be pruned based on the reported values.

        Note that this method is not supposed to be called by library users. Instead,
        :func:`optuna.trial.Trial.report` and :func:`optuna.trial.Trial.should_prune` provide
        user interfaces to implement pruning mechanism in an objective function.

        Args:
            study:
                Study object of the target study.
            trial:
                FrozenTrial object of the target trial.
                Take a copy before modifying this object.

        Returns:
            A boolean value representing whether the trial should be pruned.
        """
        # Concrete pruners must override this; the abstractmethod decorator
        # already prevents instantiation, the raise is a defensive backstop.
        raise NotImplementedError
| BasePruner |
python | Textualize__textual | tests/tree/test_tree_messages.py | {
"start": 7064,
"end": 8875
class ____(App[None]):
    """Testing app related to https://github.com/Textualize/textual/issues/3869"""

    def __init__(self, auto_expand: bool, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # Recorded (message class name, tree id) pairs, asserted on by the
        # tests below.
        self.messages: list[tuple[str, str]] = []
        # Forwarded to TreeWrapper when composing.
        self._auto_expand = auto_expand

    def compose(self) -> ComposeResult:
        """Compose the child widgets."""
        yield TreeWrapper(self._auto_expand)

    def record(
        self,
        event: (
            Tree.NodeExpanded[None]
            | Tree.NodeCollapsed[None]
            | Tree.NodeHighlighted[None]
        ),
    ) -> None:
        """Log the event's class name and the id of the tree it came from."""
        self.messages.append(
            (event.__class__.__name__, event.node.tree.id or "Unknown")
        )

    def on_tree_node_expanded(self, event: Tree.NodeExpanded[None]) -> None:
        self.record(event)

    def on_tree_node_collapsed(self, event: Tree.NodeCollapsed[None]) -> None:
        self.record(event)

    def on_tree_node_highlighted(self, event: Tree.NodeHighlighted[None]) -> None:
        self.record(event)
async def test_expand_node_from_code() -> None:
    """Expanding a node from code should result in the appropriate message."""
    # auto_expand=False so the tree starts collapsed; clicking the widget
    # with id #expander (defined in TreeWrapper, not shown here) presumably
    # expands a node programmatically.
    async with TreeViaCodeApp(False).run_test() as pilot:
        await pilot.click("#expander")
        assert pilot.app.messages == [
            ("NodeHighlighted", "test-tree"),
            ("NodeExpanded", "test-tree"),
        ]
async def test_collapse_node_from_code() -> None:
    """Collapsing a node from code should result in the appropriate message."""
    # auto_expand=True so there is something to collapse; #collapser is the
    # counterpart widget defined in TreeWrapper (not shown here).
    async with TreeViaCodeApp(True).run_test() as pilot:
        await pilot.click("#collapser")
        assert pilot.app.messages == [
            ("NodeHighlighted", "test-tree"),
            ("NodeCollapsed", "test-tree"),
        ]
| TreeViaCodeApp |
python | scipy__scipy | scipy/optimize/_differentiable_functions.py | {
"start": 1495,
"end": 3776
} | class ____:
"""
Wrapper class for hess calculation via finite differences
"""
def __init__(
self,
hess,
x0=None,
grad=None,
args=None,
finite_diff_options=None,
):
self.hess = hess
self.grad = grad
self.args = [] if args is None else args
self.finite_diff_options = finite_diff_options
# keep track of any finite difference function evaluations for grad
self.ngev = 0
self.nhev = 0
self.H = None
self._hess_func = None
if callable(hess):
self.H = hess(np.copy(x0), *args)
self.nhev += 1
if sps.issparse(self.H):
self._hess_func = "sparse_callable"
self.H = sps.csr_array(self.H)
elif isinstance(self.H, LinearOperator):
self._hess_func = "linearoperator_callable"
else:
# dense
self._hess_func = "dense_callable"
self.H = np.atleast_2d(np.asarray(self.H))
elif hess in FD_METHODS:
self._hess_func = "fd_hess"
def __call__(self, x, f0=None, **kwds):
match self._hess_func:
case "sparse_callable":
_h = self._sparse_callable
case "linearoperator_callable":
_h = self._linearoperator_callable
case "dense_callable":
_h = self._dense_callable
case "fd_hess":
_h = self._fd_hess
return _h(np.copy(x), f0=f0)
def _fd_hess(self, x, f0=None, **kwds):
self.H, dct = approx_derivative(
self.grad, x, f0=f0, **self.finite_diff_options
)
self.ngev += dct["nfev"]
return self.H
def _sparse_callable(self, x, **kwds):
self.nhev += 1
self.H = sps.csr_array(self.hess(x, *self.args))
return self.H
def _dense_callable(self, x, **kwds):
self.nhev += 1
self.H = np.atleast_2d(
np.asarray(self.hess(x, *self.args))
)
return self.H
def _linearoperator_callable(self, x, **kwds):
self.nhev += 1
self.H = self.hess(x, *self.args)
return self.H
| _ScalarHessWrapper |
python | PrefectHQ__prefect | src/prefect/server/events/schemas/automations.py | {
"start": 6927,
"end": 7959
class ____(Trigger, abc.ABC):
    """
    Base class for triggers that may filter by the labels of resources.
    """

    type: str

    match: ResourceSpecification = Field(
        default_factory=lambda: ResourceSpecification.model_validate({}),
        description="Labels for resources which this trigger will match.",
    )
    match_related: Union[ResourceSpecification, list[ResourceSpecification]] = Field(
        default_factory=lambda: ResourceSpecification.model_validate({}),
        description="Labels for related resources which this trigger will match.",
    )

    def covers_resources(
        self, resource: Resource, related: Sequence[RelatedResource]
    ) -> bool:
        """Return True when ``resource`` satisfies ``match`` and the related
        resources satisfy every specification in ``match_related``."""
        if not self.match.includes([resource]):
            return False

        # `match_related` may be a single specification or a list of them;
        # normalize to a list so both shapes are checked uniformly.
        match_related = self.match_related
        if not isinstance(match_related, list):
            match_related = [match_related]

        if not all(match.includes(related) for match in match_related):
            return False

        return True
| ResourceTrigger |
python | EpistasisLab__tpot | tpot/graphsklearn.py | {
"start": 9106,
"end": 16272
class ____(_BaseComposition):
    '''
    An sklearn baseestimator that executes a directed acyclic graph of
    estimators as a pipeline.

    Parameters
    ----------
    graph: networkx.DiGraph
        A directed graph where the nodes are sklearn estimators and the edges are the inputs to those estimators.
    cross_val_predict_cv: int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy used in inner classifiers or regressors
    method: str, optional
        The prediction method to use for the inner classifiers or regressors. If 'auto', it will try to use predict_proba, decision_function, or predict in that order.
    memory: str or object with the joblib.Memory interface, optional
        Used to cache the input and outputs of nodes to prevent refitting or computationally heavy transformations. By default, no caching is performed. If a string is given, it is the path to the caching directory.
    use_label_encoder: bool, optional
        If True, the label encoder is used to encode the labels to be 0 to N. If False, the label encoder is not used.
        Mainly useful for classifiers (XGBoost) that require labels to be ints from 0 to N.
        Can also be a sklearn.preprocessing.LabelEncoder object. If so, that label encoder is used.
    '''

    def __init__(
        self,
        graph,
        cross_val_predict_cv=0,  # signature function(estimator, X, y=none)
        method='auto',
        memory=None,
        use_label_encoder=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.graph = graph
        self.cross_val_predict_cv = cross_val_predict_cv
        self.method = method
        self.memory = memory
        self.use_label_encoder = use_label_encoder

        # BUG FIX: the cycle check previously read the nonexistent `self.G`
        # inside a bare try/except that also swallowed the intended
        # `raise BaseException`, so it never rejected anything. Validate
        # before topological sorting so the caller gets a clear error.
        try:
            nx.find_cycle(self.graph)
        except nx.NetworkXNoCycle:
            pass
        else:
            raise ValueError(
                "GraphPipeline requires an acyclic graph, but a cycle was found."
            )

        setup_ordered_successors(graph)
        # Reverse topological order: leaves first, the root estimator last.
        self.topo_sorted_nodes = list(nx.topological_sort(self.graph))
        self.topo_sorted_nodes.reverse()
        self.root = self.topo_sorted_nodes[-1]

        if self.use_label_encoder:
            # A pre-built LabelEncoder may be supplied directly.
            if isinstance(self.use_label_encoder, LabelEncoder):
                self.label_encoder = self.use_label_encoder
            else:
                self.label_encoder = LabelEncoder()

    def __str__(self):
        if len(self.graph.edges) > 0:
            return str(self.graph.edges)
        else:
            return str(self.graph.nodes)

    def fit(self, X, y):
        """Fit every node of the graph in (reverse) topological order."""
        if self.use_label_encoder:
            # A user-supplied encoder is assumed already fitted; otherwise
            # fit it on the training labels now.
            if isinstance(self.use_label_encoder, LabelEncoder):
                y = self.label_encoder.transform(y)
            else:
                y = self.label_encoder.fit_transform(y)

        fit_sklearn_digraph(graph=self.graph,
                            X=X,
                            y=y,
                            method=self.method,
                            cross_val_predict_cv=self.cross_val_predict_cv,
                            memory=self.memory,
                            topo_sort=self.topo_sorted_nodes,
                            )
        return self

    def plot(self, ):
        plot(graph=self.graph)

    def __sklearn_is_fitted__(self):
        '''Indicate whether pipeline has been fit.'''
        try:
            # check if the last step of the pipeline is fitted
            # we only check the last step since if the last step is fit, it
            # means the previous steps should also be fit. This is faster than
            # checking if every step of the pipeline is fit.
            sklearn.utils.validation.check_is_fitted(self.graph.nodes[self.root]["instance"])
            return True
        except sklearn.exceptions.NotFittedError:
            return False

    @available_if(_estimator_has('predict'))
    def predict(self, X, **predict_params):
        """Predict with the root estimator, decoding labels if needed."""
        this_X = get_inputs_to_node(self.graph,
                                    X,
                                    self.root,
                                    method=self.method,
                                    topo_sort=self.topo_sorted_nodes,
                                    )
        preds = self.graph.nodes[self.root]["instance"].predict(this_X, **predict_params)
        if self.use_label_encoder:
            preds = self.label_encoder.inverse_transform(preds)
        return preds

    @available_if(_estimator_has('predict_proba'))
    def predict_proba(self, X, **predict_params):
        this_X = get_inputs_to_node(self.graph,
                                    X,
                                    self.root,
                                    method=self.method,
                                    topo_sort=self.topo_sorted_nodes,
                                    )
        return self.graph.nodes[self.root]["instance"].predict_proba(this_X, **predict_params)

    @available_if(_estimator_has('decision_function'))
    def decision_function(self, X, **predict_params):
        this_X = get_inputs_to_node(self.graph,
                                    X,
                                    self.root,
                                    method=self.method,
                                    topo_sort=self.topo_sorted_nodes,
                                    )
        return self.graph.nodes[self.root]["instance"].decision_function(this_X, **predict_params)

    @available_if(_estimator_has('transform'))
    def transform(self, X, **predict_params):
        this_X = get_inputs_to_node(self.graph,
                                    X,
                                    self.root,
                                    method=self.method,
                                    topo_sort=self.topo_sorted_nodes,
                                    )
        return self.graph.nodes[self.root]["instance"].transform(this_X, **predict_params)

    @property
    def classes_(self):
        """The classes labels. Only exist if the last step is a classifier."""
        if self.use_label_encoder:
            return self.label_encoder.classes_
        else:
            return self.graph.nodes[self.root]["instance"].classes_

    @property
    def _estimator_type(self):
        return self.graph.nodes[self.root]["instance"]._estimator_type

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        final_step = self.graph.nodes[self.root]["instance"]
        try:
            last_step_tags = final_step.__sklearn_tags__()
        except Exception:
            # Fall back for estimators without the dunder protocol.
            last_step_tags = get_tags(final_step)

        tags.estimator_type = last_step_tags.estimator_type
        tags.target_tags.multi_output = last_step_tags.target_tags.multi_output
        tags.classifier_tags = copy.deepcopy(last_step_tags.classifier_tags)
        tags.regressor_tags = copy.deepcopy(last_step_tags.regressor_tags)
        tags.transformer_tags = copy.deepcopy(last_step_tags.transformer_tags)
        # Sparse input is supported only when every node supports it.
        tags.input_tags.sparse = all(
            self.graph.nodes[step]['instance'].__sklearn_tags__().input_tags.sparse
            for step in self.topo_sorted_nodes
        )
        tags.input_tags.pairwise = last_step_tags.input_tags.pairwise
        return tags
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/ui/dashboard.py | {
"start": 879,
"end": 1029
class ____(BaseModel):
    """DAG Run Types for responses."""

    # Per-run-type counts of DAG runs, one field per airflow run type.
    backfill: int
    scheduled: int
    manual: int
    asset_triggered: int
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_multiple_forward.py | {
"start": 1384,
"end": 2587
class ____(FSDPTest):
    """Checks that FSDP produces the same parameters as DDP after several
    forward/backward/step iterations on identical data."""

    def _dist_train(self, wrap_fsdp):
        """Train for 3 steps and return the resulting parameters.

        ``wrap_fsdp`` selects FSDP wrapping; otherwise DDP is used.
        """
        # keep everything deterministic for input data
        torch.manual_seed(0)
        model = Model(wrap_fsdp).to(device_type.type)
        if wrap_fsdp:
            model = FSDP(model, device_id=device_type.type)
        else:
            model = DistributedDataParallel(model, device_ids=[device_type.type])
        optim = SGD(model.parameters(), lr=0.1)
        in_data = torch.rand(64, 4).to(device_type.type)
        in_data.requires_grad = True
        # Three rounds on the same input exercise repeated forwards.
        for _ in range(3):
            out = model(in_data)
            out.sum().backward()
            optim.step()
            optim.zero_grad()
        if wrap_fsdp:
            # Materialize the sharded parameters so they compare with DDP's.
            return get_full_params(model)
        return list(model.parameters())

    @skip_if_lt_x_gpu(2)
    def test_multi_forward(self):
        # DDP
        ddp_state = self._dist_train(wrap_fsdp=False)
        # FSDP
        fsdp_state = self._dist_train(wrap_fsdp=True)
        self.assertEqual(ddp_state, fsdp_state)
devices = ("cpu", "hpu", "xpu")
instantiate_device_type_tests(
TestMultiForward, globals(), only_for=devices, allow_xpu=True
)
if __name__ == "__main__":
run_tests()
| TestMultiForward |
python | doocs__leetcode | solution/3300-3399/3314.Construct the Minimum Bitwise Array I/Solution.py | {
"start": 0,
"end": 369
class ____:
    def minBitwiseArray(self, nums: List[int]) -> List[int]:
        """For each x, return the smallest a with a | (a + 1) == x, or -1
        when no such value exists (only x == 2 under the constraints)."""
        res = []
        for v in nums:
            if v == 2:
                # No a satisfies a | (a + 1) == 2: the OR is always odd.
                res.append(-1)
                continue
            # Find the lowest zero bit above bit 0; clearing the bit just
            # below it gives the minimal answer.
            for bit in range(1, 32):
                if not (v >> bit) & 1:
                    res.append(v ^ (1 << (bit - 1)))
                    break
        return res
| Solution |
python | numpy__numpy | numpy/typing/tests/data/pass/ndarray_misc.py | {
"start": 365,
"end": 3205
} | class ____(npt.NDArray[np.intp]): ...
i4 = np.int32(1)
A: np.ndarray[Any, np.dtype[np.int32]] = np.array([[1]], dtype=np.int32)
B0 = np.empty((), dtype=np.int32).view(SubClass)
B1 = np.empty((1,), dtype=np.int32).view(SubClass)
B2 = np.empty((1, 1), dtype=np.int32).view(SubClass)
B_int0: IntSubClass = np.empty((), dtype=np.intp).view(IntSubClass)
C: np.ndarray[Any, np.dtype[np.int32]] = np.array([0, 1, 2], dtype=np.int32)
D = np.ones(3).view(SubClass)
ctypes_obj = A.ctypes
i4.all()
A.all()
A.all(axis=0)
A.all(keepdims=True)
A.all(out=B0)
i4.any()
A.any()
A.any(axis=0)
A.any(keepdims=True)
A.any(out=B0)
i4.argmax()
A.argmax()
A.argmax(axis=0)
A.argmax(out=B_int0)
i4.argmin()
A.argmin()
A.argmin(axis=0)
A.argmin(out=B_int0)
i4.argsort()
i4.argsort(stable=True)
A.argsort()
A.argsort(stable=True)
A.sort()
A.sort(stable=True)
i4.choose([()])
_choices = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype=np.int32)
C.choose(_choices)
C.choose(_choices, out=D)
i4.clip(1)
A.clip(1)
A.clip(None, 1)
A.clip(1, out=B2)
A.clip(None, 1, out=B2)
i4.compress([1])
A.compress([1])
A.compress([1], out=B1)
i4.conj()
A.conj()
B0.conj()
i4.conjugate()
A.conjugate()
B0.conjugate()
i4.cumprod()
A.cumprod()
A.cumprod(out=B1)
i4.cumsum()
A.cumsum()
A.cumsum(out=B1)
i4.max()
A.max()
A.max(axis=0)
A.max(keepdims=True)
A.max(out=B0)
i4.mean()
A.mean()
A.mean(axis=0)
A.mean(keepdims=True)
A.mean(out=B0)
i4.min()
A.min()
A.min(axis=0)
A.min(keepdims=True)
A.min(out=B0)
i4.prod()
A.prod()
A.prod(axis=0)
A.prod(keepdims=True)
A.prod(out=B0)
i4.round()
A.round()
A.round(out=B2)
i4.repeat(1)
A.repeat(1)
B0.repeat(1)
i4.std()
A.std()
A.std(axis=0)
A.std(keepdims=True, mean=0.)
A.std(out=B0.astype(np.float64))
i4.sum()
A.sum()
A.sum(axis=0)
A.sum(keepdims=True)
A.sum(out=B0)
i4.take(0)
A.take(0)
A.take([0])
A.take(0, out=B0)
A.take([0], out=B1)
i4.var()
A.var()
A.var(axis=0)
A.var(keepdims=True, mean=0.)
A.var(out=B0)
A.argpartition([0])
A.diagonal()
A.dot(1)
A.dot(1, out=B2)
A.nonzero()
C.searchsorted(1)
A.trace()
A.trace(out=B0)
void = cast(np.void, np.array(1, dtype=[("f", np.float64)]).take(0))
void.setfield(10, np.float64)
A.item(0)
C.item(0)
A.ravel()
C.ravel()
A.flatten()
C.flatten()
A.reshape(1)
C.reshape(3)
int(np.array(1.0, dtype=np.float64))
int(np.array("1", dtype=np.str_))
float(np.array(1.0, dtype=np.float64))
float(np.array("1", dtype=np.str_))
complex(np.array(1.0, dtype=np.float64))
operator.index(np.array(1, dtype=np.int64))
# this fails on numpy 2.2.1
# https://github.com/scipy/scipy/blob/a755ee77ec47a64849abe42c349936475a6c2f24/scipy/io/arff/tests/test_arffread.py#L41-L44
A_float = np.array([[1, 5], [2, 4], [np.nan, np.nan]])
A_void: npt.NDArray[np.void] = np.empty(3, [("yop", float), ("yap", float)])
A_void["yop"] = A_float[:, 0]
A_void["yap"] = A_float[:, 1]
| IntSubClass |
python | run-llama__llama_index | llama-index-core/tests/tools/tool_spec/test_load_and_search.py | {
"start": 499,
"end": 9764
class ____(BaseModel):
    """Argument schema attached to the loader FunctionTools in these tests."""

    # Free-form query string forwarded to the loader function.
    query: str
def _foo(query: str) -> List[Document]:
    """Tiny loader stub: return one Document embedding the query text."""
    return [Document(text=f"Test document with query: {query}")]
def test_load_and_search_tool_spec_init() -> None:
    """Direct construction exposes the loader tool plus its companion reader."""
    function_tool = FunctionTool.from_defaults(
        fn=_foo,
        name="test_loader",
        description="Test loader function",
        fn_schema=TestSchema,
    )
    metadata = ToolMetadata(
        name="test_loader",
        description="Test loader function",
        fn_schema=TestSchema,
    )
    tool_spec = LoadAndSearchToolSpec(
        tool=function_tool,
        index_cls=VectorStoreIndex,
        index_kwargs={},
        metadata=metadata,
    )
    assert tool_spec.metadata.name == "test_loader"
    assert tool_spec.metadata.description == "Test loader function"
    assert tool_spec.metadata.fn_schema == TestSchema
    assert tool_spec.spec_functions == ["test_loader", "read_test_loader"]
    # The spec expands into exactly two tools: loader and reader.
    tools = tool_spec.to_tool_list()
    assert len(tools) == 2
    assert tools[0].metadata.name == "test_loader"
    assert "test_loader" in tools[0].metadata.description
    assert tools[1].metadata.name == "read_test_loader"
    assert "read" in tools[1].metadata.description
def test_load_and_search_tool_spec_from_defaults() -> None:
    """`from_defaults` inherits the wrapped tool's metadata unless overridden."""
    function_tool = FunctionTool.from_defaults(
        fn=_foo,
        name="test_loader",
        description="Test loader function",
        fn_schema=TestSchema,
    )
    tool_spec = LoadAndSearchToolSpec.from_defaults(
        tool=function_tool,
    )
    assert tool_spec.metadata.name == "test_loader"
    assert tool_spec.metadata.description == "Test loader function"
    assert tool_spec.metadata.fn_schema == TestSchema
    # Explicit name/description take precedence over the wrapped tool's.
    tool_spec = LoadAndSearchToolSpec.from_defaults(
        tool=function_tool,
        name="custom_name",
        description="Custom description",
    )
    assert tool_spec.metadata.name == "custom_name"
    assert tool_spec.metadata.description == "Custom description"
def test_load() -> None:
    """`load` indexes the tool's raw output for each supported output shape
    (str, Document, list of either) and inserts into an existing index."""
    mock_function = MagicMock(return_value=MagicMock(raw_output="Test document"))
    function_tool = FunctionTool.from_defaults(
        fn=mock_function,
        name="test_loader",
        description="Test loader function",
    )
    tool_spec = LoadAndSearchToolSpec.from_defaults(
        tool=function_tool,
    )
    # Plain string output.
    result = tool_spec.load(query="input query")
    assert "Content loaded!" in result
    assert "read_test_loader" in result
    assert tool_spec._index is not None
    # Single Document output; reset the index so a fresh one is built.
    tool_spec._index = None
    mock_function.return_value = MagicMock(raw_output=Document(text="Test document"))
    result = tool_spec.load(query="input query")
    assert "Content loaded!" in result
    assert tool_spec._index is not None
    # List-of-strings output.
    tool_spec._index = None
    mock_function.return_value = MagicMock(raw_output=["Doc1", "Doc2"])
    result = tool_spec.load(query="input query")
    assert "Content loaded!" in result
    assert tool_spec._index is not None
    # List-of-Documents output.
    tool_spec._index = None
    mock_function.return_value = MagicMock(
        raw_output=[Document(text="Doc1"), Document(text="Doc2")]
    )
    result = tool_spec.load(query="input query")
    assert "Content loaded!" in result
    assert tool_spec._index is not None
    # With an existing index, new content is inserted instead of rebuilding.
    mock_index = MagicMock()
    tool_spec._index = mock_index
    mock_function.return_value = MagicMock(raw_output="New document")
    result = tool_spec.load(query="input query")
    assert "Content loaded!" in result
    assert mock_index.insert.called
def test_read() -> None:
    """`read` errors before any load, then queries the index once loaded."""
    function_tool = FunctionTool.from_defaults(
        fn=_foo,
        name="test_loader",
        description="Test loader function",
    )
    tool_spec = LoadAndSearchToolSpec.from_defaults(
        tool=function_tool,
    )
    # No index yet: reading must report an error mentioning the loader.
    result = tool_spec.read(query="input query")
    assert "Error" in result
    assert "No content has been loaded" in result
    assert "test_loader" in result
    # With an index present, the query is forwarded to its query engine.
    mock_query_engine = MagicMock()
    mock_query_engine.query.return_value = "Query result"
    mock_index = MagicMock()
    mock_index.as_query_engine.return_value = mock_query_engine
    tool_spec._index = mock_index
    result = tool_spec.read(query="input query")
    assert result == "Query result"
    mock_query_engine.query.assert_called_once_with("input query")
@pytest.mark.parametrize(
    ("raw_output", "expected_doc_count"),
    [
        ("Single string", 1),
        (Document(text="Single document"), 1),
        (123, 1),
    ],
)
def test_load_different_output_types(raw_output: Any, expected_doc_count: int) -> None:
    """Each scalar raw-output type is wrapped into the expected number of
    Documents before index construction."""
    mock_function = MagicMock(return_value=MagicMock(raw_output=raw_output))
    function_tool = FunctionTool.from_defaults(
        fn=mock_function,
        name="test_loader",
        description="Test loader function",
    )
    # Stub index class so we can inspect the documents handed to it.
    mock_index = MagicMock()
    mock_index_cls = MagicMock(return_value=mock_index)
    mock_index_cls.from_documents = MagicMock(return_value=mock_index)
    tool_spec = LoadAndSearchToolSpec(
        tool=function_tool,
        index_cls=mock_index_cls,
        index_kwargs={},
        metadata=ToolMetadata(name="test_loader", description="Test loader"),
    )
    tool_spec.load(query="input query")
    args, _ = mock_index_cls.from_documents.call_args
    assert len(args[0]) == expected_doc_count
def test_load_edge_cases() -> None:
    """Scalar outputs from real (non-Mock) callables are wrapped into single
    Documents, and a None tool name is rejected at construction."""
    def custom_string_function(*args, **kwargs):
        return "Single string"

    function_tool = FunctionTool.from_defaults(
        fn=custom_string_function,
        name="test_loader",
        description="Test loader function",
    )
    mock_index = MagicMock()
    # Patch index construction so we can inspect the wrapped documents.
    with patch(
        "llama_index.core.indices.vector_store.VectorStoreIndex.from_documents",
        return_value=mock_index,
    ) as mock_from_docs:
        tool_spec = LoadAndSearchToolSpec.from_defaults(
            tool=function_tool,
            index_cls=VectorStoreIndex,
            index_kwargs={},
        )
        tool_spec.load(query="input query")
        mock_from_docs.assert_called_once()
        docs_arg = mock_from_docs.call_args[0][0]
        assert len(docs_arg) == 1
        assert isinstance(docs_arg[0], Document)
        assert docs_arg[0].text == "Single string"

    # A tool returning a Document should pass it through unchanged.
    doc = Document(text="Single document")

    def custom_doc_function(*args, **kwargs):
        return doc

    function_tool = FunctionTool.from_defaults(
        fn=custom_doc_function,
        name="test_loader",
        description="Test loader function",
    )
    with patch(
        "llama_index.core.indices.vector_store.VectorStoreIndex.from_documents",
        return_value=mock_index,
    ) as mock_from_docs:
        tool_spec = LoadAndSearchToolSpec.from_defaults(
            tool=function_tool,
            index_cls=VectorStoreIndex,
            index_kwargs={},
        )
        tool_spec.load(query="input query")
        mock_from_docs.assert_called_once()
        docs_arg = mock_from_docs.call_args[0][0]
        assert len(docs_arg) == 1
        assert docs_arg[0] == doc

    # Metadata without a name is invalid.
    with pytest.raises(ValueError, match="Tool name cannot be None"):
        LoadAndSearchToolSpec(
            tool=function_tool,
            index_cls=VectorStoreIndex,
            index_kwargs={},
            metadata=ToolMetadata(name=None, description="Test loader"),
        )
def test_load_list_output_types() -> None:
    """List outputs (of strings or Documents) become one Document per item."""
    def custom_function(*args, **kwargs):
        return ["String 1", "String 2"]

    function_tool = FunctionTool.from_defaults(
        fn=custom_function,
        name="test_loader",
        description="Test loader function",
    )
    mock_index = MagicMock()
    # Patch index construction so the wrapped documents can be inspected.
    with patch(
        "llama_index.core.indices.vector_store.VectorStoreIndex.from_documents",
        return_value=mock_index,
    ) as mock_from_docs:
        tool_spec = LoadAndSearchToolSpec.from_defaults(
            tool=function_tool,
            index_cls=VectorStoreIndex,
            index_kwargs={},
        )
        tool_spec.load(query="input query")
        mock_from_docs.assert_called_once()
        docs_arg = mock_from_docs.call_args[0][0]
        assert len(docs_arg) == 2
        assert isinstance(docs_arg[0], Document)
        assert isinstance(docs_arg[1], Document)
        assert docs_arg[0].text == "String 1"
        assert docs_arg[1].text == "String 2"

    # Lists of Documents pass through one-to-one.
    def custom_doc_function(*args, **kwargs):
        return [Document(text="Doc 1"), Document(text="Doc 2")]

    function_tool = FunctionTool.from_defaults(
        fn=custom_doc_function,
        name="test_loader",
        description="Test loader function",
    )
    with patch(
        "llama_index.core.indices.vector_store.VectorStoreIndex.from_documents",
        return_value=mock_index,
    ) as mock_from_docs:
        tool_spec = LoadAndSearchToolSpec.from_defaults(
            tool=function_tool,
            index_cls=VectorStoreIndex,
            index_kwargs={},
        )
        tool_spec.load(query="input query")
        mock_from_docs.assert_called_once()
        docs_arg = mock_from_docs.call_args[0][0]
        assert len(docs_arg) == 2
        assert isinstance(docs_arg[0], Document)
        assert isinstance(docs_arg[1], Document)
        assert docs_arg[0].text == "Doc 1"
        assert docs_arg[1].text == "Doc 2"
| TestSchema |
python | Lightning-AI__lightning | tests/tests_pytorch/trainer/connectors/test_accelerator_connector.py | {
"start": 30470,
"end": 39508
class ____(Mock):
    """Mock whose instance checks always succeed.

    Monkeypatched in place of `torch.device` (see the tests below) so that
    `isinstance(x, torch.device)` passes for any object.
    """

    def __instancecheck__(self, instance):
        # Accept everything: every object "is a" device while this is active.
        return True
@RunIf(skip_windows=True)
def test_connector_with_tpu_accelerator_instance(tpu_available, monkeypatch):
    """An explicit XLAAccelerator instance picks the single- or multi-device
    XLA strategy depending on the devices flag."""
    monkeypatch.setattr(torch, "device", DeviceMock())

    accelerator = XLAAccelerator()
    trainer = Trainer(accelerator=accelerator, devices=1)
    assert trainer.accelerator is accelerator
    assert isinstance(trainer.strategy, SingleDeviceXLAStrategy)

    # Without an explicit device count, the multi-device strategy is chosen.
    trainer = Trainer(accelerator=accelerator)
    assert trainer.accelerator is accelerator
    assert isinstance(trainer.strategy, XLAStrategy)
@pytest.mark.parametrize("is_interactive", [False, True])
@RunIf(min_python="3.9")  # mocking issue
def test_connector_auto_selection(monkeypatch, is_interactive):
    """Auto accelerator/strategy selection across CPU, CUDA, MPS and TPU,
    both inside and outside an interactive environment."""
    import lightning.fabric  # avoid breakage with standalone package

    def _mock_interactive():
        monkeypatch.setattr(
            lightning.pytorch.trainer.connectors.accelerator_connector, "_IS_INTERACTIVE", is_interactive
        )
        if _IS_WINDOWS:
            # simulate fork support on windows
            monkeypatch.setattr(torch.multiprocessing, "get_all_start_methods", lambda: ["fork", "spawn"])

    _mock_interactive()

    def _mock_tpu_available(value):
        mock_tpu_available(monkeypatch, value)
        monkeypatch.setattr(lightning.fabric.plugins.environments.XLAEnvironment, "node_rank", lambda *_: 0)

    # CPU
    with monkeypatch.context():
        mock_cuda_count(monkeypatch, 0)
        mock_mps_count(monkeypatch, 0)
        mock_tpu_available(monkeypatch, False)
        trainer = Trainer()
        assert isinstance(trainer.accelerator, CPUAccelerator)
        assert isinstance(trainer.strategy, SingleDeviceStrategy)
        assert trainer._accelerator_connector._devices_flag == 1
        assert trainer.num_devices == 1

    # single CUDA
    with monkeypatch.context():
        mock_cuda_count(monkeypatch, 1)
        mock_mps_count(monkeypatch, 0)
        mock_tpu_available(monkeypatch, False)
        trainer = Trainer()
        assert isinstance(trainer.accelerator, CUDAAccelerator)
        assert isinstance(trainer.strategy, SingleDeviceStrategy)
        assert trainer._accelerator_connector._devices_flag == [0]
        assert trainer.num_devices == 1

    # multi CUDA: DDP outside notebooks, single-device inside
    with monkeypatch.context():
        mock_cuda_count(monkeypatch, 4)
        mock_mps_count(monkeypatch, 0)
        mock_tpu_available(monkeypatch, False)
        trainer = Trainer()
        assert isinstance(trainer.accelerator, CUDAAccelerator)
        assert isinstance(trainer.strategy, (SingleDeviceStrategy if is_interactive else DDPStrategy))
        assert trainer._accelerator_connector._devices_flag == [0] if is_interactive else list(range(4))
        assert trainer.num_devices == 1 if is_interactive else 4
        if not is_interactive:
            assert isinstance(trainer.strategy.cluster_environment, LightningEnvironment)
            assert trainer.strategy._start_method == ("fork" if is_interactive else "popen")
            assert trainer.strategy.launcher.is_interactive_compatible == is_interactive

    # MPS (there's no distributed)
    with monkeypatch.context():
        mock_cuda_count(monkeypatch, 0)
        mock_mps_count(monkeypatch, 1)
        mock_tpu_available(monkeypatch, False)
        connector = _AcceleratorConnector()
        assert isinstance(connector.accelerator, MPSAccelerator)
        assert isinstance(connector.strategy, SingleDeviceStrategy)
        assert connector._devices_flag == [0]

    # single TPU
    with monkeypatch.context():
        mock_cuda_count(monkeypatch, 0)
        mock_mps_count(monkeypatch, 0)
        _mock_tpu_available(True)
        monkeypatch.setattr(lightning.pytorch.accelerators.XLAAccelerator, "auto_device_count", lambda *_: 1)
        monkeypatch.setattr(torch, "device", DeviceMock())
        connector = _AcceleratorConnector()
        assert isinstance(connector.accelerator, XLAAccelerator)
        assert isinstance(connector.strategy, SingleDeviceXLAStrategy)
        assert connector._devices_flag == 1

    monkeypatch.undo()  # for some reason `.context()` is not working properly
    _mock_interactive()

    # Multi TPU
    with monkeypatch.context():
        mock_cuda_count(monkeypatch, 0)
        mock_mps_count(monkeypatch, 0)
        _mock_tpu_available(True)
        connector = _AcceleratorConnector()
        assert isinstance(connector.accelerator, XLAAccelerator)
        assert isinstance(connector.strategy, XLAStrategy)
        assert connector._devices_flag == 8
        assert isinstance(connector.strategy.cluster_environment, XLAEnvironment)
        assert connector.strategy._start_method == "fork"
        assert connector.strategy.launcher.is_interactive_compatible

    monkeypatch.undo()  # for some reason `.context()` is not working properly
    _mock_interactive()

    # TPU and CUDA: prefers TPU
    with monkeypatch.context():
        mock_cuda_count(monkeypatch, 2)
        mock_mps_count(monkeypatch, 0)
        _mock_tpu_available(True)
        connector = _AcceleratorConnector()
        assert isinstance(connector.accelerator, XLAAccelerator)
        assert isinstance(connector.strategy, XLAStrategy)
        assert connector._devices_flag == 8
        assert isinstance(connector.strategy.cluster_environment, XLAEnvironment)
        assert connector.strategy._start_method == "fork"
        assert connector.strategy.launcher.is_interactive_compatible
@pytest.mark.parametrize(
    "strategy",
    [
        "ddp",
        "ddp_spawn",
        pytest.param("deepspeed", marks=RunIf(deepspeed=True)),
        "fsdp",
    ],
)
def test_connector_sets_num_nodes(strategy, cuda_count_2):
    """`num_nodes` is forwarded to every multi-node-capable strategy."""
    trainer = Trainer(accelerator="cuda", strategy=strategy, devices=2, num_nodes=2)
    assert trainer.strategy.num_nodes == 2
def test_connector_num_nodes_input_validation():
    """Non-positive `num_nodes` values are rejected."""
    with pytest.raises(ValueError, match="`num_nodes` must be a positive integer"):
        _AcceleratorConnector(num_nodes=0)
    with pytest.raises(ValueError, match="`num_nodes` must be a positive integer"):
        _AcceleratorConnector(num_nodes=-1)
@pytest.mark.parametrize(
    ("precision_str", "strategy_str", "expected_precision_cls"),
    [
        ("64-true", "auto", DoublePrecision),
        ("32-true", "auto", Precision),
        ("16-true", "auto", HalfPrecision),
        ("bf16-true", "auto", HalfPrecision),
        ("16-mixed", "auto", MixedPrecision),
        ("bf16-mixed", "auto", MixedPrecision),
        pytest.param("32-true", "fsdp", FSDPPrecision, marks=RunIf(min_cuda_gpus=1)),
        pytest.param("16-true", "fsdp", FSDPPrecision, marks=RunIf(min_cuda_gpus=1)),
        pytest.param("bf16-true", "fsdp", FSDPPrecision, marks=RunIf(min_cuda_gpus=1)),
        pytest.param("16-mixed", "fsdp", FSDPPrecision, marks=RunIf(min_cuda_gpus=1)),
        pytest.param("bf16-mixed", "fsdp", FSDPPrecision, marks=RunIf(min_cuda_gpus=1)),
        pytest.param("32-true", "deepspeed", DeepSpeedPrecision, marks=RunIf(deepspeed=True, mps=False)),
        pytest.param("16-true", "deepspeed", DeepSpeedPrecision, marks=RunIf(deepspeed=True, mps=False)),
        pytest.param("bf16-true", "deepspeed", DeepSpeedPrecision, marks=RunIf(deepspeed=True, mps=False)),
        pytest.param("16-mixed", "deepspeed", DeepSpeedPrecision, marks=RunIf(deepspeed=True, mps=False)),
        pytest.param("bf16-mixed", "deepspeed", DeepSpeedPrecision, marks=RunIf(deepspeed=True, mps=False)),
    ],
)
def test_precision_selection(precision_str, strategy_str, expected_precision_cls):
    """Each (precision, strategy) pair resolves to its precision plugin class."""
    connector = _AcceleratorConnector(precision=precision_str, strategy=strategy_str)
    assert isinstance(connector.precision_plugin, expected_precision_cls)
def test_bitsandbytes_precision_cuda_required(monkeypatch):
    """Bitsandbytes quantized precision must be rejected on non-CUDA accelerators."""
    # Pretend the optional dependency is installed so we reach the device check.
    monkeypatch.setattr(lightning.fabric.plugins.precision.bitsandbytes, "_BITSANDBYTES_AVAILABLE", True)
    monkeypatch.setitem(sys.modules, "bitsandbytes", Mock())
    with pytest.raises(RuntimeError, match="Bitsandbytes is only supported on CUDA GPUs"):
        _AcceleratorConnector(accelerator="cpu", plugins=BitsandbytesPrecision(mode="int8"))
@RunIf(min_torch="2.4")
@pytest.mark.parametrize(
("precision", "raises"),
[("32-true", False), ("16-true", False), ("bf16-true", False), ("16-mixed", True), ("bf16-mixed", False)],
)
def test_precision_selection_model_parallel(precision, raises, mps_count_0):
error_context = pytest.raises(ValueError, match=f"does not support .*{precision}") if raises else nullcontext()
with error_context:
_AcceleratorConnector(precision=precision, strategy=ModelParallelStrategy())
@RunIf(mps=True)
@pytest.mark.parametrize("accelerator", ["mps", "cpu"])
@pytest.mark.parametrize("precision", ["16-mixed", "bf16-mixed"])
def test_mps_amp_device_selection(accelerator, precision):
    """Mixed precision on the MPS accelerator must target the 'mps' device, not 'cuda'."""
    connector = _AcceleratorConnector(accelerator=accelerator, precision=precision)
    plugin = connector.precision_plugin
    assert isinstance(plugin, MixedPrecision)
    assert plugin.device == accelerator
| DeviceMock |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/key_binding/bindings/vi.py | {
"start": 1784,
"end": 75602
} | class ____:
"""
Return struct for functions wrapped in ``text_object``.
Both `start` and `end` are relative to the current cursor position.
"""
def __init__(
self, start: int, end: int = 0, type: TextObjectType = TextObjectType.EXCLUSIVE
):
self.start = start
self.end = end
self.type = type
@property
def selection_type(self) -> SelectionType:
if self.type == TextObjectType.LINEWISE:
return SelectionType.LINES
if self.type == TextObjectType.BLOCK:
return SelectionType.BLOCK
else:
return SelectionType.CHARACTERS
def sorted(self) -> tuple[int, int]:
"""
Return a (start, end) tuple where start <= end.
"""
if self.start < self.end:
return self.start, self.end
else:
return self.end, self.start
def operator_range(self, document: Document) -> tuple[int, int]:
"""
Return a (start, end) tuple with start <= end that indicates the range
operators should operate on.
`buffer` is used to get start and end of line positions.
This should return something that can be used in a slice, so the `end`
position is *not* included.
"""
start, end = self.sorted()
doc = document
if (
self.type == TextObjectType.EXCLUSIVE
and doc.translate_index_to_position(end + doc.cursor_position)[1] == 0
):
# If the motion is exclusive and the end of motion is on the first
# column, the end position becomes end of previous line.
end -= 1
if self.type == TextObjectType.INCLUSIVE:
end += 1
if self.type == TextObjectType.LINEWISE:
# Select whole lines
row, col = doc.translate_index_to_position(start + doc.cursor_position)
start = doc.translate_row_col_to_index(row, 0) - doc.cursor_position
row, col = doc.translate_index_to_position(end + doc.cursor_position)
end = (
doc.translate_row_col_to_index(row, len(doc.lines[row]))
- doc.cursor_position
)
return start, end
def get_line_numbers(self, buffer: Buffer) -> tuple[int, int]:
"""
Return a (start_line, end_line) pair.
"""
# Get absolute cursor positions from the text object.
from_, to = self.operator_range(buffer.document)
from_ += buffer.cursor_position
to += buffer.cursor_position
# Take the start of the lines.
from_, _ = buffer.document.translate_index_to_position(from_)
to, _ = buffer.document.translate_index_to_position(to)
return from_, to
def cut(self, buffer: Buffer) -> tuple[Document, ClipboardData]:
"""
Turn text object into `ClipboardData` instance.
"""
from_, to = self.operator_range(buffer.document)
from_ += buffer.cursor_position
to += buffer.cursor_position
# For Vi mode, the SelectionState does include the upper position,
# while `self.operator_range` does not. So, go one to the left, unless
# we're in the line mode, then we don't want to risk going to the
# previous line, and missing one line in the selection.
if self.type != TextObjectType.LINEWISE:
to -= 1
document = Document(
buffer.text,
to,
SelectionState(original_cursor_position=from_, type=self.selection_type),
)
new_document, clipboard_data = document.cut_selection()
return new_document, clipboard_data
# Signature shared by every Vi text-object handler: it receives a key press
# event and returns the `TextObject` describing the motion. `_TOF` lets the
# decorators below return the handler with its original (narrower) type.
TextObjectFunction = Callable[[E], TextObject]
_TOF = TypeVar("_TOF", bound=TextObjectFunction)
def create_text_object_decorator(
    key_bindings: KeyBindings,
) -> Callable[..., Callable[[_TOF], _TOF]]:
    """
    Create a decorator that can be used to register Vi text object implementations.

    Registering one text-object function installs up to three key bindings on
    `key_bindings`: one that feeds a pending operator, one plain move in
    navigation mode, and one move/extend in selection mode.
    """

    def text_object_decorator(
        *keys: Keys | str,
        filter: Filter = Always(),
        no_move_handler: bool = False,
        no_selection_handler: bool = False,
        eager: bool = False,
    ) -> Callable[[_TOF], _TOF]:
        """
        Register a text object function.

        Usage::

            @text_object('w', filter=..., no_move_handler=False)
            def handler(event):
                # Return a text object for this key.
                return TextObject(...)

        :param no_move_handler: Disable the move handler in navigation mode.
            (It's still active in selection mode.)
        :param no_selection_handler: Disable the handler that moves/extends
            the selection in selection mode.
        :param eager: Forwarded to `KeyBindings.add`.
        """

        def decorator(text_object_func: _TOF) -> _TOF:
            # Binding 1: an operator (e.g. 'd', 'c', 'y') is pending and this
            # key arrives as the text object it should operate on.
            @key_bindings.add(
                *keys, filter=vi_waiting_for_text_object_mode & filter, eager=eager
            )
            def _apply_operator_to_text_object(event: E) -> None:
                # Arguments are multiplied. (E.g. '2d3w' deletes six words.)
                vi_state = event.app.vi_state
                event._arg = str((vi_state.operator_arg or 1) * (event.arg or 1))

                # Call the text object handler.
                text_obj = text_object_func(event)

                # Get the operator function.
                # (Should never be None here, given the
                # `vi_waiting_for_text_object_mode` filter state.)
                operator_func = vi_state.operator_func

                if text_obj is not None and operator_func is not None:
                    # Call the operator function with the text object.
                    operator_func(event, text_obj)

                # Clear operator.
                event.app.vi_state.operator_func = None
                event.app.vi_state.operator_arg = None

            # Binding 2: a move operation. (Doesn't need an operator.)
            if not no_move_handler:

                @key_bindings.add(
                    *keys,
                    filter=~vi_waiting_for_text_object_mode
                    & filter
                    & vi_navigation_mode,
                    eager=eager,
                )
                def _move_in_navigation_mode(event: E) -> None:
                    """
                    Move handler for navigation mode.
                    """
                    text_object = text_object_func(event)
                    event.current_buffer.cursor_position += text_object.start

            # Binding 3: move/extend the selection in selection mode.
            if not no_selection_handler:

                @key_bindings.add(
                    *keys,
                    filter=~vi_waiting_for_text_object_mode
                    & filter
                    & vi_selection_mode,
                    eager=eager,
                )
                def _move_in_selection_mode(event: E) -> None:
                    """
                    Move handler for selection mode.
                    """
                    text_object = text_object_func(event)
                    buff = event.current_buffer
                    selection_state = buff.selection_state

                    if selection_state is None:
                        return  # Should not happen, because of the `vi_selection_mode` filter.

                    # When the text object has both a start and end position, like 'i(' or 'iw',
                    # Turn this into a selection, otherwise the cursor.
                    if text_object.end:
                        # Take selection positions from text object.
                        start, end = text_object.operator_range(buff.document)
                        start += buff.cursor_position
                        end += buff.cursor_position

                        selection_state.original_cursor_position = start
                        buff.cursor_position = end

                        # Take selection type from text object.
                        if text_object.type == TextObjectType.LINEWISE:
                            selection_state.type = SelectionType.LINES
                        else:
                            selection_state.type = SelectionType.CHARACTERS
                    else:
                        event.current_buffer.cursor_position += text_object.start

            # Make it possible to chain @text_object decorators.
            return text_object_func

        return decorator

    return text_object_decorator
# Signature shared by every Vi operator handler: it receives the key press
# event plus the `TextObject` to act on, and returns nothing. `_OF` lets the
# decorators below return the handler with its original (narrower) type.
OperatorFunction = Callable[[E, TextObject], None]
_OF = TypeVar("_OF", bound=OperatorFunction)
def create_operator_decorator(
    key_bindings: KeyBindings,
) -> Callable[..., Callable[[_OF], _OF]]:
    """
    Create a decorator that can be used for registering Vi operators.

    Registering an operator installs two key bindings on `key_bindings`: one
    for navigation mode (stores the operator until a text object arrives) and
    one for selection mode (applies the operator to the selection at once).
    """

    def operator_decorator(
        *keys: Keys | str, filter: Filter = Always(), eager: bool = False
    ) -> Callable[[_OF], _OF]:
        """
        Register a Vi operator.

        Usage::

            @operator('d', filter=...)
            def handler(event, text_object):
                # Do something with the text object here.

        :param eager: Forwarded to `KeyBindings.add`.
        """

        def decorator(operator_func: _OF) -> _OF:
            @key_bindings.add(
                *keys,
                filter=~vi_waiting_for_text_object_mode & filter & vi_navigation_mode,
                eager=eager,
            )
            def _operator_in_navigation(event: E) -> None:
                """
                Handle operator in navigation mode.
                """
                # When this key binding is matched, only set the operator
                # function in the ViState. We should execute it after a text
                # object has been received.
                event.app.vi_state.operator_func = operator_func
                event.app.vi_state.operator_arg = event.arg

            @key_bindings.add(
                *keys,
                filter=~vi_waiting_for_text_object_mode & filter & vi_selection_mode,
                eager=eager,
            )
            def _operator_in_selection(event: E) -> None:
                """
                Handle operator in selection mode.
                """
                buff = event.current_buffer
                selection_state = buff.selection_state

                if selection_state is not None:
                    # Create text object from selection.
                    if selection_state.type == SelectionType.LINES:
                        text_obj_type = TextObjectType.LINEWISE
                    elif selection_state.type == SelectionType.BLOCK:
                        text_obj_type = TextObjectType.BLOCK
                    else:
                        # Character selections include the cursor character,
                        # hence INCLUSIVE.
                        text_obj_type = TextObjectType.INCLUSIVE

                    text_object = TextObject(
                        selection_state.original_cursor_position - buff.cursor_position,
                        type=text_obj_type,
                    )

                    # Execute operator.
                    operator_func(event, text_object)

                    # Quit selection mode.
                    buff.selection_state = None

            return operator_func

        return decorator

    return operator_decorator
@Condition
def is_returnable() -> bool:
    "Whether the current buffer allows its input to be accepted (returned)."
    app = get_app()
    return app.current_buffer.is_returnable
@Condition
def in_block_selection() -> bool:
    "Whether the current buffer has an active BLOCK-type selection."
    selection = get_app().current_buffer.selection_state
    if not selection:
        return False
    return selection.type == SelectionType.BLOCK
@Condition
def digraph_symbol_1_given() -> bool:
    "Whether the first digraph symbol has already been entered."
    symbol = get_app().vi_state.digraph_symbol1
    return symbol is not None
@Condition
def search_buffer_is_empty() -> bool:
    "Returns True when the search buffer contains no text."
    return not get_app().current_buffer.text
@Condition
def tilde_operator() -> bool:
    "Whether the Vi state's tilde-operator flag is enabled."
    vi_state = get_app().vi_state
    return vi_state.tilde_operator
def load_vi_bindings() -> KeyBindingsBase:
"""
Vi extensions.
# Overview of Readline Vi commands:
# http://www.catonmat.net/download/bash-vi-editing-mode-cheat-sheet.pdf
"""
# Note: Some key bindings have the "~IsReadOnly()" filter added. This
# prevents the handler to be executed when the focus is on a
# read-only buffer.
# This is however only required for those that change the ViState to
# INSERT mode. The `Buffer` class itself throws the
# `EditReadOnlyBuffer` exception for any text operations which is
# handled correctly. There is no need to add "~IsReadOnly" to all key
# bindings that do text manipulation.
key_bindings = KeyBindings()
handle = key_bindings.add
# (Note: Always take the navigation bindings in read-only mode, even when
# ViState says different.)
TransformFunction = Tuple[Tuple[str, ...], Filter, Callable[[str], str]]
vi_transform_functions: list[TransformFunction] = [
# Rot 13 transformation
(
("g", "?"),
Always(),
lambda string: codecs.encode(string, "rot_13"),
),
# To lowercase
(("g", "u"), Always(), lambda string: string.lower()),
# To uppercase.
(("g", "U"), Always(), lambda string: string.upper()),
# Swap case.
(("g", "~"), Always(), lambda string: string.swapcase()),
(
("~",),
tilde_operator,
lambda string: string.swapcase(),
),
]
# Insert a character literally (quoted insert).
handle("c-v", filter=vi_insert_mode)(get_by_name("quoted-insert"))
@handle("escape")
def _back_to_navigation(event: E) -> None:
"""
Escape goes to vi navigation mode.
"""
buffer = event.current_buffer
vi_state = event.app.vi_state
if vi_state.input_mode in (InputMode.INSERT, InputMode.REPLACE):
buffer.cursor_position += buffer.document.get_cursor_left_position()
vi_state.input_mode = InputMode.NAVIGATION
if bool(buffer.selection_state):
buffer.exit_selection()
@handle("k", filter=vi_selection_mode)
def _up_in_selection(event: E) -> None:
"""
Arrow up in selection mode.
"""
event.current_buffer.cursor_up(count=event.arg)
@handle("j", filter=vi_selection_mode)
def _down_in_selection(event: E) -> None:
"""
Arrow down in selection mode.
"""
event.current_buffer.cursor_down(count=event.arg)
@handle("up", filter=vi_navigation_mode)
@handle("c-p", filter=vi_navigation_mode)
def _up_in_navigation(event: E) -> None:
"""
Arrow up and ControlP in navigation mode go up.
"""
event.current_buffer.auto_up(count=event.arg)
@handle("k", filter=vi_navigation_mode)
def _go_up(event: E) -> None:
"""
Go up, but if we enter a new history entry, move to the start of the
line.
"""
event.current_buffer.auto_up(
count=event.arg, go_to_start_of_line_if_history_changes=True
)
@handle("down", filter=vi_navigation_mode)
@handle("c-n", filter=vi_navigation_mode)
def _go_down(event: E) -> None:
"""
Arrow down and Control-N in navigation mode.
"""
event.current_buffer.auto_down(count=event.arg)
@handle("j", filter=vi_navigation_mode)
def _go_down2(event: E) -> None:
"""
Go down, but if we enter a new history entry, go to the start of the line.
"""
event.current_buffer.auto_down(
count=event.arg, go_to_start_of_line_if_history_changes=True
)
@handle("backspace", filter=vi_navigation_mode)
def _go_left(event: E) -> None:
"""
In navigation-mode, move cursor.
"""
event.current_buffer.cursor_position += (
event.current_buffer.document.get_cursor_left_position(count=event.arg)
)
@handle("c-n", filter=vi_insert_mode)
def _complete_next(event: E) -> None:
b = event.current_buffer
if b.complete_state:
b.complete_next()
else:
b.start_completion(select_first=True)
@handle("c-p", filter=vi_insert_mode)
def _complete_prev(event: E) -> None:
"""
Control-P: To previous completion.
"""
b = event.current_buffer
if b.complete_state:
b.complete_previous()
else:
b.start_completion(select_last=True)
@handle("c-g", filter=vi_insert_mode)
@handle("c-y", filter=vi_insert_mode)
def _accept_completion(event: E) -> None:
"""
Accept current completion.
"""
event.current_buffer.complete_state = None
@handle("c-e", filter=vi_insert_mode)
def _cancel_completion(event: E) -> None:
"""
Cancel completion. Go back to originally typed text.
"""
event.current_buffer.cancel_completion()
# In navigation mode, pressing enter will always return the input.
handle("enter", filter=vi_navigation_mode & is_returnable)(
get_by_name("accept-line")
)
# In insert mode, also accept input when enter is pressed, and the buffer
# has been marked as single line.
handle("enter", filter=is_returnable & ~is_multiline)(get_by_name("accept-line"))
@handle("enter", filter=~is_returnable & vi_navigation_mode)
def _start_of_next_line(event: E) -> None:
"""
Go to the beginning of next line.
"""
b = event.current_buffer
b.cursor_down(count=event.arg)
b.cursor_position += b.document.get_start_of_line_position(
after_whitespace=True
)
# ** In navigation mode **
# List of navigation commands: http://hea-www.harvard.edu/~fine/Tech/vi.html
@handle("insert", filter=vi_navigation_mode)
def _insert_mode(event: E) -> None:
"""
Pressing the Insert key.
"""
event.app.vi_state.input_mode = InputMode.INSERT
@handle("insert", filter=vi_insert_mode)
def _navigation_mode(event: E) -> None:
"""
Pressing the Insert key.
"""
event.app.vi_state.input_mode = InputMode.NAVIGATION
@handle("a", filter=vi_navigation_mode & ~is_read_only)
# ~IsReadOnly, because we want to stay in navigation mode for
# read-only buffers.
def _a(event: E) -> None:
event.current_buffer.cursor_position += (
event.current_buffer.document.get_cursor_right_position()
)
event.app.vi_state.input_mode = InputMode.INSERT
@handle("A", filter=vi_navigation_mode & ~is_read_only)
def _A(event: E) -> None:
event.current_buffer.cursor_position += (
event.current_buffer.document.get_end_of_line_position()
)
event.app.vi_state.input_mode = InputMode.INSERT
@handle("C", filter=vi_navigation_mode & ~is_read_only)
def _change_until_end_of_line(event: E) -> None:
"""
Change to end of line.
Same as 'c$' (which is implemented elsewhere.)
"""
buffer = event.current_buffer
deleted = buffer.delete(count=buffer.document.get_end_of_line_position())
event.app.clipboard.set_text(deleted)
event.app.vi_state.input_mode = InputMode.INSERT
@handle("c", "c", filter=vi_navigation_mode & ~is_read_only)
@handle("S", filter=vi_navigation_mode & ~is_read_only)
def _change_current_line(event: E) -> None: # TODO: implement 'arg'
"""
Change current line
"""
buffer = event.current_buffer
# We copy the whole line.
data = ClipboardData(buffer.document.current_line, SelectionType.LINES)
event.app.clipboard.set_data(data)
# But we delete after the whitespace
buffer.cursor_position += buffer.document.get_start_of_line_position(
after_whitespace=True
)
buffer.delete(count=buffer.document.get_end_of_line_position())
event.app.vi_state.input_mode = InputMode.INSERT
@handle("D", filter=vi_navigation_mode)
def _delete_until_end_of_line(event: E) -> None:
"""
Delete from cursor position until the end of the line.
"""
buffer = event.current_buffer
deleted = buffer.delete(count=buffer.document.get_end_of_line_position())
event.app.clipboard.set_text(deleted)
@handle("d", "d", filter=vi_navigation_mode)
def _delete_line(event: E) -> None:
"""
Delete line. (Or the following 'n' lines.)
"""
buffer = event.current_buffer
# Split string in before/deleted/after text.
lines = buffer.document.lines
before = "\n".join(lines[: buffer.document.cursor_position_row])
deleted = "\n".join(
lines[
buffer.document.cursor_position_row : buffer.document.cursor_position_row
+ event.arg
]
)
after = "\n".join(lines[buffer.document.cursor_position_row + event.arg :])
# Set new text.
if before and after:
before = before + "\n"
# Set text and cursor position.
buffer.document = Document(
text=before + after,
# Cursor At the start of the first 'after' line, after the leading whitespace.
cursor_position=len(before) + len(after) - len(after.lstrip(" ")),
)
# Set clipboard data
event.app.clipboard.set_data(ClipboardData(deleted, SelectionType.LINES))
@handle("x", filter=vi_selection_mode)
def _cut(event: E) -> None:
"""
Cut selection.
('x' is not an operator.)
"""
clipboard_data = event.current_buffer.cut_selection()
event.app.clipboard.set_data(clipboard_data)
@handle("i", filter=vi_navigation_mode & ~is_read_only)
def _i(event: E) -> None:
event.app.vi_state.input_mode = InputMode.INSERT
@handle("I", filter=vi_navigation_mode & ~is_read_only)
def _I(event: E) -> None:
event.app.vi_state.input_mode = InputMode.INSERT
event.current_buffer.cursor_position += (
event.current_buffer.document.get_start_of_line_position(
after_whitespace=True
)
)
@handle("I", filter=in_block_selection & ~is_read_only)
def insert_in_block_selection(event: E, after: bool = False) -> None:
"""
Insert in block selection mode.
"""
buff = event.current_buffer
# Store all cursor positions.
positions = []
if after:
def get_pos(from_to: tuple[int, int]) -> int:
return from_to[1]
else:
def get_pos(from_to: tuple[int, int]) -> int:
return from_to[0]
for i, from_to in enumerate(buff.document.selection_ranges()):
positions.append(get_pos(from_to))
if i == 0:
buff.cursor_position = get_pos(from_to)
buff.multiple_cursor_positions = positions
# Go to 'INSERT_MULTIPLE' mode.
event.app.vi_state.input_mode = InputMode.INSERT_MULTIPLE
buff.exit_selection()
@handle("A", filter=in_block_selection & ~is_read_only)
def _append_after_block(event: E) -> None:
insert_in_block_selection(event, after=True)
@handle("J", filter=vi_navigation_mode & ~is_read_only)
def _join(event: E) -> None:
"""
Join lines.
"""
for i in range(event.arg):
event.current_buffer.join_next_line()
@handle("g", "J", filter=vi_navigation_mode & ~is_read_only)
def _join_nospace(event: E) -> None:
"""
Join lines without space.
"""
for i in range(event.arg):
event.current_buffer.join_next_line(separator="")
@handle("J", filter=vi_selection_mode & ~is_read_only)
def _join_selection(event: E) -> None:
"""
Join selected lines.
"""
event.current_buffer.join_selected_lines()
@handle("g", "J", filter=vi_selection_mode & ~is_read_only)
def _join_selection_nospace(event: E) -> None:
"""
Join selected lines without space.
"""
event.current_buffer.join_selected_lines(separator="")
@handle("p", filter=vi_navigation_mode)
def _paste(event: E) -> None:
"""
Paste after
"""
event.current_buffer.paste_clipboard_data(
event.app.clipboard.get_data(),
count=event.arg,
paste_mode=PasteMode.VI_AFTER,
)
@handle("P", filter=vi_navigation_mode)
def _paste_before(event: E) -> None:
"""
Paste before
"""
event.current_buffer.paste_clipboard_data(
event.app.clipboard.get_data(),
count=event.arg,
paste_mode=PasteMode.VI_BEFORE,
)
@handle('"', Keys.Any, "p", filter=vi_navigation_mode)
def _paste_register(event: E) -> None:
"""
Paste from named register.
"""
c = event.key_sequence[1].data
if c in vi_register_names:
data = event.app.vi_state.named_registers.get(c)
if data:
event.current_buffer.paste_clipboard_data(
data, count=event.arg, paste_mode=PasteMode.VI_AFTER
)
@handle('"', Keys.Any, "P", filter=vi_navigation_mode)
def _paste_register_before(event: E) -> None:
"""
Paste (before) from named register.
"""
c = event.key_sequence[1].data
if c in vi_register_names:
data = event.app.vi_state.named_registers.get(c)
if data:
event.current_buffer.paste_clipboard_data(
data, count=event.arg, paste_mode=PasteMode.VI_BEFORE
)
@handle("r", filter=vi_navigation_mode)
def _replace(event: E) -> None:
"""
Go to 'replace-single'-mode.
"""
event.app.vi_state.input_mode = InputMode.REPLACE_SINGLE
@handle("R", filter=vi_navigation_mode)
def _replace_mode(event: E) -> None:
"""
Go to 'replace'-mode.
"""
event.app.vi_state.input_mode = InputMode.REPLACE
@handle("s", filter=vi_navigation_mode & ~is_read_only)
def _substitute(event: E) -> None:
"""
Substitute with new text
(Delete character(s) and go to insert mode.)
"""
text = event.current_buffer.delete(count=event.arg)
event.app.clipboard.set_text(text)
event.app.vi_state.input_mode = InputMode.INSERT
@handle("u", filter=vi_navigation_mode, save_before=(lambda e: False))
def _undo(event: E) -> None:
for i in range(event.arg):
event.current_buffer.undo()
@handle("V", filter=vi_navigation_mode)
def _visual_line(event: E) -> None:
"""
Start lines selection.
"""
event.current_buffer.start_selection(selection_type=SelectionType.LINES)
@handle("c-v", filter=vi_navigation_mode)
def _visual_block(event: E) -> None:
"""
Enter block selection mode.
"""
event.current_buffer.start_selection(selection_type=SelectionType.BLOCK)
@handle("V", filter=vi_selection_mode)
def _visual_line2(event: E) -> None:
"""
Exit line selection mode, or go from non line selection mode to line
selection mode.
"""
selection_state = event.current_buffer.selection_state
if selection_state is not None:
if selection_state.type != SelectionType.LINES:
selection_state.type = SelectionType.LINES
else:
event.current_buffer.exit_selection()
@handle("v", filter=vi_navigation_mode)
def _visual(event: E) -> None:
"""
Enter character selection mode.
"""
event.current_buffer.start_selection(selection_type=SelectionType.CHARACTERS)
@handle("v", filter=vi_selection_mode)
def _visual2(event: E) -> None:
"""
Exit character selection mode, or go from non-character-selection mode
to character selection mode.
"""
selection_state = event.current_buffer.selection_state
if selection_state is not None:
if selection_state.type != SelectionType.CHARACTERS:
selection_state.type = SelectionType.CHARACTERS
else:
event.current_buffer.exit_selection()
@handle("c-v", filter=vi_selection_mode)
def _visual_block2(event: E) -> None:
"""
Exit block selection mode, or go from non block selection mode to block
selection mode.
"""
selection_state = event.current_buffer.selection_state
if selection_state is not None:
if selection_state.type != SelectionType.BLOCK:
selection_state.type = SelectionType.BLOCK
else:
event.current_buffer.exit_selection()
@handle("a", "w", filter=vi_selection_mode)
@handle("a", "W", filter=vi_selection_mode)
def _visual_auto_word(event: E) -> None:
"""
Switch from visual linewise mode to visual characterwise mode.
"""
buffer = event.current_buffer
if (
buffer.selection_state
and buffer.selection_state.type == SelectionType.LINES
):
buffer.selection_state.type = SelectionType.CHARACTERS
@handle("x", filter=vi_navigation_mode)
def _delete(event: E) -> None:
"""
Delete character.
"""
buff = event.current_buffer
count = min(event.arg, len(buff.document.current_line_after_cursor))
if count:
text = event.current_buffer.delete(count=count)
event.app.clipboard.set_text(text)
@handle("X", filter=vi_navigation_mode)
def _delete_before_cursor(event: E) -> None:
buff = event.current_buffer
count = min(event.arg, len(buff.document.current_line_before_cursor))
if count:
text = event.current_buffer.delete_before_cursor(count=count)
event.app.clipboard.set_text(text)
@handle("y", "y", filter=vi_navigation_mode)
@handle("Y", filter=vi_navigation_mode)
def _yank_line(event: E) -> None:
"""
Yank the whole line.
"""
text = "\n".join(event.current_buffer.document.lines_from_current[: event.arg])
event.app.clipboard.set_data(ClipboardData(text, SelectionType.LINES))
@handle("+", filter=vi_navigation_mode)
def _next_line(event: E) -> None:
"""
Move to first non whitespace of next line
"""
buffer = event.current_buffer
buffer.cursor_position += buffer.document.get_cursor_down_position(
count=event.arg
)
buffer.cursor_position += buffer.document.get_start_of_line_position(
after_whitespace=True
)
@handle("-", filter=vi_navigation_mode)
def _prev_line(event: E) -> None:
"""
Move to first non whitespace of previous line
"""
buffer = event.current_buffer
buffer.cursor_position += buffer.document.get_cursor_up_position(
count=event.arg
)
buffer.cursor_position += buffer.document.get_start_of_line_position(
after_whitespace=True
)
@handle(">", ">", filter=vi_navigation_mode)
@handle("c-t", filter=vi_insert_mode)
def _indent(event: E) -> None:
"""
Indent lines.
"""
buffer = event.current_buffer
current_row = buffer.document.cursor_position_row
indent(buffer, current_row, current_row + event.arg)
@handle("<", "<", filter=vi_navigation_mode)
@handle("c-d", filter=vi_insert_mode)
def _unindent(event: E) -> None:
"""
Unindent lines.
"""
current_row = event.current_buffer.document.cursor_position_row
unindent(event.current_buffer, current_row, current_row + event.arg)
@handle("O", filter=vi_navigation_mode & ~is_read_only)
def _open_above(event: E) -> None:
"""
Open line above and enter insertion mode
"""
event.current_buffer.insert_line_above(copy_margin=not in_paste_mode())
event.app.vi_state.input_mode = InputMode.INSERT
@handle("o", filter=vi_navigation_mode & ~is_read_only)
def _open_below(event: E) -> None:
"""
Open line below and enter insertion mode
"""
event.current_buffer.insert_line_below(copy_margin=not in_paste_mode())
event.app.vi_state.input_mode = InputMode.INSERT
@handle("~", filter=vi_navigation_mode)
def _reverse_case(event: E) -> None:
"""
Reverse case of current character and move cursor forward.
"""
buffer = event.current_buffer
c = buffer.document.current_char
if c is not None and c != "\n":
buffer.insert_text(c.swapcase(), overwrite=True)
@handle("g", "u", "u", filter=vi_navigation_mode & ~is_read_only)
def _lowercase_line(event: E) -> None:
"""
Lowercase current line.
"""
buff = event.current_buffer
buff.transform_current_line(lambda s: s.lower())
@handle("g", "U", "U", filter=vi_navigation_mode & ~is_read_only)
def _uppercase_line(event: E) -> None:
"""
Uppercase current line.
"""
buff = event.current_buffer
buff.transform_current_line(lambda s: s.upper())
@handle("g", "~", "~", filter=vi_navigation_mode & ~is_read_only)
def _swapcase_line(event: E) -> None:
"""
Swap case of the current line.
"""
buff = event.current_buffer
buff.transform_current_line(lambda s: s.swapcase())
@handle("#", filter=vi_navigation_mode)
def _prev_occurrence(event: E) -> None:
"""
Go to previous occurrence of this word.
"""
b = event.current_buffer
search_state = event.app.current_search_state
search_state.text = b.document.get_word_under_cursor()
search_state.direction = SearchDirection.BACKWARD
b.apply_search(search_state, count=event.arg, include_current_position=False)
@handle("*", filter=vi_navigation_mode)
def _next_occurrence(event: E) -> None:
"""
Go to next occurrence of this word.
"""
b = event.current_buffer
search_state = event.app.current_search_state
search_state.text = b.document.get_word_under_cursor()
search_state.direction = SearchDirection.FORWARD
b.apply_search(search_state, count=event.arg, include_current_position=False)
@handle("(", filter=vi_navigation_mode)
def _begin_of_sentence(event: E) -> None:
# TODO: go to begin of sentence.
# XXX: should become text_object.
pass
@handle(")", filter=vi_navigation_mode)
def _end_of_sentence(event: E) -> None:
# TODO: go to end of sentence.
# XXX: should become text_object.
pass
operator = create_operator_decorator(key_bindings)
text_object = create_text_object_decorator(key_bindings)
@handle(Keys.Any, filter=vi_waiting_for_text_object_mode)
def _unknown_text_object(event: E) -> None:
"""
Unknown key binding while waiting for a text object.
"""
event.app.output.bell()
#
# *** Operators ***
#
def create_delete_and_change_operators(
delete_only: bool, with_register: bool = False
) -> None:
"""
Delete and change operators.
:param delete_only: Create an operator that deletes, but doesn't go to insert mode.
:param with_register: Copy the deleted text to this named register instead of the clipboard.
"""
handler_keys: Iterable[str]
if with_register:
handler_keys = ('"', Keys.Any, "cd"[delete_only])
else:
handler_keys = "cd"[delete_only]
@operator(*handler_keys, filter=~is_read_only)
def delete_or_change_operator(event: E, text_object: TextObject) -> None:
clipboard_data = None
buff = event.current_buffer
if text_object:
new_document, clipboard_data = text_object.cut(buff)
buff.document = new_document
# Set deleted/changed text to clipboard or named register.
if clipboard_data and clipboard_data.text:
if with_register:
reg_name = event.key_sequence[1].data
if reg_name in vi_register_names:
event.app.vi_state.named_registers[reg_name] = clipboard_data
else:
event.app.clipboard.set_data(clipboard_data)
# Only go back to insert mode in case of 'change'.
if not delete_only:
event.app.vi_state.input_mode = InputMode.INSERT
create_delete_and_change_operators(False, False)
create_delete_and_change_operators(False, True)
create_delete_and_change_operators(True, False)
create_delete_and_change_operators(True, True)
def create_transform_handler(
filter: Filter, transform_func: Callable[[str], str], *a: str
) -> None:
@operator(*a, filter=filter & ~is_read_only)
def _(event: E, text_object: TextObject) -> None:
"""
Apply transformation (uppercase, lowercase, rot13, swap case).
"""
buff = event.current_buffer
start, end = text_object.operator_range(buff.document)
if start < end:
# Transform.
buff.transform_region(
buff.cursor_position + start,
buff.cursor_position + end,
transform_func,
)
# Move cursor
buff.cursor_position += text_object.end or text_object.start
for k, f, func in vi_transform_functions:
create_transform_handler(f, func, *k)
@operator("y")
def _yank(event: E, text_object: TextObject) -> None:
"""
Yank operator. (Copy text.)
"""
_, clipboard_data = text_object.cut(event.current_buffer)
if clipboard_data.text:
event.app.clipboard.set_data(clipboard_data)
@operator('"', Keys.Any, "y")
def _yank_to_register(event: E, text_object: TextObject) -> None:
"""
Yank selection to named register.
"""
c = event.key_sequence[1].data
if c in vi_register_names:
_, clipboard_data = text_object.cut(event.current_buffer)
event.app.vi_state.named_registers[c] = clipboard_data
@operator(">")
def _indent_text_object(event: E, text_object: TextObject) -> None:
"""
Indent.
"""
buff = event.current_buffer
from_, to = text_object.get_line_numbers(buff)
indent(buff, from_, to + 1, count=event.arg)
@operator("<")
def _unindent_text_object(event: E, text_object: TextObject) -> None:
    """
    Unindent.

    Shift the lines covered by the text object one level to the left,
    `event.arg` times.
    """
    buff = event.current_buffer
    from_, to = text_object.get_line_numbers(buff)
    # `to + 1`: presumably `unindent` takes an exclusive end line — TODO confirm.
    unindent(buff, from_, to + 1, count=event.arg)
@operator("g", "q")
def _reshape(event: E, text_object: TextObject) -> None:
"""
Reshape text.
"""
buff = event.current_buffer
from_, to = text_object.get_line_numbers(buff)
reshape_text(buff, from_, to)
#
# *** Text objects ***
#
@text_object("b")
def _b(event: E) -> TextObject:
    """
    Move one word or token left.
    """
    document = event.current_buffer.document
    offset = document.find_start_of_previous_word(count=event.arg)
    # No previous word: stay where we are.
    return TextObject(offset or 0)
@text_object("B")
def _B(event: E) -> TextObject:
"""
Move one non-blank word left
"""
return TextObject(
event.current_buffer.document.find_start_of_previous_word(
count=event.arg, WORD=True
)
or 0
)
@text_object("$")
def _dollar(event: E) -> TextObject:
"""
'c$', 'd$' and '$': Delete/change/move until end of line.
"""
return TextObject(event.current_buffer.document.get_end_of_line_position())
@text_object("w")
def _word_forward(event: E) -> TextObject:
    """
    'word' forward. 'cw', 'dw', 'w': Delete/change/move one word.
    """
    document = event.current_buffer.document
    offset = document.find_next_word_beginning(count=event.arg)
    # No further word: fall back to the end of the document.
    return TextObject(offset or document.get_end_of_document_position())
@text_object("W")
def _WORD_forward(event: E) -> TextObject:
"""
'WORD' forward. 'cW', 'dW', 'W': Delete/change/move one WORD.
"""
return TextObject(
event.current_buffer.document.find_next_word_beginning(
count=event.arg, WORD=True
)
or event.current_buffer.document.get_end_of_document_position()
)
@text_object("e")
def _end_of_word(event: E) -> TextObject:
"""
End of 'word': 'ce', 'de', 'e'
"""
end = event.current_buffer.document.find_next_word_ending(count=event.arg)
return TextObject(end - 1 if end else 0, type=TextObjectType.INCLUSIVE)
@text_object("E")
def _end_of_WORD(event: E) -> TextObject:
"""
End of 'WORD': 'cE', 'dE', 'E'
"""
end = event.current_buffer.document.find_next_word_ending(
count=event.arg, WORD=True
)
return TextObject(end - 1 if end else 0, type=TextObjectType.INCLUSIVE)
@text_object("i", "w", no_move_handler=True)
def _inner_word(event: E) -> TextObject:
"""
Inner 'word': ciw and diw
"""
start, end = event.current_buffer.document.find_boundaries_of_current_word()
return TextObject(start, end)
@text_object("a", "w", no_move_handler=True)
def _a_word(event: E) -> TextObject:
"""
A 'word': caw and daw
"""
start, end = event.current_buffer.document.find_boundaries_of_current_word(
include_trailing_whitespace=True
)
return TextObject(start, end)
@text_object("i", "W", no_move_handler=True)
def _inner_WORD(event: E) -> TextObject:
"""
Inner 'WORD': ciW and diW
"""
start, end = event.current_buffer.document.find_boundaries_of_current_word(
WORD=True
)
return TextObject(start, end)
@text_object("a", "W", no_move_handler=True)
def _a_WORD(event: E) -> TextObject:
    """
    A 'WORD': caW and daW

    Like 'aw', but operates on whitespace-delimited WORDs and includes the
    trailing whitespace.
    """
    start, end = event.current_buffer.document.find_boundaries_of_current_word(
        WORD=True, include_trailing_whitespace=True
    )
    return TextObject(start, end)
@text_object("a", "p", no_move_handler=True)
def _paragraph(event: E) -> TextObject:
"""
Auto paragraph.
"""
start = event.current_buffer.document.start_of_paragraph()
end = event.current_buffer.document.end_of_paragraph(count=event.arg)
return TextObject(start, end)
@text_object("^")
def _start_of_line(event: E) -> TextObject:
"""'c^', 'd^' and '^': Soft start of line, after whitespace."""
return TextObject(
event.current_buffer.document.get_start_of_line_position(
after_whitespace=True
)
)
@text_object("0")
def _hard_start_of_line(event: E) -> TextObject:
"""
'c0', 'd0': Hard start of line, before whitespace.
(The move '0' key is implemented elsewhere, because a '0' could also change the `arg`.)
"""
return TextObject(
event.current_buffer.document.get_start_of_line_position(
after_whitespace=False
)
)
def create_ci_ca_handles(
    ci_start: str, ci_end: str, inner: bool, key: str | None = None
) -> None:
    # TODO: 'dat', 'dit' (tags, like xml).
    """
    Delete/Change string between this start and stop character. But keep these characters.
    This implements all the ci", ci<, ci{, ci(, di", di<, ca", ca<, ... combinations.
    """

    def handler(event: E) -> TextObject:
        if ci_start == ci_end:
            # Quotes: scan backwards/forwards for the same delimiter.
            start = event.current_buffer.document.find_backwards(
                ci_start, in_current_line=False
            )
            end = event.current_buffer.document.find(ci_end, in_current_line=False)
        else:
            # Brackets: find the enclosing matching pair.
            start = event.current_buffer.document.find_enclosing_bracket_left(
                ci_start, ci_end
            )
            end = event.current_buffer.document.find_enclosing_bracket_right(
                ci_start, ci_end
            )

        if start is not None and end is not None:
            # 'inner' excludes the delimiters themselves; 'a' includes them.
            offset = 0 if inner else 1
            return TextObject(start + 1 - offset, end + offset)
        else:
            # Nothing found.
            return TextObject(0)

    if key is None:
        # Register the handler for both delimiters (e.g. both 'i(' and 'i)').
        # Note: "ai"[inner] picks 'i' for inner=True, 'a' otherwise.
        text_object("ai"[inner], ci_start, no_move_handler=True)(handler)
        text_object("ai"[inner], ci_end, no_move_handler=True)(handler)
    else:
        # Register under an alias key (e.g. 'ib'/'ab' for parentheses).
        text_object("ai"[inner], key, no_move_handler=True)(handler)
for inner in (False, True):
for ci_start, ci_end in [
('"', '"'),
("'", "'"),
("`", "`"),
("[", "]"),
("<", ">"),
("{", "}"),
("(", ")"),
]:
create_ci_ca_handles(ci_start, ci_end, inner)
create_ci_ca_handles("(", ")", inner, "b") # 'dab', 'dib'
create_ci_ca_handles("{", "}", inner, "B") # 'daB', 'diB'
@text_object("{")
def _previous_section(event: E) -> TextObject:
"""
Move to previous blank-line separated section.
Implements '{', 'c{', 'd{', 'y{'
"""
index = event.current_buffer.document.start_of_paragraph(
count=event.arg, before=True
)
return TextObject(index)
@text_object("}")
def _next_section(event: E) -> TextObject:
"""
Move to next blank-line separated section.
Implements '}', 'c}', 'd}', 'y}'
"""
index = event.current_buffer.document.end_of_paragraph(
count=event.arg, after=True
)
return TextObject(index)
@text_object("f", Keys.Any)
def _find_next_occurrence(event: E) -> TextObject:
    """
    Go to the next occurrence of a character. Typing 'fx' moves the cursor
    to the next occurrence of 'x'.
    """
    # Remember this find, so ';' and ',' can repeat it later.
    event.app.vi_state.last_character_find = CharacterFind(event.data, False)

    offset = event.current_buffer.document.find(
        event.data, in_current_line=True, count=event.arg
    )
    if not offset:
        # Character not found on this line: don't move.
        return TextObject(0)
    return TextObject(offset, type=TextObjectType.INCLUSIVE)
@text_object("F", Keys.Any)
def _find_previous_occurrence(event: E) -> TextObject:
"""
Go to previous occurrence of character. Typing 'Fx' will move the
cursor to the previous occurrence of character. 'x'.
"""
event.app.vi_state.last_character_find = CharacterFind(event.data, True)
return TextObject(
event.current_buffer.document.find_backwards(
event.data, in_current_line=True, count=event.arg
)
or 0
)
@text_object("t", Keys.Any)
def _t(event: E) -> TextObject:
"""
Move right to the next occurrence of c, then one char backward.
"""
event.app.vi_state.last_character_find = CharacterFind(event.data, False)
match = event.current_buffer.document.find(
event.data, in_current_line=True, count=event.arg
)
if match:
return TextObject(match - 1, type=TextObjectType.INCLUSIVE)
else:
return TextObject(0)
@text_object("T", Keys.Any)
def _T(event: E) -> TextObject:
    """
    Move left to the previous occurrence of c, then one char forward.
    """
    # Remember this (backwards) find, so ';' and ',' can repeat it later.
    event.app.vi_state.last_character_find = CharacterFind(event.data, True)

    pos = event.current_buffer.document.find_backwards(
        event.data, in_current_line=True, count=event.arg
    )
    if not pos:
        # Not found: don't move.
        return TextObject(0)
    return TextObject(pos + 1)
def repeat(reverse: bool) -> None:
"""
Create ',' and ';' commands.
"""
@text_object("," if reverse else ";")
def _(event: E) -> TextObject:
"""
Repeat the last 'f'/'F'/'t'/'T' command.
"""
pos: int | None = 0
vi_state = event.app.vi_state
type = TextObjectType.EXCLUSIVE
if vi_state.last_character_find:
char = vi_state.last_character_find.character
backwards = vi_state.last_character_find.backwards
if reverse:
backwards = not backwards
if backwards:
pos = event.current_buffer.document.find_backwards(
char, in_current_line=True, count=event.arg
)
else:
pos = event.current_buffer.document.find(
char, in_current_line=True, count=event.arg
)
type = TextObjectType.INCLUSIVE
if pos:
return TextObject(pos, type=type)
else:
return TextObject(0)
repeat(True)
repeat(False)
@text_object("h")
@text_object("left")
def _left(event: E) -> TextObject:
"""
Implements 'ch', 'dh', 'h': Cursor left.
"""
return TextObject(
event.current_buffer.document.get_cursor_left_position(count=event.arg)
)
@text_object("j", no_move_handler=True, no_selection_handler=True)
# Note: We also need `no_selection_handler`, because in selection mode,
# we prefer the other 'j' binding that keeps `buffer.preferred_column`.
def _down(event: E) -> TextObject:
    """
    Implements 'cj', 'dj', 'j', ... Cursor down.
    """
    return TextObject(
        event.current_buffer.document.get_cursor_down_position(count=event.arg),
        type=TextObjectType.LINEWISE,
    )
@text_object("k", no_move_handler=True, no_selection_handler=True)
def _up(event: E) -> TextObject:
"""
Implements 'ck', 'dk', 'k', ... Cursor up.
"""
return TextObject(
event.current_buffer.document.get_cursor_up_position(count=event.arg),
type=TextObjectType.LINEWISE,
)
@text_object("l")
@text_object(" ")
@text_object("right")
def _right(event: E) -> TextObject:
"""
Implements 'cl', 'dl', 'l', 'c ', 'd ', ' '. Cursor right.
"""
return TextObject(
event.current_buffer.document.get_cursor_right_position(count=event.arg)
)
@text_object("H")
def _top_of_screen(event: E) -> TextObject:
"""
Moves to the start of the visible region. (Below the scroll offset.)
Implements 'cH', 'dH', 'H'.
"""
w = event.app.layout.current_window
b = event.current_buffer
if w and w.render_info:
# When we find a Window that has BufferControl showing this window,
# move to the start of the visible area.
pos = (
b.document.translate_row_col_to_index(
w.render_info.first_visible_line(after_scroll_offset=True), 0
)
- b.cursor_position
)
else:
# Otherwise, move to the start of the input.
pos = -len(b.document.text_before_cursor)
return TextObject(pos, type=TextObjectType.LINEWISE)
@text_object("M")
def _middle_of_screen(event: E) -> TextObject:
"""
Moves cursor to the vertical center of the visible region.
Implements 'cM', 'dM', 'M'.
"""
w = event.app.layout.current_window
b = event.current_buffer
if w and w.render_info:
# When we find a Window that has BufferControl showing this window,
# move to the center of the visible area.
pos = (
b.document.translate_row_col_to_index(
w.render_info.center_visible_line(), 0
)
- b.cursor_position
)
else:
# Otherwise, move to the start of the input.
pos = -len(b.document.text_before_cursor)
return TextObject(pos, type=TextObjectType.LINEWISE)
@text_object("L")
def _end_of_screen(event: E) -> TextObject:
"""
Moves to the end of the visible region. (Above the scroll offset.)
"""
w = event.app.layout.current_window
b = event.current_buffer
if w and w.render_info:
# When we find a Window that has BufferControl showing this window,
# move to the end of the visible area.
pos = (
b.document.translate_row_col_to_index(
w.render_info.last_visible_line(before_scroll_offset=True), 0
)
- b.cursor_position
)
else:
# Otherwise, move to the end of the input.
pos = len(b.document.text_after_cursor)
return TextObject(pos, type=TextObjectType.LINEWISE)
@text_object("n", no_move_handler=True)
def _search_next(event: E) -> TextObject:
"""
Search next.
"""
buff = event.current_buffer
search_state = event.app.current_search_state
cursor_position = buff.get_search_position(
search_state, include_current_position=False, count=event.arg
)
return TextObject(cursor_position - buff.cursor_position)
@handle("n", filter=vi_navigation_mode)
def _search_next2(event: E) -> None:
"""
Search next in navigation mode. (This goes through the history.)
"""
search_state = event.app.current_search_state
event.current_buffer.apply_search(
search_state, include_current_position=False, count=event.arg
)
@text_object("N", no_move_handler=True)
def _search_previous(event: E) -> TextObject:
"""
Search previous.
"""
buff = event.current_buffer
search_state = event.app.current_search_state
cursor_position = buff.get_search_position(
~search_state, include_current_position=False, count=event.arg
)
return TextObject(cursor_position - buff.cursor_position)
@handle("N", filter=vi_navigation_mode)
def _search_previous2(event: E) -> None:
"""
Search previous in navigation mode. (This goes through the history.)
"""
search_state = event.app.current_search_state
event.current_buffer.apply_search(
~search_state, include_current_position=False, count=event.arg
)
@handle("z", "+", filter=vi_navigation_mode | vi_selection_mode)
@handle("z", "t", filter=vi_navigation_mode | vi_selection_mode)
@handle("z", "enter", filter=vi_navigation_mode | vi_selection_mode)
def _scroll_top(event: E) -> None:
    """
    Scrolls the window to make the current line the first line in the
    visible region.
    """
    b = event.current_buffer
    # Scrolling to the cursor's row puts that row at the top of the window.
    event.app.layout.current_window.vertical_scroll = b.document.cursor_position_row
@handle("z", "-", filter=vi_navigation_mode | vi_selection_mode)
@handle("z", "b", filter=vi_navigation_mode | vi_selection_mode)
def _scroll_bottom(event: E) -> None:
    """
    Scrolls the window to make the current line the last line in the
    visible region.
    """
    # We can safely set the scroll offset to zero; the Window will make
    # sure that it scrolls at least enough to make the cursor visible
    # again.
    event.app.layout.current_window.vertical_scroll = 0
@handle("z", "z", filter=vi_navigation_mode | vi_selection_mode)
def _scroll_center(event: E) -> None:
"""
Center Window vertically around cursor.
"""
w = event.app.layout.current_window
b = event.current_buffer
if w and w.render_info:
info = w.render_info
# Calculate the offset that we need in order to position the row
# containing the cursor in the center.
scroll_height = info.window_height // 2
y = max(0, b.document.cursor_position_row - 1)
height = 0
while y > 0:
line_height = info.get_height_for_line(y)
if height + line_height < scroll_height:
height += line_height
y -= 1
else:
break
w.vertical_scroll = y
@text_object("%")
def _goto_corresponding_bracket(event: E) -> TextObject:
    """
    Implements 'c%', 'd%', '%', 'y%' (Move to corresponding bracket.)
    If an 'arg' has been given, go to this % position in the file instead.
    """
    buffer = event.current_buffer

    if event._arg:
        # If 'arg' has been given, the meaning of % is to go to the 'x%'
        # row in the file.
        if 0 < event.arg <= 100:
            absolute_index = buffer.document.translate_row_col_to_index(
                int((event.arg * buffer.document.line_count - 1) / 100), 0
            )
            return TextObject(
                absolute_index - buffer.document.cursor_position,
                type=TextObjectType.LINEWISE,
            )
        else:
            # Out-of-range percentage (vi beeps here).
            return TextObject(0)  # Do nothing.
    else:
        # Move to the corresponding opening/closing bracket (()'s, []'s and {}'s).
        match = buffer.document.find_matching_bracket_position()
        if match:
            return TextObject(match, type=TextObjectType.INCLUSIVE)
        else:
            # No matching bracket found: don't move.
            return TextObject(0)
@text_object("|")
def _to_column(event: E) -> TextObject:
"""
Move to the n-th column (you may specify the argument n by typing it on
number keys, for example, 20|).
"""
return TextObject(
event.current_buffer.document.get_column_cursor_position(event.arg - 1)
)
@text_object("g", "g")
def _goto_first_line(event: E) -> TextObject:
"""
Go to the start of the very first line.
Implements 'gg', 'cgg', 'ygg'
"""
d = event.current_buffer.document
if event._arg:
# Move to the given line.
return TextObject(
d.translate_row_col_to_index(event.arg - 1, 0) - d.cursor_position,
type=TextObjectType.LINEWISE,
)
else:
# Move to the top of the input.
return TextObject(
d.get_start_of_document_position(), type=TextObjectType.LINEWISE
)
@text_object("g", "_")
def _goto_last_line(event: E) -> TextObject:
"""
Go to last non-blank of line.
'g_', 'cg_', 'yg_', etc..
"""
return TextObject(
event.current_buffer.document.last_non_blank_of_current_line_position(),
type=TextObjectType.INCLUSIVE,
)
@text_object("g", "e")
def _ge(event: E) -> TextObject:
"""
Go to last character of previous word.
'ge', 'cge', 'yge', etc..
"""
prev_end = event.current_buffer.document.find_previous_word_ending(
count=event.arg
)
return TextObject(
prev_end - 1 if prev_end is not None else 0, type=TextObjectType.INCLUSIVE
)
@text_object("g", "E")
def _gE(event: E) -> TextObject:
"""
Go to last character of previous WORD.
'gE', 'cgE', 'ygE', etc..
"""
prev_end = event.current_buffer.document.find_previous_word_ending(
count=event.arg, WORD=True
)
return TextObject(
prev_end - 1 if prev_end is not None else 0, type=TextObjectType.INCLUSIVE
)
@text_object("g", "m")
def _gm(event: E) -> TextObject:
"""
Like g0, but half a screenwidth to the right. (Or as much as possible.)
"""
w = event.app.layout.current_window
buff = event.current_buffer
if w and w.render_info:
width = w.render_info.window_width
start = buff.document.get_start_of_line_position(after_whitespace=False)
start += int(min(width / 2, len(buff.document.current_line)))
return TextObject(start, type=TextObjectType.INCLUSIVE)
return TextObject(0)
@text_object("G")
def _last_line(event: E) -> TextObject:
"""
Go to the end of the document. (If no arg has been given.)
"""
buf = event.current_buffer
return TextObject(
buf.document.translate_row_col_to_index(buf.document.line_count - 1, 0)
- buf.cursor_position,
type=TextObjectType.LINEWISE,
)
#
# *** Other ***
#
@handle("G", filter=has_arg)
def _to_nth_history_line(event: E) -> None:
"""
If an argument is given, move to this line in the history. (for
example, 15G)
"""
event.current_buffer.go_to_history(event.arg - 1)
for n in "123456789":
@handle(
n,
filter=vi_navigation_mode
| vi_selection_mode
| vi_waiting_for_text_object_mode,
)
def _arg(event: E) -> None:
"""
Always handle numerics in navigation mode as arg.
"""
event.append_to_arg_count(event.data)
@handle(
    "0",
    filter=(
        vi_navigation_mode | vi_selection_mode | vi_waiting_for_text_object_mode
    )
    & has_arg,
)
def _0_arg(event: E) -> None:
    """
    Zero when an argument was already given.

    (Without a pending count, '0' is the start-of-line motion, which is
    bound elsewhere; this handler only fires when `has_arg` holds.)
    """
    event.append_to_arg_count(event.data)
@handle(Keys.Any, filter=vi_replace_mode)
def _insert_text(event: E) -> None:
"""
Insert data at cursor position.
"""
event.current_buffer.insert_text(event.data, overwrite=True)
@handle(Keys.Any, filter=vi_replace_single_mode)
def _replace_single(event: E) -> None:
"""
Replace single character at cursor position.
"""
event.current_buffer.insert_text(event.data, overwrite=True)
event.current_buffer.cursor_position -= 1
event.app.vi_state.input_mode = InputMode.NAVIGATION
@handle(
Keys.Any,
filter=vi_insert_multiple_mode,
save_before=(lambda e: not e.is_repeat),
)
def _insert_text_multiple_cursors(event: E) -> None:
"""
Insert data at multiple cursor positions at once.
(Usually a result of pressing 'I' or 'A' in block-selection mode.)
"""
buff = event.current_buffer
original_text = buff.text
# Construct new text.
text = []
p = 0
for p2 in buff.multiple_cursor_positions:
text.append(original_text[p:p2])
text.append(event.data)
p = p2
text.append(original_text[p:])
# Shift all cursor positions.
new_cursor_positions = [
pos + i + 1 for i, pos in enumerate(buff.multiple_cursor_positions)
]
# Set result.
buff.text = "".join(text)
buff.multiple_cursor_positions = new_cursor_positions
buff.cursor_position += 1
@handle("backspace", filter=vi_insert_multiple_mode)
def _delete_before_multiple_cursors(event: E) -> None:
"""
Backspace, using multiple cursors.
"""
buff = event.current_buffer
original_text = buff.text
# Construct new text.
deleted_something = False
text = []
p = 0
for p2 in buff.multiple_cursor_positions:
if p2 > 0 and original_text[p2 - 1] != "\n": # Don't delete across lines.
text.append(original_text[p : p2 - 1])
deleted_something = True
else:
text.append(original_text[p:p2])
p = p2
text.append(original_text[p:])
if deleted_something:
# Shift all cursor positions.
lengths = [len(part) for part in text[:-1]]
new_cursor_positions = list(accumulate(lengths))
# Set result.
buff.text = "".join(text)
buff.multiple_cursor_positions = new_cursor_positions
buff.cursor_position -= 1
else:
event.app.output.bell()
@handle("delete", filter=vi_insert_multiple_mode)
def _delete_after_multiple_cursors(event: E) -> None:
"""
Delete, using multiple cursors.
"""
buff = event.current_buffer
original_text = buff.text
# Construct new text.
deleted_something = False
text = []
new_cursor_positions = []
p = 0
for p2 in buff.multiple_cursor_positions:
text.append(original_text[p:p2])
if p2 >= len(original_text) or original_text[p2] == "\n":
# Don't delete across lines.
p = p2
else:
p = p2 + 1
deleted_something = True
text.append(original_text[p:])
if deleted_something:
# Shift all cursor positions.
lengths = [len(part) for part in text[:-1]]
new_cursor_positions = list(accumulate(lengths))
# Set result.
buff.text = "".join(text)
buff.multiple_cursor_positions = new_cursor_positions
else:
event.app.output.bell()
@handle("left", filter=vi_insert_multiple_mode)
def _left_multiple(event: E) -> None:
"""
Move all cursors to the left.
(But keep all cursors on the same line.)
"""
buff = event.current_buffer
new_positions = []
for p in buff.multiple_cursor_positions:
if buff.document.translate_index_to_position(p)[1] > 0:
p -= 1
new_positions.append(p)
buff.multiple_cursor_positions = new_positions
if buff.document.cursor_position_col > 0:
buff.cursor_position -= 1
@handle("right", filter=vi_insert_multiple_mode)
def _right_multiple(event: E) -> None:
"""
Move all cursors to the right.
(But keep all cursors on the same line.)
"""
buff = event.current_buffer
new_positions = []
for p in buff.multiple_cursor_positions:
row, column = buff.document.translate_index_to_position(p)
if column < len(buff.document.lines[row]):
p += 1
new_positions.append(p)
buff.multiple_cursor_positions = new_positions
if not buff.document.is_cursor_at_the_end_of_line:
buff.cursor_position += 1
@handle("up", filter=vi_insert_multiple_mode)
@handle("down", filter=vi_insert_multiple_mode)
def _updown_multiple(event: E) -> None:
"""
Ignore all up/down key presses when in multiple cursor mode.
"""
@handle("c-x", "c-l", filter=vi_insert_mode)
def _complete_line(event: E) -> None:
"""
Pressing the ControlX - ControlL sequence in Vi mode does line
completion based on the other lines in the document and the history.
"""
event.current_buffer.start_history_lines_completion()
@handle("c-x", "c-f", filter=vi_insert_mode)
def _complete_filename(event: E) -> None:
"""
Complete file names.
"""
# TODO
pass
@handle("c-k", filter=vi_insert_mode | vi_replace_mode)
def _digraph(event: E) -> None:
"""
Go into digraph mode.
"""
event.app.vi_state.waiting_for_digraph = True
@handle(Keys.Any, filter=vi_digraph_mode & ~digraph_symbol_1_given)
def _digraph1(event: E) -> None:
"""
First digraph symbol.
"""
event.app.vi_state.digraph_symbol1 = event.data
@handle(Keys.Any, filter=vi_digraph_mode & digraph_symbol_1_given)
def _create_digraph(event: E) -> None:
    """
    Insert digraph.

    Looks up the (symbol1, symbol2) pair in the DIGRAPHS table (trying the
    reversed order as well), inserts the resulting character, and always
    leaves digraph mode.
    """
    try:
        # Lookup.
        code: tuple[str, str] = (
            event.app.vi_state.digraph_symbol1 or "",
            event.data,
        )
        if code not in DIGRAPHS:
            code = code[::-1]  # Try reversing.
        symbol = DIGRAPHS[code]
    except KeyError:
        # Unknown digraph.
        event.app.output.bell()
    else:
        # Insert digraph.
        overwrite = event.app.vi_state.input_mode == InputMode.REPLACE
        event.current_buffer.insert_text(chr(symbol), overwrite=overwrite)
    finally:
        # Always reset the digraph state, for both the success and the
        # failure path. (The `finally` clause runs after `else`, so the
        # duplicate reset that used to live in the `else` branch was
        # redundant and has been removed.)
        event.app.vi_state.waiting_for_digraph = False
        event.app.vi_state.digraph_symbol1 = None
@handle("c-o", filter=vi_insert_mode | vi_replace_mode)
def _quick_normal_mode(event: E) -> None:
"""
Go into normal mode for one single action.
"""
event.app.vi_state.temporary_navigation_mode = True
@handle("q", Keys.Any, filter=vi_navigation_mode & ~vi_recording_macro)
def _start_macro(event: E) -> None:
"""
Start recording macro.
"""
c = event.key_sequence[1].data
if c in vi_register_names:
vi_state = event.app.vi_state
vi_state.recording_register = c
vi_state.current_recording = ""
@handle("q", filter=vi_navigation_mode & vi_recording_macro)
def _stop_macro(event: E) -> None:
"""
Stop recording macro.
"""
vi_state = event.app.vi_state
# Store and stop recording.
if vi_state.recording_register:
vi_state.named_registers[vi_state.recording_register] = ClipboardData(
vi_state.current_recording
)
vi_state.recording_register = None
vi_state.current_recording = ""
@handle("@", Keys.Any, filter=vi_navigation_mode, record_in_macro=False)
def _execute_macro(event: E) -> None:
    """
    Execute macro.

    Notice that we pass `record_in_macro=False`. This ensures that the `@x`
    keys don't appear in the recording itself. This function inserts the
    body of the called macro back into the KeyProcessor, so these keys will
    be added later on to the macro if their handlers have
    `record_in_macro=True`.
    """
    # Retrieve macro.
    c = event.key_sequence[1].data
    try:
        macro = event.app.vi_state.named_registers[c]
    except KeyError:
        # Nothing recorded under this register: do nothing.
        return

    # Expand macro (which is a string in the register), in individual keys.
    # Use vt100 parser for this.
    keys: list[KeyPress] = []
    parser = Vt100Parser(keys.append)
    parser.feed(macro.text)
    parser.flush()

    # Now feed keys back to the input processor, once per repeat count.
    for _ in range(event.arg):
        event.app.key_processor.feed_multiple(keys, first=True)
return ConditionalKeyBindings(key_bindings, vi_mode)
def load_vi_search_bindings() -> KeyBindingsBase:
key_bindings = KeyBindings()
handle = key_bindings.add
from . import search
# Vi-style forward search.
handle(
"/",
filter=(vi_navigation_mode | vi_selection_mode) & ~vi_search_direction_reversed,
)(search.start_forward_incremental_search)
handle(
"?",
filter=(vi_navigation_mode | vi_selection_mode) & vi_search_direction_reversed,
)(search.start_forward_incremental_search)
handle("c-s")(search.start_forward_incremental_search)
# Vi-style backward search.
handle(
"?",
filter=(vi_navigation_mode | vi_selection_mode) & ~vi_search_direction_reversed,
)(search.start_reverse_incremental_search)
handle(
"/",
filter=(vi_navigation_mode | vi_selection_mode) & vi_search_direction_reversed,
)(search.start_reverse_incremental_search)
handle("c-r")(search.start_reverse_incremental_search)
# Apply the search. (At the / or ? prompt.)
handle("enter", filter=is_searching)(search.accept_search)
handle("c-r", filter=is_searching)(search.reverse_incremental_search)
handle("c-s", filter=is_searching)(search.forward_incremental_search)
handle("c-c")(search.abort_search)
handle("c-g")(search.abort_search)
handle("backspace", filter=search_buffer_is_empty)(search.abort_search)
# Handle escape. This should accept the search, just like readline.
# `abort_search` would be a meaningful alternative.
handle("escape")(search.accept_search)
return ConditionalKeyBindings(key_bindings, vi_mode)
| TextObject |
python | allegroai__clearml | clearml/backend_api/services/v2_20/projects.py | {
"start": 42719,
"end": 45448
class DeleteRequest(Request):
    """
    Deletes a project

    :param project: Project ID
    :type project: str
    :param force: If not true, fails if project has tasks. If true, and project has tasks, they will be unassigned
    :type force: bool
    :param delete_contents: If set to 'true' then the project tasks and models will be deleted.
        Otherwise their project property will be unassigned. Default value is 'false'
    :type delete_contents: bool
    """

    # API endpoint identification.
    _service = "projects"
    _action = "delete"
    _version = "2.20"
    # JSON schema describing the request payload (used for validation).
    _schema = {
        "definitions": {},
        "properties": {
            "delete_contents": {
                "description": "If set to 'true' then the project tasks, models and dataviews will be deleted. Otherwise their project property will be unassigned. Default value is 'false'",
                "type": "boolean",
            },
            "force": {
                "default": False,
                "description": "If not true, fails if project has tasks. If true, and project has tasks, they will be unassigned",
                "type": "boolean",
            },
            "project": {"description": "Project ID", "type": "string"},
        },
        "required": ["project"],
        "type": "object",
    }

    def __init__(
        self, project: str, force: Optional[bool] = False, delete_contents: Optional[bool] = None, **kwargs: Any
    ) -> None:
        # NOTE: the class was previously (mis)named in a way that made this
        # `super(DeleteRequest, ...)` call raise NameError; the class name now
        # matches the reference used here.
        super(DeleteRequest, self).__init__(**kwargs)
        self.project = project
        self.force = force
        self.delete_contents = delete_contents

    @schema_property("project")
    def project(self) -> str:
        return self._property_project

    @project.setter
    def project(self, value: str) -> None:
        if value is None:
            self._property_project = None
            return
        self.assert_isinstance(value, "project", six.string_types)
        self._property_project = value

    @schema_property("force")
    def force(self) -> Optional[bool]:
        return self._property_force

    @force.setter
    def force(self, value: Optional[bool]) -> None:
        if value is None:
            self._property_force = None
            return
        self.assert_isinstance(value, "force", (bool,))
        self._property_force = value

    @schema_property("delete_contents")
    def delete_contents(self) -> Optional[bool]:
        return self._property_delete_contents

    @delete_contents.setter
    def delete_contents(self, value: Optional[bool]) -> None:
        if value is None:
            self._property_delete_contents = None
            return
        self.assert_isinstance(value, "delete_contents", (bool,))
        self._property_delete_contents = value
| DeleteRequest |
python | pypa__pip | tests/unit/test_network_cache.py | {
"start": 387,
"end": 4396
class TestSafeFileCache:
    """
    Tests for `SafeFileCache`.

    (Restored the `Test*`-prefixed class name: without it, pytest's default
    discovery would not collect these tests.)

    The no_perms test are useless on Windows since SafeFileCache uses
    pip._internal.utils.filesystem.check_path_owner which is based on
    os.geteuid which is absent on Windows.
    """

    def test_cache_roundtrip(self, cache_tmpdir: Path) -> None:
        cache = SafeFileCache(os.fspath(cache_tmpdir))
        assert cache.get("test key") is None
        cache.set("test key", b"a test string")
        # Body hasn't been stored yet, so the entry isn't valid yet
        assert cache.get("test key") is None
        # With a body, the cache entry is valid:
        cache.set_body("test key", b"body")
        assert cache.get("test key") == b"a test string"
        cache.delete("test key")
        assert cache.get("test key") is None

    def test_cache_roundtrip_body(self, cache_tmpdir: Path) -> None:
        cache = SafeFileCache(os.fspath(cache_tmpdir))
        assert cache.get_body("test key") is None
        cache.set_body("test key", b"a test string")
        # Metadata isn't available, so the entry isn't valid yet (this
        # shouldn't happen, but just in case)
        assert cache.get_body("test key") is None
        # With metadata, the cache entry is valid:
        cache.set("test key", b"metadata")
        body = cache.get_body("test key")
        assert body is not None
        with body:
            assert body.read() == b"a test string"
        cache.delete("test key")
        assert cache.get_body("test key") is None

    @pytest.mark.skipif("sys.platform == 'win32'")
    def test_safe_get_no_perms(
        self, cache_tmpdir: Path, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        monkeypatch.setattr(os.path, "exists", lambda x: True)
        # Reads from an unreadable cache dir must not raise.
        with chmod(cache_tmpdir, 000):
            cache = SafeFileCache(os.fspath(cache_tmpdir))
            cache.get("foo")

    @pytest.mark.skipif("sys.platform == 'win32'")
    def test_safe_set_no_perms(self, cache_tmpdir: Path) -> None:
        # Writes to an unwritable cache dir must not raise.
        with chmod(cache_tmpdir, 000):
            cache = SafeFileCache(os.fspath(cache_tmpdir))
            cache.set("foo", b"bar")

    @pytest.mark.skipif("sys.platform == 'win32'")
    def test_safe_delete_no_perms(self, cache_tmpdir: Path) -> None:
        # Deletes in an unwritable cache dir must not raise.
        with chmod(cache_tmpdir, 000):
            cache = SafeFileCache(os.fspath(cache_tmpdir))
            cache.delete("foo")

    def test_cache_hashes_are_same(self, cache_tmpdir: Path) -> None:
        # SafeFileCache must map keys to the same paths as cachecontrol's
        # FileCache, so on-disk entries stay compatible.
        cache = SafeFileCache(os.fspath(cache_tmpdir))
        key = "test key"
        fake_cache = Mock(FileCache, directory=cache.directory, encode=FileCache.encode)
        assert cache._get_cache_path(key) == FileCache._fn(fake_cache, key)

    @pytest.mark.skipif("sys.platform == 'win32'")
    @pytest.mark.skipif(
        os.chmod not in os.supports_fd and os.chmod not in os.supports_follow_symlinks,
        reason="requires os.chmod to support file descriptors or not follow symlinks",
    )
    @pytest.mark.parametrize(
        "perms, expected_perms", [(0o300, 0o600), (0o700, 0o600), (0o777, 0o666)]
    )
    def test_cache_inherit_perms(
        self, cache_tmpdir: Path, perms: int, expected_perms: int
    ) -> None:
        key = "foo"
        with chmod(cache_tmpdir, perms):
            cache = SafeFileCache(os.fspath(cache_tmpdir))
            cache.set(key, b"bar")
        assert (os.stat(cache._get_cache_path(key)).st_mode & 0o777) == expected_perms

    @pytest.mark.skipif("sys.platform == 'win32'")
    def test_cache_not_inherit_perms(
        self, cache_tmpdir: Path, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        # Pretend os.chmod supports neither fds nor nofollow, so the cache
        # falls back to its default 0o600 permissions.
        monkeypatch.setattr(os, "supports_fd", os.supports_fd - {os.chmod})
        monkeypatch.setattr(
            os, "supports_follow_symlinks", os.supports_follow_symlinks - {os.chmod}
        )
        key = "foo"
        with chmod(cache_tmpdir, 0o777):
            cache = SafeFileCache(os.fspath(cache_tmpdir))
            cache.set(key, b"bar")
        assert (os.stat(cache._get_cache_path(key)).st_mode & 0o777) == 0o600
| TestSafeFileCache |
python | tensorflow__tensorflow | tensorflow/core/function/polymorphism/function_type_test.py | {
"start": 27574,
"end": 29611
} | class ____(test.TestCase, parameterized.TestCase):
@parameterized.product(
name=["arg_0", "param"],
kind=[
function_type.Parameter.POSITIONAL_ONLY,
function_type.Parameter.POSITIONAL_OR_KEYWORD
],
optional=[True, False],
type_contraint=[None, trace_type.from_value(1)])
def testParameter(self, name, kind, optional, type_contraint):
original = function_type.Parameter(name, kind, optional, type_contraint)
expected_type_constraint = serialization.serialize(
type_contraint) if type_contraint else None
expected = function_type_pb2.Parameter(
name=name,
kind=function_type.PY_TO_PROTO_ENUM[kind],
is_optional=optional,
type_constraint=expected_type_constraint)
self.assertEqual(original.to_proto(), expected)
self.assertEqual(function_type.Parameter.from_proto(expected), original)
def testFunctionType(self):
original = function_type.FunctionType([
function_type.Parameter("a", function_type.Parameter.POSITIONAL_ONLY,
False, None),
], collections.OrderedDict([("b", trace_type.from_value(1))]))
expected = function_type_pb2.FunctionType(
parameters=[
function_type_pb2.Parameter(
name="a",
kind=function_type_pb2.Parameter.Kind.POSITIONAL_ONLY,
is_optional=False)
],
captures=[
function_type_pb2.Capture(
name="b",
type_constraint=serialization.serialize(
trace_type.from_value(1)))
])
self.assertEqual(original.to_proto(), expected)
self.assertEqual(function_type.FunctionType.from_proto(expected), original)
def testCapturedDefaultValueStr(self):
f_type = function_type.FunctionType([
function_type.Parameter(
"a", function_type.Parameter.POSITIONAL_OR_KEYWORD, True, None
),
])
self.assertEqual(str(f_type), "(a=<captured_default_value>)")
| SerializationTest |
python | huggingface__transformers | src/transformers/models/ministral/modular_ministral.py | {
"start": 11959,
"end": 12044
} | class ____(Qwen2ForSequenceClassification):
pass
| MinistralForSequenceClassification |
python | scikit-learn__scikit-learn | sklearn/pipeline.py | {
"start": 59192,
"end": 82478
} | class ____(TransformerMixin, _BaseComposition):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Parameters of the transformers may be set using its name and the parameter
name separated by a '__'. A transformer may be replaced entirely by
setting the parameter with its name to another transformer, removed by
setting to 'drop' or disabled by setting to 'passthrough' (features are
passed without transformation).
Read more in the :ref:`User Guide <feature_union>`.
.. versionadded:: 0.13
Parameters
----------
transformer_list : list of (str, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer. The transformer can
be 'drop' for it to be ignored or can be 'passthrough' for features to
be passed unchanged.
.. versionadded:: 1.1
Added the option `"passthrough"`.
.. versionchanged:: 0.22
Deprecated `None` as a transformer in favor of 'drop'.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionchanged:: v0.20
`n_jobs` default changed from 1 to None
transformer_weights : dict, default=None
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
Raises ValueError if key not present in ``transformer_list``.
verbose : bool, default=False
If True, the time elapsed while fitting each transformer will be
printed as it is completed.
verbose_feature_names_out : bool, default=True
If True, :meth:`get_feature_names_out` will prefix all feature names
with the name of the transformer that generated that feature.
If False, :meth:`get_feature_names_out` will not prefix any feature
names and will error if feature names are not unique.
.. versionadded:: 1.5
Attributes
----------
named_transformers : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
Read-only attribute to access any transformer parameter by user
given name. Keys are transformer names and values are
transformer parameters.
.. versionadded:: 1.2
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying first transformer in `transformer_list` exposes such an
attribute when fit.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when
`X` has feature names that are all strings.
.. versionadded:: 1.3
See Also
--------
make_union : Convenience function for simplified feature union
construction.
Examples
--------
>>> from sklearn.pipeline import FeatureUnion
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> union = FeatureUnion([("pca", PCA(n_components=1)),
... ("svd", TruncatedSVD(n_components=2))])
>>> X = [[0., 1., 3], [2., 2., 5]]
>>> union.fit_transform(X)
array([[-1.5 , 3.04, -0.872],
[ 1.5 , 5.72, 0.463]])
>>> # An estimator's parameter can be set using '__' syntax
>>> union.set_params(svd__n_components=1).fit_transform(X)
array([[-1.5 , 3.04],
[ 1.5 , 5.72]])
For a more detailed example of usage, see
:ref:`sphx_glr_auto_examples_compose_plot_feature_union.py`.
"""
def __init__(
self,
transformer_list,
*,
n_jobs=None,
transformer_weights=None,
verbose=False,
verbose_feature_names_out=True,
):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
self.verbose = verbose
self.verbose_feature_names_out = verbose_feature_names_out
def set_output(self, *, transform=None):
"""Set the output container when `"transform"` and `"fit_transform"` are called.
`set_output` will set the output of all estimators in `transformer_list`.
Parameters
----------
transform : {"default", "pandas", "polars"}, default=None
Configure output of `transform` and `fit_transform`.
- `"default"`: Default output format of a transformer
- `"pandas"`: DataFrame output
- `"polars"`: Polars output
- `None`: Transform configuration is unchanged
Returns
-------
self : estimator instance
Estimator instance.
"""
super().set_output(transform=transform)
for _, step, _ in self._iter():
_safe_set_output(step, transform=transform)
return self
@property
def named_transformers(self):
# Use Bunch object to improve autocomplete
return Bunch(**dict(self.transformer_list))
def get_params(self, deep=True):
"""Get parameters for this estimator.
Returns the parameters given in the constructor as well as the
estimators contained within the `transformer_list` of the
`FeatureUnion`.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return self._get_params("transformer_list", deep=deep)
def set_params(self, **kwargs):
"""Set the parameters of this estimator.
Valid parameter keys can be listed with ``get_params()``. Note that
you can directly set the parameters of the estimators contained in
`transformer_list`.
Parameters
----------
**kwargs : dict
Parameters of this estimator or parameters of estimators contained
in `transform_list`. Parameters of the transformers may be set
using its name and the parameter name separated by a '__'.
Returns
-------
self : object
FeatureUnion class instance.
"""
self._set_params("transformer_list", **kwargs)
return self
def _validate_transformers(self):
names, transformers = zip(*self.transformer_list)
# validate names
self._validate_names(names)
# validate estimators
for t in transformers:
if t in ("drop", "passthrough"):
continue
if not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr(
t, "transform"
):
raise TypeError(
"All estimators should implement fit and "
"transform. '%s' (type %s) doesn't" % (t, type(t))
)
def _validate_transformer_weights(self):
if not self.transformer_weights:
return
transformer_names = set(name for name, _ in self.transformer_list)
for name in self.transformer_weights:
if name not in transformer_names:
raise ValueError(
f'Attempting to weight transformer "{name}", '
"but it is not present in transformer_list."
)
def _iter(self):
"""
Generate (name, trans, weight) tuples excluding None and
'drop' transformers.
"""
get_weight = (self.transformer_weights or {}).get
for name, trans in self.transformer_list:
if trans == "drop":
continue
if trans == "passthrough":
trans = FunctionTransformer(feature_names_out="one-to-one")
yield (name, trans, get_weight(name))
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
# List of tuples (name, feature_names_out)
transformer_with_feature_names_out = []
for name, trans, _ in self._iter():
if not hasattr(trans, "get_feature_names_out"):
raise AttributeError(
"Transformer %s (type %s) does not provide get_feature_names_out."
% (str(name), type(trans).__name__)
)
feature_names_out = trans.get_feature_names_out(input_features)
transformer_with_feature_names_out.append((name, feature_names_out))
return self._add_prefix_for_feature_names_out(
transformer_with_feature_names_out
)
def _add_prefix_for_feature_names_out(self, transformer_with_feature_names_out):
"""Add prefix for feature names out that includes the transformer names.
Parameters
----------
transformer_with_feature_names_out : list of tuples of (str, array-like of str)
The tuple consistent of the transformer's name and its feature names out.
Returns
-------
feature_names_out : ndarray of shape (n_features,), dtype=str
Transformed feature names.
"""
if self.verbose_feature_names_out:
# Prefix the feature names out with the transformers name
names = list(
chain.from_iterable(
(f"{name}__{i}" for i in feature_names_out)
for name, feature_names_out in transformer_with_feature_names_out
)
)
return np.asarray(names, dtype=object)
# verbose_feature_names_out is False
# Check that names are all unique without a prefix
feature_names_count = Counter(
chain.from_iterable(s for _, s in transformer_with_feature_names_out)
)
top_6_overlap = [
name for name, count in feature_names_count.most_common(6) if count > 1
]
top_6_overlap.sort()
if top_6_overlap:
if len(top_6_overlap) == 6:
# There are more than 5 overlapping names, we only show the 5
# of the feature names
names_repr = str(top_6_overlap[:5])[:-1] + ", ...]"
else:
names_repr = str(top_6_overlap)
raise ValueError(
f"Output feature names: {names_repr} are not unique. Please set "
"verbose_feature_names_out=True to add prefixes to feature names"
)
return np.concatenate(
[name for _, name in transformer_with_feature_names_out],
)
def fit(self, X, y=None, **fit_params):
"""Fit all transformers using X.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data, used to fit transformers.
y : array-like of shape (n_samples, n_outputs), default=None
Targets for supervised learning.
**fit_params : dict, default=None
- If `enable_metadata_routing=False` (default):
Parameters directly passed to the `fit` methods of the
sub-transformers.
- If `enable_metadata_routing=True`:
Parameters safely routed to the `fit` methods of the
sub-transformers. See :ref:`Metadata Routing User Guide
<metadata_routing>` for more details.
.. versionchanged:: 1.5
`**fit_params` can be routed via metadata routing API.
Returns
-------
self : object
FeatureUnion class instance.
"""
if _routing_enabled():
routed_params = process_routing(self, "fit", **fit_params)
else:
# TODO(SLEP6): remove when metadata routing cannot be disabled.
routed_params = Bunch()
for name, _ in self.transformer_list:
routed_params[name] = Bunch(fit={})
routed_params[name].fit = fit_params
transformers = self._parallel_func(X, y, _fit_one, routed_params)
if not transformers:
# All transformers are None
return self
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **params):
"""Fit all transformers, transform the data and concatenate results.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data to be transformed.
y : array-like of shape (n_samples, n_outputs), default=None
Targets for supervised learning.
**params : dict, default=None
- If `enable_metadata_routing=False` (default):
Parameters directly passed to the `fit` methods of the
sub-transformers.
- If `enable_metadata_routing=True`:
Parameters safely routed to the `fit` methods of the
sub-transformers. See :ref:`Metadata Routing User Guide
<metadata_routing>` for more details.
.. versionchanged:: 1.5
`**params` can now be routed via metadata routing API.
Returns
-------
X_t : array-like or sparse matrix of \
shape (n_samples, sum_n_components)
The `hstack` of results of transformers. `sum_n_components` is the
sum of `n_components` (output dimension) over transformers.
"""
if _routing_enabled():
routed_params = process_routing(self, "fit_transform", **params)
else:
# TODO(SLEP6): remove when metadata routing cannot be disabled.
routed_params = Bunch()
for name, obj in self.transformer_list:
if hasattr(obj, "fit_transform"):
routed_params[name] = Bunch(fit_transform={})
routed_params[name].fit_transform = params
else:
routed_params[name] = Bunch(fit={})
routed_params[name] = Bunch(transform={})
routed_params[name].fit = params
results = self._parallel_func(X, y, _fit_transform_one, routed_params)
if not results:
# All transformers are None
return np.zeros((X.shape[0], 0))
Xs, transformers = zip(*results)
self._update_transformer_list(transformers)
return self._hstack(Xs)
def _log_message(self, name, idx, total):
if not self.verbose:
return None
return "(step %d of %d) Processing %s" % (idx, total, name)
def _parallel_func(self, X, y, func, routed_params):
"""Runs func in parallel on X and y"""
self.transformer_list = list(self.transformer_list)
self._validate_transformers()
self._validate_transformer_weights()
transformers = list(self._iter())
return Parallel(n_jobs=self.n_jobs)(
delayed(func)(
transformer,
X,
y,
weight,
message_clsname="FeatureUnion",
message=self._log_message(name, idx, len(transformers)),
params=routed_params[name],
)
for idx, (name, transformer, weight) in enumerate(transformers, 1)
)
def transform(self, X, **params):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data to be transformed.
**params : dict, default=None
Parameters routed to the `transform` method of the sub-transformers via the
metadata routing API. See :ref:`Metadata Routing User Guide
<metadata_routing>` for more details.
.. versionadded:: 1.5
Returns
-------
X_t : array-like or sparse matrix of shape (n_samples, sum_n_components)
The `hstack` of results of transformers. `sum_n_components` is the
sum of `n_components` (output dimension) over transformers.
"""
_raise_for_params(params, self, "transform")
if _routing_enabled():
routed_params = process_routing(self, "transform", **params)
else:
# TODO(SLEP6): remove when metadata routing cannot be disabled.
routed_params = Bunch()
for name, _ in self.transformer_list:
routed_params[name] = Bunch(transform={})
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, X, None, weight, params=routed_params[name])
for name, trans, weight in self._iter()
)
if not Xs:
# All transformers are None
return np.zeros((X.shape[0], 0))
return self._hstack(Xs)
def _hstack(self, Xs):
# Check if Xs dimensions are valid
for X, (name, _) in zip(Xs, self.transformer_list):
if hasattr(X, "shape") and len(X.shape) != 2:
raise ValueError(
f"Transformer '{name}' returned an array or dataframe with "
f"{len(X.shape)} dimensions, but expected 2 dimensions "
"(n_samples, n_features)."
)
adapter = _get_container_adapter("transform", self)
if adapter and all(adapter.is_supported_container(X) for X in Xs):
return adapter.hstack(Xs)
if any(sparse.issparse(f) for f in Xs):
return sparse.hstack(Xs).tocsr()
return np.hstack(Xs)
def _update_transformer_list(self, transformers):
transformers = iter(transformers)
self.transformer_list[:] = [
(name, old if old == "drop" else next(transformers))
for name, old in self.transformer_list
]
@property
def n_features_in_(self):
"""Number of features seen during :term:`fit`."""
# X is passed to all transformers so we just delegate to the first one
return self.transformer_list[0][1].n_features_in_
@property
def feature_names_in_(self):
"""Names of features seen during :term:`fit`."""
# X is passed to all transformers -- delegate to the first one
return self.transformer_list[0][1].feature_names_in_
def __sklearn_is_fitted__(self):
# Delegate whether feature union was fitted
for _, transformer, _ in self._iter():
check_is_fitted(transformer)
return True
def _sk_visual_block_(self):
names, transformers = zip(*self.transformer_list)
return _VisualBlock("parallel", transformers, names=names)
def __getitem__(self, name):
"""Return transformer with name."""
if not isinstance(name, str):
raise KeyError("Only string keys are supported")
return self.named_transformers[name]
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.5
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self)
for name, transformer in self.transformer_list:
router.add(
**{name: transformer},
method_mapping=MethodMapping()
.add(caller="fit", callee="fit")
.add(caller="fit_transform", callee="fit_transform")
.add(caller="fit_transform", callee="fit")
.add(caller="fit_transform", callee="transform")
.add(caller="transform", callee="transform"),
)
return router
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
try:
tags.input_tags.sparse = all(
get_tags(trans).input_tags.sparse
for name, trans in self.transformer_list
if trans not in {"passthrough", "drop"}
)
except Exception:
# If `transformer_list` does not comply with our API (list of tuples)
# then it will fail. In this case, we assume that `sparse` is False
# but the parameter validation will raise an error during `fit`.
pass # pragma: no cover
return tags
def make_union(
*transformers, n_jobs=None, verbose=False, verbose_feature_names_out=True
):
"""Construct a :class:`FeatureUnion` from the given transformers.
This is a shorthand for the :class:`FeatureUnion` constructor; it does not
require, and does not permit, naming the transformers. Instead, they will
be given names automatically based on their types. It also does not allow
weighting.
Parameters
----------
*transformers : list of estimators
One or more estimators.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionchanged:: v0.20
`n_jobs` default changed from 1 to None.
verbose : bool, default=False
If True, the time elapsed while fitting each transformer will be
printed as it is completed.
verbose_feature_names_out : bool, default=True
If True, the feature names generated by `get_feature_names_out` will
include prefixes derived from the transformer names.
Returns
-------
f : FeatureUnion
A :class:`FeatureUnion` object for concatenating the results of multiple
transformer objects.
See Also
--------
FeatureUnion : Class for concatenating the results of multiple transformer
objects.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> from sklearn.pipeline import make_union
>>> make_union(PCA(), TruncatedSVD())
FeatureUnion(transformer_list=[('pca', PCA()),
('truncatedsvd', TruncatedSVD())])
"""
return FeatureUnion(
_name_estimators(transformers),
n_jobs=n_jobs,
verbose=verbose,
verbose_feature_names_out=verbose_feature_names_out,
)
| FeatureUnion |
python | langchain-ai__langchain | libs/langchain_v1/langchain/agents/middleware/types.py | {
"start": 10513,
"end": 10828
} | class ____(TypedDict, Generic[ResponseT]):
"""State schema for the agent."""
messages: Required[Annotated[list[AnyMessage], add_messages]]
jump_to: NotRequired[Annotated[JumpTo | None, EphemeralValue, PrivateStateAttr]]
structured_response: NotRequired[Annotated[ResponseT, OmitFromInput]]
| AgentState |
python | PyCQA__pylint | tests/functional/m/member/member_checks.py | {
"start": 294,
"end": 589
} | class ____:
"""provide some attributes and method"""
cattr = 4
def __init__(self):
self.attr = 4
def method(self, val):
"""impressive method"""
return self.attr * val
def hophop(self):
"""hop method"""
print('hop hop hop', self)
| Provider |
python | pytorch__pytorch | test/export/test_export.py | {
"start": 609495,
"end": 611297
} | class ____(torch.nn.Module):
def forward(self, x: "f32[2, 4]", y: "f32[4]"):
add: "f32[2, 4]" = torch.ops.aten.add.Tensor(x, y); x = None
hints_wrapper_body_graph_0 = self.hints_wrapper_body_graph_0
hints_wrapper = torch.ops.higher_order.hints_wrapper(hints_wrapper_body_graph_0, (add, y), {}, hints = {'outer_body': True}); hints_wrapper_body_graph_0 = add = y = None
getitem: "f32[2, 4]" = hints_wrapper[0]; hints_wrapper = None
return (getitem,)
class hints_wrapper_body_graph_0(torch.nn.Module):
def forward(self, arg0_1: "f32[2, 4]", arg1_1: "f32[4]"):
hints_wrapper_body_graph_0 = self.hints_wrapper_body_graph_0
hints_wrapper = torch.ops.higher_order.hints_wrapper(hints_wrapper_body_graph_0, (arg0_1, arg1_1), {}, hints = {'inner_body': True}); hints_wrapper_body_graph_0 = arg0_1 = arg1_1 = None
getitem: "f32[2, 4]" = hints_wrapper[0]; hints_wrapper = None
abs_1: "f32[2, 4]" = torch.ops.aten.abs.default(getitem); getitem = None
return (abs_1,)
class hints_wrapper_body_graph_0(torch.nn.Module):
def forward(self, arg0_1: "f32[2, 4]", arg1_1: "f32[4]"):
relu: "f32[2, 4]" = torch.ops.aten.relu.default(arg0_1); arg0_1 = None
add: "f32[2, 4]" = torch.ops.aten.add.Tensor(relu, arg1_1); relu = arg1_1 = None
return (add,)
""",
ignore_empty_lines=True,
)
ep = export(M(), (x, y), strict=strict).run_decompositions({})
export_res = ep.module()(x, y)
ref_res = M()(x, y)
self.assertEqual(export_res, ref_res)
self.assertExpectedInline(
normalize_gm(ep.graph_module.print_readable(print_output=False)),
"""\
| GraphModule |
python | fsspec__filesystem_spec | fsspec/implementations/arrow.py | {
"start": 855,
"end": 6549
} | class ____(AbstractFileSystem):
"""FSSpec-compatible wrapper of pyarrow.fs.FileSystem.
Parameters
----------
fs : pyarrow.fs.FileSystem
"""
root_marker = "/"
def __init__(self, fs, **kwargs):
global PYARROW_VERSION
PYARROW_VERSION = get_package_version_without_import("pyarrow")
self.fs = fs
super().__init__(**kwargs)
@property
def protocol(self):
return self.fs.type_name
@cached_property
def fsid(self):
return "hdfs_" + tokenize(self.fs.host, self.fs.port)
@classmethod
def _strip_protocol(cls, path):
ops = infer_storage_options(path)
path = ops["path"]
if path.startswith("//"):
# special case for "hdfs://path" (without the triple slash)
path = path[1:]
return path
def ls(self, path, detail=False, **kwargs):
path = self._strip_protocol(path)
from pyarrow.fs import FileSelector
try:
entries = [
self._make_entry(entry)
for entry in self.fs.get_file_info(FileSelector(path))
]
except (FileNotFoundError, NotADirectoryError):
entries = [self.info(path, **kwargs)]
if detail:
return entries
else:
return [entry["name"] for entry in entries]
def info(self, path, **kwargs):
path = self._strip_protocol(path)
[info] = self.fs.get_file_info([path])
return self._make_entry(info)
def exists(self, path):
path = self._strip_protocol(path)
try:
self.info(path)
except FileNotFoundError:
return False
else:
return True
def _make_entry(self, info):
from pyarrow.fs import FileType
if info.type is FileType.Directory:
kind = "directory"
elif info.type is FileType.File:
kind = "file"
elif info.type is FileType.NotFound:
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), info.path)
else:
kind = "other"
return {
"name": info.path,
"size": info.size,
"type": kind,
"mtime": info.mtime,
}
@wrap_exceptions
def cp_file(self, path1, path2, **kwargs):
path1 = self._strip_protocol(path1).rstrip("/")
path2 = self._strip_protocol(path2).rstrip("/")
with self._open(path1, "rb") as lstream:
tmp_fname = f"{path2}.tmp.{secrets.token_hex(6)}"
try:
with self.open(tmp_fname, "wb") as rstream:
shutil.copyfileobj(lstream, rstream)
self.fs.move(tmp_fname, path2)
except BaseException:
with suppress(FileNotFoundError):
self.fs.delete_file(tmp_fname)
raise
@wrap_exceptions
def mv(self, path1, path2, **kwargs):
path1 = self._strip_protocol(path1).rstrip("/")
path2 = self._strip_protocol(path2).rstrip("/")
self.fs.move(path1, path2)
@wrap_exceptions
def rm_file(self, path):
path = self._strip_protocol(path)
self.fs.delete_file(path)
@wrap_exceptions
def rm(self, path, recursive=False, maxdepth=None):
path = self._strip_protocol(path).rstrip("/")
if self.isdir(path):
if recursive:
self.fs.delete_dir(path)
else:
raise ValueError("Can't delete directories without recursive=False")
else:
self.fs.delete_file(path)
@wrap_exceptions
def _open(self, path, mode="rb", block_size=None, seekable=True, **kwargs):
if mode == "rb":
if seekable:
method = self.fs.open_input_file
else:
method = self.fs.open_input_stream
elif mode == "wb":
method = self.fs.open_output_stream
elif mode == "ab":
method = self.fs.open_append_stream
else:
raise ValueError(f"unsupported mode for Arrow filesystem: {mode!r}")
_kwargs = {}
if mode != "rb" or not seekable:
if int(PYARROW_VERSION.split(".")[0]) >= 4:
# disable compression auto-detection
_kwargs["compression"] = None
stream = method(path, **_kwargs)
return ArrowFile(self, stream, path, mode, block_size, **kwargs)
@wrap_exceptions
def mkdir(self, path, create_parents=True, **kwargs):
path = self._strip_protocol(path)
if create_parents:
self.makedirs(path, exist_ok=True)
else:
self.fs.create_dir(path, recursive=False)
@wrap_exceptions
def makedirs(self, path, exist_ok=False):
path = self._strip_protocol(path)
self.fs.create_dir(path, recursive=True)
@wrap_exceptions
def rmdir(self, path):
path = self._strip_protocol(path)
self.fs.delete_dir(path)
@wrap_exceptions
def modified(self, path):
path = self._strip_protocol(path)
return self.fs.get_file_info(path).mtime
def cat_file(self, path, start=None, end=None, **kwargs):
kwargs.setdefault("seekable", start not in [None, 0])
return super().cat_file(path, start=None, end=None, **kwargs)
def get_file(self, rpath, lpath, **kwargs):
kwargs.setdefault("seekable", False)
super().get_file(rpath, lpath, **kwargs)
@mirror_from(
"stream",
[
"read",
"seek",
"tell",
"write",
"readable",
"writable",
"close",
"seekable",
],
)
| ArrowFSWrapper |
python | wandb__wandb | wandb/vendor/pygments/lexers/templates.py | {
"start": 56641,
"end": 57393
} | class ____(DelegatingLexer):
"""
Subclass of the `LassoLexer` which highlights unhandled data with the
`JavascriptLexer`.
.. versionadded:: 1.6
"""
name = 'JavaScript+Lasso'
aliases = ['js+lasso', 'javascript+lasso']
alias_filenames = ['*.js']
mimetypes = ['application/x-javascript+lasso',
'text/x-javascript+lasso',
'text/javascript+lasso']
def __init__(self, **options):
options['requiredelimiters'] = True
super(LassoJavascriptLexer, self).__init__(JavascriptLexer, LassoLexer,
**options)
def analyse_text(text):
rv = LassoLexer.analyse_text(text) - 0.05
return rv
| LassoJavascriptLexer |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 537792,
"end": 538135
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("PullRequestReviewComment", graphql_name="node")
| PullRequestReviewCommentEdge |
python | Textualize__textual | docs/examples/guide/layout/combining_layouts.py | {
"start": 157,
"end": 1050
} | class ____(App):
CSS_PATH = "combining_layouts.tcss"
def compose(self) -> ComposeResult:
yield Header()
with Container(id="app-grid"):
with VerticalScroll(id="left-pane"):
for number in range(15):
yield Static(f"Vertical layout, child {number}")
with Horizontal(id="top-right"):
yield Static("Horizontally")
yield Static("Positioned")
yield Static("Children")
yield Static("Here")
with Container(id="bottom-right"):
yield Static("This")
yield Static("panel")
yield Static("is")
yield Static("using")
yield Static("grid layout!", id="bottom-right-final")
if __name__ == "__main__":
app = CombiningLayoutsExample()
app.run()
| CombiningLayoutsExample |
python | django__django | tests/handlers/tests_custom_error_handlers.py | {
"start": 212,
"end": 1096
} | class ____:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
# Response.content should be available in the middleware even with a
# TemplateResponse-based exception response.
assert response.content
return response
def template_response_error_handler(request, exception=None):
return TemplateResponse(request, "test_handler.html", status=403)
def permission_denied_view(request):
raise PermissionDenied
urlpatterns = [
path("", permission_denied_view),
]
handler403 = template_response_error_handler
@override_settings(ROOT_URLCONF="handlers.tests_custom_error_handlers")
@modify_settings(
MIDDLEWARE={
"append": "handlers.tests_custom_error_handlers.MiddlewareAccessingContent"
}
)
| MiddlewareAccessingContent |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.