language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
great-expectations__great_expectations
|
great_expectations/expectations/metrics/multicolumn_map_metrics/compound_columns_unique.py
|
{
"start": 1263,
"end": 11691
}
|
class ____(MulticolumnMapMetricProvider):
"""
While the support for "PandasExecutionEngine" and "SparkDFExecutionEngine" is accomplished using a compact
implementation, which combines the "map" and "condition" parts in a single step, the support for
"SqlAlchemyExecutionEngine" is more detailed. Thus, the "map" and "condition" parts for "SqlAlchemyExecutionEngine"
are handled separately, with the "condition" part relying on the "map" part as a metric dependency.
""" # noqa: E501 # FIXME CoP
function_metric_name = "compound_columns.count" # pre-requisite "map" style metric
condition_metric_name = (
"compound_columns.unique" # "condition" style metric required to be implemented by provider
)
condition_domain_keys = (
"batch_id",
"table",
"column_list",
"row_condition",
"condition_parser",
"ignore_row_if",
)
@multicolumn_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column_list, **kwargs):
row_wise_cond = ~column_list.duplicated(keep=False)
return row_wise_cond
@multicolumn_function_partial(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy_function(self, column_list, **kwargs):
"""
Computes the "map" between the specified "column_list" (treated as a group so as to model the "compound" aspect)
and the number of occurrences of every permutation of the values of "column_list" as the grouped subset of all
rows of the table. In the present context, the term "compound" refers to having to treat the specified columns
as unique together (e.g., as a multi-column primary key). For example, suppose that in the example below, all
three columns ("A", "B", and "C") of the table are included as part of the "compound" columns list (i.e.,
column_list = ["A", "B", "C"]):
A B C _num_rows
1 1 2 2
1 2 3 1
1 1 2 2
2 2 2 1
3 2 3 1
The fourth column, "_num_rows", holds the value of the "map" function -- the number of rows the group occurs in.
""" # noqa: E501 # FIXME CoP
# Needed as keys (hence, string valued) to access "ColumnElement" objects contained within the "FROM" clauses. # noqa: E501 # FIXME CoP
column_names = kwargs.get("_column_names")
# Need all columns of the table for the purposes of reporting entire rows satisfying unexpected condition logic. # noqa: E501 # FIXME CoP
table_columns = kwargs.get("_table_columns")
table = kwargs.get(
"_table"
) # Note that here, "table" is of the "sqlalchemy.sql.selectable.Subquery" type.
# Filipe - 20231114
# This is a special case that needs to be handled for mysql, where you cannot refer to a temp_table # noqa: E501 # FIXME CoP
# more than once in the same query. The solution to this is to perform our operation without the need # noqa: E501 # FIXME CoP
# for a sub query. We can do this by using the window function count, to get the number of duplicate # noqa: E501 # FIXME CoP
# rows by over partition by the compound unique columns. This will give a table which has the same # noqa: E501 # FIXME CoP
# number of rows as the original table, but with an additional column _num_rows column.
dialect = kwargs.get("_dialect")
try:
dialect_name = dialect.dialect.name
except AttributeError:
try:
dialect_name = dialect.name
except AttributeError:
dialect_name = ""
if dialect and dialect_name == "mysql":
table_columns_selector = [sa.column(column_name) for column_name in table_columns]
partition_by_columns = (
sa.func.count()
.over(partition_by=[sa.column(column) for column in column_names])
.label("_num_rows")
)
count_selector = table_columns_selector + [partition_by_columns]
original_table_clause = (
sa.select(*count_selector).select_from(table).alias("original_table_clause")
)
return original_table_clause
# Step-1: Obtain the SQLAlchemy "FromClause" version of the original "table" for the purposes of gaining the # noqa: E501 # FIXME CoP
# "FromClause.c" attribute, which is a namespace of all the columns contained within the "FROM" clause (these # noqa: E501 # FIXME CoP
# elements are themselves subclasses of the SQLAlchemy "ColumnElement" class).
table_columns_selector = [sa.column(column_name) for column_name in table_columns]
original_table_clause = (
sa.select(*table_columns_selector).select_from(table).alias("original_table_clause")
)
# Step-2: "SELECT FROM" the original table, represented by the "FromClause" object, querying all columns of the # noqa: E501 # FIXME CoP
# table and the count of occurrences of distinct "compound" (i.e., group, as specified by "column_list") values. # noqa: E501 # FIXME CoP
# Give this aggregated group count a distinctive label.
# Give the resulting sub-query a unique alias in order to disambiguate column names in subsequent queries. # noqa: E501 # FIXME CoP
count_selector = column_list + [sa.func.count().label("_num_rows")]
group_count_query = (
sa.select(*count_selector)
.group_by(*column_list)
.select_from(original_table_clause)
.alias("group_counts_subquery")
)
# The above "group_count_query", if executed, will produce the result set containing the number of rows that # noqa: E501 # FIXME CoP
# equals the number of distinct values of the group -- unique grouping (e.g., as in a multi-column primary key). # noqa: E501 # FIXME CoP
# Hence, in order for the "_num_rows" column values to provide an entry for each row of the original table, the # noqa: E501 # FIXME CoP
# "SELECT FROM" of "group_count_query" must undergo an "INNER JOIN" operation with the "original_table_clause" # noqa: E501 # FIXME CoP
# object, whereby all table columns in the two "FromClause" objects must match, respectively, as the conditions. # noqa: E501 # FIXME CoP
conditions = sa.and_(
*(group_count_query.c[name] == original_table_clause.c[name] for name in column_names)
)
# noinspection PyProtectedMember
compound_columns_count_query = (
sa.select(
original_table_clause,
group_count_query.c._num_rows.label("_num_rows"),
)
.select_from(
original_table_clause.join(
right=group_count_query, onclause=conditions, isouter=False
)
)
.alias("records_with_grouped_column_counts_subquery")
)
# The returned SQLAlchemy "FromClause" "compound_columns_count_query" object realizes the "map" metric function. # noqa: E501 # FIXME CoP
return compound_columns_count_query
@multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy_condition(cls, column_list, **kwargs):
"""
Retrieve the specified "map" metric dependency value as the "FromClause" "compound_columns_count_query" object
and extract from it -- using the supported SQLAlchemy column access method -- the "_num_rows" columns. The
uniqueness of "compound" columns (as a group) is expressed by the "BinaryExpression" "row_wise_cond" returned.
Importantly, since the "compound_columns_count_query" is the "FromClause" object that incorporates all columns
of the original table, no additional "FromClause" objects ("select_from") must augment this "condition" metric.
Other than boolean operations, column access, argument of filtering, and limiting the size of the result set,
this "row_wise_cond", serving as the main component of the unexpected condition logic, carries along with it
the entire object hierarchy, making any encapsulating query ready for execution against the database engine.
""" # noqa: E501 # FIXME CoP
metrics = kwargs.get("_metrics")
compound_columns_count_query, _, _ = metrics[
f"compound_columns.count.{MetricPartialFunctionTypeSuffixes.MAP.value}"
]
# noinspection PyProtectedMember
row_wise_cond = compound_columns_count_query.c._num_rows < 2 # noqa: PLR2004 # FIXME CoP
return row_wise_cond
@multicolumn_condition_partial(engine=SparkDFExecutionEngine)
def _spark(cls, column_list, **kwargs):
column_names = column_list.columns
row_wise_cond = (
F.count(F.lit(1)).over(pyspark.Window.partitionBy(F.struct(*column_names))) <= 1
)
return row_wise_cond
@classmethod
@override
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
"""
Returns a dictionary of given metric names and their corresponding configuration, specifying the metric types
and their respective domains.
""" # noqa: E501 # FIXME CoP
dependencies: dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
if isinstance(execution_engine, SqlAlchemyExecutionEngine):
if (
metric.metric_name
== f"compound_columns.unique.{MetricPartialFunctionTypeSuffixes.CONDITION.value}"
):
dependencies[
f"compound_columns.count.{MetricPartialFunctionTypeSuffixes.MAP.value}"
] = MetricConfiguration(
metric_name=f"compound_columns.count.{MetricPartialFunctionTypeSuffixes.MAP.value}",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs=None,
)
return dependencies
|
CompoundColumnsUnique
|
python
|
pytorch__pytorch
|
torch/_dynamo/variables/base.py
|
{
"start": 4084,
"end": 4813
}
|
class ____(MutationType):
"""
This case of VariableTracker.mutation_type marker indicates
1. Dynamo allows mutation on the value itself (rather than its attributes).
2. The value is created by the bytecode Dynamo is tracing through.
For instance, Dynamo could model a newly created list with this marker,
indicating that while we need to model mutations to this list, we don't have
to emit bytecode for these mutations if the list doesn't escape into the
Python world.
"""
def __init__(self) -> None:
super().__init__(SourceType.New)
def __hash__(self) -> int:
return id(self)
def __eq__(self, other: object) -> bool:
return self is other
|
ValueMutationNew
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI034.py
|
{
"start": 9089,
"end": 9187
}
|
class ____(AsyncIterator[int]):
def __aiter__(self: Self) -> Self:
...
|
GoodAsyncIterator
|
python
|
ray-project__ray
|
python/ray/cluster_utils.py
|
{
"start": 550,
"end": 4392
}
|
class ____:
"""Create a local autoscaling cluster for testing.
See test_autoscaler_fake_multinode.py for an end-to-end example.
"""
def __init__(
self,
head_resources: dict,
worker_node_types: dict,
autoscaler_v2: bool = False,
**config_kwargs,
):
"""Create the cluster.
Args:
head_resources: resources of the head node, including CPU.
worker_node_types: autoscaler node types config for worker nodes.
"""
self._head_resources = head_resources
self._config = self._generate_config(
head_resources,
worker_node_types,
autoscaler_v2=autoscaler_v2,
**config_kwargs,
)
self._autoscaler_v2 = autoscaler_v2
def _generate_config(
self, head_resources, worker_node_types, autoscaler_v2=False, **config_kwargs
):
base_config = yaml.safe_load(
open(
os.path.join(
os.path.dirname(ray.__file__),
"autoscaler/_private/fake_multi_node/example.yaml",
)
)
)
custom_config = copy.deepcopy(base_config)
custom_config["available_node_types"] = worker_node_types
custom_config["available_node_types"]["ray.head.default"] = {
"resources": head_resources,
"node_config": {},
"max_workers": 0,
}
# Autoscaler v2 specific configs
if autoscaler_v2:
custom_config["provider"]["launch_multiple"] = True
custom_config["provider"]["head_node_id"] = FAKE_HEAD_NODE_ID
custom_config.update(config_kwargs)
return custom_config
def start(self, _system_config=None, override_env: Optional[Dict] = None):
"""Start the cluster.
After this call returns, you can connect to the cluster with
ray.init("auto").
"""
subprocess.check_call(["ray", "stop", "--force"])
_, fake_config = tempfile.mkstemp()
with open(fake_config, "w") as f:
f.write(json.dumps(self._config))
cmd = [
"ray",
"start",
"--autoscaling-config={}".format(fake_config),
"--head",
]
if "CPU" in self._head_resources:
cmd.append("--num-cpus={}".format(self._head_resources.pop("CPU")))
if "GPU" in self._head_resources:
cmd.append("--num-gpus={}".format(self._head_resources.pop("GPU")))
if "object_store_memory" in self._head_resources:
cmd.append(
"--object-store-memory={}".format(
self._head_resources.pop("object_store_memory")
)
)
if self._head_resources:
cmd.append("--resources='{}'".format(json.dumps(self._head_resources)))
if _system_config is not None:
cmd.append(
"--system-config={}".format(
json.dumps(_system_config, separators=(",", ":"))
)
)
env = os.environ.copy()
env.update({"AUTOSCALER_UPDATE_INTERVAL_S": "1", "RAY_FAKE_CLUSTER": "1"})
if self._autoscaler_v2:
# Set the necessary environment variables for autoscaler v2.
env.update(
{
"RAY_enable_autoscaler_v2": "1",
"RAY_CLOUD_INSTANCE_ID": FAKE_HEAD_NODE_ID,
"RAY_OVERRIDE_NODE_ID_FOR_TESTING": FAKE_HEAD_NODE_ID,
}
)
if override_env:
env.update(override_env)
subprocess.check_call(cmd, env=env)
def shutdown(self):
"""Terminate the cluster."""
subprocess.check_call(["ray", "stop", "--force"])
@DeveloperAPI
|
AutoscalingCluster
|
python
|
apache__airflow
|
providers/papermill/src/airflow/providers/papermill/operators/papermill.py
|
{
"start": 1645,
"end": 5163
}
|
class ____(BaseOperator):
"""
Executes a jupyter notebook through papermill that is annotated with parameters.
:param input_nb: input notebook, either path or NoteBook inlet.
:param output_nb: output notebook, either path or NoteBook outlet.
:param parameters: the notebook parameters to set
:param kernel_name: (optional) name of kernel to execute the notebook against
(ignores kernel name in the notebook document metadata)
"""
# TODO: Remove this when provider drops 2.x support.
supports_lineage = True
template_fields: Sequence[str] = (
"input_nb",
"output_nb",
"parameters",
"kernel_name",
"language_name",
"kernel_conn_id",
"nbconvert",
"nbconvert_args",
)
def __init__(
self,
*,
input_nb: str | NoteBook | None = None,
output_nb: str | NoteBook | None = None,
parameters: dict | None = None,
kernel_name: str | None = None,
language_name: str | None = None,
kernel_conn_id: str | None = None,
nbconvert: bool = False,
nbconvert_args: list[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.parameters = parameters
if not input_nb:
raise ValueError("Input notebook is not specified")
self.input_nb = input_nb
if not output_nb:
raise ValueError("Output notebook is not specified")
self.output_nb = output_nb
self.kernel_name = kernel_name
self.language_name = language_name
self.kernel_conn_id = kernel_conn_id
self.nbconvert = nbconvert
self.nbconvert_args = nbconvert_args
def execute(self, context: Context):
if not isinstance(self.input_nb, NoteBook):
self.input_nb = NoteBook(url=self.input_nb, parameters=self.parameters)
if not isinstance(self.output_nb, NoteBook):
self.output_nb = NoteBook(url=self.output_nb)
if not AIRFLOW_V_3_0_PLUS:
self.inlets.append(self.input_nb)
self.outlets.append(self.output_nb)
remote_kernel_kwargs = {}
kernel_hook = self.hook
if kernel_hook:
engine_name = REMOTE_KERNEL_ENGINE
kernel_connection = kernel_hook.get_conn()
remote_kernel_kwargs = {
"kernel_ip": kernel_connection.ip,
"kernel_shell_port": kernel_connection.shell_port,
"kernel_iopub_port": kernel_connection.iopub_port,
"kernel_stdin_port": kernel_connection.stdin_port,
"kernel_control_port": kernel_connection.control_port,
"kernel_hb_port": kernel_connection.hb_port,
"kernel_session_key": kernel_connection.session_key,
}
else:
engine_name = None
pm.execute_notebook(
self.input_nb.url,
self.output_nb.url,
parameters=self.input_nb.parameters,
progress_bar=False,
report_mode=True,
kernel_name=self.kernel_name,
language=self.language_name,
engine_name=engine_name,
**remote_kernel_kwargs,
)
return self.output_nb
@cached_property
def hook(self) -> KernelHook | None:
"""Get valid hook."""
if self.kernel_conn_id:
return KernelHook(kernel_conn_id=self.kernel_conn_id)
return None
|
PapermillOperator
|
python
|
ray-project__ray
|
python/ray/tests/test_placement_group_5.py
|
{
"start": 10010,
"end": 18881
}
|
class ____(RuntimeEnvPlugin):
name = MyPlugin
async def create(
self,
uri,
runtime_env,
ctx,
logger, # noqa: F821
) -> float:
await asyncio.sleep(PLUGIN_TIMEOUT)
@staticmethod
def validate(runtime_env_dict: dict) -> str:
return 1
@pytest.mark.parametrize(
"set_runtime_env_plugins",
[
'[{"class":"' + MY_PLUGIN_CLASS_PATH + '"}]',
],
indirect=True,
)
def test_placement_group_leaks(set_runtime_env_plugins, shutdown_only):
"""Handles https://github.com/ray-project/ray/pull/42942
Handle an edge case where if a task is scheduled & worker is not
started before pg is removed, it leaks.
"""
ray.init(num_cpus=1, _system_config={"prestart_worker_first_driver": False})
@ray.remote
class Actor:
pass
@ray.remote
def f():
pass
pg = ray.util.placement_group(bundles=[{"CPU": 1}])
actor = Actor.options( # noqa
num_cpus=1,
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=pg,
),
runtime_env={MyPlugin: {"name": "f2"}},
).remote()
# The race condition is triggered
# if scheduling succeeds, but a worker is not started.
# So we should make sure to wait until actor is scheduled.
# Since there's no API to get that timing, we just wait sufficient time.
time.sleep(PLUGIN_TIMEOUT // 2)
# Verify pg resources are created.
def verify_pg_resources_created():
r_keys = ray.available_resources().keys()
return any("group" in k for k in r_keys)
wait_for_condition(verify_pg_resources_created)
ray.util.remove_placement_group(pg)
wait_for_condition(lambda: list_placement_groups()[0].state == "REMOVED")
# Verify pg resources are cleaned up.
def verify_pg_resources_cleaned():
r_keys = ray.available_resources().keys()
return all("group" not in k for k in r_keys)
wait_for_condition(verify_pg_resources_cleaned, timeout=30)
# Verify an actor is killed properly.
def verify_actor_killed():
state = list_actors()[0].state
return state == "DEAD"
wait_for_condition(verify_actor_killed)
def test_placement_group_strict_pack_soft_target_node_id(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=8, resources={"head": 1})
cluster.wait_for_nodes()
ray.init(address=cluster.address)
cluster.add_node(num_cpus=2, resources={"worker1": 1})
worker2_node = cluster.add_node(num_cpus=4, resources={"worker2": 1})
cluster.wait_for_nodes()
@ray.remote
def get_node_id():
return ray.get_runtime_context().get_node_id()
head_node_id = ray.get(get_node_id.options(resources={"head": 1}).remote())
worker1_node_id = ray.get(get_node_id.options(resources={"worker1": 1}).remote())
worker2_node_id = ray.get(get_node_id.options(resources={"worker2": 1}).remote())
# soft_target_node_id only works with STRICT_PACK
with pytest.raises(ValueError):
pg = ray.util.placement_group(
bundles=[{"CPU": 2}, {"CPU": 2}],
strategy="PACK",
_soft_target_node_id=ray.NodeID.from_random().hex(),
)
# Invalid target node id
with pytest.raises(ValueError):
pg = ray.util.placement_group(
bundles=[{"CPU": 2}, {"CPU": 2}], strategy="PACK", _soft_target_node_id="a"
)
# No target node.
pg = ray.util.placement_group(
bundles=[{"CPU": 2}, {"CPU": 2}], strategy="STRICT_PACK"
)
wait_for_condition(lambda: ray.available_resources()["CPU"] == 10)
assert (
ray.get(
get_node_id.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(placement_group=pg)
).remote()
)
== head_node_id
)
ray.util.remove_placement_group(pg)
wait_for_condition(lambda: ray.available_resources()["CPU"] == 14)
# Target node doesn't have enough available resources.
pg = ray.util.placement_group(
bundles=[{"CPU": 2}, {"CPU": 2}],
strategy="STRICT_PACK",
_soft_target_node_id=worker1_node_id,
)
wait_for_condition(lambda: ray.available_resources()["CPU"] == 10)
assert (
ray.get(
get_node_id.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(placement_group=pg)
).remote()
)
== head_node_id
)
ray.util.remove_placement_group(pg)
wait_for_condition(lambda: ray.available_resources()["CPU"] == 14)
# Target node doesn't exist.
pg = ray.util.placement_group(
bundles=[{"CPU": 2}, {"CPU": 2}],
strategy="STRICT_PACK",
_soft_target_node_id=ray.NodeID.from_random().hex(),
)
wait_for_condition(lambda: ray.available_resources()["CPU"] == 10)
assert (
ray.get(
get_node_id.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(placement_group=pg)
).remote()
)
== head_node_id
)
ray.util.remove_placement_group(pg)
wait_for_condition(lambda: ray.available_resources()["CPU"] == 14)
# Target node has enough available resources.
pg = ray.util.placement_group(
bundles=[{"CPU": 2}, {"CPU": 2}],
strategy="STRICT_PACK",
_soft_target_node_id=worker2_node_id,
)
wait_for_condition(lambda: ray.available_resources()["CPU"] == 10)
assert (
ray.get(
get_node_id.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(placement_group=pg)
).remote()
)
== worker2_node_id
)
# After target node dies, the pg can be recovered elsewhere.
cluster.remove_node(worker2_node)
cluster.wait_for_nodes()
assert (
ray.get(
get_node_id.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(placement_group=pg)
).remote()
)
== head_node_id
)
def test_remove_placement_group_with_pending_worker_lease_waiting_for_pg_resource(
shutdown_only,
):
"""
Test removing a pg with a pending worker lease request acquiring the pg resources.
details: https://github.com/ray-project/ray/issues/51124
Specific test steps:
1. Create a placement group with only 1 bundle.
2. Create two actors using the aforementioned pg. At this point,
the latter actor lease request will definitely be pending in local lease manager leases_to_grant queue due to
unavailable pg bundle resources.
3. Remove the pg while the latter actor lease request is pending.
4. Verify that the pending actor lease request is cancelled and the pg
is removed successfully.
"""
context = ray.init(num_cpus=1)
prom_address = build_address(
context.address_info["node_ip_address"],
context.address_info["metrics_export_port"],
)
pg = ray.util.placement_group(
[{"CPU": 1}],
)
@ray.remote(
num_cpus=1,
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=pg, placement_group_bundle_index=0
),
)
class Actor:
def ping(self):
pass
actor1 = Actor.remote()
# Actor1 is scheduled and used all the PG resources.
ray.get(actor1.ping.remote())
actor2 = Actor.remote()
def wait_for_actor2_added_to_dispatch_queue():
metrics = fetch_prometheus_metrics([prom_address])
samples = metrics.get("ray_scheduler_tasks", None)
if samples is None:
return False
for sample in samples:
if sample.labels["State"] == "Dispatched" and sample.value == 1:
# actor2 is in the local lease manager leases_to_grant queue
return True
return False
wait_for_condition(wait_for_actor2_added_to_dispatch_queue, timeout=30)
ray.util.remove_placement_group(pg)
def check_pg_removed():
pgs = list_placement_groups()
assert len(pgs) == 1
assert "REMOVED" == pgs[0].state
return True
wait_for_condition(check_pg_removed)
# Actors should be dead due to the pg removal.
def check_actor_dead():
actors = list_actors()
assert len(actors) == 2
assert [actors[0].state, actors[1].state] == ["DEAD", "DEAD"]
return True
wait_for_condition(check_actor_dead)
# Actor2 should be cancelled due to the pg removal.
with pytest.raises(ray.exceptions.ActorUnschedulableError):
ray.get(actor2.ping.remote())
# Check that the raylet is still running.
@ray.remote
def task():
return 1
assert ray.get(task.remote()) == 1
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
|
HangPlugin
|
python
|
sphinx-doc__sphinx
|
tests/roots/test-ext-autodoc/target/bound_method.py
|
{
"start": 0,
"end": 107
}
|
class ____:
def method(self):
"""Method docstring"""
pass
bound_method = Cls().method
|
Cls
|
python
|
google__jax
|
tests/custom_api_test.py
|
{
"start": 137684,
"end": 139075
}
|
class ____(jtu.JaxTestCase):
"""Test interactions among the custom_{vmap,jvp,vjp,transpose,*} APIs"""
def test_method_forwarding(self):
@jax.custom_batching.custom_vmap
@jax.custom_jvp
@jax.custom_transpose.custom_transpose
def f(x): return 2. * x
# none of these err:
@f.def_vmap
def f_batch(sz, b, xs): return 2. * xs
@f.defjvp
def f_jvp(x, tx): return 2. * x, 2. * tx
@f.def_transpose
def f_transpose(x): return 2. * x
def test_def_method_forwarding_all_permutations(self):
for wraps in it.permutations([
jax.custom_jvp, jax.custom_transpose.custom_transpose, jax.custom_batching.custom_vmap]):
f = lambda x: x + 1.
for wrap in wraps:
f = wrap(f)
for methods in it.permutations(['defjvp', 'def_vmap', 'def_transpose']):
for method in methods:
self.assertIsInstance(getattr(f, method), Callable)
for decorators in it.permutations([
jax.custom_vjp, jax.custom_transpose.custom_transpose, jax.custom_batching.custom_vmap]):
f = lambda x: x + 1.
for decorator in decorators:
f = decorator(f)
for methods in it.permutations(['defvjp', 'def_vmap', 'def_transpose']):
for method in methods:
self.assertIsInstance(getattr(f, method), Callable)
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
|
CustomApiTest
|
python
|
nedbat__coveragepy
|
coverage/jsonreport.py
|
{
"start": 908,
"end": 7333
}
|
class ____:
"""A reporter for writing JSON coverage results."""
report_type = "JSON report"
def __init__(self, coverage: Coverage) -> None:
self.coverage = coverage
self.config = self.coverage.config
self.total = Numbers(self.config.precision)
self.report_data: JsonObj = {}
def make_summary(self, nums: Numbers) -> JsonObj:
"""Create a dict summarizing `nums`."""
return {
"covered_lines": nums.n_executed,
"num_statements": nums.n_statements,
"percent_covered": nums.pc_covered,
"percent_covered_display": nums.pc_covered_str,
"missing_lines": nums.n_missing,
"excluded_lines": nums.n_excluded,
"percent_statements_covered": nums.pc_statements,
"percent_statements_covered_display": nums.pc_statements_str,
}
def make_branch_summary(self, nums: Numbers) -> JsonObj:
"""Create a dict summarizing the branch info in `nums`."""
return {
"num_branches": nums.n_branches,
"num_partial_branches": nums.n_partial_branches,
"covered_branches": nums.n_executed_branches,
"missing_branches": nums.n_missing_branches,
"percent_branches_covered": nums.pc_branches,
"percent_branches_covered_display": nums.pc_branches_str,
}
def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float:
"""Generate a json report for `morfs`.
`morfs` is a list of modules or file names.
`outfile` is a file object to write the json to.
"""
outfile = outfile or sys.stdout
coverage_data = self.coverage.get_data()
coverage_data.set_query_contexts(self.config.report_contexts)
self.report_data["meta"] = {
"format": FORMAT_VERSION,
"version": __version__,
"timestamp": datetime.datetime.now().isoformat(),
"branch_coverage": coverage_data.has_arcs(),
"show_contexts": self.config.json_show_contexts,
}
measured_files = {}
for file_reporter, analysis in get_analysis_to_report(self.coverage, morfs):
measured_files[file_reporter.relative_filename()] = self.report_one_file(
coverage_data,
analysis,
file_reporter,
)
self.report_data["files"] = measured_files
self.report_data["totals"] = self.make_summary(self.total)
if coverage_data.has_arcs():
self.report_data["totals"].update(self.make_branch_summary(self.total))
json.dump(
self.report_data,
outfile,
indent=(4 if self.config.json_pretty_print else None),
)
return self.total.n_statements and self.total.pc_covered
def report_one_file(
self, coverage_data: CoverageData, analysis: Analysis, file_reporter: FileReporter
) -> JsonObj:
"""Extract the relevant report data for a single file."""
nums = analysis.numbers
self.total += nums
summary = self.make_summary(nums)
reported_file: JsonObj = {
"executed_lines": sorted(analysis.executed),
"summary": summary,
"missing_lines": sorted(analysis.missing),
"excluded_lines": sorted(analysis.excluded),
}
if self.config.json_show_contexts:
reported_file["contexts"] = coverage_data.contexts_by_lineno(analysis.filename)
if coverage_data.has_arcs():
summary.update(self.make_branch_summary(nums))
reported_file["executed_branches"] = list(
_convert_branch_arcs(analysis.executed_branch_arcs()),
)
reported_file["missing_branches"] = list(
_convert_branch_arcs(analysis.missing_branch_arcs()),
)
num_lines = len(file_reporter.source().splitlines())
regions = file_reporter.code_regions()
for noun, plural in file_reporter.code_region_kinds():
outside_lines = set(range(1, num_lines + 1))
for region in regions:
if region.kind != noun:
continue
outside_lines -= region.lines
narrower = AnalysisNarrower(analysis)
narrower.add_regions(r.lines for r in regions if r.kind == noun)
narrower.add_regions([outside_lines])
reported_file[plural] = region_data = {}
for region in regions:
if region.kind != noun:
continue
region_data[region.name] = self.make_region_data(
coverage_data,
narrower.narrow(region.lines),
)
region_data[""] = self.make_region_data(
coverage_data,
narrower.narrow(outside_lines),
)
return reported_file
def make_region_data(self, coverage_data: CoverageData, narrowed_analysis: Analysis) -> JsonObj:
"""Create the data object for one region of a file."""
narrowed_nums = narrowed_analysis.numbers
narrowed_summary = self.make_summary(narrowed_nums)
this_region = {
"executed_lines": sorted(narrowed_analysis.executed),
"summary": narrowed_summary,
"missing_lines": sorted(narrowed_analysis.missing),
"excluded_lines": sorted(narrowed_analysis.excluded),
}
if self.config.json_show_contexts:
contexts = coverage_data.contexts_by_lineno(narrowed_analysis.filename)
this_region["contexts"] = contexts
if coverage_data.has_arcs():
narrowed_summary.update(self.make_branch_summary(narrowed_nums))
this_region["executed_branches"] = list(
_convert_branch_arcs(narrowed_analysis.executed_branch_arcs()),
)
this_region["missing_branches"] = list(
_convert_branch_arcs(narrowed_analysis.missing_branch_arcs()),
)
return this_region
def _convert_branch_arcs(
branch_arcs: dict[TLineNo, list[TLineNo]],
) -> Iterable[tuple[TLineNo, TLineNo]]:
"""Convert branch arcs to a list of two-element tuples."""
for source, targets in branch_arcs.items():
for target in targets:
yield source, target
|
JsonReporter
|
python
|
tensorflow__tensorflow
|
tensorflow/python/distribute/tpu_strategy.py
|
{
"start": 9684,
"end": 28407
}
|
class ____(distribute_lib.Strategy):
"""Synchronous training on TPUs and TPU Pods.
To construct a TPUStrategy object, you need to run the
initialization code as below:
>>> resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
>>> tf.config.experimental_connect_to_cluster(resolver)
>>> tf.tpu.experimental.initialize_tpu_system(resolver)
>>> strategy = tf.distribute.TPUStrategy(resolver)
While using distribution strategies, the variables created within the
strategy's scope will be replicated across all the replicas and can be kept in
sync using all-reduce algorithms.
To run TF2 programs on TPUs, you can either use `.compile` and
`.fit` APIs in `tf.keras` with TPUStrategy, or write your own customized
training loop by calling `strategy.run` directly. Note that
TPUStrategy doesn't support pure eager execution, so please make sure the
function passed into `strategy.run` is a `tf.function` or
`strategy.run` is called inside a `tf.function` if eager
behavior is enabled. See more details in https://www.tensorflow.org/guide/tpu.
`distribute_datasets_from_function` and
`experimental_distribute_dataset` APIs can be used to distribute the dataset
across the TPU workers when writing your own training loop. If you are using
`fit` and `compile` methods available in `tf.keras.Model`, then Keras will
handle the distribution for you.
An example of writing customized training loop on TPUs:
>>> with strategy.scope():
... model = tf.keras.Sequential([
... tf.keras.layers.Dense(2, input_shape=(5,)),
... ])
... optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
>>> def dataset_fn(ctx):
... x = np.random.random((2, 5)).astype(np.float32)
... y = np.random.randint(2, size=(2, 1))
... dataset = tf.data.Dataset.from_tensor_slices((x, y))
... return dataset.repeat().batch(1, drop_remainder=True)
>>> dist_dataset = strategy.distribute_datasets_from_function(
... dataset_fn)
>>> iterator = iter(dist_dataset)
>>> @tf.function()
... def train_step(iterator):
...
... def step_fn(inputs):
... features, labels = inputs
... with tf.GradientTape() as tape:
... logits = model(features, training=True)
... loss = tf.keras.losses.sparse_categorical_crossentropy(
... labels, logits)
...
... grads = tape.gradient(loss, model.trainable_variables)
... optimizer.apply_gradients(zip(grads, model.trainable_variables))
...
... strategy.run(step_fn, args=(next(iterator),))
>>> train_step(iterator)
For the advanced use cases like model parallelism, you can set
`experimental_device_assignment` argument when creating TPUStrategy to specify
number of replicas and number of logical devices. Below is an example to
initialize TPU system with 2 logical devices and 1 replica.
>>> resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
>>> tf.config.experimental_connect_to_cluster(resolver)
>>> topology = tf.tpu.experimental.initialize_tpu_system(resolver)
>>> device_assignment = tf.tpu.experimental.DeviceAssignment.build(
... topology,
... computation_shape=[1, 1, 1, 2],
... num_replicas=1)
>>> strategy = tf.distribute.TPUStrategy(
... resolver, experimental_device_assignment=device_assignment)
Then you can run a `tf.add` operation only on logical device 0.
>>> @tf.function()
... def step_fn(inputs):
... features, _ = inputs
... output = tf.add(features, features)
...
... # Add operation will be executed on logical device 0.
... output = strategy.experimental_assign_to_logical_device(output, 0)
... return output
>>> dist_dataset = strategy.distribute_datasets_from_function(
... dataset_fn)
>>> iterator = iter(dist_dataset)
>>> strategy.run(step_fn, args=(next(iterator),))
`experimental_spmd_xla_partitioning` enables the experimental XLA SPMD feature
for model parallelism. This flag can reduce the compilation time and HBM
requirements. When running in this mode, every input tensor must either be
partitioned (via `strategy.experimental_split_to_logical_devices`) or fully
replicated (via `strategy.experimental_replicate_to_logical_devices`) to all
logical devices. And calling `strategy.experimental_assign_to_logical_device`
will result in a ValueError in this mode.
"""
def __init__(self,
tpu_cluster_resolver=None,
experimental_device_assignment=None,
experimental_spmd_xla_partitioning=False):
"""Synchronous training in TPU donuts or Pods.
Args:
tpu_cluster_resolver: A
`tf.distribute.cluster_resolver.TPUClusterResolver` instance, which
provides information about the TPU cluster. If None, it will assume
running on a local TPU worker.
experimental_device_assignment: Optional
`tf.tpu.experimental.DeviceAssignment` to specify the placement of
replicas on the TPU cluster.
experimental_spmd_xla_partitioning: If True, enable the SPMD (Single
Program Multiple Data) mode in XLA compiler. This flag only affects the
performance of XLA compilation and the HBM requirement of the compiled
TPU program. Ceveat: if this flag is True, calling
`tf.distribute.TPUStrategy.experimental_assign_to_logical_device` will
result in a ValueError.
"""
super().__init__(
TPUExtended(
self,
tpu_cluster_resolver,
device_assignment=experimental_device_assignment,
use_spmd_for_xla_partitioning=experimental_spmd_xla_partitioning,
)
)
distribute_lib.distribution_strategy_gauge.get_cell("V2").set("TPUStrategy")
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_workers").set(self.extended.num_hosts)
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_replicas_per_worker").set(self.extended.num_replicas_per_host)
# Packed variable is used to reduce the overhead of function execution.
# For a DistributedVariable, only one variable handle is captured into a
# function graph. It's only supported in eager mode.
self._enable_packed_variable_in_eager_mode = True
def run(self, fn, args=(), kwargs=None, options=None):
"""Run the computation defined by `fn` on each TPU replica.
Executes ops specified by `fn` on each replica. If `args` or `kwargs` have
`tf.distribute.DistributedValues`, such as those produced by a
`tf.distribute.DistributedDataset` from
`tf.distribute.Strategy.experimental_distribute_dataset` or
`tf.distribute.Strategy.distribute_datasets_from_function`,
when `fn` is executed on a particular replica, it will be executed with the
component of `tf.distribute.DistributedValues` that correspond to that
replica.
`fn` may call `tf.distribute.get_replica_context()` to access members such
as `all_reduce`.
All arguments in `args` or `kwargs` should either be nest of tensors or
`tf.distribute.DistributedValues` containing tensors or composite tensors.
Example usage:
>>> resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
>>> tf.config.experimental_connect_to_cluster(resolver)
>>> tf.tpu.experimental.initialize_tpu_system(resolver)
>>> strategy = tf.distribute.TPUStrategy(resolver)
>>> @tf.function
... def run():
... def value_fn(value_context):
... return value_context.num_replicas_in_sync
... distributed_values = (
... strategy.experimental_distribute_values_from_function(value_fn))
... def replica_fn(input):
... return input * 2
... return strategy.run(replica_fn, args=(distributed_values,))
>>> result = run()
Args:
fn: The function to run. The output must be a `tf.nest` of `Tensor`s.
args: (Optional) Positional arguments to `fn`.
kwargs: (Optional) Keyword arguments to `fn`.
options: (Optional) An instance of `tf.distribute.RunOptions` specifying
the options to run `fn`.
Returns:
Merged return value of `fn` across replicas. The structure of the return
value is the same as the return value from `fn`. Each element in the
structure can either be `tf.distribute.DistributedValues`, `Tensor`
objects, or `Tensor`s (for example, if running on a single replica).
"""
validate_run_function(fn)
fn, args, kwargs = _maybe_partial_apply_variables(fn, args, kwargs)
# Note: the target function is converted to graph even when in Eager mode,
# so autograph is on by default here.
fn = autograph.tf_convert(fn, autograph_ctx.control_status_ctx())
options = options or distribute_lib.RunOptions()
return self.extended.tpu_run(fn, args, kwargs, options)
@property
def cluster_resolver(self):
"""Returns the cluster resolver associated with this strategy.
`tf.distribute.TPUStrategy` provides the associated
`tf.distribute.cluster_resolver.ClusterResolver`. If the user provides one
in `__init__`, that instance is returned; if the user does not, a default
`tf.distribute.cluster_resolver.TPUClusterResolver` is provided.
"""
return self.extended._tpu_cluster_resolver # pylint: disable=protected-access
def experimental_assign_to_logical_device(self, tensor, logical_device_id):
"""Adds annotation that `tensor` will be assigned to a logical device.
This adds an annotation to `tensor` specifying that operations on
`tensor` will be invoked on logical core device id `logical_device_id`.
When model parallelism is used, the default behavior is that all ops
are placed on zero-th logical device.
```python
# Initializing TPU system with 2 logical devices and 4 replicas.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
topology = tf.tpu.experimental.initialize_tpu_system(resolver)
device_assignment = tf.tpu.experimental.DeviceAssignment.build(
topology,
computation_shape=[1, 1, 1, 2],
num_replicas=4)
strategy = tf.distribute.TPUStrategy(
resolver, experimental_device_assignment=device_assignment)
iterator = iter(inputs)
@tf.function()
def step_fn(inputs):
output = tf.add(inputs, inputs)
# Add operation will be executed on logical device 0.
output = strategy.experimental_assign_to_logical_device(output, 0)
return output
strategy.run(step_fn, args=(next(iterator),))
```
Args:
tensor: Input tensor to annotate.
logical_device_id: Id of the logical core to which the tensor will be
assigned.
Raises:
ValueError: The logical device id presented is not consistent with total
number of partitions specified by the device assignment or the TPUStrategy
is constructed with `experimental_spmd_xla_partitioning=True`.
Returns:
Annotated tensor with identical value as `tensor`.
"""
if self.extended._use_spmd_for_xla_partitioning: # pylint: disable=protected-access
raise ValueError(
"Cannot assign a tensor to a logical device in SPMD mode. To disable "
"SPMD, Please construct the TPUStrategy with "
"`experimental_spmd_xla_partitioning=False`")
num_logical_devices_per_replica = self.extended._tpu_devices.shape[1] # pylint: disable=protected-access
if (logical_device_id < 0 or
logical_device_id >= num_logical_devices_per_replica):
raise ValueError("`logical_core_id` to assign must be lower then total "
"number of logical devices per replica. Received "
"logical device id {} but there are only total of {} "
"logical devices in replica.".format(
logical_device_id, num_logical_devices_per_replica))
return xla_sharding.assign_device(
tensor, logical_device_id, use_sharding_op=True)
def experimental_split_to_logical_devices(self, tensor, partition_dimensions):
"""Adds annotation that `tensor` will be split across logical devices.
This adds an annotation to tensor `tensor` specifying that operations on
`tensor` will be split among multiple logical devices. Tensor `tensor` will
be split across dimensions specified by `partition_dimensions`.
The dimensions of `tensor` must be divisible by corresponding value in
`partition_dimensions`.
For example, for system with 8 logical devices, if `tensor` is an image
tensor with shape (batch_size, width, height, channel) and
`partition_dimensions` is [1, 2, 4, 1], then `tensor` will be split
2 in width dimension and 4 way in height dimension and the split
tensor values will be fed into 8 logical devices.
```python
# Initializing TPU system with 8 logical devices and 1 replica.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
topology = tf.tpu.experimental.initialize_tpu_system(resolver)
device_assignment = tf.tpu.experimental.DeviceAssignment.build(
topology,
computation_shape=[1, 2, 2, 2],
num_replicas=1)
# Construct the TPUStrategy. Since we are going to split the image across
# logical devices, here we set `experimental_spmd_xla_partitioning=True`
# so that the partitioning can be compiled in SPMD mode, which usually
# results in faster compilation and smaller HBM requirement if the size of
# input and activation tensors are much bigger than that of the model
# parameters. Note that this flag is suggested but not a hard requirement
# for `experimental_split_to_logical_devices`.
strategy = tf.distribute.TPUStrategy(
resolver, experimental_device_assignment=device_assignment,
experimental_spmd_xla_partitioning=True)
iterator = iter(inputs)
@tf.function()
def step_fn(inputs):
inputs = strategy.experimental_split_to_logical_devices(
inputs, [1, 2, 4, 1])
# model() function will be executed on 8 logical devices with `inputs`
# split 2 * 4 ways.
output = model(inputs)
return output
strategy.run(step_fn, args=(next(iterator),))
```
Args:
tensor: Input tensor to annotate.
partition_dimensions: An unnested list of integers with the size equal to
rank of `tensor` specifying how `tensor` will be partitioned. The
product of all elements in `partition_dimensions` must be equal to the
total number of logical devices per replica.
Raises:
ValueError: 1) If the size of partition_dimensions does not equal to rank
of `tensor` or 2) if product of elements of `partition_dimensions` does
not match the number of logical devices per replica defined by the
implementing DistributionStrategy's device specification or
3) if a known size of `tensor` is not divisible by corresponding
value in `partition_dimensions`.
Returns:
Annotated tensor with identical value as `tensor`.
"""
num_logical_devices_per_replica = self.extended._tpu_devices.shape[1] # pylint: disable=protected-access
num_partition_splits = np.prod(partition_dimensions)
input_shape = tensor.shape
tensor_rank = len(input_shape)
if tensor_rank != len(partition_dimensions):
raise ValueError("Length of `partition_dimensions` must equal to the "
"rank of `tensor.shape` ({}). Received "
"len(partition_dimensions)={}.".format(
tensor_rank, len(partition_dimensions)))
for dim_index, dim_size in enumerate(input_shape):
if dim_size is None:
continue
split_size = partition_dimensions[dim_index]
if dim_size % split_size != 0:
raise ValueError("Tensor shape at `partition_dimensions[{}]` must be "
"divisible by corresponding value specified "
"by `partition_dimensions` ({}). Received: {}.".format(
dim_index, split_size, dim_size))
if num_partition_splits != num_logical_devices_per_replica:
raise ValueError(
"The product of `partition_dimensions` should be the same as the "
"number of logical devices (={}). Received `partition_dimensions`={},"
"and their product is {}.".format(num_logical_devices_per_replica,
partition_dimensions,
num_partition_splits))
tile_assignment = np.arange(num_partition_splits).reshape(
partition_dimensions)
return xla_sharding.tile(tensor, tile_assignment, use_sharding_op=True)
def experimental_replicate_to_logical_devices(self, tensor):
"""Adds annotation that `tensor` will be replicated to all logical devices.
This adds an annotation to tensor `tensor` specifying that operations on
`tensor` will be invoked on all logical devices.
```python
# Initializing TPU system with 2 logical devices and 4 replicas.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
topology = tf.tpu.experimental.initialize_tpu_system(resolver)
device_assignment = tf.tpu.experimental.DeviceAssignment.build(
topology,
computation_shape=[1, 1, 1, 2],
num_replicas=4)
strategy = tf.distribute.TPUStrategy(
resolver, experimental_device_assignment=device_assignment)
iterator = iter(inputs)
@tf.function()
def step_fn(inputs):
images, labels = inputs
images = strategy.experimental_split_to_logical_devices(
inputs, [1, 2, 4, 1])
# model() function will be executed on 8 logical devices with `inputs`
# split 2 * 4 ways.
output = model(inputs)
# For loss calculation, all logical devices share the same logits
# and labels.
labels = strategy.experimental_replicate_to_logical_devices(labels)
output = strategy.experimental_replicate_to_logical_devices(output)
loss = loss_fn(labels, output)
return loss
strategy.run(step_fn, args=(next(iterator),))
```
Args:
tensor: Input tensor to annotate.
Returns:
Annotated tensor with identical value as `tensor`.
"""
return xla_sharding.replicate(tensor, use_sharding_op=True)
@tf_export.tf_export("distribute.experimental.TPUStrategy", v1=[])
@deprecation.deprecated_endpoints("distribute.experimental.TPUStrategy")
|
TPUStrategyV2
|
python
|
django__django
|
tests/check_framework/test_security.py
|
{
"start": 13542,
"end": 14432
}
|
class ____(SimpleTestCase):
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_SSL_REDIRECT=False,
)
def test_no_ssl_redirect(self):
"""
Warn if SECURE_SSL_REDIRECT isn't True.
"""
self.assertEqual(base.check_ssl_redirect(None), [base.W008])
@override_settings(MIDDLEWARE=[], SECURE_SSL_REDIRECT=False)
def test_no_ssl_redirect_no_middleware(self):
"""
Don't warn if SECURE_SSL_REDIRECT is False and SecurityMiddleware isn't
installed.
"""
self.assertEqual(base.check_ssl_redirect(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_SSL_REDIRECT=True,
)
def test_with_ssl_redirect(self):
self.assertEqual(base.check_ssl_redirect(None), [])
|
CheckSSLRedirectTest
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/dataviews.py
|
{
"start": 141046,
"end": 144171
}
|
class ____(Response):
"""
Response of dataviews.publish_many endpoint.
:param succeeded:
:type succeeded: Sequence[dict]
:param failed:
:type failed: Sequence[dict]
"""
_service = "dataviews"
_action = "publish_many"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"failed": {
"items": {
"properties": {
"error": {
"description": "Error info",
"properties": {
"codes": {
"items": {"type": "integer"},
"type": "array",
},
"data": {
"additionalProperties": True,
"type": "object",
},
"msg": {"type": "string"},
},
"type": "object",
},
"id": {
"description": "ID of the failed entity",
"type": "string",
},
},
"type": "object",
},
"type": ["array", "null"],
},
"succeeded": {
"items": {
"properties": {
"id": {
"description": "ID of the succeeded entity",
"type": "string",
},
"published": {
"description": "Indicates whether the dataview was published",
"type": "boolean",
},
},
"type": "object",
},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(self, succeeded=None, failed=None, **kwargs):
super(PublishManyResponse, self).__init__(**kwargs)
self.succeeded = succeeded
self.failed = failed
@schema_property("succeeded")
def succeeded(self):
return self._property_succeeded
@succeeded.setter
def succeeded(self, value):
if value is None:
self._property_succeeded = None
return
self.assert_isinstance(value, "succeeded", (list, tuple))
self.assert_isinstance(value, "succeeded", (dict,), is_array=True)
self._property_succeeded = value
@schema_property("failed")
def failed(self):
return self._property_failed
@failed.setter
def failed(self, value):
if value is None:
self._property_failed = None
return
self.assert_isinstance(value, "failed", (list, tuple))
self.assert_isinstance(value, "failed", (dict,), is_array=True)
self._property_failed = value
|
PublishManyResponse
|
python
|
PrefectHQ__prefect
|
tests/server/orchestration/api/test_concurrency_limits.py
|
{
"start": 4213,
"end": 12510
}
|
class ____:
@pytest.fixture
async def tags_with_limits(
self,
client: AsyncClient,
) -> List[str]:
tags = ["tag1", "tag2"]
for tag in tags:
await client.post(
"/concurrency_limits/",
json=ConcurrencyLimitCreate(
tag=tag,
concurrency_limit=2,
).model_dump(mode="json"),
)
return tags
async def test_acquiring_and_releasing_limits(
self,
client: AsyncClient,
tags_with_limits: List[str],
):
task_run_id = uuid4()
tags = tags_with_limits + ["does-not-exist"]
response = await client.post(
"/concurrency_limits/increment",
json={"names": tags, "task_run_id": str(task_run_id)},
)
assert response.status_code == status.HTTP_200_OK
for tag in tags_with_limits:
read_response = await client.get(f"/concurrency_limits/tag/{tag}")
concurrency_limit = schemas.core.ConcurrencyLimit.model_validate(
read_response.json()
)
assert concurrency_limit.active_slots == [task_run_id]
response = await client.post(
"/concurrency_limits/decrement",
json={"names": tags, "task_run_id": str(task_run_id)},
)
assert response.status_code == status.HTTP_200_OK
# confirm the slots have been released
for tag in tags_with_limits:
read_response = await client.get(f"/concurrency_limits/tag/{tag}")
concurrency_limit = schemas.core.ConcurrencyLimit.model_validate(
read_response.json()
)
assert concurrency_limit.active_slots == []
async def test_failing_to_acquire_one_slot(
self,
client: AsyncClient,
tags_with_limits: List[str],
):
task_run_id = uuid4()
tags = tags_with_limits + ["does-not-exist"]
# Acquire the slots by two random other task runs
for i in range(2):
response = await client.post(
"/concurrency_limits/increment",
json={"names": tags_with_limits[:1], "task_run_id": str(uuid4())},
)
assert response.status_code == status.HTTP_200_OK
response = await client.post(
"/concurrency_limits/increment",
json={"names": tags, "task_run_id": str(task_run_id)},
)
assert response.status_code == status.HTTP_423_LOCKED
assert "Retry-After" in response.headers
assert (
0.0
<= float(response.headers["Retry-After"])
<= PREFECT_TASK_RUN_TAG_CONCURRENCY_SLOT_WAIT_SECONDS.value() * 2
)
assert (
"Concurrency limit for the tag1 tag has been reached"
in response.json()["detail"]
)
for tag in tags_with_limits:
read_response = await client.get(f"/concurrency_limits/tag/{tag}")
concurrency_limit = schemas.core.ConcurrencyLimit.model_validate(
read_response.json()
)
assert task_run_id not in concurrency_limit.active_slots
response = await client.post(
"/concurrency_limits/decrement",
json={"names": tags, "task_run_id": str(task_run_id)},
)
assert response.status_code == status.HTTP_200_OK
# confirm the slots have been released
for tag in tags_with_limits:
read_response = await client.get(f"/concurrency_limits/tag/{tag}")
concurrency_limit = schemas.core.ConcurrencyLimit.model_validate(
read_response.json()
)
assert task_run_id not in concurrency_limit.active_slots
@pytest.fixture
async def tag_with_zero_concurrency(
self,
client: AsyncClient,
) -> str:
await client.post(
"/concurrency_limits/",
json=ConcurrencyLimitCreate(
tag="zero",
concurrency_limit=0,
).model_dump(mode="json"),
)
return "zero"
async def test_setting_tag_to_zero_concurrency(
self,
client: AsyncClient,
tags_with_limits: List[str],
tag_with_zero_concurrency: str,
):
task_run_id = uuid4()
tags = tags_with_limits + [tag_with_zero_concurrency] + ["does-not-exist"]
response = await client.post(
"/concurrency_limits/increment",
json={"names": tags, "task_run_id": str(task_run_id)},
)
assert response.status_code == status.HTTP_423_LOCKED
assert "Retry-After" not in response.headers
assert (
'The concurrency limit on tag "zero" is 0 and will deadlock if the task '
"tries to run again." in response.json()["detail"]
)
for tag in tags_with_limits + [tag_with_zero_concurrency]:
read_response = await client.get(f"/concurrency_limits/tag/{tag}")
concurrency_limit = schemas.core.ConcurrencyLimit.model_validate(
read_response.json()
)
assert concurrency_limit.active_slots == []
response = await client.post(
"/concurrency_limits/decrement",
json={"names": tags, "task_run_id": str(task_run_id)},
)
assert response.status_code == status.HTTP_200_OK
# confirm the slots have been released
for tag in tags_with_limits + [tag_with_zero_concurrency]:
read_response = await client.get(f"/concurrency_limits/tag/{tag}")
concurrency_limit = schemas.core.ConcurrencyLimit.model_validate(
read_response.json()
)
assert concurrency_limit.active_slots == []
async def test_acquiring_returns_limits(
self,
client: AsyncClient,
tags_with_limits: List[str],
):
task_run_id = uuid4()
tags = tags_with_limits + ["does-not-exist"]
response = await client.post(
"/concurrency_limits/increment",
json={"names": tags, "task_run_id": str(task_run_id)},
)
assert response.status_code == status.HTTP_200_OK
limits = [
MinimalConcurrencyLimitResponse.model_validate(limit)
for limit in response.json()
]
assert len(limits) == 2 # ignores tags that don't exist
async def test_releasing_returns_limits(
self,
client: AsyncClient,
tags_with_limits: List[str],
):
task_run_id = uuid4()
tags = tags_with_limits + ["does-not-exist"]
response = await client.post(
"/concurrency_limits/increment",
json={"names": tags, "task_run_id": str(task_run_id)},
)
assert response.status_code == status.HTTP_200_OK
response = await client.post(
"/concurrency_limits/decrement",
json={"names": tags, "task_run_id": str(task_run_id)},
)
assert response.status_code == status.HTTP_200_OK
limits = [
MinimalConcurrencyLimitResponse.model_validate(limit)
for limit in response.json()
]
assert len(limits) == 2 # ignores tags that don't exist
async def test_acquiring_returns_empty_list_if_no_limits(
self,
client: AsyncClient,
tags_with_limits: List[str],
):
task_run_id = uuid4()
tags = ["does-not-exist"]
response = await client.post(
"/concurrency_limits/increment",
json={"names": tags, "task_run_id": str(task_run_id)},
)
assert response.status_code == status.HTTP_200_OK
assert response.json() == []
async def test_releasing_returns_empty_list_if_no_limits(
self,
client: AsyncClient,
tags_with_limits: List[str],
):
task_run_id = uuid4()
tags = ["does-not-exist"]
response = await client.post(
"/concurrency_limits/decrement",
json={"names": tags, "task_run_id": str(task_run_id)},
)
assert response.status_code == status.HTTP_200_OK
assert response.json() == []
|
TestAcquiringAndReleasing
|
python
|
great-expectations__great_expectations
|
contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_crc32.py
|
{
"start": 469,
"end": 1566
}
|
class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_crc32"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
def matches_crc32_regex(x):
return bool(re.match(CRC32_REGEX, str(x)))
return column.apply(lambda x: matches_crc32_regex(x) if x else False)
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
|
ColumnValuesToBeValidCrc32
|
python
|
explosion__spaCy
|
spacy/schemas.py
|
{
"start": 6529,
"end": 8362
}
|
class ____(BaseModel):
REGEX: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="regex")
IN: Optional[List[StrictStr]] = Field(None, alias="in")
NOT_IN: Optional[List[StrictStr]] = Field(None, alias="not_in")
IS_SUBSET: Optional[List[StrictStr]] = Field(None, alias="is_subset")
IS_SUPERSET: Optional[List[StrictStr]] = Field(None, alias="is_superset")
INTERSECTS: Optional[List[StrictStr]] = Field(None, alias="intersects")
FUZZY: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy")
FUZZY1: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
None, alias="fuzzy1"
)
FUZZY2: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
None, alias="fuzzy2"
)
FUZZY3: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
None, alias="fuzzy3"
)
FUZZY4: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
None, alias="fuzzy4"
)
FUZZY5: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
None, alias="fuzzy5"
)
FUZZY6: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
None, alias="fuzzy6"
)
FUZZY7: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
None, alias="fuzzy7"
)
FUZZY8: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
None, alias="fuzzy8"
)
FUZZY9: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
None, alias="fuzzy9"
)
class Config:
extra = "forbid"
allow_population_by_field_name = True # allow alias and field name
@validator("*", pre=True, each_item=True, allow_reuse=True)
def raise_for_none(cls, v):
if v is None:
raise ValueError("None / null is not allowed")
return v
|
TokenPatternString
|
python
|
getsentry__sentry
|
tests/sentry/workflow_engine/models/test_data_condition.py
|
{
"start": 1794,
"end": 5111
}
|
class ____(DataConditionHandlerMixin, BaseWorkflowTest):
def test(self) -> None:
dc = self.create_data_condition(
type=Condition.GREATER, comparison=1.0, condition_result=DetectorPriorityLevel.HIGH
)
assert dc.evaluate_value(2) == DetectorPriorityLevel.HIGH
assert dc.evaluate_value(1) is None
def test_dict_comparison_result(self) -> None:
def evaluate_value(
value: int, comparison: dict[str, DetectorPriorityLevel]
) -> DetectorPriorityLevel:
return (
DetectorPriorityLevel.HIGH
if comparison["baz"].value > 1
else DetectorPriorityLevel.OK
)
dc = self.setup_condition_mocks(
evaluate_value, ["sentry.workflow_engine.models.data_condition"]
)
dc.update(comparison={"baz": MockDataConditionEnum.BAR})
assert dc.evaluate_value(2) == DetectorPriorityLevel.HIGH
dc.update(comparison={"baz": MockDataConditionEnum.FOO})
result = dc.evaluate_value(0)
assert result == DetectorPriorityLevel.OK
self.teardown_condition_mocks()
def test_bad_condition(self) -> None:
with pytest.raises(ValueError):
# Raises ValueError because the condition is invalid
self.create_data_condition(
type="invalid", comparison=1.0, condition_result=DetectorPriorityLevel.HIGH
)
def test_bad_comparison(self) -> None:
dc = self.create_data_condition(
type=Condition.GREATER, comparison="hi", condition_result=DetectorPriorityLevel.HIGH
)
# Raises a TypeError because str vs int comparison
with mock.patch("sentry.workflow_engine.models.data_condition.logger") as mock_logger:
dc.evaluate_value(2)
assert mock_logger.exception.call_args[0][0] == "Invalid comparison for data condition"
def test_condition_result_comparison_fails(self) -> None:
dc = self.create_data_condition(
type=Condition.GREATER, comparison=1.0, condition_result="wrong"
)
assert dc.evaluate_value(2) == ConditionError(msg="Invalid condition result")
def test_condition_evaluation__data_condition_exception(self) -> None:
def evaluate_value(value: int, comparison: int) -> bool:
raise DataConditionEvaluationException("A known error occurred")
dc = self.setup_condition_mocks(
evaluate_value, ["sentry.workflow_engine.models.data_condition"]
)
with mock.patch("sentry.workflow_engine.models.data_condition.logger.info") as mock_logger:
dc.evaluate_value(2)
assert (
mock_logger.call_args[0][0]
== "A known error occurred while evaluating a data condition"
)
self.teardown_condition_mocks()
def test_condition_evaluation___exception(self) -> None:
def evaluate_value(value: int, comparison: int) -> bool:
raise Exception("Something went wrong")
dc = self.setup_condition_mocks(
evaluate_value, ["sentry.workflow_engine.models.data_condition"]
)
with pytest.raises(Exception):
dc.evaluate_value(2)
self.teardown_condition_mocks()
|
EvaluateValueTest
|
python
|
huggingface__transformers
|
tests/models/llama4/test_processing_llama4.py
|
{
"start": 797,
"end": 1482
}
|
class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = Llama4Processor
@classmethod
def _setup_image_processor(cls):
image_processor_class = cls._get_component_class_from_processor("image_processor")
return image_processor_class(max_patches=1, size={"height": 20, "width": 20})
@classmethod
def _setup_tokenizer(cls):
tokenizer_class = cls._get_component_class_from_processor("tokenizer")
return tokenizer_class.from_pretrained("unsloth/Llama-3.2-11B-Vision-Instruct-unsloth-bnb-4bit")
@classmethod
def _setup_test_attributes(cls, processor):
cls.image_token = processor.image_token
|
Llama4ProcessorTest
|
python
|
kamyu104__LeetCode-Solutions
|
Python/frog-jump.py
|
{
"start": 33,
"end": 576
}
|
class ____(object):
def canCross(self, stones):
"""
:type stones: List[int]
:rtype: bool
"""
if stones[1] != 1:
return False
last_jump_units = {s: set() for s in stones}
last_jump_units[1].add(1)
for s in stones[:-1]:
for j in last_jump_units[s]:
for k in (j-1, j, j+1):
if k > 0 and s+k in last_jump_units:
last_jump_units[s+k].add(k)
return bool(last_jump_units[stones[-1]])
|
Solution
|
python
|
ipython__ipython
|
tests/test_interactiveshell.py
|
{
"start": 27436,
"end": 28021
}
|
class ____(unittest.TestCase):
def test_transform_only_once(self):
cleanup = 0
line_t = 0
def count_cleanup(lines):
nonlocal cleanup
cleanup += 1
return lines
def count_line_t(lines):
nonlocal line_t
line_t += 1
return lines
ip.input_transformer_manager.cleanup_transforms.append(count_cleanup)
ip.input_transformer_manager.line_transforms.append(count_line_t)
ip.run_cell("1")
assert cleanup == 1
assert line_t == 1
|
TestMiscTransform
|
python
|
ray-project__ray
|
rllib/policy/torch_mixins.py
|
{
"start": 7088,
"end": 8682
}
|
class ____:
"""Mixin class adding a method for (soft) target net(s) synchronizations.
- Adds the `update_target` method to the policy.
Calling `update_target` updates all target Q-networks' weights from their
respective "main" Q-networks, based on tau (smooth, partial updating).
"""
def __init__(self):
# Hard initial update from Q-net(s) to target Q-net(s).
tau = self.config.get("tau", 1.0)
self.update_target(tau=tau)
def update_target(self, tau=None):
# Update_target_fn will be called periodically to copy Q network to
# target Q network, using (soft) tau-synching.
tau = tau or self.config.get("tau", 1.0)
model_state_dict = self.model.state_dict()
# Support partial (soft) synching.
# If tau == 1.0: Full sync from Q-model to target Q-model.
# Support partial (soft) synching.
# If tau == 1.0: Full sync from Q-model to target Q-model.
target_state_dict = next(iter(self.target_models.values())).state_dict()
model_state_dict = {
k: tau * model_state_dict[k] + (1 - tau) * v
for k, v in target_state_dict.items()
}
for target in self.target_models.values():
target.load_state_dict(model_state_dict)
def set_weights(self, weights):
# Makes sure that whenever we restore weights for this policy's
# model, we sync the target network (from the main model)
# at the same time.
TorchPolicy.set_weights(self, weights)
self.update_target()
|
TargetNetworkMixin
|
python
|
google__pytype
|
pytype/constant_folding.py
|
{
"start": 1600,
"end": 2775
}
|
class ____(Exception):
"""Errors raised during constant folding."""
def __init__(self, message, op):
super().__init__(message)
self.lineno = op.line
self.message = message
# We track constants at three levels:
# typ: A typestruct representing the abstract type of the constant
# elements: A list or map of top-level types
# value: The concrete python value
#
# 'elements' is an intermediate structure that tracks individual folded
# constants for every element in a map or list. So e.g. for the constant
# {'x': [1, 2], 'y': 3}
# we would have
# typ = ('map', {str}, {('list', {int}), int})
# value = {'x': [1, 2], 'y': 3}
# elements = {'x': <<[1, 2]>>, 'y': <<3>>}
# where <<x>> is the folded constant corresponding to x. This lets us
# short-circuit pyval tracking at any level in the structure and fall back to
# abstract types.
#
# Note that while we could in theory just track the python value, and then
# construct 'typ' and 'elements' at the end, that would mean recursively
# unfolding a structure that we have just folded; the code is simpler if we
# track elements and types at every stage.
@attrs.define
|
ConstantError
|
python
|
python-pillow__Pillow
|
src/PIL/IcoImagePlugin.py
|
{
"start": 10477,
"end": 13068
}
|
class ____(ImageFile.ImageFile):
"""
PIL read-only image support for Microsoft Windows .ico files.
By default the largest resolution image in the file will be loaded. This
can be changed by altering the 'size' attribute before calling 'load'.
The info dictionary has a key 'sizes' that is a list of the sizes available
in the icon file.
Handles classic, XP and Vista icon formats.
When saving, PNG compression is used. Support for this was only added in
Windows Vista. If you are unable to view the icon in Windows, convert the
image to "RGBA" mode before saving.
This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis
<casadebender@gmail.com>.
https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki
"""
format = "ICO"
format_description = "Windows Icon"
def _open(self) -> None:
self.ico = IcoFile(self.fp)
self.info["sizes"] = self.ico.sizes()
self.size = self.ico.entry[0].dim
self.load()
@property
def size(self) -> tuple[int, int]:
return self._size
@size.setter
def size(self, value: tuple[int, int]) -> None:
if value not in self.info["sizes"]:
msg = "This is not one of the allowed sizes of this image"
raise ValueError(msg)
self._size = value
def load(self) -> Image.core.PixelAccess | None:
if self._im is not None and self.im.size == self.size:
# Already loaded
return Image.Image.load(self)
im = self.ico.getimage(self.size)
# if tile is PNG, it won't really be loaded yet
im.load()
self.im = im.im
self._mode = im.mode
if im.palette:
self.palette = im.palette
if im.size != self.size:
warnings.warn("Image was not the expected size")
index = self.ico.getentryindex(self.size)
sizes = list(self.info["sizes"])
sizes[index] = im.size
self.info["sizes"] = set(sizes)
self.size = im.size
return Image.Image.load(self)
def load_seek(self, pos: int) -> None:
# Flag the ImageFile.Parser so that it
# just does all the decode at the end.
pass
#
# --------------------------------------------------------------------
Image.register_open(IcoImageFile.format, IcoImageFile, _accept)
Image.register_save(IcoImageFile.format, _save)
Image.register_extension(IcoImageFile.format, ".ico")
Image.register_mime(IcoImageFile.format, "image/x-icon")
|
IcoImageFile
|
python
|
pypa__setuptools
|
setuptools/_vendor/tomli/_parser.py
|
{
"start": 1555,
"end": 4177
}
|
class ____(ValueError):
"""An error raised if a document is not valid TOML."""
def load(__fp: BinaryIO, *, parse_float: ParseFloat = float) -> dict[str, Any]:
"""Parse TOML from a binary file object."""
b = __fp.read()
try:
s = b.decode()
except AttributeError:
raise TypeError(
"File must be opened in binary mode, e.g. use `open('foo.toml', 'rb')`"
) from None
return loads(s, parse_float=parse_float)
def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]: # noqa: C901
"""Parse TOML from a string."""
# The spec allows converting "\r\n" to "\n", even in string
# literals. Let's do so to simplify parsing.
src = __s.replace("\r\n", "\n")
pos = 0
out = Output(NestedDict(), Flags())
header: Key = ()
parse_float = make_safe_parse_float(parse_float)
# Parse one statement at a time
# (typically means one line in TOML source)
while True:
# 1. Skip line leading whitespace
pos = skip_chars(src, pos, TOML_WS)
# 2. Parse rules. Expect one of the following:
# - end of file
# - end of line
# - comment
# - key/value pair
# - append dict to list (and move to its namespace)
# - create dict (and move to its namespace)
# Skip trailing whitespace when applicable.
try:
char = src[pos]
except IndexError:
break
if char == "\n":
pos += 1
continue
if char in KEY_INITIAL_CHARS:
pos = key_value_rule(src, pos, out, header, parse_float)
pos = skip_chars(src, pos, TOML_WS)
elif char == "[":
try:
second_char: str | None = src[pos + 1]
except IndexError:
second_char = None
out.flags.finalize_pending()
if second_char == "[":
pos, header = create_list_rule(src, pos, out)
else:
pos, header = create_dict_rule(src, pos, out)
pos = skip_chars(src, pos, TOML_WS)
elif char != "#":
raise suffixed_err(src, pos, "Invalid statement")
# 3. Skip comment
pos = skip_comment(src, pos)
# 4. Expect end of line or end of file
try:
char = src[pos]
except IndexError:
break
if char != "\n":
raise suffixed_err(
src, pos, "Expected newline or end of document after a statement"
)
pos += 1
return out.data.dict
|
TOMLDecodeError
|
python
|
great-expectations__great_expectations
|
contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_california_zip.py
|
{
"start": 757,
"end": 1767
}
|
class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_california_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_california_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
|
ColumnValuesToBeValidCaliforniaZip
|
python
|
google__jax
|
jax/_src/sharding_impls.py
|
{
"start": 19449,
"end": 25975
}
|
class ____:
"""A hardware axis context for parallel computations that use the sharding
interface.
This context also uses the GSPMD partitioner.
"""
num_devices: int
device_assignment: tuple[xc.Device, ...] | None = None
abstract_mesh: mesh_lib.AbstractMesh | None = None
def __post_init__(self):
if self.device_assignment is not None:
assert isinstance(self.device_assignment, tuple)
assert self.num_devices == len(self.device_assignment)
# Similar to SPMDContext as ShardingContext also uses the GSPMD partitioner.
@property
def axis_env(self):
return AxisEnv(nreps=1, names=(), sizes=())
# -------------------- XLA OpSharding to PartitionSpec --------------------
# Note that OpSharding is more expressive than PartitionSpecs, so it's not
# always possible to convert them, but the code below should at least
# support handle all cases when this is possible.
def strides_for_sizes(sizes):
"""Returns an array of strides for major-to-minor sizes."""
return np.cumprod(sizes[::-1])[::-1] // np.asarray(sizes)
def unflatten_array(named_sizes, assignment):
"""Recovers the ordering of axis names based on a device assignment.
The device assignments that this function can convert into axis orders
are of the form::
np.arange(np.prod(named_sizes.values())).transpose(...).flatten()
for some transposition ``...``. This is satisfied by all OpSharding assignments
generated from partition specs.
Arguments:
named_sizes: A dictionary mapping axis names to their sizes.
assignment: A permutation of integers between 0 and the product of all
named sizes.
Returns:
A major-to-minor list of axis names that corresponds to the given assignment.
"""
named_sizes = {name: size for name, size in named_sizes.items() if size != 1}
sizes = np.fromiter(named_sizes.values(), dtype=np.int64)
strides = strides_for_sizes(sizes)
dims = explode_superdims(sizes, unflatten_superdims(assignment))
dim_to_name = {(size, stride): name for size, stride, name in zip(sizes, strides, named_sizes)}
return [dim_to_name[d] for d in dims]
def unflatten_superdims(assignment):
"""Unflatten a list of dimension sizes and their strides that generates assignment.
If this function succeeds for a given ``assignment``, then the following property
should be satisfied::
dims_with_strides = unflatten_superdims(assignment)
base_array = np.arange(map(fst, sorted(dims_with_strides, key=snd, reverse=True)))
assignment == base_array.transpose(argsort(dims_with_strides, key=snd, reverse=True)).flatten()
That is, the returned dimensions list all sizes of the base array (with strides
indicating their initial order). The order of dimensions in the list corresponds
to the permutation that applied to the base array generates the assignment.
"""
def check(cond):
if cond: return
raise NotImplementedError("Failed to convert OpSharding into a ShardingSpec. "
"Please open a bug report!")
flat_assignment = np.asarray(assignment, dtype=np.int64)
check(flat_assignment[0] == 0)
dims = []
while flat_assignment.size > 1:
stride = flat_assignment[1]
for i in range(len(flat_assignment)):
if flat_assignment[i] != i * stride: break
else:
# After this loop i should point to an "element after the sequence", so
# we have to increment it if the whole array is a strided sequence.
i += 1
size = i
dims.append((size, stride))
assert size > 1 # Ensure progress
flat_assignment = flat_assignment[::size]
return dims
def explode_superdims(sizes, dims):
"""Explode superdims to fit a known shape.
The unflattening process might mistakenly generate too few too large dimensions.
For example, ``unflatten_superdims(np.arange(n))`` always returns ``[(n, 1)]``.
This function takes a list of such contiguous super-dimensions and splits them
into smaller dimensions such that::
set(map(fst, explode_superdims(sizes, dims))) == set(sizes)
"""
strides_to_sizes = {stride: size for size, stride in zip(sizes, strides_for_sizes(sizes))}
dims = list(reversed(dims))
final_dims = []
for size, stride in dims:
target_size = strides_to_sizes[stride]
new_dims = []
while size > target_size:
assert target_size > 1 # Ensure progress
assert size % target_size == 0
new_dims.append((target_size, stride))
size //= target_size
stride *= target_size
target_size = strides_to_sizes[stride]
assert size == target_size
new_dims.append((size, stride))
final_dims += reversed(new_dims)
return final_dims
def parse_flatten_op_sharding(
hlo_sharding: xc.OpSharding | xc.HloSharding,
mesh: mesh_lib.Mesh | mesh_lib.AbstractMesh) -> Sequence[PartitionSpec]:
if isinstance(hlo_sharding, xc.OpSharding):
hlo_sharding = xc.HloSharding.from_proto(hlo_sharding)
if hlo_sharding.tuple_elements():
out: list[PartitionSpec] = []
for s in hlo_sharding.tuple_elements():
out.extend(parse_flatten_op_sharding(s, mesh))
return out
elif hlo_sharding.is_replicated():
return [PartitionSpec()]
elif hlo_sharding.is_maximal() and mesh.size == 1:
return [PartitionSpec()]
elif hlo_sharding.is_tiled():
mesh_shape = mesh.shape
mesh_axis_order = unflatten_array(
mesh.shape, hlo_sharding.tile_assignment_devices()
)
mesh_axis = iter(mesh_axis_order)
shape = hlo_sharding.tile_assignment_dimensions()
partitions = []
for dim_size in shape:
dim_partitions = []
while dim_size > 1:
axis = next(mesh_axis)
axis_size = mesh_shape[axis]
if dim_size % axis_size != 0:
raise ValueError(
f'{shape=} is incompatible with {mesh_shape=}: '
f'{dim_size=} is not divisible by {axis_size=}.'
)
dim_size //= axis_size
dim_partitions.append(axis)
partitions.append(tuple(dim_partitions))
if len(hlo_sharding.subgroup_types()) > 1:
raise NotImplementedError(
'Unhandled HloSharding type. Please open a bug report!'
)
if hlo_sharding.replicate_on_last_tile_dim():
partitions = partitions[:-1]
while partitions and partitions[-1] == ():
partitions.pop()
return [PartitionSpec(*partitions)]
else:
raise AssertionError("Unhandled OpSharding type. Please open a bug report!")
def _slice_as_tuple(s: slice):
assert s.step is None
return (s.start, s.stop)
|
ShardingContext
|
python
|
pandas-dev__pandas
|
pandas/tests/frame/indexing/test_setitem.py
|
{
"start": 29644,
"end": 31240
}
|
class ____:
@pytest.fixture
def idx(self):
naive = DatetimeIndex(["2013-1-1 13:00", "2013-1-2 14:00"], name="B")
idx = naive.tz_localize("US/Pacific")
return idx
@pytest.fixture
def expected(self, idx):
expected = Series(np.array(idx.tolist(), dtype="object"), name="B")
assert expected.dtype == idx.dtype
return expected
def test_setitem_dt64series(self, idx, expected):
# convert to utc
df = DataFrame(np.random.default_rng(2).standard_normal((2, 1)), columns=["A"])
df["B"] = idx
df["B"] = idx.to_series(index=[0, 1]).dt.tz_convert(None)
result = df["B"]
comp = Series(idx.tz_convert("UTC").tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
def test_setitem_datetimeindex(self, idx, expected):
# setting a DataFrame column with a tzaware DTI retains the dtype
df = DataFrame(np.random.default_rng(2).standard_normal((2, 1)), columns=["A"])
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
def test_setitem_object_array_of_tzaware_datetimes(self, idx, expected):
# setting a DataFrame column with a tzaware DTI retains the dtype
df = DataFrame(np.random.default_rng(2).standard_normal((2, 1)), columns=["A"])
# object array of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
expected = expected.dt.as_unit("us")
tm.assert_series_equal(result, expected)
|
TestSetitemTZAwareValues
|
python
|
realpython__materials
|
arcade-platformer/arcade_platformer/11_title_view.py
|
{
"start": 3292,
"end": 15920
}
|
class ____(arcade.View):
def __init__(self) -> None:
super().__init__()
# These lists will hold different sets of sprites
self.coins = None
self.background = None
self.walls = None
self.ladders = None
self.goals = None
self.enemies = None
# One sprite for the player, no more is needed
self.player = None
# We need a physics engine as well
self.physics_engine = None
# Someplace to keep score
self.score = 0
# Which level are we on?
self.level = 1
# Load up our sounds here
self.coin_sound = arcade.load_sound(
str(ASSETS_PATH / "sounds" / "coin.wav")
)
self.jump_sound = arcade.load_sound(
str(ASSETS_PATH / "sounds" / "jump.wav")
)
self.victory_sound = arcade.load_sound(
str(ASSETS_PATH / "sounds" / "victory.wav")
)
# Check if a joystick is connected
joysticks = arcade.get_joysticks()
if joysticks:
# If so, get the first one
self.joystick = joysticks[0]
self.joystick.open()
else:
# If not, flag it so we won't use it
print("There are no Joysticks")
self.joystick = None
def setup(self) -> None:
"""Sets up the game for the current level"""
# Get the current map based on the level
map_name = f"platform_level_{self.level:02}.tmx"
map_path = ASSETS_PATH / map_name
# What are the names of the layers?
wall_layer = "ground"
coin_layer = "coins"
goal_layer = "goal"
background_layer = "background"
ladders_layer = "ladders"
# Load the current map
game_map = arcade.tilemap.read_tmx(str(map_path))
# Load the layers
self.background = arcade.tilemap.process_layer(
game_map, layer_name=background_layer, scaling=MAP_SCALING
)
self.goals = arcade.tilemap.process_layer(
game_map, layer_name=goal_layer, scaling=MAP_SCALING
)
self.walls = arcade.tilemap.process_layer(
game_map, layer_name=wall_layer, scaling=MAP_SCALING
)
self.ladders = arcade.tilemap.process_layer(
game_map, layer_name=ladders_layer, scaling=MAP_SCALING
)
self.coins = arcade.tilemap.process_layer(
game_map, layer_name=coin_layer, scaling=MAP_SCALING
)
# Set the background color
background_color = arcade.color.FRESH_AIR
if game_map.background_color:
background_color = game_map.background_color
arcade.set_background_color(background_color)
# Find the edge of the map to control viewport scrolling
self.map_width = (
game_map.map_size.width - 1
) * game_map.tile_size.width
# Create the player sprite, if they're not already setup
if not self.player:
self.player = self.create_player_sprite()
# Move the player sprite back to the beginning
self.player.center_x = PLAYER_START_X
self.player.center_y = PLAYER_START_Y
self.player.change_x = 0
self.player.change_y = 0
# Reset the viewport
self.view_left = 0
self.view_bottom = 0
# Load the physics engine for this map
self.physics_engine = arcade.PhysicsEnginePlatformer(
player_sprite=self.player,
platforms=self.walls,
gravity_constant=GRAVITY,
ladders=self.ladders,
)
def create_player_sprite(self) -> arcade.AnimatedWalkingSprite:
# Where are the player images stored?
texture_path = ASSETS_PATH / "images" / "player"
# Setup the appropriate textures
walking_paths = [
texture_path / f"alienGreen_walk{x}.png" for x in (1, 2)
]
climbing_paths = [
texture_path / f"alienGreen_climb{x}.png" for x in (1, 2)
]
standing_path = texture_path / "alienGreen_stand.png"
# Load them all now
walking_right_textures = [
arcade.load_texture(texture) for texture in walking_paths
]
walking_left_textures = [
arcade.load_texture(texture, mirrored=True)
for texture in walking_paths
]
walking_up_textures = [
arcade.load_texture(texture) for texture in climbing_paths
]
walking_down_textures = [
arcade.load_texture(texture) for texture in climbing_paths
]
standing_right_textures = [arcade.load_texture(standing_path)]
standing_left_textures = [
arcade.load_texture(standing_path, mirrored=True)
]
# Create the sprite
player = arcade.AnimatedWalkingSprite()
# Add the proper textures
player.stand_left_textures = standing_left_textures
player.stand_right_textures = standing_right_textures
player.walk_left_textures = walking_left_textures
player.walk_right_textures = walking_right_textures
player.walk_up_textures = walking_up_textures
player.walk_down_textures = walking_down_textures
# Set the player defaults
player.center_x = PLAYER_START_X
player.center_y = PLAYER_START_Y
player.state = arcade.FACE_RIGHT
# Set the initial texture
player.texture = player.stand_right_textures[0]
return player
def on_key_press(self, key: int, modifiers: int) -> None:
"""Arguments:
key -- Which key was pressed
modifiers -- Which modifiers were down at the time
"""
# Check for player left/right movement
if key in [arcade.key.LEFT, arcade.key.J]:
self.player.change_x = -PLAYER_MOVE_SPEED
elif key in [arcade.key.RIGHT, arcade.key.L]:
self.player.change_x = PLAYER_MOVE_SPEED
# Check if player can climb up or down
elif key in [arcade.key.UP, arcade.key.I]:
if self.physics_engine.is_on_ladder():
self.player.change_y = PLAYER_MOVE_SPEED
elif key in [arcade.key.DOWN, arcade.key.K]:
if self.physics_engine.is_on_ladder():
self.player.change_y = -PLAYER_MOVE_SPEED
# Check if we can jump
elif key == arcade.key.SPACE:
if self.physics_engine.can_jump():
self.player.change_y = PLAYER_JUMP_SPEED
# Play the jump sound
arcade.play_sound(self.jump_sound)
def on_key_release(self, key: int, modifiers: int) -> None:
"""Arguments:
key -- The key which was released
modifiers -- Which modifiers were down at the time
"""
# Check for player left/right movement
if key in [
arcade.key.LEFT,
arcade.key.J,
arcade.key.RIGHT,
arcade.key.L,
]:
self.player.change_x = 0
# Check if player can climb up or down
elif key in [
arcade.key.UP,
arcade.key.I,
arcade.key.DOWN,
arcade.key.K,
]:
if self.physics_engine.is_on_ladder():
self.player.change_y = 0
def on_update(self, delta_time: float) -> None:
"""Updates the position of all game objects
Arguments:
delta_time {float} -- How much time since the last call
"""
# First, check for joystick movement
if self.joystick:
# Check if we're in the dead zone
if abs(self.joystick.x) > DEAD_ZONE:
self.player.change_x = self.joystick.x * PLAYER_MOVE_SPEED
else:
self.player.change_x = 0
if abs(self.joystick.y) > DEAD_ZONE:
if self.physics_engine.is_on_ladder():
self.player.change_y = self.joystick.y * PLAYER_MOVE_SPEED
else:
self.player.change_y = 0
# Did the user press the jump button?
if self.joystick.buttons[0]:
if self.physics_engine.can_jump():
self.player.change_y = PLAYER_JUMP_SPEED
# Play the jump sound
arcade.play_sound(self.jump_sound)
# Update the player animation
self.player.update_animation(delta_time)
# Update player movement based on the physics engine
self.physics_engine.update()
# Restrict user movement so they can't walk off screen
if self.player.left < 0:
self.player.left = 0
# Check if we've picked up a coin
coins_hit = arcade.check_for_collision_with_list(
sprite=self.player, sprite_list=self.coins
)
for coin in coins_hit:
# Add the coin score to our score
self.score += int(coin.properties["point_value"])
# Play the coin sound
arcade.play_sound(self.coin_sound)
# Remove the coin
coin.remove_from_sprite_lists()
# Now check if we're at the ending goal
goals_hit = arcade.check_for_collision_with_list(
sprite=self.player, sprite_list=self.goals
)
if goals_hit:
# Play the victory sound
self.victory_sound.play()
# Setup the next level
self.level += 1
self.setup()
# Set the viewport, scrolling if necessary
self.scroll_viewport()
def scroll_viewport(self) -> None:
"""Scrolls the viewport when the player gets close to the edges"""
# Scroll left
# Find the current left boundary
left_boundary = self.view_left + LEFT_VIEWPORT_MARGIN
# Are we to the left of this boundary? Then we should scroll left
if self.player.left < left_boundary:
self.view_left -= left_boundary - self.player.left
# But don't scroll past the left edge of the map
if self.view_left < 0:
self.view_left = 0
# Scroll right
# Find the current right boundary
right_boundary = self.view_left + SCREEN_WIDTH - RIGHT_VIEWPORT_MARGIN
# Are we right of this boundary? Then we should scroll right
if self.player.right > right_boundary:
self.view_left += self.player.right - right_boundary
# Don't scroll past the right edge of the map
if self.view_left > self.map_width - SCREEN_WIDTH:
self.view_left = self.map_width - SCREEN_WIDTH
# Scroll up
top_boundary = self.view_bottom + SCREEN_HEIGHT - TOP_VIEWPORT_MARGIN
if self.player.top > top_boundary:
self.view_bottom += self.player.top - top_boundary
# Scroll down
bottom_boundary = self.view_bottom + BOTTOM_VIEWPORT_MARGIN
if self.player.bottom < bottom_boundary:
self.view_bottom -= bottom_boundary - self.player.bottom
# Only scroll to integers. Otherwise we end up with pixels that
# don't line up on the screen
self.view_bottom = int(self.view_bottom)
self.view_left = int(self.view_left)
# Do the scrolling
arcade.set_viewport(
left=self.view_left,
right=SCREEN_WIDTH + self.view_left,
bottom=self.view_bottom,
top=SCREEN_HEIGHT + self.view_bottom,
)
def on_draw(self) -> None:
arcade.start_render()
# Draw all the sprites
self.background.draw()
self.walls.draw()
self.coins.draw()
self.goals.draw()
self.ladders.draw()
self.player.draw()
# Draw the score in the lower left
score_text = f"Score: {self.score}"
# First a black background for a shadow effect
arcade.draw_text(
score_text,
start_x=10 + self.view_left,
start_y=10 + self.view_bottom,
color=arcade.csscolor.BLACK,
font_size=40,
)
# Now in white slightly shifted
arcade.draw_text(
score_text,
start_x=15 + self.view_left,
start_y=15 + self.view_bottom,
color=arcade.csscolor.WHITE,
font_size=40,
)
if __name__ == "__main__":
window = arcade.Window(
width=SCREEN_WIDTH, height=SCREEN_HEIGHT, title=SCREEN_TITLE
)
title_view = TitleView()
window.show_view(title_view)
arcade.run()
|
PlatformerView
|
python
|
fluentpython__example-code-2e
|
23-descriptor/bulkfood/bulkfood_v5.py
|
{
"start": 1424,
"end": 1775
}
|
class ____:
description = model.NonBlank() # <2>
weight = model.Quantity()
price = model.Quantity()
def __init__(self, description, weight, price):
self.description = description
self.weight = weight
self.price = price
def subtotal(self):
return self.weight * self.price
# end::LINEITEM_V5[]
|
LineItem
|
python
|
getsentry__sentry
|
tests/apidocs/endpoints/releases/test_organization_release_file_details.py
|
{
"start": 136,
"end": 1461
}
|
class ____(APIDocsTestCase):
def setUp(self) -> None:
self.login_as(user=self.user)
project = self.create_project(name="foo")
release = self.create_release(project=project, version="1")
file1 = self.create_file(
name="blah.js",
size=42,
type="release.file",
headers={"Content-Type": "application/json"},
checksum="dc1e3f3e411979d336c3057cce64294f3420f93a",
)
releasefile = self.create_release_file(
file=file1, release_id=release.id, name="http://example.com/blah.js"
)
self.url = reverse(
"sentry-api-0-organization-release-file-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"version": release.version,
"file_id": releasefile.id,
},
)
def test_get(self) -> None:
response = self.client.get(self.url)
request = RequestFactory().get(self.url)
self.validate_schema(request, response)
def test_put(self) -> None:
data = {"name": "newfilename.js"}
response = self.client.put(self.url, data)
request = RequestFactory().put(self.url, data)
self.validate_schema(request, response)
|
ReleaseFileDetailsDocsTest
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/dependency.py
|
{
"start": 20769,
"end": 20883
}
|
class ____(Enum):
DIRECT = "DIRECT"
FAN_IN = "FAN_IN"
DYNAMIC_COLLECT = "DYNAMIC_COLLECT"
|
DependencyType
|
python
|
google__jax
|
jax/_src/export/shape_poly_decision.py
|
{
"start": 1395,
"end": 20476
}
|
class ____:
"""A decision procedure based on elimination of terms.
Given an expression `e = t*t_k + rest_e` for which we want to compute bounds,
and a constraint `c = t*t_c_k + rest_c >= 0`,
Let `e0 = e*abs(t_c_k) - c*sgn(t_c_k)*t_k`. (Note that we eliminated `t` from
`e0`, since `abs(t_c_k)*t_k = sgn(t_c_k)*t_k*t_c_k`.)
Since `c >= 0`,
if `sgn(t_c_k)*t_k > 0`:
then `abs(t_c_k)*e >= e0`, hence, `LB(e) >= ceil(LB(e0) / abs(t_c_k))`,
if `sgn(t_c_k)*t_k < 0`
then `abs(t_c_k)*e <= e0`, hence, `UB(e) <= floor(UB(e0) / abs(t_c_k))`,
See the implementation in self.combine_term_with_existing.
Do not use the constructor directly, use the `build` static method.
"""
def __init__(self, scope: SymbolicScope):
self.scope = scope
self._processed_for_internal_constraints: set[_DimTerm] = set()
# The other fields are for keeping an efficient representation of
# the explicit constraints.
self._term_bounds: dict[_DimTerm, tuple[float, float]] = {}
# The _expr_constraints represents a set of constraints that are not
# just simple terms. The set is represented as a mapping from a
# term "t" to tuples (cmp, k, c) where "c >= 0" (if cmp is GEQ else "c == 0")
# represents a constraint that has "t" as the leading term with coefficient "k".
self._expr_constraints: dict[_DimTerm, set[tuple[Comparator, int, _DimExpr]]] = {}
def initialize(self) -> _DecisionByElimination:
# Process the explicit constraints in the order in which the user specifies
# them. This is because the heuristics depend on the order in which the
# constraints are processed, and this way we give the user a way to control
# the result (albeit, for now, without a good feedback loop to understand
# how the order matters for inequalities).
for constr in self.scope._explicit_constraints:
if not core.is_constant_dim(constr.diff):
self.add_implicit_constraints_expr(constr.diff) # type: ignore
self.combine_and_add_constraint(constr.cmp, constr.diff, 0,
constr.debug_str)
# Clear the cache, since we have added constraints.
self.scope._bounds_cache.clear()
return self
@staticmethod
def build(scope: SymbolicScope) -> _DecisionByElimination:
"""Builds an initialized DecisionByElimination for a scope.
Caches the initial state of the decision procedure in the scope.
"""
if not scope._initialized or not scope._explicit_constraints:
# We do not cache until the scope is fully initialized.
return _DecisionByElimination(scope).initialize()
if not scope._decision_initial_state:
scope._decision_initial_state = _DecisionByElimination(scope).initialize()
d = scope._decision_initial_state
# Return a copy, because the decision procedure state is mutable
c = _DecisionByElimination(scope)
c._processed_for_internal_constraints = d._processed_for_internal_constraints.copy()
c._term_bounds = d._term_bounds.copy()
c._expr_constraints = {
lead_t: lead_t_constraints.copy()
for lead_t, lead_t_constraints in d._expr_constraints.items()}
return c
def combine_and_add_constraint(self,
cmp: Comparator,
e1: _DimExpr | int | float,
e2: _DimExpr | int | float,
debug_str: str | None = None):
"""Adds a constraint "e1 >= e2" to the internal state."""
if isinstance(e1, float):
if np.isinf(e1) and e1 >= 0 and cmp == Comparator.GEQ: return
assert e1 == np.floor(e1)
e1 = int(e1)
if isinstance(e2, float):
if np.isinf(e2) and e2 <= 0 and cmp == Comparator.GEQ: return
assert e2 == np.floor(e2)
e2 = int(e2)
e = e1 - e2
if (const := _DimExpr._to_constant(e)) is not None:
if const < 0:
raise ValueError(f"Unsatisfiable constraint: {debug_str or str(e1) + ' >= ' + str(e2)}")
return
assert isinstance(e, _DimExpr)
self.add_to_state(cmp, e, debug_str)
geq_combinations = self.combine_constraint_with_existing(cmp, e, debug_str)
for cmp, a in geq_combinations:
self.add_to_state(cmp, a, None)
  def add_to_state(self,
                   cmp: Comparator,
                   e: _DimExpr,
                   debug_str: str | None):
    """Updates the internal state to reflect "e >= 0" (or "e == 0"). """
    assert _DimExpr._to_constant(e) is None
    if (term_factors := e._to_single_term()) is not None:
      # Fast path: `e` has the form `n + t_k * t` for a single term `t`, so
      # the constraint is just an interval bound on `t`.
      n, t_k, t = term_factors  # n + t * t_k [== | >=] 0
      lb, ub = self._term_bounds.get(t, (- np.inf, np.inf))
      if cmp == Comparator.EQ:
        # n + t_k * t == 0 -> t == - n // t_k
        if n % t_k:
          # `t` would have to be non-integral.
          raise ValueError(f"Unsatisfiable constraint: {debug_str}")
        t_val = - (n // t_k)
        lb = max(lb, t_val)
        ub = min(ub, t_val)
      else:  # GEQ
        if t_k > 0:
          # t >= -n / t_k
          lb = max(lb, int(np.ceil(- n / t_k)))
        else:
          # t <= -n / t_k
          ub = min(ub, int(np.floor(- n / t_k)))
      if lb > ub:
        raise ValueError(f"Unsatisfiable constraint: {debug_str}")
      self._term_bounds[t] = (lb, ub)
      return
    # General case: index the constraint by the leading term of `e`.
    lead_t, lead_t_k = e._leading_term
    lead_t_constraints = self._expr_constraints.get(lead_t)
    if lead_t_constraints is None:
      lead_t_constraints = set()
      self._expr_constraints[lead_t] = lead_t_constraints
    lead_t_constraints.add((cmp, lead_t_k, e))
  def combine_term_with_existing(self, t: _DimTerm, t_k: int, *,
                                 scope: shape_poly.SymbolicScope,
                                 only_smaller_than_t=True,
                                 ) -> Sequence[tuple[Comparator,
                                                     _DimExpr,
                                                     int,
                                                     int]]:
    """
    Combine a term with existing constraints.
    For input (t, t_k) the tuple (c_eq, c, c_s, t_s) is among the returned
    tuples if there exists a constraint `c =[c_eq] 0` that can be combined
    with `t*t_k` to eliminate `t`, and:
      * `c =[c_eq] 0`
      * The term `comb = t*t_k*t_s + c*c_s` does not contain `t`, and if
        `only_smaller_than_t` then `comb` contains only terms structurally
        smaller than `t`.
      * `c_s > 0`
    """
    # TODO: maybe a generator is useful here instead of materializing the list
    acc: list[tuple[Comparator, _DimExpr, int, int]] = []
    # First combine with the existing term bounds
    t_lb, t_ub = self._term_bounds.get(t, (-np.inf, np.inf))
    if t_lb == t_ub:
      # The value of `t` is known exactly: usable as an equality constraint.
      acc.append((Comparator.EQ, _DimExpr(((t, 1),), scope) - int(t_lb),
                  abs(t_k), - sgn(t_k)))
    else:
      if t_lb > -np.inf:
        # t - t_lb >= 0
        acc.append((Comparator.GEQ, _DimExpr(((t, 1),), scope) - int(t_lb),
                    abs(t_k), - sgn(t_k)))
      if t_ub < np.inf:
        # t_ub - t >= 0
        acc.append((Comparator.GEQ, _DimExpr(((t, -1),), scope) + int(t_ub),
                    abs(t_k), sgn(t_k)))
    # Then combine with the stored expression constraints.
    prev_constraint: set[tuple[Comparator, int, _DimExpr]]
    for prev_constraint in ([self._expr_constraints.get(t, set())] if only_smaller_than_t
                            else self._expr_constraints.values()):
      for c_eq, _, c in prev_constraint:
        # TODO: optimize this dict()
        tc_k = dict(c._sorted_terms).get(t)
        if tc_k is not None:
          # c =[c_eq] 0 AND t*tc_k appears in c.
          c_s = abs(t_k)
          c_t = - tc_k * sgn(t_k)
          acc.append((c_eq, c, c_s, c_t))
    return acc
  def combine_constraint_with_existing(self,
                                       eq: Comparator,
                                       e: _DimExpr,
                                       debug_str: str | None) -> set[tuple[Comparator, _DimExpr]]:
    """Combines the constraint `e =[eq] 0` with all existing constraints.
    Returns a set of derived constraints `(cmp, a)`, each standing for
    `a =[cmp] 0`, obtained by eliminating in turn each non-constant term of
    `e` against a matching existing constraint.
    Raises ValueError if a derived constraint is detectably unsatisfiable.
    """
    combinations: set[tuple[Comparator, _DimExpr]] = set()
    for t, t_k in e._sorted_terms:
      if t.is_constant: continue
      for (c_eq, c, c_s, t_s) in self.combine_term_with_existing(t, t_k,
                                                                 only_smaller_than_t=False,
                                                                 scope=e.scope):
        # c =[c_eq] 0 AND c_s > 0 AND t*t_k*t_s + c*c_s does not contain t
        if t_s > 0 or eq == Comparator.EQ:
          # The combination is sound: either `e` is scaled by a positive
          # factor, or `e` is an equality (any scale is fine).
          new_eq = Comparator.EQ if (eq == c_eq == Comparator.EQ) else Comparator.GEQ
          new_e = _DimExpr._linear_combination(e, t_s, c, c_s, e.scope)
          if (const := _DimExpr._to_constant(new_e)) is not None:
            if ((new_eq == Comparator.GEQ and const < 0) or
                (new_eq == Comparator.EQ and const != 0)):
              raise ValueError(f"Unsatisfiable constraints: {debug_str or str(e) + ' >= 0'}")
          else:
            combinations.add((new_eq, new_e))  # type: ignore
    return combinations
  def bounds(self, e: DimSize,
             prec: BoundsPrecision,
             add_implicit_constraints: bool = False
             ) -> tuple[float, float]:
    """Returns the lower and upper bounds, or -+inf.
    Args:
      e: the expression for which to compute the bounds.
      prec: the desired precision. See comments in `BoundsPrecision`.
      add_implicit_constraints: if True, then before computing the bounds
        add the implicit constraints for the terms inside `e`.
    """
    if (const := _DimExpr._to_constant(e)) is not None:
      return (const, const)
    assert isinstance(e, _DimExpr)
    # Caching bounds is tricky. Since the underlying _bounds_for_sorted_terms
    # is incomplete, and it may produce better results in the context of
    # specific queries (due to the implicit constraints), if we cache the
    # bounds computation we may stick to sub-optimal results. Also, we should
    # not use the precision as part of the cache key, because a certain result
    # may work for multiple precisions.
    if (res := self.scope._bounds_cache.get(e)) is not None:
      lb, ub, prev_prec = res
      # Reuse cached bounds if they are good enough for this query, or were
      # computed at least as precisely as now requested.
      if prec._bounds_are_sufficient(lb, ub): return (lb, ub)
      if prev_prec.value >= prec.value: return (lb, ub)
    if add_implicit_constraints:
      self.add_implicit_constraints_expr(e)
    lb, ub = self._bounds_for_sorted_terms(e.scope, e._sorted_terms, 0, prec)
    # Finite bounds are always integral.
    lb, ub = (int(lb) if lb > -np.inf else lb,
              int(ub) if ub < np.inf else ub)
    self.scope._bounds_cache[e] = (lb, ub, prec)
    return (lb, ub)
  def _bounds_for_sorted_terms(self,
                               scope: SymbolicScope,
                               e: Sequence[tuple[_DimTerm, int]],
                               i: int,
                               prec: BoundsPrecision) -> tuple[float, float]:
    """The lower and upper bounds of e[i:].
    See comments about soundness and `cmp_with` in the `shape_poly.bounds_decision`` method.
    Returns (lower-bound, upper-bound)
    """
    if i >= len(e): return (0, 0)
    t, t_k = e[i]
    if t.is_constant:
      assert i == len(e) - 1  # Must be last
      return (t_k, t_k)
    lb = -np.inf
    ub = np.inf
    # Eliminate the leading term `t` using constraints that mention it and
    # otherwise contain only structurally-smaller terms, then recurse.
    for (c_eq, c, c_s, t_s) in self.combine_term_with_existing(t, t_k,
                                                               only_smaller_than_t=True,
                                                               scope=scope):
      # `c =[eq] 0` AND `t*t_k*t_s + c*c_s` contains only terms smaller than t
      # AND c_s > 0.
      # `rest = e[i:]*t_s + c*c_s` AND `rest_ub >= rest >= rest_lb`
      # `rest` contains only terms smaller than `t`.
      rest = _DimExpr._linear_combination_sorted_pairs(e, i, t_s,
                                                       c._sorted_terms, 0, c_s)
      rest_lb, rest_ub = self._bounds_for_sorted_terms(scope, rest, 0,
                                                       BoundsPrecision.BEST)
      if rest_ub < np.inf:
        # We have: e[i:]*t_s = rest - c*c_s <= rest_ub
        if t_s > 0:
          ub = min(ub, int(np.floor(rest_ub / t_s)))
        else:
          lb = max(lb, int(np.ceil(rest_ub / t_s)))
      if rest_lb > - np.inf and c_eq == Comparator.EQ:
        # We have: e[i:]*t_s = rest - c*c_s = rest >= rest_lb
        if t_s > 0:
          lb = max(lb, int(np.ceil(rest_lb / t_s)))
        else:
          ub = min(ub, int(np.floor(rest_lb / t_s)))
      # Stop early once the bounds answer the caller's query.
      if prec._bounds_are_sufficient(lb, ub): return (lb, ub)
    # Now look for special rules for factors
    if (t_f := t.to_factor()) is not None:
      if t_f.operation in [_DimFactor.MAX, _DimFactor.MIN]:
        # m_c*MAX(op1, op2) + rest_e >= max(m_c * op1 + rest_e, m_c * op2 + rest_e)
        # if m_c > 0. Similar rules for when m_c < 0 and for MIN.
        op1, op2 = t_f.operands
        rest1 = _DimExpr._linear_combination_sorted_pairs(e, i + 1, 1,
                                                          op1._sorted_terms, 0, t_k)
        rest2 = _DimExpr._linear_combination_sorted_pairs(e, i + 1, 1,
                                                          op2._sorted_terms, 0, t_k)
        rest1_lb, rest1_ub = self._bounds_for_sorted_terms(scope, rest1, 0,
                                                           BoundsPrecision.BEST)
        rest2_lb, rest2_ub = self._bounds_for_sorted_terms(scope, rest2, 0,
                                                           BoundsPrecision.BEST)
        # `t_k * MAX(...)` behaves like a MAX when t_k > 0 and like a MIN
        # when t_k < 0 (and vice-versa for MIN).
        like_max = (t_k > 0 if t_f.operation == _DimFactor.MAX else t_k < 0)
        if like_max:
          lb = max(lb, max(rest1_lb, rest2_lb))
          ub = min(ub, max(rest1_ub, rest2_ub))
        else:
          lb = max(lb, min(rest1_lb, rest2_lb))
          ub = min(ub, min(rest1_ub, rest2_ub))
        if prec._bounds_are_sufficient(lb, ub, ): return (lb, ub)
    return lb, ub
def add_implicit_constraints_expr(self, e: _DimExpr):
"""Adds the implicit constraints for the expression `e`"""
for t, _ in e._sorted_terms:
if t.is_constant: continue
self.add_implicit_constraints_term(t)
  def add_implicit_constraints_term(self, t: _DimTerm):
    """Adds the implicit constraints for the term `t`: `v >= 1` for a
    variable, and range bounds plus defining identities for products and for
    mod/floordiv/max/min factors. Idempotent per decision-procedure instance.
    """
    if t in self._processed_for_internal_constraints: return
    self._processed_for_internal_constraints.add(t)
    t_e = _DimExpr._from_term(t, 1, self.scope)  # m as a _DimExpr
    f = t.to_factor()
    if f is None:
      # This is a multiplication of factors. Try to compute bounds based on
      # the bounds of the factors.
      bounds = []
      for f1, f1_exp in t._factors:
        f1_t = _DimTerm.from_factor(f1, 1)
        f1_e = _DimExpr._from_term(f1_t, 1, self.scope)
        self.add_implicit_constraints_term(f1_t)
        a1_l, a1_u = self.bounds(f1_e, BoundsPrecision.BEST)
        assert a1_l <= a1_u
        bounds.append((a1_l ** f1_exp, a1_u ** f1_exp))
      # The extreme values of the product are among the products of the
      # factors' extreme values.
      candidate_bounds = [math.prod(factor_bounds)
                          for factor_bounds in itertools.product(*bounds)]
      m_l = min(*candidate_bounds)
      m_u = max(*candidate_bounds)
      self.combine_and_add_constraint(Comparator.GEQ, t_e, m_l)
      self.combine_and_add_constraint(Comparator.GEQ, m_u, t_e)
      return
    # It is a factor, is it a variable?
    if (v := f.to_var()) is not None:
      # Dimension variables are assumed to be >= 1.
      self.combine_and_add_constraint(Comparator.GEQ, t_e, 1)  # v >= 1
      return
    # A non-variable factor: first process the operand expressions.
    for oper in f.operands:
      self.add_implicit_constraints_expr(oper)
    if f.operation == _DimFactor.MOD:
      op1, op2 = f.operands
      op2_b_l, op2_b_u = self.bounds(op2, BoundsPrecision.FOR_GEQ0_OR_LT0)
      if op2_b_l > 0:  # positive divisor
        self.combine_and_add_constraint(Comparator.GEQ, t_e, 0)  # m >= 0
        self.combine_and_add_constraint(Comparator.GEQ, op2 - 1, t_e)  # m <= op2 - 1
        self.combine_and_add_constraint(Comparator.GEQ, op2_b_u - 1, t_e)
      elif op2_b_u < 0:  # negative divisor
        self.combine_and_add_constraint(Comparator.GEQ, t_e, op2 + 1)  # m >= op2 + 1
        self.combine_and_add_constraint(Comparator.GEQ, t_e, op2_b_l + 1)
        self.combine_and_add_constraint(Comparator.GEQ, 0, t_e)  # m <= 0
      return
    if f.operation == _DimFactor.FLOORDIV:
      op1, op2 = f.operands
      (op1_l, op1_u) = self.bounds(op1, BoundsPrecision.BEST)
      (op2_l, op2_u) = self.bounds(op2, BoundsPrecision.BEST)
      def math_floor_with_inf(a: float, b: float):
        # math.floor(a / b), but aware of inf.
        # When either a or b are infinite, the result represents the limit
        # of "a // b".
        assert b != 0  # we caught division by 0 earlier
        if not np.isinf(b):  # divisor b is finite
          if not np.isinf(a):  # both dividend a and divisor b are finite
            return math.floor(a / b)
          # a is infinite, b is finite
          return -np.inf if (a >= 0) != (b >= 0) else np.inf
        elif not np.isinf(a):  # dividend a is finite and divisor b is infinite
          return -1 if (a >= 0) != (b >= 0) else 0
        else:  # both dividend and divisor are infinite
          return -np.inf if (a >= 0) != (b >= 0) else np.inf
      # Same reasoning as for multiplication: the bounds are among the cross-product
      # of the bounds.
      if op2_l <= 0 <= op2_u:
        raise InconclusiveDimensionOperation(
            f"Possible division by 0 in division by {op2}")
      candidate_bounds = [math_floor_with_inf(op1_l, op2_l),
                          math_floor_with_inf(op1_l, op2_u),
                          math_floor_with_inf(op1_u, op2_l),
                          math_floor_with_inf(op1_u, op2_u)]
      m_l = min(*candidate_bounds)
      m_u = max(*candidate_bounds)
      self.combine_and_add_constraint(Comparator.GEQ, t_e, m_l)
      self.combine_and_add_constraint(Comparator.GEQ, m_u, t_e)
      if op2_l >= 0:
        if op1_l >= 0:
          self.combine_and_add_constraint(Comparator.GEQ, t_e, 0)
        # The division identity: op1 == op2 * (op1 // op2) + op1 % op2.
        mod_e = _DimExpr._from_operation(_DimFactor.MOD, op1, op2,
                                         scope=self.scope)
        if isinstance(mod_e, _DimExpr):
          self.add_implicit_constraints_expr(mod_e)
        combined = op2 * t_e + mod_e
        self.combine_and_add_constraint(Comparator.EQ, op1, combined)
      return
    if f.operation == _DimFactor.MAX:
      op1, op2 = f.operands
      op1_b_l, op1_b_u = self.bounds(op1, BoundsPrecision.BEST)
      op2_b_l, op2_b_u = self.bounds(op2, BoundsPrecision.BEST)
      # max(op1, op2) lies within the operands' combined bounds and dominates
      # each operand.
      self.combine_and_add_constraint(Comparator.GEQ, t_e, max(op1_b_l, op2_b_l))
      self.combine_and_add_constraint(Comparator.GEQ, max(op1_b_u, op2_b_u), t_e)
      self.combine_and_add_constraint(Comparator.GEQ, t_e, op1)
      self.combine_and_add_constraint(Comparator.GEQ, t_e, op2)
      return
    if f.operation == _DimFactor.MIN:
      op1, op2 = f.operands
      op1_b_l, op1_b_u = self.bounds(op1, BoundsPrecision.BEST)
      op2_b_l, op2_b_u = self.bounds(op2, BoundsPrecision.BEST)
      # min(op1, op2) lies within the operands' combined bounds and is
      # dominated by each operand.
      self.combine_and_add_constraint(Comparator.GEQ, t_e, min(op1_b_l, op2_b_l))
      self.combine_and_add_constraint(Comparator.GEQ, min(op1_b_u, op2_b_u), t_e)
      self.combine_and_add_constraint(Comparator.GEQ, op1, t_e)
      self.combine_and_add_constraint(Comparator.GEQ, op2, t_e)
      return
|
_DecisionByElimination
|
python
|
euske__pdfminer
|
pdfminer/layout.py
|
{
"start": 20735,
"end": 21450
}
|
class ____(LTLayoutContainer):
    """A figure: a layout container whose bounding box is obtained by
    mapping the given box through an affine transformation matrix."""

    def __init__(self, name, bbox, matrix):
        self.name = name
        self.matrix = matrix
        # Transform all four corners of the given box and take their
        # bounding box as our own.
        (x, y, w, h) = bbox
        corners = ((x, y), (x + w, y), (x, y + h), (x + w, y + h))
        transformed = get_bound(apply_matrix_pt(matrix, corner)
                                for corner in corners)
        LTLayoutContainer.__init__(self, transformed)

    def __repr__(self):
        return '<%s(%s) %s matrix=%s>' % (self.__class__.__name__,
                                          self.name,
                                          bbox2str(self.bbox),
                                          matrix2str(self.matrix))

    def analyze(self, laparams):
        # Figure contents are only laid out when all_texts is requested.
        if laparams.all_texts:
            LTLayoutContainer.analyze(self, laparams)
## LTPage
##
|
LTFigure
|
python
|
pytorch__pytorch
|
test/dynamo/test_subclasses.py
|
{
"start": 93440,
"end": 95301
}
|
class ____(torch.nn.Module):
    # NOTE(review): auto-generated AOTAutograd backward graph (expected output
    # of a compile test). The inline comments record the AOTAutograd
    # provenance of each placeholder and output; do not edit by hand.
    def forward(
        self,
        primals_5: "Sym(s47)", # SubclassSizeAOTInput(base=PlainAOTInput(idx=2), idx=0)
        primals_7: "Sym(s16)", # SubclassStrideAOTInput(base=PlainAOTInput(idx=2), idx=0)
        tangents_1: "f32[s47, s16]", # SubclassGetAttrAOTInput(base=TangentAOTInput(output=PlainAOTOutput(idx=0)), attr='a')
        tangents_2: "f32[s47, s16]", # SubclassGetAttrAOTInput(base=TangentAOTInput(output=PlainAOTOutput(idx=0)), attr='b')
    ):
        # Reshape each tangent component back to the symbolic (s47, s16) shape.
        view_2: "f32[s47, s16]" = torch.ops.aten.view.default(tangents_1, [primals_5, primals_7]); tangents_1 = None
        view_3: "f32[s47, s16]" = torch.ops.aten.view.default(tangents_2, [primals_5, primals_7]); tangents_2 = None
        return (
            None, # None
            None, # None
            view_2, # SubclassGetAttrAOTOutput(base=GradAOTOutput(grad_of=PlainAOTInput(idx=2)), attr='a')
            view_3, # SubclassGetAttrAOTOutput(base=GradAOTOutput(grad_of=PlainAOTInput(idx=2)), attr='b')
            primals_5, # SubclassSizeAOTOutput(base=GradAOTOutput(grad_of=PlainAOTInput(idx=2)), idx=0)
            primals_7, # SubclassSizeAOTOutput(base=GradAOTOutput(grad_of=PlainAOTInput(idx=2)), idx=1)
            primals_7, # SubclassStrideAOTOutput(base=GradAOTOutput(grad_of=PlainAOTInput(idx=2)), idx=0)
        )
""", # noqa: B950
)
def test_tensor_subclass_TwoTensor_view_mul(self):
def f(tt):
y = tt.clone()
return y.view(y.shape[0] * y.shape[1])
a = torch.ones(3, 4, requires_grad=True)
b = a.clone()
tt = TwoTensor(a, b)
fw, bw = self._compile_check(f, [(tt,)], dynamic=True, call_backward=True)
self.assertExpectedInline(
normalize_gm(fw[0].print_readable(print_output=False, expanded_def=True)),
"""\
|
GraphModule
|
python
|
kamyu104__LeetCode-Solutions
|
Python/bitwise-or-of-all-subsequence-sums.py
|
{
"start": 365,
"end": 710
}
|
class ____(object):
    def subsequenceSumOr(self, nums):
        """
        Returns the bitwise OR of the sums of all subsequences of nums.
        :type nums: List[int]
        :rtype: int

        Scans bit positions from low to high; `cnt` holds the carry from the
        previous position plus the count of elements with the current bit set,
        so a non-zero `cnt` means some subsequence sum has this bit set.
        Time: O(64 * n), Space: O(1).
        """
        result = cnt = 0
        # Fix: use `range` instead of the Python-2-only `xrange` so the
        # solution runs on both Python 2 and Python 3 (identical results;
        # 64 positions cover 64-bit sums).
        for i in range(64):
            cnt >>= 1  # carry into this bit position
            for x in nums:
                cnt += (x >> i) & 1
            if cnt:
                result |= 1 << i
        return result
|
Solution2
|
python
|
huggingface__transformers
|
src/transformers/models/esm/openfold_utils/rigid_utils.py
|
{
"start": 24209,
"end": 41006
}
|
class ____:
    """
    A class representing a rigid transformation. Little more than a wrapper around two objects: a Rotation object and a
    [*, 3] translation Designed to behave approximately like a single torch tensor with the shape of the shared batch
    dimensions of its component parts.
    """
    def __init__(self, rots: Rotation | None, trans: torch.Tensor | None):
        """
        Args:
            rots: A [*, 3, 3] rotation tensor
            trans: A corresponding [*, 3] translation tensor
        """
        # (we need device, dtype, etc. from at least one input)
        batch_dims, dtype, device, requires_grad = None, None, None, None
        if trans is not None:
            batch_dims = trans.shape[:-1]
            dtype = trans.dtype
            device = trans.device
            requires_grad = trans.requires_grad
        elif rots is not None:
            batch_dims = rots.shape
            dtype = rots.dtype
            device = rots.device
            requires_grad = rots.requires_grad
        else:
            raise ValueError("At least one input argument must be specified")
        # Fill in whichever component was omitted with an identity.
        if rots is None:
            rots = Rotation.identity(
                batch_dims,
                dtype,
                device,
                requires_grad,
            )
        elif trans is None:
            trans = identity_trans(
                batch_dims,
                dtype,
                device,
                requires_grad,
            )
        assert rots is not None
        assert trans is not None
        if (rots.shape != trans.shape[:-1]) or (rots.device != trans.device):
            raise ValueError("Rots and trans incompatible")
        # Force full precision. Happens to the rotations automatically.
        trans = trans.to(dtype=torch.float32)
        self._rots = rots
        self._trans = trans
    @staticmethod
    def identity(
        shape: tuple[int, ...],
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
        requires_grad: bool = True,
        fmt: str = "quat",
    ) -> Rigid:
        """
        Constructs an identity transformation.
        Args:
            shape:
                The desired shape
            dtype:
                The dtype of both internal tensors
            device:
                The device of both internal tensors
            requires_grad:
                Whether grad should be enabled for the internal tensors
            fmt:
                The internal rotation representation ("quat" or "rot_mat")
        Returns:
            The identity transformation
        """
        return Rigid(
            Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt),
            identity_trans(shape, dtype, device, requires_grad),
        )
    def __getitem__(self, index: Any) -> Rigid:
        """
        Indexes the affine transformation with PyTorch-style indices. The index is applied to the shared dimensions of
        both the rotation and the translation.
        E.g.::
            r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None) t = Rigid(r, torch.rand(10, 10, 3)) indexed =
            t[3, 4:6] assert(indexed.shape == (2,)) assert(indexed.get_rots().shape == (2,))
            assert(indexed.get_trans().shape == (2, 3))
        Args:
            index: A standard torch tensor index. E.g. 8, (10, None, 3),
                or (3, slice(0, 1, None))
        Returns:
            The indexed tensor
        """
        if type(index) is not tuple:
            index = (index,)
        # The translation carries a trailing xyz dim that must be preserved.
        return Rigid(
            self._rots[index],
            self._trans[index + (slice(None),)],
        )
    def __mul__(self, right: torch.Tensor) -> Rigid:
        """
        Pointwise left multiplication of the transformation with a tensor. Can be used to e.g. mask the Rigid.
        Args:
            right:
                The tensor multiplicand
        Returns:
            The product
        """
        if not (isinstance(right, torch.Tensor)):
            raise TypeError("The other multiplicand must be a Tensor")
        new_rots = self._rots * right
        new_trans = self._trans * right[..., None]
        return Rigid(new_rots, new_trans)
    def __rmul__(self, left: torch.Tensor) -> Rigid:
        """
        Reverse pointwise multiplication of the transformation with a tensor.
        Args:
            left:
                The left multiplicand
        Returns:
            The product
        """
        return self.__mul__(left)
    @property
    def shape(self) -> torch.Size:
        """
        Returns the shape of the shared dimensions of the rotation and the translation.
        Returns:
            The shape of the transformation
        """
        return self._trans.shape[:-1]
    @property
    def device(self) -> torch.device:
        """
        Returns the device on which the Rigid's tensors are located.
        Returns:
            The device on which the Rigid's tensors are located
        """
        return self._trans.device
    def get_rots(self) -> Rotation:
        """
        Getter for the rotation.
        Returns:
            The rotation object
        """
        return self._rots
    def get_trans(self) -> torch.Tensor:
        """
        Getter for the translation.
        Returns:
            The stored translation
        """
        return self._trans
    def compose_q_update_vec(self, q_update_vec: torch.Tensor) -> Rigid:
        """
        Composes the transformation with a quaternion update vector of shape [*, 6], where the final 6 columns
        represent the x, y, and z values of a quaternion of form (1, x, y, z) followed by a 3D translation.
        Args:
            q_update_vec: The quaternion update vector.
        Returns:
            The composed transformation.
        """
        q_vec, t_vec = q_update_vec[..., :3], q_update_vec[..., 3:]
        new_rots = self._rots.compose_q_update_vec(q_vec)
        # The translation update is expressed in the local frame, so rotate
        # it into the global frame before adding.
        trans_update = self._rots.apply(t_vec)
        new_translation = self._trans + trans_update
        return Rigid(new_rots, new_translation)
    def compose(self, r: Rigid) -> Rigid:
        """
        Composes the current rigid object with another.
        Args:
            r:
                Another Rigid object
        Returns:
            The composition of the two transformations
        """
        new_rot = self._rots.compose_r(r._rots)
        new_trans = self._rots.apply(r._trans) + self._trans
        return Rigid(new_rot, new_trans)
    def apply(self, pts: torch.Tensor) -> torch.Tensor:
        """
        Applies the transformation to a coordinate tensor.
        Args:
            pts: A [*, 3] coordinate tensor.
        Returns:
            The transformed points.
        """
        rotated = self._rots.apply(pts)
        return rotated + self._trans
    def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:
        """
        Applies the inverse of the transformation to a coordinate tensor.
        Args:
            pts: A [*, 3] coordinate tensor
        Returns:
            The transformed points.
        """
        pts = pts - self._trans
        return self._rots.invert_apply(pts)
    def invert(self) -> Rigid:
        """
        Inverts the transformation.
        Returns:
            The inverse transformation.
        """
        rot_inv = self._rots.invert()
        trn_inv = rot_inv.apply(self._trans)
        return Rigid(rot_inv, -1 * trn_inv)
    def map_tensor_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rigid:
        """
        Apply a Tensor -> Tensor function to underlying translation and rotation tensors, mapping over the
        translation/rotation dimensions respectively.
        Args:
            fn:
                A Tensor -> Tensor function to be mapped over the Rigid
        Returns:
            The transformed Rigid object
        """
        new_rots = self._rots.map_tensor_fn(fn)
        new_trans = torch.stack(list(map(fn, torch.unbind(self._trans, dim=-1))), dim=-1)
        return Rigid(new_rots, new_trans)
    def to_tensor_4x4(self) -> torch.Tensor:
        """
        Converts a transformation to a homogeneous transformation tensor.
        Returns:
            A [*, 4, 4] homogeneous transformation tensor
        """
        tensor = self._trans.new_zeros((*self.shape, 4, 4))
        tensor[..., :3, :3] = self._rots.get_rot_mats()
        tensor[..., :3, 3] = self._trans
        tensor[..., 3, 3] = 1
        return tensor
    @staticmethod
    def from_tensor_4x4(t: torch.Tensor) -> Rigid:
        """
        Constructs a transformation from a homogeneous transformation tensor.
        Args:
            t: [*, 4, 4] homogeneous transformation tensor
        Returns:
            T object with shape [*]
        """
        if t.shape[-2:] != (4, 4):
            raise ValueError("Incorrectly shaped input tensor")
        rots = Rotation(rot_mats=t[..., :3, :3], quats=None)
        trans = t[..., :3, 3]
        return Rigid(rots, trans)
    def to_tensor_7(self) -> torch.Tensor:
        """
        Converts a transformation to a tensor with 7 final columns, four for the quaternion followed by three for the
        translation.
        Returns:
            A [*, 7] tensor representation of the transformation
        """
        tensor = self._trans.new_zeros((*self.shape, 7))
        tensor[..., :4] = self._rots.get_quats()
        tensor[..., 4:] = self._trans
        return tensor
    @staticmethod
    def from_tensor_7(t: torch.Tensor, normalize_quats: bool = False) -> Rigid:
        """
        Constructs a transformation from a [*, 7] tensor whose final columns
        are a quaternion (4) followed by a translation (3); the inverse of
        to_tensor_7.
        Args:
            t: A [*, 7] tensor
            normalize_quats: Whether to normalize the quaternion part
        Returns:
            A transformation object of shape [*]
        """
        if t.shape[-1] != 7:
            raise ValueError("Incorrectly shaped input tensor")
        quats, trans = t[..., :4], t[..., 4:]
        rots = Rotation(rot_mats=None, quats=quats, normalize_quats=normalize_quats)
        return Rigid(rots, trans)
    @staticmethod
    def from_3_points(
        p_neg_x_axis: torch.Tensor, origin: torch.Tensor, p_xy_plane: torch.Tensor, eps: float = 1e-8
    ) -> Rigid:
        """
        Implements algorithm 21. Constructs transformations from sets of 3 points using the Gram-Schmidt algorithm.
        Args:
            p_neg_x_axis: [*, 3] coordinates
            origin: [*, 3] coordinates used as frame origins
            p_xy_plane: [*, 3] coordinates
            eps: Small epsilon value
        Returns:
            A transformation object of shape [*]
        """
        p_neg_x_axis_unbound = torch.unbind(p_neg_x_axis, dim=-1)
        origin_unbound = torch.unbind(origin, dim=-1)
        p_xy_plane_unbound = torch.unbind(p_xy_plane, dim=-1)
        # Gram-Schmidt: e0 along the x axis, e1 orthogonalized in the xy
        # plane, e2 their cross product.
        e0 = [c1 - c2 for c1, c2 in zip(origin_unbound, p_neg_x_axis_unbound)]
        e1 = [c1 - c2 for c1, c2 in zip(p_xy_plane_unbound, origin_unbound)]
        denom = torch.sqrt(sum(c * c for c in e0) + eps * torch.ones_like(e0[0]))
        e0 = [c / denom for c in e0]
        dot = sum((c1 * c2 for c1, c2 in zip(e0, e1)))
        e1 = [c2 - c1 * dot for c1, c2 in zip(e0, e1)]
        denom = torch.sqrt(sum(c * c for c in e1) + eps * torch.ones_like(e1[0]))
        e1 = [c / denom for c in e1]
        e2 = [
            e0[1] * e1[2] - e0[2] * e1[1],
            e0[2] * e1[0] - e0[0] * e1[2],
            e0[0] * e1[1] - e0[1] * e1[0],
        ]
        rots = torch.stack([c for tup in zip(e0, e1, e2) for c in tup], dim=-1)
        rots = rots.reshape(rots.shape[:-1] + (3, 3))
        rot_obj = Rotation(rot_mats=rots, quats=None)
        return Rigid(rot_obj, torch.stack(origin_unbound, dim=-1))
    def unsqueeze(self, dim: int) -> Rigid:
        """
        Analogous to torch.unsqueeze. The dimension is relative to the shared dimensions of the rotation/translation.
        Args:
            dim: A positive or negative dimension index.
        Returns:
            The unsqueezed transformation.
        """
        if dim >= len(self.shape):
            raise ValueError("Invalid dimension")
        rots = self._rots.unsqueeze(dim)
        # Negative indices shift by one because trans has a trailing xyz dim.
        trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)
        return Rigid(rots, trans)
    @staticmethod
    def cat(ts: Sequence[Rigid], dim: int) -> Rigid:
        """
        Concatenates transformations along a new dimension.
        Args:
            ts:
                A list of T objects
            dim:
                The dimension along which the transformations should be concatenated
        Returns:
            A concatenated transformation object
        """
        rots = Rotation.cat([t._rots for t in ts], dim)
        # Negative indices shift by one because trans has a trailing xyz dim.
        trans = torch.cat([t._trans for t in ts], dim=dim if dim >= 0 else dim - 1)
        return Rigid(rots, trans)
    def apply_rot_fn(self, fn: Callable[[Rotation], Rotation]) -> Rigid:
        """
        Applies a Rotation -> Rotation function to the stored rotation object.
        Args:
            fn: A function of type Rotation -> Rotation
        Returns:
            A transformation object with a transformed rotation.
        """
        return Rigid(fn(self._rots), self._trans)
    def apply_trans_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rigid:
        """
        Applies a Tensor -> Tensor function to the stored translation.
        Args:
            fn:
                A function of type Tensor -> Tensor to be applied to the translation
        Returns:
            A transformation object with a transformed translation.
        """
        return Rigid(self._rots, fn(self._trans))
    def scale_translation(self, trans_scale_factor: float) -> Rigid:
        """
        Scales the translation by a constant factor.
        Args:
            trans_scale_factor:
                The constant factor
        Returns:
            A transformation object with a scaled translation.
        """
        return self.apply_trans_fn(lambda t: t * trans_scale_factor)
    def stop_rot_gradient(self) -> Rigid:
        """
        Detaches the underlying rotation object
        Returns:
            A transformation object with detached rotations
        """
        return self.apply_rot_fn(lambda r: r.detach())
    @staticmethod
    def make_transform_from_reference(
        n_xyz: torch.Tensor, ca_xyz: torch.Tensor, c_xyz: torch.Tensor, eps: float = 1e-20
    ) -> Rigid:
        """
        Returns a transformation object from reference coordinates.
        Note that this method does not take care of symmetries. If you provide the atom positions in the non-standard
        way, the N atom will end up not at [-0.527250, 1.359329, 0.0] but instead at [-0.527250, -1.359329, 0.0]. You
        need to take care of such cases in your code.
        Args:
            n_xyz: A [*, 3] tensor of nitrogen xyz coordinates.
            ca_xyz: A [*, 3] tensor of carbon alpha xyz coordinates.
            c_xyz: A [*, 3] tensor of carbon xyz coordinates.
        Returns:
            A transformation object. After applying the translation and rotation to the reference backbone, the
            coordinates will approximately equal to the input coordinates.
        """
        # Recenter so that CA is at the origin.
        translation = -1 * ca_xyz
        n_xyz = n_xyz + translation
        c_xyz = c_xyz + translation
        # First rotation (about z): zero out the y coordinate of C.
        c_x, c_y, c_z = [c_xyz[..., i] for i in range(3)]
        norm = torch.sqrt(eps + c_x**2 + c_y**2)
        sin_c1 = -c_y / norm
        cos_c1 = c_x / norm
        c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3))
        c1_rots[..., 0, 0] = cos_c1
        c1_rots[..., 0, 1] = -1 * sin_c1
        c1_rots[..., 1, 0] = sin_c1
        c1_rots[..., 1, 1] = cos_c1
        c1_rots[..., 2, 2] = 1
        # Second rotation (about y): zero out the z coordinate of C, putting
        # it on the x-axis.
        norm = torch.sqrt(eps + c_x**2 + c_y**2 + c_z**2)
        sin_c2 = c_z / norm
        cos_c2 = torch.sqrt(c_x**2 + c_y**2) / norm
        c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))
        c2_rots[..., 0, 0] = cos_c2
        c2_rots[..., 0, 2] = sin_c2
        c2_rots[..., 1, 1] = 1
        c2_rots[..., 2, 0] = -1 * sin_c2
        c2_rots[..., 2, 2] = cos_c2
        c_rots = rot_matmul(c2_rots, c1_rots)
        # Third rotation (about x): bring N into the xy plane.
        n_xyz = rot_vec_mul(c_rots, n_xyz)
        _, n_y, n_z = [n_xyz[..., i] for i in range(3)]
        norm = torch.sqrt(eps + n_y**2 + n_z**2)
        sin_n = -n_z / norm
        cos_n = n_y / norm
        n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))
        n_rots[..., 0, 0] = 1
        n_rots[..., 1, 1] = cos_n
        n_rots[..., 1, 2] = -1 * sin_n
        n_rots[..., 2, 1] = sin_n
        n_rots[..., 2, 2] = cos_n
        rots = rot_matmul(n_rots, c_rots)
        # Invert: we built the world->reference rotation, but we return the
        # reference->world transformation.
        rots = rots.transpose(-1, -2)
        translation = -1 * translation
        rot_obj = Rotation(rot_mats=rots, quats=None)
        return Rigid(rot_obj, translation)
    def cuda(self) -> Rigid:
        """
        Moves the transformation object to GPU memory
        Returns:
            A version of the transformation on GPU
        """
        return Rigid(self._rots.cuda(), self._trans.cuda())
|
Rigid
|
python
|
protocolbuffers__protobuf
|
python/google/protobuf/descriptor.py
|
{
"start": 28758,
"end": 32850
}
|
class ____(_NestedDescriptorBase):
  """Descriptor for an enum defined in a .proto file.

  Attributes:
    name (str): Name of the enum type.
    full_name (str): Full name of the type, including package name and any
      enclosing type(s).
    values (list[EnumValueDescriptor]): List of the values in this enum.
    values_by_name (dict(str, EnumValueDescriptor)): Same as :attr:`values`, but
      indexed by the "name" field of each EnumValueDescriptor.
    values_by_number (dict(int, EnumValueDescriptor)): Same as :attr:`values`,
      but indexed by the "number" field of each EnumValueDescriptor.
    containing_type (Descriptor): Descriptor of the immediate containing type of
      this enum, or None if this is an enum defined at the top level in a .proto
      file. Set by Descriptor's constructor if we're passed into one.
    file (FileDescriptor): Reference to file descriptor.
    options (descriptor_pb2.EnumOptions): Enum options message or None to use
      default enum options.
  """

  if _USE_C_DESCRIPTORS:
    _C_DESCRIPTOR_CLASS = _message.EnumDescriptor

    def __new__(
        cls,
        name,
        full_name,
        filename,
        values,
        containing_type=None,
        options=None,
        serialized_options=None,
        file=None,  # pylint: disable=redefined-builtin
        serialized_start=None,
        serialized_end=None,
        create_key=None,
    ):
      # With C descriptors the C++ pool is authoritative: intercept
      # construction (only legal from generated code) and return the
      # already-registered C descriptor instead of a Python one.
      _message.Message._CheckCalledFromGeneratedFile()
      return _message.default_pool.FindEnumTypeByName(full_name)

  def __init__(
      self,
      name,
      full_name,
      filename,
      values,
      containing_type=None,
      options=None,
      serialized_options=None,
      file=None,  # pylint: disable=redefined-builtin
      serialized_start=None,
      serialized_end=None,
      create_key=None,
  ):
    """Arguments are as described in the attribute description above.
    Note that filename is an obsolete argument, that is not used anymore.
    Please use file.name to access this as an attribute.
    """
    if create_key is not _internal_create_key:
      _Deprecated('create function EnumDescriptor()')
    super(EnumDescriptor, self).__init__(
        options,
        'EnumOptions',
        name,
        full_name,
        file,
        containing_type,
        serialized_start=serialized_start,
        serialized_end=serialized_end,
        serialized_options=serialized_options,
    )
    self.values = values
    # Back-link each value to its file and enum type.
    for value in self.values:
      value.file = file
      value.type = self
    self.values_by_name = dict((v.name, v) for v in values)
    # Values are reversed to ensure that the first alias is retained.
    self.values_by_number = dict((v.number, v) for v in reversed(values))

  @property
  def _parent(self):
    # Nested enums are owned by their containing message; top-level enums by
    # the file.
    return self.containing_type or self.file

  @property
  def is_closed(self):
    """Returns true whether this is a "closed" enum.
    This means that it:
    - Has a fixed set of values, rather than being equivalent to an int32.
    - Encountering values not in this set causes them to be treated as unknown
      fields.
    - The first value (i.e., the default) may be nonzero.
    WARNING: Some runtimes currently have a quirk where non-closed enums are
    treated as closed when used as the type of fields defined in a
    `syntax = proto2;` file. This quirk is not present in all runtimes; as of
    writing, we know that:
    - C++, Java, and C++-based Python share this quirk.
    - UPB and UPB-based Python do not.
    - PHP and Ruby treat all enums as open regardless of declaration.
    Care should be taken when using this function to respect the target
    runtime's enum handling quirks.
    """
    return self._GetFeatures().enum_type == _FEATURESET_ENUM_TYPE_CLOSED

  def CopyToProto(self, proto):
    """Copies this to a descriptor_pb2.EnumDescriptorProto.
    Args:
      proto (descriptor_pb2.EnumDescriptorProto): An empty descriptor proto.
    """
    # This function is overridden to give a better doc comment.
    super(EnumDescriptor, self).CopyToProto(proto)
|
EnumDescriptor
|
python
|
pallets__flask
|
tests/test_cli.py
|
{
"start": 11775,
"end": 20377
}
|
class ____:
    """Tests for the `flask routes` CLI command."""

    @pytest.fixture
    def app(self):
        flask_app = Flask(__name__)
        flask_app.add_url_rule(
            "/get_post/<int:x>/<int:y>",
            methods=["GET", "POST"],
            endpoint="yyy_get_post",
        )
        flask_app.add_url_rule("/zzz_post", methods=["POST"], endpoint="aaa_post")
        return flask_app

    @pytest.fixture
    def invoke(self, app, runner):
        group = FlaskGroup(create_app=lambda: app)
        return partial(runner.invoke, group)

    def expect_order(self, order, output):
        # Skip the two header lines, then compare each row's prefix; slicing
        # instead of startswith gives nicer pytest failure output.
        rows = output.splitlines()[2:]
        for expected, row in zip(order, rows, strict=False):
            assert row[: len(expected)] == expected

    def test_simple(self, invoke):
        res = invoke(["routes"])
        assert res.exit_code == 0
        self.expect_order(["aaa_post", "static", "yyy_get_post"], res.output)

    def test_sort(self, app, invoke):
        # Sorting by endpoint is the default.
        assert invoke(["routes"]).output == invoke(["routes", "-s", "endpoint"]).output
        self.expect_order(
            ["static", "yyy_get_post", "aaa_post"],
            invoke(["routes", "-s", "methods"]).output,
        )
        self.expect_order(
            ["yyy_get_post", "static", "aaa_post"],
            invoke(["routes", "-s", "rule"]).output,
        )
        registration_order = [rule.endpoint for rule in app.url_map.iter_rules()]
        self.expect_order(registration_order, invoke(["routes", "-s", "match"]).output)

    def test_all_methods(self, invoke):
        assert "GET, HEAD, OPTIONS, POST" not in invoke(["routes"]).output
        assert "GET, HEAD, OPTIONS, POST" in invoke(["routes", "--all-methods"]).output

    def test_no_routes(self, runner):
        bare_app = Flask(__name__, static_folder=None)
        res = runner.invoke(FlaskGroup(create_app=lambda: bare_app), ["routes"])
        assert res.exit_code == 0
        assert "No routes were registered." in res.output

    def test_subdomain(self, runner):
        sub_app = Flask(__name__, static_folder=None)
        sub_app.add_url_rule("/a", subdomain="a", endpoint="a")
        sub_app.add_url_rule("/b", subdomain="b", endpoint="b")
        res = runner.invoke(FlaskGroup(create_app=lambda: sub_app), ["routes"])
        assert res.exit_code == 0
        assert "Subdomain" in res.output

    def test_host(self, runner):
        host_app = Flask(__name__, static_folder=None, host_matching=True)
        host_app.add_url_rule("/a", host="a", endpoint="a")
        host_app.add_url_rule("/b", host="b", endpoint="b")
        res = runner.invoke(FlaskGroup(create_app=lambda: host_app), ["routes"])
        assert res.exit_code == 0
        assert "Host" in res.output
def dotenv_not_available():
try:
import dotenv # noqa: F401
except ImportError:
return True
return False
need_dotenv = pytest.mark.skipif(
dotenv_not_available(), reason="dotenv is not installed"
)
@need_dotenv
def test_load_dotenv(monkeypatch):
# can't use monkeypatch.delitem since the keys don't exist yet
for item in ("FOO", "BAR", "SPAM", "HAM"):
monkeypatch._setitem.append((os.environ, item, notset))
monkeypatch.setenv("EGGS", "3")
monkeypatch.chdir(test_path)
assert load_dotenv()
assert Path.cwd() == test_path
# .flaskenv doesn't overwrite .env
assert os.environ["FOO"] == "env"
# set only in .flaskenv
assert os.environ["BAR"] == "bar"
# set only in .env
assert os.environ["SPAM"] == "1"
# set manually, files don't overwrite
assert os.environ["EGGS"] == "3"
# test env file encoding
assert os.environ["HAM"] == "火腿"
# Non existent file should not load
assert not load_dotenv("non-existent-file", load_defaults=False)
@need_dotenv
def test_dotenv_path(monkeypatch):
for item in ("FOO", "BAR", "EGGS"):
monkeypatch._setitem.append((os.environ, item, notset))
load_dotenv(test_path / ".flaskenv")
assert Path.cwd() == cwd
assert "FOO" in os.environ
def test_dotenv_optional(monkeypatch):
monkeypatch.setitem(sys.modules, "dotenv", None)
monkeypatch.chdir(test_path)
load_dotenv()
assert "FOO" not in os.environ
@need_dotenv
def test_disable_dotenv_from_env(monkeypatch, runner):
monkeypatch.chdir(test_path)
monkeypatch.setitem(os.environ, "FLASK_SKIP_DOTENV", "1")
runner.invoke(FlaskGroup())
assert "FOO" not in os.environ
def test_run_cert_path():
# no key
with pytest.raises(click.BadParameter):
run_command.make_context("run", ["--cert", __file__])
# no cert
with pytest.raises(click.BadParameter):
run_command.make_context("run", ["--key", __file__])
# cert specified first
ctx = run_command.make_context("run", ["--cert", __file__, "--key", __file__])
assert ctx.params["cert"] == (__file__, __file__)
# key specified first
ctx = run_command.make_context("run", ["--key", __file__, "--cert", __file__])
assert ctx.params["cert"] == (__file__, __file__)
def test_run_cert_adhoc(monkeypatch):
monkeypatch.setitem(sys.modules, "cryptography", None)
# cryptography not installed
with pytest.raises(click.BadParameter):
run_command.make_context("run", ["--cert", "adhoc"])
# cryptography installed
monkeypatch.setitem(sys.modules, "cryptography", types.ModuleType("cryptography"))
ctx = run_command.make_context("run", ["--cert", "adhoc"])
assert ctx.params["cert"] == "adhoc"
# no key with adhoc
with pytest.raises(click.BadParameter):
run_command.make_context("run", ["--cert", "adhoc", "--key", __file__])
def test_run_cert_import(monkeypatch):
monkeypatch.setitem(sys.modules, "not_here", None)
# ImportError
with pytest.raises(click.BadParameter):
run_command.make_context("run", ["--cert", "not_here"])
with pytest.raises(click.BadParameter):
run_command.make_context("run", ["--cert", "flask"])
# SSLContext
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
monkeypatch.setitem(sys.modules, "ssl_context", ssl_context)
ctx = run_command.make_context("run", ["--cert", "ssl_context"])
assert ctx.params["cert"] is ssl_context
# no --key with SSLContext
with pytest.raises(click.BadParameter):
run_command.make_context("run", ["--cert", "ssl_context", "--key", __file__])
def test_run_cert_no_ssl(monkeypatch):
monkeypatch.setitem(sys.modules, "ssl", None)
with pytest.raises(click.BadParameter):
run_command.make_context("run", ["--cert", "not_here"])
def test_cli_blueprints(app):
"""Test blueprint commands register correctly to the application"""
custom = Blueprint("custom", __name__, cli_group="customized")
nested = Blueprint("nested", __name__)
merged = Blueprint("merged", __name__, cli_group=None)
late = Blueprint("late", __name__)
@custom.cli.command("custom")
def custom_command():
click.echo("custom_result")
@nested.cli.command("nested")
def nested_command():
click.echo("nested_result")
@merged.cli.command("merged")
def merged_command():
click.echo("merged_result")
@late.cli.command("late")
def late_command():
click.echo("late_result")
app.register_blueprint(custom)
app.register_blueprint(nested)
app.register_blueprint(merged)
app.register_blueprint(late, cli_group="late_registration")
app_runner = app.test_cli_runner()
result = app_runner.invoke(args=["customized", "custom"])
assert "custom_result" in result.output
result = app_runner.invoke(args=["nested", "nested"])
assert "nested_result" in result.output
result = app_runner.invoke(args=["merged"])
assert "merged_result" in result.output
result = app_runner.invoke(args=["late_registration", "late"])
assert "late_result" in result.output
def test_cli_empty(app):
"""If a Blueprint's CLI group is empty, do not register it."""
bp = Blueprint("blue", __name__, cli_group="blue")
app.register_blueprint(bp)
result = app.test_cli_runner().invoke(args=["blue", "--help"])
assert result.exit_code == 2, f"Unexpected success:\n\n{result.output}"
def test_run_exclude_patterns():
ctx = run_command.make_context("run", ["--exclude-patterns", __file__])
assert ctx.params["exclude_patterns"] == [__file__]
|
TestRoutes
|
python
|
huggingface__transformers
|
src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py
|
{
"start": 11573,
"end": 15472
}
|
class ____(nn.Module):
def __init__(self, config: Phi4MultimodalVisionConfig):
super().__init__()
self.config = config
self.patch_size = config.patch_size
self.num_patches_per_side = config.image_size // self.patch_size
self.patch_embedding = nn.Conv2d(
in_channels=config.num_channels,
out_channels=config.hidden_size,
kernel_size=self.patch_size,
stride=self.patch_size,
padding="valid",
)
self.position_embedding = nn.Embedding(self.num_patches_per_side**2, config.hidden_size)
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing and no class embeddings.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1]
num_positions = self.position_embedding.weight.shape[0]
# always interpolate when tracing to ensure the exported model works for dynamic input shapes
if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
return self.position_embedding(self.position_ids)
patch_pos_embed = self.position_embedding.weight.unsqueeze(0)
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions**0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed,
size=(new_height, new_width),
mode="bicubic",
align_corners=False,
)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return patch_pos_embed
def forward(self, pixel_values: torch.FloatTensor, patch_attention_mask: torch.BoolTensor) -> torch.Tensor:
batch_size = pixel_values.size(0)
patch_embeds = self.patch_embedding(pixel_values)
embeddings = patch_embeds.flatten(2).transpose(1, 2)
max_im_h, max_im_w = pixel_values.size(2), pixel_values.size(3)
max_nb_patches_h, max_nb_patches_w = max_im_h // self.patch_size, max_im_w // self.patch_size
boundaries = torch.arange(1 / self.num_patches_per_side, 1.0, 1 / self.num_patches_per_side)
position_ids = torch.full((batch_size, max_nb_patches_h * max_nb_patches_w), fill_value=0)
for batch_idx, p_attn_mask in enumerate(patch_attention_mask):
nb_patches_h = p_attn_mask[:, 0].sum()
nb_patches_w = p_attn_mask[0].sum()
fractional_coords_h = torch.arange(0, 1 - 1e-6, 1 / nb_patches_h)
fractional_coords_w = torch.arange(0, 1 - 1e-6, 1 / nb_patches_w)
bucket_coords_h = torch.bucketize(fractional_coords_h, boundaries, right=True)
bucket_coords_w = torch.bucketize(fractional_coords_w, boundaries, right=True)
pos_ids = (bucket_coords_h[:, None] * self.num_patches_per_side + bucket_coords_w).flatten()
position_ids[batch_idx][p_attn_mask.view(-1).cpu()] = pos_ids
position_ids = position_ids.to(self.position_embedding.weight.device)
embeddings = embeddings + self.position_embedding(position_ids)
return embeddings
|
Phi4MultimodalVisionEmbeddings
|
python
|
python-pillow__Pillow
|
Tests/helper.py
|
{
"start": 6641,
"end": 10124
}
|
class ____:
# requires unix/macOS
iterations = 100 # count
mem_limit = 512 # k
def _get_mem_usage(self) -> float:
"""
Gets the RUSAGE memory usage, returns in K. Encapsulates the difference
between macOS and Linux rss reporting
:returns: memory usage in kilobytes
"""
from resource import RUSAGE_SELF, getrusage
mem = getrusage(RUSAGE_SELF).ru_maxrss
# man 2 getrusage:
# ru_maxrss
# This is the maximum resident set size utilized
# in bytes on macOS, in kilobytes on Linux
return mem / 1024 if sys.platform == "darwin" else mem
def _test_leak(self, core: Callable[[], None]) -> None:
start_mem = self._get_mem_usage()
for cycle in range(self.iterations):
core()
mem = self._get_mem_usage() - start_mem
msg = f"memory usage limit exceeded in iteration {cycle}"
assert mem < self.mem_limit, msg
# helpers
def fromstring(data: bytes) -> ImageFile.ImageFile:
return Image.open(BytesIO(data))
def tostring(im: Image.Image, string_format: str, **options: Any) -> bytes:
out = BytesIO()
im.save(out, string_format, **options)
return out.getvalue()
def hopper(mode: str | None = None) -> Image.Image:
# Use caching to reduce reading from disk, but return a copy
# so that the cached image isn't modified by the tests
# (for fast, isolated, repeatable tests).
if mode is None:
# Always return fresh not-yet-loaded version of image.
# Operations on not-yet-loaded images are a separate class of errors
# that we should catch.
return Image.open("Tests/images/hopper.ppm")
return _cached_hopper(mode).copy()
@lru_cache
def _cached_hopper(mode: str) -> Image.Image:
if mode == "F":
im = hopper("L")
else:
im = hopper()
try:
im = im.convert(mode)
except ImportError:
if mode == "LAB":
im = Image.open("Tests/images/hopper.Lab.tif")
else:
raise
return im
def djpeg_available() -> bool:
if shutil.which("djpeg"):
try:
subprocess.check_call(["djpeg", "-version"])
return True
except subprocess.CalledProcessError: # pragma: no cover
return False
return False
def netpbm_available() -> bool:
return bool(shutil.which("ppmquant") and shutil.which("ppmtogif"))
def magick_command() -> list[str] | None:
if sys.platform == "win32":
magickhome = os.environ.get("MAGICK_HOME")
if magickhome:
imagemagick = [os.path.join(magickhome, "convert.exe")]
graphicsmagick = [os.path.join(magickhome, "gm.exe"), "convert"]
else:
imagemagick = None
graphicsmagick = None
else:
imagemagick = ["convert"]
graphicsmagick = ["gm", "convert"]
if imagemagick and shutil.which(imagemagick[0]):
return imagemagick
if graphicsmagick and shutil.which(graphicsmagick[0]):
return graphicsmagick
return None
def on_ci() -> bool:
return "CI" in os.environ
def is_big_endian() -> bool:
return sys.byteorder == "big"
def is_ppc64le() -> bool:
import platform
return platform.machine() == "ppc64le"
def is_win32() -> bool:
return sys.platform.startswith("win32")
def is_pypy() -> bool:
return hasattr(sys, "pypy_translation_info")
|
PillowLeakTestCase
|
python
|
Textualize__textual
|
src/textual/widgets/_directory_tree.py
|
{
"start": 624,
"end": 859
}
|
class ____:
"""Attaches directory information to a [`DirectoryTree`][textual.widgets.DirectoryTree] node."""
path: Path
"""The path of the directory entry."""
loaded: bool = False
"""Has this been loaded?"""
|
DirEntry
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_joins.py
|
{
"start": 1564,
"end": 7289
}
|
class ____(InheritedTest, AssertsCompiledSQL):
def test_single_prop(self):
Company = self.classes.Company
sess = fixture_session()
self.assert_compile(
sess.query(Company).join(Company.employees),
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name "
"FROM companies JOIN people "
"ON companies.company_id = people.company_id",
use_default_dialect=True,
)
def test_force_via_select_from(self):
Company, Engineer = self.classes.Company, self.classes.Engineer
sess = fixture_session()
self.assert_compile(
sess.query(Company)
.filter(Company.company_id == Engineer.company_id)
.filter(Engineer.primary_language == "java"),
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name "
"FROM companies, people, engineers "
"WHERE companies.company_id = people.company_id "
"AND engineers.primary_language "
"= :primary_language_1",
use_default_dialect=True,
)
self.assert_compile(
sess.query(Company)
.select_from(Company, Engineer)
.filter(Company.company_id == Engineer.company_id)
.filter(Engineer.primary_language == "java"),
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name "
"FROM companies, people JOIN engineers "
"ON people.person_id = engineers.person_id "
"WHERE companies.company_id = people.company_id "
"AND engineers.primary_language ="
" :primary_language_1",
use_default_dialect=True,
)
def test_single_prop_of_type(self):
Company, Engineer = self.classes.Company, self.classes.Engineer
sess = fixture_session()
self.assert_compile(
sess.query(Company).join(Company.employees.of_type(Engineer)),
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name "
"FROM companies JOIN "
"(people JOIN engineers "
"ON people.person_id = engineers.person_id) "
"ON companies.company_id = people.company_id",
use_default_dialect=True,
)
def test_explicit_polymorphic_join_one(self):
Company, Engineer = self.classes.Company, self.classes.Engineer
sess = fixture_session()
self.assert_compile(
sess.query(Company)
.join(Engineer)
.filter(Engineer.engineer_name == "vlad"),
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name "
"FROM companies JOIN (people JOIN engineers "
"ON people.person_id = engineers.person_id) "
"ON "
"companies.company_id = people.company_id "
"WHERE engineers.engineer_name = :engineer_name_1",
use_default_dialect=True,
)
def test_explicit_polymorphic_join_two(self):
Company, Engineer = self.classes.Company, self.classes.Engineer
sess = fixture_session()
self.assert_compile(
sess.query(Company)
.join(Engineer, Company.company_id == Engineer.company_id)
.filter(Engineer.engineer_name == "vlad"),
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name "
"FROM companies JOIN "
"(people JOIN engineers "
"ON people.person_id = engineers.person_id) "
"ON "
"companies.company_id = people.company_id "
"WHERE engineers.engineer_name = :engineer_name_1",
use_default_dialect=True,
)
def test_auto_aliasing_multi_link(self):
# test [ticket:2903]
sess = fixture_session()
Company, Engineer, Manager, Boss = (
self.classes.Company,
self.classes.Engineer,
self.classes.Manager,
self.classes.Boss,
)
q = (
sess.query(Company)
.join(Company.employees.of_type(Engineer))
.join(Company.employees.of_type(Manager))
.join(Company.employees.of_type(Boss))
)
with testing.expect_warnings(
"An alias is being generated automatically against joined entity "
r"Mapper\[Manager\(managers\)\] due to overlapping",
"An alias is being generated automatically against joined entity "
r"Mapper\[Boss\(boss\)\] due to overlapping",
):
self.assert_compile(
q,
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name FROM companies "
"JOIN (people JOIN engineers "
"ON people.person_id = engineers.person_id) "
"ON companies.company_id = people.company_id "
"JOIN (people AS people_1 JOIN managers AS managers_1 "
"ON people_1.person_id = managers_1.person_id) "
"ON companies.company_id = people_1.company_id "
"JOIN (people AS people_2 JOIN managers AS managers_2 "
"ON people_2.person_id = managers_2.person_id "
"JOIN boss AS boss_1 "
"ON managers_2.person_id = boss_1.boss_id) "
"ON companies.company_id = people_2.company_id",
use_default_dialect=True,
)
|
InheritedJoinTest
|
python
|
tensorflow__tensorflow
|
tensorflow/cc/saved_model/testdata/generate_saved_models.py
|
{
"start": 2098,
"end": 2338
}
|
class ____(module.Module):
def __init__(self, parent):
super(ReferencesParent, self).__init__()
self.parent = parent
self.my_variable = variables.Variable(3., name="MyVariable")
# Creates a cyclic object graph.
|
ReferencesParent
|
python
|
google__jax
|
jax/_src/named_sharding.py
|
{
"start": 18274,
"end": 21497
}
|
class ____(Exception):
def __init__(self, message, mesh, pspec):
super().__init__(message)
self.message = message
self.mesh = mesh
self.pspec = pspec
def __str__(self):
return f"{self.message}"
def _check_unique_resources(pspec: PartitionSpec, arg_name: str, mesh=None
) -> None:
resource_counts: dict[MeshAxisName, int] = {}
duplicate = False
for d in pspec:
if d is PartitionSpec.UNCONSTRAINED or d is None:
continue
d = d if isinstance(d, tuple) else (d,)
for resource in d:
count = resource_counts.get(resource, 0)
if count > 0:
duplicate = True
resource_counts[resource] = count + 1
if duplicate:
multiple_uses = [r for r, c in resource_counts.items() if c > 1]
raise DuplicateSpecError(
message=(
f'A single {arg_name} specification can map every mesh axis to at'
f' most one positional dimension, but {pspec} has duplicate entries'
f' for {mesh_lib.show_axes(multiple_uses)}'),
mesh=mesh, pspec=pspec)
def check_pspec_mix_axis_type(mesh, pspec):
for spec in pspec:
if isinstance(spec, tuple):
if all(mesh._name_to_type[spec[0]] == mesh._name_to_type[p]
for p in spec):
continue
if any(mesh._name_to_type[p] == AxisType.Manual for p in spec):
raise ValueError(
'Tuple subset of `PartitionSpec` cannot contain `Manual` mixed'
f' with `Auto` or `Explicit`. Got pspec {pspec} and subset'
f' {spec} with axis types:'
f' ({", ".join(str(mesh._name_to_type[p]) for p in spec)})')
def _check_mesh_resource_axis(mesh, pspec):
for p in pspec:
if p is PartitionSpec.UNCONSTRAINED or p is None:
continue
p = p if isinstance(p, tuple) else (p,)
for r in p:
if r not in mesh.axis_names:
raise ValueError(
f"Resource axis: {r} of {pspec} "
f"is not found in mesh: {tuple(mesh.shape.keys())}.")
check_pspec_mix_axis_type(mesh, pspec)
if (AxisType.Auto not in mesh.axis_types and
PartitionSpec.UNCONSTRAINED in pspec):
raise ValueError(
f'{pspec} cannot contain'
' `P.UNCONSTRAINED` when no mesh axis_types are `Auto`. Got mesh'
f' axis_types: {mesh.axis_types}')
def _check_mesh_unreduced(mesh, pspec):
for u in pspec.unreduced:
if u not in mesh.axis_names:
raise ValueError(
f'Unreduced axes {u} is not found in {mesh.axis_names=}. '
f'Got {pspec=}')
if mesh._name_to_type[u] == AxisType.Auto:
raise ValueError(
'Unreduced axes can only refer to mesh axes that are of type'
f' `Explicit` or `Manual`. Got unreduced axes: {pspec.unreduced} and'
f' mesh: {mesh}')
for u in pspec.reduced:
if u not in mesh.axis_names:
raise ValueError(
f'Reduced axes {u} is not found in {mesh.axis_names=}. '
f'Got {pspec=}')
if mesh._name_to_type[u] == AxisType.Auto:
raise ValueError(
'Reduced axes can only refer to mesh axes that are of type'
f' `Explicit` or `Manual`. Got reduced axes: {pspec.reduced} and'
f' mesh: {mesh}')
|
DuplicateSpecError
|
python
|
huggingface__transformers
|
tests/models/dinat/test_modeling_dinat.py
|
{
"start": 1452,
"end": 7092
}
|
class ____:
def __init__(
self,
parent,
batch_size=13,
image_size=64,
patch_size=4,
num_channels=3,
embed_dim=16,
depths=[1, 2, 1],
num_heads=[2, 4, 8],
kernel_size=3,
dilations=[[3], [1, 2], [1]],
mlp_ratio=2.0,
qkv_bias=True,
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
drop_path_rate=0.1,
hidden_act="gelu",
patch_norm=True,
initializer_range=0.02,
layer_norm_eps=1e-5,
is_training=True,
scope=None,
use_labels=True,
num_labels=10,
out_features=["stage1", "stage2"],
out_indices=[1, 2],
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_heads = num_heads
self.kernel_size = kernel_size
self.dilations = dilations
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.patch_norm = patch_norm
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.is_training = is_training
self.scope = scope
self.use_labels = use_labels
self.num_labels = num_labels
self.out_features = out_features
self.out_indices = out_indices
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return DinatConfig(
num_labels=self.num_labels,
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
embed_dim=self.embed_dim,
depths=self.depths,
num_heads=self.num_heads,
kernel_size=self.kernel_size,
dilations=self.dilations,
mlp_ratio=self.mlp_ratio,
qkv_bias=self.qkv_bias,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
drop_path_rate=self.drop_path_rate,
hidden_act=self.hidden_act,
patch_norm=self.patch_norm,
layer_norm_eps=self.layer_norm_eps,
initializer_range=self.initializer_range,
out_features=self.out_features,
out_indices=self.out_indices,
)
def create_and_check_model(self, config, pixel_values, labels):
model = DinatModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
expected_height = expected_width = (config.image_size // config.patch_size) // (2 ** (len(config.depths) - 1))
expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, expected_height, expected_width, expected_dim)
)
def create_and_check_for_image_classification(self, config, pixel_values, labels):
model = DinatForImageClassification(config)
model.to(torch_device)
model.eval()
result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
# test greyscale images
config.num_channels = 1
model = DinatForImageClassification(config)
model.to(torch_device)
model.eval()
pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
result = model(pixel_values)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_backbone(self, config, pixel_values, labels):
model = DinatBackbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify hidden states
self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, model.channels[0], 16, 16])
# verify channels
self.parent.assertEqual(len(model.channels), len(config.out_features))
# verify backbone works with out_features=None
config.out_features = None
model = DinatBackbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps), 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, model.channels[-1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels), 1)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_natten
@require_torch
|
DinatModelTester
|
python
|
PrefectHQ__prefect
|
tests/runner/test_storage.py
|
{
"start": 36170,
"end": 40799
}
|
class ____:
@pytest.fixture
async def test_block(self):
class FakeStorageBlock(Block):
_block_type_slug = "fake-storage-block"
code: str = dedent(
"""\
from prefect import flow
@flow
def test_flow():
return 1
"""
)
async def get_directory(self, local_path: str):
(Path(local_path) / "flows.py").write_text(self.code)
return FakeStorageBlock()
async def test_init_with_not_a_block(self):
class NotABlock:
looks_around = "nervously"
with pytest.raises(
TypeError, match="Expected a block object. Received a 'NotABlock' object."
):
BlockStorageAdapter(block=NotABlock())
async def test_init_with_wrong_type_of_block(self):
class NotAStorageBlock(Block):
_block_type_slug = "not-a-storage-block"
with pytest.raises(
ValueError,
match="Provided block must have a `get_directory` method.",
):
BlockStorageAdapter(block=NotAStorageBlock())
async def test_pull_code(self, test_block: Block):
storage = BlockStorageAdapter(block=test_block)
try:
await storage.pull_code()
assert (storage.destination / "flows.py").read_text() == test_block.code
finally:
if storage.destination.exists():
shutil.rmtree(storage.destination)
async def test_to_pull_step(self, test_block: Block):
await test_block.save("test-block")
storage = BlockStorageAdapter(block=test_block)
pull_step = storage.to_pull_step()
assert pull_step == {
"prefect.deployments.steps.pull_with_block": {
"block_document_name": "test-block",
"block_type_slug": "fake-storage-block",
}
}
try:
# test pull step runs
output = await run_step(pull_step)
assert (
Path(output["directory"]) / "flows.py"
).read_text() == test_block.code
finally:
if "output" in locals() and "directory" in output:
shutil.rmtree(f"{output['directory']}")
async def test_to_pull_step_with_unsaved_block(self, test_block: Block):
storage = BlockStorageAdapter(block=test_block)
with pytest.raises(
BlockNotSavedError,
match=re.escape(
"Block must be saved with `.save()` before it can be converted to a"
" pull step."
),
):
storage.to_pull_step()
async def test_set_base_path(self, test_block: Block):
storage = BlockStorageAdapter(block=test_block)
new_path = Path("/new/path")
storage.set_base_path(new_path)
assert storage._storage_base_path == new_path
def test_pull_interval_property(self, test_block: Block):
storage = BlockStorageAdapter(block=test_block, pull_interval=120)
assert storage.pull_interval == 120
async def test_destination_property(self, test_block: Block):
storage = BlockStorageAdapter(block=test_block)
assert storage.destination == Path.cwd() / storage._name
async def test_pull_code_existing_destination(self, test_block: Block):
try:
storage = BlockStorageAdapter(block=test_block)
storage.destination.mkdir(
parents=True, exist_ok=True
) # Ensure the destination exists
await storage.pull_code()
assert (storage.destination / "flows.py").read_text() == test_block.code
finally:
if storage.destination.exists():
shutil.rmtree(storage.destination)
async def test_eq_method_same_block(self, test_block: Block):
storage1 = BlockStorageAdapter(block=test_block)
storage2 = BlockStorageAdapter(block=test_block)
assert storage1 == storage2
async def test_eq_method_different_block(self, test_block: Block):
class FakeDeploymentStorage(ReadableDeploymentStorage):
def get_directory(self, *args, **kwargs):
pass
storage1 = BlockStorageAdapter(block=test_block)
storage2 = BlockStorageAdapter(block=FakeDeploymentStorage())
assert storage1 != storage2
async def test_eq_method_different_type(self, test_block: Block):
storage = BlockStorageAdapter(block=test_block)
assert storage != "NotABlockStorageAdapter"
|
TestBlockStorageAdapter
|
python
|
cython__cython
|
Cython/Compiler/TreePath.py
|
{
"start": 6628,
"end": 7960
}
|
class ____:
def __init__(self, path):
self._tokens = [
(special, text)
for (special, text) in path_tokenizer(path)
if special or text
]
self._tokens.reverse() # allow efficient .pop()
def peek(self, default=(None, None)):
return self._tokens[-1] if self._tokens else default
def __call__(self):
try:
return self._tokens.pop()
except IndexError:
raise StopIteration from None
def _build_path_iterator(path):
# parse pattern
_next = _LookAheadTokenizer(path)
token = _next()
selector = []
while 1:
try:
selector.append(operations[token[0]](_next, token))
except StopIteration:
raise ValueError("invalid path")
try:
token = _next()
if token[0] == "/":
token = _next()
except StopIteration:
break
return selector
# main module API
def iterfind(node, path):
selector_chain = _build_path_iterator(path)
result = iter((node,))
for select in selector_chain:
result = select(result)
return result
def find_first(node, path):
return _get_first_or_none(iterfind(node, path))
def find_all(node, path):
return list(iterfind(node, path))
|
_LookAheadTokenizer
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_13/queues.py
|
{
"start": 18824,
"end": 20049
}
|
class ____(Response):
"""
Response of queues.add_or_update_metadata endpoint.
:param updated: Number of queues updated (0 or 1)
:type updated: int
"""
_service = "queues"
_action = "add_or_update_metadata"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"updated": {
"description": "Number of queues updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None:
super(AddOrUpdateMetadataResponse, self).__init__(**kwargs)
self.updated = updated
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
|
AddOrUpdateMetadataResponse
|
python
|
getsentry__sentry
|
src/sentry/dynamic_sampling/rules/helpers/latest_releases.py
|
{
"start": 4442,
"end": 11775
}
|
class ____:
"""
Class responsible of hiding the complexity of handling boosted releases in the Redis hash. In addition, it provides
all the logic to handle an upper bound in the number of boosted releases that can be simultaneously be added to
a specific project.
"""
# Limit of boosted releases per project.
BOOSTED_RELEASES_HASH_EXPIRATION = 60 * 60 * 1000
def __init__(self, project_id: int):
self.redis_client = get_redis_client_for_ds()
self.project_id = project_id
self.project_platform = _get_project_platform(self.project_id)
@property
def has_boosted_releases(self) -> bool:
"""
Checks whether a specific project has boosted releases.
"""
cache_key = self._generate_cache_key_for_boosted_releases_hash()
return bool(self.redis_client.exists(cache_key) == 1)
def add_boosted_release(self, release_id: int, environment: str | None) -> None:
"""
Adds a release to the boosted releases hash with the boosting timestamp set to the current time, signaling that
the boosts starts now.
"""
self._remove_lrb_if_limit_is_reached()
cache_key = self._generate_cache_key_for_boosted_releases_hash()
self.redis_client.hset(
cache_key,
self._generate_cache_key_for_boosted_release(release_id, environment),
datetime.now(timezone.utc).timestamp(),
)
# In order to avoid having the boosted releases hash in memory for an indefinite amount of time, we will expire
# it after a specific timeout.
self.redis_client.pexpire(cache_key, self.BOOSTED_RELEASES_HASH_EXPIRATION)
def get_extended_boosted_releases(self) -> list[ExtendedBoostedRelease]:
"""
Returns a list of boosted releases augmented with additional information such as release version and platform.
In addition, this function performs the cleanup of expired boosted releases.
"""
# We read all boosted releases and we augment them in two separate loops in order to perform a single query
# to fetch all the release models. This optimization avoids peforming a query for each release.
active, expired = self._get_boosted_releases().to_extended_boosted_releases(self.project_id)
# We delete all the expired releases.
if expired:
self.redis_client.hdel(self._generate_cache_key_for_boosted_releases_hash(), *expired)
# We return the active extended boosted releases.
return active
def _get_boosted_releases(self) -> BoostedReleases:
"""
Returns all the boosted releases and parses them based on key and value data.
This method should not be called directly as the boosted releases are not extended, thus they contain only a
subset of information.
"""
boosted_releases = BoostedReleases()
for boosted_release_cache_key, timestamp in self.redis_client.hgetall(
self._generate_cache_key_for_boosted_releases_hash()
).items():
extracted_data = self._extract_data_from_cache_key(boosted_release_cache_key)
if extracted_data:
release_id, environment = extracted_data
boosted_releases.add_release(
cache_key=boosted_release_cache_key,
id=release_id,
timestamp=float(timestamp),
environment=environment,
)
return boosted_releases
def _remove_lrb_if_limit_is_reached(self) -> None:
"""
Removes all the expired releases and also the least recently boosted release in case the limit of boosted
releases is reached.
For efficiency reasons, this function performs two things simultaneously:
1. It counts the number of active releases and keeps track of expired releases for deletion
2. It finds the least recently boosted active release to remove in case the limit of boosted release is reached
"""
cache_key = self._generate_cache_key_for_boosted_releases_hash()
boosted_releases = self.redis_client.hgetall(cache_key)
current_timestamp = datetime.now(timezone.utc).timestamp()
LRBRelease = namedtuple("LRBRelease", ["key", "timestamp"])
lrb_release = None
active_releases = 0
keys_to_delete = []
for boosted_release_key, ts in boosted_releases.items():
timestamp = float(ts)
# For efficiency reasons we don't parse the release and extend it with information, therefore we have to
# check timestamps in the following way.
if current_timestamp <= timestamp + self.project_platform.time_to_adoption:
# With this logic we want to find the boosted release with the lowest timestamp, if multiple releases
# have the same timestamp we are going to take the first one in the hash.
#
# We run this logic while counting the number of active release so that we can remove the lrb release
# in O(1) in case the number of active releases is >= the limit.
if lrb_release is None or timestamp < lrb_release.timestamp:
lrb_release = LRBRelease(key=boosted_release_key, timestamp=timestamp)
# We count this release because it is an active release.
active_releases += 1
else:
keys_to_delete.append(boosted_release_key)
# We delete the least recently boosted release if we have surpassed the limit of elements in the hash.
if active_releases >= BOOSTED_RELEASES_LIMIT and lrb_release:
keys_to_delete.append(lrb_release.key)
# If we have some keys to remove from redis we are going to remove them in batch for efficiency.
if keys_to_delete:
self.redis_client.hdel(cache_key, *keys_to_delete)
def _generate_cache_key_for_boosted_releases_hash(self) -> str:
return f"ds::p:{self.project_id}:boosted_releases"
@staticmethod
def _generate_cache_key_for_boosted_release(release_id: int, environment: str | None) -> str:
return f"ds::r:{release_id}{_get_environment_cache_key(environment)}"
@staticmethod
def _extract_data_from_cache_key(
cache_key: str,
) -> tuple[int, str | None] | None:
"""
Extracts the release id and the environment from the cache key, in order to avoid storing the metadata also
in the value field.
"""
if (match := BOOSTED_RELEASE_CACHE_KEY_REGEX.match(cache_key)) is not None:
# If the cache key matches the new format, we will extract the necessary information.
release_id = match["release_id"]
environment = match["environment"]
return int(release_id), environment
# If the cache key doesn't match the new format, we will fallback to the old format which is just an integer.
try:
release_id = int(cache_key)
except ValueError:
# If the format is not an integer we will silently return None.
return None
else:
return release_id, None
@dataclass(frozen=True)
|
ProjectBoostedReleases
|
python
|
pola-rs__polars
|
py-polars/src/polars/datatypes/classes.py
|
{
"start": 9614,
"end": 9686
}
|
class ____(SignedIntegerType):
"""64-bit signed integer type."""
|
Int64
|
python
|
scikit-learn__scikit-learn
|
sklearn/utils/tests/test_pprint.py
|
{
"start": 1627,
"end": 1884
}
|
class ____(BaseEstimator):
def __init__(self, estimator, n_features_to_select=None, step=1, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.verbose = verbose
|
RFE
|
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_deprecations.py
|
{
"start": 17550,
"end": 18039
}
|
class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
@testing.fixture
def table_fixture(self):
table1 = table(
"mytable",
column("myid", Integer),
column("name", String),
column("description", String),
)
table2 = table(
"myothertable",
column("otherid", Integer),
column("othername", String),
)
return table1, table2
|
FutureSelectTest
|
python
|
PyCQA__pylint
|
tests/functional/t/try_except_raise.py
|
{
"start": 544,
"end": 1439
}
|
class ____(AAAException):
"""BBBException"""
pass
def ccc():
"""try-except-raise test function"""
try:
raise BBBException("asdf")
except BBBException:
raise
except AAAException:
raise BBBException("raised from AAAException")
def ddd():
"""try-except-raise test function"""
try:
raise BBBException("asdf")
except AAAException:
raise BBBException("raised from AAAException")
except: # [try-except-raise]
raise
try:
pass
except RuntimeError:
raise
except:
print("a failure")
try:
pass
except:
print("a failure")
except RuntimeError: # [try-except-raise]
raise
try:
pass
except: # [try-except-raise]
raise
except RuntimeError:
print("a failure")
try:
pass
except (FileNotFoundError, PermissionError):
raise
except OSError:
print("a failure")
|
BBBException
|
python
|
getsentry__sentry-python
|
sentry_sdk/profiler/continuous_profiler.py
|
{
"start": 20175,
"end": 23066
}
|
class ____:
def __init__(self):
# type: () -> None
self.chunk_id = uuid.uuid4().hex
self.indexed_frames = {} # type: Dict[FrameId, int]
self.indexed_stacks = {} # type: Dict[StackId, int]
self.frames = [] # type: List[ProcessedFrame]
self.stacks = [] # type: List[ProcessedStack]
self.samples = [] # type: List[ProcessedSample]
def write(self, ts, sample):
# type: (float, ExtractedSample) -> None
for tid, (stack_id, frame_ids, frames) in sample:
try:
# Check if the stack is indexed first, this lets us skip
# indexing frames if it's not necessary
if stack_id not in self.indexed_stacks:
for i, frame_id in enumerate(frame_ids):
if frame_id not in self.indexed_frames:
self.indexed_frames[frame_id] = len(self.indexed_frames)
self.frames.append(frames[i])
self.indexed_stacks[stack_id] = len(self.indexed_stacks)
self.stacks.append(
[self.indexed_frames[frame_id] for frame_id in frame_ids]
)
self.samples.append(
{
"timestamp": ts,
"thread_id": tid,
"stack_id": self.indexed_stacks[stack_id],
}
)
except AttributeError:
# For some reason, the frame we get doesn't have certain attributes.
# When this happens, we abandon the current sample as it's bad.
capture_internal_exception(sys.exc_info())
def to_json(self, profiler_id, options, sdk_info):
# type: (str, Dict[str, Any], SDKInfo) -> Dict[str, Any]
profile = {
"frames": self.frames,
"stacks": self.stacks,
"samples": self.samples,
"thread_metadata": {
str(thread.ident): {
"name": str(thread.name),
}
for thread in threading.enumerate()
},
}
set_in_app_in_frames(
profile["frames"],
options["in_app_exclude"],
options["in_app_include"],
options["project_root"],
)
payload = {
"chunk_id": self.chunk_id,
"client_sdk": {
"name": sdk_info["name"],
"version": VERSION,
},
"platform": "python",
"profile": profile,
"profiler_id": profiler_id,
"version": "2",
}
for key in "release", "environment", "dist":
if options[key] is not None:
payload[key] = str(options[key]).strip()
return payload
|
ProfileChunk
|
python
|
urllib3__urllib3
|
src/urllib3/exceptions.py
|
{
"start": 3271,
"end": 3369
}
|
class ____(HTTPError):
"""Raised when passing an invalid state to a timeout"""
|
TimeoutStateError
|
python
|
scikit-learn__scikit-learn
|
sklearn/preprocessing/_data.py
|
{
"start": 88255,
"end": 111426
}
|
class ____(OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently. First an
estimate of the cumulative distribution function of a feature is
used to map the original values to a uniform distribution. The obtained
values are then mapped to the desired output distribution using the
associated quantile function. Features values of new/unseen data that fall
below or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
For example visualizations, refer to :ref:`Compare QuantileTransformer with
other scalers <plot_all_scaling_quantile_transformer_section>`.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
.. versionadded:: 0.19
Parameters
----------
n_quantiles : int, default=1000 or n_samples
Number of quantiles to be computed. It corresponds to the number
of landmarks used to discretize the cumulative distribution function.
If n_quantiles is larger than the number of samples, n_quantiles is set
to the number of samples as a larger number of quantiles does not give
a better approximation of the cumulative distribution function
estimator.
output_distribution : {'uniform', 'normal'}, default='uniform'
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, default=False
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int or None, default=10_000
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
Disable subsampling by setting `subsample=None`.
.. versionadded:: 1.5
The option `None` to disable subsampling was added.
random_state : int, RandomState instance or None, default=None
Determines random number generation for subsampling and smoothing
noise.
Please see ``subsample`` for more details.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
copy : bool, default=True
Set to False to perform inplace transformation and avoid a copy (if the
input is already a numpy array).
Attributes
----------
n_quantiles_ : int
The actual number of quantiles used to discretize the cumulative
distribution function.
quantiles_ : ndarray of shape (n_quantiles, n_features)
The values corresponding the quantiles of reference.
references_ : ndarray of shape (n_quantiles, )
Quantiles of references.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
quantile_transform : Equivalent function without the estimator API.
PowerTransformer : Perform mapping to a normal distribution using a power
transform.
StandardScaler : Perform standardization that is faster, but less robust
to outliers.
RobustScaler : Perform robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import QuantileTransformer
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> qt = QuantileTransformer(n_quantiles=10, random_state=0)
>>> qt.fit_transform(X)
array([...])
"""
_parameter_constraints: dict = {
"n_quantiles": [Interval(Integral, 1, None, closed="left")],
"output_distribution": [StrOptions({"uniform", "normal"})],
"ignore_implicit_zeros": ["boolean"],
"subsample": [Interval(Integral, 1, None, closed="left"), None],
"random_state": ["random_state"],
"copy": ["boolean"],
}
def __init__(
self,
*,
n_quantiles=1000,
output_distribution="uniform",
ignore_implicit_zeros=False,
subsample=10_000,
random_state=None,
copy=True,
):
self.n_quantiles = n_quantiles
self.output_distribution = output_distribution
self.ignore_implicit_zeros = ignore_implicit_zeros
self.subsample = subsample
self.random_state = random_state
self.copy = copy
def _dense_fit(self, X, random_state):
"""Compute percentiles for dense matrices.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The data used to scale along the features axis.
"""
if self.ignore_implicit_zeros:
warnings.warn(
"'ignore_implicit_zeros' takes effect only with"
" sparse matrix. This parameter has no effect."
)
n_samples, n_features = X.shape
references = self.references_ * 100
if self.subsample is not None and self.subsample < n_samples:
# Take a subsample of `X`
X = resample(
X, replace=False, n_samples=self.subsample, random_state=random_state
)
self.quantiles_ = np.nanpercentile(X, references, axis=0)
def _sparse_fit(self, X, random_state):
"""Compute percentiles for sparse matrices.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
The data used to scale along the features axis. The sparse matrix
needs to be nonnegative. If a sparse matrix is provided,
it will be converted into a sparse ``csc_matrix``.
"""
n_samples, n_features = X.shape
references = self.references_ * 100
self.quantiles_ = []
for feature_idx in range(n_features):
column_nnz_data = X.data[X.indptr[feature_idx] : X.indptr[feature_idx + 1]]
if self.subsample is not None and len(column_nnz_data) > self.subsample:
column_subsample = self.subsample * len(column_nnz_data) // n_samples
if self.ignore_implicit_zeros:
column_data = np.zeros(shape=column_subsample, dtype=X.dtype)
else:
column_data = np.zeros(shape=self.subsample, dtype=X.dtype)
column_data[:column_subsample] = random_state.choice(
column_nnz_data, size=column_subsample, replace=False
)
else:
if self.ignore_implicit_zeros:
column_data = np.zeros(shape=len(column_nnz_data), dtype=X.dtype)
else:
column_data = np.zeros(shape=n_samples, dtype=X.dtype)
column_data[: len(column_nnz_data)] = column_nnz_data
if not column_data.size:
# if no nnz, an error will be raised for computing the
# quantiles. Force the quantiles to be zeros.
self.quantiles_.append([0] * len(references))
else:
self.quantiles_.append(np.nanpercentile(column_data, references))
self.quantiles_ = np.transpose(self.quantiles_)
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Compute the quantiles used for transforming.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
y : None
Ignored.
Returns
-------
self : object
Fitted transformer.
"""
if self.subsample is not None and self.n_quantiles > self.subsample:
raise ValueError(
"The number of quantiles cannot be greater than"
" the number of samples used. Got {} quantiles"
" and {} samples.".format(self.n_quantiles, self.subsample)
)
X = self._check_inputs(X, in_fit=True, copy=False)
n_samples = X.shape[0]
if self.n_quantiles > n_samples:
warnings.warn(
"n_quantiles (%s) is greater than the total number "
"of samples (%s). n_quantiles is set to "
"n_samples." % (self.n_quantiles, n_samples)
)
self.n_quantiles_ = max(1, min(self.n_quantiles, n_samples))
rng = check_random_state(self.random_state)
# Create the quantiles of reference
self.references_ = np.linspace(0, 1, self.n_quantiles_, endpoint=True)
if sparse.issparse(X):
self._sparse_fit(X, rng)
else:
self._dense_fit(X, rng)
return self
def _transform_col(self, X_col, quantiles, inverse):
"""Private function to transform a single feature."""
output_distribution = self.output_distribution
if not inverse:
lower_bound_x = quantiles[0]
upper_bound_x = quantiles[-1]
lower_bound_y = 0
upper_bound_y = 1
else:
lower_bound_x = 0
upper_bound_x = 1
lower_bound_y = quantiles[0]
upper_bound_y = quantiles[-1]
# for inverse transform, match a uniform distribution
with np.errstate(invalid="ignore"): # hide NaN comparison warnings
if output_distribution == "normal":
X_col = stats.norm.cdf(X_col)
# else output distribution is already a uniform distribution
# find index for lower and higher bounds
with np.errstate(invalid="ignore"): # hide NaN comparison warnings
if output_distribution == "normal":
lower_bounds_idx = X_col - BOUNDS_THRESHOLD < lower_bound_x
upper_bounds_idx = X_col + BOUNDS_THRESHOLD > upper_bound_x
if output_distribution == "uniform":
lower_bounds_idx = X_col == lower_bound_x
upper_bounds_idx = X_col == upper_bound_x
isfinite_mask = ~np.isnan(X_col)
X_col_finite = X_col[isfinite_mask]
if not inverse:
# Interpolate in one direction and in the other and take the
# mean. This is in case of repeated values in the features
# and hence repeated quantiles
#
# If we don't do this, only one extreme of the duplicated is
# used (the upper when we do ascending, and the
# lower for descending). We take the mean of these two
X_col[isfinite_mask] = 0.5 * (
np.interp(X_col_finite, quantiles, self.references_)
- np.interp(-X_col_finite, -quantiles[::-1], -self.references_[::-1])
)
else:
X_col[isfinite_mask] = np.interp(X_col_finite, self.references_, quantiles)
X_col[upper_bounds_idx] = upper_bound_y
X_col[lower_bounds_idx] = lower_bound_y
# for forward transform, match the output distribution
if not inverse:
with np.errstate(invalid="ignore"): # hide NaN comparison warnings
if output_distribution == "normal":
X_col = stats.norm.ppf(X_col)
# find the value to clip the data to avoid mapping to
# infinity. Clip such that the inverse transform will be
# consistent
clip_min = stats.norm.ppf(BOUNDS_THRESHOLD - np.spacing(1))
clip_max = stats.norm.ppf(1 - (BOUNDS_THRESHOLD - np.spacing(1)))
X_col = np.clip(X_col, clip_min, clip_max)
# else output distribution is uniform and the ppf is the
# identity function so we let X_col unchanged
return X_col
def _check_inputs(self, X, in_fit, accept_sparse_negative=False, copy=False):
"""Check inputs before fit and transform."""
X = validate_data(
self,
X,
reset=in_fit,
accept_sparse="csc",
copy=copy,
dtype=FLOAT_DTYPES,
# only set force_writeable for the validation at transform time because
# it's the only place where QuantileTransformer performs inplace operations.
force_writeable=True if not in_fit else None,
ensure_all_finite="allow-nan",
)
# we only accept positive sparse matrix when ignore_implicit_zeros is
# false and that we call fit or transform.
with np.errstate(invalid="ignore"): # hide NaN comparison warnings
if (
not accept_sparse_negative
and not self.ignore_implicit_zeros
and (sparse.issparse(X) and np.any(X.data < 0))
):
raise ValueError(
"QuantileTransformer only accepts non-negative sparse matrices."
)
return X
def _transform(self, X, inverse=False):
"""Forward and inverse transform.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The data used to scale along the features axis.
inverse : bool, default=False
If False, apply forward transform. If True, apply
inverse transform.
Returns
-------
X : ndarray of shape (n_samples, n_features)
Projected data.
"""
if sparse.issparse(X):
for feature_idx in range(X.shape[1]):
column_slice = slice(X.indptr[feature_idx], X.indptr[feature_idx + 1])
X.data[column_slice] = self._transform_col(
X.data[column_slice], self.quantiles_[:, feature_idx], inverse
)
else:
for feature_idx in range(X.shape[1]):
X[:, feature_idx] = self._transform_col(
X[:, feature_idx], self.quantiles_[:, feature_idx], inverse
)
return X
def transform(self, X):
"""Feature-wise transformation of the data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
Xt : {ndarray, sparse matrix} of shape (n_samples, n_features)
The projected data.
"""
check_is_fitted(self)
X = self._check_inputs(X, in_fit=False, copy=self.copy)
return self._transform(X, inverse=False)
def inverse_transform(self, X):
"""Back-projection to the original space.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
X_original : {ndarray, sparse matrix} of (n_samples, n_features)
The projected data.
"""
check_is_fitted(self)
X = self._check_inputs(
X, in_fit=False, accept_sparse_negative=True, copy=self.copy
)
return self._transform(X, inverse=True)
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = True
tags.input_tags.allow_nan = True
return tags
@validate_params(
{"X": ["array-like", "sparse matrix"], "axis": [Options(Integral, {0, 1})]},
prefer_skip_nested_validation=False,
)
def quantile_transform(
X,
*,
axis=0,
n_quantiles=1000,
output_distribution="uniform",
ignore_implicit_zeros=False,
subsample=int(1e5),
random_state=None,
copy=True,
):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently. First an
estimate of the cumulative distribution function of a feature is
used to map the original values to a uniform distribution. The obtained
values are then mapped to the desired output distribution using the
associated quantile function. Features values of new/unseen data that fall
below or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to transform.
axis : int, default=0
Axis used to compute the means and standard deviations along. If 0,
transform each feature, otherwise (if 1) transform each sample.
n_quantiles : int, default=1000 or n_samples
Number of quantiles to be computed. It corresponds to the number
of landmarks used to discretize the cumulative distribution function.
If n_quantiles is larger than the number of samples, n_quantiles is set
to the number of samples as a larger number of quantiles does not give
a better approximation of the cumulative distribution function
estimator.
output_distribution : {'uniform', 'normal'}, default='uniform'
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, default=False
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int or None, default=1e5
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
Disable subsampling by setting `subsample=None`.
.. versionadded:: 1.5
The option `None` to disable subsampling was added.
random_state : int, RandomState instance or None, default=None
Determines random number generation for subsampling and smoothing
noise.
Please see ``subsample`` for more details.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
copy : bool, default=True
If False, try to avoid a copy and transform in place.
This is not guaranteed to always work in place; e.g. if the data is
a numpy array with an int dtype, a copy will be returned even with
copy=False.
.. versionchanged:: 0.23
The default value of `copy` changed from False to True in 0.23.
Returns
-------
Xt : {ndarray, sparse matrix} of shape (n_samples, n_features)
The transformed data.
See Also
--------
QuantileTransformer : Performs quantile-based scaling using the
Transformer API (e.g. as part of a preprocessing
:class:`~sklearn.pipeline.Pipeline`).
power_transform : Maps data to a normal distribution using a
power transformation.
scale : Performs standardization that is faster, but less robust
to outliers.
robust_scale : Performs robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
.. warning:: Risk of data leak
Do not use :func:`~sklearn.preprocessing.quantile_transform` unless
you know what you are doing. A common mistake is to apply it
to the entire data *before* splitting into training and
test sets. This will bias the model evaluation because
information would have leaked from the test set to the
training set.
In general, we recommend using
:class:`~sklearn.preprocessing.QuantileTransformer` within a
:ref:`Pipeline <pipeline>` in order to prevent most risks of data
leaking:`pipe = make_pipeline(QuantileTransformer(),
LogisticRegression())`.
For a comparison of the different scalers, transformers, and normalizers,
see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import quantile_transform
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> quantile_transform(X, n_quantiles=10, random_state=0, copy=True)
array([...])
"""
n = QuantileTransformer(
n_quantiles=n_quantiles,
output_distribution=output_distribution,
subsample=subsample,
ignore_implicit_zeros=ignore_implicit_zeros,
random_state=random_state,
copy=copy,
)
if axis == 0:
X = n.fit_transform(X)
else: # axis == 1
X = n.fit_transform(X.T).T
return X
|
QuantileTransformer
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/sensors/glue.py
|
{
"start": 11590,
"end": 16566
}
|
class ____(AwsBaseSensor[GlueDataQualityHook]):
"""
Waits for an AWS Glue data quality recommendation run to reach any of the status below.
'FAILED', 'STOPPED', 'STOPPING', 'TIMEOUT', 'SUCCEEDED'
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:GlueDataQualityRuleRecommendationRunSensor`
:param recommendation_run_id: The AWS Glue data quality rule recommendation run identifier.
:param show_results: Displays the recommended ruleset (a set of rules), when recommendation run completes. (default: True)
:param deferrable: If True, the sensor will operate in deferrable mode. This mode requires aiobotocore
module to be installed.
(default: False, but can be overridden in config file by setting default_deferrable to True)
:param poke_interval: Polling period in seconds to check for the status of the job. (default: 120)
:param max_retries: Number of times before returning the current state. (default: 60)
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
SUCCESS_STATES = ("SUCCEEDED",)
FAILURE_STATES = ("FAILED", "STOPPED", "STOPPING", "TIMEOUT")
aws_hook_class = GlueDataQualityHook
template_fields: Sequence[str] = aws_template_fields("recommendation_run_id")
def __init__(
self,
*,
recommendation_run_id: str,
show_results: bool = True,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poke_interval: int = 120,
max_retries: int = 60,
aws_conn_id: str | None = "aws_default",
**kwargs,
):
super().__init__(**kwargs)
self.recommendation_run_id = recommendation_run_id
self.show_results = show_results
self.deferrable = deferrable
self.poke_interval = poke_interval
self.max_retries = max_retries
self.aws_conn_id = aws_conn_id
def execute(self, context: Context) -> Any:
if self.deferrable:
self.defer(
trigger=GlueDataQualityRuleRecommendationRunCompleteTrigger(
recommendation_run_id=self.recommendation_run_id,
waiter_delay=int(self.poke_interval),
waiter_max_attempts=self.max_retries,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
)
else:
super().execute(context=context)
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> None:
validated_event = validate_execute_complete_event(event)
if validated_event["status"] != "success":
message = f"Error: AWS Glue data quality recommendation run: {validated_event}"
raise AirflowException(message)
if self.show_results:
self.hook.log_recommendation_results(run_id=self.recommendation_run_id)
self.log.info("AWS Glue data quality recommendation run completed.")
def poke(self, context: Context) -> bool:
self.log.info(
"Poking for AWS Glue data quality recommendation run RunId: %s", self.recommendation_run_id
)
response = self.hook.conn.get_data_quality_rule_recommendation_run(RunId=self.recommendation_run_id)
status = response.get("Status")
if status in self.SUCCESS_STATES:
if self.show_results:
self.hook.log_recommendation_results(run_id=self.recommendation_run_id)
self.log.info(
"AWS Glue data quality recommendation run completed RunId: %s Run State: %s",
self.recommendation_run_id,
response["Status"],
)
return True
if status in self.FAILURE_STATES:
job_error_message = (
f"Error: AWS Glue data quality recommendation run RunId: {self.recommendation_run_id} Run "
f"Status: {status}"
f": {response.get('ErrorString')}"
)
self.log.info(job_error_message)
raise AirflowException(job_error_message)
return False
|
GlueDataQualityRuleRecommendationRunSensor
|
python
|
walkccc__LeetCode
|
solutions/3082. Find the Sum of the Power of All Subsequences/3082.py
|
{
"start": 0,
"end": 683
}
|
class ____:
def sumOfPower(self, nums: list[int], k: int) -> int:
MOD = 1_000_000_007
@functools.lru_cache(None)
def dp(i: int, j: int) -> int:
"""Returns the number of subsequences in nums[i..n) that sums to j."""
if j == 0:
# For each of the remaining number, we can either pick it or skip it.
return pow(2, len(nums) - i, MOD)
if i == len(nums) or j < 0:
return 0
# 1. Include nums[i] in the subsequence and pick it.
# 2. Include nums[i] in the subsequence and skip it.
# 3. Exclude nums[i] in the subsequence.
return (dp(i + 1, j - nums[i]) + 2 * dp(i + 1, j)) % MOD
return dp(0, k)
|
Solution
|
python
|
scikit-learn__scikit-learn
|
sklearn/externals/_arff.py
|
{
"start": 12097,
"end": 12381
}
|
class ____(ArffException):
    '''Error raised when some data instance is in an invalid format.'''

    def __init__(self, value):
        super().__init__()
        # NOTE(review): the '%d' placeholder is deliberately left unfilled —
        # presumably the base class interpolates the line number later; verify
        # against ArffException before changing.
        prefix = 'Bad @DATA instance format in line %d: '
        self.message = prefix + ('%s' % value)
|
BadDataFormat
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/types/beta/skill_create_response.py
|
{
"start": 186,
"end": 1161
}
|
class ____(BaseModel):
    """Response model for a skill-creation API call.

    Field docstrings below are part of the model's schema (pydantic surfaces
    them); timestamps are ISO 8601 strings rather than datetime objects.
    """

    id: str
    """Unique identifier for the skill.

    The format and length of IDs may change over time.
    """

    created_at: str
    """ISO 8601 timestamp of when the skill was created."""

    display_title: Optional[str] = None
    """Display title for the skill.

    This is a human-readable label that is not included in the prompt sent to the
    model.
    """

    latest_version: Optional[str] = None
    """The latest version identifier for the skill.

    This represents the most recent version of the skill that has been created.
    """

    source: str
    """Source of the skill.

    This may be one of the following values:

    - `"custom"`: the skill was created by a user
    - `"anthropic"`: the skill was created by Anthropic
    """

    type: str
    """Object type.

    For Skills, this is always `"skill"`.
    """

    updated_at: str
    """ISO 8601 timestamp of when the skill was last updated."""
|
SkillCreateResponse
|
python
|
gevent__gevent
|
src/gevent/_tracer.py
|
{
"start": 4426,
"end": 4701
}
|
class ____(GreenletTracer):
    """Greenlet tracer bound to a hub, with a maximum-blocking-time threshold."""

    def __init__(self, hub, max_blocking_time):
        GreenletTracer.__init__(self)
        self.hub = hub
        self.max_blocking_time = max_blocking_time

    def kill(self):
        # Drop the hub reference before tearing down the base tracer so no
        # cycle keeps the hub alive.
        self.hub = None
        GreenletTracer.kill(self)
|
_HubTracer
|
python
|
google__jax
|
tests/custom_api_test.py
|
{
"start": 1460,
"end": 44725
}
|
class ____(jtu.JaxTestCase):
def test_basic(self):
@jax.custom_jvp
def f(x):
return jnp.sin(x)
def f_jvp(primals, tangents):
x, = primals
g, = tangents
return f(x), 2 * jnp.cos(x) * g
f.defjvp(f_jvp)
x = 3.
self.assertAllClose(f(x), jnp.sin(x))
self.assertAllClose(api.jvp(f, (x,), (1.,)),
(jnp.sin(x), 2 * jnp.cos(x)))
self.assertAllClose(api.grad(f)(x), 2 * jnp.cos(x))
def test_invariance(self):
@jax.custom_jvp
def f(x):
return jnp.cos(2 * x) / 2.
def f_jvp(primals, tangents):
x, = primals
g, = tangents
return (f(x), 3 * g)
f.defjvp(f_jvp)
def f2(x):
y, _ = api.jvp(f, (x,), (x,))
return y
def f3(x):
y, _ = api.jvp(f2, (x,), (x,))
return y
x = 1.
self.assertAllClose(api.jvp(f, (x,), (x,)),
api.jvp(f2, (x,), (x,)),
check_dtypes=False)
self.assertAllClose(api.jvp(f, (x,), (x,)),
api.jvp(f3, (x,), (x,)),
check_dtypes=False)
def test_python_control_flow(self):
@jax.custom_jvp
def f(x):
if x > 0:
return jnp.sin(x)
else:
return jnp.cos(x)
def f_jvp(primals, tangents):
x, = primals
g, = tangents
if x > 0:
return f(x), 2 * g
else:
return f(x), 3 * g
f.defjvp(f_jvp)
x = 2.
self.assertAllClose(f(x), jnp.sin(x))
self.assertAllClose(f(-x), jnp.cos(-x))
self.assertAllClose(api.jvp(f, (x,), (1.,)),
(jnp.sin(x), 2.),
check_dtypes=False)
self.assertAllClose(api.jvp(f, (-x,), (1.,)),
(jnp.cos(-x), 3.),
check_dtypes=False)
self.assertAllClose(api.grad(f)(x), 2., check_dtypes=False)
self.assertAllClose(api.grad(f)(-x), 3., check_dtypes=False)
def test_vmap(self):
@jax.custom_jvp
def f(x):
assert jnp.ndim(x) == 0
return jnp.sin(x)
def f_jvp(primals, tangents):
x, = primals
g, = tangents
assert jnp.ndim(x) == jnp.ndim(g) == 0
return f(x), 2 * jnp.cos(x) * g
f.defjvp(f_jvp)
x = jnp.arange(3.)
xx = jnp.arange(6.).reshape(2, 3)
# vmap of f
self.assertAllClose(api.vmap(f)(x), jnp.sin(x))
self.assertAllClose(api.vmap(api.vmap(f))(xx), jnp.sin(xx))
# vmap of jvp of f
self.assertAllClose(api.vmap(lambda x: api.jvp(f, (x,), (x,)))(x),
(jnp.sin(x), 2 * jnp.cos(x) * x))
self.assertAllClose(api.vmap(api.vmap(lambda x: api.jvp(f, (x,), (x,))))(xx),
(jnp.sin(xx), 2 * jnp.cos(xx) * xx))
# jvp of vmap of f
self.assertAllClose(api.jvp(api.vmap(f), (x,), (x,)),
(jnp.sin(x), 2 * jnp.cos(x) * x))
self.assertAllClose(api.jvp(api.vmap(api.vmap(f)), (xx,), (xx,)),
(jnp.sin(xx), 2 * jnp.cos(xx) * xx))
# vmap of jvp of vmap of f
self.assertAllClose(api.vmap(lambda x: api.jvp(api.vmap(f), (x,), (x,)))(xx),
(jnp.sin(xx), 2 * jnp.cos(xx) * xx))
def test_jit(self):
@jax.custom_jvp
def f(x):
return jnp.sin(x)
def f_jvp(primals, tangents):
x, = primals
g, = tangents
return f(x), 2 * jnp.cos(x) * g
f.defjvp(f_jvp)
x = 3.
# jit
self.assertAllClose(api.jit(f)(x), jnp.sin(x))
self.assertAllClose(api.jit(api.jit(f))(x), jnp.sin(x))
# jit of jvp
self.assertAllClose(api.jit(lambda x: api.jvp(f, (x,), (x,)))(x),
(jnp.sin(x), 2 * jnp.cos(x) * x),
check_dtypes=False)
# jvp of jit
self.assertAllClose(api.jvp(api.jit(f), (x,), (x,)),
(jnp.sin(x), 2 * jnp.cos(x) * x),
check_dtypes=False)
def test_pytrees(self):
@jax.custom_jvp
def f(x):
return {'b': jnp.sin(x['a'])}
def f_jvp(primals, tangents):
x, = primals
g, = tangents
return f(x), {'b': 2 * jnp.cos(x['a']) * g['a']}
f.defjvp(f_jvp)
x = {'a': 3.}
self.assertAllClose(f(x)['b'], jnp.sin(x['a']))
self.assertAllClose(api.jvp(f, (x,), (x,)),
({'b': jnp.sin(x['a'])},
{'b': 2 * jnp.cos(x['a']) * x['a']}),
check_dtypes=False)
def test_kwargs(self):
# from https://github.com/jax-ml/jax/issues/1938
@jax.custom_jvp
def my_fun(x, y, c=1.):
return c * (x + y)
def my_jvp(primals, tangents):
x, y, c = primals
t_x, t_y, t_c = tangents
return my_fun(x, y, c), t_c
my_fun.defjvp(my_jvp)
f = lambda x, y: jnp.square(my_fun(x, y, c=2.)).sum()
f(10., 5.) # doesn't crash
api.jvp(f, (10., 5.), (1., 1.)) # doesn't crash
def test_initial_style(self):
@jax.custom_jvp
def f(x):
return 3 * x
def f_jvp(primals, tangents):
x, = primals
g, = tangents
return f(x), 2 * g
f.defjvp(f_jvp)
def foo(x):
out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
return out
ans = api.grad(foo)(3.)
expected = 2.
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(api.jit(foo))(3.)
expected = 2.
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.jit(api.grad(foo))(3.)
expected = 2.
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(api.grad(foo))(3.)
expected = 0.
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(api.grad(api.jit(foo)))(3.)
expected = 0.
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(api.jit(api.grad(foo)))(3.)
expected = 0.
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.jit(api.grad(api.grad(foo)))(3.)
expected = 0.
self.assertAllClose(ans, expected, check_dtypes=False)
def test_initial_style_vmap(self):
@jax.custom_jvp
def f(x):
assert jnp.ndim(x) == 0
return 3 * x
def f_jvp(primals, tangents):
x, = primals
g, = tangents
return f(x), 2 * g
f.defjvp(f_jvp)
def foo(x):
out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
return out
ans = api.vmap(foo)(jnp.ones(3))
expected = 3. * jnp.ones(3)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.vmap(api.jit(foo))(jnp.ones(3))
expected = 3. * jnp.ones(3)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.jit(api.vmap(foo))(jnp.ones(3))
expected = 3. * jnp.ones(3)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(lambda x: api.vmap(foo)(x).sum())(jnp.ones(3))
expected = 2. * jnp.ones(3)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(lambda x: api.vmap(api.jit(foo))(x).sum())(jnp.ones(3))
expected = 2. * jnp.ones(3)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(lambda x: api.jit(api.vmap(foo))(x).sum())(jnp.ones(3))
expected = 2. * jnp.ones(3)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(api.jit(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))
expected = 2. * jnp.ones(3)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.jit(api.grad(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))
expected = 2. * jnp.ones(3)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_initial_style_vmap_with_collective(self):
@jax.custom_jvp
def f(x):
return lax.psum(x, 'foo')
@f.defjvp
def f_jvp(xs, ts):
x, = xs
t, = ts
return lax.psum(x, 'foo'), t
def g(x):
jaxpr = api.make_jaxpr(f)(x)
return core.eval_jaxpr(jaxpr.jaxpr, [], x)[0]
v = api.vmap(lambda _, x: g(x), axis_name='foo', in_axes=(0, None),
out_axes=None)(jnp.arange(4.), 2.)
self.assertAllClose(v, 8.)
def test_closed_over_tracers_error_message(self):
def f(x):
@jax.custom_jvp
def g(y):
return x + y
def g_jvp(primals, tangents):
return g(x), 2 * primals[0]
g.defjvp(g_jvp)
return g(1.)
self.assertRaises(UnexpectedTracerError, lambda: api.jvp(f, (3.,), (1.,)))
self.assertRaises(UnexpectedTracerError, lambda: api.grad(f)(3.))
def test_nondiff_argnums(self):
@partial(jax.custom_jvp, nondiff_argnums=(0,))
def app(f, x):
return f(x)
def app_jvp(f, primals, tangents):
(x,), (t,) = primals, tangents
return app(f, x), 3 * t
app.defjvp(app_jvp)
ans = app(lambda x: 2 * x, 1)
expected = 2
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.jvp(lambda x: app(lambda y: 2 * y, x), (1.,), (1.,))
expected = (2., 3.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_nondiff_argnames(self):
@partial(jax.custom_jvp, nondiff_argnames=('f',))
def app(f, x):
return f(x)
def app_jvp(f, primals, tangents):
(x,), (t,) = primals, tangents
return app(f, x), 3 * t
app.defjvp(app_jvp)
ans = app(lambda x: 2 * x, 1)
expected = 2
self.assertAllClose(ans, expected, check_dtypes=False)
def test_nondiff_arg_jit_tracer(self):
# This test would pass with "final-style" JIT tracing, but that was
# misleading: it doesn't work with "initial-style" staging, i.e. control
# flow primitives like jax.lax.scan or even pjit. The behavior isn't very
# useful either: instead of using nondiff_argnums here, a user can just pass
# such inputs as ordinary arguments, and ignore the corresponding tangents.
# Then nondiff_argnums can be reserved for (1) non jaxtype data (like a
# string- or callable-valued argument which parameterizes the function or
# rule) or (2) static data (e.g. integers which parameterize shapes).
raise unittest.SkipTest("behavior no longer supported")
@partial(jax.custom_jvp, nondiff_argnums=(0,))
def f(x, y):
return x * y
def f_jvp(x, primals, tangents):
(y,), (t_y,) = primals, tangents
return f(x, y), 5 * t_y
f.defjvp(f_jvp)
@jit
def g(x, y):
return f(x, y)
ans = api.jvp(lambda y: g(2., y), (3.,), (1.,))
expected = (6., 5.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_nondiff_arg_vmap_tracer(self):
@partial(jax.custom_jvp, nondiff_argnums=(0,))
def f(x, y):
return x * y
def f_jvp(x, primals, tangents):
(y,), (t_y,) = primals, tangents
return f(x, y), 5 * t_y
f.defjvp(f_jvp)
g = jax.vmap(f)
ans = api.jvp(lambda y: g(jnp.array([2.]), y),
(jnp.array([3.]),), (jnp.array([1.]),))
expected = (jnp.array([6.]), jnp.array([5.]))
self.assertAllClose(ans, expected, check_dtypes=False)
def test_nondiff_arg_hiding_jvp_tracer(self):
def f(x):
@partial(jax.custom_jvp, nondiff_argnums=(0,))
def g(h, x):
return h(x)
@g.defjvp
def g_jvp(h, primals, tangents):
x, = primals
t, = tangents
return g(h, x), 2. * t
h = lambda y: x + y # capture x
return g(h, x)
with self.assertRaises(UnexpectedTracerError):
api.jvp(f, (2.,), (1.,))
def test_vmap_axes(self):
raise unittest.SkipTest("TODO") # TODO(mattjj): write test
def test_pmap(self):
raise unittest.SkipTest("TODO") # TODO(mattjj): write test
def test_missing_jvp_rule_error_message(self):
@jax.custom_jvp
def foo(x):
return x ** 2
self.assertRaisesRegex(
AttributeError,
r"No JVP defined for custom_jvp function foo using defjvp.",
lambda: foo(2))
self.assertRaisesRegex(
AttributeError,
r"No JVP defined for custom_jvp function foo using defjvp.",
lambda: api.jvp(foo, (2.,), (1.,)))
self.assertRaisesRegex(
AttributeError,
r"No JVP defined for custom_jvp function foo using defjvp.",
lambda: api.grad(foo)(2.))
def test_jvp_rule_inconsistent_pytree_structures_error_message(self):
@jax.custom_jvp
def f(x):
return (x**2,)
@f.defjvp
def foo_jvp(primals, tangents):
x, = primals
t, = tangents
return f(x), [2 * x * t, x]
f(2.) # doesn't crash
self.assertRaisesRegex(
TypeError,
re.escape(
"Custom JVP rule foo_jvp for function f "
"must produce primal and tangent outputs "
"with equal container (pytree) structures, but got "
"{} and {} respectively.".format(
jax.tree.structure((1,)),
jax.tree.structure([1, 2]))
),
lambda: api.jvp(f, (2.,), (1.,)))
def test_primal_tangent_aval_disagreement_error_message(self):
@jax.custom_jvp
def f(x):
return x ** 2
@f.defjvp
def foo_jvp(primals, tangents):
x, = primals
t, = tangents
return f(x), jnp.reshape(t, (1,))
f(2.) # doesn't crash
self.assertRaisesRegex(
TypeError,
re.escape(
"Custom JVP rule must produce primal and tangent outputs "
"with corresponding shapes and dtypes. "
"Expected float32[] (tangent type of float32[]) but got float32[1]."),
lambda: api.jvp(f, (jnp.float32(2.),), (jnp.float32(1.),)))
def test_jvp_rule_doesnt_return_pair_error_message(self):
# https://github.com/jax-ml/jax/issues/2516
@jax.custom_jvp
def f(x):
return x ** 2
@f.defjvp
def foo_jvp(primals, tangents):
x, = primals
t, = tangents
return t
f(2.) # doesn't crash
self.assertRaisesRegex(
TypeError,
re.escape(
"Custom JVP rule foo_jvp for function f "
"must produce a pair (list or tuple of length two) "
"representing primal and tangent outputs, but got 1.0"),
lambda: api.jvp(f, (2.,), (1.,)))
def test_jvp_rule_primal_out_type_doesnt_match_primal_error_message(self):
# https://github.com/lucidrains/flash-attention-jax/issues/7
def scan_apply(f, x):
y, _ = jax.lax.scan(lambda x, _: (f(x), None), x, None, length=1)
return y
@jax.custom_jvp
def f(x):
return x
@f.defjvp
def f_jvp(primals, tangents):
(x,), (xdot,) = primals, tangents
return (x, x), (xdot, xdot)
x = jnp.float32(1.)
self.assertRaisesRegex(
TypeError,
re.escape(
"Custom JVP rule f_jvp for function f must produce a pair "
"(list or tuple of length two) where the first element represents "
"the primal output (equal in value to the output of the "
"custom_jvp-decorated function f, and in particular of the "
"same container/pytree structure), but instead the JVP rule "
"output's first element had container/pytree structure:\n"
" (float32[], float32[])\n"
"while the custom_jvp-decorated function f had output "
"container/pytree structure:\n"
" float32[]."
),
lambda: jax.jvp(lambda x: scan_apply(f, x), (x,), (x,)))
@f.defjvp
def f_jvp2(primals, tangents):
(x,), (xdot,) = primals, tangents
return jnp.zeros((3, *x.shape), x.dtype), xdot
self.assertRaisesRegex(
TypeError,
re.escape(
"Custom JVP rule f_jvp2 for function f must produce a pair "
"(list or tuple of length two) where the first element represents "
"the primal output (equal in value to the output of the "
"custom_jvp-decorated function f, and in particular "
"with leaves of the same shape/dtype), but instead the JVP rule "
"output's first element had shapes/dtypes of:\n"
" float32[3]\n"
"while the custom_jvp-decorated function f had output shapes/dtypes"
" of:\n"
" float32[]"
),
lambda: jax.jvp(lambda x: scan_apply(f, x), (x,), (x,)))
def test_multiple_rule_invocations(self):
@jax.custom_jvp
def expit(x):
return 1 / (1 + lax.exp(-x))
@expit.defjvp
def _expit_jvp(primals, tangents):
(x,), (t,) = primals, tangents
ans = expit(x)
t_out = t * ans * (1 - ans)
return ans, t_out
def scanned_fun(c, _):
return [expit(c[0])] + [c[i-1] + c[i] for i in range(1, len(c))], None
def foo(x):
zero = jnp.zeros_like(x)
c, _ = lax.scan(scanned_fun, [x, zero, zero, zero, zero], None, length=10)
return c[-1]
# just make sure these don't crash
foo(3.)
grad(foo)(3.)
grad(lambda x: jax.vmap(foo)(x).sum())(jnp.arange(3.))
def test_hard_stuff(self):
arr = jnp.ones((5, 2, 2))
api.jit(jax.vmap(jnp.linalg.det))(arr) # doesn't crash
def test_hard_stuff2(self):
@jax.custom_jvp
def f(x):
return np.zeros(x.shape, x.dtype)
@f.defjvp
def f_jvp(primals, tangents):
x, = primals
t, = tangents
return f(x), t
# don't crash
jax.jit(jax.vmap(f))(jnp.arange(3.))
jax.jit(jax.vmap(jax.grad(f)))(jnp.arange(3.))
jax.jit(jax.grad(lambda x: jax.vmap(f)(x).sum()))(jnp.arange(3.))
jax.grad(lambda x: jax.vmap(f)(x).sum())(jnp.arange(3.))
jax.jvp(jax.vmap(f), (jnp.arange(3.),), (jnp.ones(3),))
def test_hard_stuff3(self):
@jax.custom_jvp
def relu(x):
return jnp.maximum(x, 0)
@relu.defjvp
def _relu_jvp(primals, tangents):
x, = primals
t, = tangents
return relu(x), lax.select(x > 0, t, lax.full_like(t, 0))
def scanned_fun(c, _):
return [relu(c[0])] + [c[i-1] + c[i] for i in range(1, len(c))], None
def f(x):
zero = jnp.zeros_like(x)
c, _ = lax.scan(scanned_fun, [x, zero, zero, zero, zero], None, length=10)
return c[-1]
# don't crash
jax.jit(jax.vmap(f))(jnp.arange(3.))
jax.jit(jax.vmap(jax.grad(f)))(jnp.arange(3.))
jax.jit(jax.grad(lambda x: jax.vmap(f)(x).sum()))(jnp.arange(3.))
jax.grad(lambda x: jax.vmap(f)(x).sum())(jnp.arange(3.))
jax.jvp(jax.jit(jax.vmap(f)), (jnp.arange(3.),), (jnp.ones(3),))
def test_eval_shape(self):
@jax.custom_jvp
def expit(x):
return 1 / (1 + lax.exp(-x))
@expit.defjvp
def _expit_jvp(primals, tangents):
(x,), (t,) = primals, tangents
ans = expit(x)
t_out = t * ans * (1 - ans)
return ans, t_out
# don't crash
api.eval_shape(expit, jnp.ones((2, 3)))
api.eval_shape(api.grad(lambda x: expit(x).sum()), jnp.ones((2, 3)))
def test_jaxpr_zeros(self):
# from https://github.com/jax-ml/jax/issues/2657
@jax.custom_jvp
def f(A, b):
return A @ b
def f_jvp(primals, tangents):
A, b = primals
dA, db = tangents
z = f(A, b)
dz = A @ db + dA @ b
return z, dz
f.defjvp(f_jvp)
def experiment(theta):
def step(q, _):
z = f(jnp.eye(3), jnp.ones(3) * theta)
q += z[0]
return q, q
q = 0.
q, _ = lax.scan(step, q, None, 4)
return q
grad(experiment)(1.) # doesn't crash
def test_linear_in_scan(self):
@jax.custom_jvp
def f(x):
return -x
@f.defjvp
def f_jvp(primals, tangents):
x, = primals
x_dot, = tangents
return f(x), f(x_dot)
def foo(x):
out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
return out
ans = api.grad(foo)(3.)
expected = -1.
self.assertAllClose(ans, expected, check_dtypes=False)
def test_custom_jvps_first_rule_is_none(self):
# https://github.com/jax-ml/jax/issues/3389
@jax.custom_jvp
def f(x, y):
return x ** 2 * y
f.defjvps(None, lambda x_dot, primal_out, x, y: 2 * x * y * x_dot)
ans = grad(f, 1)(2., 3.) # doesn't crash
expected = 12.
self.assertAllClose(ans, expected, check_dtypes=False)
def test_concurrent_initial_style(self):
# https://github.com/jax-ml/jax/issues/3843
def unroll(param, sequence):
def scan_f(prev_state, inputs):
return prev_state, jax.nn.sigmoid(param * inputs)
return jnp.sum(jax.lax.scan(scan_f, None, sequence)[1])
def run():
return jax.grad(unroll)(jnp.array(1.0), jnp.array([1.0]))
expected = run()
# we just don't want this to crash
n_workers = 2
with concurrent.futures.ThreadPoolExecutor(max_workers=n_workers) as e:
futures = []
for _ in range(n_workers):
futures.append(e.submit(run))
results = [f.result() for f in futures]
for ans in results:
self.assertAllClose(ans, expected)
def test_nondiff_argnums_vmap_tracer(self):
# https://github.com/jax-ml/jax/issues/3964
@partial(jax.custom_jvp, nondiff_argnums=(0, 2))
def sample(shape, param, seed):
return jax.random.uniform(key=seed, shape=shape, minval=param)
@sample.defjvp
def sample_jvp(shape, seed, primals, tangents):
param, = primals
dparam, = tangents
dparam = jnp.broadcast_to(dparam, shape)
samples = sample(shape, param, seed)
return samples, samples * dparam # dummy jvp for proof of concept
# check these don't crash
jax.vmap(lambda seed: sample((2,3), 1., seed))(
jax.random.split(jax.random.key(1), 10))
jax.jvp(lambda x: sample((2, 3), x, jax.random.key(1)),
(1.,), (1.,))
def test_fun_with_nested_calls_2(self):
def call(f, *args):
f = jax.custom_jvp(f)
f.defjvp(lambda primals, tangents: (f(*primals), sum(tangents)))
return f(*args)
def fun_with_nested_calls_2(x):
def bar(y):
def baz(w):
q = call(lambda x: y, x)
q = q + call(lambda: y)
q = q + call(lambda y: w + y, y)
q = call(lambda w: call(jnp.sin, x) * y, 1.0) + q
return q
return api.jit(baz)(x)
return call(bar, x)
# test these don't crash
self.assertAllClose(api.jit(fun_with_nested_calls_2)(3.),
fun_with_nested_calls_2(3.))
api.vmap(fun_with_nested_calls_2)(jnp.arange(3.))
def test_closure_with_vmap(self):
# https://github.com/jax-ml/jax/issues/3822
alpha = np.float32(2.)
def sample(seed):
@jax.custom_jvp
def f(alpha):
return jax.random.gamma(seed, alpha, shape=[])
@f.defjvp
def f_jvp(primal, tangent):
alpha = primal
dalpha = tangent
sample = f(alpha)
partial_alpha = lax.random_gamma_grad(alpha, sample)
return sample, partial_alpha * dalpha
return f(alpha)
api.vmap(sample)(jax.random.split(jax.random.key(1), 3)) # don't crash
def test_closure_with_vmap2(self):
# https://github.com/jax-ml/jax/issues/8783
def h(z):
def f(x):
@jax.custom_jvp
def g(y):
return x * y
# NOTE: rule closes over vmap tracer
@g.defjvp
def g_jvp(primals, tangents):
(y,), (ydot,) = primals, tangents
return x * y, x * ydot
return g(z) # NOTE: no vmapped arg
return jax.vmap(f)(jnp.arange(3., dtype='float32'))
primals, tangents = jax.jvp(h, (jnp.float32(1.),), (jnp.float32(2.),))
self.assertAllClose(primals , jnp.arange(3., dtype='float32'))
self.assertAllClose(tangents, 2 * jnp.arange(3., dtype='float32'))
def test_float0(self):
scalar_float0 = jnp.zeros((), dtype=float0)
@jax.custom_jvp
def f(x, y):
return x, y
def f_jvp(primals, _):
x, y = primals
return (x, y), (2., jax.custom_derivatives.zero_from_primal(y))
f.defjvp(f_jvp)
primals = (2., 3)
tangents = (np.ones(()), scalar_float0)
expected_tangents = (2., scalar_float0)
self.assertAllClose(api.jvp(f, primals, tangents),
(primals, expected_tangents))
def test_float0_initial_style(self):
scalar_float0 = jnp.zeros((), dtype=float0)
@jax.custom_jvp
def f(x, y):
return x, y
def f_jvp(primals, _):
x, y = primals
return (x, y), (2., jax.custom_derivatives.zero_from_primal(y))
f.defjvp(f_jvp)
def foo(x, y):
out, _ = lax.scan(lambda c, _: (f(*c), None), (x, y), None, length=1)
return out
primals = (2., 3)
tangents = (np.ones(()), scalar_float0)
expected_tangents = (2., scalar_float0)
self.assertAllClose(api.jvp(foo, primals, tangents),
(primals, expected_tangents))
def test_remat(self):
@jax.custom_jvp
def f(x):
return jnp.sin(x)
def f_jvp(primals, tangents):
x, = primals
g, = tangents
return f(x), 2 * jnp.cos(x) * g
f.defjvp(f_jvp)
@jax.remat
def g(x):
return f(f(x))
ans = g(2.)
expected = np.sin(np.sin(2.))
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(g)(2.)
expected = 4. * api.grad(lambda x: jnp.sin(jnp.sin(x)))(2.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_remat_higher_order(self):
@jax.custom_jvp
def f(x):
return jnp.sin(x)
def f_jvp(primals, tangents):
x, = primals
g, = tangents
return f(x), 2 * jnp.cos(x) * g
f.defjvp(f_jvp)
def g(x):
return f(f(x))
ans = api.grad(api.grad(new_checkpoint(g)))(2.)
expected = api.grad(api.grad(g))(2.)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(new_checkpoint(api.grad(g)))(2.)
expected = api.grad(api.grad(g))(2.)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(api.grad(api.grad(new_checkpoint(g))))(2.)
expected = api.grad(api.grad(api.grad(g)))(2.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_initial_style_vmap_2(self):
# This is like test_initial_style_vmap except the primal function closes
# over an array constant.
y = jnp.arange(1., 4.)
@jax.custom_jvp
def f(x):
assert jnp.ndim(x) == 0
return 3 * x * jnp.sum(y)
def f_jvp(primals, tangents):
x, = primals
g, = tangents
return f(x), 2 * g
f.defjvp(f_jvp)
def foo(x):
out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
return out
ans = api.grad(lambda x: api.vmap(foo)(x).sum())(jnp.ones(3))
expected = 2. * jnp.ones(3)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(lambda x: api.vmap(api.jit(foo))(x).sum())(jnp.ones(3))
expected = 2. * jnp.ones(3)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(lambda x: api.jit(api.vmap(foo))(x).sum())(jnp.ones(3))
expected = 2. * jnp.ones(3)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(api.jit(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))
expected = 2. * jnp.ones(3)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.jit(api.grad(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))
expected = 2. * jnp.ones(3)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_custom_jvp_vmap_broadcasting_interaction(self):
# https://github.com/jax-ml/jax/issues/6452
def f2(y, z):
v1 = z
v2 = jnp.sum(y) + z
return jnp.logaddexp(v1, v2)
def f1(y, z):
v = api.vmap(lambda _y: f2(_y, z))(y)
return jnp.sum(v)
y = jnp.ones((3, 2))
f = lambda z: f1(y, z)
z = 0.1
val, g = api.value_and_grad(f)(z)
self.assertEqual(val.shape, ())
self.assertEqual(g.shape, ())
def test_custom_jvp_vmap_broadcasting_interaction_2(self):
# https://github.com/jax-ml/jax/issues/5849
@jax.custom_jvp
def transform(box, R):
if jnp.isscalar(box) or box.size == 1:
return R * box
elif box.ndim == 2:
return jnp.einsum('ij,j->i', box, R)
raise ValueError()
@transform.defjvp
def transform_jvp(primals, tangents):
box, R = primals
dbox, dR = tangents
return (transform(box, R), dR + transform(dbox, R))
def periodic_general(box):
def displacement_fn(Ra, Rb, **kwargs):
_box = kwargs.get('box', box)
return transform(_box, Ra - Rb)
return displacement_fn
N = 250
scalar_box = 1.0
displacement = periodic_general(scalar_box)
key = jax.random.key(0)
R = jax.random.uniform(key, (N, 2))
def energy_fn(box):
d = partial(displacement, box=box)
d = api.vmap(api.vmap(d, (None, 0)), (0, None))
return jnp.sum(d(R, R) ** 2)
self.assertEqual(grad(energy_fn)(scalar_box).shape, ())
def test_custom_jvp_implicit_broadcasting(self):
# https://github.com/jax-ml/jax/issues/6357
if config.enable_x64.value:
raise unittest.SkipTest("test only applies when x64 is disabled")
@jax.custom_jvp
def projection_unit_simplex(x: jax.Array) -> jax.Array:
"""Projection onto the unit simplex."""
s = 1.0
n_features = x.shape[0]
u = jnp.sort(x)[::-1]
cssv = jnp.cumsum(u) - s
ind = jnp.arange(n_features, dtype=x.dtype) + 1
cond = u - cssv / ind > 0
idx = jnp.count_nonzero(cond)
threshold = cssv[idx - 1] / idx.astype(x.dtype)
return jax.nn.relu(x - threshold)
@projection_unit_simplex.defjvp
def projection_unit_simplex_jvp(primals, tangents):
x, = primals
x_dot, = tangents
primal_out = projection_unit_simplex(x)
supp = (primal_out > 0).astype(x_dot.dtype)
card = jnp.count_nonzero(supp).astype(x_dot.dtype)
tangent_out = supp * x_dot - (jnp.dot(supp, x_dot) / card) * supp
return primal_out, tangent_out
rng = self.rng()
x = rng.rand(5).astype(np.float32)
J_rev = jax.jacrev(projection_unit_simplex)(x)
J_fwd = jax.jacfwd(projection_unit_simplex)(x)
p = projection_unit_simplex(x)
support = (p > 0).astype(jnp.float32)
cardinality = jnp.count_nonzero(support).astype(support.dtype)
J_true = jnp.diag(support) - jnp.outer(support, support) / cardinality
self.assertAllClose(J_true, J_fwd)
self.assertAllClose(J_true, J_rev)
proj = jax.vmap(projection_unit_simplex)
def fun(X):
return jnp.sum(proj(X) ** 2)
rng = self.rng()
X = rng.rand(4, 5).astype(np.float32)
U = rng.rand(4, 5)
U /= np.sqrt(np.sum(U ** 2))
U = U.astype(np.float32)
eps = 1e-3
dir_deriv_num = (fun(X + eps * U) - fun(X - eps * U)) / (2 * eps)
dir_deriv = jnp.vdot(jax.grad(fun)(X), U)
self.assertAllClose(dir_deriv, dir_deriv_num, atol=1e-3)
def test_vmap_inside_defjvp(self):
# https://github.com/jax-ml/jax/issues/3201
seed = 47
key = jax.random.key(seed)
mat = jax.random.normal(key, (2, 3))
@jax.custom_jvp
def f(mat, aux):
num_rows, num_cols = mat.shape
return jnp.ones((num_rows, 1)) / num_cols
@f.defjvp
def f_jvp(primals, tangents):
mat, aux = primals
vec, _ = tangents
output = f(*primals)
num_rows, num_cols = mat.shape
size = num_rows * num_cols
# -----
bd_mat = mat.reshape(1, 1, num_rows, num_cols)
bd_mat = jnp.tile(bd_mat, reps=(num_rows, num_cols))
bd_mat = bd_mat.reshape(size, num_rows, num_cols)
# -----
rowsum = jnp.sum(mat, axis=1, keepdims=True)
colsum = jnp.sum(mat, axis=0, keepdims=True)
bd_rowsum = jnp.tile(rowsum, reps=(1, num_rows))
bd_colsum = jnp.tile(colsum, reps=(num_cols, 1))
# -----
bd_vec = vec.reshape(size, 1)
# -----
def operate(mx, val):
buf = 0
for i in range(2):
buf = buf + jnp.matmul(mx, bd_colsum) / jnp.power(aux, i)
buf = jnp.matmul(bd_rowsum, buf)
return buf * val[None, :]
# -----
# Vertorizing will raise shape error
bd_buf = jax.vmap(operate, in_axes=(0, 0), out_axes=0)(bd_mat, bd_vec)
# -----
bd_buf = bd_buf / aux
jvp = jnp.sum(bd_buf, axis=0)
jvp = jnp.mean(jvp, axis=1, keepdims=True)
# -----
# JVP ends successfully, but still raise an error
return (output, jvp)
jax.grad(lambda mat, aux: jnp.sum(f(mat, aux)))(mat, 0.5) # doesn't crash
def test_custom_jvp_unbroadcasting(self):
# https://github.com/jax-ml/jax/issues/3056
a = jnp.array([1., 1.])
@jax.custom_jvp
def f(x):
return a * x
@f.defjvp
def f_jvp(primals, tangents):
x, = primals
dx, = tangents
return a * x, a * dx
shape = grad(lambda x: jnp.sum(f(x)))(jnp.array(1.)).shape
self.assertEqual(shape, ())
def test_maybe_perturbed_internal_helper_function(self):
# This is a unit test for an internal API. We include it so as not to
# regress https://github.com/jax-ml/jax/issues/9567. For an explanation of
# this helper function, see https://github.com/jax-ml/jax/issues/6415.
def f(x):
def g(y, _):
z = y * x
self.assertTrue(custom_derivatives._maybe_perturbed(z))
return y, None
g(1, None)
return lax.scan(g, 1, xs=None, length=1)[0]
jax.jvp(f, (1.0,), (1.0,)) # assertions inside f
def test_maybe_perturbed_int_regression(self):
# see https://github.com/jax-ml/jax/discussions/9951
@jax.jit
def f():
x = jnp.array(1)
_, aux_args = custom_derivatives.closure_convert(lambda: x)
self.assertEmpty(aux_args)
f()
def test_sinc_constant_function_batching(self):
    # https://github.com/jax-ml/jax/pull/10756
    # jnp.sinc has a custom derivative; batching it through lax.map under
    # vmap must produce the same gradient as an unrolled per-element
    # reference implementation.
    batch_data = jnp.arange(15.).reshape(5, 3)

    @jax.vmap
    def f(x):  # x is one row of the batch
        return jax.lax.map(jnp.sinc, x)
    g = lambda param: f(param * batch_data).sum()

    @jax.vmap
    def f_ref(x):  # reference: explicit Python loop instead of lax.map
        return jnp.stack([jnp.sinc(x_) for x_ in x])
    g_ref = lambda param: f_ref(param * batch_data).sum()

    grad = jax.grad(g)(0.1)  # doesn't crash
    grad_ref = jax.grad(g_ref)(0.1)
    self.assertAllClose(grad, grad_ref, check_dtypes=False)
@parameterized.named_parameters(
    ('jit_vmap', True, True),
    ('jit', True, False),
    ('vmap', False, True),
    ('', False, False),
)
def test_symbolic_zero_custom_jvp(self, maybe_jit, maybe_vmap):
    # With symbolic_zeros=True, tangents of unperturbed inputs must arrive
    # in the JVP rule as SymbolicZero instances carrying the primal shapes,
    # under every combination of jit and vmap.
    def f(static_scalar, static_array, dyn_scalar, dyn_array):
        out1 = static_scalar + dyn_scalar
        out2 = static_array + dyn_array
        return out1, out2

    def _pack(x):
        return lax.broadcast(x, (1,))

    def _unpack(x):
        (x,) = x
        return x

    def _vmap(fun):
        # Wrap `fun` so every argument and output gains a unit batch axis.
        def _fun(*args):
            args = jax.tree.map(_pack, args)
            out = jax.vmap(fun)(*args)
            out = jax.tree.map(_unpack, out)
            return out
        return _fun

    f = jax.custom_jvp(f)

    @partial(f.defjvp, symbolic_zeros=True)
    def f_jvp(primals, tangents):
        static_scalar, *_ = primals
        t_static, t_static_arr, t_dyn_scalar, t_dyn_array = tangents
        # Only the two "dyn" inputs are perturbed below, so the "static"
        # tangents must be symbolic zeros with the right shapes.
        self.assertIs(type(t_static), jax.custom_derivatives.SymbolicZero)
        self.assertIs(type(t_static_arr), jax.custom_derivatives.SymbolicZero)
        self.assertEqual(t_static.shape, ())
        self.assertEqual(t_static_arr.shape, (2,))
        # Deliberately "wrong" tangents (+90 / +91) make it observable that
        # this rule actually ran.
        return f(*primals), (static_scalar + 90, t_dyn_array + 91)

    def g(dyn_scalar, dyn_array):
        if maybe_vmap:
            f_ = _vmap(f)
        else:
            f_ = f
        return f_(1., jnp.array([2., 3.]), dyn_scalar, dyn_array)

    def run(primal_ins, tangent_ins):
        return jax.jvp(g, primal_ins, tangent_ins)

    if maybe_jit:
        run = jax.jit(run)

    primal_ins = (4., jnp.array([5., 6.]))
    tangent_ins = (7., jnp.array([8., 9.]))
    primal_outs, tangent_outs = run(primal_ins, tangent_ins)
    primal_out1, primal_out2 = primal_outs
    tangent_out1, tangent_out2 = tangent_outs

    # Outside jit/vmap, scalar outputs stay Python floats.
    scalar_type = jax.Array if maybe_jit or maybe_vmap else float
    self.assertIsInstance(primal_out1, scalar_type)
    self.assertAllClose(primal_out1, 5.)
    self.assertIsInstance(tangent_out1, scalar_type)
    self.assertAllClose(tangent_out1, 91.)
    self.assertIsInstance(primal_out2, jax.Array)
    self.assertArraysAllClose(primal_out2, jnp.array([7., 9.]))
    self.assertIsInstance(tangent_out2, jax.Array)
    self.assertArraysAllClose(tangent_out2, jnp.array([99., 100.]))
def test_symbolic_zero_custom_jvp_vmap_output(self):
    # A symbolic-zero tangent *returned* from the custom JVP rule must stay
    # symbolic through the vmap batching rule (y is not differentiated).
    @jax.custom_jvp
    def f(x, y):
        return x * y

    @partial(f.defjvp, symbolic_zeros=True)
    def f_jvp(primals, tangents):
        x, y = primals
        x_dot, y_dot = tangents
        self.assertIs(type(y_dot), jax.custom_derivatives.SymbolicZero)
        return f(x, y), y_dot

    jax.grad(lambda x, y: jax.vmap(f)(x, y).sum())(jnp.ones(3), jnp.ones(3))
def test_symbolic_zeros_memoization_caching(self):
    # Tests multiple zero patterns for partial_eval._memoize, and also tests
    # that we're okay with stores being occupied with equal values.
    @jax.custom_jvp
    def f(x, y):
        return x * y

    @partial(f.defjvp, symbolic_zeros=True)
    def f_jvp(primals, tangents):
        x, y = primals
        x_dot, y_dot = tangents
        return f(x, y), y_dot

    # Round-trip through a jaxpr so both linearizations below share one
    # traced function (and hence one memoization store).
    f_ = core.jaxpr_as_fun(jax.make_jaxpr(f)(2., 3.))
    _ = jax.linearize(f_, 2., 3.)
    _ = jax.linearize(lambda x: f_(x, 3.), 2.)  # don't crash!
def test_symbolic_zeros_under_jit(self):
# https://github.com/jax-ml/jax/issues/14833
Zero = jax.custom_derivatives.SymbolicZero
@jax.custom_jvp
def f(x, y):
return x * y
@partial(f.defjvp, symbolic_zeros=True)
def fjvp(primals, tangents):
x, y = primals
tx, ty = tangents
assert type(tx) is not Zero or type(ty) is not Zero
return f(x, y), (
ty if type(tx) is Zero else
tx if type(ty) is Zero else
tx + ty)
jax.jacfwd(jax.jit(f))(0.1, 0.2) # don't crash
def test_custom_jvp_functools_partial(self):
    # custom_jvp must accept a functools.partial-wrapped callable (which
    # has no __name__ of its own) as the function to differentiate.
    def fun(x, y, a):
        return x + y * a

    fun_wrapped = functools.partial(fun, a=0.1)

    def jvp_fn(primals, tangents):
        return jax.jvp(fun_wrapped, primals, tangents)

    fn = jax.custom_jvp(fun_wrapped)
    fn.defjvp(jvp_fn)
    # d/dx = 1.0, d/dy = a = 0.1
    self.assertEqual((1.0, 0.1), jax.grad(lambda args: fn(*args))((1.0, 2.0)))
def test_run_rules_more_than_once(self):
# https://github.com/jax-ml/jax/issues/16614
@jax.custom_jvp
def f(x, y):
return x
@partial(f.defjvp, symbolic_zeros=True)
def f_jvp(primals, tangents):
x, _ = primals
x_dot, _ = tangents
return x, x_dot
def body(x_y, _):
x, y = x_y
return (f(x, y), x), None
@jax.grad
def g(x):
(out, _), _ = lax.scan(body, (x, 1.), xs=None, length=2)
return out
g(1.) # doesn't crash
def test_dce(self):
    # Dead-code elimination through custom_jvp_call: pruning unused outputs
    # must drop the corresponding primitives from the inner call_jaxpr, and
    # the DCE'd jaxprs must still differentiate via the custom JVP rule.
    @jax.custom_jvp
    def f(x, y):
        return jnp.sin(x), x + jnp.cos(y)

    @f.defjvp
    def f_jvp(primals, tangents):
        x, y = primals
        dx, dy = tangents
        # Deliberately "wrong" coefficients (2.0, 1.5, -0.5) make it
        # observable below that this rule (not autodiff of f) ran.
        return f(x, y), (2.0 * jnp.cos(x) * dx, 1.5 * dx - 0.5 * jnp.sin(y) * dy)

    def check_jaxpr(jaxpr, used_outs, includes, excludes):
        # DCE w.r.t. `used_outs`, then inspect which primitives survive
        # inside the remaining custom_jvp_call's call_jaxpr.
        dce_jaxpr, _ = pe.dce_jaxpr(jaxpr, used_outs)
        if not dce_jaxpr.eqns:
            assert not includes
            return
        call_jaxpr = dce_jaxpr.eqns[0].params["call_jaxpr"]
        for prim in includes:
            assert any(eqn.primitive == prim for eqn in call_jaxpr.eqns)
        for prim in excludes:
            assert all(eqn.primitive != prim for eqn in call_jaxpr.eqns)

    x, y = 0.1, -1.3
    jaxpr = jax.make_jaxpr(f)(x, y).jaxpr
    check_jaxpr(jaxpr, [True, True], [lax.sin_p, lax.cos_p], [])
    check_jaxpr(jaxpr, [True, False], [lax.sin_p], [lax.cos_p])
    check_jaxpr(jaxpr, [False, True], [lax.cos_p], [lax.sin_p])
    check_jaxpr(jaxpr, [False, False], [], [lax.sin_p, lax.cos_p])

    def dce_jaxpr_as_fun(jaxpr, used_outs):
        jaxpr_, _ = pe.dce_jaxpr(jaxpr, used_outs)
        fun = core.jaxpr_as_fun(pe.close_jaxpr(jaxpr_))
        return lambda *args: fun(*args)[0]

    f0 = dce_jaxpr_as_fun(jaxpr, [True, False])  # keeps only sin(x)
    f1 = dce_jaxpr_as_fun(jaxpr, [False, True])  # keeps only x + cos(y)
    # The DCE'd functions must still produce the custom rule's tangents.
    self.assertAllClose(
        api.jvp(f0, (x, y), (1.0, 0.0)), (f0(x, y), 2.0 * jnp.cos(x)))
    self.assertAllClose(
        api.jvp(f0, (x, y), (0.0, 1.0)), (f0(x, y), 0.0))
    self.assertAllClose(
        api.jvp(f1, (x, y), (1.0, 0.0)), (f1(x, y), 1.5))
    self.assertAllClose(
        api.jvp(f1, (x, y), (0.0, 1.0)), (f1(x, y), -0.5 * jnp.sin(y)))
def test_dce_symbolic_zeros(self):
    # https://github.com/jax-ml/jax/issues/31448
    # DCE of a custom_jvp with symbolic_zeros=True under jacfwd-of-jacrev
    # must not lose track of symbolic-zero tangents.
    @jax.custom_jvp
    def f(x):
        return x

    @partial(f.defjvp, symbolic_zeros=True)
    def f_jvp(primals, tangents):
        x, = primals
        tx, = tangents
        return f(x), tx

    @jax.jacfwd
    @jax.jacrev
    def f_wrapped(x):
        # Pass a pytree argument so DCE has per-leaf outputs to prune.
        return jax.jit(f)((x, 3.))

    f_wrapped(jnp.zeros(2))  # doesn't crash
def test_resolve_kwargs_error_message(self):
    # Argument-resolution failures must raise a custom_jvp-specific
    # TypeError, and must happen before the JVP rule ever runs.
    @jax.custom_jvp
    def f(x, y, *, z=None):
        return jnp.sin(x), x + jnp.cos(y)

    @f.defjvp
    def f_jvp(primals, tangents):
        self.fail("should not be executed")

    with self.assertRaisesRegex(
        TypeError,
        r"The input arguments to the custom_jvp-decorated function f(.*)\n"
        r"missing a required argument: 'y'"
    ):
        f(0.5)
    with self.assertRaisesRegex(
        TypeError,
        r"The input arguments to the custom_jvp-decorated function f(.*)\n"
        "The following keyword arguments could not be resolved to positions: z"
    ):
        f(0.5, 0.1, z=1.0)
def test_symbolic_zero_custom_jvp_vmap_doesnt_instantiate(self):
@jax.custom_jvp
def f(x, y):
return y
def f_jvp(primals, tangents):
(x, y), (x_dot, y_dot) = primals, tangents
assert type(y_dot) is jax.custom_derivatives.SymbolicZero
return y, y_dot
f.defjvp(f_jvp, symbolic_zeros=True)
def g(x):
return f(x, f(x, 1.))
jax.jvp(jax.vmap(g), (jnp.ones(3),), (jnp.ones(3),)) # don't crash
def test_symbolic_zero_under_vmap_of_jit(self):
    # https://github.com/jax-ml/jax/issues/28144
    @jax.custom_jvp
    def f(x):
        return x + 1

    @f.defjvp
    def f_jvp(x, t):
        (x,) = x
        (t,) = t
        # Return a symbolic-zero tangent regardless of the input tangent;
        # vmap-of-jit must handle it without instantiating.
        z = jax.custom_derivatives.zero_from_primal(x, symbolic_zeros=True)
        return f(x), z

    x = jnp.arange(3.0)
    jax.jvp(jax.vmap(jax.jit(f)), (x,), (x,))  # doesn't crash
def test_pretty_print(self):
    # Pins the pretty-printed form of a custom_jvp_call equation: name,
    # inlined call_jaxpr, jvp rule name, and the symbolic_zeros flag.
    # NOTE(review): the expected string's internal indentation was lost in
    # this copy of the file — confirm it against the printer's real output.
    @jax.custom_jvp
    def f(x):
        return x + 1

    @f.defjvp
    def f_jvp(primals, tangents):
        return f(*primals), tangents[0]

    x = jnp.array([4.2], dtype=jnp.float32)
    jaxpr = jax.make_jaxpr(f)(x)
    actual = jaxpr.pretty_print(use_color=False)
    expected = textwrap.dedent(
        """
        { lambda ; a:f32[1]. let
            b:f32[1] = custom_jvp_call[
              name=f
              call_jaxpr={ lambda ; c:f32[1]. let d:f32[1] = add c 1.0:f32[] in (d,) }
              jvp=f_jvp
              symbolic_zeros=False
            ] a
          in (b,) }
        """).strip()
    self.assertEqual(actual, expected)
def test_custom_jvp_transpose_vjp3(self):
    # NOTE(review): exercises `api.vjp3`, which is not part of the public
    # jax API surface — presumably an internal/experimental VJP variant;
    # confirm against the `api` module imported at the top of this file.
    @jax.custom_jvp
    def div(x, y):
        return x / y

    @div.defjvp
    def sin_jvp(primals, tangents):
        (x, y), (x_dot, y_dot) = primals, tangents
        del y_dot  # ignore lol
        return div(x, y), div(x_dot, y)

    _, f_vjp = api.vjp3(lambda x: div(x, 2.), 1.)
    ans, = f_vjp(1.)
    # d/dx (x / 2) = 1/2
    self.assertAllClose(ans, 1./2, check_dtypes=False)
|
CustomJVPTest
|
python
|
pydantic__pydantic
|
tests/mypy/outputs/mypy-plugin_ini/plugin_fail_baseConfig.py
|
{
"start": 5929,
"end": 6175
}
|
class ____(BaseModel):
    # mypy-plugin output fixture: the `# MYPY:` comment records the checker
    # output expected for the line directly above it — keep these verbatim.
    x: int = Field(..., alias='y')

    class Config:  # type: ignore[pydantic-alias]
        # MYPY: error: Unused "type: ignore" comment [unused-ignore]
        alias_generator = lambda x: x + '_'  # noqa E731
|
AliasGeneratorModel2
|
python
|
realpython__materials
|
python-iterators-iterables/resettable_iter.py
|
{
"start": 0,
"end": 556
}
|
class ____:
def __init__(self, start=0, end=None, step=1, *, resettable=False):
if end is None:
end, start = start, 0
self._start = start
self._end = end
self._step = step
self._resettable = resettable
def __iter__(self):
return self
def __next__(self):
if self._start >= self._end:
if self._resettable:
self._start = 0
raise StopIteration
value = self._start
self._start += self._step
return value
|
ResettableRange
|
python
|
PrefectHQ__prefect
|
src/prefect/flows.py
|
{
"start": 86353,
"end": 127757
}
|
class InfrastructureBoundFlow(Flow[P, R]):
    # Name restored from the placeholder: `with_options` and
    # `bind_flow_to_infrastructure` below both reference InfrastructureBoundFlow.
    """
    EXPERIMENTAL: This class is experimental and may be removed or changed in future
    releases.

    A flow that is bound to running on a specific infrastructure.

    Attributes:
        work_pool: The name of the work pool to run the flow on. The base job
            configuration of the work pool will determine the configuration of the
            infrastructure the flow will run on.
        job_variables: Infrastructure configuration that will override the base job
            configuration of the work pool.
        worker_cls: The class of the worker to use to spin up infrastructure and submit
            the flow to it.
    """

    def __init__(
        self,
        *args: Any,
        work_pool: str,
        job_variables: dict[str, Any],
        worker_cls: type["BaseWorker[Any, Any, Any]"],
        **kwargs: Any,
    ):
        super().__init__(*args, **kwargs)
        self.work_pool = work_pool
        self.job_variables = job_variables
        self.worker_cls = worker_cls

    @overload
    def __call__(self: "Flow[P, NoReturn]", *args: P.args, **kwargs: P.kwargs) -> None:
        # `NoReturn` matches if a type can't be inferred for the function which stops a
        # sync function from matching the `Coroutine` overload
        ...

    @overload
    def __call__(
        self: "Flow[P, Coroutine[Any, Any, T]]",
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> Coroutine[Any, Any, T]: ...

    @overload
    def __call__(
        self: "Flow[P, T]",
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> T: ...

    @overload
    def __call__(
        self: "Flow[P, Coroutine[Any, Any, T]]",
        *args: P.args,
        return_state: Literal[True],
        **kwargs: P.kwargs,
    ) -> Awaitable[State[T]]: ...

    @overload
    def __call__(
        self: "Flow[P, T]",
        *args: P.args,
        return_state: Literal[True],
        **kwargs: P.kwargs,
    ) -> State[T]: ...

    def __call__(
        self,
        *args: "P.args",
        return_state: bool = False,
        wait_for: Optional[Iterable[PrefectFuture[Any]]] = None,
        **kwargs: "P.kwargs",
    ):
        """Run the flow by submitting it to the bound work pool via a local worker."""

        async def modified_call(
            *args: P.args,
            return_state: bool = False,
            # TODO: Handle wait_for once we have an asynchronous way to wait for futures
            # We should wait locally for futures to resolve before spinning up
            # infrastructure.
            wait_for: Optional[Iterable[PrefectFuture[Any]]] = None,
            **kwargs: P.kwargs,
        ) -> R | State[R]:
            try:
                async with self.worker_cls(work_pool_name=self.work_pool) as worker:
                    parameters = get_call_parameters(self, args, kwargs)
                    future = await worker.submit(
                        flow=self,
                        parameters=parameters,
                        job_variables=self.job_variables,
                    )
                    if return_state:
                        await future.wait_async()
                        return future.state
                    return await future.aresult()
            except (ExceptionGroup, BaseExceptionGroup) as exc:
                # For less verbose tracebacks
                exceptions = exc.exceptions
                if len(exceptions) == 1:
                    raise exceptions[0] from None
                else:
                    raise

        if inspect.iscoroutinefunction(self.fn):
            return modified_call(
                *args, return_state=return_state, wait_for=wait_for, **kwargs
            )
        else:
            return run_coro_as_sync(
                modified_call(
                    *args,
                    return_state=return_state,
                    wait_for=wait_for,
                    **kwargs,
                )
            )

    def submit(self, *args: P.args, **kwargs: P.kwargs) -> PrefectFlowRunFuture[R]:
        """
        EXPERIMENTAL: This method is experimental and may be removed or changed in future
        releases.

        Submit the flow to run on remote infrastructure.

        This method will spin up a local worker to submit the flow to remote infrastructure. To
        submit the flow to remote infrastructure without spinning up a local worker, use
        `submit_to_work_pool` instead.

        Args:
            *args: Positional arguments to pass to the flow.
            **kwargs: Keyword arguments to pass to the flow.

        Returns:
            A `PrefectFlowRunFuture` that can be used to retrieve the result of the flow run.

        Examples:
            Submit a flow to run on Kubernetes:

            ```python
            from prefect import flow
            from prefect_kubernetes.experimental import kubernetes

            @kubernetes(work_pool="my-kubernetes-work-pool")
            @flow
            def my_flow(x: int, y: int):
                return x + y

            future = my_flow.submit(x=1, y=2)
            result = future.result()
            print(result)
            ```
        """

        async def submit_func():
            async with self.worker_cls(work_pool_name=self.work_pool) as worker:
                parameters = get_call_parameters(self, args, kwargs)
                return await worker.submit(
                    flow=self,
                    parameters=parameters,
                    job_variables=self.job_variables,
                )

        return run_coro_as_sync(submit_func())

    def submit_to_work_pool(
        self, *args: P.args, **kwargs: P.kwargs
    ) -> PrefectFlowRunFuture[R]:
        """
        EXPERIMENTAL: This method is experimental and may be removed or changed in future
        releases.

        Submits the flow to run on remote infrastructure.

        This method will create a flow run for an existing worker to submit to remote infrastructure.
        If you don't have a worker available, use `submit` instead.

        Args:
            *args: Positional arguments to pass to the flow.
            **kwargs: Keyword arguments to pass to the flow.

        Returns:
            A `PrefectFlowRunFuture` that can be used to retrieve the result of the flow run.

        Examples:
            Dispatch a flow to run on Kubernetes:

            ```python
            from prefect import flow
            from prefect_kubernetes.experimental import kubernetes

            @kubernetes(work_pool="my-kubernetes-work-pool")
            @flow
            def my_flow(x: int, y: int):
                return x + y

            future = my_flow.submit_to_work_pool(x=1, y=2)
            result = future.result()
            print(result)
            ```
        """
        warnings.warn(
            "Dispatching flows to remote infrastructure is experimental. The interface "
            "and behavior of this method are subject to change.",
            category=FutureWarning,
        )
        from prefect import get_client
        from prefect._experimental.bundles import (
            convert_step_to_command,
            create_bundle_for_flow_run,
            upload_bundle_to_storage,
        )
        from prefect.context import FlowRunContext, TagsContext
        from prefect.results import get_result_store, resolve_result_storage
        from prefect.states import Pending, Scheduled
        from prefect.tasks import Task

        # Get parameters to error early if they are invalid
        parameters = get_call_parameters(self, args, kwargs)

        with get_client(sync_client=True) as client:
            work_pool = client.read_work_pool(self.work_pool)

            if (
                work_pool.storage_configuration.bundle_upload_step is None
                or work_pool.storage_configuration.bundle_execution_step is None
            ):
                raise RuntimeError(
                    f"Storage is not configured for work pool {work_pool.name!r}. "
                    "Please configure storage for the work pool by running `prefect "
                    "work-pool storage configure`."
                )

            current_result_store = get_result_store()
            # Check result storage and use the work pool default if needed
            if (
                current_result_store.result_storage is None
                or isinstance(current_result_store.result_storage, LocalFileSystem)
                and self.result_storage is None
            ):
                if (
                    work_pool.storage_configuration.default_result_storage_block_id
                    is None
                ):
                    logger.warning(
                        f"Flow {self.name!r} has no result storage configured. Please configure "
                        "result storage for the flow if you want to retrieve the result for the flow run."
                    )
                else:
                    # Use the work pool's default result storage block for the flow run to ensure the caller can retrieve the result
                    flow = self.with_options(
                        result_storage=resolve_result_storage(
                            work_pool.storage_configuration.default_result_storage_block_id,
                            _sync=True,
                        ),
                        persist_result=True,
                    )
            else:
                flow = self

            bundle_key = str(uuid.uuid4())
            upload_command = convert_step_to_command(
                work_pool.storage_configuration.bundle_upload_step,
                bundle_key,
                quiet=True,
            )
            execute_command = convert_step_to_command(
                work_pool.storage_configuration.bundle_execution_step, bundle_key
            )

            job_variables = (self.job_variables or {}) | {
                "command": " ".join(execute_command)
            }

            # Create a parent task run if this is a child flow run to ensure it shows up as a child flow in the UI
            parent_task_run = None
            if flow_run_ctx := FlowRunContext.get():
                parent_task = Task[Any, Any](
                    name=flow.name,
                    fn=flow.fn,
                    version=flow.version,
                )
                parent_task_run = run_coro_as_sync(
                    parent_task.create_run(
                        flow_run_context=flow_run_ctx,
                        parameters=parameters,
                    )
                )

            flow_run = client.create_flow_run(
                flow,
                parameters=flow.serialize_parameters(parameters),
                # Start out in pending to prevent a worker from starting the flow run before the bundle is uploaded
                state=Pending(),
                job_variables=job_variables,
                work_pool_name=work_pool.name,
                tags=TagsContext.get().current_tags,
                parent_task_run_id=getattr(parent_task_run, "id", None),
            )

            bundle = create_bundle_for_flow_run(flow=flow, flow_run=flow_run)
            upload_bundle_to_storage(bundle, bundle_key, upload_command)

            # Set flow run to scheduled now that the bundle is uploaded and ready to be executed
            client.set_flow_run_state(flow_run.id, state=Scheduled())

            # TODO: It'd be nice to be able to return the future sooner
            return PrefectFlowRunFuture(flow_run_id=flow_run.id)

    def with_options(
        self,
        *,
        name: Optional[str] = None,
        version: Optional[str] = None,
        retries: Optional[int] = None,
        retry_delay_seconds: Optional[Union[int, float]] = None,
        description: Optional[str] = None,
        flow_run_name: Optional[Union[Callable[[], str], str]] = None,
        task_runner: Union[
            Type[TaskRunner[PrefectFuture[Any]]], TaskRunner[PrefectFuture[Any]], None
        ] = None,
        timeout_seconds: Union[int, float, None] = None,
        validate_parameters: Optional[bool] = None,
        persist_result: Optional[bool] = NotSet,  # type: ignore
        result_storage: Optional[ResultStorage] = NotSet,  # type: ignore
        result_serializer: Optional[ResultSerializer] = NotSet,  # type: ignore
        cache_result_in_memory: Optional[bool] = None,
        log_prints: Optional[bool] = NotSet,  # type: ignore
        on_completion: Optional[list[FlowStateHook[P, R]]] = None,
        on_failure: Optional[list[FlowStateHook[P, R]]] = None,
        on_cancellation: Optional[list[FlowStateHook[P, R]]] = None,
        on_crashed: Optional[list[FlowStateHook[P, R]]] = None,
        on_running: Optional[list[FlowStateHook[P, R]]] = None,
        job_variables: Optional[dict[str, Any]] = None,
    ) -> "InfrastructureBoundFlow[P, R]":
        """Create a copy of this flow with updated settings, re-bound to the same
        work pool and worker class (optionally with new job variables)."""
        new_flow = super().with_options(
            name=name,
            version=version,
            retries=retries,
            retry_delay_seconds=retry_delay_seconds,
            description=description,
            flow_run_name=flow_run_name,
            task_runner=task_runner,
            timeout_seconds=timeout_seconds,
            validate_parameters=validate_parameters,
            persist_result=persist_result,
            result_storage=result_storage,
            result_serializer=result_serializer,
            cache_result_in_memory=cache_result_in_memory,
            log_prints=log_prints,
            on_completion=on_completion,
            on_failure=on_failure,
            on_cancellation=on_cancellation,
            on_crashed=on_crashed,
            on_running=on_running,
        )
        new_infrastructure_bound_flow = bind_flow_to_infrastructure(
            new_flow,
            self.work_pool,
            self.worker_cls,
            job_variables=job_variables
            if job_variables is not None
            else self.job_variables,
        )
        return new_infrastructure_bound_flow
def bind_flow_to_infrastructure(
    flow: Flow[P, R],
    work_pool: str,
    worker_cls: type["BaseWorker[Any, Any, Any]"],
    job_variables: dict[str, Any] | None = None,
) -> InfrastructureBoundFlow[P, R]:
    """Wrap ``flow`` in an ``InfrastructureBoundFlow`` targeting ``work_pool``.

    The returned flow carries over every attribute of the original flow and
    will be submitted via ``worker_cls``, with ``job_variables`` overriding
    the work pool's base job configuration.
    """
    bound = InfrastructureBoundFlow[P, R](
        flow.fn,
        work_pool=work_pool,
        job_variables=job_variables or {},
        worker_cls=worker_cls,
    )
    # Mirror the original flow's state (name, retries, hooks, ...) onto the
    # new instance so binding is transparent to callers.
    for attr_name, attr_value in vars(flow).items():
        setattr(bound, attr_name, attr_value)
    return bound
def _raise_on_name_with_banned_characters(name: Optional[str]) -> Optional[str]:
"""
Raise an InvalidNameError if the given name contains any invalid
characters.
"""
if name is None:
return name
if not re.match(WITHOUT_BANNED_CHARACTERS, name):
raise InvalidNameError(
f"Name {name!r} contains an invalid character. "
f"Must not contain any of: {BANNED_CHARACTERS}."
)
return name
def select_flow(
    flows: Iterable[Flow[P, R]],
    flow_name: Optional[str] = None,
    from_message: Optional[str] = None,
) -> Flow[P, R]:
    """
    Select the only flow in an iterable or a flow specified by name.

    Args:
        flows: Candidate flows to select from.
        flow_name: If given, the name of the flow to select.
        from_message: Optional context appended to error messages.

    Returns
        A single flow object

    Raises:
        MissingFlowError: If no flows exist in the iterable
        MissingFlowError: If a flow name is provided and that flow does not exist
        UnspecifiedFlowError: If multiple flows exist but no flow name was provided
    """
    # Convert to flows by name
    flows_dict = {f.name: f for f in flows}
    # Add a leading space if given, otherwise use an empty string
    from_message = (" " + from_message) if from_message else ""
    if not flows_dict:
        # Bug fix: previously this tested `not Optional` (the typing alias,
        # which is always truthy), so the empty-iterable error never raised.
        raise MissingFlowError(f"No flows found{from_message}.")
    elif flow_name and flow_name not in flows_dict:
        raise MissingFlowError(
            f"Flow {flow_name!r} not found{from_message}. "
            f"Found the following flows: {listrepr(flows_dict.keys())}. "
            "Check to make sure that your flow function is decorated with `@flow`."
        )
    elif not flow_name and len(flows_dict) > 1:
        raise UnspecifiedFlowError(
            (
                f"Found {len(flows_dict)} flows{from_message}:"
                f" {listrepr(sorted(flows_dict.keys()))}. Specify a flow name to select a"
                " flow."
            ),
        )
    if flow_name:
        return flows_dict[flow_name]
    else:
        return list(flows_dict.values())[0]
def load_flow_from_entrypoint(
    entrypoint: str,
    use_placeholder_flow: bool = True,
) -> Flow[P, Any]:
    """
    Extract a flow object from a script at an entrypoint by running all of the code in the file.

    Args:
        entrypoint: a string in the format `<path_to_script>:<flow_func_name>`
            or a string in the format `<path_to_script>:<class_name>.<flow_method_name>`
            or a module path to a flow function
        use_placeholder_flow: if True, use a placeholder Flow object if the actual flow object
            cannot be loaded from the entrypoint (e.g. dependencies are missing)

    Returns:
        The flow object from the script

    Raises:
        ScriptError: If an exception is encountered while running the script
        MissingFlowError: If the flow function specified in the entrypoint does not exist
    """
    if ":" in entrypoint:
        # split by the last colon once to handle Windows paths with drive letters i.e C:\path\to\file.py:do_stuff
        path, func_name = entrypoint.rsplit(":", maxsplit=1)
    else:
        path, func_name = entrypoint.rsplit(".", maxsplit=1)
    try:
        flow: Flow[P, Any] = import_object(entrypoint)  # pyright: ignore[reportRedeclaration]
    except AttributeError as exc:
        raise MissingFlowError(
            f"Flow function with name {func_name!r} not found in {path!r}. "
        ) from exc
    except ScriptError:
        # If the flow has dependencies that are not installed in the current
        # environment, fallback to loading the flow via AST parsing.
        if use_placeholder_flow:
            flow: Optional[Flow[P, Any]] = safe_load_flow_from_entrypoint(entrypoint)
            if flow is None:
                # AST-based fallback also failed; surface the original error.
                raise
        else:
            raise

    if not isinstance(flow, Flow):  # pyright: ignore[reportUnnecessaryIsInstance]
        raise MissingFlowError(
            f"Function with name {func_name!r} is not a flow. Make sure that it is "
            "decorated with '@flow'."
        )

    return flow
def load_function_and_convert_to_flow(entrypoint: str) -> Flow[P, Any]:
    """
    Loads a function from an entrypoint and converts it to a flow if it is not already a flow.
    """
    # Entrypoints are either "<path>:<name>" or a dotted module path; split on
    # the LAST separator so Windows drive letters (C:\...) stay intact.
    separator = ":" if ":" in entrypoint else "."
    path, func_name = entrypoint.rsplit(separator, maxsplit=1)
    try:
        loaded = import_object(entrypoint)  # pyright: ignore[reportRedeclaration]
    except AttributeError as exc:
        raise RuntimeError(
            f"Function with name {func_name!r} not found in {path!r}."
        ) from exc

    if not isinstance(loaded, Flow):
        # Plain callables get wrapped on the fly; log_prints so their stdout
        # shows up in flow-run logs.
        loaded = Flow(loaded, log_prints=True)
    return loaded
def serve(
    *args: "RunnerDeployment",
    pause_on_shutdown: bool = True,
    print_starting_message: bool = True,
    limit: Optional[int] = None,
    **kwargs: Any,
) -> None:
    """
    Serve the provided list of deployments.

    Args:
        *args: A list of deployments to serve.
        pause_on_shutdown: A boolean for whether or not to automatically pause
            deployment schedules on shutdown.
        print_starting_message: Whether or not to print message to the console
            on startup.
        limit: The maximum number of runs that can be executed concurrently.
        **kwargs: Additional keyword arguments to pass to the runner.

    Examples:
        Prepare two deployments and serve them:

        ```python
        import datetime
        from prefect import flow, serve

        @flow
        def my_flow(name):
            print(f"hello {name}")

        @flow
        def my_other_flow(name):
            print(f"goodbye {name}")

        if __name__ == "__main__":
            # Run once a day
            hello_deploy = my_flow.to_deployment(
                "hello", tags=["dev"], interval=datetime.timedelta(days=1)
            )

            # Run every Sunday at 4:00 AM
            bye_deploy = my_other_flow.to_deployment(
                "goodbye", tags=["dev"], cron="0 4 * * sun"
            )

            serve(hello_deploy, bye_deploy)
        ```
    """
    from prefect.runner import Runner

    if is_in_async_context():
        raise RuntimeError(
            "Cannot call `serve` in an asynchronous context. Use `aserve` instead."
        )

    runner = Runner(pause_on_shutdown=pause_on_shutdown, limit=limit, **kwargs)
    for deployment in args:
        # Served deployments are polled by this local runner, so a work pool
        # attached to the deployment would be silently unused — warn & strip.
        if deployment.work_pool_name:
            warnings.warn(
                "Work pools are not necessary for served deployments - "
                "the `work_pool_name` argument will be ignored. Omit the "
                f"`work_pool_name` argument from `to_deployment` for {deployment.name!r}.",
                UserWarning,
            )
            deployment.work_pool_name = None
        runner.add_deployment(deployment)

    if print_starting_message:
        _display_serve_start_message(*args)

    try:
        # Block forever polling for scheduled runs until interrupted.
        asyncio.run(runner.start())
    except (KeyboardInterrupt, TerminationSignal) as exc:
        logger.info(f"Received {type(exc).__name__}, shutting down...")
async def aserve(
    *args: "RunnerDeployment",
    pause_on_shutdown: bool = True,
    print_starting_message: bool = True,
    limit: Optional[int] = None,
    **kwargs: Any,
) -> None:
    """
    Asynchronously serve the provided list of deployments.

    Use `serve` instead if calling from a synchronous context.

    Args:
        *args: A list of deployments to serve.
        pause_on_shutdown: A boolean for whether or not to automatically pause
            deployment schedules on shutdown.
        print_starting_message: Whether or not to print message to the console
            on startup.
        limit: The maximum number of runs that can be executed concurrently.
        **kwargs: Additional keyword arguments to pass to the runner.

    Examples:
        Prepare deployment and asynchronous initialization function and serve them:

        ```python
        import asyncio
        import datetime

        from prefect import flow, aserve, get_client


        async def init():
            await set_concurrency_limit()


        async def set_concurrency_limit():
            async with get_client() as client:
                await client.create_concurrency_limit(tag='dev', concurrency_limit=3)


        @flow
        async def my_flow(name):
            print(f"hello {name}")


        async def main():
            # Initialization function
            await init()

            # Run once a day
            hello_deploy = await my_flow.to_deployment(
                "hello", tags=["dev"], interval=datetime.timedelta(days=1)
            )

            await aserve(hello_deploy)


        if __name__ == "__main__":
            asyncio.run(main())
        """
    from prefect.runner import Runner

    runner = Runner(pause_on_shutdown=pause_on_shutdown, limit=limit, **kwargs)
    for deployment in args:
        add_deployment_coro = runner.add_deployment(deployment)
        if TYPE_CHECKING:
            assert inspect.isawaitable(add_deployment_coro)
        await add_deployment_coro

    if print_starting_message:
        _display_serve_start_message(*args)

    # Poll for scheduled runs until the surrounding task is cancelled.
    await runner.start()
def _display_serve_start_message(*args: "RunnerDeployment"):
    """Print a rich startup banner listing the served deployments."""
    from rich.console import Console, Group
    from rich.table import Table

    help_message_top = (
        "[green]Your deployments are being served and polling for scheduled runs!\n[/]"
    )

    table = Table(title="Deployments", show_header=False)

    table.add_column(style="blue", no_wrap=True)

    for deployment in args:
        table.add_row(f"{deployment.flow_name}/{deployment.name}")

    help_message_bottom = (
        "\nTo trigger any of these deployments, use the"
        " following command:\n[blue]\n\t$ prefect deployment run"
        " [DEPLOYMENT_NAME]\n[/]"
    )
    if PREFECT_UI_URL:
        help_message_bottom += (
            "\nYou can also trigger your deployments via the Prefect UI:"
            f" [blue]{PREFECT_UI_URL.value()}/deployments[/]\n"
        )

    console = Console()
    # soft_wrap keeps the command snippet on one line in narrow terminals.
    console.print(Group(help_message_top, table, help_message_bottom), soft_wrap=True)
@client_injector
async def load_flow_from_flow_run(
    client: "PrefectClient",
    flow_run: "FlowRun",
    ignore_storage: bool = False,
    storage_base_path: Optional[str] = None,
    use_placeholder_flow: bool = True,
) -> Flow[..., Any]:
    """
    Load a flow from the location/script provided in a deployment's storage document.

    If `ignore_storage=True` is provided, no pull from remote storage occurs. This flag
    is largely for testing, and assumes the flow is already available locally.
    """
    if flow_run.deployment_id is None:
        raise ValueError("Flow run does not have an associated deployment")

    deployment = await client.read_deployment(flow_run.deployment_id)

    if deployment.entrypoint is None:
        raise ValueError(
            f"Deployment {deployment.id} does not have an entrypoint and can not be run."
        )

    run_logger = flow_run_logger(flow_run)

    runner_storage_base_path = storage_base_path or os.environ.get(
        "PREFECT__STORAGE_BASE_PATH"
    )

    # If there's no colon, assume it's a module path
    if ":" not in deployment.entrypoint:
        run_logger.debug(
            f"Importing flow code from module path {deployment.entrypoint}"
        )
        flow = await run_sync_in_worker_thread(
            load_flow_from_entrypoint,
            deployment.entrypoint,
            use_placeholder_flow=use_placeholder_flow,
        )
        return flow

    if not ignore_storage and not deployment.pull_steps:
        # Legacy storage-block path: download the flow code before importing.
        sys.path.insert(0, ".")
        if deployment.storage_document_id:
            storage_document = await client.read_block_document(
                deployment.storage_document_id
            )
            from prefect.blocks.core import Block

            storage_block = Block._from_block_document(storage_document)
        else:
            basepath = deployment.path
            if runner_storage_base_path:
                # Substitute the runner's base path placeholder, if present.
                basepath = str(basepath).replace(
                    "$STORAGE_BASE_PATH", runner_storage_base_path
                )
            storage_block = LocalFileSystem(basepath=basepath)

        from_path = (
            str(deployment.path).replace("$STORAGE_BASE_PATH", runner_storage_base_path)
            if runner_storage_base_path and deployment.path
            else deployment.path
        )
        run_logger.info(f"Downloading flow code from storage at {from_path!r}")
        await storage_block.get_directory(from_path=from_path, local_path=".")

    if deployment.pull_steps:
        run_logger.debug(
            f"Running {len(deployment.pull_steps)} deployment pull step(s)"
        )

        from prefect.deployments.steps.core import StepExecutionError, run_steps

        try:
            output = await run_steps(
                deployment.pull_steps,
                print_function=run_logger.info,
                deployment=deployment,
                flow_run=flow_run,
                logger=run_logger,
            )
        except StepExecutionError as e:
            # Log the underlying cause (when chained) for a cleaner message.
            e = e.__cause__ or e
            run_logger.error(str(e))
            raise

        if output.get("directory"):
            run_logger.debug(f"Changing working directory to {output['directory']!r}")
            os.chdir(output["directory"])

    import_path = relative_path_to_current_platform(deployment.entrypoint)
    run_logger.debug(f"Importing flow code from '{import_path}'")

    try:
        flow = await run_sync_in_worker_thread(
            load_flow_from_entrypoint,
            str(import_path),
            use_placeholder_flow=use_placeholder_flow,
        )
    except MissingFlowError:
        # The entrypoint may name a plain function; wrap it as a flow.
        flow = await run_sync_in_worker_thread(
            load_function_and_convert_to_flow,
            str(import_path),
        )

    return flow
def load_placeholder_flow(entrypoint: str, raises: Exception) -> Flow[P, Any]:
    """
    Load a placeholder flow that is initialized with the same arguments as the
    flow specified in the entrypoint. If called the flow will raise `raises`.

    This is useful when a flow can't be loaded due to missing dependencies or
    other issues but the base metadata defining the flow is still needed.

    Args:
        entrypoint: a string in the format `<path_to_script>:<flow_func_name>`
            or a module path to a flow function
        raises: an exception to raise when the flow is called
    """

    def _base_placeholder():
        raise raises

    # Match the (a)sync-ness of the real flow so awaiting callers still get
    # the exception rather than a coroutine-misuse error.
    def sync_placeholder_flow(*args: "P.args", **kwargs: "P.kwargs"):
        _base_placeholder()

    async def async_placeholder_flow(*args: "P.args", **kwargs: "P.kwargs"):
        _base_placeholder()

    placeholder_flow = (
        async_placeholder_flow
        if is_entrypoint_async(entrypoint)
        else sync_placeholder_flow
    )

    # Reuse the real flow's declared arguments (name, description, ...) as
    # parsed from the entrypoint's AST, swapping in the placeholder callable.
    arguments = load_flow_arguments_from_entrypoint(entrypoint)
    arguments["fn"] = placeholder_flow

    return Flow(**arguments)
def safe_load_flow_from_entrypoint(entrypoint: str) -> Optional[Flow[P, Any]]:
"""
Safely load a Prefect flow from an entrypoint string. Returns None if loading fails.
Args:
entrypoint (str): A string identifying the flow to load. Can be in one of the following formats:
- `<path_to_script>:<flow_func_name>`
- `<path_to_script>:<class_name>.<flow_method_name>`
- `<module_path>.<flow_func_name>`
Returns:
Optional[Flow]: The loaded Prefect flow object, or None if loading fails due to errors
(e.g. unresolved dependencies, syntax errors, or missing objects).
"""
func_or_cls_def, source_code, parts = _entrypoint_definition_and_source(entrypoint)
path = entrypoint.rsplit(":", maxsplit=1)[0] if ":" in entrypoint else None
namespace = safe_load_namespace(source_code, filepath=path)
if parts[0] not in namespace:
# If the object is not in the namespace, it may be due to missing dependencies
# in annotations or default values. We will attempt to sanitize them by removing
# anything that cannot be compiled, and then recompile the function or class.
if isinstance(func_or_cls_def, (ast.FunctionDef, ast.AsyncFunctionDef)):
return _sanitize_and_load_flow(func_or_cls_def, namespace)
elif (
isinstance(func_or_cls_def, ast.ClassDef)
and len(parts) >= 2
and func_or_cls_def.name == parts[0]
):
method_name = parts[1]
method_def = next(
(
stmt
for stmt in func_or_cls_def.body
if isinstance(stmt, (ast.FunctionDef, ast.AsyncFunctionDef))
and stmt.name == method_name
),
None,
)
if method_def is not None:
return _sanitize_and_load_flow(method_def, namespace)
else:
return None
obj = namespace.get(parts[0])
for part in parts[1:]:
obj = getattr(obj, part, None)
if obj is None:
return None
return obj
def _sanitize_and_load_flow(
func_def: Union[ast.FunctionDef, ast.AsyncFunctionDef], namespace: dict[str, Any]
) -> Optional[Flow[P, Any]]:
"""
Attempt to load a flow from the function definition after sanitizing the annotations
and defaults that can't be compiled.
Args:
func_def: the function definition
namespace: the namespace to load the function into
Returns:
The loaded function or None if the function can't be loaded
after sanitizing the annotations and defaults.
"""
args = func_def.args.posonlyargs + func_def.args.args + func_def.args.kwonlyargs
if func_def.args.vararg:
args.append(func_def.args.vararg)
if func_def.args.kwarg:
args.append(func_def.args.kwarg)
# Remove annotations that can't be compiled
for arg in args:
if arg.annotation is not None:
try:
code = compile(
ast.Expression(arg.annotation),
filename="<ast>",
mode="eval",
)
exec(code, namespace)
except Exception as e:
logger.debug(
"Failed to evaluate annotation for argument %s due to the following error. Ignoring annotation.",
arg.arg,
exc_info=e,
)
arg.annotation = None
# Remove defaults that can't be compiled
new_defaults: list[Any] = []
for default in func_def.args.defaults:
try:
code = compile(ast.Expression(default), "<ast>", "eval")
exec(code, namespace)
new_defaults.append(default)
except Exception as e:
logger.debug(
"Failed to evaluate default value %s due to the following error. Ignoring default.",
default,
exc_info=e,
)
new_defaults.append(
ast.Constant(
value=None, lineno=default.lineno, col_offset=default.col_offset
)
)
func_def.args.defaults = new_defaults
# Remove kw_defaults that can't be compiled
new_kw_defaults: list[Any] = []
for default in func_def.args.kw_defaults:
if default is not None:
try:
code = compile(ast.Expression(default), "<ast>", "eval")
exec(code, namespace)
new_kw_defaults.append(default)
except Exception as e:
logger.debug(
"Failed to evaluate default value %s due to the following error. Ignoring default.",
default,
exc_info=e,
)
new_kw_defaults.append(
ast.Constant(
value=None,
lineno=default.lineno,
col_offset=default.col_offset,
)
)
else:
new_kw_defaults.append(
ast.Constant(
value=None,
lineno=func_def.lineno,
col_offset=func_def.col_offset,
)
)
func_def.args.kw_defaults = new_kw_defaults
if func_def.returns is not None:
try:
code = compile(
ast.Expression(func_def.returns), filename="<ast>", mode="eval"
)
exec(code, namespace)
except Exception as e:
logger.debug(
"Failed to evaluate return annotation due to the following error. Ignoring annotation.",
exc_info=e,
)
func_def.returns = None
# Attempt to compile the function without annotations and defaults that
# can't be compiled
try:
code = compile(
ast.Module(body=[func_def], type_ignores=[]),
filename="<ast>",
mode="exec",
)
exec(code, namespace)
except Exception as e:
logger.debug("Failed to compile: %s", e)
else:
return namespace.get(func_def.name)
def load_flow_arguments_from_entrypoint(
entrypoint: str, arguments: Optional[Union[list[str], set[str]]] = None
) -> dict[str, Any]:
"""
Extract flow arguments from an entrypoint string.
Loads the source code of the entrypoint and extracts the flow arguments
from the `flow` decorator.
Args:
entrypoint: a string in the format `<path_to_script>:<flow_func_name>`
or a module path to a flow function
"""
func_def, source_code, _ = _entrypoint_definition_and_source(entrypoint)
path = None
if ":" in entrypoint:
path = entrypoint.rsplit(":")[0]
if arguments is None:
# If no arguments are provided default to known arguments that are of
# built-in types.
arguments = {
"name",
"version",
"retries",
"retry_delay_seconds",
"description",
"timeout_seconds",
"validate_parameters",
"persist_result",
"cache_result_in_memory",
"log_prints",
}
result: dict[str, Any] = {}
for decorator in func_def.decorator_list:
if (
isinstance(decorator, ast.Call)
and getattr(decorator.func, "id", "") == "flow"
):
for keyword in decorator.keywords:
if keyword.arg not in arguments:
continue
if isinstance(keyword.value, ast.Constant):
# Use the string value of the argument
result[cast(str, keyword.arg)] = str(keyword.value.value)
continue
# if the arg value is not a raw str (i.e. a variable or expression),
# then attempt to evaluate it
namespace = safe_load_namespace(source_code, filepath=path)
literal_arg_value = ast.get_source_segment(source_code, keyword.value)
cleaned_value = (
literal_arg_value.replace("\n", "") if literal_arg_value else ""
)
try:
evaluated_value = eval(cleaned_value, namespace) # type: ignore
result[cast(str, keyword.arg)] = str(evaluated_value)
except Exception as e:
logger.info(
"Failed to parse @flow argument: `%s=%s` due to the following error. Ignoring and falling back to default behavior.",
keyword.arg,
literal_arg_value,
exc_info=e,
)
# ignore the decorator arg and fallback to default behavior
continue
if "name" in arguments and "name" not in result:
# If no matching decorator or keyword argument for `name' is found
# fallback to the function name.
result["name"] = func_def.name.replace("_", "-")
return result
def is_entrypoint_async(entrypoint: str) -> bool:
"""
Determine if the function specified in the entrypoint is asynchronous.
Args:
entrypoint: A string in the format `<path_to_script>:<func_name>` or
a module path to a function.
Returns:
True if the function is asynchronous, False otherwise.
"""
func_def, _, _ = _entrypoint_definition_and_source(entrypoint)
return isinstance(func_def, ast.AsyncFunctionDef)
def _entrypoint_definition_and_source(
entrypoint: str,
) -> Tuple[Union[ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef], str, List[str]]:
"""
Resolves and parses the source definition of a given entrypoint.
The entrypoint can be provided in one of the following formats:
- '<path_to_script>:<flow_func_name>'
- '<path_to_script>:<class_name>.<flow_method_name>'
- '<module_path.to.flow_function>'
Returns:
A tuple containing:
- The AST node (FunctionDef, AsyncFunctionDef, or ClassDef) of the base object.
- The full source code of the file or module as a string.
- A list of attribute access parts from the object path (e.g., ['MyFlowClass', 'run']).
Raises:
ValueError: If the module or target object cannot be found.
"""
if ":" in entrypoint:
path, object_path = entrypoint.rsplit(":", maxsplit=1)
source_code = Path(path).read_text()
else:
path, object_path = entrypoint.rsplit(".", maxsplit=1)
spec = importlib.util.find_spec(path)
if not spec or not spec.origin:
raise ValueError(f"Could not find module {path!r}")
source_code = Path(spec.origin).read_text()
parsed_code = ast.parse(source_code)
parts = object_path.split(".")
base_name = parts[0]
base_def = next(
(
node
for node in ast.walk(parsed_code)
if isinstance(
node,
(
ast.FunctionDef,
ast.AsyncFunctionDef,
ast.ClassDef, # flow can be staticmethod/classmethod
),
)
and node.name == base_name
),
None,
)
if not base_def:
raise ValueError(f"Could not find object {base_name!r} in {path!r}")
return base_def, source_code, parts
|
InfrastructureBoundFlow
|
python
|
zarr-developers__zarr-python
|
src/zarr/core/chunk_grids.py
|
{
"start": 5270,
"end": 10439
}
|
class ____(ChunkGrid):
chunk_shape: tuple[int, ...]
def __init__(self, *, chunk_shape: ShapeLike) -> None:
chunk_shape_parsed = parse_shapelike(chunk_shape)
object.__setattr__(self, "chunk_shape", chunk_shape_parsed)
@classmethod
def _from_dict(cls, data: dict[str, JSON] | NamedConfig[str, Any]) -> Self:
_, configuration_parsed = parse_named_configuration(data, "regular")
return cls(**configuration_parsed) # type: ignore[arg-type]
def to_dict(self) -> dict[str, JSON]:
return {"name": "regular", "configuration": {"chunk_shape": tuple(self.chunk_shape)}}
def all_chunk_coords(self, array_shape: tuple[int, ...]) -> Iterator[tuple[int, ...]]:
return itertools.product(
*(range(ceildiv(s, c)) for s, c in zip(array_shape, self.chunk_shape, strict=False))
)
def get_nchunks(self, array_shape: tuple[int, ...]) -> int:
return reduce(
operator.mul,
itertools.starmap(ceildiv, zip(array_shape, self.chunk_shape, strict=True)),
1,
)
def _guess_num_chunks_per_axis_shard(
chunk_shape: tuple[int, ...], item_size: int, max_bytes: int, array_shape: tuple[int, ...]
) -> int:
"""Generate the number of chunks per axis to hit a target max byte size for a shard.
For example, for a (2,2,2) chunk size and item size 4, maximum bytes of 256 would return 2.
In other words the shard would be a (2,2,2) grid of (2,2,2) chunks
i.e., prod(chunk_shape) * (returned_val * len(chunk_shape)) * item_size = 256 bytes.
Parameters
----------
chunk_shape
The shape of the (inner) chunks.
item_size
The item size of the data i.e., 2 for uint16.
max_bytes
The maximum number of bytes per shard to allow.
array_shape
The shape of the underlying array.
Returns
-------
The number of chunks per axis.
"""
bytes_per_chunk = np.prod(chunk_shape) * item_size
if max_bytes < bytes_per_chunk:
return 1
num_axes = len(chunk_shape)
chunks_per_shard = 1
# First check for byte size, second check to make sure we don't go bigger than the array shape
while (bytes_per_chunk * ((chunks_per_shard + 1) ** num_axes)) <= max_bytes and all(
c * (chunks_per_shard + 1) <= a for c, a in zip(chunk_shape, array_shape, strict=True)
):
chunks_per_shard += 1
return chunks_per_shard
def _auto_partition(
*,
array_shape: tuple[int, ...],
chunk_shape: tuple[int, ...] | Literal["auto"],
shard_shape: ShardsLike | None,
item_size: int,
) -> tuple[tuple[int, ...] | None, tuple[int, ...]]:
"""
Automatically determine the shard shape and chunk shape for an array, given the shape and dtype of the array.
If `shard_shape` is `None` and the chunk_shape is "auto", the chunks will be set heuristically based
on the dtype and shape of the array.
If `shard_shape` is "auto", then the shard shape will be set heuristically from the dtype and shape
of the array; if the `chunk_shape` is also "auto", then the chunks will be set heuristically as well,
given the dtype and shard shape. Otherwise, the chunks will be returned as-is.
"""
if shard_shape is None:
_shards_out: None | tuple[int, ...] = None
if chunk_shape == "auto":
_chunks_out = _guess_chunks(array_shape, item_size)
else:
_chunks_out = chunk_shape
else:
if chunk_shape == "auto":
# aim for a 1MiB chunk
_chunks_out = _guess_chunks(array_shape, item_size, max_bytes=1048576)
else:
_chunks_out = chunk_shape
if shard_shape == "auto":
warnings.warn(
"Automatic shard shape inference is experimental and may change without notice.",
ZarrUserWarning,
stacklevel=2,
)
_shards_out = ()
target_shard_size_bytes = zarr.config.get("array.target_shard_size_bytes", None)
num_chunks_per_shard_axis = (
_guess_num_chunks_per_axis_shard(
chunk_shape=_chunks_out,
item_size=item_size,
max_bytes=target_shard_size_bytes,
array_shape=array_shape,
)
if (has_auto_shard := (target_shard_size_bytes is not None))
else 2
)
for a_shape, c_shape in zip(array_shape, _chunks_out, strict=True):
# The previous heuristic was `a_shape // c_shape > 8` and now, with target_shard_size_bytes, we only check that the shard size is less than the array size.
can_shard_axis = a_shape // c_shape > 8 if not has_auto_shard else True
if can_shard_axis:
_shards_out += (c_shape * num_chunks_per_shard_axis,)
else:
_shards_out += (c_shape,)
elif isinstance(shard_shape, dict):
_shards_out = tuple(shard_shape["shape"])
else:
_shards_out = shard_shape
return _shards_out, _chunks_out
|
RegularChunkGrid
|
python
|
coleifer__peewee
|
tests/regressions.py
|
{
"start": 27006,
"end": 27121
}
|
class ____(TestModel):
key = TextField()
value = IntegerField()
rs = ForeignKeyField(RS, backref='rds')
|
RD
|
python
|
scikit-image__scikit-image
|
benchmarks/benchmark_restoration.py
|
{
"start": 573,
"end": 3535
}
|
class ____:
"""Benchmark for restoration routines in scikit image."""
timeout = 120
def setup(self):
nz = 32
self.volume_f64 = (
np.stack(
[
camera()[::2, ::2],
]
* nz,
axis=-1,
).astype(float)
/ 255
)
self.sigma = 0.05
self.volume_f64 += self.sigma * np.random.randn(*self.volume_f64.shape)
self.volume_f32 = self.volume_f64.astype(np.float32)
def peakmem_setup(self):
pass
def time_denoise_nl_means_f64(self):
restoration.denoise_nl_means(
self.volume_f64,
patch_size=3,
patch_distance=2,
sigma=self.sigma,
h=0.7 * self.sigma,
fast_mode=False,
**_channel_kwarg(False),
)
def time_denoise_nl_means_f32(self):
restoration.denoise_nl_means(
self.volume_f32,
patch_size=3,
patch_distance=2,
sigma=self.sigma,
h=0.7 * self.sigma,
fast_mode=False,
**_channel_kwarg(False),
)
def time_denoise_nl_means_fast_f64(self):
restoration.denoise_nl_means(
self.volume_f64,
patch_size=3,
patch_distance=2,
sigma=self.sigma,
h=0.7 * self.sigma,
fast_mode=True,
**_channel_kwarg(False),
)
def time_denoise_nl_means_fast_f32(self):
restoration.denoise_nl_means(
self.volume_f32,
patch_size=3,
patch_distance=2,
sigma=self.sigma,
h=0.7 * self.sigma,
fast_mode=True,
)
def peakmem_denoise_nl_means_f64(self):
restoration.denoise_nl_means(
self.volume_f64,
patch_size=3,
patch_distance=2,
sigma=self.sigma,
h=0.7 * self.sigma,
fast_mode=False,
**_channel_kwarg(False),
)
def peakmem_denoise_nl_means_f32(self):
restoration.denoise_nl_means(
self.volume_f32,
patch_size=3,
patch_distance=2,
sigma=self.sigma,
h=0.7 * self.sigma,
fast_mode=False,
)
def peakmem_denoise_nl_means_fast_f64(self):
restoration.denoise_nl_means(
self.volume_f64,
patch_size=3,
patch_distance=2,
sigma=self.sigma,
h=0.7 * self.sigma,
fast_mode=True,
**_channel_kwarg(False),
)
def peakmem_denoise_nl_means_fast_f32(self):
restoration.denoise_nl_means(
self.volume_f32,
patch_size=3,
patch_distance=2,
sigma=self.sigma,
h=0.7 * self.sigma,
fast_mode=True,
**_channel_kwarg(False),
)
|
RestorationSuite
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_endpoint_conditions.py
|
{
"start": 383,
"end": 6467
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'ready': 'bool',
'serving': 'bool',
'terminating': 'bool'
}
attribute_map = {
'ready': 'ready',
'serving': 'serving',
'terminating': 'terminating'
}
def __init__(self, ready=None, serving=None, terminating=None, local_vars_configuration=None): # noqa: E501
"""V1EndpointConditions - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._ready = None
self._serving = None
self._terminating = None
self.discriminator = None
if ready is not None:
self.ready = ready
if serving is not None:
self.serving = serving
if terminating is not None:
self.terminating = terminating
@property
def ready(self):
"""Gets the ready of this V1EndpointConditions. # noqa: E501
ready indicates that this endpoint is ready to receive traffic, according to whatever system is managing the endpoint. A nil value should be interpreted as \"true\". In general, an endpoint should be marked ready if it is serving and not terminating, though this can be overridden in some cases, such as when the associated Service has set the publishNotReadyAddresses flag. # noqa: E501
:return: The ready of this V1EndpointConditions. # noqa: E501
:rtype: bool
"""
return self._ready
@ready.setter
def ready(self, ready):
"""Sets the ready of this V1EndpointConditions.
ready indicates that this endpoint is ready to receive traffic, according to whatever system is managing the endpoint. A nil value should be interpreted as \"true\". In general, an endpoint should be marked ready if it is serving and not terminating, though this can be overridden in some cases, such as when the associated Service has set the publishNotReadyAddresses flag. # noqa: E501
:param ready: The ready of this V1EndpointConditions. # noqa: E501
:type: bool
"""
self._ready = ready
@property
def serving(self):
"""Gets the serving of this V1EndpointConditions. # noqa: E501
serving indicates that this endpoint is able to receive traffic, according to whatever system is managing the endpoint. For endpoints backed by pods, the EndpointSlice controller will mark the endpoint as serving if the pod's Ready condition is True. A nil value should be interpreted as \"true\". # noqa: E501
:return: The serving of this V1EndpointConditions. # noqa: E501
:rtype: bool
"""
return self._serving
@serving.setter
def serving(self, serving):
"""Sets the serving of this V1EndpointConditions.
serving indicates that this endpoint is able to receive traffic, according to whatever system is managing the endpoint. For endpoints backed by pods, the EndpointSlice controller will mark the endpoint as serving if the pod's Ready condition is True. A nil value should be interpreted as \"true\". # noqa: E501
:param serving: The serving of this V1EndpointConditions. # noqa: E501
:type: bool
"""
self._serving = serving
@property
def terminating(self):
"""Gets the terminating of this V1EndpointConditions. # noqa: E501
terminating indicates that this endpoint is terminating. A nil value should be interpreted as \"false\". # noqa: E501
:return: The terminating of this V1EndpointConditions. # noqa: E501
:rtype: bool
"""
return self._terminating
@terminating.setter
def terminating(self, terminating):
"""Sets the terminating of this V1EndpointConditions.
terminating indicates that this endpoint is terminating. A nil value should be interpreted as \"false\". # noqa: E501
:param terminating: The terminating of this V1EndpointConditions. # noqa: E501
:type: bool
"""
self._terminating = terminating
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1EndpointConditions):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1EndpointConditions):
return True
return self.to_dict() != other.to_dict()
|
V1EndpointConditions
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-total-reward-using-operations-i.py
|
{
"start": 526,
"end": 896
}
|
class ____(object):
def maxTotalReward(self, rewardValues):
"""
:type rewardValues: List[int]
:rtype: int
"""
dp = 1
for v in sorted(set(rewardValues)):
x = dp&((1<<v)-1)
dp |= x<<v
return dp.bit_length()-1
# Time: O(nlogn + r^2), r = max(rewardValues)
# Space: O(r)
# sort, dp
|
Solution2
|
python
|
pyinstaller__pyinstaller
|
bootloader/waflib/Tools/lua.py
|
{
"start": 490,
"end": 641
}
|
class ____(Task.Task):
run_str = '${LUAC} -s -o ${TGT} ${SRC}'
color = 'PINK'
def configure(conf):
conf.find_program('luac', var='LUAC')
|
luac
|
python
|
sphinx-doc__sphinx
|
sphinx/util/logging.py
|
{
"start": 6892,
"end": 11073
}
|
class ____(logging.handlers.BufferingHandler):
"""Handler buffering all logs."""
buffer: list[logging.LogRecord]
def __init__(self) -> None:
super().__init__(-1)
def shouldFlush(self, record: logging.LogRecord) -> bool:
return False # never flush
def flush(self) -> None:
# suppress any flushes triggered by importing packages that flush
# all handlers at initialization time
pass
def flushTo(self, logger: logging.Logger) -> None:
self.acquire()
try:
for record in self.buffer:
logger.handle(record)
self.buffer = []
finally:
self.release()
def clear(self) -> list[logging.LogRecord]:
buffer, self.buffer = self.buffer, []
return buffer
@contextmanager
def pending_warnings() -> Iterator[logging.Handler]:
"""Context manager to postpone logging warnings temporarily.
Similar to :func:`pending_logging`.
"""
logger = logging.getLogger(NAMESPACE)
memhandler = MemoryHandler()
memhandler.setLevel(logging.WARNING)
try:
handlers = []
for handler in logger.handlers[:]:
if isinstance(handler, WarningStreamHandler):
logger.removeHandler(handler)
handlers.append(handler)
logger.addHandler(memhandler)
yield memhandler
finally:
logger.removeHandler(memhandler)
for handler in handlers:
logger.addHandler(handler)
memhandler.flushTo(logger)
@contextmanager
def suppress_logging() -> Iterator[MemoryHandler]:
"""Context manager to suppress logging all logs temporarily.
For example::
>>> with suppress_logging():
>>> logger.warning('Warning message!') # suppressed
>>> some_long_process()
>>>
"""
logger = logging.getLogger(NAMESPACE)
memhandler = MemoryHandler()
try:
handlers = []
for handler in logger.handlers[:]:
logger.removeHandler(handler)
handlers.append(handler)
logger.addHandler(memhandler)
yield memhandler
finally:
logger.removeHandler(memhandler)
for handler in handlers:
logger.addHandler(handler)
@contextmanager
def pending_logging() -> Iterator[MemoryHandler]:
"""Context manager to postpone logging all logs temporarily.
For example::
>>> with pending_logging():
>>> logger.warning('Warning message!') # not flushed yet
>>> some_long_process()
>>>
Warning message! # the warning is flushed here
"""
logger = logging.getLogger(NAMESPACE)
try:
with suppress_logging() as memhandler:
yield memhandler
finally:
memhandler.flushTo(logger)
skip_warningiserror = nullcontext # Deprecate in Sphinx 10
@contextmanager
def prefixed_warnings(prefix: str) -> Iterator[None]:
"""Context manager to prepend prefix to all warning log records temporarily.
For example::
>>> with prefixed_warnings("prefix:"):
>>> logger.warning('Warning message!') # => prefix: Warning message!
.. versionadded:: 2.0
"""
logger = logging.getLogger(NAMESPACE)
warning_handler = None
for handler in logger.handlers:
if isinstance(handler, WarningStreamHandler):
warning_handler = handler
break
else:
# warning stream not found
yield
return
prefix_filter = None
for _filter in warning_handler.filters:
if isinstance(_filter, MessagePrefixFilter):
prefix_filter = _filter
break
if prefix_filter:
# already prefixed
try:
previous = prefix_filter.prefix
prefix_filter.prefix = prefix
yield
finally:
prefix_filter.prefix = previous
else:
# not prefixed yet
prefix_filter = MessagePrefixFilter(prefix)
try:
warning_handler.addFilter(prefix_filter)
yield
finally:
warning_handler.removeFilter(prefix_filter)
|
MemoryHandler
|
python
|
django__django
|
django/db/backends/dummy/base.py
|
{
"start": 1407,
"end": 2217
}
|
class ____(BaseDatabaseWrapper):
operators = {}
# Override the base class implementations with null
# implementations. Anything that tries to actually
# do something raises complain; anything that tries
# to rollback or undo something raises ignore.
_cursor = complain
ensure_connection = complain
_commit = complain
_rollback = ignore
_close = ignore
_savepoint = ignore
_savepoint_commit = complain
_savepoint_rollback = ignore
_set_autocommit = complain
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DummyDatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
def is_usable(self):
return True
|
DatabaseWrapper
|
python
|
getsentry__sentry
|
src/sentry/search/eap/columns.py
|
{
"start": 11694,
"end": 13408
}
|
class ____(FunctionDefinition):
internal_function: Function.ValueType
"""
An optional function that takes in the resolved argument and returns the attribute key to aggregate on.
If not provided, assumes the aggregate is on the first argument.
"""
attribute_resolver: Callable[[ResolvedArgument], AttributeKey] | None = None
def resolve(
self,
alias: str,
search_type: constants.SearchType,
resolved_arguments: ResolvedArguments,
snuba_params: SnubaParams,
query_result_cache: dict[str, EAPResponse],
search_config: SearchResolverConfig,
) -> ResolvedAggregate:
if len(resolved_arguments) > 1:
raise InvalidSearchQuery(
f"Aggregates expects exactly 1 argument, got {len(resolved_arguments)}"
)
resolved_attribute = None
if len(resolved_arguments) == 1:
if not isinstance(resolved_arguments[0], AttributeKey):
raise InvalidSearchQuery("Aggregates accept attribute keys only")
resolved_attribute = resolved_arguments[0]
if self.attribute_resolver is not None:
resolved_attribute = self.attribute_resolver(resolved_attribute)
return ResolvedAggregate(
public_alias=alias,
internal_name=self.internal_function,
search_type=search_type,
internal_type=self.internal_type,
processor=self.processor,
extrapolation_mode=resolve_extrapolation_mode(
search_config, self.extrapolation_mode_override
),
argument=resolved_attribute,
)
@dataclass(kw_only=True)
|
AggregateDefinition
|
python
|
pypa__packaging
|
src/packaging/metadata.py
|
{
"start": 9608,
"end": 10135
}
|
class ____(email.policy.EmailPolicy):
"""
This is :class:`email.policy.EmailPolicy`, but with a simple ``header_store_parse``
implementation that handles multi-line values, and some nice defaults.
"""
utf8 = True
mangle_from_ = False
max_line_length = 0
def header_store_parse(self, name: str, value: str) -> tuple[str, str]:
size = len(name) + 2
value = value.replace("\n", "\n" + " " * size)
return (name, value)
# This class is for writing RFC822 messages
|
RFC822Policy
|
python
|
urllib3__urllib3
|
src/urllib3/response.py
|
{
"start": 2356,
"end": 2446
}
|
class ____:
FIRST_MEMBER = 0
OTHER_MEMBERS = 1
SWALLOW_DATA = 2
|
GzipDecoderState
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/initializers/initializers_v1.py
|
{
"start": 2639,
"end": 2877
}
|
class ____(init_ops.VarianceScaling):
def __init__(self, seed=None):
super(HeUniform, self).__init__(
scale=2., mode='fan_in', distribution='uniform', seed=seed)
def get_config(self):
return {'seed': self.seed}
|
HeUniform
|
python
|
walkccc__LeetCode
|
solutions/1996. The Number of Weak Characters in the Game/1996-2.py
|
{
"start": 0,
"end": 581
}
|
class ____:
def numberOfWeakCharacters(self, properties: list[list[int]]) -> int:
ans = 0
maxAttack = max(attack for attack, _ in properties)
# maxDefenses[i] := the maximum defense for the i-th attack
maxDefenses = [0] * (maxAttack + 2)
for attack, defense in properties:
maxDefenses[attack] = max(maxDefenses[attack], defense)
for i in range(maxAttack, 0, -1):
maxDefenses[i] = max(maxDefenses[i], maxDefenses[i + 1])
for attack, defense in properties:
if maxDefenses[attack + 1] > defense:
ans += 1
return ans
|
Solution
|
python
|
PrefectHQ__prefect
|
src/prefect/workers/process.py
|
{
"start": 2818,
"end": 3404
}
|
class ____(BaseVariables):
stream_output: bool = Field(
default=True,
description=(
"If enabled, workers will stream output from flow run processes to "
"local standard output."
),
)
working_dir: Optional[Path] = Field(
default=None,
title="Working Directory",
description=(
"If provided, workers will open flow run processes within the "
"specified path as the working directory. Otherwise, a temporary "
"directory will be created."
),
)
|
ProcessVariables
|
python
|
pyparsing__pyparsing
|
examples/matchPreviousDemo.py
|
{
"start": 103,
"end": 556
}
|
class ____
...
end d;"""
identifier = Word(alphas)
classIdent = identifier("classname") # note that this also makes a copy of identifier
classHead = "class" + classIdent
classBody = "..."
classEnd = "end" + match_previous_literal(classIdent) + ";"
classDefn = classHead + classBody + classEnd
# use this form to catch syntax error
# classDefn = classHead + classBody - classEnd
for tokens in classDefn.search_string(src):
print(tokens.classname)
|
c
|
python
|
google__jax
|
tests/util_test.py
|
{
"start": 9139,
"end": 10942
}
|
class ____(jtu.JaxTestCase):
def test_safe_map(self):
def unreachable(*args, **kwargs):
raise RuntimeError("unreachable")
self.assertEqual([], util.safe_map(unreachable, []))
self.assertEqual([], util.safe_map(unreachable, (), []))
self.assertEqual([], util.safe_map(unreachable, [], [], []))
self.assertEqual([], util.safe_map(unreachable, [], iter([]), [], []))
def double(x):
return x * 2
self.assertEqual([14], util.safe_map(double, (7,)))
self.assertEqual([0, 2, 4, 6], util.safe_map(double, range(4)))
def make_tuple(*args):
return args
self.assertEqual(
[(0, 4), (1, 5), (2, 6), (3, 7)],
util.safe_map(make_tuple, range(4), range(4, 8)),
)
def test_safe_map_errors(self):
with self.assertRaisesRegex(
TypeError, "safe_map requires at least 2 arguments"
):
util.safe_map()
with self.assertRaisesRegex(
TypeError, "safe_map requires at least 2 arguments"
):
util.safe_map(lambda x: x)
with self.assertRaisesRegex(TypeError, "'int' object is not callable"):
util.safe_map(7, range(6))
def error(*args, **kwargs):
raise RuntimeError("hello")
with self.assertRaisesRegex(RuntimeError, "hello"):
util.safe_map(error, range(6))
with self.assertRaisesRegex(
ValueError, r"safe_map\(\) argument 2 is longer than argument 1"
):
util.safe_map(operator.add, range(3), range(4))
with self.assertRaisesRegex(
ValueError, r"safe_map\(\) argument 2 is shorter than argument 1"
):
util.safe_map(operator.add, range(7), range(2))
with self.assertRaisesRegex(
ValueError, r"safe_map\(\) argument 2 is longer than argument 1"
):
util.safe_map(operator.add, (), range(3))
|
SafeMapTest
|
python
|
MongoEngine__mongoengine
|
tests/document/test_validation.py
|
{
"start": 130,
"end": 6207
}
|
class ____(MongoDBTestCase):
def test_to_dict(self):
"""Ensure a ValidationError handles error to_dict correctly."""
error = ValidationError("root")
assert error.to_dict() == {}
# 1st level error schema
error.errors = {"1st": ValidationError("bad 1st")}
assert "1st" in error.to_dict()
assert error.to_dict()["1st"] == "bad 1st"
# 2nd level error schema
error.errors = {
"1st": ValidationError(
"bad 1st", errors={"2nd": ValidationError("bad 2nd")}
)
}
assert "1st" in error.to_dict()
assert isinstance(error.to_dict()["1st"], dict)
assert "2nd" in error.to_dict()["1st"]
assert error.to_dict()["1st"]["2nd"] == "bad 2nd"
# moar levels
error.errors = {
"1st": ValidationError(
"bad 1st",
errors={
"2nd": ValidationError(
"bad 2nd",
errors={
"3rd": ValidationError(
"bad 3rd", errors={"4th": ValidationError("Inception")}
)
},
)
},
)
}
assert "1st" in error.to_dict()
assert "2nd" in error.to_dict()["1st"]
assert "3rd" in error.to_dict()["1st"]["2nd"]
assert "4th" in error.to_dict()["1st"]["2nd"]["3rd"]
assert error.to_dict()["1st"]["2nd"]["3rd"]["4th"] == "Inception"
assert error.message == "root(2nd.3rd.4th.Inception: ['1st'])"
def test_model_validation(self):
class User(Document):
username = StringField(primary_key=True)
name = StringField(required=True)
try:
User().validate()
except ValidationError as e:
assert "User:None" in e.message
assert e.to_dict() == {
"username": "Field is required",
"name": "Field is required",
}
user = User(username="RossC0", name="Ross").save()
user.name = None
try:
user.save()
except ValidationError as e:
assert "User:RossC0" in e.message
assert e.to_dict() == {"name": "Field is required"}
def test_fields_rewrite(self):
class BasePerson(Document):
name = StringField()
age = IntField()
meta = {"abstract": True}
class Person(BasePerson):
name = StringField(required=True)
p = Person(age=15)
with pytest.raises(ValidationError):
p.validate()
def test_embedded_document_validation(self):
"""Ensure that embedded documents may be validated."""
class Comment(EmbeddedDocument):
date = DateTimeField()
content = StringField(required=True)
comment = Comment()
with pytest.raises(ValidationError):
comment.validate()
comment.content = "test"
comment.validate()
comment.date = 4
with pytest.raises(ValidationError):
comment.validate()
comment.date = datetime.now()
comment.validate()
assert comment._instance is None
def test_embedded_db_field_validate(self):
class SubDoc(EmbeddedDocument):
val = IntField(required=True)
class Doc(Document):
id = StringField(primary_key=True)
e = EmbeddedDocumentField(SubDoc, db_field="eb")
try:
Doc(id="bad").validate()
except ValidationError as e:
assert "SubDoc:None" in e.message
assert e.to_dict() == {"e": {"val": "OK could not be converted to int"}}
Doc.drop_collection()
Doc(id="test", e=SubDoc(val=15)).save()
doc = Doc.objects.first()
keys = doc._data.keys()
assert 2 == len(keys)
assert "e" in keys
assert "id" in keys
doc.e.val = "OK"
try:
doc.save()
except ValidationError as e:
assert "Doc:test" in e.message
assert e.to_dict() == {"e": {"val": "OK could not be converted to int"}}
def test_embedded_weakref(self):
class SubDoc(EmbeddedDocument):
val = IntField(required=True)
class Doc(Document):
e = EmbeddedDocumentField(SubDoc, db_field="eb")
Doc.drop_collection()
d1 = Doc()
d2 = Doc()
s = SubDoc()
with pytest.raises(ValidationError):
s.validate()
d1.e = s
d2.e = s
del d1
with pytest.raises(ValidationError):
d2.validate()
def test_parent_reference_in_child_document(self):
"""
Test to ensure a ReferenceField can store a reference to a parent
class when inherited. Issue #954.
"""
class Parent(Document):
meta = {"allow_inheritance": True}
reference = ReferenceField("self")
class Child(Parent):
pass
parent = Parent()
parent.save()
child = Child(reference=parent)
# Saving child should not raise a ValidationError
try:
child.save()
except ValidationError as e:
self.fail("ValidationError raised: %s" % e.message)
def test_parent_reference_set_as_attribute_in_child_document(self):
"""
Test to ensure a ReferenceField can store a reference to a parent
class when inherited and when set via attribute. Issue #954.
"""
class Parent(Document):
meta = {"allow_inheritance": True}
reference = ReferenceField("self")
class Child(Parent):
pass
parent = Parent()
parent.save()
child = Child()
child.reference = parent
# Saving the child should not raise a ValidationError
child.save()
if __name__ == "__main__":
unittest.main()
|
TestValidatorError
|
python
|
pytorch__pytorch
|
torch/ao/nn/sparse/quantized/linear.py
|
{
"start": 2904,
"end": 8971
}
|
class ____(torch.nn.Module):
r"""
A quantized sparse linear module with quantized tensor as inputs and outputs.
"""
_version = 1
_FLOAT_MODULE = torch.nn.Linear
def __init__(
self,
in_features,
out_features,
row_block_size,
col_block_size,
bias=True,
dtype=torch.qint8,
):
super().__init__()
if dtype != torch.qint8:
raise NotImplementedError(
"Only QINT8 is supported for Sparse Quantized Linear"
)
self.in_features = in_features
self.out_features = out_features
if bias:
bias = torch.zeros(self.out_features, dtype=torch.float)
else:
bias = None
qweight = torch._empty_affine_quantized(
[out_features, in_features], scale=1, zero_point=0, dtype=torch.qint8
)
self._packed_params = LinearPackedParams(
row_block_size=row_block_size, col_block_size=col_block_size, dtype=dtype
)
self._packed_params.set_weight_bias(
qweight, bias, row_block_size, col_block_size
)
self.scale = 1.0
self.zero_point = 0
@classmethod
def _get_name(cls):
return "SparseQuantizedLinear"
def extra_repr(self):
return (
f"in_features={self.in_features}, out_features={self.out_features}, scale={self.scale}, "
f"zero_point={self.zero_point}, qscheme={self.weight().qscheme()}"
)
def __repr__(self):
return _hide_packed_params_repr(self, LinearPackedParams)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return torch.ops.sparse.qlinear(
x, self._packed_params._packed_params, self.scale, self.zero_point
)
def _save_to_state_dict(self, destination, prefix, keep_vars):
super()._save_to_state_dict(destination, prefix, keep_vars)
destination[prefix + "scale"] = torch.tensor(self.scale)
destination[prefix + "zero_point"] = torch.tensor(self.zero_point)
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
self.scale = float(state_dict[prefix + "scale"])
state_dict.pop(prefix + "scale")
self.zero_point = int(state_dict[prefix + "zero_point"])
state_dict.pop(prefix + "zero_point")
state_dict.pop(prefix + "op_type")
version = local_metadata.get("version", None)
assert version <= self._version
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
False,
missing_keys,
unexpected_keys,
error_msgs,
)
def _weight_bias(self):
return self._packed_params._weight_bias()
def weight(self):
return self._weight_bias()[0]
def bias(self):
return self._weight_bias()[1]
def set_weight_bias(
self,
w: torch.Tensor,
b: torch.Tensor | None,
row_block_size: int | None,
col_block_size: int | None,
) -> None:
assert row_block_size is not None and col_block_size is not None
self._packed_params.set_weight_bias(w, b, row_block_size, col_block_size)
@classmethod
def from_float(cls, mod, use_precomputed_fake_quant=False):
r"""Create a quantized sparse module from a float module.
We only care about the convert at this stage, no need for observers just yet.
TODO(zaf): Need to add the sparse params to the qconfig
"""
assert type(mod) is cls._FLOAT_MODULE, (
cls._get_name() + ".from_float only works for " + cls._FLOAT_MODULE.__name__
)
assert hasattr(mod, "sparse_params"), (
"Expecting the Linear to have `sparse_params`. Make sure you have provided arguments "
'in the `sparsifier.squash_mask(params_to_save=("sparse_block_shape",))` method.'
)
sparse_block_shape = mod.sparse_params.get("sparse_block_shape", None) # type: ignore[operator, union-attr]
assert isinstance(sparse_block_shape, (tuple, list))
assert len(sparse_block_shape) == 2
# TODO: Need to add options to qconfig to avoid the calibration.
# TODO: Add calibration for the sparsity
assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
activation_post_process = mod.activation_post_process
weight_post_process = mod.qconfig.weight() # type: ignore[operator, union-attr]
# Assumption is that the weight is already sparsified by the
# `sparsifier.convert`
weight = mod.weight
weight_post_process(weight)
dtype = weight_post_process.dtype
act_scale, act_zp = activation_post_process.calculate_qparams() # type: ignore[operator, union-attr]
assert dtype == torch.qint8, "Weight observer must have dtype torch.qint8"
w_sc, w_zp = weight_post_process.calculate_qparams()
if isinstance(w_zp, torch.Tensor):
assert not torch.any(w_zp.bool()), "All weight zero points must map to 0"
else:
assert w_zp == 0, "Weight zero point must map to 0"
qweight = _quantize_weight(weight.float(), weight_post_process)
row_block_size = mod.sparse_params["sparse_block_shape"][0] # type: ignore[index]
col_block_size = mod.sparse_params["sparse_block_shape"][1] # type: ignore[index]
qlinear = cls(
mod.in_features,
mod.out_features,
row_block_size,
col_block_size,
dtype=dtype,
)
qlinear.set_weight_bias(
qweight,
mod.bias,
row_block_size, # type: ignore[arg-type]
col_block_size, # type: ignore[arg-type]
)
qlinear.scale = float(act_scale)
qlinear.zero_point = int(act_zp)
return qlinear
|
Linear
|
python
|
huggingface__transformers
|
src/transformers/models/pop2piano/modeling_pop2piano.py
|
{
"start": 4462,
"end": 5844
}
|
class ____(nn.Module):
def __init__(self, config: Pop2PianoConfig):
super().__init__()
self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.act = ACT2FN[config.dense_act_fn]
def forward(self, hidden_states):
hidden_gelu = self.act(self.wi_0(hidden_states))
hidden_linear = self.wi_1(hidden_states)
hidden_states = hidden_gelu * hidden_linear
hidden_states = self.dropout(hidden_states)
# To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32.
# See https://github.com/huggingface/transformers/issues/20287
# we also make sure the weights are not in `int8` in case users will force `_keep_in_fp32_modules` to be `None``
if (
isinstance(self.wo.weight, torch.Tensor)
and hidden_states.dtype != self.wo.weight.dtype
and self.wo.weight.dtype != torch.int8
):
hidden_states = hidden_states.to(self.wo.weight.dtype)
hidden_states = self.wo(hidden_states)
return hidden_states
# Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->Pop2Piano
|
Pop2PianoDenseGatedActDense
|
python
|
django__django
|
django/core/paginator.py
|
{
"start": 7186,
"end": 10191
}
|
class ____(BasePaginator):
def __init__(
self,
object_list,
per_page,
orphans=0,
allow_empty_first_page=True,
error_messages=None,
):
super().__init__(
object_list, per_page, orphans, allow_empty_first_page, error_messages
)
self._cache_acount = None
self._cache_anum_pages = None
async def __aiter__(self):
page_range = await self.apage_range()
for page_number in page_range:
yield await self.apage(page_number)
async def avalidate_number(self, number):
num_pages = await self.anum_pages()
return self._validate_number(number, num_pages)
async def aget_page(self, number):
"""See Paginator.get_page()."""
try:
number = await self.avalidate_number(number)
except PageNotAnInteger:
number = 1
except EmptyPage:
number = await self.anum_pages()
return await self.apage(number)
async def apage(self, number):
"""See Paginator.page()."""
number = await self.avalidate_number(number)
bottom = (number - 1) * self.per_page
top = bottom + self.per_page
count = await self.acount()
if top + self.orphans >= count:
top = count
return self._get_page(self.object_list[bottom:top], number, self)
def _get_page(self, *args, **kwargs):
return AsyncPage(*args, **kwargs)
async def acount(self):
"""See Paginator.count()."""
if self._cache_acount is not None:
return self._cache_acount
c = getattr(self.object_list, "acount", None)
if (
iscoroutinefunction(c)
and not inspect.isbuiltin(c)
and method_has_no_args(c)
):
count = await c()
else:
count = len(self.object_list)
self._cache_acount = count
return count
async def anum_pages(self):
"""See Paginator.num_pages()."""
if self._cache_anum_pages is not None:
return self._cache_anum_pages
count = await self.acount()
if count == 0 and not self.allow_empty_first_page:
self._cache_anum_pages = 0
return self._cache_anum_pages
hits = max(1, count - self.orphans)
num_pages = ceil(hits / self.per_page)
self._cache_anum_pages = num_pages
return num_pages
async def apage_range(self):
"""See Paginator.page_range()"""
num_pages = await self.anum_pages()
return range(1, num_pages + 1)
async def aget_elided_page_range(self, number=1, *, on_each_side=3, on_ends=2):
number = await self.avalidate_number(number)
num_pages = await self.anum_pages()
page_range = await self.apage_range()
for page in self._get_elided_page_range(
number, num_pages, page_range, on_each_side, on_ends
):
yield page
|
AsyncPaginator
|
python
|
huggingface__transformers
|
src/transformers/models/deepseek_vl/modular_deepseek_vl.py
|
{
"start": 4474,
"end": 4556
}
|
class ____(IdeficsCausalLMOutputWithPast):
pass
|
DeepseekVLCausalLMOutputWithPast
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/rds.py
|
{
"start": 1229,
"end": 15238
}
|
class ____(AwsGenericHook["RDSClient"]):
"""
Interact with Amazon Relational Database Service (RDS).
Provide thin wrapper around :external+boto3:py:class:`boto3.client("rds") <RDS.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
- `Amazon RDS and Aurora Documentation \
<https://docs.aws.amazon.com/rds/index.html>`__
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["client_type"] = "rds"
super().__init__(*args, **kwargs)
def get_db_snapshot_state(self, snapshot_id: str) -> str:
"""
Get the current state of a DB instance snapshot.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_db_snapshots`
:param snapshot_id: The ID of the target DB instance snapshot
:return: Returns the status of the DB snapshot as a string (eg. "available")
:raises AirflowNotFoundException: If the DB instance snapshot does not exist.
"""
try:
response = self.conn.describe_db_snapshots(DBSnapshotIdentifier=snapshot_id)
except self.conn.exceptions.DBSnapshotNotFoundFault as e:
raise AirflowNotFoundException(e)
return response["DBSnapshots"][0]["Status"].lower()
def wait_for_db_snapshot_state(
self, snapshot_id: str, target_state: str, check_interval: int = 30, max_attempts: int = 40
) -> None:
"""
Poll DB Snapshots until target_state is reached; raise AirflowException after max_attempts.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_db_snapshots`
:param snapshot_id: The ID of the target DB instance snapshot
:param target_state: Wait until this state is reached
:param check_interval: The amount of time in seconds to wait between attempts
:param max_attempts: The maximum number of attempts to be made
"""
def poke():
return self.get_db_snapshot_state(snapshot_id)
target_state = target_state.lower()
if target_state in ("available", "deleted", "completed"):
waiter = self.conn.get_waiter(f"db_snapshot_{target_state}") # type: ignore
waiter.wait(
DBSnapshotIdentifier=snapshot_id,
WaiterConfig={"Delay": check_interval, "MaxAttempts": max_attempts},
)
else:
self._wait_for_state(poke, target_state, check_interval, max_attempts)
self.log.info("DB snapshot '%s' reached the '%s' state", snapshot_id, target_state)
def get_db_cluster_snapshot_state(self, snapshot_id: str) -> str:
"""
Get the current state of a DB cluster snapshot.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_db_cluster_snapshots`
:param snapshot_id: The ID of the target DB cluster.
:return: Returns the status of the DB cluster snapshot as a string (eg. "available")
:raises AirflowNotFoundException: If the DB cluster snapshot does not exist.
"""
try:
response = self.conn.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier=snapshot_id)
except self.conn.exceptions.DBClusterSnapshotNotFoundFault as e:
raise AirflowNotFoundException(e)
return response["DBClusterSnapshots"][0]["Status"].lower()
def wait_for_db_cluster_snapshot_state(
self, snapshot_id: str, target_state: str, check_interval: int = 30, max_attempts: int = 40
) -> None:
"""
Poll DB Cluster Snapshots until target_state is reached; raise AirflowException after a max_attempts.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_db_cluster_snapshots`
:param snapshot_id: The ID of the target DB cluster snapshot
:param target_state: Wait until this state is reached
:param check_interval: The amount of time in seconds to wait between attempts
:param max_attempts: The maximum number of attempts to be made
"""
def poke():
return self.get_db_cluster_snapshot_state(snapshot_id)
target_state = target_state.lower()
if target_state in ("available", "deleted"):
waiter = self.conn.get_waiter(f"db_cluster_snapshot_{target_state}") # type: ignore
waiter.wait(
DBClusterSnapshotIdentifier=snapshot_id,
WaiterConfig={"Delay": check_interval, "MaxAttempts": max_attempts},
)
else:
self._wait_for_state(poke, target_state, check_interval, max_attempts)
self.log.info("DB cluster snapshot '%s' reached the '%s' state", snapshot_id, target_state)
def get_export_task_state(self, export_task_id: str) -> str:
"""
Get the current state of an RDS snapshot export to Amazon S3.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_export_tasks`
:param export_task_id: The identifier of the target snapshot export task.
:return: Returns the status of the snapshot export task as a string (eg. "canceled")
:raises AirflowNotFoundException: If the export task does not exist.
"""
try:
response = self.conn.describe_export_tasks(ExportTaskIdentifier=export_task_id)
except self.conn.exceptions.ClientError as e:
if e.response["Error"]["Code"] in ("ExportTaskNotFound", "ExportTaskNotFoundFault"):
raise AirflowNotFoundException(e)
raise e
return response["ExportTasks"][0]["Status"].lower()
def wait_for_export_task_state(
self, export_task_id: str, target_state: str, check_interval: int = 30, max_attempts: int = 40
) -> None:
"""
Poll export tasks until target_state is reached; raise AirflowException after max_attempts.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_export_tasks`
:param export_task_id: The identifier of the target snapshot export task.
:param target_state: Wait until this state is reached
:param check_interval: The amount of time in seconds to wait between attempts
:param max_attempts: The maximum number of attempts to be made
"""
def poke():
return self.get_export_task_state(export_task_id)
target_state = target_state.lower()
self._wait_for_state(poke, target_state, check_interval, max_attempts)
self.log.info("export task '%s' reached the '%s' state", export_task_id, target_state)
def get_event_subscription_state(self, subscription_name: str) -> str:
"""
Get the current state of an RDS snapshot export to Amazon S3.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_event_subscriptions`
:param subscription_name: The name of the target RDS event notification subscription.
:return: Returns the status of the event subscription as a string (eg. "active")
:raises AirflowNotFoundException: If the event subscription does not exist.
"""
try:
response = self.conn.describe_event_subscriptions(SubscriptionName=subscription_name)
except self.conn.exceptions.ClientError as e:
if e.response["Error"]["Code"] in ("SubscriptionNotFoundFault", "SubscriptionNotFound"):
raise AirflowNotFoundException(e)
raise e
return response["EventSubscriptionsList"][0]["Status"].lower()
def wait_for_event_subscription_state(
self, subscription_name: str, target_state: str, check_interval: int = 30, max_attempts: int = 40
) -> None:
"""
Poll Event Subscriptions until target_state is reached; raise AirflowException after max_attempts.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_event_subscriptions`
:param subscription_name: The name of the target RDS event notification subscription.
:param target_state: Wait until this state is reached
:param check_interval: The amount of time in seconds to wait between attempts
:param max_attempts: The maximum number of attempts to be made
"""
def poke():
return self.get_event_subscription_state(subscription_name)
target_state = target_state.lower()
self._wait_for_state(poke, target_state, check_interval, max_attempts)
self.log.info("event subscription '%s' reached the '%s' state", subscription_name, target_state)
def get_db_instance_state(self, db_instance_id: str) -> str:
"""
Get the current state of a DB instance.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_db_instances`
:param db_instance_id: The ID of the target DB instance.
:return: Returns the status of the DB instance as a string (eg. "available")
:raises AirflowNotFoundException: If the DB instance does not exist.
"""
try:
response = self.conn.describe_db_instances(DBInstanceIdentifier=db_instance_id)
except self.conn.exceptions.DBInstanceNotFoundFault as e:
raise AirflowNotFoundException(e)
return response["DBInstances"][0]["DBInstanceStatus"].lower()
def wait_for_db_instance_state(
self, db_instance_id: str, target_state: str, check_interval: int = 30, max_attempts: int = 40
) -> None:
"""
Poll DB Instances until target_state is reached; raise AirflowException after max_attempts.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_db_instances`
:param db_instance_id: The ID of the target DB instance.
:param target_state: Wait until this state is reached
:param check_interval: The amount of time in seconds to wait between attempts
:param max_attempts: The maximum number of attempts to be made
"""
def poke():
return self.get_db_instance_state(db_instance_id)
target_state = target_state.lower()
if target_state in ("available", "deleted", "stopped"):
waiter = self.conn.get_waiter(f"db_instance_{target_state}") # type: ignore
wait(
waiter=waiter,
waiter_delay=check_interval,
waiter_max_attempts=max_attempts,
args={"DBInstanceIdentifier": db_instance_id},
failure_message=f"Rdb DB instance failed to reach state {target_state}",
status_message="Rds DB instance state is",
status_args=["DBInstances[0].DBInstanceStatus"],
)
else:
self._wait_for_state(poke, target_state, check_interval, max_attempts)
self.log.info("DB cluster '%s' reached the '%s' state", db_instance_id, target_state)
def get_db_cluster_state(self, db_cluster_id: str) -> str:
"""
Get the current state of a DB cluster.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_db_clusters`
:param db_cluster_id: The ID of the target DB cluster.
:return: Returns the status of the DB cluster as a string (eg. "available")
:raises AirflowNotFoundException: If the DB cluster does not exist.
"""
try:
response = self.conn.describe_db_clusters(DBClusterIdentifier=db_cluster_id)
except self.conn.exceptions.DBClusterNotFoundFault as e:
raise AirflowNotFoundException(e)
return response["DBClusters"][0]["Status"].lower()
def wait_for_db_cluster_state(
self, db_cluster_id: str, target_state: str, check_interval: int = 30, max_attempts: int = 40
) -> None:
"""
Poll DB Clusters until target_state is reached; raise AirflowException after max_attempts.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_db_clusters`
:param db_cluster_id: The ID of the target DB cluster.
:param target_state: Wait until this state is reached
:param check_interval: The amount of time in seconds to wait between attempts
:param max_attempts: The maximum number of attempts to be made
"""
def poke():
return self.get_db_cluster_state(db_cluster_id)
target_state = target_state.lower()
if target_state in ("available", "deleted", "stopped"):
waiter = self.conn.get_waiter(f"db_cluster_{target_state}") # type: ignore
waiter.wait(
DBClusterIdentifier=db_cluster_id,
WaiterConfig={"Delay": check_interval, "MaxAttempts": max_attempts},
)
else:
self._wait_for_state(poke, target_state, check_interval, max_attempts)
self.log.info("DB cluster snapshot '%s' reached the '%s' state", db_cluster_id, target_state)
def _wait_for_state(
self,
poke: Callable[..., str],
target_state: str,
check_interval: int,
max_attempts: int,
) -> None:
"""
Poll the poke function for the current state until it reaches the target_state.
:param poke: A function that returns the current state of the target resource as a string.
:param target_state: Wait until this state is reached
:param check_interval: The amount of time in seconds to wait between attempts
:param max_attempts: The maximum number of attempts to be made
"""
state = poke()
tries = 1
while state != target_state:
self.log.info("Current state is %s", state)
if tries >= max_attempts:
raise AirflowException("Max attempts exceeded")
time.sleep(check_interval)
state = poke()
tries += 1
|
RdsHook
|
python
|
rapidsai__cudf
|
python/cudf_polars/cudf_polars/dsl/ir.py
|
{
"start": 43753,
"end": 46146
}
|
class ____(IR):
"""
Return a cached plan node.
Used for CSE at the plan level.
"""
__slots__ = ("key", "refcount")
_non_child = ("schema", "key", "refcount")
key: int
"""The cache key."""
refcount: int | None
"""The number of cache hits."""
def __init__(self, schema: Schema, key: int, refcount: int | None, value: IR):
self.schema = schema
self.key = key
self.refcount = refcount
self.children = (value,)
self._non_child_args = (key, refcount)
def get_hashable(self) -> Hashable: # noqa: D102
# Polars arranges that the keys are unique across all cache
# nodes that reference the same child, so we don't need to
# hash the child.
return (type(self), self.key, self.refcount)
def is_equal(self, other: Self) -> bool: # noqa: D102
if self.key == other.key and self.refcount == other.refcount:
self.children = other.children
return True
return False
@classmethod
@log_do_evaluate
@nvtx_annotate_cudf_polars(message="Cache")
def do_evaluate(
cls,
key: int,
refcount: int | None,
df: DataFrame,
*,
context: IRExecutionContext,
) -> DataFrame: # pragma: no cover; basic evaluation never calls this
"""Evaluate and return a dataframe."""
# Our value has already been computed for us, so let's just
# return it.
return df
def evaluate(
self, *, cache: CSECache, timer: Timer | None, context: IRExecutionContext
) -> DataFrame:
"""Evaluate and return a dataframe."""
# We must override the recursion scheme because we don't want
# to recurse if we're in the cache.
try:
(result, hits) = cache[self.key]
except KeyError:
(value,) = self.children
result = value.evaluate(cache=cache, timer=timer, context=context)
cache[self.key] = (result, 0)
return result
else:
if self.refcount is None:
return result
hits += 1 # pragma: no cover
if hits == self.refcount: # pragma: no cover
del cache[self.key]
else: # pragma: no cover
cache[self.key] = (result, hits)
return result # pragma: no cover
|
Cache
|
python
|
ray-project__ray
|
python/ray/data/_internal/util.py
|
{
"start": 42547,
"end": 43661
}
|
class ____:
def __init__(
self,
f: pyarrow.NativeFile,
context: DataContext,
max_attempts: int = 10,
max_backoff_s: int = 32,
):
self._f = f
self._data_context = context
self._max_attempts = max_attempts
self._max_backoff_s = max_backoff_s
def __repr__(self):
return f"<{self.__class__.__name__} fs={self.handler.unwrap()}>"
def _retry_operation(self, operation: Callable, description: str):
"""Execute an operation with retries."""
return call_with_retry(
operation,
description=description,
match=self._data_context.retried_io_errors,
max_attempts=self._max_attempts,
max_backoff_s=self._max_backoff_s,
)
def __enter__(self):
return self._retry_operation(self._f.__enter__, "enter file context")
def __exit__(self, exc_type, exc_value, traceback):
self._retry_operation(
lambda: self._f.__exit__(exc_type, exc_value, traceback),
"exit file context",
)
|
RetryingContextManager
|
python
|
ray-project__ray
|
rllib/env/wrappers/open_spiel.py
|
{
"start": 224,
"end": 4645
}
|
class ____(MultiAgentEnv):
def __init__(self, env):
super().__init__()
self.env = env
self.agents = self.possible_agents = list(range(self.env.num_players()))
# Store the open-spiel game type.
self.type = self.env.get_type()
# Stores the current open-spiel game state.
self.state = None
self.observation_space = gym.spaces.Dict(
{
aid: gym.spaces.Box(
float("-inf"),
float("inf"),
(self.env.observation_tensor_size(),),
dtype=np.float32,
)
for aid in self.possible_agents
}
)
self.action_space = gym.spaces.Dict(
{
aid: gym.spaces.Discrete(self.env.num_distinct_actions())
for aid in self.possible_agents
}
)
def reset(self, *, seed: Optional[int] = None, options: Optional[dict] = None):
self.state = self.env.new_initial_state()
return self._get_obs(), {}
def step(self, action):
# Before applying action(s), there could be chance nodes.
# E.g. if env has to figure out, which agent's action should get
# resolved first in a simultaneous node.
self._solve_chance_nodes()
penalties = {}
# Sequential game:
if str(self.type.dynamics) == "Dynamics.SEQUENTIAL":
curr_player = self.state.current_player()
assert curr_player in action
try:
self.state.apply_action(action[curr_player])
# TODO: (sven) resolve this hack by publishing legal actions
# with each step.
except pyspiel.SpielError:
self.state.apply_action(np.random.choice(self.state.legal_actions()))
penalties[curr_player] = -0.1
# Compile rewards dict.
rewards = dict(enumerate(self.state.returns()))
# Simultaneous game.
else:
assert self.state.current_player() == -2
# Apparently, this works, even if one or more actions are invalid.
self.state.apply_actions([action[ag] for ag in range(self.num_agents)])
# Now that we have applied all actions, get the next obs.
obs = self._get_obs()
# Compile rewards dict and add the accumulated penalties
# (for taking invalid actions).
rewards = dict(enumerate(self.state.returns()))
for ag, penalty in penalties.items():
rewards[ag] += penalty
# Are we done?
is_terminated = self.state.is_terminal()
terminateds = dict(
{ag: is_terminated for ag in range(self.num_agents)},
**{"__all__": is_terminated}
)
truncateds = dict(
{ag: False for ag in range(self.num_agents)}, **{"__all__": False}
)
return obs, rewards, terminateds, truncateds, {}
def render(self, mode=None) -> None:
if mode == "human":
print(self.state)
def _get_obs(self):
# Before calculating an observation, there could be chance nodes
# (that may have an effect on the actual observations).
# E.g. After reset, figure out initial (random) positions of the
# agents.
self._solve_chance_nodes()
if self.state.is_terminal():
return {}
# Sequential game:
if str(self.type.dynamics) == "Dynamics.SEQUENTIAL":
curr_player = self.state.current_player()
return {
curr_player: np.reshape(self.state.observation_tensor(), [-1]).astype(
np.float32
)
}
# Simultaneous game.
else:
assert self.state.current_player() == -2
return {
ag: np.reshape(self.state.observation_tensor(ag), [-1]).astype(
np.float32
)
for ag in range(self.num_agents)
}
def _solve_chance_nodes(self):
# Chance node(s): Sample a (non-player) action and apply.
while self.state.is_chance_node():
assert self.state.current_player() == -1
actions, probs = zip(*self.state.chance_outcomes())
action = np.random.choice(actions, p=probs)
self.state.apply_action(action)
|
OpenSpielEnv
|
python
|
plotly__plotly.py
|
plotly/graph_objs/layout/title/_subtitle.py
|
{
"start": 235,
"end": 2828
}
|
class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.title"
_path_str = "layout.title.subtitle"
_valid_props = {"font", "text"}
@property
def font(self):
"""
Sets the subtitle font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.title.subtitle.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.layout.title.subtitle.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def text(self):
"""
Sets the plot's subtitle.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets the subtitle font.
text
Sets the plot's subtitle.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Subtitle object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.title.Subtitle`
font
Sets the subtitle font.
text
Sets the plot's subtitle.
Returns
-------
Subtitle
"""
super().__init__("subtitle")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.title.Subtitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.title.Subtitle`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Subtitle
|
python
|
huggingface__transformers
|
src/transformers/models/zoedepth/modeling_zoedepth.py
|
{
"start": 48886,
"end": 49240
}
|
class ____(PreTrainedModel):
config: ZoeDepthConfig
base_model_prefix = "zoedepth"
main_input_name = "pixel_values"
input_modalities = ("image",)
supports_gradient_checkpointing = True
@auto_docstring(
custom_intro="""
ZoeDepth model with one or multiple metric depth estimation head(s) on top.
"""
)
|
ZoeDepthPreTrainedModel
|
python
|
doocs__leetcode
|
solution/2900-2999/2958.Length of Longest Subarray With at Most K Frequency/Solution.py
|
{
"start": 0,
"end": 337
}
|
class ____:
def maxSubarrayLength(self, nums: List[int], k: int) -> int:
cnt = defaultdict(int)
ans = j = 0
for i, x in enumerate(nums):
cnt[x] += 1
while cnt[x] > k:
cnt[nums[j]] -= 1
j += 1
ans = max(ans, i - j + 1)
return ans
|
Solution
|
python
|
huggingface__transformers
|
src/transformers/models/csm/configuration_csm.py
|
{
"start": 8317,
"end": 18355
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`CsmForConditionalGeneration`]. It is used to instantiate an CSM
model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the csm-1b.
e.g. [sesame/csm-1b](https://huggingface.co/sesame/csm-1b)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
num_codebooks (`int`, *optional*, defaults to 32):
Number of codebooks used in the underlying codec model responsible for tokenizing the audio.
vocab_size (`int`, *optional*, defaults to 2051):
Vocabulary size of the Csm model. Defines the number of different audio tokens that can be represented by each codebook.
text_vocab_size (`int`, *optional*, defaults to 128256):
Vocabulary size of the text input for the Csm model. Defines the number of different text tokens that can be represented.
hidden_size (`int`, *optional*, defaults to 2048):
Dimension of the hidden representations of the backbone model.
intermediate_size (`int`, *optional*, defaults to 8192):
Dimension of the MLP representations of the backbone model.
num_hidden_layers (`int`, *optional*, defaults to 16):
Number of hidden layers in the backbone model Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the backbone model Transformer decoder.
num_key_value_heads (`int`, *optional*, defaults to 8):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245).
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the backbone model Transformer decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 128002):
Padding token id.
codebook_pad_token_id (`int`, *optional*, defaults to 2050):
Padding token id for codebook tokens.
codebook_eos_token_id (`int`, *optional*, defaults to 0):
End of stream token id for codebook tokens.
bos_token_id (`int`, *optional*, defaults to 128000):
Beginning of stream token id.
eos_token_id (`int`, *optional*):
End of stream token id.
audio_token_id (`int`, *optional*, defaults to 128002):
Audio token id in the text input.
audio_eos_token_id (`int`, *optional*, defaults to 128003):
End of stream token id for audio in the text input.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
head_dim (`int`, *optional*):
The attention head dimension. If None, it will default to hidden_size // num_attention_heads
tie_codebooks_embeddings (`bool`, *optional*, defaults to `True`):
Whether to tie the codebook tokens embeddings of the backbone model to the codebook tokens embeddings of the depth decoder.
depth_decoder_config (`CsmDepthDecoderConfig`, *optional*):
Configuration for the depth decoder.
codec_config (`PreTrainedConfig`, *optional*):
Configuration for the codec.
```python
>>> from transformers import CsmForConditionalGeneration, CsmConfig
>>> # Initializing a CsmConfig
>>> configuration = CsmConfig()
>>> # Initializing a model
>>> model = CsmForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "csm"
base_config_key = "csm_config"
keys_to_ignore_at_inference = ["past_key_values"]
default_theta = 500000.0
sub_configs = {
"codec_config": AutoConfig,
"depth_decoder_config": CsmDepthDecoderConfig,
}
def __init__(
self,
num_codebooks: Optional[int] = 32,
vocab_size: Optional[int] = 2051,
text_vocab_size: Optional[int] = 128256,
hidden_size: Optional[int] = 2048,
intermediate_size: Optional[int] = 8192,
num_hidden_layers: Optional[int] = 16,
num_attention_heads: Optional[int] = 32,
num_key_value_heads: Optional[int] = 8,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 2048,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-5,
use_cache: Optional[bool] = True,
pad_token_id: Optional[int] = 128002,
codebook_pad_token_id: Optional[int] = 2050,
codebook_eos_token_id: Optional[int] = 0,
bos_token_id: Optional[int] = 128000,
eos_token_id: Optional[int] = None,
audio_token_id: Optional[int] = 128002,
audio_eos_token_id: Optional[int] = 128003,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
mlp_bias: Optional[bool] = False,
head_dim: Optional[int] = None,
tie_codebooks_embeddings: Optional[bool] = True,
depth_decoder_config: Optional[dict] = None,
codec_config: Optional[dict] = None,
**kwargs,
):
if kwargs.pop("tie_word_embeddings", False):
raise ValueError("`tie_word_embeddings=True` is not supported for CsmConfig")
if depth_decoder_config is None:
self.depth_decoder_config = CsmDepthDecoderConfig()
logger.info("depth_decoder_config is None, using default depth decoder config.")
elif isinstance(depth_decoder_config, dict):
self.depth_decoder_config = CsmDepthDecoderConfig(**depth_decoder_config)
elif isinstance(depth_decoder_config, CsmDepthDecoderConfig):
self.depth_decoder_config = depth_decoder_config
if codec_config is None:
self.codec_config = AutoConfig.for_model("mimi")
logger.info("codec_config is None, using default audio encoder config.")
elif isinstance(codec_config, dict):
self.codec_config = AutoConfig.for_model(**codec_config)
elif isinstance(codec_config, PreTrainedConfig):
self.codec_config = codec_config
self.text_vocab_size = text_vocab_size
self.num_codebooks = num_codebooks
self.audio_token_id = audio_token_id
self.audio_eos_token_id = audio_eos_token_id
self.codebook_pad_token_id = codebook_pad_token_id
self.codebook_eos_token_id = codebook_eos_token_id
self.tie_codebooks_embeddings = tie_codebooks_embeddings
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.mlp_bias = mlp_bias
self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
self.rope_parameters = rope_parameters
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=False,
**kwargs,
)
__all__ = [
"CsmDepthDecoderConfig",
"CsmConfig",
]
|
CsmConfig
|
python
|
pydata__xarray
|
xarray/core/indexing.py
|
{
"start": 17842,
"end": 19604
}
|
class ____(ExplicitIndexer):
"""Tuple for vectorized indexing.
All elements should be slice or N-dimensional np.ndarray objects with an
integer dtype and the same number of dimensions. Indexing follows proposed
rules for np.ndarray.vindex, which matches NumPy's advanced indexing rules
(including broadcasting) except sliced axes are always moved to the end:
https://github.com/numpy/numpy/pull/6256
"""
__slots__ = ()
def __init__(self, key: tuple[slice | np.ndarray[Any, np.dtype[np.generic]], ...]):
if not isinstance(key, tuple):
raise TypeError(f"key must be a tuple: {key!r}")
new_key = []
ndim = None
for k in key:
if isinstance(k, slice):
k = as_integer_slice(k)
elif is_duck_array(k):
if not np.issubdtype(k.dtype, np.integer):
raise TypeError(
f"invalid indexer array, does not have integer dtype: {k!r}"
)
if ndim is None:
ndim = k.ndim # type: ignore[union-attr]
elif ndim != k.ndim: # type: ignore[union-attr]
ndims = [k.ndim for k in key if isinstance(k, np.ndarray)]
raise ValueError(
"invalid indexer key: ndarray arguments "
f"have different numbers of dimensions: {ndims}"
)
k = duck_array_ops.astype(k, np.int64, copy=False)
else:
raise TypeError(
f"unexpected indexer type for {type(self).__name__}: {k!r}"
)
new_key.append(k)
super().__init__(tuple(new_key))
|
VectorizedIndexer
|
python
|
apache__airflow
|
airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_config.py
|
{
"start": 6232,
"end": 11515
}
|
class ____(TestConfigEndpoint):
@pytest.mark.parametrize(
("section", "headers", "expected_status_code", "expected_response"),
[
(
None,
HEADERS_JSON,
200,
GET_CONFIG_ALL_JSON_RESPONSE,
),
(None, HEADERS_JSON_UTF8, 200, GET_CONFIG_ALL_JSON_RESPONSE),
(None, HEADERS_ANY, 200, GET_CONFIG_ALL_JSON_RESPONSE),
(None, HEADERS_NONE, 200, GET_CONFIG_ALL_JSON_RESPONSE),
(
None,
HEADERS_TEXT,
200,
textwrap.dedent(
f"""\
[{SECTION_CORE}]
{OPTION_KEY_PARALLELISM} = {OPTION_VALUE_PARALLELISM}
[{SECTION_SMTP}]
{OPTION_KEY_SMTP_HOST} = {OPTION_VALUE_SMTP_HOST}
{OPTION_KEY_SMTP_MAIL_FROM} = {OPTION_VALUE_SMTP_MAIL_FROM}
[{SECTION_DATABASE}]
{OPTION_KEY_SQL_ALCHEMY_CONN} = {OPTION_VALUE_SQL_ALCHEMY_CONN}
"""
),
),
(
None,
HEADERS_INVALID,
406,
{"detail": "Only application/json or text/plain is supported"},
),
(
SECTION_CORE,
HEADERS_JSON,
200,
{
"sections": [
{
"name": SECTION_CORE,
"options": [
{"key": OPTION_KEY_PARALLELISM, "value": OPTION_VALUE_PARALLELISM},
],
},
],
},
),
(
SECTION_SMTP,
HEADERS_TEXT,
200,
textwrap.dedent(
f"""\
[{SECTION_SMTP}]
{OPTION_KEY_SMTP_HOST} = {OPTION_VALUE_SMTP_HOST}
{OPTION_KEY_SMTP_MAIL_FROM} = {OPTION_VALUE_SMTP_MAIL_FROM}
"""
),
),
(
SECTION_DATABASE,
HEADERS_JSON,
200,
{
"sections": [
{
"name": SECTION_DATABASE,
"options": [
{"key": OPTION_KEY_SQL_ALCHEMY_CONN, "value": OPTION_VALUE_SQL_ALCHEMY_CONN},
],
},
],
},
),
(None, HEADERS_JSON, 403, FORBIDDEN_RESPONSE),
(SECTION_CORE, HEADERS_JSON, 403, FORBIDDEN_RESPONSE),
(SECTION_NOT_EXIST, HEADERS_JSON, 404, {"detail": f"Section {SECTION_NOT_EXIST} not found."}),
],
)
def test_get_config(self, test_client, section, headers, expected_status_code, expected_response):
query_params = {"section": section} if section else None
if expected_status_code == 403:
with conf_vars(AIRFLOW_CONFIG_DISABLE_EXPOSE_CONFIG):
response = test_client.get("/config", headers=headers, params=query_params)
else:
response = test_client.get("/config", headers=headers, params=query_params)
self._validate_response(headers, expected_response, expected_status_code, response)
@pytest.mark.parametrize(
("headers", "expected_status_code", "expected_response"),
[
(HEADERS_JSON, 200, GET_CONFIG_NON_SENSITIVE_ONLY_JSON_RESPONSE),
(HEADERS_JSON_UTF8, 200, GET_CONFIG_NON_SENSITIVE_ONLY_JSON_RESPONSE),
(HEADERS_ANY, 200, GET_CONFIG_NON_SENSITIVE_ONLY_JSON_RESPONSE),
(HEADERS_NONE, 200, GET_CONFIG_NON_SENSITIVE_ONLY_JSON_RESPONSE),
(
HEADERS_TEXT,
200,
textwrap.dedent(
f"""\
[{SECTION_CORE}]
{OPTION_KEY_PARALLELISM} = {OPTION_VALUE_PARALLELISM}
[{SECTION_SMTP}]
{OPTION_KEY_SMTP_HOST} = {OPTION_VALUE_SMTP_HOST}
{OPTION_KEY_SMTP_MAIL_FROM} = {OPTION_VALUE_SMTP_MAIL_FROM}
[{SECTION_DATABASE}]
{OPTION_KEY_SQL_ALCHEMY_CONN} = {OPTION_VALUE_SENSITIVE_HIDDEN}
"""
),
),
],
)
def test_get_config_non_sensitive_only(
self, test_client, headers, expected_status_code, expected_response
):
with conf_vars(AIRFLOW_CONFIG_NON_SENSITIVE_ONLY_CONFIG):
response = test_client.get("/config", headers=headers)
self._validate_response(headers, expected_response, expected_status_code, response)
def test_get_config_should_response_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.get("/config")
assert response.status_code == 401
def test_get_config_should_response_403(self, unauthorized_test_client):
response = unauthorized_test_client.get("/config")
assert response.status_code == 403
|
TestGetConfig
|
python
|
sympy__sympy
|
sympy/integrals/manualintegrate.py
|
{
"start": 18948,
"end": 19298
}
|
class ____(AtomicRule):
a: Expr
b: Expr
c: Expr
def eval(self) -> Expr:
a, b, c, x = self.a, self.b, self.c, self.variable
return sqrt(S.Pi)/sqrt(2*a) * (
cos(b**2/(4*a) - c)*fresnelc((2*a*x + b)/sqrt(2*a*S.Pi)) +
sin(b**2/(4*a) - c)*fresnels((2*a*x + b)/sqrt(2*a*S.Pi)))
@dataclass
|
FresnelCRule
|
python
|
scrapy__scrapy
|
tests/test_downloader_handler_twisted_http2.py
|
{
"start": 1111,
"end": 1433
}
|
class ____:
@property
def download_handler_cls(self) -> type[DownloadHandlerProtocol]:
# the import can fail when H2_ENABLED is False
from scrapy.core.downloader.handlers.http2 import ( # noqa: PLC0415
H2DownloadHandler,
)
return H2DownloadHandler
|
H2DownloadHandlerMixin
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/errors.py
|
{
"start": 8787,
"end": 9078
}
|
class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneError,)
name = "InvalidPipelineRunsFilterError"
def __init__(self, message):
super().__init__()
self.message = check.str_param(message, "message")
|
GrapheneInvalidPipelineRunsFilterError
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/templates.py
|
{
"start": 34687,
"end": 35381
}
|
class ____(DelegatingLexer):
"""
Subclass of the ERB lexer that highlights the unlexed data with the
html lexer.
Nested Javascript and CSS is highlighted too.
"""
name = 'RHTML'
aliases = ['rhtml', 'html+erb', 'html+ruby']
filenames = ['*.rhtml']
alias_filenames = ['*.html', '*.htm', '*.xhtml']
mimetypes = ['text/html+ruby']
def __init__(self, **options):
super(RhtmlLexer, self).__init__(HtmlLexer, ErbLexer, **options)
def analyse_text(text):
rv = ErbLexer.analyse_text(text) - 0.01
if html_doctype_matches(text):
# one more than the XmlErbLexer returns
rv += 0.5
return rv
|
RhtmlLexer
|
python
|
run-llama__llama_index
|
llama-index-integrations/embeddings/llama-index-embeddings-alibabacloud-aisearch/llama_index/embeddings/alibabacloud_aisearch/base.py
|
{
"start": 1603,
"end": 6115
}
|
class ____(BaseEmbedding):
"""
For further details, please visit `https://help.aliyun.com/zh/open-search/search-platform/developer-reference/text-embedding-api-details`.
"""
_client: Client = PrivateAttr()
aisearch_api_key: str = Field(default=None, exclude=True)
endpoint: str = None
service_id: str = "ops-text-embedding-002"
workspace_name: str = "default"
def __init__(
self, endpoint: str = None, aisearch_api_key: str = None, **kwargs: Any
) -> None:
super().__init__(**kwargs)
self.aisearch_api_key = get_from_param_or_env(
"aisearch_api_key", aisearch_api_key, "AISEARCH_API_KEY"
)
self.endpoint = get_from_param_or_env("endpoint", endpoint, "AISEARCH_ENDPOINT")
config = AISearchConfig(
bearer_token=self.aisearch_api_key,
endpoint=self.endpoint,
protocol="http",
)
self._client = Client(config=config)
@classmethod
def class_name(cls) -> str:
return "AlibabaCloudAISearchEmbedding"
@retry_decorator
def _get_embedding(self, text: str, input_type: str) -> List[float]:
request = GetTextEmbeddingRequest(input=text, input_type=input_type)
response: GetTextEmbeddingResponse = self._client.get_text_embedding(
workspace_name=self.workspace_name,
service_id=self.service_id,
request=request,
)
embeddings = response.body.result.embeddings
return embeddings[0].embedding
@aretry_decorator
async def _aget_embedding(self, text: str, input_type: str) -> List[float]:
request = GetTextEmbeddingRequest(input=text, input_type=input_type)
response: GetTextEmbeddingResponse = (
await self._client.get_text_embedding_async(
workspace_name=self.workspace_name,
service_id=self.service_id,
request=request,
)
)
embeddings = response.body.result.embeddings
return embeddings[0].embedding
@retry_decorator
def _get_embeddings(self, texts: List[str], input_type: str) -> List[List[float]]:
request = GetTextEmbeddingRequest(input=texts, input_type=input_type)
response: GetTextEmbeddingResponse = self._client.get_text_embedding(
workspace_name=self.workspace_name,
service_id=self.service_id,
request=request,
)
embeddings = response.body.result.embeddings
return [emb.embedding for emb in embeddings]
@aretry_decorator
async def _aget_embeddings(
self,
texts: List[str],
input_type: str,
) -> List[List[float]]:
request = GetTextEmbeddingRequest(input=texts, input_type=input_type)
response: GetTextEmbeddingResponse = (
await self._client.get_text_embedding_async(
workspace_name=self.workspace_name,
service_id=self.service_id,
request=request,
)
)
embeddings = response.body.result.embeddings
return [emb.embedding for emb in embeddings]
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._get_embedding(
query,
input_type="query",
)
async def _aget_query_embedding(self, query: str) -> List[float]:
"""The asynchronous version of _get_query_embedding."""
return await self._aget_embedding(
query,
input_type="query",
)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._get_embedding(
text,
input_type="document",
)
async def _aget_text_embedding(self, text: str) -> List[float]:
"""The asynchronous version of _get_text_embedding."""
return await self._aget_embedding(
text,
input_type="document",
)
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
return self._get_embeddings(
texts,
input_type="document",
)
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""The asynchronous version of _get_text_embeddings."""
return await self._aget_embeddings(
texts,
input_type="document",
)
|
AlibabaCloudAISearchEmbedding
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.