language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | src/transformers/models/clvp/modeling_clvp.py | {
"start": 25837,
"end": 26538
} | class ____(nn.Module):
def __init__(self, intermediate_size, config):
super().__init__()
embed_dim = config.hidden_size
self.c_fc = Conv1D(intermediate_size, embed_dim)
self.c_proj = Conv1D(embed_dim, intermediate_size)
self.act = ACT2FN[config.activation_function]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]]) -> torch.FloatTensor:
hidden_states = self.c_fc(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.c_proj(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
| ClvpDecoderMLP |
python | doocs__leetcode | solution/1100-1199/1103.Distribute Candies to People/Solution.py | {
"start": 0,
"end": 297
} | class ____:
def distributeCandies(self, candies: int, num_people: int) -> List[int]:
ans = [0] * num_people
i = 0
while candies:
ans[i % num_people] += min(candies, i + 1)
candies -= min(candies, i + 1)
i += 1
return ans
| Solution |
python | anthropics__anthropic-sdk-python | src/anthropic/types/base64_image_source_param.py | {
"start": 372,
"end": 725
} | class ____(TypedDict, total=False):
data: Required[Annotated[Union[str, Base64FileInput], PropertyInfo(format="base64")]]
media_type: Required[Literal["image/jpeg", "image/png", "image/gif", "image/webp"]]
type: Required[Literal["base64"]]
set_pydantic_config(Base64ImageSourceParam, {"arbitrary_types_allowed": True})
| Base64ImageSourceParam |
python | facebook__pyre-check | client/commands/tests/infer_test.py | {
"start": 14390,
"end": 14488
} | class ____:
path: str
infer_output: infer.RawInferOutputForPath
| ExpectedModuleAnnotationItem |
python | tensorflow__tensorflow | tensorflow/python/distribute/parameter_server_strategy_v2.py | {
"start": 26179,
"end": 45492
} | class ____(
parameter_server_strategy.ParameterServerStrategyExtended
):
"""Extended class for ParameterServerStrategyV2.
Please see `tf.distribute.StrategyExtended` doc for more information.
"""
def __init__(
self,
container_strategy,
cluster_resolver: base_cluster_resolver.ClusterResolver,
variable_partitioner,
):
"""Initialization of ParameterServerStrategyV2Extended."""
super(ParameterServerStrategyV2Extended, self).__init__(container_strategy)
self._num_ps = len(cluster_resolver.cluster_spec().as_dict().get("ps", []))
self._num_workers = len(
cluster_resolver.cluster_spec().as_dict().get("worker", [])
)
self._variable_count = 0
self._variable_partitioner = variable_partitioner
# The following two attrs are to verify that `ParameterServerStrategy`
# methods are properly used with a `ClusterCoordinator`.
self._used_with_coordinator = False
self._being_scheduled = False
self._set_num_gpus()
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_gpus_per_worker"
).set(self._num_gpus_per_worker)
# Don't canonicalize the devices here since this code is executed on Chief,
# but we want the reduce evaluation to be done on each worker. Placer will
# automatically choose the right device based on current context.
# TODO(ishark): Use select_cross_device_ops instead.
self._cross_device_ops = cross_device_ops_lib.ReductionToOneDevice(
reduce_to_device="/device:CPU:0"
)
self._cross_device_ops._canonicalize_devices = False # pylint: disable=protected-access
self._allow_run_without_coordinator = False
self._coordinator_creation_lock = threading.Lock()
def _set_num_gpus(self):
devices = config.list_logical_devices("GPU")
per_worker_gpus = {}
for d in devices:
d_spec = tf_device.DeviceSpec.from_string(d.name)
if d_spec.device_type == "GPU" and d_spec.job == "worker":
# TODO(b/167894802): update if worker name is customizable
job_spec = d_spec.replace(device_type=None, device_index=None)
per_worker_gpus[job_spec] = per_worker_gpus.get(job_spec, 0) + 1
num_gpus = 0
for _, count in per_worker_gpus.items():
if num_gpus > 0 and count != num_gpus:
raise ValueError("Mismatched number of GPUs per worker")
num_gpus = count
self._num_gpus_per_worker = num_gpus
logging.info(f"Number of GPUs on workers: {self._num_gpus_per_worker}")
@property
def _num_replicas_in_sync(self):
return self._num_gpus_per_worker or 1
def _create_var_creator(self, next_creator, **kwargs):
aggregation = kwargs.pop("aggregation", vs.VariableAggregation.NONE)
def var_creator(**kwargs):
"""Create an AggregatingVariable."""
# Create and wrap the variable.
v = next_creator(**kwargs)
wrapped_v = ps_values.CachingVariable(v)
wrapped = ps_values.AggregatingVariable(
self._container_strategy(), wrapped_v, aggregation
)
return wrapped
if self._num_replicas_in_sync > 1:
if aggregation not in (
vs.VariableAggregation.NONE,
vs.VariableAggregation.SUM,
vs.VariableAggregation.MEAN,
vs.VariableAggregation.ONLY_FIRST_REPLICA,
):
raise ValueError(
"Invalid variable aggregation mode: "
+ aggregation
+ " for variable: "
+ kwargs["name"]
)
return var_creator
else:
def variable_creator_single_replica(**kwargs):
v = next_creator(**kwargs)
return ps_values.CachingVariable(v)
return variable_creator_single_replica
def _create_per_worker_variable(self, next_creator, **kwargs):
"""Create an unsynced, unaggregated variable on each worker."""
return ps_values.PerWorkerVariable(
self._container_strategy(), next_creator, **kwargs
)
def _create_variable(self, next_creator, **kwargs):
"""Implements StrategyExtendedV2._create_variable.
Creates a `Variable` or a `ShardedVariable`. A `ShardedVariable` will be
created if satisfying all the following criteria:
1. `self._variable_partitioner` results in more than one partition on the
first axis.
2. variable's rank is greater than 0.
3. variable is not colocated with another variable.
Otherwise a `Variable` will be created.
Args:
next_creator: See `variable_scope.variable_creator_scope`; the next
creator in the chain.
**kwargs: Passed through to the next creator.
Returns:
A `Variable` or `ShardedVariable`.
"""
if kwargs.pop("per_worker_variable", False):
logging.info("Creating per worker variable")
return self._create_per_worker_variable(next_creator, **kwargs)
var_creator = self._create_var_creator(next_creator, **kwargs)
if "colocate_with" in kwargs: # Never partition colocated_with variables.
colocate_with = kwargs["colocate_with"]
# Clear the variable scope to avoid possible conflicts between device
# scope and colocation scope.
with ops.device(None):
with ops.colocate_with(colocate_with):
var = var_creator(**kwargs)
logging.debug(
"Creating variable (name:%s, shape:%r) that colocates with %s",
var.name,
var.shape,
kwargs["colocate_with"].name,
)
return var
if self._variable_partitioner is None:
return self._create_variable_round_robin(var_creator, **kwargs)
name = kwargs.get("name", None)
dtype = kwargs.get("dtype", None)
shape = kwargs.get("shape", None)
initial_value = kwargs.get("initial_value", None)
if initial_value is None:
# If we are loading, next_creator will return an UninitializedVariable
v = next_creator(**kwargs)
if not isinstance(v, resource_variable_ops.UninitializedVariable):
raise ValueError(
"It looks like you are using `ParameterServerStrategy` with a"
" `variable_partitioner`, and trying to create a variable without"
" specifying `initial_value`. This is not allowed. Please specify"
" the `initial_value`."
)
elif shape is None or dtype is None:
raise ValueError(
"It looks like you are trying to load a `SavedModel` using "
"`tf.saved_model.load` within a `ParameterServerStrategy` scope, "
"but the `SavedModel` is missing shape or dtype information."
)
else:
def initializer(shape, dtype, **kwargs):
if "partition_shape" in kwargs:
shape = kwargs["partition_shape"]
return array_ops.zeros(shape, dtype)
initial_value = functools.partial(initializer, shape=shape, dtype=dtype)
# Two cases where initial_value can be a callable:
# 1. initial_value is passed as a callable, e.g, an `initializer` class.
# 2. restoring from checkpoint, initial_value is a
# "CheckpointInitialValueCallable".
init_from_fn = callable(initial_value)
if init_from_fn and (shape is None or dtype is None):
init_from_fn = False
initial_value = initial_value()
if not init_from_fn:
# The initial_value is created on coordinator, it will need to be sent to
# ps for variable initialization, which can be inefficient and can
# potentially hit the 2GB limit on protobuf serialization.
initial_value = ops.convert_to_tensor(initial_value, dtype=dtype)
dtype = initial_value.dtype
shape = initial_value.shape
else:
shape = tensor_shape.as_shape(shape)
if shape.rank == 0: # Skip partitioning rank-0 variable.
return self._create_variable_round_robin(var_creator, **kwargs)
num_partitions = self._variable_partitioner(shape=shape, dtype=dtype)
if (
not num_partitions
or num_partitions[0] == 0
or any(v != 1 for v in num_partitions[1:])
):
raise ValueError(
"variable_partitioner must return a list/tuple whose elements are 1"
" besides the first element (non-zero), got: %r" % num_partitions
)
if num_partitions[0] == 1: # no partition
return self._create_variable_round_robin(var_creator, **kwargs)
# Use "div" partition strategy to partition the variable.
num_partitions = min(num_partitions[0], shape[0])
base = shape[0] // num_partitions
extra = shape[0] % num_partitions
# An example: num_partitions=4, shape[0]=10, partitions: [3, 3, 2, 2]
# offsets: [0, 3, 6, 8, 10]
offsets = []
for i in range(num_partitions):
if i == 0:
offsets.append(0)
else:
prev_shard_size = base + (1 if i - 1 < extra else 0)
offsets.append(offsets[i - 1] + prev_shard_size)
offsets.append(shape[0])
def init_shard_fn(shard_index):
if not init_from_fn:
logging.log_if(
logging.WARN,
_INEFFICIENT_INIT_WARNING % name,
shard_index == 0
and shape.num_elements() > _LARGE_VARIABLE_NUM_ELEMENTS,
)
return initial_value[offsets[shard_index] : offsets[shard_index + 1]]
partition_shape = (
offsets[shard_index + 1] - offsets[shard_index],
) + shape[1:]
partition_offset = (offsets[shard_index],) + (0,) * len(shape[1:])
arg_spec = tf_inspect.getfullargspec(initial_value)
if (
"shard_info" not in arg_spec.args
and "shard_info" not in arg_spec.kwonlyargs
):
try:
value = initial_value(
partition_shape=partition_shape, partition_offset=partition_offset
)
except (TypeError, ValueError):
# TypeError: Initializer doesn't accept kwargs
# ValueError: Initializer doesn't accept partition kwargs
# In both cases we go ahead creating the full value and then slice.
value = initial_value()
if value.shape == partition_shape:
# Initializer supports partition: value is the partition value.
return value
else:
# Initializer doesn't support partition: value is the full value
# and needs to be sliced to get the partition value.
logging.log_if(
logging.WARN,
_INEFFICIENT_INIT_WARNING % name,
shard_index == 0
and shape.num_elements() > _LARGE_VARIABLE_NUM_ELEMENTS,
)
return value[offsets[shard_index] : offsets[shard_index + 1]]
else:
# For compatibility with `CheckpointInitialValueCallable`.
return initial_value(
shard_info=trackable.ShardInfo(
shape=tensor_shape.as_shape(partition_shape),
offset=partition_offset,
)
)
var_list = []
for i in range(num_partitions):
kwargs["shape"] = (offsets[i + 1] - offsets[i],) + shape[1:]
kwargs["initial_value"] = lambda: init_shard_fn(i)
if name is not None:
kwargs["name"] = "{}/part_{}".format(name, i)
var_list.append(self._create_variable_round_robin(var_creator, **kwargs))
result = sharded_variable.ShardedVariable(var_list)
return result
def _create_variable_round_robin(self, next_creator, **kwargs):
# Clear the colocation scope to avoid possible conflicts between device
# scope and colocation scope.
with ops.colocate_with(None, ignore_existing=True):
# Explicitly set CPU:0 device for PS in case create variable is called
# inside replica_fn and worker has with GPU:0 scope.
with ops.device(
"/job:ps/task:%d/device:CPU:0" % (self._variable_count % self._num_ps)
):
var = next_creator(**kwargs)
log_method = (
logging.info
if os.getenv("TF_PSS_VERBOSE_VARIABLE_PLACEMENT")
else logging.debug
)
log_method(
"Creating variable (name:%s, shape:%r) on "
"/job:ps/task:%d/device:CPU:0",
var.name,
var.shape,
(self._variable_count % self._num_ps),
)
self._variable_count += 1
return var
def _resource_creator_scope(self):
with self._coordinator_creation_lock:
if not self._container_strategy()._cluster_coordinator: # pylint: disable=protected-access
cluster_coordinator.ClusterCoordinator(
strategy=self._container_strategy()
)
# TODO(wxinyi): We should warn the user of the inefficiency of creating
# `StaticHashTable` inside a `@tf.function`-wrapped `dataset_fn` to be
# distributed with `distribute_datasets_from_function` and
# `create_per_worker_dataset`. This is because the `dataset_fn` does not
# use the same `default_graph` as `scope` to which the
# `resource_creator_stack` belongs. Thus, `StaticHashTable` creation inside
# `dataset_fn` is not intercepted. And since its resource creation under a
# `tf.function` is lifted out, all workers will share the same resource on
# the coordinator which incurs worker-coordinator communication overhead.
def lookup_creator(next_creator, *args, **kwargs):
if load_context.in_load_context():
return ps_values.RestoredDistributedTable(
self._container_strategy(), lambda: next_creator(*args, **kwargs)
) # pylint: disable=protected-access
else:
return ps_values.DistributedTable(
self._container_strategy(), lambda: next_creator(*args, **kwargs)
) # pylint: disable=protected-access
def restored_lookup_creator(next_creator, *args, **kwargs):
return ps_values.RestoredDistributedTable(
self._container_strategy(), lambda: next_creator(*args, **kwargs)
) # pylint: disable=protected-access
return [
ops.resource_creator_scope("StaticHashTable", lookup_creator),
ops.resource_creator_scope(
"RestoredStaticHashTable", restored_lookup_creator
),
]
def _assert_used_with_cluster_coordinator(self):
if (
not self._used_with_coordinator
and not self._allow_run_without_coordinator
):
raise NotImplementedError(
"`tf.distribute.experimental.ParameterServerStrategy` must be used "
"with `tf.distribute.experimental.coordinator.ClusterCoordinator` in "
"a custom training loop. If you are using `Model.fit`, please supply "
"a dataset function directly to a "
"`tf.keras.utils.experimental.DatasetCreator` instead."
)
def _assert_being_scheduled_by_cluster_coordinator(self):
if not self._being_scheduled and not self._allow_run_without_coordinator:
logging.warning(
"A `tf.distribute.experimental.ParameterServerStrategy` method is "
"invoked without using `ClusterCoordinator.schedule`. If you are not "
"tracing a tf.function, this method is possibly executed on the "
"coordinator, which can be slow. To properly dispatch functions to "
"run on workers, methods like `run` or `reduce` should be used "
"within a function passed to `tf.distribute.experimental.coordinator."
"ClusterCoordinator.schedule`."
)
# options is not used right now. But we may want to support options while
# creating InputWorkers in future, similar to MirroredStrategy.
def _input_workers_with_options(self, options=None):
input_workers_devices = (("/device:CPU:0", self.worker_devices),)
return input_lib.InputWorkers(
input_workers_devices, canonicalize_devices=False
)
def _experimental_distribute_dataset(self, dataset, options):
input_workers_devices = self._input_workers_with_options()
# If this DistributedDataset is created outside ClusterCoordinator, i,e,
# outside a tf.function, we don't build its underlying datasets immediately
# until it is passed to ClusterCoordinator.create_per_worker_dataset.
return input_util.get_distributed_dataset(
dataset,
input_workers_devices,
self._container_strategy(),
num_replicas_in_sync=self._num_replicas_in_sync,
options=options,
build=ops.inside_function(),
) # will be built by ClusterCoordinator
def _distribute_datasets_from_function(self, dataset_fn, options):
# There is no synchronization beyond a worker and thus, the number of
# input pipelines in sync is only 1 per worker.
input_pipeline_id_in_sync = 0
num_input_pipelines_in_sync = 1
input_context = distribute_lib.InputContext(
num_input_pipelines=num_input_pipelines_in_sync,
input_pipeline_id=input_pipeline_id_in_sync,
num_replicas_in_sync=self._num_replicas_in_sync,
)
# If this DistributedDatasetFromFunction is created outside
# ClusterCoordinator, i,e, outside a tf.function, we don't build its
# underlying datasets immediately until it is passed to
# ClusterCoordinator.create_per_worker_dataset.
return input_util.get_distributed_datasets_from_function(
dataset_fn,
self._input_workers_with_options(options),
[input_context],
self._container_strategy(),
options=options,
build=ops.inside_function(),
) # will be built by ClusterCoordinator
@property
def worker_devices(self):
num_gpus = self._num_gpus_per_worker
if num_gpus > 0:
compute_devices = tuple("/device:GPU:%d" % (i,) for i in range(num_gpus))
else:
compute_devices = ("/device:CPU:0",)
return compute_devices
def _call_for_each_replica(self, fn, args, kwargs):
self._assert_being_scheduled_by_cluster_coordinator()
return mirrored_run.call_for_each_replica(
self._container_strategy(), fn, args, kwargs
)
def _reduce(self, reduce_op, value):
self._assert_being_scheduled_by_cluster_coordinator()
dst = device_util.current() or self._default_device or "/device:CPU:0"
destinations = device_util.canonicalize_without_job_and_task(dst)
result = self._local_results(
self.reduce_to(reduce_op, value, destinations)
)[0]
return result
def _reduce_to(self, reduce_op, value, destinations, options):
self._assert_being_scheduled_by_cluster_coordinator()
def get_values(x):
if isinstance(x, values.DistributedValues):
return self._cross_device_ops.reduce(
reduce_op, x, destinations=destinations
) # pylint: disable=protected-access
return x
return nest.map_structure(get_values, value)
# The warning that will be logged if the way we initialize sharded variables
# is memory-inefficient.
_INEFFICIENT_INIT_WARNING = (
"Large variable %s is partitioned but not initialized in a "
"memory-efficient way. On each shard, the full value is first being "
"created and then sliced into smaller values. To reduce the memory "
"footprint, explicitly specify `dtype` and `shape` when creating "
"variables, and use `tf.initializers` to initialize the variable. "
"Note that some initializers (e.g., orthogonal) don't support "
"memory-efficient initialization and there is not much you can do here."
)
_LARGE_VARIABLE_NUM_ELEMENTS = 1e9
| ParameterServerStrategyV2Extended |
python | huggingface__transformers | src/transformers/models/wavlm/modeling_wavlm.py | {
"start": 31162,
"end": 32588
} | class ____(nn.Module):
"""Construct the features from raw audio waveform"""
def __init__(self, config):
super().__init__()
if config.feat_extract_norm == "group":
conv_layers = [WavLMGroupNormConvLayer(config, layer_id=0)] + [
WavLMNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1)
]
elif config.feat_extract_norm == "layer":
conv_layers = [WavLMLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)]
else:
raise ValueError(
f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
)
self.conv_layers = nn.ModuleList(conv_layers)
self.gradient_checkpointing = False
self._requires_grad = True
def _freeze_parameters(self):
for param in self.parameters():
param.requires_grad = False
self._requires_grad = False
def forward(self, input_values):
hidden_states = input_values[:, None]
# make sure hidden_states require grad for gradient_checkpointing
if self._requires_grad and self.training:
hidden_states.requires_grad = True
for conv_layer in self.conv_layers:
hidden_states = conv_layer(hidden_states)
return hidden_states
| WavLMFeatureEncoder |
python | celery__celery | t/integration/test_canvas.py | {
"start": 6949,
"end": 42946
} | class ____:
@flaky
def test_simple_chain(self, manager):
c = add.s(4, 4) | add.s(8) | add.s(16)
assert c().get(timeout=TIMEOUT) == 32
@flaky
def test_single_chain(self, manager):
c = chain(add.s(3, 4))()
assert c.get(timeout=TIMEOUT) == 7
@flaky
def test_complex_chain(self, manager):
g = group(add.s(i) for i in range(4))
c = (
add.s(2, 2) | (
add.s(4) | add_replaced.s(8) | add.s(16) | add.s(32)
) | g
)
res = c()
assert res.get(timeout=TIMEOUT) == [64, 65, 66, 67]
@pytest.mark.xfail(raises=TimeoutError, reason="Task is timeout")
def test_group_results_in_chain(self, manager):
# This adds in an explicit test for the special case added in commit
# 1e3fcaa969de6ad32b52a3ed8e74281e5e5360e6
c = (
group(
add.s(1, 2) | group(
add.s(1), add.s(2)
)
)
)
res = c()
assert res.get(timeout=TIMEOUT / 10) == [4, 5]
def test_chain_of_chain_with_a_single_task(self, manager):
sig = signature('any_taskname', queue='any_q')
chain([chain(sig)]).apply_async()
def test_chain_on_error(self, manager):
from .tasks import ExpectedException
if not manager.app.conf.result_backend.startswith('redis'):
raise pytest.skip('Requires redis result backend.')
# Run the chord and wait for the error callback to finish.
c1 = chain(
add.s(1, 2), fail.s(), add.s(3, 4),
)
res = c1()
with pytest.raises(ExpectedException):
res.get(propagate=True)
with pytest.raises(ExpectedException):
res.parent.get(propagate=True)
@flaky
def test_chain_inside_group_receives_arguments(self, manager):
c = (
add.s(5, 6) |
group((add.s(1) | add.s(2), add.s(3)))
)
res = c()
assert res.get(timeout=TIMEOUT) == [14, 14]
@flaky
def test_eager_chain_inside_task(self, manager):
from .tasks import chain_add
prev = chain_add.app.conf.task_always_eager
chain_add.app.conf.task_always_eager = True
chain_add.apply_async(args=(4, 8), throw=True).get()
chain_add.app.conf.task_always_eager = prev
@flaky
def test_group_chord_group_chain(self, manager):
if not manager.app.conf.result_backend.startswith('redis'):
raise pytest.skip('Requires redis result backend.')
redis_connection = get_redis_connection()
redis_connection.delete('redis-echo')
before = group(redis_echo.si(f'before {i}') for i in range(3))
connect = redis_echo.si('connect')
after = group(redis_echo.si(f'after {i}') for i in range(2))
result = (before | connect | after).delay()
result.get(timeout=TIMEOUT)
redis_messages = list(redis_connection.lrange('redis-echo', 0, -1))
before_items = {b'before 0', b'before 1', b'before 2'}
after_items = {b'after 0', b'after 1'}
assert set(redis_messages[:3]) == before_items
assert redis_messages[3] == b'connect'
assert set(redis_messages[4:]) == after_items
redis_connection.delete('redis-echo')
@flaky
def test_group_result_not_has_cache(self, manager):
t1 = identity.si(1)
t2 = identity.si(2)
gt = group([identity.si(3), identity.si(4)])
ct = chain(identity.si(5), gt)
task = group(t1, t2, ct)
result = task.delay()
assert result.get(timeout=TIMEOUT) == [1, 2, [3, 4]]
@flaky
def test_second_order_replace(self, manager):
if not manager.app.conf.result_backend.startswith('redis'):
raise pytest.skip('Requires redis result backend.')
redis_connection = get_redis_connection()
redis_connection.delete('redis-echo')
result = second_order_replace1.delay()
result.get(timeout=TIMEOUT)
redis_messages = list(redis_connection.lrange('redis-echo', 0, -1))
expected_messages = [b'In A', b'In B', b'In/Out C', b'Out B',
b'Out A']
assert redis_messages == expected_messages
@flaky
def test_parent_ids(self, manager, num=10):
assert_ping(manager)
c = chain(ids.si(i=i) for i in range(num))
c.freeze()
res = c()
try:
res.get(timeout=TIMEOUT)
except TimeoutError:
print(manager.inspect().active())
print(manager.inspect().reserved())
print(manager.inspect().stats())
raise
self.assert_ids(res, num - 1)
def assert_ids(self, res, size):
i, root = size, res
while root.parent:
root = root.parent
node = res
while node:
root_id, parent_id, value = node.get(timeout=30)
assert value == i
if node.parent:
assert parent_id == node.parent.id
assert root_id == root.id
node = node.parent
i -= 1
def test_chord_soft_timeout_recuperation(self, manager):
"""Test that if soft timeout happens in task but is managed by task,
chord still get results normally
"""
if not manager.app.conf.result_backend.startswith('redis'):
raise pytest.skip('Requires redis result backend.')
c = chord([
# return 3
add.s(1, 2),
# return 0 after managing soft timeout
delayed_sum_with_soft_guard.s(
[100], pause_time=2
).set(
soft_time_limit=1
),
])
result = c(delayed_sum.s(pause_time=0)).get()
assert result == 3
def test_chain_error_handler_with_eta(self, manager):
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
eta = datetime.now(timezone.utc) + timedelta(seconds=10)
c = chain(
group(
add.s(1, 2),
add.s(3, 4),
),
tsum.s()
).on_error(print_unicode.s()).apply_async(eta=eta)
result = c.get()
assert result == 10
@flaky
def test_groupresult_serialization(self, manager):
"""Test GroupResult is correctly serialized
to save in the result backend"""
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
async_result = build_chain_inside_task.delay()
result = async_result.get()
assert len(result) == 2
assert isinstance(result[0][1], list)
@flaky
def test_chain_of_task_a_group_and_a_chord(self, manager):
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
c = add.si(1, 0)
c = c | group(add.s(1), add.s(1))
c = c | group(tsum.s(), tsum.s())
c = c | tsum.s()
res = c()
assert res.get(timeout=TIMEOUT) == 8
@flaky
def test_chain_of_chords_as_groups_chained_to_a_task_with_two_tasks(self,
manager):
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
c = add.si(1, 0)
c = c | group(add.s(1), add.s(1))
c = c | tsum.s()
c = c | add.s(1)
c = c | group(add.s(1), add.s(1))
c = c | tsum.s()
res = c()
assert res.get(timeout=TIMEOUT) == 12
@flaky
def test_chain_of_chords_with_two_tasks(self, manager):
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
c = add.si(1, 0)
c = c | group(add.s(1), add.s(1))
c = c | tsum.s()
c = c | add.s(1)
c = c | chord(group(add.s(1), add.s(1)), tsum.s())
res = c()
assert res.get(timeout=TIMEOUT) == 12
@flaky
def test_chain_of_a_chord_and_a_group_with_two_tasks(self, manager):
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
c = add.si(1, 0)
c = c | group(add.s(1), add.s(1))
c = c | tsum.s()
c = c | add.s(1)
c = c | group(add.s(1), add.s(1))
res = c()
assert res.get(timeout=TIMEOUT) == [6, 6]
@flaky
def test_chain_of_a_chord_and_a_task_and_a_group(self, manager):
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
c = group(add.s(1, 1), add.s(1, 1))
c = c | tsum.s()
c = c | add.s(1)
c = c | group(add.s(1), add.s(1))
res = c()
assert res.get(timeout=TIMEOUT) == [6, 6]
@flaky
def test_chain_of_a_chord_and_two_tasks_and_a_group(self, manager):
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
c = group(add.s(1, 1), add.s(1, 1))
c = c | tsum.s()
c = c | add.s(1)
c = c | add.s(1)
c = c | group(add.s(1), add.s(1))
res = c()
assert res.get(timeout=TIMEOUT) == [7, 7]
@flaky
def test_chain_of_a_chord_and_three_tasks_and_a_group(self, manager):
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
c = group(add.s(1, 1), add.s(1, 1))
c = c | tsum.s()
c = c | add.s(1)
c = c | add.s(1)
c = c | add.s(1)
c = c | group(add.s(1), add.s(1))
res = c()
assert res.get(timeout=TIMEOUT) == [8, 8]
@pytest.mark.xfail(raises=TimeoutError, reason="Task is timeout")
def test_nested_chain_group_lone(self, manager): # Fails with Redis 5.x
"""
Test that a lone group in a chain completes.
"""
sig = chain(
group(identity.s(42), identity.s(42)), # [42, 42]
)
res = sig.delay()
assert res.get(timeout=TIMEOUT / 10) == [42, 42]
def test_nested_chain_group_mid(self, manager):
"""
Test that a mid-point group in a chain completes.
"""
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
sig = chain(
identity.s(42), # 42
group(identity.s(), identity.s()), # [42, 42]
identity.s(), # [42, 42]
)
res = sig.delay()
assert res.get(timeout=TIMEOUT) == [42, 42]
def test_nested_chain_group_last(self, manager):
"""
Test that a final group in a chain with preceding tasks completes.
"""
sig = chain(
identity.s(42), # 42
group(identity.s(), identity.s()), # [42, 42]
)
res = sig.delay()
assert res.get(timeout=TIMEOUT) == [42, 42]
def test_chain_replaced_with_a_chain_and_a_callback(self, manager):
if not manager.app.conf.result_backend.startswith('redis'):
raise pytest.skip('Requires redis result backend.')
redis_connection = get_redis_connection()
redis_connection.delete('redis-echo')
link_msg = 'Internal chain callback'
c = chain(
identity.s('Hello '),
# The replacement chain will pass its args though
replace_with_chain.s(link_msg=link_msg),
add.s('world'),
)
res = c.delay()
assert res.get(timeout=TIMEOUT) == 'Hello world'
await_redis_echo({link_msg, })
def test_chain_replaced_with_a_chain_and_an_error_callback(self, manager):
if not manager.app.conf.result_backend.startswith('redis'):
raise pytest.skip('Requires redis result backend.')
redis_connection = get_redis_connection()
redis_connection.delete('redis-echo')
link_msg = 'Internal chain errback'
c = chain(
identity.s('Hello '),
replace_with_chain_which_raises.s(link_msg=link_msg),
add.s(' will never be seen :(')
)
res = c.delay()
with pytest.raises(ValueError):
res.get(timeout=TIMEOUT)
await_redis_echo({link_msg, })
def test_chain_with_cb_replaced_with_chain_with_cb(self, manager):
if not manager.app.conf.result_backend.startswith('redis'):
raise pytest.skip('Requires redis result backend.')
redis_connection = get_redis_connection()
redis_connection.delete('redis-echo')
link_msg = 'Internal chain callback'
c = chain(
identity.s('Hello '),
# The replacement chain will pass its args though
replace_with_chain.s(link_msg=link_msg),
add.s('world'),
)
c.link(redis_echo.s())
res = c.delay()
assert res.get(timeout=TIMEOUT) == 'Hello world'
await_redis_echo({link_msg, 'Hello world'})
def test_chain_flattening_keep_links_of_inner_chain(self, manager):
if not manager.app.conf.result_backend.startswith('redis'):
raise pytest.skip('Requires redis result backend.')
redis_connection = get_redis_connection()
link_b_msg = 'link_b called'
link_b_key = 'echo_link_b'
link_b_sig = redis_echo.si(link_b_msg, redis_key=link_b_key)
def link_chain(sig):
sig.link(link_b_sig)
sig.link_error(identity.s('link_ab'))
return sig
inner_chain = link_chain(chain(identity.s('a'), add.s('b')))
flat_chain = chain(inner_chain, add.s('c'))
redis_connection.delete(link_b_key)
res = flat_chain.delay()
assert res.get(timeout=TIMEOUT) == 'abc'
await_redis_echo((link_b_msg,), redis_key=link_b_key)
def test_chain_with_eb_replaced_with_chain_with_eb(
self, manager, subtests
):
if not manager.app.conf.result_backend.startswith('redis'):
raise pytest.skip('Requires redis result backend.')
redis_connection = get_redis_connection()
redis_connection.delete('redis-echo')
inner_link_msg = 'Internal chain errback'
outer_link_msg = 'External chain errback'
c = chain(
identity.s('Hello '),
# The replacement chain will die and break the encapsulating chain
replace_with_chain_which_raises.s(link_msg=inner_link_msg),
add.s('world'),
)
c.link_error(redis_echo.si(outer_link_msg))
res = c.delay()
with subtests.test(msg="Chain fails due to a child task dying"):
with pytest.raises(ValueError):
res.get(timeout=TIMEOUT)
with subtests.test(msg="Chain and child task callbacks are called"):
await_redis_echo({inner_link_msg, outer_link_msg})
def test_replace_chain_with_empty_chain(self, manager):
r = chain(identity.s(1), replace_with_empty_chain.s()).delay()
with pytest.raises(ImproperlyConfigured,
match="Cannot replace with an empty chain"):
r.get(timeout=TIMEOUT)
def test_chain_children_with_callbacks(self, manager, subtests):
if not manager.app.conf.result_backend.startswith("redis"):
raise pytest.skip("Requires redis result backend.")
redis_connection = get_redis_connection()
redis_key = str(uuid.uuid4())
callback = redis_count.si(redis_key=redis_key)
child_task_count = 42
child_sig = identity.si(1337)
child_sig.link(callback)
chain_sig = chain(child_sig for _ in range(child_task_count))
redis_connection.delete(redis_key)
with subtests.test(msg="Chain executes as expected"):
res_obj = chain_sig()
assert res_obj.get(timeout=TIMEOUT) == 1337
with subtests.test(msg="Chain child task callbacks are called"):
await_redis_count(child_task_count, redis_key=redis_key)
redis_connection.delete(redis_key)
    def test_chain_children_with_errbacks(self, manager, subtests):
        """Errbacks linked to chain children fire when a child fails.

        Every child of the chain is a failing task carrying an errback.
        Because a chain stops at the first failure, only the first child
        runs, so exactly one errback invocation is expected.
        """
        if not manager.app.conf.result_backend.startswith("redis"):
            raise pytest.skip("Requires redis result backend.")
        redis_connection = get_redis_connection()
        redis_key = str(uuid.uuid4())
        errback = redis_count.si(redis_key=redis_key)
        child_task_count = 42
        child_sig = fail.si()
        child_sig.link_error(errback)
        chain_sig = chain(child_sig for _ in range(child_task_count))
        redis_connection.delete(redis_key)
        with subtests.test(msg="Chain fails due to a child task dying"):
            res_obj = chain_sig()
            with pytest.raises(ExpectedException):
                res_obj.get(timeout=TIMEOUT)
        with subtests.test(msg="Chain child task errbacks are called"):
            # Only the first child task gets a chance to run and fail
            await_redis_count(1, redis_key=redis_key)
        redis_connection.delete(redis_key)
    def test_chain_with_callback_child_replaced(self, manager, subtests):
        """A chain-level callback still fires once when a child replaces itself."""
        if not manager.app.conf.result_backend.startswith("redis"):
            raise pytest.skip("Requires redis result backend.")
        redis_connection = get_redis_connection()
        # Fresh key per run so concurrent test runs cannot interfere.
        redis_key = str(uuid.uuid4())
        callback = redis_count.si(redis_key=redis_key)
        chain_sig = chain(add_replaced.si(42, 1337), identity.s())
        chain_sig.link(callback)
        redis_connection.delete(redis_key)
        with subtests.test(msg="Chain executes as expected"):
            res_obj = chain_sig()
            assert res_obj.get(timeout=TIMEOUT) == 42 + 1337
        with subtests.test(msg="Callback is called after chain finishes"):
            await_redis_count(1, redis_key=redis_key)
        redis_connection.delete(redis_key)
    def test_chain_with_errback_child_replaced(self, manager, subtests):
        """A chain-level errback still fires once when a replaced chain fails."""
        if not manager.app.conf.result_backend.startswith("redis"):
            raise pytest.skip("Requires redis result backend.")
        redis_connection = get_redis_connection()
        # Fresh key per run so concurrent test runs cannot interfere.
        redis_key = str(uuid.uuid4())
        errback = redis_count.si(redis_key=redis_key)
        chain_sig = chain(add_replaced.si(42, 1337), fail.s())
        chain_sig.link_error(errback)
        redis_connection.delete(redis_key)
        with subtests.test(msg="Chain executes as expected"):
            res_obj = chain_sig()
            with pytest.raises(ExpectedException):
                res_obj.get(timeout=TIMEOUT)
        with subtests.test(msg="Errback is called after chain finishes"):
            await_redis_count(1, redis_key=redis_key)
        redis_connection.delete(redis_key)
    def test_chain_child_with_callback_replaced(self, manager, subtests):
        """A callback linked directly to a self-replacing child is still invoked."""
        if not manager.app.conf.result_backend.startswith("redis"):
            raise pytest.skip("Requires redis result backend.")
        redis_connection = get_redis_connection()
        # Fresh key per run so concurrent test runs cannot interfere.
        redis_key = str(uuid.uuid4())
        callback = redis_count.si(redis_key=redis_key)
        child_sig = add_replaced.si(42, 1337)
        child_sig.link(callback)
        chain_sig = chain(child_sig, identity.s())
        redis_connection.delete(redis_key)
        with subtests.test(msg="Chain executes as expected"):
            res_obj = chain_sig()
            assert res_obj.get(timeout=TIMEOUT) == 42 + 1337
        with subtests.test(msg="Callback is called after chain finishes"):
            await_redis_count(1, redis_key=redis_key)
        redis_connection.delete(redis_key)
    def test_chain_child_with_errback_replaced(self, manager, subtests):
        """An errback linked to a self-replacing, failing child is still invoked."""
        if not manager.app.conf.result_backend.startswith("redis"):
            raise pytest.skip("Requires redis result backend.")
        redis_connection = get_redis_connection()
        # Fresh key per run so concurrent test runs cannot interfere.
        redis_key = str(uuid.uuid4())
        errback = redis_count.si(redis_key=redis_key)
        child_sig = fail_replaced.si()
        child_sig.link_error(errback)
        chain_sig = chain(child_sig, identity.si(42))
        redis_connection.delete(redis_key)
        with subtests.test(msg="Chain executes as expected"):
            res_obj = chain_sig()
            with pytest.raises(ExpectedException):
                res_obj.get(timeout=TIMEOUT)
        with subtests.test(msg="Errback is called after chain finishes"):
            await_redis_count(1, redis_key=redis_key)
        redis_connection.delete(redis_key)
@pytest.mark.xfail(raises=TimeoutError,
reason="Task is timeout instead of returning exception on rpc backend",
strict=False)
def test_task_replaced_with_chain(self, manager):
orig_sig = replace_with_chain.si(42)
res_obj = orig_sig.delay()
assert res_obj.get(timeout=TIMEOUT) == 42
def test_chain_child_replaced_with_chain_first(self, manager):
orig_sig = chain(replace_with_chain.si(42), identity.s())
res_obj = orig_sig.delay()
assert res_obj.get(timeout=TIMEOUT) == 42
def test_chain_child_replaced_with_chain_middle(self, manager):
orig_sig = chain(
identity.s(42), replace_with_chain.s(), identity.s()
)
res_obj = orig_sig.delay()
assert res_obj.get(timeout=TIMEOUT) == 42
@pytest.mark.xfail(raises=TimeoutError,
reason="Task is timeout instead of returning exception on rpc backend",
strict=False)
def test_chain_child_replaced_with_chain_last(self, manager):
orig_sig = chain(identity.s(42), replace_with_chain.s())
res_obj = orig_sig.delay()
assert res_obj.get(timeout=TIMEOUT) == 42
    @pytest.mark.parametrize('redis_key', ['redis-group-ids'])
    def test_chord_header_id_duplicated_on_rabbitmq_msg_duplication(self, manager, subtests, celery_session_app,
                                                                    redis_key):
        """
        When a task that predates a chord in a chain was duplicated by Rabbitmq (for whatever reason),
        the chord header id was not duplicated. This caused the chord header to have a different id.
        This test ensures that the chord header's id preserves itself in face of such an edge case.
        To validate the correct behavior is implemented, we collect the original and duplicated chord header ids
        in redis, to ensure that they are the same.
        """
        try:
            manager.app.backend.ensure_chords_allowed()
        except NotImplementedError as e:
            raise pytest.skip(e.args[0])
        if manager.app.conf.broker_url.startswith('redis'):
            raise pytest.xfail('Redis broker does not duplicate the task (t1)')
        # Republish t1 to cause the chain to be executed twice
        # NOTE(review): the handler is connected for *every* task publish
        # until the disconnect below, not only for t1 — it relies on the
        # republished copies being published after disconnection is moot
        # for this short-lived chain; confirm if the canvas grows.
        @before_task_publish.connect
        def before_task_publish_handler(sender=None, body=None, exchange=None, routing_key=None, headers=None,
                                        properties=None,
                                        declare=None, retry_policy=None, **kwargs):
            """ We want to republish t1 to ensure that the chain is executed twice """
            # Capture the original publish arguments so the duplicate is
            # byte-for-byte comparable to the message the broker would send.
            metadata = {
                'body': body,
                'exchange': exchange,
                'routing_key': routing_key,
                'properties': properties,
                'headers': headers,
            }
            with celery_session_app.producer_pool.acquire(block=True) as producer:
                # Publish t1 to the message broker, just before it's going to be published which causes duplication
                return producer.publish(
                    metadata['body'],
                    exchange=metadata['exchange'],
                    routing_key=metadata['routing_key'],
                    retry=None,
                    retry_policy=retry_policy,
                    serializer='json',
                    delivery_mode=None,
                    headers=headers,
                    **kwargs
                )
        # Clean redis key
        redis_connection = get_redis_connection()
        if redis_connection.exists(redis_key):
            redis_connection.delete(redis_key)
        # Prepare tasks
        t1, t2, t3, t4 = identity.s(42), redis_echo_group_id.s(), identity.s(), identity.s()
        c = chain(t1, chord([t2, t3], t4))
        # Delay chain
        r1 = c.delay()
        r1.get(timeout=TIMEOUT)
        # Cleanup: disconnect before asserting so no further publishes are duplicated.
        before_task_publish.disconnect(before_task_publish_handler)
        with subtests.test(msg='Compare group ids via redis list'):
            # Both executions must have echoed a group id; the ids must match.
            await_redis_list_message_length(2, redis_key=redis_key, timeout=15)
            compare_group_ids_in_redis(redis_key=redis_key)
        # Cleanup
        redis_connection = get_redis_connection()
        redis_connection.delete(redis_key)
def test_chaining_upgraded_chords_pure_groups(self, manager, subtests):
""" This test is built to reproduce the github issue https://github.com/celery/celery/issues/5958
The issue describes a canvas where a chain of groups are executed multiple times instead of once.
This test is built to reproduce the issue and to verify that the issue is fixed.
"""
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
if not manager.app.conf.result_backend.startswith('redis'):
raise pytest.skip('Requires redis result backend.')
redis_connection = get_redis_connection()
redis_key = 'echo_chamber'
c = chain(
# letting the chain upgrade the chord, reproduces the issue in _chord.__or__
group(
redis_echo.si('1', redis_key=redis_key),
redis_echo.si('2', redis_key=redis_key),
redis_echo.si('3', redis_key=redis_key),
),
group(
redis_echo.si('4', redis_key=redis_key),
redis_echo.si('5', redis_key=redis_key),
redis_echo.si('6', redis_key=redis_key),
),
group(
redis_echo.si('7', redis_key=redis_key),
),
group(
redis_echo.si('8', redis_key=redis_key),
),
redis_echo.si('9', redis_key=redis_key),
redis_echo.si('Done', redis_key='Done'),
)
with subtests.test(msg='Run the chain and wait for completion'):
redis_connection.delete(redis_key, 'Done')
c.delay().get(timeout=TIMEOUT)
await_redis_list_message_length(1, redis_key='Done', timeout=10)
with subtests.test(msg='All tasks are executed once'):
actual = [sig.decode('utf-8') for sig in redis_connection.lrange(redis_key, 0, -1)]
expected = [str(i) for i in range(1, 10)]
with subtests.test(msg='All tasks are executed once'):
assert sorted(actual) == sorted(expected)
# Cleanup
redis_connection.delete(redis_key, 'Done')
def test_chaining_upgraded_chords_starting_with_chord(self, manager, subtests):
""" This test is built to reproduce the github issue https://github.com/celery/celery/issues/5958
The issue describes a canvas where a chain of groups are executed multiple times instead of once.
This test is built to reproduce the issue and to verify that the issue is fixed.
"""
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
if not manager.app.conf.result_backend.startswith('redis'):
raise pytest.skip('Requires redis result backend.')
redis_connection = get_redis_connection()
redis_key = 'echo_chamber'
c = chain(
# by manually upgrading the chord to a group, we can reproduce the issue in _chain.__or__
chord(group([redis_echo.si('1', redis_key=redis_key),
redis_echo.si('2', redis_key=redis_key),
redis_echo.si('3', redis_key=redis_key)]),
group([redis_echo.si('4', redis_key=redis_key),
redis_echo.si('5', redis_key=redis_key),
redis_echo.si('6', redis_key=redis_key)])),
group(
redis_echo.si('7', redis_key=redis_key),
),
group(
redis_echo.si('8', redis_key=redis_key),
),
redis_echo.si('9', redis_key=redis_key),
redis_echo.si('Done', redis_key='Done'),
)
with subtests.test(msg='Run the chain and wait for completion'):
redis_connection.delete(redis_key, 'Done')
c.delay().get(timeout=TIMEOUT)
await_redis_list_message_length(1, redis_key='Done', timeout=10)
with subtests.test(msg='All tasks are executed once'):
actual = [sig.decode('utf-8') for sig in redis_connection.lrange(redis_key, 0, -1)]
expected = [str(i) for i in range(1, 10)]
with subtests.test(msg='All tasks are executed once'):
assert sorted(actual) == sorted(expected)
# Cleanup
redis_connection.delete(redis_key, 'Done')
def test_chaining_upgraded_chords_mixed_canvas(self, manager, subtests):
""" This test is built to reproduce the github issue https://github.com/celery/celery/issues/5958
The issue describes a canvas where a chain of groups are executed multiple times instead of once.
This test is built to reproduce the issue and to verify that the issue is fixed.
"""
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
if not manager.app.conf.result_backend.startswith('redis'):
raise pytest.skip('Requires redis result backend.')
redis_connection = get_redis_connection()
redis_key = 'echo_chamber'
c = chain(
chord(group([redis_echo.si('1', redis_key=redis_key),
redis_echo.si('2', redis_key=redis_key),
redis_echo.si('3', redis_key=redis_key)]),
group([redis_echo.si('4', redis_key=redis_key),
redis_echo.si('5', redis_key=redis_key),
redis_echo.si('6', redis_key=redis_key)])),
redis_echo.si('7', redis_key=redis_key),
group(
redis_echo.si('8', redis_key=redis_key),
),
redis_echo.si('9', redis_key=redis_key),
redis_echo.si('Done', redis_key='Done'),
)
with subtests.test(msg='Run the chain and wait for completion'):
redis_connection.delete(redis_key, 'Done')
c.delay().get(timeout=TIMEOUT)
await_redis_list_message_length(1, redis_key='Done', timeout=10)
with subtests.test(msg='All tasks are executed once'):
actual = [sig.decode('utf-8') for sig in redis_connection.lrange(redis_key, 0, -1)]
expected = [str(i) for i in range(1, 10)]
with subtests.test(msg='All tasks are executed once'):
assert sorted(actual) == sorted(expected)
# Cleanup
redis_connection.delete(redis_key, 'Done')
def test_freezing_chain_sets_id_of_last_task(self, manager):
last_task = add.s(2).set(task_id='42')
c = add.s(4) | last_task
assert c.id is None
c.freeze(last_task.id)
assert c.id == last_task.id
@pytest.mark.parametrize(
"group_last_task",
[False, True],
)
def test_chaining_upgraded_chords_mixed_canvas_protocol_2(
self, manager, subtests, group_last_task):
""" This test is built to reproduce the github issue https://github.com/celery/celery/issues/8662
The issue describes a canvas where a chain of groups are executed multiple times instead of once.
This test is built to reproduce the issue and to verify that the issue is fixed.
"""
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
if not manager.app.conf.result_backend.startswith('redis'):
raise pytest.skip('Requires redis result backend.')
redis_connection = get_redis_connection()
redis_key = 'echo_chamber'
c = chain(
group([
redis_echo.si('1', redis_key=redis_key),
redis_echo.si('2', redis_key=redis_key)
]),
group([
redis_echo.si('3', redis_key=redis_key),
redis_echo.si('4', redis_key=redis_key),
redis_echo.si('5', redis_key=redis_key)
]),
group([
redis_echo.si('6', redis_key=redis_key),
redis_echo.si('7', redis_key=redis_key),
redis_echo.si('8', redis_key=redis_key),
redis_echo.si('9', redis_key=redis_key)
]),
redis_echo.si('Done', redis_key='Done') if not group_last_task else
group(redis_echo.si('Done', redis_key='Done')),
)
with subtests.test(msg='Run the chain and wait for completion'):
redis_connection.delete(redis_key, 'Done')
c.delay().get(timeout=TIMEOUT)
await_redis_list_message_length(1, redis_key='Done', timeout=10)
with subtests.test(msg='All tasks are executed once'):
actual = [
sig.decode('utf-8')
for sig in redis_connection.lrange(redis_key, 0, -1)
]
expected = [str(i) for i in range(1, 10)]
with subtests.test(msg='All tasks are executed once'):
assert sorted(actual) == sorted(expected)
# Cleanup
redis_connection.delete(redis_key, 'Done')
    def test_group_in_center_of_chain(self, manager):
        """A group in the middle of a chain nested inside chords completes.

        Builds chords whose body is a chain containing a group and verifies
        the whole canvas is schedulable and produces the expected sum.
        """
        try:
            manager.app.backend.ensure_chords_allowed()
        except NotImplementedError as e:
            raise pytest.skip(e.args[0])
        t1 = chain(tsum.s(), group(add.s(8), add.s(16)), tsum.s() | add.s(32))
        t2 = chord([tsum, tsum], t1)
        t3 = chord([add.s(0, 1)], t2)
        res = t3.apply_async()  # should not raise
        assert res.get(timeout=TIMEOUT) == 60
def test_upgrade_to_chord_inside_chains(self, manager):
if not manager.app.conf.result_backend.startswith("redis"):
raise pytest.skip("Requires redis result backend.")
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
redis_key = str(uuid.uuid4())
group1 = group(redis_echo.si('a', redis_key), redis_echo.si('a', redis_key))
group2 = group(redis_echo.si('a', redis_key), redis_echo.si('a', redis_key))
chord1 = group1 | group2
chain1 = chain(chord1, (redis_echo.si('a', redis_key) | redis_echo.si('b', redis_key)))
chain1.apply_async().get(timeout=TIMEOUT)
redis_connection = get_redis_connection()
actual = redis_connection.lrange(redis_key, 0, -1)
assert actual.count(b'b') == 1
redis_connection.delete(redis_key)
| test_chain |
python | huggingface__transformers | src/transformers/models/seamless_m4t/configuration_seamless_m4t.py | {
"start": 788,
"end": 23521
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`~SeamlessM4TModel`]. It is used to instantiate an
SeamlessM4T model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SeamlessM4T
["facebook/hf-seamless-m4t-medium"](https://huggingface.co/"facebook/hf-seamless-m4t-medium") architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 256102):
Vocabulary size of the SeamlessM4T model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`~SeamlessM4TModel`], [`~SeamlessM4TForTextToSpeech`] or
[`~SeamlessM4TForTextToText`].
t2u_vocab_size (`int`, *optional*, defaults to 10082):
Unit vocabulary size of the SeamlessM4T model. Defines the number of different unit tokens that can be
represented by the `inputs_ids` passed when calling the Text-To-Units sub-model of [`~SeamlessM4TModel`],
[`~SeamlessM4TForSpeechToSpeech`] or [`~SeamlessM4TForTextToSpeech`].
> Parameters shared across sub-models
hidden_size (`int`, *optional*, defaults to 1024):
Dimensionality of the "intermediate" layers in the architecture.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
max_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model text encoder and decoder might ever be used with. Typically set
this to something large just in case (e.g., 512 or 1024 or 2048).
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the model is used as an encoder/decoder or not.
encoder_layerdrop (`float`, *optional*, defaults to 0.05):
The LayerDrop probability for the encoders. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.05):
The LayerDrop probability for the decoders. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
The non-linear activation function (function or string) in the decoder and feed-forward layers. If string,
`"gelu"`, `"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, decoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all attention layers.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for all activation layers in the model.
scale_embedding (`bool`, *optional*, defaults to `True`):
Scale embeddings by diving by sqrt(d_model).
> Text encoder and text decoder specific parameters
encoder_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer text encoder.
encoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text encoder.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text encoder.
decoder_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer text decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text decoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text decoder.
decoder_start_token_id (`int`, *optional*, defaults to 3):
If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token. Only
applied in the text decoder.
max_new_tokens (`int`, *optional*, defaults to 256):
The maximum numbers of text tokens to generate, ignoring the number of tokens in the prompt.
pad_token_id (`int`, *optional*, defaults to 0):
The id of the _padding_ text token. Only applied to the text-decoder model.
bos_token_id (`int`, *optional*, defaults to 2):
The id of the _beginning-of-stream_ text token. Only applied to the text-decoder model.
eos_token_id (`int`, *optional*, defaults to 3):
The id of the _end-of-stream_ text token. Only applied to the text-decoder model.
> Speech encoder specific parameters
speech_encoder_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer speech encoder.
speech_encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer speech encoder.
speech_encoder_intermediate_size (`int`, *optional*, defaults to 4096):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer speech encoder.
speech_encoder_hidden_act (`str` or `function`, *optional*, defaults to `"swish"`):
The non-linear activation function (function or string) in the speech encoder. If string, `"gelu"`,
`"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported.
speech_encoder_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for all layers in the speech encoder.
add_adapter (`bool`, *optional*, defaults to `True`):
Add an adapter layer on top of the speech encoder.
speech_encoder_layerdrop (`float`, *optional*, defaults to 0.1):
The LayerDrop probability for the speech encoder. See the [LayerDrop paper](see
https://huggingface.co/papers/1909.11556) for more details.
feature_projection_input_dim (`int`, *optional*, defaults to 160):
Input dimension of the input feature projection of the speech encoder, i.e the dimension after processing
input audios with [`SeamlessM4TFeatureExtractor`].
num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
embeddings layer of the speech encoder.
num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
Number of groups of 1D convolutional positional embeddings layer of the speech encoder.
adaptor_kernel_size (`int`, *optional*, defaults to 8):
Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
adaptor_stride (`int`, *optional*, defaults to 8):
Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
adaptor_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all layers in the speech adapter.
num_adapter_layers (`int`, *optional*, defaults to 1):
Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is
True`.
position_embeddings_type (`str`, *optional*, defaults to `"relative"`):
Can be specified to `relative` or `rotary` for relative or rotary position embeddings respectively. If left
`None` no relative position embedding is applied. Only applied to the speech encoder.
rotary_embedding_base (`int`, *optional*, defaults to 10000):
If `"rotary"` position embeddings are used, defines the size of the embedding base. Only applied to the
speech encoder.
max_source_positions (`int`, *optional*, defaults to 4096):
if `"relative"` position embeddings are used, defines the maximum source input positions. Only applied to
the speech encoder.
conv_depthwise_kernel_size (`int`, *optional*, defaults to 31):
Kernel size of convolutional depthwise 1D layer in Conformer blocks. Only applied to the speech encoder.
> Text-To-Unit (t2u) model specific parameters
t2u_bos_token_id (`int`, *optional*, defaults to 0):
The id of the _beginning-of-stream_ unit token. Only applied to the text-to-unit seq2seq model.
t2u_pad_token_id (`int`, *optional*, defaults to 1):
The id of the _padding_ unit token. Only applied to the text-to-unit seq2seq model.
t2u_eos_token_id (`int`, *optional*, defaults to 2):
The id of the _end-of-stream_ unit token. Only applied to the text-to-unit seq2seq model.
t2u_decoder_start_token_id (`int`, *optional*, defaults to 2):
If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token. Only
applied to the text-to-unit seq2seq model.
t2u_max_new_tokens (`int`, *optional*, defaults to 1024):
The maximum numbers of unit tokens to generate, ignoring the number of tokens in the prompt. Only applied
to the text-to-unit seq2seq model.
t2u_encoder_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer text-to-unit encoder.
t2u_encoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text-to-unit encoder.
t2u_encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text-to-unit encoder.
t2u_decoder_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer text-to-unit decoder.
t2u_decoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text-to-unit decoder.
t2u_decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text-to-unit decoder.
t2u_max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model text-to-unit component might ever be used with. Typically set
this to something large just in case (e.g., 512 or 1024 or 2048).
> Hifi-Gan Vocoder specific parameters
sampling_rate (`int`, *optional*, defaults to 16000):
The sampling rate at which the output audio will be generated, expressed in hertz (Hz).
upsample_initial_channel (`int`, *optional*, defaults to 512):
The number of input channels into the hifi-gan upsampling network. Applies to the vocoder only.
upsample_rates (`tuple[int]` or `list[int]`, *optional*, defaults to `[5, 4, 4, 2, 2]`):
A tuple of integers defining the stride of each 1D convolutional layer in the vocoder upsampling network.
The length of *upsample_rates* defines the number of convolutional layers and has to match the length of
*upsample_kernel_sizes*. Applies to the vocoder only.
upsample_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[11, 8, 8, 4, 4]`):
A tuple of integers defining the kernel size of each 1D convolutional layer in the vocoder upsampling
network. The length of *upsample_kernel_sizes* defines the number of convolutional layers and has to match
the length of *upsample_rates*. Applies to the vocoder only.
resblock_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[3, 7, 11]`):
A tuple of integers defining the kernel sizes of the vocoder 1D convolutional layers in the multi-receptive
field fusion (MRF) module. Applies to the vocoder only.
resblock_dilation_sizes (`tuple[tuple[int]]` or `list[list[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`):
A nested tuple of integers defining the dilation rates of the vocoder dilated 1D convolutional layers in
the multi-receptive field fusion (MRF) module. Applies to the vocoder only.
leaky_relu_slope (`float`, *optional*, defaults to 0.1):
The angle of the negative slope used by the leaky ReLU activation in the vocoder. Applies to the vocoder
only.
unit_hifi_gan_vocab_size (`int`, *optional*, defaults to 10000):
Vocabulary size of the SeamlessM4T vocoder. Defines the number of different unit tokens that can be
represented by the `inputs_ids` passed when calling the vocoder of [`~SeamlessM4TModel`],
[`~SeamlessM4TForSpeechToSpeech`] or [`~SeamlessM4TForTextToSpeech`].
unit_embed_dim (`int`, *optional*, defaults to 1280):
The projection dimension of the input ids given to the hifi-gan vocoder. Applies to the vocoder only.
lang_embed_dim (`int`, *optional*, defaults to 256):
The projection dimension of the target language given to the hifi-gan vocoder. Applies to the vocoder only.
spkr_embed_dim (`int`, *optional*, defaults to 256):
The projection dimension of the speaker id given to the hifi-gan vocoder. Applies to the vocoder only.
vocoder_num_langs (`int`, *optional*, defaults to 36):
Number of langs supported by the vocoder. Might be different from `t2u_num_langs`.
vocoder_num_spkrs (`int`, *optional*, defaults to 200):
Number of speakers supported by the vocoder.
variance_predictor_kernel_size (`int`, *optional*, defaults to 3):
Kernel size of the duration predictor. Applies to the vocoder only.
var_pred_dropout (`float`, *optional*, defaults to 0.5):
The dropout probability of the duration predictor. Applies to the vocoder only.
vocoder_offset (`int`, *optional*, defaults to 4):
Offset the unit token ids by this number to account for symbol tokens. Applies to the vocoder only.
```python
>>> from transformers import SeamlessM4TModel, SeamlessM4TConfig
>>> # Initializing a SeamlessM4T "facebook/hf-seamless-m4t-medium" style configuration
>>> configuration = SeamlessM4TConfig()
>>> # Initializing a model from the "facebook/hf-seamless-m4t-medium" style configuration
>>> model = SeamlessM4TModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "seamless_m4t"
def __init__(
self,
vocab_size=256102,
t2u_vocab_size=10082,
# shared config
hidden_size=1024,
initializer_range=0.02,
layer_norm_eps=1e-5,
use_cache=True,
max_position_embeddings=1024,
is_encoder_decoder=True,
encoder_layerdrop=0.05,
decoder_layerdrop=0.05,
activation_function="relu",
dropout=0.1,
attention_dropout=0.1,
activation_dropout=0.0,
scale_embedding=True,
# text encoder|decoder
encoder_layers=24,
encoder_ffn_dim=8192,
encoder_attention_heads=16,
decoder_layers=24,
decoder_ffn_dim=8192,
decoder_attention_heads=16,
decoder_start_token_id=3,
max_new_tokens=256,
pad_token_id=0,
bos_token_id=2,
eos_token_id=3,
# speech_encoder
speech_encoder_layers=24,
speech_encoder_attention_heads=16,
speech_encoder_intermediate_size=4096,
speech_encoder_hidden_act="swish",
speech_encoder_dropout=0.0,
add_adapter=True,
speech_encoder_layerdrop=0.1,
feature_projection_input_dim=160,
num_conv_pos_embeddings=128,
num_conv_pos_embedding_groups=16,
adaptor_kernel_size=8,
adaptor_stride=8,
adaptor_dropout=0.1,
num_adapter_layers=1,
position_embeddings_type="relative",
rotary_embedding_base=10000,
max_source_positions=4096,
conv_depthwise_kernel_size=31,
# t2u config
t2u_bos_token_id=0,
t2u_pad_token_id=1,
t2u_eos_token_id=2,
t2u_decoder_start_token_id=2,
t2u_max_new_tokens=1024,
t2u_encoder_layers=6,
t2u_encoder_ffn_dim=8192,
t2u_encoder_attention_heads=16,
t2u_decoder_layers=6,
t2u_decoder_ffn_dim=8192,
t2u_decoder_attention_heads=16,
t2u_max_position_embeddings=2048,
# hifi-gan vocoder config
sampling_rate=16000,
upsample_initial_channel=512,
upsample_rates=[5, 4, 4, 2, 2],
upsample_kernel_sizes=[11, 8, 8, 4, 4],
resblock_kernel_sizes=[3, 7, 11],
resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
leaky_relu_slope=0.1,
# specific to Code Hifi-Gan
unit_hifi_gan_vocab_size=10000,
unit_embed_dim=1280,
lang_embed_dim=256,
spkr_embed_dim=256,
vocoder_num_langs=36,
vocoder_num_spkrs=200,
variance_predictor_kernel_size=3,
var_pred_dropout=0.5,
vocoder_offset=4,
**kwargs,
):
# overall_config
self.vocab_size = vocab_size
self.t2u_vocab_size = t2u_vocab_size
self.hidden_size = hidden_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.max_position_embeddings = max_position_embeddings
self.use_cache = use_cache
self.max_new_tokens = max_new_tokens
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.activation_function = activation_function
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.scale_embedding = scale_embedding
# for proper config init
self.num_attention_heads = decoder_attention_heads
self.num_hidden_layers = decoder_layers
# text|unit encoder|decoder
self.encoder_layers = encoder_layers
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_attention_heads = encoder_attention_heads
self.decoder_layers = decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_attention_heads = decoder_attention_heads
# speech_encoder
self.speech_encoder_layers = speech_encoder_layers
self.speech_encoder_hidden_act = speech_encoder_hidden_act
self.speech_encoder_dropout = speech_encoder_dropout
self.speech_encoder_attention_heads = speech_encoder_attention_heads
self.speech_encoder_layerdrop = speech_encoder_layerdrop
self.speech_encoder_intermediate_size = speech_encoder_intermediate_size
self.feature_projection_input_dim = feature_projection_input_dim
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.adaptor_kernel_size = adaptor_kernel_size
self.adaptor_stride = adaptor_stride
self.adaptor_dropout = adaptor_dropout
self.num_adapter_layers = num_adapter_layers
self.position_embeddings_type = position_embeddings_type
self.rotary_embedding_base = rotary_embedding_base
self.max_source_positions = max_source_positions
self.conv_depthwise_kernel_size = conv_depthwise_kernel_size
self.add_adapter = add_adapter
# t2u config
self.t2u_bos_token_id = t2u_bos_token_id
self.t2u_pad_token_id = t2u_pad_token_id
self.t2u_eos_token_id = t2u_eos_token_id
self.t2u_decoder_start_token_id = t2u_decoder_start_token_id
self.t2u_max_new_tokens = t2u_max_new_tokens
self.t2u_encoder_layers = t2u_encoder_layers
self.t2u_encoder_ffn_dim = t2u_encoder_ffn_dim
self.t2u_encoder_attention_heads = t2u_encoder_attention_heads
self.t2u_decoder_layers = t2u_decoder_layers
self.t2u_decoder_ffn_dim = t2u_decoder_ffn_dim
self.t2u_decoder_attention_heads = t2u_decoder_attention_heads
self.t2u_max_position_embeddings = t2u_max_position_embeddings
# hifi-gan vocoder config
# original parameters specific to Hifi-Gan
self.sampling_rate = sampling_rate
self.upsample_initial_channel = upsample_initial_channel
self.upsample_rates = upsample_rates
self.upsample_kernel_sizes = upsample_kernel_sizes
self.resblock_kernel_sizes = resblock_kernel_sizes
self.resblock_dilation_sizes = resblock_dilation_sizes
self.leaky_relu_slope = leaky_relu_slope
# specific to Code Hifi-Gan
self.unit_hifi_gan_vocab_size = unit_hifi_gan_vocab_size
self.unit_embed_dim = unit_embed_dim
self.lang_embed_dim = lang_embed_dim
self.spkr_embed_dim = spkr_embed_dim
self.vocoder_num_langs = vocoder_num_langs
self.vocoder_num_spkrs = vocoder_num_spkrs
self.variance_predictor_kernel_size = variance_predictor_kernel_size
self.var_pred_dropout = var_pred_dropout
self.vocoder_offset = vocoder_offset
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
decoder_start_token_id=decoder_start_token_id,
is_encoder_decoder=is_encoder_decoder,
max_position_embeddings=max_position_embeddings,
**kwargs,
)
__all__ = ["SeamlessM4TConfig"]
| SeamlessM4TConfig |
python | pytest-dev__pytest | src/_pytest/outcomes.py | {
"start": 2367,
"end": 3037
} | class ____:
"""Exit testing process.
:param reason:
The message to show as the reason for exiting pytest. reason has a default value
only because `msg` is deprecated.
:param returncode:
Return code to be used when exiting pytest. None means the same as ``0`` (no error),
same as :func:`sys.exit`.
:raises pytest.exit.Exception:
The exception that is raised.
"""
Exception: ClassVar[type[Exit]] = Exit
def __call__(self, reason: str = "", returncode: int | None = None) -> NoReturn:
__tracebackhide__ = True
raise Exit(msg=reason, returncode=returncode)
exit: _Exit = _Exit()
| _Exit |
python | pypa__warehouse | tests/functional/manage/test_project_publishing.py | {
"start": 454,
"end": 6530
} | class ____:
@responses.activate
def test_add_github_publisher_to_existing_project(self, webtest):
"""
An authenticated user with project ownership can add a GitHub
trusted publisher to their existing project.
"""
# Arrange: Create a user with a project
user = UserFactory.create(
with_verified_primary_email=True,
with_terms_of_service_agreement=True,
clear_pwd="password",
)
project = ProjectFactory.create(name="existing-project")
RoleFactory.create(user=user, project=project, role_name="Owner")
UserUniqueLoginFactory.create(
user=user, ip_address=REMOTE_ADDR, status=UniqueLoginStatus.CONFIRMED
)
# Mock GitHub API for owner validation
responses.add(
responses.GET,
"https://api.github.com/users/test-owner",
json={
"id": 123456,
"login": "test-owner",
},
status=200,
)
# Act: Log in
login_page = webtest.get("/account/login/", status=HTTPStatus.OK)
login_form = login_page.forms["login-form"]
csrf_token = login_form["csrf_token"].value
login_form["username"] = user.username
login_form["password"] = "password"
# Handle 2FA
two_factor_page = login_form.submit().follow(status=HTTPStatus.OK)
two_factor_form = two_factor_page.forms["totp-auth-form"]
two_factor_form["csrf_token"] = csrf_token
two_factor_form["totp_value"] = (
_get_totp(user.totp_secret).generate(time.time()).decode()
)
two_factor_form.submit().follow(status=HTTPStatus.OK)
# Navigate to project publishing settings page
publishing_page = webtest.get(
f"/manage/project/{project.name}/settings/publishing/",
status=HTTPStatus.OK,
)
# Get logged-in CSRF token
logged_in_csrf_token = publishing_page.html.find(
"input", {"name": "csrf_token"}
)["value"]
# Fill out the GitHub publisher form
github_form = publishing_page.forms["github-publisher-form"]
github_form["csrf_token"] = logged_in_csrf_token
github_form["owner"] = "test-owner"
github_form["repository"] = "test-repo"
github_form["workflow_filename"] = "release.yml"
# Note: No project_name field - this is for an existing project
# Submit the form, redirects back to the same page on success
response = github_form.submit(status=HTTPStatus.SEE_OTHER)
response.follow(status=HTTPStatus.OK)
# Assert: Verify success
# Check flash messages via the JavaScript endpoint
flash_messages = webtest.get(
"/_includes/unauthed/flash-messages/", status=HTTPStatus.OK
)
success_message = flash_messages.html.find(
"span", {"class": "notification-bar__message"}
)
assert success_message is not None
assert "Added" in success_message.text
assert "release.yml" in success_message.text
assert "existing-project" in success_message.text
def test_add_gitlab_publisher_to_existing_project(self, webtest):
"""
An authenticated user with project ownership can add a GitLab
trusted publisher to their existing project.
"""
# Arrange: Create a user with a project
user = UserFactory.create(
with_verified_primary_email=True,
with_terms_of_service_agreement=True,
clear_pwd="password",
)
project = ProjectFactory.create(name="gitlab-project")
RoleFactory.create(user=user, project=project, role_name="Owner")
UserUniqueLoginFactory.create(
user=user, ip_address=REMOTE_ADDR, status=UniqueLoginStatus.CONFIRMED
)
# Act: Log in
login_page = webtest.get("/account/login/", status=HTTPStatus.OK)
login_form = login_page.forms["login-form"]
csrf_token = login_form["csrf_token"].value
login_form["username"] = user.username
login_form["password"] = "password"
# Handle 2FA
two_factor_page = login_form.submit().follow(status=HTTPStatus.OK)
two_factor_form = two_factor_page.forms["totp-auth-form"]
two_factor_form["csrf_token"] = csrf_token
two_factor_form["totp_value"] = (
_get_totp(user.totp_secret).generate(time.time()).decode()
)
two_factor_form.submit().follow(status=HTTPStatus.OK)
# Navigate to project publishing settings page
publishing_page = webtest.get(
f"/manage/project/{project.name}/settings/publishing/",
status=HTTPStatus.OK,
)
# Get logged-in CSRF token
logged_in_csrf_token = publishing_page.html.find(
"input", {"name": "csrf_token"}
)["value"]
# Fill out the GitLab publisher form
gitlab_form = publishing_page.forms["gitlab-publisher-form"]
gitlab_form["csrf_token"] = logged_in_csrf_token
gitlab_form["namespace"] = "gitlab-namespace"
gitlab_form["project"] = "gitlab-repo"
gitlab_form["workflow_filepath"] = ".gitlab-ci.yml"
# Note: issuer_url defaults to https://gitlab.com when not specified
# Submit the form
response = gitlab_form.submit(status=HTTPStatus.SEE_OTHER)
response.follow(status=HTTPStatus.OK)
# Assert: Verify success
# Check flash messages via the JavaScript endpoint
flash_messages = webtest.get(
"/_includes/unauthed/flash-messages/", status=HTTPStatus.OK
)
success_message = flash_messages.html.find(
"span", {"class": "notification-bar__message"}
)
assert success_message is not None
assert "Added" in success_message.text
assert ".gitlab-ci.yml" in success_message.text
assert "gitlab-project" in success_message.text
| TestManageProjectPublishing |
python | pytorch__pytorch | torch/_functorch/partitioners.py | {
"start": 5129,
"end": 118120
} | class ____:
def __repr__(self):
return "Invalid Node"
InvalidNode = InvalidNodeBase()
def _extract_graph_with_inputs_outputs(
joint_graph: fx.Graph,
inputs: list[fx.Node],
outputs: list[fx.Node],
outputs_descs: list[AOTOutput],
subgraph: Optional[str] = None,
ignore_must_be_in_fw_bw: bool = False,
) -> fx.Graph:
"""
Given a graph, extracts out a subgraph that takes the specified nodes as
inputs and returns the specified outputs.
This includes specifying non-placeholder nodes as inputs.
The general strategy is to initialize all inputs with proxies as we
encounter them, and trace through the graph, only keeping values which take
in valid proxies. Then, all dead code is eliminated.
"""
new_graph = fx.Graph()
env = {}
# Add new placeholder nodes in the order specified by the inputs
for node in inputs:
new_node = new_graph.placeholder(node.name)
# Can't use node_copy here as we may be turning previous call_function into placeholders
new_node.meta = node.meta
# pyrefly: ignore [unsupported-operation]
env[node] = new_node
for node in joint_graph.nodes:
if not ignore_must_be_in_fw_bw:
if (
_must_be_in_backward(node)
and subgraph != "backward"
and node not in inputs
):
env[node] = InvalidNode # type: ignore[assignment]
continue
if (
_must_be_in_forward(node)
and subgraph != "forward"
and node not in inputs
):
env[node] = InvalidNode # type: ignore[assignment]
continue
if node in env:
# Node must be one of our inputs. (Any member of env which wasn't an
# input to start must have been created by this loop and won't be in
# joint_graph.nodes).
continue
elif node.op == "placeholder":
env[node] = InvalidNode # type: ignore[assignment]
elif node.op == "call_function":
all_args = pytree.arg_tree_leaves(*node.args, **node.kwargs)
all_args = [
isinstance(env[x], InvalidNodeBase)
for x in all_args
if isinstance(x, fx.Node)
]
if any(all_args):
env[node] = InvalidNode # type: ignore[assignment]
continue
# pyrefly: ignore [unsupported-operation, bad-argument-type]
env[node] = new_graph.node_copy(node, lambda x: env[x])
elif node.op == "get_attr":
# pyrefly: ignore [unsupported-operation, bad-argument-type]
env[node] = new_graph.node_copy(node, lambda x: env[x])
elif node.op == "output":
pass
output_values = []
for x in outputs:
if isinstance(x, fx.Node):
if x not in env:
raise RuntimeError(f"Node {x} couldn't be found in env")
assert not isinstance(env[x], InvalidNodeBase), (
f"Node {x} was invalid, but is output"
)
output_values.append(env[x])
else:
output_values.append(x)
out = new_graph.output(tuple(output_values))
out.meta["desc"] = outputs_descs
new_graph.eliminate_dead_code()
new_graph.lint()
return new_graph
def _is_primal(node: fx.Node) -> bool:
return (
node.op == "placeholder"
and "tangents" not in str(node.target)
and not _is_bwd_seed_offset(node)
and not _is_fwd_seed_offset(node)
)
def _is_tangent(node: fx.Node) -> bool:
return node.op == "placeholder" and "tangents" in str(node.target)
def _is_bwd_seed_offset(node: fx.Node) -> bool:
return node.op == "placeholder" and (
"bwd_seed" in str(node.target) or "bwd_base_offset" in str(node.target)
)
def _is_fwd_seed_offset(node: fx.Node) -> bool:
return node.op == "placeholder" and (
"fwd_seed" in str(node.target) or "fwd_base_offset" in str(node.target)
)
def _is_backward_state(node: fx.Node) -> bool:
return node.op == "placeholder" and isinstance(node.meta.get("val"), BackwardState)
def _has_tag_is_backward(node: fx.Node) -> bool:
return node.meta.get("partitioner_tag", None) == "is_backward"
def _has_tag_must_be_in_forward(node: fx.Node) -> bool:
return node.meta.get("partitioner_tag", None) == "must_be_in_forward"
def _has_tag_must_be_in_backward(node: fx.Node) -> bool:
return node.meta.get("partitioner_tag", None) == "must_be_in_backward"
def _must_be_in_forward(node: fx.Node) -> bool:
if _has_tag_must_be_in_forward(node):
return True
is_mutable = is_with_effects(node) or (
isinstance(node.target, torch._ops.OpOverload)
and node.target._schema.is_mutable
)
return (
not _has_tag_is_backward(node)
and not _has_tag_must_be_in_backward(node)
and is_mutable
)
def _must_be_in_backward(node: fx.Node) -> bool:
if _has_tag_must_be_in_backward(node):
return True
is_mutable = is_with_effects(node) or (
isinstance(node.target, torch._ops.OpOverload)
and node.target._schema.is_mutable
)
return _has_tag_is_backward(node) and is_mutable
def _extract_fwd_bwd_outputs(
joint_module: fx.GraphModule, *, num_fwd_outputs
) -> tuple[list[fx.Node], list[fx.Node], list[AOTOutput], list[AOTOutput]]:
outputs = pytree.arg_tree_leaves(
*(node.args for node in joint_module.graph.find_nodes(op="output"))
)
outputs_descs = pytree.arg_tree_leaves(
next(iter(joint_module.graph.find_nodes(op="output"))).meta.get(
"desc", [None] * len(outputs)
)
)
fwd_outputs = outputs[:num_fwd_outputs]
bwd_outputs = outputs[num_fwd_outputs:]
fwd_outputs_descs = outputs_descs[:num_fwd_outputs]
bwd_outputs_descs = outputs_descs[num_fwd_outputs:]
return fwd_outputs, bwd_outputs, fwd_outputs_descs, bwd_outputs_descs
def _remove_by_name(saved_values: list[fx.Node], name: str):
for saved_value in saved_values:
if saved_value.name == name:
saved_values.remove(saved_value)
break
def find_first_sym_node(
fwd_module_outputs: Union[list[fx.Node], tuple[fx.Node]],
) -> int:
idx = len(fwd_module_outputs)
for i in range(len(fwd_module_outputs) - 1, -1, -1):
if not is_sym_node(fwd_module_outputs[i]):
idx = i + 1
break
return idx
def calculate_quantization_scaling(
graph: torch.fx.Graph,
node: torch.fx.Node,
max: float = 57344.0,
min: float = 1e-12,
position: int = 0,
):
with graph.inserting_after(node):
abs_node = graph.call_function(
torch.ops.aten.abs.default,
args=(node,),
)
abs_node.meta["val"] = torch.ops.aten.abs.default(node.meta["val"])
abs_node.meta["tensor_meta"] = extract_tensor_metadata(abs_node.meta["val"])
with graph.inserting_after(abs_node):
amax_node = graph.call_function(
torch.ops.aten.amax.default,
args=(abs_node, [-1], True),
)
amax_node.meta["val"] = torch.ops.aten.amax.default(
abs_node.meta["val"], [-1], True
)
amax_node.meta["tensor_meta"] = extract_tensor_metadata(amax_node.meta["val"])
with graph.inserting_after(amax_node):
amax_64_node = graph.call_function(
torch.ops.prims.convert_element_type.default,
args=(amax_node, torch.float64),
)
amax_64_node.meta["val"] = torch.ops.prims.convert_element_type.default(
amax_node.meta["val"], torch.float64
)
amax_64_node.meta["tensor_meta"] = extract_tensor_metadata(
amax_64_node.meta["val"]
)
with graph.inserting_after(amax_64_node):
clamp_min_node = graph.call_function(
torch.ops.aten.clamp_min.default,
args=(amax_64_node, min),
)
clamp_min_node.meta["val"] = torch.ops.aten.clamp_min.default(
amax_64_node.meta["val"], min
)
clamp_min_node.meta["tensor_meta"] = extract_tensor_metadata(
clamp_min_node.meta["val"]
)
with graph.inserting_after(clamp_min_node):
reciprocal_node = graph.call_function(
torch.ops.aten.reciprocal.default,
args=(clamp_min_node,),
)
reciprocal_node.meta["val"] = torch.ops.aten.reciprocal.default(
clamp_min_node.meta["val"]
)
reciprocal_node.meta["tensor_meta"] = extract_tensor_metadata(
reciprocal_node.meta["val"]
)
with graph.inserting_after(reciprocal_node):
mul_node = graph.call_function(
torch.ops.aten.mul.Tensor,
args=(reciprocal_node, max),
)
mul_node.meta["val"] = torch.ops.aten.mul.Tensor(
reciprocal_node.meta["val"], max
)
mul_node.meta["tensor_meta"] = extract_tensor_metadata(mul_node.meta["val"])
with graph.inserting_after(mul_node):
scale_node = graph.call_function(
torch.ops.prims.convert_element_type.default,
args=(mul_node, torch.float32),
name=f"fp8_scale_pos_{position}_{node.name}",
)
scale_node.meta["val"] = torch.ops.prims.convert_element_type.default(
mul_node.meta["val"], torch.float32
)
scale_node.meta["tensor_meta"] = extract_tensor_metadata(scale_node.meta["val"])
return scale_node
def perform_quantization(
graph: torch.fx.Graph,
node: torch.fx.Node,
scale_node: torch.fx.Node,
quant_type: torch.dtype,
clamp_min: float,
clamp_max: float,
position: int,
) -> torch.fx.Node:
with graph.inserting_after(scale_node):
target_node_32 = graph.call_function(
torch.ops.prims.convert_element_type.default,
args=(node, torch.float32),
)
target_node_32.meta["val"] = torch.ops.prims.convert_element_type.default(
node.meta["val"], torch.float32
)
target_node_32.meta["tensor_meta"] = extract_tensor_metadata(
target_node_32.meta["val"]
)
with graph.inserting_after(target_node_32):
scaled_target_node = graph.call_function(
torch.ops.aten.mul.Tensor,
args=(target_node_32, scale_node),
)
scaled_target_node.meta["val"] = torch.ops.aten.mul.Tensor(
target_node_32.meta["val"], scale_node.meta["val"]
)
scaled_target_node.meta["tensor_meta"] = extract_tensor_metadata(
scaled_target_node.meta["val"]
)
with graph.inserting_after(scaled_target_node):
clamp_min_scaled_node = graph.call_function(
torch.ops.aten.clamp_min.default,
args=(scaled_target_node, clamp_min),
)
clamp_min_scaled_node.meta["val"] = torch.ops.aten.clamp_min.default(
scaled_target_node.meta["val"], clamp_min
)
clamp_min_scaled_node.meta["tensor_meta"] = extract_tensor_metadata(
clamp_min_scaled_node.meta["val"]
)
with graph.inserting_after(clamp_min_scaled_node):
clamp_max_scaled_node = graph.call_function(
torch.ops.aten.clamp_max.default,
args=(clamp_min_scaled_node, clamp_max),
)
clamp_max_scaled_node.meta["val"] = torch.ops.aten.clamp_max.default(
clamp_min_scaled_node.meta["val"], clamp_max
)
clamp_max_scaled_node.meta["tensor_meta"] = extract_tensor_metadata(
clamp_max_scaled_node.meta["val"]
)
with graph.inserting_after(clamp_max_scaled_node):
quant_activation_node = graph.call_function(
torch.ops.prims.convert_element_type.default,
args=(clamp_max_scaled_node, quant_type),
name=f"fp8_quant_pos_{position}_{node.name}",
)
quant_activation_node.meta["val"] = (
torch.ops.prims.convert_element_type.default(
clamp_max_scaled_node.meta["val"], quant_type
)
)
quant_activation_node.meta["tensor_meta"] = extract_tensor_metadata(
quant_activation_node.meta["val"]
)
return quant_activation_node
def calculate_tensor_size(tensor: torch.Tensor) -> float:
"""
Calculate the size of a PyTorch tensor in megabytes (MB).
Args:
tensor (torch.Tensor): Input tensor
Returns:
float: Memory size in MB
"""
# Get number of elements and size per element
num_elements = tensor.numel()
element_size = tensor.element_size()
return (num_elements * element_size) / (1024 * 1024)
def get_allowed_dtypes() -> list[torch.dtype]:
allowed_dtypes = torch._inductor.config.post_grad_fusion_options[
"activation_quantization_aten_pass"
].get("allowed_dtypes", "torch.bfloat16")
allowed_dtypes = [
getattr(torch, dtype.split(".")[-1]) for dtype in allowed_dtypes.split(";")
]
return allowed_dtypes
def should_quantize(node: torch.fx.Node) -> bool:
allowed_dtypes = get_allowed_dtypes()
if not is_node_meta_valid(node) or node.meta["val"].dtype not in allowed_dtypes:
return False
size_threshold = torch._inductor.config.post_grad_fusion_options[
"activation_quantization_aten_pass"
].get("size_in_mb", 100)
# calculate the size of the node
size_in_mb = calculate_tensor_size(node.meta["val"])
if not torch._inductor.config.post_grad_fusion_options[
"activation_quantization_aten_pass"
].get("skip_dynamo_guards", False):
return size_in_mb >= size_threshold
else:
# case 1: we always quantize tensors with dynamic shapes
if torch._inductor.config.post_grad_fusion_options[
"activation_quantization_aten_pass"
].get("quantize_dynamic_shape", False):
return statically_known_true(
size_in_mb >= size_threshold
) or not statically_known_false(size_in_mb >= size_threshold)
else:
# case 2: we always not quantize tensors with dynamic shapes
return statically_known_true(size_in_mb >= size_threshold)
def get_quant_type() -> torch.dtype:
quant_type = torch._inductor.config.post_grad_fusion_options[
"activation_quantization_aten_pass"
].get("quant_type", "torch.float8_e5m2")
return getattr(torch, quant_type.split(".")[-1])
def calculate_range(dtype: torch.dtype) -> tuple:
"""
Calculate the range of values for a given torch.dtype.
Args:
dtype (torch.dtype): The input dtype.
Returns:
tuple: A tuple containing the minimum and maximum values.
"""
info = torch.finfo(dtype)
return info.min, info.max
def quantize_activation_fw(graph: torch.fx.Graph) -> None:
output = graph.find_nodes(op="output")[0]
fwd_outputs = output.args[0]
quant_type = get_quant_type()
clamp_min, clamp_max = calculate_range(quant_type)
position_to_quant = dict()
tensor_scale_nodes, sym_scale_nodes = [], []
for position, node in enumerate(fwd_outputs):
# check if the activation node is the node saved for quantization
if node.meta.get("saved_for_quantization", False):
# case: use scaling
if torch._inductor.config.post_grad_fusion_options[
"activation_quantization_aten_pass"
].get("use_scaling", True):
# calculating the scale
scale_node = calculate_quantization_scaling(
graph, node, clamp_max, 1e-12, position
)
# converting to fp8
quant_node = perform_quantization(
graph, node, scale_node, quant_type, clamp_min, clamp_max, position
)
if not is_sym_node(scale_node):
tensor_scale_nodes.append(scale_node)
else:
sym_scale_nodes.append(scale_node)
else:
# case: do not use scaling
with graph.inserting_after(node):
quant_node = graph.call_function(
torch.ops.prims.convert_element_type.default,
args=(node, quant_type),
name=f"fp8_quant_pos_{position}_{node.name}",
)
quant_node.meta["val"] = (
torch.ops.prims.convert_element_type.default(
node.meta["val"], quant_type
)
)
quant_node.meta["tensor_meta"] = extract_tensor_metadata(
quant_node.meta["val"]
)
position_to_quant[position] = quant_node
# Use position-based lookup for building output
# only update the return node args, and remain all other users unchanged
output_updated_args = [
position_to_quant.get(i, node) for i, node in enumerate(fwd_outputs)
]
# add the scale nodes to the output find the first sym_node in the output
# pyrefly: ignore [bad-argument-type]
idx = find_first_sym_node(output_updated_args)
scale_nodes = tensor_scale_nodes + sym_scale_nodes
if scale_nodes:
output_updated_args = (
output_updated_args[:idx] + scale_nodes + output_updated_args[idx:]
)
output.update_arg(0, tuple(output_updated_args))
counters["inductor"]["activation_quantization_fwd_aten_pass"] += 1
def quantize_activation_bw(graph: torch.fx.Graph) -> None:
bw_inputs = [node for node in graph.nodes if node.op == "placeholder"]
activation_node = None
for node in bw_inputs:
if node.meta.get("saved_for_quantization", False):
node.meta.pop("saved_for_quantization")
dequant_type = node.meta.pop("dequant_type")
# dequantize the node
if torch._inductor.config.post_grad_fusion_options[
"activation_quantization_aten_pass"
].get("use_scaling", False):
# case: use scaling
with graph.inserting_after(node):
# find corresponding scale node
scale_name = "fp8_scale_" + node.name.replace("fp8_quant_", "")
scale_node = next(
bwd_input
for bwd_input in bw_inputs
if bwd_input.name == scale_name
)
with graph.inserting_after(scale_node):
activation_node = graph.call_function(
torch.ops.prims.convert_element_type.default,
args=(node, dequant_type),
)
activation_node.meta["val"] = (
torch.ops.prims.convert_element_type.default(
node.meta["val"], dequant_type
)
)
activation_node.meta["tensor_meta"] = extract_tensor_metadata(
activation_node.meta["val"]
)
with graph.inserting_after(activation_node):
divided_target_node_32 = graph.call_function(
torch.ops.aten.div.Tensor,
args=(activation_node, scale_node),
)
divided_target_node_32.meta["val"] = torch.ops.aten.div.Tensor(
activation_node.meta["val"], scale_node.meta["val"]
)
divided_target_node_32.meta["tensor_meta"] = (
extract_tensor_metadata(divided_target_node_32.meta["val"])
)
with graph.inserting_after(divided_target_node_32):
dequant_node = graph.call_function(
torch.ops.prims.convert_element_type.default,
args=(divided_target_node_32, dequant_type),
)
dequant_node.meta["val"] = (
torch.ops.prims.convert_element_type.default(
divided_target_node_32.meta["val"], dequant_type
)
)
dequant_node.meta["tensor_meta"] = extract_tensor_metadata(
dequant_node.meta["val"]
)
else:
with graph.inserting_after(node):
dequant_node = graph.call_function(
torch.ops.prims.convert_element_type.default,
args=(node, dequant_type),
name="dequant_" + str(node.name),
)
dequant_node.meta["val"] = (
torch.ops.prims.convert_element_type.default(
node.meta["val"], dequant_type
)
)
dequant_node.meta["tensor_meta"] = extract_tensor_metadata(
dequant_node.meta["val"]
)
# find the users of the node and replace them with the new node except the dequant_node
for user in list(node.users.keys()):
if user != dequant_node and user != activation_node:
user.replace_input_with(node, dequant_node)
counters["inductor"]["activation_quantization_bwd_aten_pass"] += 1
def perform_fp8_activation_quantization(
fwd_module: fx.GraphModule,
bwd_module: fx.GraphModule,
bwd_module_inputs: dict[str, fx.Node],
) -> None:
trace_structured(
"artifact",
metadata_fn=lambda: {
"name": "before_activation_quantization_fwd_aten_pass",
"encoding": "string",
},
payload_fn=lambda: fwd_module.print_readable(
print_output=False, include_stride=True, include_device=True
),
)
quantize_activation_fw(fwd_module.graph)
trace_structured(
"artifact",
metadata_fn=lambda: {
"name": "after_activation_quantization_fwd_aten_pass",
"encoding": "string",
},
payload_fn=lambda: fwd_module.print_readable(
print_output=False, include_stride=True, include_device=True
),
)
trace_structured(
"artifact",
metadata_fn=lambda: {
"name": "before_activation_quantization_bwd_aten_pass",
"encoding": "string",
},
payload_fn=lambda: bwd_module.print_readable(
print_output=False, include_stride=True, include_device=True
),
)
quant_fwd_module_outputs = fwd_module.graph.find_nodes(op="output")[0].args[0]
# update the corresponding bwd_inputs due to the fwd_outputs quantization
for fwd_node in quant_fwd_module_outputs:
if "fp8_quant_" in fwd_node.name:
bwd_input = bwd_module_inputs[
re.sub(r"^fp8_quant_pos_\d+_", "", fwd_node.name)
]
with bwd_module.graph.inserting_after(bwd_input):
quant_bwd_input = bwd_module.graph.placeholder(name=fwd_node.name)
dequant_type = bwd_input.meta["dequant_type"]
quant_bwd_input.meta.update(fwd_node.meta)
quant_bwd_input.meta["saved_for_quantization"] = True
quant_bwd_input.meta["dequant_type"] = dequant_type
bwd_input.replace_all_uses_with(quant_bwd_input)
bwd_module.graph.erase_node(bwd_input)
# update the bwd_inputs if quantization with scaling is used
if torch._inductor.config.post_grad_fusion_options[
"activation_quantization_aten_pass"
].get("use_scaling", True):
quant_bwd_module_inputs = list(bwd_module.graph.find_nodes(op="placeholder"))
# update the corresponding bwd input nodes find the last non-tangent node
bwd_input_loc = quant_bwd_module_inputs[-1]
for bw_input in reversed(quant_bwd_module_inputs):
if not _is_tangent(bw_input):
bwd_input_loc = bw_input
break
scaled_fwd_module_outputs = fwd_module.graph.find_nodes(op="output")[0].args[0]
for fwd_node in scaled_fwd_module_outputs:
if "fp8_scale_" in fwd_node.name:
# fwd node is a scale node
with bwd_module.graph.inserting_after(bwd_input_loc):
scale_bwd_input = bwd_module.graph.placeholder(name=fwd_node.name)
scale_bwd_input.meta.update(fwd_node.meta)
bwd_input_loc = scale_bwd_input
quantize_activation_bw(bwd_module.graph)
trace_structured(
"artifact",
metadata_fn=lambda: {
"name": "after_activation_quantization_bwd_aten_pass",
"encoding": "string",
},
payload_fn=lambda: bwd_module.print_readable(
print_output=False, include_stride=True, include_device=True
),
)
def enable_activation_quantization(
saved_values: list[fx.Node],
fwd_module: fx.GraphModule,
bwd_module: fx.GraphModule,
static_lifetime_input_nodes: Optional[OrderedSet[fx.Node]] = None,
) -> None:
if (
inductor_config.post_grad_fusion_options.get(
"activation_quantization_aten_pass", None
)
is None
):
return
static_input_names = (
[node.name for node in static_lifetime_input_nodes]
if static_lifetime_input_nodes
else []
)
saved_values_names = {node.name: node for node in saved_values}
if torch._inductor.config.post_grad_fusion_options[
"activation_quantization_aten_pass"
].get("exclude_primals", False):
saved_values_names = {
node.name: node for node in saved_values if "primals" not in node.name
}
fwd_module_outputs = fwd_module.graph.find_nodes(op="output")[0].args[0]
bwd_module_inputs = {
node.name: node for node in bwd_module.graph.find_nodes(op="placeholder")
}
should_perform_fp8_quant = False
for node in fwd_module_outputs:
if node.name in saved_values_names and should_quantize(node):
if node.name in static_input_names:
log.debug("Skipping quantization of static input %s: ", node.name)
continue
node.meta["saved_for_quantization"] = True
node.meta["dequant_type"] = node.meta["val"].dtype
# some of the fwd outputs and bwd inputs are not share the same object
bwd_module_inputs[node.name].meta["saved_for_quantization"] = True
bwd_module_inputs[node.name].meta["dequant_type"] = node.meta["val"].dtype
should_perform_fp8_quant = True
if should_perform_fp8_quant:
perform_fp8_activation_quantization(fwd_module, bwd_module, bwd_module_inputs)
def _extract_fwd_bwd_modules(
    joint_module: fx.GraphModule,
    saved_values: list[fx.Node],
    saved_sym_nodes: list[fx.Node],
    *,
    num_fwd_outputs: int,
    static_lifetime_input_nodes: Optional[OrderedSet[fx.Node]] = None,
) -> tuple[fx.GraphModule, fx.GraphModule]:
    """Split the joint graph into forward and backward graph modules.

    Given the tensors (``saved_values``) and symints (``saved_sym_nodes``)
    chosen to be stashed between forward and backward, this prunes saved
    values the backward never uses, binds any free symbols referenced by
    backward inputs, and extracts the final forward/backward graphs. Both
    ``saved_values`` and ``saved_sym_nodes`` are mutated in place to reflect
    the pruning and reordering.

    Args:
        joint_module: Joint forward+backward graph from AOT Autograd tracing.
        saved_values: Tensor nodes to save for backward (mutated in place).
        saved_sym_nodes: Symint nodes to save for backward (mutated in place).
        num_fwd_outputs: Number of user-visible forward outputs.
        static_lifetime_input_nodes: Forwarded to the activation-quantization
            pass.

    Returns:
        Tuple of ``(fwd_module, bwd_module)``.
    """
    fwd_outputs, bwd_outputs, fwd_outputs_descs, bwd_outputs_descs = (
        _extract_fwd_bwd_outputs(joint_module, num_fwd_outputs=num_fwd_outputs)
    )
    placeholders = joint_module.graph.find_nodes(op="placeholder")
    primal_inputs = [*filter(_is_primal, placeholders)]
    tangent_inputs = [*filter(_is_tangent, placeholders)]
    fwd_seed_offset_inputs = [*filter(_is_fwd_seed_offset, placeholders)]
    bwd_seed_offset_inputs = [*filter(_is_bwd_seed_offset, placeholders)]
    backward_state_inputs = [*filter(_is_backward_state, placeholders)]
    # Provisional backward extraction: used only to discover which saved
    # values are actually consumed by the backward. The final backward graph
    # is re-extracted below after pruning.
    bwd_graph = _extract_graph_with_inputs_outputs(
        joint_module.graph,
        saved_sym_nodes + saved_values + tangent_inputs + bwd_seed_offset_inputs,
        bwd_outputs,
        bwd_outputs_descs,
        "backward",
    )
    distributed_enabled = torch.distributed.is_available()
    for node in bwd_graph.find_nodes(op="placeholder"):
        # This is to filter out saved values that don't actually end up being used by the backwards pass
        if not node.users:
            _remove_by_name(saved_values, node.name)
            _remove_by_name(saved_sym_nodes, node.name)
        # wait_tensor is a bit special: if we have a "dead activation" that is not used in the bw,
        # but this dead activation is actually a collective,
        # then the collective will generally by followed by a wait_tensor() call.
        # we need to peak one node further to see if this wait_tensor is dead as well.
        elif distributed_enabled and all(
            n.target is torch.ops._c10d_functional.wait_tensor.default
            and len(n.users) == 0
            for n in node.users
        ):
            _remove_by_name(saved_values, node.name)
            _remove_by_name(saved_sym_nodes, node.name)
        elif _is_backward_state(node):
            # BackwardState is saved directly
            _remove_by_name(saved_values, node.name)
            assert backward_state_inputs
    # Now that we have the finalized list of saved values, we need to ensure
    # we propagate all symbols which are referenced by backwards inputs.
    # These are not directly used in the graph but are required for downstream
    # sizevar assignment
    saved_symbols: OrderedSet[sympy.Symbol] = OrderedSet()
    saved_sym_nodes_binding = []
    saved_sym_nodes_derived = []
    # Some symbols may already be bound in the directly saved_sym_nodes,
    # keep track of them so we don't re-bind them
    for node in saved_sym_nodes:
        symbol = is_symbol_binding_fx_node(node)
        if symbol:
            saved_symbols.add(symbol)
            saved_sym_nodes_binding.append(node)
        else:
            saved_sym_nodes_derived.append(node)
    # Now go through all of the prospective backward inputs and track any
    # other symbols we need to bind
    symbol_bindings = find_symbol_binding_fx_nodes(joint_module.graph)
    for node in itertools.chain(saved_sym_nodes_derived, saved_values, tangent_inputs):
        if "val" not in node.meta:
            continue
        new_symbols = free_symbols(node.meta["val"]) - saved_symbols
        # NB: Deterministic order please!
        for s in sorted(new_symbols, key=lambda s: s.name):
            # NB: For well formed graphs, the symbol should always be present,
            # but we also have ways to produce ill-formed graphs, e.g., direct
            # make_fx usages, so don't choke in this case
            if s not in symbol_bindings:
                continue
            saved_sym_nodes_binding.append(symbol_bindings[s])
        saved_symbols |= new_symbols
    # Update saved_sym_nodes that are now reordered to have all bindings at
    # front. This can also be used later on to figure out the position of saved
    # sym nodes in the output of fwd graph.
    saved_sym_nodes.clear()
    saved_sym_nodes.extend(saved_sym_nodes_binding + saved_sym_nodes_derived)
    # Now, we re-generate the fwd/bwd graphs.
    # NB: This might increase compilation time, but I doubt it matters
    fwd_graph = _extract_graph_with_inputs_outputs(
        joint_module.graph,
        primal_inputs + fwd_seed_offset_inputs,
        fwd_outputs + saved_values + saved_sym_nodes,
        fwd_outputs_descs
        + [
            SavedForBackwardsAOTOutput(i)
            for i in range(len(saved_values) + len(saved_sym_nodes))
        ],
        "forward",
    )
    bwd_graph = _extract_graph_with_inputs_outputs(
        joint_module.graph,
        saved_sym_nodes
        + saved_values
        + tangent_inputs
        + bwd_seed_offset_inputs
        + backward_state_inputs,
        bwd_outputs,
        bwd_outputs_descs,
        "backward",
    )
    fwd_module = fx._lazy_graph_module._make_graph_module(joint_module, fwd_graph)
    bwd_module = fx._lazy_graph_module._make_graph_module(joint_module, bwd_graph)
    enable_activation_quantization(
        saved_values, fwd_module, bwd_module, static_lifetime_input_nodes
    )
    return fwd_module, bwd_module
def default_partition(
    joint_module: fx.GraphModule,
    _joint_inputs,
    *,
    num_fwd_outputs,
    static_lifetime_input_indices: Optional[list[int]] = None,
    static_lifetime_input_nodes: Optional[OrderedSet[fx.Node]] = None,
) -> tuple[fx.GraphModule, fx.GraphModule]:
    """
    Partitions the :attr:`joint_module` in a manner that closely resembles the
    behavior observed in the original ``.forward()`` and ``.backward()`` of the
    callable, i.e., the resulting forward graph contains those operators that
    are executed in the original ``.forward()`` callable passed to
    :func:`aot_function`.

    The default partitioner collects the operators that are between the forward
    inputs and the forward outputs. This helps in finding the tensors which have
    to be stashed for the backward pass. These stashed tensors become the output
    of the generated forward graph. The remaining operators are then placed in
    the backward graph.

    .. warning::
        This API is experimental and likely to change.

    Args:
        joint_module(fx.GraphModule): The joint forward and backward graph. This
            is the result of AOT Autograd tracing.
        _joint_inputs: Unused here; kept for partitioner-interface parity.
        num_fwd_outputs: Number of user-visible forward outputs.
        static_lifetime_input_indices: Forwarded to the min-cut partitioner
            when the graph contains recomputable (AC) regions.
        static_lifetime_input_nodes: Forwarded to
            :func:`_extract_fwd_bwd_modules`.

    Returns:
        Returns the generated forward and backward Fx graph modules.
    """
    # Graphs containing activation-checkpoint regions need the min-cut
    # partitioner to honor the recompute annotations.
    if has_recomputable_ops(joint_module):
        return min_cut_rematerialization_partition(
            joint_module,
            _joint_inputs,
            num_fwd_outputs=num_fwd_outputs,
            static_lifetime_input_indices=static_lifetime_input_indices,
        )
    primal_inputs = list(filter(_is_primal, joint_module.graph.nodes))
    fwd_seed_offset_inputs = list(filter(_is_fwd_seed_offset, joint_module.graph.nodes))
    inputs = primal_inputs + fwd_seed_offset_inputs
    fwd_outputs, bwd_outputs, fwd_outputs_descs, bwd_outputs_descs = (
        _extract_fwd_bwd_outputs(joint_module, num_fwd_outputs=num_fwd_outputs)
    )
    forward_only_graph = _extract_graph_with_inputs_outputs(
        joint_module.graph, inputs, fwd_outputs, fwd_outputs_descs, "forward"
    )
    # Names of nodes that must run in the forward pass.
    forward_node_names = OrderedSet(
        node.name for node in forward_only_graph.nodes if node.op != "output"
    )
    order = {node: idx for idx, node in enumerate(joint_module.graph.nodes)}
    saved_values = []
    saved_sym_nodes = []

    # True if any tensor alias feeding `node` is mutated later in the forward;
    # such a node must be computed (and saved) in the forward, otherwise a
    # backward recompute would read post-mutation values.
    def is_mutated_later_in_fw(node):
        if _has_tag_is_backward(node):
            return False
        tensor_arg_aliases = [
            x
            for x in node.args
            if isinstance(x, fx.Node)
            and "val" in x.meta
            and isinstance(x.meta["val"], torch.Tensor)
        ]
        while len(tensor_arg_aliases) > 0:
            a = tensor_arg_aliases.pop()
            for u in a.users:
                if not isinstance(u.target, torch._ops.OpOverload):
                    continue
                # If we witness a mutation on our node later, and that mutation is not "must be in backward",
                # then our node needs to be computed in the forward (otherwise we will compute it on the mutated values)
                if (
                    # one of the args was mutated
                    u.target._schema.is_mutable
                    # and the mutation happens "later"
                    and order[u] > order[node]
                    # and the mutation happened during the forward
                    and not (_has_tag_is_backward(u) or _has_tag_must_be_in_backward(u))
                ):
                    for idx, alias_info in enumerate(u.target._schema.arguments):
                        if alias_info.is_write and u.args[idx] is a:
                            return True
                elif u.target.is_view:
                    tensor_arg_aliases.append(u)
        return False

    for node in joint_module.graph.nodes:
        if node.name not in forward_node_names:
            # if a node isn't "required" to be in the forward, but any of its arguments
            # are later mutated in the forward, then it must have been run in the forward
            # (if not, and the node's arg was saved for backward, we would have mutated a saved value)
            # NB: doesn't handle nodes where the input is a list of tensors and one of those tensors is later mutated
            if is_mutated_later_in_fw(node):
                saved_values.append(node)
            continue
        # Scalar asserts are cheap to re-run; never save them.
        if node.target is torch.ops.aten._assert_scalar.default:
            continue
        if is_sym_node(node):
            # Symints must be kept separate from tensors so that PythonFunction only calls
            # save_for_backward on tensors and stashes symints in autograd .ctx
            saved_sym_nodes.append(node)
        elif (
            "tensor_meta" not in node.meta
            and node.op == "call_function"
            and not isinstance(node.meta.get("val"), torch._subclasses.FakeTensor)
        ):
            # Since we can't save tuple of tensor values, we need to flatten out what we're saving
            users = node.users
            assert all(user.target is operator.getitem for user in users)
            saved_values.extend(users)
        else:
            backward_usages = [
                n for n in node.users if n.name not in forward_node_names
            ]
            if "tensor_meta" in node.meta and all(
                is_sym_node(n) for n in backward_usages
            ):
                # If we have a tensor in the forward, where only its sizes/strides are needed in the backward,
                # and not the actual tensor data,
                # then it will be a lot cheaper to save only the sizes/strides, and not the actual tensor.
                #
                # Note that saving the tensor could also cause compilation problems:
                # If the user mutated an input in the forward and uses its sizes/strides in the backward,
                # then we would be obligated to clone the input before saving it to appease autograd.
                # (This is how we originally found this bug).
                saved_sym_nodes.extend(backward_usages)
            else:
                saved_values.append(node)
    # Deduplicate while preserving insertion order.
    saved_values = list(dict.fromkeys(saved_values).keys())
    saved_sym_nodes = list(dict.fromkeys(saved_sym_nodes).keys())
    return _extract_fwd_bwd_modules(
        joint_module,
        saved_values,
        saved_sym_nodes=saved_sym_nodes,
        num_fwd_outputs=num_fwd_outputs,
        static_lifetime_input_nodes=static_lifetime_input_nodes,
    )
INT_INF = int(1e6)
def _tensor_nbytes(numel: int, dtype) -> int:
return numel * dtype.itemsize
def _size_of(node: fx.Node) -> int:
    """Estimated size in bytes of the value produced by ``node``.

    Symbolic scalars count as 1 byte; containers are summed element-wise;
    unhinted sizes fall back to a nominal numel of 4096.
    """

    def nbytes_of(obj) -> int:
        # Non-tensor entries contribute nothing to the total.
        if not isinstance(obj, torch.Tensor):
            return 0
        return _tensor_nbytes(hint_int(obj.numel(), fallback=4096), obj.dtype)

    if "val" in node.meta:
        val = node.meta["val"]
        # NB: The fallback values here are meaningless, maybe we should respect
        # torch._inductor.config.unbacked_symint_fallback (but this is a
        # layering violation)
        if isinstance(val, py_sym_types):
            return 1
        if isinstance(val, (list, tuple)):
            return sum(nbytes_of(item) for item in val)
        if isinstance(val, dict):
            return sum(nbytes_of(item) for item in val.values())
        if isinstance(val, torch.Tensor):
            return nbytes_of(val)
        raise RuntimeError(f"Unknown metadata type {type(val)} on node {node}")
    # Attribute fetches and scalar asserts produce no tensor data.
    if node.op == "get_attr" or node.target is torch.ops.aten._assert_scalar.default:
        return 0
    raise RuntimeError(
        f"Node {node} didn't have `val` metadata; we should always have `val` metadata on the nodes."
    )
# Used for some investigative purposes
def _count_ops(graph: fx.Graph):
    """Log a frequency table of call_function targets in ``graph``."""
    from collections import defaultdict

    counts: dict[str, int] = defaultdict(int)
    for node in graph.nodes:
        if node.op == "call_function":
            counts[node.target.__name__] += 1
    # Most frequent ops first.
    log.info("%s", sorted(counts.items(), key=operator.itemgetter(1), reverse=True))
@functools.cache
def pointwise_ops():
    """Return every aten OpOverloadPacket that has a pointwise-tagged overload.

    Computed once (cached). Packets, not overloads, are returned because
    AOT Autograd currently matches on packets.
    """
    packets = []
    for attr_name in dir(torch.ops.aten):
        packet = getattr(torch.ops.aten, attr_name)
        if not isinstance(packet, torch._ops.OpOverloadPacket):
            continue
        if any(
            torch.Tag.pointwise in getattr(packet, overload).tags
            for overload in packet.overloads()
        ):
            packets.append(packet)
    return packets
def sort_depths(args, depth_map: dict[fx.Node, int]) -> list[tuple[fx.Node, int]]:
    """Return (node, depth) pairs for the fx.Node entries of ``args``, deepest first.

    Non-node args are ignored; duplicate nodes are collapsed.
    """
    node_depths = {
        arg: depth_map[arg] for arg in args if isinstance(arg, torch.fx.node.Node)
    }
    return sorted(node_depths.items(), key=lambda pair: pair[1], reverse=True)
def reordering_to_mimic_autograd_engine(gm: fx.GraphModule) -> fx.GraphModule:
    """
    This pass finds the first bwd node in the graph (by looking at users of
    tangents) and then reorders the graph by walking from this node to all the
    way to the end of the graph. At each op in this traversal, we insert this op
    in a new graph and try to bring only the relevant subgraph from the other
    non-bwd edges relevant for this op. This closely mimics the behavior of
    autograd engine.

    Why is this pass required in the first place?

    This is an artifact of how partitioners work today. The starting point of
    partitioner is a joint graph, which is fwd and then bwd graph. In the case
    of checkpointing, we keep portions of fwd graph in their original place in
    the joint graph, while obtaining a bwd graph. As a result, the resulting bwd
    graph has copies of recomputed fwd subgraphs followed by the original bwd
    graph. If we run this naively, this leads to bad memory footprint, because
    the fwd subgraphs are live for way longer duration than necessary. This pass
    reorders the operations such that we prioritize the ops for the original bwd
    graph while only realizing those ops from the fwd graph that are necessary
    at any given point in the graph.
    """
    new_graph = fx.Graph()
    # Maps original nodes to their copies in the new graph; doubles as the
    # "already inserted" set.
    env: dict[fx.Node, fx.Node] = {}

    # Add new placeholder nodes in the order specified by the inputs
    for node in gm.graph.find_nodes(op="placeholder"):
        env[node] = new_graph.node_copy(node, lambda x: env[x])

    # Original topological position of every node; used to preserve relative
    # order when copying dependency subgraphs.
    order = {node: idx for idx, node in enumerate(gm.graph.nodes)}

    # Copy `node` plus any not-yet-copied transitive inputs into the new
    # graph, preserving their original relative order.
    def insert_node_in_graph(node):
        cur_nodes = [node]
        insertable_nodes: OrderedSet[fx.Node] = OrderedSet()
        while len(cur_nodes) > 0:
            node = cur_nodes.pop()
            if node in insertable_nodes or node in env:
                continue
            insertable_nodes.add(node)

            # Bias traversal towards the nodes that have higher depth - prioritizes
            # critical path first.
            cur_nodes += node.all_input_nodes

        # pyrefly: ignore [bad-assignment]
        insertable_nodes = sorted(insertable_nodes, key=lambda n: order[n])
        for node in insertable_nodes:
            env[node] = new_graph.node_copy(node, lambda x: env[x])

    # Find first bwd node in the graph
    tangent_inputs = list(filter(_is_tangent, gm.graph.nodes))
    first_node_in_bwd = None
    minimum_order = math.inf
    for tangent in tangent_inputs:
        for user in tangent.users:
            if order[user] < minimum_order:
                minimum_order = order[user]
                first_node_in_bwd = user

    # If gradInp does not depend upon gradOut, we may not find any nodes in the "backwards pass"
    if first_node_in_bwd is None:
        return gm

    # Build the graph op-by-op by starting from the node all the way to the end
    # copy_ can be not using tangents at all, we must copy it.
    for node in list(gm.graph.nodes)[: order[first_node_in_bwd]]:
        if node.op == "call_function" and node.target is torch.ops.aten.copy_.default:
            insert_node_in_graph(node)
    for node in list(gm.graph.nodes)[order[first_node_in_bwd] :]:
        insert_node_in_graph(node)

    # The output node is already built by the traversal.
    new_gm = torch.fx.GraphModule(gm, new_graph)
    return new_gm
def apply_graphsafe_rng_functionalization(
    fw_module: torch.fx.GraphModule,
    bw_module: torch.fx.GraphModule,
    fw_node: torch.fx.Node,
    bw_node: torch.fx.Node,
    device: torch.device,
    rng_count: int,
    last_fwd_input: torch.fx.Node,
    last_bwd_input: torch.fx.Node,
) -> tuple[torch.fx.Node, torch.fx.Node]:
    """
    Note [CUDA Graph Safe RNG Functionalization]

    CUDA Graph capture doesn't work with get_rng_state and set_rng_state because these functions operate on CPU values,
    while CUDA Graph RNG capture uses on-device CUDA tensors. To solve this, we use graphsafe_set_state with a
    CUDA Generator registered to the CUDA Graph before capture begins. graphsafe_set_state updates the generator's pointer
    to reference a different GeneratorImpl, ensuring subsequent calls are correctly forwarded to the desired generator
    (and its cuda-tensor RNG state during graph capture).

    For each RNG operation's forward/backward pair:
    - We create two generators initialized with identical values
    - Each forward and backward call advances its respective generator equally
    - This keeps generators synchronized so forward and backward operations use matching RNG values

    When forward is called multiple times before backward (causing desynchronization):
    - We save the forward RNG state
    - We update the backward Generator's state before executing backward

    Before each CUDA Graph replay, replay_prologue updates captured RNG pointers with current states, ensuring backward Generator
    changes are reflected during replay.

    This function modifies both forward and backward computation graphs by:
    Creating RNG state placeholders for both passes
    Updating the forward node to use graph-safe RNG state
    Updating the backward node to use graph-safe RNG state

    For more details: https://github.com/pytorch/pytorch/issues/113541

    Args:
        fw_module: Forward graph module (mutated in place).
        bw_module: Backward graph module (mutated in place).
        fw_node: Forward rng op to rewrite.
        bw_node: Its recomputed counterpart in the backward graph.
        device: CUDA device the rng op runs on (must have an index).
        rng_count: Ordinal used to name the new rng-state placeholders.
        last_fwd_input: Last forward placeholder; new ones are appended after it.
        last_bwd_input: Last backward placeholder; new ones are appended after it.

    Returns:
        The (new last forward placeholder, new last backward placeholder),
        so callers can chain insertions across multiple rng ops.
    """
    device_idx = device.index
    assert device_idx is not None
    fw_graph = fw_module.graph
    bw_graph = bw_module.graph
    graphsafe_run_with_rng_state = torch._prims.rng_prims.graphsafe_run_with_rng_state

    # Handle forward pass
    # Note: [Generator arguments in AOTDispatcher]
    # Generator arguments in AOTDispatcher are added to support graphsafe rng
    # functionalization. See note above [CUDA Graph Safe RNG Functionalization]
    with fw_module.graph.inserting_after(last_fwd_input):
        fwd_rng_state = fw_module.graph.placeholder(f"fwd_rng_state_{rng_count}")
        fwd_rng_state.meta["val"] = get_cuda_generator_meta_val(device_idx)
        last_fwd_input = fwd_rng_state

    # Handle backward pass
    with bw_module.graph.inserting_after(last_bwd_input):
        bwd_rng_state = bw_module.graph.placeholder(f"bwd_rng_state_{rng_count}")
        # as above, clone so that meta val generator will not contain tensors
        bwd_rng_state.meta["val"] = get_cuda_generator_meta_val(device_idx)
        last_bwd_input = bwd_rng_state

    # Update forward node: wrap it so it runs with the supplied generator.
    fw_kwargs = dict(fw_node.kwargs)
    fw_kwargs["rng_state"] = fwd_rng_state
    with fw_module.graph.inserting_after(fw_node):
        functional_fw_node = fw_graph.create_node(
            "call_function",
            graphsafe_run_with_rng_state,
            args=(fw_node.target, *fw_node.args),  # type: ignore[arg-type]
            kwargs=fw_kwargs,
        )
    fw_node.replace_all_uses_with(functional_fw_node)
    fw_graph.erase_node(fw_node)

    # Update backward node: same wrapping with the backward generator.
    bwd_kwargs = dict(bw_node.kwargs)
    bwd_kwargs["rng_state"] = bwd_rng_state
    with bw_graph.inserting_before(bw_node):
        rng_output = bw_graph.create_node(
            "call_function",
            graphsafe_run_with_rng_state,
            args=(bw_node.target, *bw_node.args),  # type: ignore[arg-type]
            kwargs=bwd_kwargs,
        )
        bw_node.replace_all_uses_with(rng_output)
        bw_graph.erase_node(bw_node)

    return last_fwd_input, last_bwd_input
def functionalize_rng_ops(
    joint_module: fx.GraphModule,
    fw_module: fx.GraphModule,
    bw_module: fx.GraphModule,
    num_sym_nodes: int,
) -> tuple[fx.GraphModule, fx.GraphModule]:
    """Share RNG state between forward and recomputed backward rng ops.

    During activation checkpointing, a random op recomputed in the backward
    must reproduce the values it produced in the forward. This pass rewrites
    matching fwd/bwd rng ops either to stash-and-restore wrappers (default)
    or, on CUDA when enabled, to CUDA-graph-safe generator-based wrappers
    (see Note [CUDA Graph Safe RNG Functionalization]).

    Args:
        joint_module: Joint graph, used to match fwd ops to their bwd twins.
        fw_module: Forward graph module (mutated in place, recompiled).
        bw_module: Backward graph module (mutated in place, recompiled).
        num_sym_nodes: Number of symint outputs at the tail of the forward
            outputs; new rng-state outputs are inserted just before them.

    Returns:
        The (fw_module, bw_module) pair, rewritten in place.
    """
    # During user-driven activation checkpointing, we have to ensure that a rng
    # op in fwd yields the same output as the recomputed rng op in the bwd. To
    # do this, we use functionalize wrappers to wrap the random ops and share
    # rng state between the fwd and bwd graphs.

    # There are 3 main steps to do this
    # Step 1 - Construct a mapping of rng node between the fwd and its counterpart in bwd.
    # Step 2 - Modify the fwd pass such that
    #   1) Replace rand with run_and_save_rng_state wrapper
    #   2) Replace the users of the original op with the output[1] of this op.
    #   3) Collect all the rng_state - output[0] of each op, and make them
    #   output nodes. Special care needs to be taken here because fwd outputs
    #   has symints at the very end.
    # Step 3 - Modify the bwd pass such that
    #   1) Add the input nodes just before the tangents for the stashed rng states
    #   2) Replace rand with run_with_save_rng_state wrappers
    #   3) Use the stashed states as inputs to these ops

    # Unique id to generate name
    uid = itertools.count()

    def get_rng_ops(gmod):
        # Collect all nondeterministic (rng) call_function nodes, keyed by name.
        random_nodes = {}
        for node in gmod.graph.nodes:
            if (
                node.op == "call_function"
                and hasattr(node.target, "tags")
                and torch.Tag.nondeterministic_seeded in node.target.tags
            ):
                random_nodes[node.name] = node
        return random_nodes

    def get_device(node) -> Optional[torch.device]:
        """
        Check the example value of the node outputs to find the device type.
        """
        if "val" not in node.meta:
            return None

        candidates = node.meta["val"]
        if not isinstance(candidates, tuple):
            candidates = (candidates,)

        for candidate in candidates:
            if isinstance(candidate, torch.Tensor):
                if candidate.device.type == "cuda":
                    return candidate.device

        return torch.device("cpu")

    def get_sample_rng_state(device: Optional[torch.device]):
        # Fake-tensor stand-in for an rng state, used as placeholder meta.
        from torch._guards import detect_fake_mode  # noqa: F401

        fake_mode = detect_fake_mode()
        assert fake_mode is not None
        with fake_mode:
            if device is not None and device.type == "cuda":
                return fake_mode.from_tensor(torch.cuda.get_rng_state())
            return fake_mode.from_tensor(torch.get_rng_state())

    # Step 1 - Construct a mapping of rng node between the fwd and its counterpart in bwd.
    joint_graph_rng_ops = get_rng_ops(joint_module)
    fw_graph_rng_ops = get_rng_ops(fw_module)
    bw_graph_rng_ops = get_rng_ops(bw_module)
    recomputable_rng_ops_map = {}
    for node in joint_module.graph.nodes:
        if (
            must_recompute(node)
            and hasattr(node.target, "tags")
            and torch.Tag.nondeterministic_seeded in node.target.tags
        ):
            base_node = joint_graph_rng_ops[node.name]
            fw_node = fw_graph_rng_ops[node.name]
            bw_node = bw_graph_rng_ops[node.name]
            recomputable_rng_ops_map[base_node] = {"fwd": fw_node, "bwd": bw_node}

    run_and_save_rng = torch._prims.rng_prims.run_and_save_rng_state
    run_with_rng_state = torch._prims.rng_prims.run_with_rng_state
    bw_tangent_start_node = None
    # New rng-state placeholders in the backward must be inserted before the
    # tangents (AOT Autograd relies on that input ordering).
    for node in bw_module.graph.find_nodes(op="placeholder"):
        if "tangent" in node.name:
            bw_tangent_start_node = node
            break
    if bw_tangent_start_node is None:
        raise RuntimeError(
            "Couldn't find tangent node in graph inputs. This is unexpected, please file a bug if you see this"
        )

    fw_rng_state_outputs = []
    last_fwd_input = next(reversed(fw_module.graph.find_nodes(op="placeholder")))
    last_bwd_input = next(reversed(bw_module.graph.find_nodes(op="placeholder")))

    devices = OrderedSet(
        get_device(node_pair["fwd"]) for node_pair in recomputable_rng_ops_map.values()
    )
    # pyrefly: ignore  [unbound-name]
    devices.discard(torch.device("cpu"))
    # multiple cuda devices won't work with cudagraphs anyway,
    # fallback to non graphsafe rng checkpointing
    multi_cuda_devices = len(devices) > 1

    # this changes numerics, so if fallback_random is set we will not use it
    # pyrefly: ignore  [unbound-name]
    ind_config = torch._inductor.config
    use_rng_graphsafe_rng_functionalization = (
        config.graphsafe_rng_functionalization
        and not multi_cuda_devices
        and (
            not ind_config.fallback_random
            or ind_config.test_configs.graphsafe_rng_func_ignores_fallback_random
        )
    )

    for rng_count, node_pair in enumerate(recomputable_rng_ops_map.values()):
        # Step 2 - Modify the fwd pass such that
        fw_node = node_pair["fwd"]
        bw_node = node_pair["bwd"]
        device = get_device(fw_node)
        fw_graph = fw_module.graph
        bw_graph = bw_module.graph
        if (
            use_rng_graphsafe_rng_functionalization
            and device is not None
            and device.type == "cuda"
        ):
            last_fwd_input, last_bwd_input = apply_graphsafe_rng_functionalization(
                fw_module,
                bw_module,
                fw_node,
                bw_node,
                device,
                rng_count,
                last_fwd_input,
                last_bwd_input,
            )
        else:
            with fw_graph.inserting_before(fw_node):
                functional_fw_node = fw_graph.create_node(
                    "call_function",
                    run_and_save_rng,
                    args=(fw_node.target, *fw_node.args),
                    kwargs=fw_node.kwargs,
                )
                # output[0] is the stashed rng state, output[1] is the op result.
                state = fw_graph.create_node(
                    "call_function",
                    operator.getitem,
                    args=(functional_fw_node, 0),
                    kwargs={},
                )
                state.meta["val"] = get_sample_rng_state(device)

                rng_output = fw_graph.create_node(
                    "call_function",
                    operator.getitem,
                    args=(
                        functional_fw_node,
                        1,
                    ),
                    kwargs={},
                )
                # Copy the meta data from the original node
                rng_output.meta = copy.copy(fw_node.meta)
                fw_node.replace_all_uses_with(rng_output)
                fw_graph.erase_node(fw_node)
                fw_rng_state_outputs.append(state)

            # Step 3 - Modify the bwd pass such that
            with bw_graph.inserting_before(bw_tangent_start_node):
                state_name = f"rng_state_output_{next(uid)}"
                bw_rng_state_node = bw_graph.placeholder(state_name)
                bw_rng_state_node.meta["val"] = get_sample_rng_state(device)

            with bw_graph.inserting_before(bw_node):
                rng_output = bw_graph.create_node(
                    "call_function",
                    run_with_rng_state,
                    args=(bw_rng_state_node, bw_node.target, *bw_node.args),
                    kwargs=bw_node.kwargs,
                )

                bw_node.replace_all_uses_with(rng_output)
                bw_graph.erase_node(bw_node)

    # Add the rng states in the output of the fwd graph. AOT Autograd assumes
    # that symints are at the end of forward graph outputs. So, insert the new
    # rng states accordingly.
    if fw_rng_state_outputs:
        fw_output_node = next(iter(fw_module.graph.find_nodes(op="output")))
        fw_outputs = fw_output_node.args[0]
        sym_node_start_idx = len(fw_outputs) - num_sym_nodes
        outputs = (
            fw_outputs[:sym_node_start_idx]
            + tuple(fw_rng_state_outputs)
            + fw_outputs[sym_node_start_idx:]
        )
        fw_module.graph.output(outputs)
        fw_module.graph.erase_node(fw_output_node)
    fw_module.recompile()
    bw_module.recompile()
    return fw_module, bw_module
def force_save_collectives(joint_module: fx.GraphModule) -> None:
    """
    By default, the partitioner is not allowed to recompute collectives
    unless they come from a user-annotated AC region.
    See Note [Recomputing collectives in the partitioner]
    """
    for node in joint_module.graph.nodes:
        is_collective = (
            isinstance(node.target, torch._ops.OpOverload)
            and node.target.namespace == "_c10d_functional"
        )
        # Collectives outside user-annotated recompute regions are pinned
        # to the forward by forcing them to be saved.
        if is_collective and not must_recompute(node):
            node.meta["recompute"] = CheckpointPolicy.MUST_SAVE
def force_save_bw_mutation_src(joint_module: fx.GraphModule) -> None:
    """Force-save sources of values that are copy_-ed in both fwd and bwd.

    If the same primal is mutated in forward and backward, recomputing the
    mutation source in the backward would apply the forward's mutation a
    second time; mark such sources MUST_SAVE instead.
    """
    # If we have mutations of the same primal in forward and backward,
    # We must not recompute the source of mutation to not apply twice.
    has_mutation_in_bw: OrderedSet[torch.fx.Node] = OrderedSet()
    # Walk backwards: the joint graph emits all copy_ nodes at the very end,
    # so only the trailing run of output/copy_ nodes needs inspection.
    for node in reversed(joint_module.graph.nodes):
        if node.op == "output":
            continue

        is_copy_ = node.target is torch.ops.aten.copy_.default
        if is_copy_:
            if _has_tag_must_be_in_backward(node):
                # args[0] is the mutated destination of the copy_.
                has_mutation_in_bw.add(node.args[0])

            if _has_tag_must_be_in_forward(node) and node.args[0] in has_mutation_in_bw:
                # args[1] is the value being copied in; it must not be recomputed.
                node.args[1].meta["recompute"] = CheckpointPolicy.MUST_SAVE
        else:
            # We use invariant of aotdispatch joint graph,
            # That we emit copy_ only in the end of it.
            # We do not want to iterate through all the joint graph,
            # so break at the first non-output, non-copy_ node.
            break
def cleanup_recompute_tags(joint_module: fx.GraphModule) -> fx.GraphModule:
    """
    If there are two consecutive checkpointed blocks with no operator in
    between, we would still want to stash the tensor at the boundary of
    checkpointed blocks. The following pass makes the last output node
    non-recomputable to allow for that.

    Returns the same ``joint_module``, mutated in place.
    """
    for node in joint_module.graph.nodes:
        if must_recompute(node):
            for user in node.users:
                # A user in a *later* AC region means `node` sits on the
                # boundary between two adjacent checkpointed blocks: save it.
                if (
                    must_recompute(user)
                    and "ac_graph_id" in user.meta
                    and "ac_graph_id" in node.meta
                    and user.meta["ac_graph_id"] > node.meta["ac_graph_id"]
                ):
                    node.meta["recompute"] = CheckpointPolicy.MUST_SAVE
            if node.meta.get("has_backward_hook", False) and not any(
                must_recompute(user) for user in node.users
            ):
                # If node is AC region output and has a backward hook on it, we intentionally choose to save it.
                # This is to work around circular dependencies in Traceable FSDP2+AC.
                # Example:
                # ```
                # out = fully_shard(utils.checkpoint(module))(x)
                # norm_out = layer_norm(out)
                # ```
                # Here there is a circular dependency:
                # 1. In backward, grad_input of layer_norm aka. `out_grad` is actually dependent on `out`.
                # 2. `out` depends on `out`'s backward hook created by FSDP2 (which does all-gather for `module` weights)
                #    in order to be recomputed.
                # 3. `out`'s backward hook, as is the case for all eager backward hooks, depends on `out_grad`
                #    -> circular dependency with (1)!
                #
                # Solution: check whether `out` has a backward hook, and if so, intentionally save `out`
                # in forward graph outputs. With this, we can break the above circular dependency.
                node.meta["recompute"] = CheckpointPolicy.MUST_SAVE
    return joint_module
def solve_min_cut(
joint_graph: fx.Graph,
node_info: NodeInfo,
min_cut_options: MinCutOptions,
dont_ban: Optional[OrderedSet[fx.Node]] = None,
):
if dont_ban is None:
dont_ban = OrderedSet()
op_types = get_default_op_list()
if AOT_PARTITIONER_DEBUG:
joint_module_ops = OrderedSet(
str(node.target._overloadpacket)
for node in joint_graph.nodes
if node.op == "call_function" and hasattr(node.target, "_overloadpacket")
)
ops_ignored = joint_module_ops - OrderedSet(
str(i) for i in op_types.recomputable_ops
)
log.info("Ops banned from re-materialization: %s", ops_ignored)
def can_fuse_into_auto_functionalized(a, b):
if b.target != torch.ops.higher_order.auto_functionalized:
return False
mutable_op = b.args[0]
(
mutable_arg_names,
_,
) = torch._higher_order_ops.auto_functionalize.get_mutable_args(mutable_op)
for name in mutable_arg_names:
arg = b.kwargs[name]
if a is arg:
return True
if isinstance(arg, list):
if a in arg:
return True
return False
def can_fuse_into_triton_kernel_wrapper_functional(a, b):
if b.target != torch.ops.higher_order.triton_kernel_wrapper_functional:
return False
mutable_arg_names = b.kwargs["tensors_to_clone"]
for name in mutable_arg_names:
arg = b.kwargs["kwargs"][name]
if a is arg:
return True
return False
def is_fusible(a, b):
# We can perform "memory fusion" into a cat, but cat cannot be a
# producer to a fusion
if get_aten_target(b) == aten.cat:
return True
if can_fuse_into_auto_functionalized(a, b):
return True
if can_fuse_into_triton_kernel_wrapper_functional(a, b):
return True
if (
a.target is operator.getitem
and a.args[0].target
is torch.ops.higher_order.triton_kernel_wrapper_functional
):
# if a is the output of a user triton kernel,
# then (by default) we will not be able to fuse b into it
return False
return op_types.is_fusible(a) and op_types.is_fusible(b)
try:
import networkx as nx
except ImportError as e:
raise RuntimeError(
"Need networkx installed to perform smart recomputation heuristics"
) from e
def is_materialized_backwards(node):
if op_types.is_view(node):
return False
cur_nodes = OrderedSet([node])
while len(cur_nodes) > 0:
cur = cur_nodes.pop()
for user in cur.users:
if not node_info.is_required_fw(user) and not is_fusible(cur, user):
return True
if op_types.is_view(user):
cur_nodes.add(user)
return False
def should_ban_recomputation(node):
if node.op != "call_function":
return False
if node.target is operator.getitem:
return False
if node.meta.get("recompute", None) == CheckpointPolicy.MUST_SAVE:
return True
if config.recompute_views and op_types.is_view(node):
return False
if node.target in [aten.lift_fresh_copy.default, aten.lift_fresh.default]:
return False
if min_cut_options.ban_if_not_in_allowlist:
if not op_types.is_recomputable(node):
return True
else:
if op_types.is_random(node) or op_types.is_compute_intensive(node):
return True
# If a node *must* be materialized in the backwards pass, then we
# should never recompute it. This is a pretty subtle point. In
# general, the assumption we make is that recomputing a node in the
# backwards pass is "free". However, if a node must be materialized
# in the backwards pass, then recomputing it is never free.
if min_cut_options.ban_if_materialized_backward and is_materialized_backwards(
node
):
log.debug("materialized backwards: %s %s", node, tuple(node.users))
return True
# Arbitrary hack that sometimes seems to help things. The above
# modification appears to have made this heuristic a lot less critical
# for performance.
# NB: As of PR #121692, this hack no longer seems necessary.
if node.dist_from_bw < 1000 and node.dist_from_bw > config.max_dist_from_bw:
return True
# If the output of an op is 4x smaller (arbitrary choice),
# then we don't allow recomputation. The idea here is that for
# things like reductions, saving the output of the reduction is very
# cheap/small, and it makes sure we don't do things like recompute
# normalizations in the backwards.
if min_cut_options.ban_if_reduction:
input_tensors_size = sum(
_size_of(i) for i in node.args if isinstance(i, fx.Node)
)
output_size = _size_of(node)
return output_size * 4 < input_tensors_size
return False
def is_materialized(node):
if node.op == "placeholder":
return True
return not all(is_fusible(node, user) for user in node.users)
def get_node_weight(node, static_lifetime_input_nodes) -> float:
if (
config.treat_parameters_as_free_to_save
and node in static_lifetime_input_nodes
):
return 0
mem_sz = _size_of(node)
if config.recompute_views and op_types.is_view(node):
# If `config.recompute_views=True`, we don't save views. This is generally
# a good idea since views are free to recompute, and it makes it a bit simpler
# to analyze.
# NB: If they're not free to recompute (e.g. nested tensors)... I
# think we should modify checks for view_ops to `is_view` and check
# that. Basically, with nested tensors, `aten.view` is not a "view
# op".
return math.inf
if isinstance(node.meta["val"], py_sym_types):
# We never want to save symfloats
if not isinstance(node.meta["val"], torch.SymInt):
return INT_INF
# Heuristic to bias towards nodes closer to the backwards pass
# Complete guess about current value
mem_sz = int(mem_sz * (1.1 ** max(min(node.dist_from_bw, 100), 1)))
if is_materialized(node):
return mem_sz
else:
return mem_sz * 2
nx_graph = nx.DiGraph()
banned_nodes: OrderedSet[fx.Node] = OrderedSet()
def ban_recomputation_if_allowed(node):
if op_types.is_view(node):
return False
if node in dont_ban:
# collectives are *always* banned from recompute, overriding `dont_ban`
# (in particular, the activation memory budget logic is not allowed to recompute collectives)
is_collective = (
isinstance(node.target, torch._ops.OpOverload)
and node.target.namespace == "_c10d_functional"
)
if config.unsafe_allow_optimization_of_collectives or not is_collective:
return False
# This bans recomputation of the node unless we've been forced not to by
# user annotation
if must_recompute(node):
return False
if "val" in node.meta and isinstance(node.meta["val"], torch.SymFloat):
return False
banned_nodes.add(node)
# A node will only ever be recomputed if there is a path from an
# ancestor of this node to the backwards path through this node that
# doesn't go through any saved value. If this node is saved, then that
# condition is not possible.
nx_graph.add_edge("source", node.name + "_in", capacity=math.inf)
return True
for node in joint_graph.nodes:
if node.op == "output":
continue
if node in node_info.required_bw_nodes:
if node not in node_info.inputs:
nx_graph.add_edge(node.name + "_in", "sink", capacity=math.inf)
continue
# If someone saves a input for backward as-is and backward
# returns that tensor as-is as a grad input, then the node x would
# be both a required_bw_node and an input. In this case we
# (1) connect x_in to the source, (2) x_out to the sink, and
# (3) assign the proper weight to the x_in-x_out edge, so that
# x would be part of cut nodes. A case where this happens is if
# NestedTensor saves a offset tensor as part of the singleton int
# in sizes.
nx_graph.add_edge(node.name + "_out", "sink", capacity=math.inf)
if must_recompute(node):
# If user explicitly says they want to recompute a node, we honor it
# by adding an inf-capacity edge from X_in to the sink.
# This way, X_in node is guaranteed to be part of the subgraph that contains "sink"
# after the cut, thus guaranteeing that X op will be recomputed.
nx_graph.add_edge(node.name + "_in", "sink", capacity=math.inf)
continue
if _is_primal(node) or _is_fwd_seed_offset(node):
ban_recomputation_if_allowed(node)
# If a node can't be recomputed (too expensive or involves randomness),
# we prevent it from being recomputed by adding an inf edge to the source
# We only need to ban nodes in the fw pass, as those are the only ones that would be recomputed.
if node_info.is_required_fw(node) and should_ban_recomputation(node):
ban_recomputation_if_allowed(node)
# Checks if a node is actually a tuple. Can be simplified to just an isinstance check if we always use faketensors.
is_non_tensor_node = (
"val" not in node.meta and "tensor_meta" not in node.meta
) or ("val" in node.meta and not isinstance(node.meta["val"], torch.Tensor))
if is_sym_node(node):
weight = float(sym_node_size(node))
elif is_non_tensor_node:
weight = (
0.0 if isinstance(node.meta.get("val"), BackwardState) else math.inf
)
else:
weight = get_node_weight(node, node_info.static_lifetime_input_nodes)
# Creates the weights on the "node" edge
nx_graph.add_edge(node.name + "_in", node.name + "_out", capacity=weight)
for user in node.users:
nx_graph.add_edge(node.name + "_out", user.name + "_in", capacity=math.inf)
# todo(chilli): This is the most questionable of the 3 heuristics for banning recompute.
# Some example models to look at where this helps perf: poolformer_m36,
# mixer_b16_224, cait_m36_384
# The "rough" idea here is that if you have some node that is used by both a
# node nearby downstream as well as a node far downstream, if we recompute
# both of the downstream nodes, we're unlikely to be able to fuse both
# downstream nodes together.
# Thus, we shouldn't aim to recompute far downstream nodes that depend on
# this node. That intuition of "far downstream" is captured by whether
# there's an unfusible op along the chain somewhere
# It could probably be improved by properly analyzing what's going on in the
# backwards pass instead of only relying on whether it's unfusible in the
# forwards.
def find_first_unfusible(start_nodes: list[fx.Node], max_range: int) -> int:
"""
Finds the first unfusible node in the chain of nodes starting from
`start_nodes` and returns its position.
"""
sorted_nodes: list[tuple[int, fx.Node, bool]] = []
for n in start_nodes:
heapq.heappush(sorted_nodes, (node_info.get_fw_order(n), n, True))
while len(sorted_nodes) > 0:
_, node, node_is_fusible = heapq.heappop(sorted_nodes)
if not node_is_fusible:
return node_info.get_fw_order(node)
for user in node.users:
if node_info.is_required_fw(user):
if node_info.get_fw_order(user) > max_range:
continue
val: tuple[int, fx.Node, bool] = (
node_info.get_fw_order(user),
user,
is_fusible(node, user),
)
if val not in sorted_nodes:
heapq.heappush(sorted_nodes, val)
return max_range
if min_cut_options.ban_if_used_far_apart:
for used_node in node_info.required_fw_nodes:
orders = [
node_info.get_fw_order(user)
for user in used_node.users
if node_info.is_required_fw(user)
]
fw_users = [
user for user in used_node.users if node_info.is_required_fw(user)
]
if len(orders) > 0:
first_unfusible_use = find_first_unfusible(fw_users, max(orders))
for user in tuple(used_node.users):
if (
node_info.is_required_fw(user)
and node_info.get_fw_order(user) > first_unfusible_use
and is_fusible(used_node, user)
):
if user in banned_nodes:
continue
log.info(
"used above/below fusible %s:(%s) -> %s -> %s:(%s)",
used_node,
node_info.get_fw_order(used_node),
first_unfusible_use,
user,
node_info.get_fw_order(user),
)
ban_recomputation_if_allowed(user)
# This heuristic is fairly straightforward. The idea is that although it is
# cheap to recompute bandwidth-bound ops, we don't want to end up in a situation
# where we have a long chain of pointwise ops from the beginning to the end
# of the model (like say, residual connections)
# todo: I'm not totally sure why this heuristic matters. It's possible that this is
# working around Inductor fusion decisions, or that it's a patch over
# suboptimal partitioning decisions
# Some models it improves perf on are cait_m36_384, mixer_b16_224, poolformer_m36
if min_cut_options.ban_if_long_fusible_chains:
visited: OrderedSet[fx.Node] = OrderedSet()
for start_node in joint_graph.nodes:
if not node_info.is_required_fw(start_node):
continue
fusible: list[tuple[int, fx.Node]] = [
(node_info.get_fw_order(start_node), start_node)
]
start_order = node_info.get_fw_order(start_node)
while len(fusible) > 0:
_, cur = heapq.heappop(fusible)
if cur in visited:
continue
visited.add(cur)
# 100 is arbitrary choice to try and prevent degenerate cases
if (
node_info.get_fw_order(cur) > start_order + 100
and len(fusible) == 0
):
log.info(
"too long %s %s %s %s",
cur,
start_node,
node_info.get_fw_order(cur),
node_info.get_fw_order(start_node),
)
ban_recomputation_if_allowed(cur)
break
for user in cur.users:
if (
node_info.is_required_fw(user)
and is_fusible(cur, user)
and user not in banned_nodes
):
heapq.heappush(fusible, (node_info.get_fw_order(user), user))
try:
cut_value, partition = nx.minimum_cut(nx_graph, "source", "sink")
except Exception:
log.info("Failed to compute min-cut on following graph:")
log.info("\n".join(nx.readwrite.edgelist.generate_edgelist(nx_graph)))
visualize_min_cut_graph(nx_graph)
raise
reachable, non_reachable = partition
cutset: OrderedSet[tuple[str, str]] = OrderedSet()
for u, nbrs in ((n, nx_graph[n]) for n in reachable):
cutset.update((u, v) for v in nbrs if v in non_reachable)
cut_nodes: OrderedSet[str] = OrderedSet()
for node_in, node_out in cutset:
assert node_in[:-3] == node_out[:-4]
node_name = node_in[:-3]
cut_nodes.add(node_name)
name_to_node = get_name_to_node(joint_graph)
# To make this stuff deterministic
node_idx = {node: idx for idx, node in enumerate(joint_graph.nodes)}
saved_values = sorted(
(name_to_node[node] for node in cut_nodes), key=lambda x: node_idx[x]
)
return saved_values, banned_nodes
def visualize_min_cut_graph(nx_graph):
    """Dump the min-cut flow graph to ``min_cut_failed.svg`` for debugging.

    Each edge is labeled with its capacity, and infinite-capacity
    (un-cuttable) edges are drawn in red so they stand out.
    """
    import networkx as nx
    import pydot

    rendered = pydot.graph_from_dot_data(
        nx.nx_pydot.to_pydot(nx_graph).to_string()
    )[0]  # type: ignore[index]
    for e in rendered.get_edges():
        capacity = nx_graph[e.get_source()][e.get_destination()]["capacity"]
        # Annotate the edge with its capacity so the SVG is self-describing.
        e.set_label(str(capacity))  # type: ignore[union-attr]
        # Infinite capacity means the edge can never be part of the cut.
        if capacity == float("inf"):
            e.set_color("red")  # type: ignore[union-attr]
    log.info("Visualizing the failed graph to min_cut_failed.svg")
    rendered.write_svg("min_cut_failed.svg")  # type: ignore[union-attr]
def get_default_op_list() -> OpTypes:
    """Build the default operator classification used by the partitioner.

    Returns an ``OpTypes`` bundling five (overlapping) operator sets:
    fusible ops, compute-intensive ops, random ops, view ops, and
    recomputable ops. ``solve_min_cut`` consults these sets when deciding
    which forward nodes may be recomputed in the backward pass.
    """
    # Cheap pointwise / elementwise math that is generally free to recompute.
    default_recomputable_ops: list[Callable] = [
        aten.add,
        aten.sub,
        aten.div,
        aten.atan2,
        aten.mul,
        aten.max,
        aten.min,
        aten.pow,
        aten.remainder,
        aten.fmod,
        aten.__and__,
        aten.__or__,
        aten.__xor__,
        aten.__lshift__,
        aten.__rshift__,
        aten.eq,
        aten.ne,
        aten.ge,
        aten.gt,
        aten.le,
        aten.lt,
        aten.abs,
        aten.bitwise_not,
        aten.ceil,
        aten.floor,
        aten.frac,
        aten.neg,
        aten.relu,
        aten.round,
        aten.silu,
        aten.trunc,
        aten.log,
        aten.log10,
        aten.log1p,
        aten.log2,
        aten.lgamma,
        aten.exp,
        aten.expm1,
        aten.erf,
        aten.erfc,
        aten.cos,
        aten.acos,
        aten.cosh,
        aten.sin,
        aten.asin,
        aten.sinh,
        aten.tan,
        aten.atan,
        aten.tanh,
        aten.atanh,
        aten.sqrt,
        aten.rsqrt,
        aten.reciprocal,
        aten.sigmoid,
        aten.softplus,
        aten.threshold,
        aten.threshold_backward,
        aten.clamp,
        aten.where,
        aten.lerp,
        aten.addcmul,
        aten.gelu,
        aten.gelu_backward,
        aten.sum,
        aten.mean,
        aten._grad_sum_to_size,
        aten.sum_to_size,
        aten.amax,
        aten.to,
        aten.type_as,
        operator.getitem,
        aten.squeeze,
        aten.unsqueeze,
        aten.rsub,
        aten._to_copy,
    ]  # noqa: E501,B950
    # View/aliasing ops: metadata-only, so recomputation costs nothing.
    recomputable_view_ops = [aten.squeeze, aten.unsqueeze, aten.alias]
    recomputable_view_ops += [
        aten.view,
        aten.slice,
        aten.t,
        prims.broadcast_in_dim,
        aten.expand,
        aten.as_strided,
        aten.permute,
        aten.select,
        aten.split,
    ]
    view_ops = recomputable_view_ops
    # Additional cheap ops: dtype casts, fills, reductions with tiny outputs,
    # tensor constructors, and misc. shape manipulation.
    default_recomputable_ops += [
        prims.div,
        prims.convert_element_type,
        aten.clone,
        aten._to_copy,
        aten.full_like,
        prims.var,
        prims.sum,
        aten.var,
        aten.std,
        prims.broadcast_in_dim,
        aten.select,
        aten._unsafe_view,
        aten.view,
        aten.expand,
        aten.slice,
        aten.reshape,
        aten.broadcast_tensors,
        aten.scalar_tensor,
        aten.ones,
        aten.new_zeros,
        aten.lift_fresh_copy,
        aten.arange,
        aten.triu,
        aten.var_mean,
        aten.isinf,
        aten.any,
        aten.full,
        aten.as_strided,
        aten.zeros,
        aten.empty,
        aten.empty_like,
        aten.argmax,
        aten.maximum,
        prims.iota,
        prims._low_memory_max_pool_offsets_to_indices,
    ]  # noqa: E501,B950
    # Natalia said that we should allow recomputing indexing :)
    default_recomputable_ops += [aten.index, aten.gather]
    default_recomputable_ops += view_ops
    # All registered pointwise ops plus Python magic-method operators.
    default_recomputable_ops += pointwise_ops()
    default_recomputable_ops += [
        aten.zeros_like,
    ]
    default_recomputable_ops += [method_to_operator(m) for m in magic_methods]
    recomputable_ops = OrderedSet(default_recomputable_ops)
    # Random ops may be recomputed only with functionalized RNG; they are
    # tracked separately from the generic recomputable set.
    random_ops = OrderedSet[Callable[..., Any]](
        [aten.native_dropout, aten.rand_like, aten.randn_like]
    )
    # Expensive ops (matmuls, convs, attention): recomputing these would
    # trade too much compute for memory, so they are always saved.
    compute_intensive_ops = [
        aten.mm,
        aten.convolution,
        aten.convolution_backward,
        aten.bmm,
        aten.addmm,
        aten._scaled_dot_product_flash_attention,
        aten._scaled_dot_product_efficient_attention,
        aten._flash_attention_forward,
        aten._efficient_attention_forward,
        aten.upsample_bilinear2d,
        aten._scaled_mm,
    ]  # noqa: E501,B950
    # Fusibility is approximated as "recomputable or random".
    fusible_ops = recomputable_ops | random_ops
    return OpTypes(
        fusible_ops,
        OrderedSet(compute_intensive_ops),
        random_ops,
        OrderedSet(view_ops),
        recomputable_ops,
    )
def get_name_to_node(graph: fx.Graph) -> dict[str, fx.Node]:
    """Return a mapping from node name to node for every node in *graph*.

    Node names are unique within an fx.Graph, so this is a lossless index
    used to translate name-based results (e.g. min-cut node names) back
    into graph nodes.
    """
    # Idiomatic dict comprehension instead of a manual build-up loop.
    return {node.name: node for node in graph.nodes}
def _optimize_runtime_with_given_memory(
    joint_graph: fx.Graph,
    memory: list[float],
    runtimes: list[float],
    max_memory: float,
    node_info: NodeInfo,
    all_recomputable_banned_nodes: list[fx.Node],
) -> tuple[float, list[int], list[int]]:
    """Solve the save-vs-recompute knapsack under the ``max_memory`` budget.

    Dispatches to the solver named by ``config.activation_memory_budget_solver``
    (or invokes it directly when it is a user-supplied callable) and returns
    ``(expected_runtime, saved_node_idxs, recomputable_node_idxs)``.
    """
    solver = config.activation_memory_budget_solver
    # The built-in solvers all share the (memory, runtimes, budget) signature.
    builtin_solvers = {
        "greedy": greedy_knapsack,
        "ilp": ilp_knapsack,
        "dp": dp_knapsack,
        "dp_knapsack_sliding_hirschberg": dp_knapsack_sliding_hirschberg,
    }
    if isinstance(solver, str) and solver in builtin_solvers:
        return builtin_solvers[solver](memory, runtimes, max_memory)
    if solver == "dynamic_memory_budget_dp":
        log.warning(
            "dynamic_memory_budget_dp is an experimental solver. "
            "It does not guarantee performance improvements. "
            "Additionally, it is not guaranteed to be stable."
        )
        graph_info_provider = GraphInfoProvider.inialize_from_graph(
            joint_graph=joint_graph,
            all_recomputable_banned_nodes=all_recomputable_banned_nodes,
            recorded_knapsack_input_memories=memory,
            recorded_knapsack_input_runtimes=runtimes,
        )
        # Run the DP solver at the auto-detected "knee point" of the
        # memory/runtime tradeoff curve instead of at max_memory itself.
        return dp_knapsack(
            memory,
            runtimes,
            KnapsackEvaluator(
                graph_info_provider=graph_info_provider,
            ).get_knee_point_memory_budget(
                knapsack_algo=dp_knapsack,
                max_mem_budget=max_memory,
            ),
        )
    if callable(solver):
        # User-provided solver: returns the two index lists directly; the
        # expected-runtime slot is reported as 0.0.
        saved_node_idx, recomp_node_idx = solver(
            memory, joint_graph, max_memory, node_info, all_recomputable_banned_nodes
        )
        return (0.0, saved_node_idx, recomp_node_idx)
    raise RuntimeError(f"Not aware of memory budget knapsack solver: {solver}")
from torch.utils._mode_utils import no_dispatch
# replace symbols in size and strides with their hints without guarding.
def _remove_symbols_without_guarding(x: torch.Tensor, fallback: int) -> torch.Tensor:
shape = list(x.shape)
def realize_symbol(d):
return hint_int(d, fallback=fallback)
shape = [realize_symbol(s) for s in shape]
stride = [realize_symbol(s) for s in x.stride()]
return x.new_empty_strided(shape, stride=stride)
def estimate_runtime(node):
    """Estimate the runtime cost of a single fx node.

    The estimator is selected by
    ``config.activation_memory_budget_runtime_estimator``:
    ``"testing"`` returns a constant 1, ``"profile"`` benchmarks the op on
    GPU and returns milliseconds, ``"flops"`` counts FLOPs (clamped to at
    least 1). Any other value raises ``RuntimeError``.
    """
    mode = config.activation_memory_budget_runtime_estimator

    def _concretize(a):
        # Turn fx.Nodes into runnable stand-ins: fake tensors become real
        # empty tensors with hinted shapes/strides, symbolic ints become
        # hints, and symbolic floats/bools become fixed constants. Anything
        # else passes through unchanged.
        if isinstance(a, fx.Node):
            if isinstance(a.meta["val"], torch.Tensor):
                return _remove_symbols_without_guarding(a.meta["val"], fallback=4096)
            if isinstance(a.meta["val"], torch.SymInt):
                return hint_int(a.meta["val"], fallback=4096)
            if isinstance(a.meta["val"], torch.SymFloat):
                return 1.0
            if isinstance(a.meta["val"], torch.SymBool):
                return True
        return a

    if mode == "testing":
        return 1
    if mode == "profile":
        with no_dispatch():
            from torch._inductor.runtime.benchmarking import benchmarker

            args, kwargs = pytree.tree_map(_concretize, (node.args, node.kwargs))
            ms = benchmarker.benchmark_gpu(lambda: node.target(*args, **kwargs))
            return ms
    if mode == "flops":
        # todo(chilli): Normalize this to also return ms
        from torch.utils.flop_counter import FlopCounterMode

        args, kwargs = pytree.tree_map(_concretize, (node.args, node.kwargs))
        with FlopCounterMode(display=False) as counter:
            node.target(*args, **kwargs)
        return max(counter.get_total_flops(), 1)
    raise RuntimeError(f"Not aware of runtime estimator: {mode}")
def choose_saved_values_set(
    joint_graph: fx.Graph,
    node_info: NodeInfo,
    memory_budget=1,
) -> list[fx.Node]:
    """Choose which forward values to save for backward, under a memory budget.

    ``memory_budget`` is a fraction in [0, 1]: 0 means "save only the graph
    inputs" (maximal recomputation), 1 means "use the pure runtime-optimized
    min-cut" (no budget pressure). Intermediate values progressively relax
    the recomputation bans, and finally fall back to a knapsack solve over
    the nodes that were banned from recompute.

    Returns the list of fx nodes to save as activations.
    """
    if memory_budget > 1 or memory_budget < 0:
        raise RuntimeError(
            f"The valid ranges for memory budget are 0 <= m <= 1. The provided value is {memory_budget}"
        )
    min_cut_options = MinCutOptions(
        ban_if_used_far_apart=config.ban_recompute_used_far_apart,
        ban_if_long_fusible_chains=config.ban_recompute_long_fusible_chains,
        ban_if_materialized_backward=config.ban_recompute_materialized_backward,
        ban_if_not_in_allowlist=config.ban_recompute_not_in_allowlist,
        ban_if_reduction=config.ban_recompute_reductions,
    )
    if config.aggressive_recomputation:
        min_cut_options = replace(
            min_cut_options,
            ban_if_used_far_apart=False,
            ban_if_long_fusible_chains=False,
            ban_if_materialized_backward=False,
            ban_if_not_in_allowlist=False,
        )
    # Budget 0: save nothing beyond the inputs themselves.
    if memory_budget == 0:
        return node_info.inputs
    # First pass: the pure runtime-optimized min-cut with all heuristic bans.
    runtime_optimized_saved_values, _ = solve_min_cut(
        joint_graph,
        node_info,
        min_cut_options,
    )
    # return runtime_optimized_saved_values
    if memory_budget == 1:
        return runtime_optimized_saved_values

    def estimate_activations_size(saved_values: list[fx.Node]) -> float:
        # Total activation size in GB.
        return sum(map(_size_of, saved_values)) / 1e9

    # min/max activation sizes bracket the achievable range; the budget
    # fraction is interpreted relative to this range.
    min_act_size = estimate_activations_size(node_info.inputs)
    max_act_size = estimate_activations_size(runtime_optimized_saved_values)
    # The optimized choice is smaller than the inputs anyways
    if max_act_size <= min_act_size:
        return runtime_optimized_saved_values

    def get_normalized_size(sz):
        # Normalize a byte size into the [min_act_size, max_act_size] scale.
        return (sz / 1e9) / (max_act_size - min_act_size)

    def get_mem_ratio(activations: list[fx.Node]):
        # Fraction of the achievable memory range used by `activations`.
        return (estimate_activations_size(activations) - min_act_size) / (
            max_act_size - min_act_size
        )

    # Second pass: drop the softer heuristic bans and re-solve; accept if it
    # already fits the budget.
    more_aggressive_options = replace(
        min_cut_options,
        ban_if_used_far_apart=False,
        ban_if_long_fusible_chains=False,
        ban_if_materialized_backward=False,
    )
    more_aggressive_saved_values, _ = solve_min_cut(
        joint_graph, node_info, more_aggressive_options
    )
    if get_mem_ratio(more_aggressive_saved_values) < memory_budget:
        return more_aggressive_saved_values
    # Third pass: additionally drop the allowlist restriction.
    aggressive_options = replace(
        more_aggressive_options,
        ban_if_not_in_allowlist=False,
    )
    aggressive_recomputation_saved_values, banned_nodes = solve_min_cut(
        joint_graph, node_info, aggressive_options
    )
    if get_mem_ratio(aggressive_recomputation_saved_values) < memory_budget:
        return aggressive_recomputation_saved_values
    from torch._inductor.fx_utils import get_node_storage

    input_storages = OrderedSet(get_node_storage(node) for node in node_info.inputs)

    def get_recomputable_banned_nodes(
        banned_nodes: OrderedSet[fx.Node],
    ) -> list[fx.Node]:
        # Filter the banned set down to nodes the knapsack may legally flip
        # back to "recompute": reachable from backward and not aliasing an
        # input's storage.
        return [
            i
            for i in banned_nodes
            if (
                # Only allow recomputing nodes that are actually required for BW
                i.dist_from_bw < int(1e9)  # type: ignore[attr-defined]
                and get_node_storage(i) not in input_storages
            )
        ]

    recomputable_banned_nodes = get_recomputable_banned_nodes(banned_nodes)
    # Nodes the user explicitly marked MUST_SAVE are never handed to the
    # knapsack.
    must_save_nodes = [
        i
        for i in recomputable_banned_nodes
        if i.meta.get("recompute", False) == CheckpointPolicy.MUST_SAVE
    ]
    recomputable_banned_nodes = [
        i for i in recomputable_banned_nodes if i not in must_save_nodes
    ]
    # default: runtime_optimized_saved_values
    # more aggressive: more_aggressive_saved_values
    # full aggressive: aggressive_recomputation_saved_values
    all_recomputable_banned_nodes = sorted(
        recomputable_banned_nodes, key=_size_of, reverse=True
    )
    if len(all_recomputable_banned_nodes) == 0:
        return node_info.inputs + must_save_nodes
    # Per-node weights for the knapsack: normalized memory vs. estimated
    # recomputation runtime.
    memories_banned_nodes = [
        get_normalized_size(_size_of(i)) for i in all_recomputable_banned_nodes
    ]
    runtimes_banned_nodes = [
        estimate_runtime(node) for node in all_recomputable_banned_nodes
    ]
    from torch.utils._mode_utils import no_dispatch

    def get_saved_values_knapsack(memory_budget, node_info, joint_graph):
        # Solve the knapsack for this budget, then re-run the min-cut with
        # the chosen "recompute" nodes exempted from banning.
        with no_dispatch():
            (
                expected_runtime,
                saved_node_idxs,
                recomputable_node_idxs,
            ) = _optimize_runtime_with_given_memory(
                joint_graph,
                memories_banned_nodes,
                runtimes_banned_nodes,
                max(memory_budget, 0),
                node_info,
                all_recomputable_banned_nodes,
            )
        dont_ban: OrderedSet[fx.Node] = OrderedSet()
        for idx in recomputable_node_idxs:
            # if idx in all_recomputable_banned_nodes:
            try:
                dont_ban.add(all_recomputable_banned_nodes[idx])
            except BaseException:  # noqa: B036
                pass
        assert dont_ban.issubset(all_recomputable_banned_nodes)
        saved_values, _ = solve_min_cut(
            joint_graph,
            node_info,
            aggressive_options,
            dont_ban,
        )
        if AOT_PARTITIONER_DEBUG:
            create_structured_trace_for_min_cut_info(
                joint_graph=joint_graph,
                all_recomputable_banned_nodes=all_recomputable_banned_nodes,
                saved_node_idxs=saved_node_idxs,
                recomputable_node_idxs=recomputable_node_idxs,
                expected_runtime=expected_runtime,
                memories_banned_nodes=[
                    _size_of(i) for i in all_recomputable_banned_nodes
                ],
                normalized_memories_banned_nodes=memories_banned_nodes,
                runtimes_banned_nodes=runtimes_banned_nodes,
                min_cut_saved_values=saved_values,
            )
        return saved_values, expected_runtime

    if config.visualize_memory_budget_pareto:
        # Debug mode: sweep budgets (by bisection on distinct outcomes) and
        # plot the memory-vs-recompute-runtime Pareto frontier.
        def estimate_for_budget(b):
            saved_values, expected_runtime = get_saved_values_knapsack(
                b, node_info=node_info, joint_graph=joint_graph
            )
            return (
                b,
                sum(runtimes_banned_nodes) - expected_runtime,
                get_mem_ratio(saved_values),
            )

        options = [estimate_for_budget(0.0), estimate_for_budget(1.0)]
        if options[0][1:] != options[1][1:]:
            bisects = [(options[0], options[1])]
            while bisects:
                lhs, rhs = bisects.pop()
                if rhs[0] - lhs[0] < 1e-3:
                    options.append(lhs)
                    options.append(rhs)
                    continue
                mid = estimate_for_budget((lhs[0] + rhs[0]) / 2)
                if mid[1:] != lhs[1:]:
                    bisects.append((lhs, mid))
                if mid[1:] != rhs[1:]:
                    bisects.append((mid, rhs))
        options.sort()
        import matplotlib.pyplot as plt

        x_values = [item[2] for item in options]
        y_values = [item[1] for item in options]
        # Plotting the values with updated axis labels and chart title
        plt.figure(figsize=(10, 6))
        plt.plot(x_values, y_values, marker="o")
        # Adding labels for each point
        for i, txt in enumerate(x_values):
            plt.annotate(
                f"{txt:.4f}",
                (txt, y_values[i]),
                textcoords="offset points",
                xytext=(0, 10),
                ha="center",
            )
        plt.xlabel("Memory Budget")
        plt.ylabel("Runtime of Recomputed Components")
        plt.title("Pareto Frontier of Memory Budget vs. Recomputation Runtime")
        plt.grid(True)
        fig = plt.gcf()
        plt.show()
        fig_dir = os.getcwd()
        if config.memory_budget_pareto_dir is not None:
            fig_dir = config.memory_budget_pareto_dir
        os.makedirs(fig_dir, exist_ok=True)
        rank_suffix = ""
        if torch.distributed.is_available() and torch.distributed.is_initialized():
            rank_suffix = f"_rank_{torch.distributed.get_rank()}"
        fig_name = os.path.join(
            fig_dir, f"memory_budget_pareto{rank_suffix}_{get_aot_graph_name()}.svg"
        )
        fig.savefig(fig_name)
        log.warning("Generated Pareto frontier curve at %s", fig_name)
    # todo(chilli): Estimated doesn't align exactly with actual - actual is
    # usually less memory than estimated. i'm guessing (actually quite
    # unsure about this) that's because estimated is just only including
    # tensors we actually banned from recompute, but there may be other
    # tensors that we choose to save.
    return get_saved_values_knapsack(
        memory_budget=memory_budget, node_info=node_info, joint_graph=joint_graph
    )[0]
def _sync_decision_cross_ranks(
    joint_graph: torch.fx.Graph, saved_values: list[torch.fx.Node]
):
    """Make all distributed ranks agree on one set of saved activations.

    When ranks compile the same graph they may reach different
    save/recompute decisions (e.g. from profiling noise). For graphs with
    collectives this can deadlock or skew memory, so every rank gathers its
    candidate saved-node names and all adopt the choice from the rank with
    the smallest total saved size. Returns the (possibly replaced)
    ``saved_values`` list.
    """
    # use the same policy across different GPUs
    from torch._subclasses.fake_tensor import unset_fake_temporarily

    def has_collectives(joint_graph):
        # Syncing is only needed when the graph actually communicates.
        for node in joint_graph.nodes:
            if isinstance(
                node.target, torch._ops.OpOverload
            ) and node.target.namespace in {"_c10d_functional", "c10d_functional"}:
                return True
        return False

    def has_same_nodes(joint_graph):
        # proxy to check if the graph is the same across different GPUs.
        # We only consider the name and order of nodes. A more robust way
        # would be to check the hash of the whole graph (disregarding input shapes),
        # this is a reasonable first-order approximation.
        node_str = "/".join(x.name for x in joint_graph.nodes)
        inputs = hashlib.sha256(node_str.encode("utf-8")).hexdigest()
        all_inputs = [None for _ in range(torch.distributed.get_world_size())]
        with no_dispatch(), unset_fake_temporarily():
            # TODO: maybe use a different process group?
            torch.distributed.all_gather_object(all_inputs, inputs)
        return all(all_inputs[0] == x for x in all_inputs)

    if (
        torch.distributed.is_available()
        and torch.distributed.is_initialized()
        and torch.distributed.get_world_size() > 1
        and has_collectives(joint_graph)
        and has_same_nodes(joint_graph)
    ):
        with no_dispatch(), unset_fake_temporarily():
            # Gather every rank's saved-node names (by name, since node
            # objects aren't picklable across ranks).
            objects = [[x.name for x in saved_values]]
            saved_ops_names_all_ranks: list[list[str]] = [
                [] for _ in range(torch.distributed.get_world_size())
            ]
            torch.distributed.all_gather_object(saved_ops_names_all_ranks, objects[0])
            name_to_node = get_name_to_node(joint_graph)
            # Compute, for each rank's proposal, the total saved byte size
            # as measured on *this* rank's graph.
            saved_sizes: list[int] = []
            saved_ops_with_sizes: dict[str, int] = {}
            for idx, saved_ops_names in enumerate(saved_ops_names_all_ranks):
                saved_nodes = [name_to_node[op_name] for op_name in saved_ops_names]
                saved_size = 0
                for node in saved_nodes:
                    size_of_node = _size_of(node)
                    saved_size += size_of_node
                    if idx == torch.distributed.get_rank():
                        saved_ops_with_sizes[node.name] = size_of_node
                saved_ops_with_sizes["total size"] = saved_size
                saved_sizes.append(saved_size)
            # MAX-reduce so every rank sees each proposal's worst-case size,
            # then all ranks deterministically pick the same (smallest) one.
            saved_sizes_tensor = torch.tensor(
                saved_sizes,
                device=torch.distributed.distributed_c10d._get_object_coll_device(),
            )
            torch.distributed.all_reduce(
                saved_sizes_tensor, op=torch.distributed.distributed_c10d.ReduceOp.MAX
            )
            picked_rank_idx = int(torch.argmin(saved_sizes_tensor).item())
            sync_decision_cross_ranks_str = f"picked_rank_idx={picked_rank_idx}, saved_nodes of current rank={saved_ops_with_sizes}"
            trace_structured(
                "artifact",
                metadata_fn=lambda: {
                    "name": "aot_joint_graph_sync_decision_cross_ranks",
                    "encoding": "string",
                },
                payload_fn=lambda: sync_decision_cross_ranks_str,
            )
            # Adopt the winning rank's saved set, mapped back to local nodes.
            saved_values = [
                name_to_node[n] for n in saved_ops_names_all_ranks[picked_rank_idx]
            ]
    return saved_values
def thread_graphsafe_rng_from_hops(module, is_backward):
    """
    Graph-safe RNG lets torch.compile use CUDA Graphs for graphs with RNG ops.

    For graphs without HOPs, the partitioner adds placeholder nodes
    fwd_rng_state_* and bw_rng_state_* to the forward and backward graphs. At
    runtime, the AOTDispatcher retrieves these RNG states and passes them to the
    compiled graphs.

    This works well for no-HOP graphs. With HOPs, the partitioner runs
    recursively: it first partitions the HOP (producing forward/backward HOP
    subgraphs) and then stitches them back into the outer joint graph. For HOPs
    that contain RNG ops, the outer joint graph now includes HOP subgraph
    modules with extra RNG placeholders. We must thread these placeholders
    through the outer module partitioned forward and backward graphs—this
    function does exactly that. It collects the RNG placeholder nodes from the
    HOPs and creates corresponding placeholders in the outer forward and
    backward graphs.

    There is a catch: for a short period, the joint graph is in a “bad” state.
    The HOP subgraphs expect additional inputs (because of the new
    placeholders), but the outer graph call sites don't yet provide them. We
    can't fix this in the joint graph because the joint graph's input signature
    is fixed (primals, tangents). As a compromise, we keep the joint graph in
    somewhat of a bad state for some time and, once the outer forward and
    backward graphs are partitioned, insert the corresponding RNG placeholders
    and wire up the calls.
    """
    rng_count = 0
    # Placeholder name prefix differs between the forward and backward graphs.
    rng_string = "bwd_rng_state" if is_backward else "fwd_rng_state"
    # Insert new RNG placeholders after the current last placeholder so the
    # graph's placeholder ordering stays valid.
    last_input = next(reversed(module.graph.find_nodes(op="placeholder")))
    for hop_node in module.graph.find_nodes(
        op="call_function", target=torch.ops.higher_order.invoke_subgraph
    ):
        # hop_node.args[0] is the get_attr node referencing the subgraph module.
        subgraph = getattr(module, hop_node.args[0].target)
        if isinstance(subgraph, fx.GraphModule):
            new_rng_inputs = []
            for placeholder_node in subgraph.graph.find_nodes(op="placeholder"):
                if rng_string in placeholder_node.name:
                    # Found a rng state placeholder in the hop graph, lets add
                    # the corresponding node in the outer graph
                    with module.graph.inserting_after(last_input):
                        rng_state = module.graph.placeholder(
                            f"{rng_string}_{rng_count}"
                        )
                        rng_count += 1
                        rng_state.meta["val"] = placeholder_node.meta["val"]
                        last_input = rng_state
                        new_rng_inputs.append(rng_state)
            if new_rng_inputs:
                # Pass on the new args that include the new_rng_inputs
                with module.graph.inserting_after(hop_node):
                    new_hop_node_with_fixed_args = module.graph.create_node(
                        "call_function",
                        torch.ops.higher_order.invoke_subgraph,
                        (*hop_node.args, *new_rng_inputs),  # type: ignore[arg-type]
                        {},
                    )

                    hop_node.replace_all_uses_with(
                        new_hop_node_with_fixed_args, propagate_meta=True
                    )

                    # Setup the eager_input_vals
                    # NOTE(review): eager_input_vals carries example values for
                    # the hop's inputs; extend it with the new RNG inputs' vals
                    # so it stays aligned with the widened arg tuple.
                    eager_vals = hop_node.meta.get("eager_input_vals")
                    if eager_vals:
                        eager_args, eager_kwargs = eager_vals
                        new_eager_args = (
                            *eager_args,
                            *[inp.meta["val"] for inp in new_rng_inputs],
                        )
                        new_hop_node_with_fixed_args.meta["eager_input_vals"] = (
                            new_eager_args,
                            eager_kwargs,
                        )
                    # The old call site is now fully replaced; drop it.
                    module.graph.erase_node(hop_node)
    return module
def min_cut_rematerialization_partition(
joint_module: fx.GraphModule,
_joint_inputs,
compiler="inductor",
*,
num_fwd_outputs,
static_lifetime_input_indices: Optional[list[int]] = None,
) -> tuple[fx.GraphModule, fx.GraphModule]:
"""
Partitions the joint graph such that the backward recomputes the forward.
Recomputing helps in trading off memory bandwidth with computation.
To create the fwd and bwd graph, we copy the joint graph, manually set the
outputs to just original forward or backward outputs. And then we run the
resulting graphs through dead code elimination.
.. warning::
This API is experimental and likely to change.
Args:
joint_module(fx.GraphModule): The joint forward and backward graph. This
is the result of AOT Autograd tracing.
_joint_inputs: The inputs to the joint graph. This is unused.
compiler: This option determines the default set of recomputable ops.
Currently, there are two options: ``nvfuser`` and ``inductor``.
recomputable_ops: This is an optional set of recomputable ops. If this
is not None, then this set of ops will be used instead of the
default set of ops.
num_fwd_outputs: The number of outputs from the forward graph.
Returns:
Returns the generated forward and backward Fx graph modules.
"""
joint_module.graph.eliminate_dead_code()
joint_module.recompile()
fx_g = joint_module.graph
# add the CSE pass
if config.cse:
cse_graph = fx_graph_cse(fx_g)
joint_module.graph = cse_graph
joint_graph = joint_module.graph
graph_has_recomputable_ops = has_recomputable_ops(joint_module)
graph_has_recomputable_rng_ops = has_recomputable_rng_ops(joint_module)
if graph_has_recomputable_ops:
joint_module = cleanup_recompute_tags(joint_module)
if not config.unsafe_allow_optimization_of_collectives:
force_save_collectives(joint_module)
force_save_bw_mutation_src(joint_module)
def classify_nodes(joint_module, static_lifetime_input_indices):
name_to_node = get_name_to_node(joint_module.graph)
required_bw_nodes: OrderedSet[fx.Node] = OrderedSet()
for node in joint_module.graph.nodes:
if node.op == "placeholder" and "tangents" in node.target:
required_bw_nodes.add(node)
elif _must_be_in_backward(node):
required_bw_nodes.add(node)
if node in required_bw_nodes:
required_bw_nodes.update(node.users)
primal_inputs = list(filter(_is_primal, joint_module.graph.nodes))
fwd_seed_offset_inputs = list(
filter(_is_fwd_seed_offset, joint_module.graph.nodes)
)
inputs = primal_inputs + fwd_seed_offset_inputs
fwd_outputs, bwd_outputs, fwd_outputs_descs, bwd_outputs_descs = (
_extract_fwd_bwd_outputs(joint_module, num_fwd_outputs=num_fwd_outputs)
)
required_bw_nodes.update(
o for o in bwd_outputs if o is not None and o.op != "output"
)
forward_only_graph = _extract_graph_with_inputs_outputs(
joint_module.graph, inputs, fwd_outputs, fwd_outputs_descs, "forward"
)
required_fw_nodes: OrderedSet[fx.Node] = OrderedSet(
name_to_node[node.name]
for node in forward_only_graph.nodes
if node.op != "output"
)
unclaimed_nodes: OrderedSet[fx.Node] = OrderedSet(
node
for node in joint_module.graph.nodes
if node not in required_fw_nodes and node not in required_bw_nodes
)
static_lifetime_input_nodes = OrderedSet(
p for i, p in enumerate(primal_inputs) if i in static_lifetime_input_indices
)
fw_cnt = 0
fw_order = {}
for node in joint_module.graph.nodes:
if node in required_fw_nodes:
fw_order[node] = fw_cnt
fw_cnt += 1
return NodeInfo(
inputs,
required_fw_nodes,
required_bw_nodes,
unclaimed_nodes,
fw_order,
static_lifetime_input_nodes,
)
if static_lifetime_input_indices is None:
static_lifetime_input_indices = []
node_info = classify_nodes(joint_module, static_lifetime_input_indices)
# networkx blows up on graphs with no required backward nodes
# Since there's nothing to partition anyway, and the default partitioner can "handle"
# this case, send our graph over to the default partitioner.
if len(node_info.required_bw_nodes) == 0:
return default_partition(
joint_module,
_joint_inputs,
num_fwd_outputs=num_fwd_outputs,
static_lifetime_input_indices=static_lifetime_input_indices,
static_lifetime_input_nodes=node_info.static_lifetime_input_nodes,
)
for node in reversed(joint_module.graph.nodes):
if node.op == "output":
node.dist_from_bw = int(1e9)
elif not node_info.is_required_fw(node):
node.dist_from_bw = 0
else:
node.dist_from_bw = int(1e9)
for user in node.users:
node.dist_from_bw = min(node.dist_from_bw, user.dist_from_bw + 1)
memory_budget = config.activation_memory_budget
for node in joint_graph.nodes:
if isinstance(node.meta.get("memory_budget", None), float):
memory_budget = node.meta["memory_budget"]
break
saved_values = choose_saved_values_set(
joint_graph,
node_info,
memory_budget=memory_budget,
)
# pyrefly: ignore [unbound-name]
if config._sync_decision_cross_ranks:
saved_values = _sync_decision_cross_ranks(joint_graph, saved_values)
# save_for_backward on tensors and stashes symints in autograd .ctx
saved_sym_nodes = list(filter(is_sym_node, saved_values))
saved_values = list(filter(lambda n: not is_sym_node(n), saved_values))
# NB: saved_sym_nodes will be mutated to reflect the actual saved symbols
fw_module, bw_module = _extract_fwd_bwd_modules(
joint_module,
saved_values,
# pyrefly: ignore [bad-argument-type]
saved_sym_nodes=saved_sym_nodes,
num_fwd_outputs=num_fwd_outputs,
static_lifetime_input_nodes=node_info.static_lifetime_input_nodes,
)
if graph_has_recomputable_ops:
if graph_has_recomputable_rng_ops:
fw_module, bw_module = functionalize_rng_ops(
joint_module, fw_module, bw_module, len(saved_sym_nodes)
)
bw_module = reordering_to_mimic_autograd_engine(bw_module)
# raise all getitem ops to as early as possible
# this is helpful for memory, especially in the case of aot_eager backend
fw_module = raise_getitems(fw_module)
bw_module = raise_getitems(bw_module)
fw_module = thread_graphsafe_rng_from_hops(fw_module, is_backward=False)
bw_module = thread_graphsafe_rng_from_hops(bw_module, is_backward=True)
if AOT_PARTITIONER_DEBUG:
# Calculate sorted sizes of saved values
sorted_sizes = sorted([(_size_of(i), str(i)) for i in saved_values])
# Log total theoretical activations stored
total_activations_size_gb = sum(_size_of(i) for i in saved_values) / 1e9
log.info("Theoretical Activations Stored: %.2f GB", total_activations_size_gb)
# Log theoretical per activation storage sizes
log.info("Theoretical Per Activation Storage Sizes: %s", sorted_sizes)
fw_module_nodes = OrderedSet(
node.name for node in fw_module.graph.nodes if node.op == "call_function"
)
bw_module_nodes = OrderedSet(
node.name for node in bw_module.graph.nodes if node.op == "call_function"
)
remat_nodes = fw_module_nodes & bw_module_nodes
counts: dict[str, int] = defaultdict(int)
for node in fw_module.graph.nodes:
if node.name in remat_nodes and hasattr(node.target, "_overloadpacket"):
counts[str(node.target._overloadpacket)] += 1
log.info(
"# remat/fw/bw: %d/%d/%d",
len(remat_nodes),
len(fw_module_nodes),
len(bw_module_nodes),
)
rematerialized_ops = sorted(
counts.items(), key=operator.itemgetter(1), reverse=True
)
log.info("Count of Ops Rematerialized: %s", rematerialized_ops)
return fw_module, bw_module
def draw_graph(
traced: torch.fx.GraphModule,
fname: str,
figname: str = "fx_graph",
clear_meta: bool = True,
prog: Optional[Union[str, list[str]]] = None,
parse_stack_trace: bool = False,
dot_graph_shape: Optional[str] = None,
) -> None:
if clear_meta:
new_graph = copy.deepcopy(traced.graph)
traced = fx.GraphModule(traced, new_graph)
for node in traced.graph.nodes:
node.meta = {}
base, ext = os.path.splitext(fname)
if not ext:
ext = "." + config.torch_compile_graph_format
log.info("Writing FX graph to file: %s%s", base, ext)
g = graph_drawer.FxGraphDrawer(
traced,
figname,
parse_stack_trace=parse_stack_trace,
dot_graph_shape=dot_graph_shape,
)
x = g.get_main_dot_graph()
write_method = getattr(x, "write_" + ext.lstrip("."))
fname = f"{base}{ext}"
if prog is None:
write_method(fname)
else:
write_method(fname, prog=prog)
| InvalidNodeBase |
python | pytorch__pytorch | torch/distributed/elastic/rendezvous/dynamic_rendezvous.py | {
"start": 32956,
"end": 33321
} | class ____:
"""Represent a rendezvous keep-alive update operation."""
def __call__(self, ctx: _RendezvousContext, deadline: float) -> _Action:
if _should_keep_alive(ctx):
if time.monotonic() > deadline:
return _Action.ERROR_TIMEOUT
return _Action.KEEP_ALIVE
return _Action.FINISH
| _RendezvousKeepAliveOp |
python | tensorflow__tensorflow | tensorflow/lite/python/analyzer_test.py | {
"start": 1076,
"end": 8095
} | class ____(test_util.TensorFlowTestCase):
def testTxt(self):
model_path = resource_loader.get_path_to_datafile('../testdata/add.bin')
mock_stdout = io.StringIO()
with test.mock.patch.object(sys, 'stdout', mock_stdout):
analyzer.ModelAnalyzer.analyze(model_path=model_path)
txt = mock_stdout.getvalue()
self.assertIn('Subgraph#0(T#1) -> [T#2]', txt)
self.assertIn('Op#0 ADD(T#1, T#1) -> [T#0]', txt)
self.assertIn('Op#1 ADD(T#0, T#1) -> [T#2]', txt)
self.assertNotIn('Your model looks compatible with GPU delegate', txt)
def testMlir(self):
model_path = resource_loader.get_path_to_datafile('../testdata/add.bin')
mock_stdout = io.StringIO()
with test.mock.patch.object(sys, 'stdout', mock_stdout):
analyzer.ModelAnalyzer.analyze(
model_path=model_path, experimental_use_mlir=True)
mlir = mock_stdout.getvalue()
self.assertIn(
'func @main(%arg0: tensor<1x8x8x3xf32> '
'{tf_saved_model.index_path = ["a"]}) -> '
'(tensor<1x8x8x3xf32> {tf_saved_model.index_path = ["x"]}) attributes '
'{tf.entry_function = {inputs = "input", outputs = "output"}, '
'tf_saved_model.exported_names = ["serving_default"]}', mlir)
self.assertIn(
'%0 = tfl.add %arg0, %arg0 {fused_activation_function = "NONE"} : '
'tensor<1x8x8x3xf32>', mlir)
self.assertIn(
'%1 = tfl.add %0, %arg0 {fused_activation_function = "NONE"} : '
'tensor<1x8x8x3xf32>', mlir)
self.assertIn('return %1 : tensor<1x8x8x3xf32>', mlir)
def testMlirHugeConst(self):
model_path = resource_loader.get_path_to_datafile(
'../testdata/conv_huge_im2col.bin')
mock_stdout = io.StringIO()
with test.mock.patch.object(sys, 'stdout', mock_stdout):
analyzer.ModelAnalyzer.analyze(
model_path=model_path, experimental_use_mlir=True)
mlir = mock_stdout.getvalue()
self.assertIn(
'%1 = "tfl.pseudo_const"() <{value = dense_resource<__elided__> : '
'tensor<3x3x3x8xf32>}> : () -> tensor<3x3x3x8xf32>', mlir)
def testTxtWithFlatBufferModel(self):
@tf.function(
input_signature=[tf.TensorSpec(shape=[None], dtype=tf.float32)])
def func(x):
return x + tf.cos(x)
converter = lite.TFLiteConverterV2.from_concrete_functions(
[func.get_concrete_function()], func)
fb_model = converter.convert()
mock_stdout = io.StringIO()
with test.mock.patch.object(sys, 'stdout', mock_stdout):
analyzer.ModelAnalyzer.analyze(model_content=fb_model)
txt = mock_stdout.getvalue()
self.assertIn('Subgraph#0 main(T#0) -> [T#2]', txt)
self.assertIn('Op#0 COS(T#0) -> [T#1]', txt)
self.assertIn('Op#1 ADD(T#0, T#1) -> [T#2]', txt)
def testMlirWithFlatBufferModel(self):
@tf.function(
input_signature=[tf.TensorSpec(shape=[None], dtype=tf.float32)])
def func(x):
return x + tf.cos(x)
converter = lite.TFLiteConverterV2.from_concrete_functions(
[func.get_concrete_function()], func)
fb_model = converter.convert()
mock_stdout = io.StringIO()
with test.mock.patch.object(sys, 'stdout', mock_stdout):
analyzer.ModelAnalyzer.analyze(
model_content=fb_model, experimental_use_mlir=True)
mlir = mock_stdout.getvalue()
self.assertIn('func @main(%arg0: tensor<?xf32>) -> tensor<?xf32>', mlir)
self.assertIn('%0 = "tfl.cos"(%arg0) : (tensor<?xf32>) -> tensor<?xf32>',
mlir)
self.assertIn(
'%1 = tfl.add %arg0, %0 {fused_activation_function = "NONE"} : '
'tensor<?xf32>', mlir)
self.assertIn('return %1 : tensor<?xf32', mlir)
def testTxtSignatureDefs(self):
with tempfile.TemporaryDirectory() as tmp_dir:
@tf.function(input_signature=[
tf.TensorSpec(shape=None, dtype=tf.float32),
tf.TensorSpec(shape=None, dtype=tf.float32)
])
def add(a, b):
return {'add_result': tf.add(a, b)}
@tf.function(input_signature=[
tf.TensorSpec(shape=None, dtype=tf.float32),
tf.TensorSpec(shape=None, dtype=tf.float32)
])
def sub(x, y):
return {'sub_result': tf.subtract(x, y)}
root = autotrackable.AutoTrackable()
root.f1 = add.get_concrete_function()
root.f2 = sub.get_concrete_function()
tf.saved_model.save(
root, tmp_dir, signatures={
'add': root.f1,
'sub': root.f2
})
converter = lite.TFLiteConverterV2.from_saved_model(tmp_dir)
fb_model = converter.convert()
mock_stdout = io.StringIO()
with test.mock.patch.object(sys, 'stdout', mock_stdout):
analyzer.ModelAnalyzer.analyze(model_content=fb_model)
txt = mock_stdout.getvalue()
self.assertIn("Your TFLite model has '2' signature_def(s).", txt)
self.assertIn("Signature#0 key: 'add'", txt)
self.assertIn(" 'a' : T#1", txt)
self.assertIn(" 'b' : T#0", txt)
self.assertIn(" 'add_result' : T#2", txt)
self.assertIn("Signature#1 key: 'sub'", txt)
self.assertIn(" 'x' : T#1_1", txt)
self.assertIn(" 'y' : T#1_0", txt)
self.assertIn(" 'sub_result' : T#1_2", txt)
def testTxtWithoutInput(self):
@tf.function()
def func():
return tf.cos(1.0)
converter = lite.TFLiteConverterV2.from_concrete_functions(
[func.get_concrete_function()], func)
fb_model = converter.convert()
mock_stdout = io.StringIO()
with test.mock.patch.object(sys, 'stdout', mock_stdout):
analyzer.ModelAnalyzer.analyze(model_content=fb_model)
txt = mock_stdout.getvalue()
self.assertIn('Subgraph#0 main() -> [T#0]', txt)
def testTxtWithEinsum(self):
@tf.function(input_signature=[
tf.TensorSpec(shape=[1, 100, 512], dtype=tf.float32),
tf.TensorSpec(shape=[512, 8, 64], dtype=tf.float32)
])
def func(lhs, rhs):
return tf.einsum('ABD,DNH->ABNH', lhs, rhs)
converter = lite.TFLiteConverterV2.from_concrete_functions(
[func.get_concrete_function()], func)
converter.unfold_batchmatmul = True
fb_model = converter.convert()
mock_stdout = io.StringIO()
with test.mock.patch.object(sys, 'stdout', mock_stdout):
analyzer.ModelAnalyzer.analyze(model_content=fb_model)
txt = mock_stdout.getvalue()
self.assertIn('Op#0 RESHAPE(T#1, T#4[512, 512]) -> [T#5]', txt)
self.assertIn('Op#1 TRANSPOSE(T#5, T#3[1, 0]) -> [T#6]', txt)
self.assertIn('Op#2 FULLY_CONNECTED(T#0, T#6, T#-1) -> [T#7]', txt)
self.assertIn('Op#3 RESHAPE(T#7, T#2[1, 100, 8, 64]) -> [T#8]', txt)
self.assertIn(
'T#2(einsum/Einsum) shape:[4], type:INT32 RO 16 bytes, '
'buffer: 3, data:[1, 100, 8, 64]', txt)
self.assertIn(
'T#3(einsum/Einsum2) shape:[2], type:INT32 RO 8 bytes, '
'buffer: 4, data:[1, 0]', txt)
self.assertIn(
'T#4(einsum/Einsum3) shape:[2], type:INT32 RO 8 bytes, '
'buffer: 5, data:[512, 512]', txt)
if __name__ == '__main__':
test.main()
| AnalyzerTest |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 204625,
"end": 206405
} | class ____(TestCase):
def test_no_iterables(self):
self.assertEqual(tuple(mi.partial_product()), ((),))
def test_empty_iterable(self):
self.assertEqual(tuple(mi.partial_product('AB', '', 'CD')), ())
def test_one_iterable(self):
# a single iterable should pass through
self.assertEqual(
tuple(mi.partial_product('ABCD')),
(
('A',),
('B',),
('C',),
('D',),
),
)
def test_two_iterables(self):
self.assertEqual(
list(mi.partial_product('ABCD', [1])),
[('A', 1), ('B', 1), ('C', 1), ('D', 1)],
)
expected = [
('A', 1),
('B', 1),
('C', 1),
('D', 1),
('D', 2),
('D', 3),
('D', 4),
]
self.assertEqual(
list(mi.partial_product('ABCD', [1, 2, 3, 4])), expected
)
def test_basic(self):
ones = [1, 2, 3]
tens = [10, 20, 30, 40, 50]
hundreds = [100, 200]
expected = [
(1, 10, 100),
(2, 10, 100),
(3, 10, 100),
(3, 20, 100),
(3, 30, 100),
(3, 40, 100),
(3, 50, 100),
(3, 50, 200),
]
actual = list(mi.partial_product(ones, tens, hundreds))
self.assertEqual(actual, expected)
def test_uneven_length_iterables(self):
# this is also the docstring example
expected = [
('A', 'C', 'D'),
('B', 'C', 'D'),
('B', 'C', 'E'),
('B', 'C', 'F'),
]
self.assertEqual(list(mi.partial_product('AB', 'C', 'DEF')), expected)
| PartialProductTests |
python | pydantic__pydantic | pydantic/functional_validators.py | {
"start": 19218,
"end": 19777
} | class ____(Protocol[_ModelType]):
"""A `@model_validator` decorated function signature.
This is used when `mode='wrap'` and the function does not have info argument.
"""
def __call__( # noqa: D102
self,
cls: type[_ModelType],
# this can be a dict, a model instance
# or anything else that gets passed to validate_python
# thus validators _must_ handle all cases
value: Any,
handler: ModelWrapValidatorHandler[_ModelType],
/,
) -> _ModelType: ...
| ModelWrapValidatorWithoutInfo |
python | django__django | tests/validation/test_unique.py | {
"start": 409,
"end": 3793
} | class ____(unittest.TestCase):
def test_unique_fields_get_collected(self):
m = UniqueFieldsModel()
self.assertEqual(
(
[
(UniqueFieldsModel, ("id",)),
(UniqueFieldsModel, ("unique_charfield",)),
(UniqueFieldsModel, ("unique_integerfield",)),
],
[],
),
m._get_unique_checks(),
)
def test_unique_together_gets_picked_up_and_converted_to_tuple(self):
m = UniqueTogetherModel()
self.assertEqual(
(
[
(UniqueTogetherModel, ("ifield", "cfield")),
(UniqueTogetherModel, ("ifield", "efield")),
(UniqueTogetherModel, ("id",)),
],
[],
),
m._get_unique_checks(),
)
def test_unique_together_normalization(self):
"""
Test the Meta.unique_together normalization with different sorts of
objects.
"""
data = {
"2-tuple": (("foo", "bar"), (("foo", "bar"),)),
"list": (["foo", "bar"], (("foo", "bar"),)),
"already normalized": (
(("foo", "bar"), ("bar", "baz")),
(("foo", "bar"), ("bar", "baz")),
),
"set": (
{("foo", "bar"), ("bar", "baz")}, # Ref #21469
(("foo", "bar"), ("bar", "baz")),
),
}
for unique_together, normalized in data.values():
class M(models.Model):
foo = models.IntegerField()
bar = models.IntegerField()
baz = models.IntegerField()
Meta = type(
"Meta", (), {"unique_together": unique_together, "apps": Apps()}
)
checks, _ = M()._get_unique_checks()
for t in normalized:
check = (M, t)
self.assertIn(check, checks)
def test_primary_key_is_considered_unique(self):
m = CustomPKModel()
self.assertEqual(
([(CustomPKModel, ("my_pk_field",))], []), m._get_unique_checks()
)
def test_unique_for_date_gets_picked_up(self):
m = UniqueForDateModel()
self.assertEqual(
(
[(UniqueForDateModel, ("id",))],
[
(UniqueForDateModel, "date", "count", "start_date"),
(UniqueForDateModel, "year", "count", "end_date"),
(UniqueForDateModel, "month", "order", "end_date"),
],
),
m._get_unique_checks(),
)
def test_unique_for_date_exclusion(self):
m = UniqueForDateModel()
self.assertEqual(
(
[(UniqueForDateModel, ("id",))],
[
(UniqueForDateModel, "year", "count", "end_date"),
(UniqueForDateModel, "month", "order", "end_date"),
],
),
m._get_unique_checks(exclude="start_date"),
)
def test_func_unique_constraint_ignored(self):
m = UniqueFuncConstraintModel()
self.assertEqual(
m._get_unique_checks(),
([(UniqueFuncConstraintModel, ("id",))], []),
)
| GetUniqueCheckTests |
python | Textualize__textual | src/textual/widgets/_tree.py | {
"start": 2674,
"end": 15496
} | class ____(Generic[TreeDataType]):
"""An object that represents a "node" in a tree control."""
def __init__(
self,
tree: Tree[TreeDataType],
parent: TreeNode[TreeDataType] | None,
id: NodeID,
label: Text,
data: TreeDataType | None = None,
*,
expanded: bool = True,
allow_expand: bool = True,
) -> None:
"""Initialise the node.
Args:
tree: The tree that the node is being attached to.
parent: The parent node that this node is being attached to.
id: The ID of the node.
label: The label for the node.
data: Optional data to associate with the node.
expanded: Should the node be attached in an expanded state?
allow_expand: Should the node allow being expanded by the user?
"""
self._tree = tree
self._parent = parent
self._id = id
self._label = tree.process_label(label)
self.data = data
"""Optional data associated with the tree node."""
self._expanded = expanded
self._children: list[TreeNode[TreeDataType]] = []
self._hover_ = False
self._selected_ = False
self._allow_expand = allow_expand
self._updates: int = 0
self._line: int = -1
def __rich_repr__(self) -> rich.repr.Result:
yield self._label.plain
yield self.data
def _reset(self) -> None:
self._hover_ = False
self._selected_ = False
self._updates += 1
@property
def tree(self) -> Tree[TreeDataType]:
"""The tree that this node is attached to."""
return self._tree
@property
def children(self) -> TreeNodes[TreeDataType]:
"""The child nodes of a TreeNode."""
return TreeNodes(self._children)
@property
def siblings(self) -> TreeNodes[TreeDataType]:
"""The siblings of this node (includes self)."""
if self.parent is None:
return TreeNodes([self])
else:
return self.parent.children
@property
def line(self) -> int:
"""The line number for this node, or -1 if it is not displayed."""
return self._line
@property
def _hover(self) -> bool:
"""Check if the mouse is over the node."""
return self._hover_
@_hover.setter
def _hover(self, hover: bool) -> None:
self._updates += 1
self._hover_ = hover
@property
def _selected(self) -> bool:
"""Check if the node is selected."""
return self._selected_
@_selected.setter
def _selected(self, selected: bool) -> None:
self._updates += 1
self._selected_ = selected
@property
def id(self) -> NodeID:
"""The ID of the node."""
return self._id
@property
def parent(self) -> TreeNode[TreeDataType] | None:
"""The parent of the node."""
return self._parent
@property
def next_sibling(self) -> TreeNode[TreeDataType] | None:
"""The next sibling below the node."""
siblings = self.siblings
index = siblings.index(self) + 1
try:
return siblings[index]
except IndexError:
return None
@property
def previous_sibling(self) -> TreeNode[TreeDataType] | None:
"""The previous sibling below the node."""
siblings = self.siblings
index = siblings.index(self) - 1
if index < 0:
return None
try:
return siblings[index]
except IndexError:
return None
@property
def is_expanded(self) -> bool:
"""Is the node expanded?"""
return self._expanded
@property
def is_collapsed(self) -> bool:
"""Is the node collapsed?"""
return not self._expanded
@property
def is_last(self) -> bool:
"""Is this the last child node of its parent?"""
if self._parent is None:
return True
return bool(
self._parent._children and self._parent._children[-1] == self,
)
@property
def is_root(self) -> bool:
"""Is this node the root of the tree?"""
return self == self._tree.root
@property
def allow_expand(self) -> bool:
"""Is this node allowed to expand?"""
return self._allow_expand
@allow_expand.setter
def allow_expand(self, allow_expand: bool) -> None:
self._allow_expand = allow_expand
self._updates += 1
def _expand(self, expand_all: bool) -> None:
"""Mark the node as expanded (its children are shown).
Args:
expand_all: If `True` expand all offspring at all depths.
"""
self._expanded = True
self._updates += 1
self._tree.post_message(Tree.NodeExpanded(self).set_sender(self._tree))
if expand_all:
for child in self.children:
child._expand(expand_all)
def expand(self) -> Self:
"""Expand the node (show its children).
Returns:
The `TreeNode` instance.
"""
self._expand(False)
self._tree._invalidate()
return self
def expand_all(self) -> Self:
"""Expand the node (show its children) and all those below it.
Returns:
The `TreeNode` instance.
"""
self._expand(True)
self._tree._invalidate()
return self
def _collapse(self, collapse_all: bool) -> None:
"""Mark the node as collapsed (its children are hidden).
Args:
collapse_all: If `True` collapse all offspring at all depths.
"""
self._expanded = False
self._updates += 1
self._tree.post_message(Tree.NodeCollapsed(self).set_sender(self._tree))
if collapse_all:
for child in self.children:
child._collapse(collapse_all)
def collapse(self) -> Self:
"""Collapse the node (hide its children).
Returns:
The `TreeNode` instance.
"""
self._collapse(False)
self._tree._invalidate()
return self
def collapse_all(self) -> Self:
"""Collapse the node (hide its children) and all those below it.
Returns:
The `TreeNode` instance.
"""
self._collapse(True)
self._tree._invalidate()
return self
def toggle(self) -> Self:
"""Toggle the node's expanded state.
Returns:
The `TreeNode` instance.
"""
if self._expanded:
self.collapse()
else:
self.expand()
return self
def toggle_all(self) -> Self:
"""Toggle the node's expanded state and make all those below it match.
Returns:
The `TreeNode` instance.
"""
if self._expanded:
self.collapse_all()
else:
self.expand_all()
return self
@property
def label(self) -> TextType:
"""The label for the node."""
return self._label
@label.setter
def label(self, new_label: TextType) -> None:
self.set_label(new_label)
def set_label(self, label: TextType) -> None:
"""Set a new label for the node.
Args:
label: A ``str`` or ``Text`` object with the new label.
"""
self._updates += 1
text_label = self._tree.process_label(label)
self._label = text_label
self._tree.call_later(self._tree._refresh_node, self)
def add(
self,
label: TextType,
data: TreeDataType | None = None,
*,
before: int | TreeNode[TreeDataType] | None = None,
after: int | TreeNode[TreeDataType] | None = None,
expand: bool = False,
allow_expand: bool = True,
) -> TreeNode[TreeDataType]:
"""Add a node to the sub-tree.
Args:
label: The new node's label.
data: Data associated with the new node.
before: Optional index or `TreeNode` to add the node before.
after: Optional index or `TreeNode` to add the node after.
expand: Node should be expanded.
allow_expand: Allow user to expand the node via keyboard or mouse.
Returns:
A new Tree node
Raises:
AddNodeError: If there is a problem with the addition request.
Note:
Only one of `before` or `after` can be provided. If both are
provided a `AddNodeError` will be raised.
"""
if before is not None and after is not None:
raise AddNodeError("Unable to add a node both before and after a node")
insert_index: int = len(self.children)
if before is not None:
if isinstance(before, int):
insert_index = before
elif isinstance(before, TreeNode):
try:
insert_index = self.children.index(before)
except ValueError:
raise AddNodeError(
"The node specified for `before` is not a child of this node"
)
else:
raise TypeError(
"`before` argument must be an index or a TreeNode object to add before"
)
if after is not None:
if isinstance(after, int):
insert_index = after + 1
if after < 0:
insert_index += len(self.children)
elif isinstance(after, TreeNode):
try:
insert_index = self.children.index(after) + 1
except ValueError:
raise AddNodeError(
"The node specified for `after` is not a child of this node"
)
else:
raise TypeError(
"`after` argument must be an index or a TreeNode object to add after"
)
text_label = self._tree.process_label(label)
node = self._tree._add_node(self, text_label, data)
node._expanded = expand
node._allow_expand = allow_expand
self._updates += 1
self._children.insert(insert_index, node)
self._tree._invalidate()
return node
def add_leaf(
self,
label: TextType,
data: TreeDataType | None = None,
*,
before: int | TreeNode[TreeDataType] | None = None,
after: int | TreeNode[TreeDataType] | None = None,
) -> TreeNode[TreeDataType]:
"""Add a 'leaf' node (a node that can not expand).
Args:
label: Label for the node.
data: Optional data.
before: Optional index or `TreeNode` to add the node before.
after: Optional index or `TreeNode` to add the node after.
Returns:
New node.
Raises:
AddNodeError: If there is a problem with the addition request.
Note:
Only one of `before` or `after` can be provided. If both are
provided a `AddNodeError` will be raised.
"""
node = self.add(
label,
data,
before=before,
after=after,
expand=False,
allow_expand=False,
)
return node
def _remove_children(self) -> None:
"""Remove child nodes of this node.
Note:
This is the internal support method for `remove_children`. Call
`remove_children` to ensure the tree gets refreshed.
"""
for child in reversed(self._children):
child._remove()
def _remove(self) -> None:
"""Remove the current node and all its children.
Note:
This is the internal support method for `remove`. Call `remove`
to ensure the tree gets refreshed.
"""
self._remove_children()
assert self._parent is not None
del self._parent._children[self._parent._children.index(self)]
del self._tree._tree_nodes[self.id]
def remove(self) -> None:
"""Remove this node from the tree.
Raises:
RemoveRootError: If there is an attempt to remove the root.
"""
if self.is_root:
raise RemoveRootError("Attempt to remove the root node of a Tree.")
self._remove()
self._tree._invalidate()
def remove_children(self) -> None:
"""Remove any child nodes of this node."""
self._remove_children()
self._tree._invalidate()
def refresh(self) -> None:
"""Initiate a refresh (repaint) of this node."""
self._updates += 1
self._tree._refresh_line(self._line)
| TreeNode |
python | pypa__warehouse | tests/unit/accounts/test_models.py | {
"start": 9931,
"end": 10285
} | class ____:
def test_repr(self, db_session):
unique_login = UserUniqueLoginFactory.create()
assert (
repr(unique_login)
== f"<UserUniqueLogin(user={unique_login.user.username!r}, "
f"ip_address={unique_login.ip_address!r}, "
f"status={unique_login.status!r})>"
)
| TestUserUniqueLogin |
python | getsentry__sentry | tests/sentry/dashboards/endpoints/test_organization_dashboard_details.py | {
"start": 165408,
"end": 165648
} | class ____(
OrganizationDashboardDetailsOnDemandTest
):
# Re-run the on-demand tests with the transaction-like widget type
widget_type = DashboardWidgetTypes.TRANSACTION_LIKE
| OrganizationDashboardDetailsOnDemandTransactionLikeTest |
python | coleifer__peewee | tests/cockroachdb.py | {
"start": 618,
"end": 689
} | class ____(TestModel):
id = UUIDKeyField()
title = TextField()
| UID |
python | gevent__gevent | src/gevent/events.py | {
"start": 7849,
"end": 8317
} | class ____(object):
def __init__(self, mem_usage, max_allowed, memory_info):
self.mem_usage = mem_usage
self.max_allowed = max_allowed
self.memory_info = memory_info
def __repr__(self):
return "<%s used=%d max=%d details=%r>" % (
self.__class__.__name__,
self.mem_usage,
self.max_allowed,
self.memory_info,
)
@implementer(IMemoryUsageThresholdExceeded)
| _AbstractMemoryEvent |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 629103,
"end": 629428
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("SponsorableItem", graphql_name="node")
| SponsorableItemEdge |
python | sympy__sympy | sympy/core/function.py | {
"start": 27393,
"end": 27619
} | class ____(Function):
"""Base class for defined functions like ``sin``, ``cos``, ..."""
@cacheit
def __new__(cls, *args, **options) -> Expr: # type: ignore
return cls._new_(*args, **options)
| DefinedFunction |
python | neetcode-gh__leetcode | python/0097-interleaving-string.py | {
"start": 0,
"end": 576
} | class ____:
def isInterleave(self, s1: str, s2: str, s3: str) -> bool:
if len(s1) + len(s2) != len(s3):
return False
dp = [[False] * (len(s2) + 1) for i in range(len(s1) + 1)]
dp[len(s1)][len(s2)] = True
for i in range(len(s1), -1, -1):
for j in range(len(s2), -1, -1):
if i < len(s1) and s1[i] == s3[i + j] and dp[i + 1][j]:
dp[i][j] = True
if j < len(s2) and s2[j] == s3[i + j] and dp[i][j + 1]:
dp[i][j] = True
return dp[0][0]
| Solution |
python | simplejson__simplejson | simplejson/tests/test_namedtuple.py | {
"start": 1167,
"end": 5896
} | class ____(unittest.TestCase):
def test_namedtuple_dumps(self):
for v in [Value(1), Point(1, 2), DuckValue(1), DuckPoint(1, 2)]:
d = v._asdict()
self.assertEqual(d, json.loads(json.dumps(v)))
self.assertEqual(
d,
json.loads(json.dumps(v, namedtuple_as_object=True)))
self.assertEqual(d, json.loads(json.dumps(v, tuple_as_array=False)))
self.assertEqual(
d,
json.loads(json.dumps(v, namedtuple_as_object=True,
tuple_as_array=False)))
def test_namedtuple_dumps_false(self):
for v in [Value(1), Point(1, 2)]:
l = list(v)
self.assertEqual(
l,
json.loads(json.dumps(v, namedtuple_as_object=False)))
self.assertRaises(TypeError, json.dumps, v,
tuple_as_array=False, namedtuple_as_object=False)
def test_namedtuple_dump(self):
for v in [Value(1), Point(1, 2), DuckValue(1), DuckPoint(1, 2)]:
d = v._asdict()
sio = StringIO()
json.dump(v, sio)
self.assertEqual(d, json.loads(sio.getvalue()))
sio = StringIO()
json.dump(v, sio, namedtuple_as_object=True)
self.assertEqual(
d,
json.loads(sio.getvalue()))
sio = StringIO()
json.dump(v, sio, tuple_as_array=False)
self.assertEqual(d, json.loads(sio.getvalue()))
sio = StringIO()
json.dump(v, sio, namedtuple_as_object=True,
tuple_as_array=False)
self.assertEqual(
d,
json.loads(sio.getvalue()))
def test_namedtuple_dump_false(self):
for v in [Value(1), Point(1, 2)]:
l = list(v)
sio = StringIO()
json.dump(v, sio, namedtuple_as_object=False)
self.assertEqual(
l,
json.loads(sio.getvalue()))
self.assertRaises(TypeError, json.dump, v, StringIO(),
tuple_as_array=False, namedtuple_as_object=False)
def test_asdict_not_callable_dump(self):
for f in CONSTRUCTORS:
self.assertRaises(
TypeError,
json.dump,
f(DeadDuck()),
StringIO(),
namedtuple_as_object=True
)
sio = StringIO()
json.dump(f(DeadDict()), sio, namedtuple_as_object=True)
self.assertEqual(
json.dumps(f({})),
sio.getvalue())
self.assertRaises(
TypeError,
json.dump,
f(Value),
StringIO(),
namedtuple_as_object=True
)
def test_asdict_not_callable_dumps(self):
for f in CONSTRUCTORS:
self.assertRaises(TypeError,
json.dumps, f(DeadDuck()), namedtuple_as_object=True)
self.assertRaises(
TypeError,
json.dumps,
f(Value),
namedtuple_as_object=True
)
self.assertEqual(
json.dumps(f({})),
json.dumps(f(DeadDict()), namedtuple_as_object=True))
def test_asdict_unbound_method_dumps(self):
for f in CONSTRUCTORS:
self.assertEqual(
json.dumps(f(Value), default=lambda v: v.__name__),
json.dumps(f(Value.__name__))
)
def test_asdict_does_not_return_dict(self):
if not mock:
if hasattr(unittest, "SkipTest"):
raise unittest.SkipTest("unittest.mock required")
else:
print("unittest.mock not available")
return
fake = mock.Mock()
self.assertTrue(hasattr(fake, '_asdict'))
self.assertTrue(callable(fake._asdict))
self.assertFalse(isinstance(fake._asdict(), dict))
# https://github.com/simplejson/simplejson/pull/284
# when running under a debug build of CPython (COPTS=-UNDEBUG)
# a C assertion could fire due to an unchecked error of an PyDict
# API call on a non-dict internally in _speedups.c. Without a debug
# build of CPython this test likely passes either way despite the
# potential for internal data corruption. Getting it to crash in
# a debug build is not always easy either as it requires an
# assert(!PyErr_Occurred()) that could fire later on.
with self.assertRaises(TypeError):
json.dumps({23: fake}, namedtuple_as_object=True, for_json=False)
| TestNamedTuple |
python | django-debug-toolbar__django-debug-toolbar | tests/test_decorators.py | {
"start": 571,
"end": 1718
} | class ____(TestCase):
"""
Tests require_toolbar functionality and async compatibility.
"""
def setUp(self):
self.factory = RequestFactory()
self.async_factory = AsyncRequestFactory()
@override_settings(DEBUG=True)
def test_require_toolbar_debug_true(self):
response = stub_require_toolbar_view(self.factory.get("/"))
self.assertEqual(response.status_code, 200)
def test_require_toolbar_debug_false(self):
with self.assertRaises(Http404):
stub_require_toolbar_view(self.factory.get("/"))
# Following tests additionally tests async compatibility
# of require_toolbar decorator
@override_settings(DEBUG=True)
async def test_require_toolbar_async_debug_true(self):
response = await stub_require_toolbar_async_view(self.async_factory.get("/"))
self.assertEqual(response.status_code, 200)
async def test_require_toolbar_async_debug_false(self):
with self.assertRaises(Http404):
await stub_require_toolbar_async_view(self.async_factory.get("/"))
@override_settings(DEBUG=True, LANGUAGE_CODE="fr")
| TestRequireToolbar |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/filters/base.py | {
"start": 5230,
"end": 5541
} | class ____(Filter):
"""
Negation of another filter.
"""
def __init__(self, filter: Filter) -> None:
super().__init__()
self.filter = filter
def __call__(self) -> bool:
return not self.filter()
def __repr__(self) -> str:
return f"~{self.filter!r}"
| _Invert |
python | pytorch__pytorch | torch/export/decomp_utils.py | {
"start": 854,
"end": 5753
} | class ____(dict[torch._ops.OperatorBase, Callable]):
"""
This is a custom dictionary that is specifically used for handling decomp_table in export.
The reason we need this is because in the new world, you can only *delete* an op from decomp
table to preserve it. This is problematic for custom ops because we don't know when the custom
op will actually be loaded to the dispatcher. As a result, we need to record the custom ops operations
until we really need to materialize it (which is when we run decomposition pass.)
Invariants we hold are:
1. All aten decomp is loaded at the init time
2. We materialize ALL ops when user ever reads from the table to make it more likely
that dispatcher picks up the custom op.
3. If it is write operation, we don't necessarily materialize
4. We load the final time during export, right before calling run_decompositions()
"""
def __init__(self):
super().__init__()
from torch._decomp import _core_aten_decompositions_post_autograd
# For aten ops, we load them up in the beginning
self.decomp_table = _core_aten_decompositions_post_autograd()
for op in _collect_all_valid_cia_ops_for_aten_namespace():
if op not in PRESERVED_ATEN_CIA_OPS and op not in self.decomp_table:
self.decomp_table[op] = _get_decomp_for_cia(op)
# This is to track the *pending* deleted custom ops that haven't been materialized yet
self.deleted_custom_ops = set()
# When this is true, there shouldn't be any pending operations in the table.
self.has_materialized = False
def __getitem__(self, key):
self._materialize_if_needed()
return self.decomp_table.__getitem__(key)
def __setitem__(self, key, value) -> None:
self.decomp_table.__setitem__(key, value)
if key in self.deleted_custom_ops:
self.deleted_custom_ops.remove(key)
def keys(self):
self._materialize_if_needed()
return self.decomp_table.keys()
def __delitem__(self, key) -> None:
self.pop(key)
def update(self, other_dict): # type: ignore[override]
for k, v in other_dict.items():
self.decomp_table.__setitem__(k, v)
def __missing__(self, key) -> bool:
return not self.__contains__(key)
def __contains__(self, key) -> bool:
self._materialize_if_needed()
return self.decomp_table.__contains__(key)
def __len__(self) -> int:
self._materialize_if_needed()
return self.decomp_table.__len__()
def __iter__(self):
self._materialize_if_needed()
return self.decomp_table.__iter__()
def __reversed__(self):
self._materialize_if_needed()
return self.decomp_table.__reversed__()
def copy(self) -> "CustomDecompTable":
new_dict = CustomDecompTable()
new_dict.decomp_table = self.decomp_table.copy()
new_dict.deleted_custom_ops = self.deleted_custom_ops.copy()
new_dict.has_materialized = self.has_materialized
return new_dict
def pop(self, *args):
def _pop_if_can(key):
if _is_aten_op(key):
return self.decomp_table.pop(key)
if key in self.decomp_table:
# Even if we materialized it, we should add it to the deleted
# custom ops list so that when we materialize next time,
# we should respect user's intention.
self.deleted_custom_ops.add(key)
return self.decomp_table.pop(key)
if key in self.deleted_custom_ops:
raise KeyError(f"{key} doesn't exist in the table")
self.deleted_custom_ops.add(key)
# We would come here when user pops off something that is
# not in the table. In this case, we just pretend that it
# was in the table.
return _get_decomp_for_cia(key)
if len(args) == 1:
return _pop_if_can(args[0])
if len(args) == 2:
try:
return _pop_if_can(args[0])
except KeyError:
return args[1]
def items(self):
self._materialize_if_needed()
return self.decomp_table.items()
def materialize(self) -> dict[torch._ops.OperatorBase, Callable]:
for op in _collect_all_valid_cia_ops():
if _is_aten_op(op):
continue
elif op in self.decomp_table:
continue
elif op not in self.deleted_custom_ops:
self.decomp_table[op] = _get_decomp_for_cia(op)
self.has_materialized = True
self.deleted_custom_ops = set()
return {**self.decomp_table}
def _materialize_if_needed(self) -> None:
if not self.has_materialized:
self.materialize()
| CustomDecompTable |
python | walkccc__LeetCode | solutions/432. All O`one Data Structure/432.py | {
"start": 47,
"end": 425
} | class ____:
def __init__(self, count: int, key: str | None = None):
self.count = count
self.keys: set[str] = {key} if key else set()
self.prev: Node | None = None
self.next: Node | None = None
def __eq__(self, other) -> bool:
if not isinstance(other, Node):
return NotImplemented
return self.count == other.count and self.keys == other.keys
| Node |
python | huggingface__transformers | src/transformers/models/qwen2_vl/modeling_qwen2_vl.py | {
"start": 29102,
"end": 29572
} | class ____(PreTrainedModel):
config: Qwen2VLConfig
base_model_prefix = "model"
input_modalities = ("image", "video", "text")
supports_gradient_checkpointing = True
_no_split_modules = ["Qwen2VLDecoderLayer", "Qwen2VLVisionBlock"]
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = True
_supports_attention_backend = True
@auto_docstring
| Qwen2VLPreTrainedModel |
python | dask__distributed | distributed/tests/test_gc.py | {
"start": 305,
"end": 4588
} | class ____:
"""
A mock timer producing random (but monotonic) values.
"""
def __init__(self):
self.last = 0.0
self.timings = []
self.durations = ([], [])
self.i_durations = itertools.cycle((0, 1))
self.random = random.Random(42)
def __call__(self):
dt = self.random.expovariate(1.0)
self.last += dt
self.timings.append(self.last)
self.durations[next(self.i_durations)].append(dt)
return self.last
def test_fractional_timer():
N = 10
def check_fraction(timer, ft):
# The running fraction should be approximately equal to the
# sum of last N "measurement" intervals over the sum of last
# 2N intervals (not 2N - 1 or 2N + 1)
actual = ft.running_fraction
expected = sum(timer.durations[1][-N:]) / (
sum(timer.durations[0][-N:] + timer.durations[1][-N:])
)
assert actual == pytest.approx(expected)
timer = RandomTimer()
ft = FractionalTimer(n_samples=N, timer=timer)
assert ft.duration_total == 0
for _ in range(N):
ft.start_timing()
ft.stop_timing()
expected_total = sum(ft._durations)
assert ft.duration_total == pytest.approx(expected_total / ft.MULT)
assert len(timer.timings) == N * 2
assert ft.running_fraction is None
assert ft.duration_total > 0
ft.start_timing()
ft.stop_timing()
expected_total += ft._durations[-1]
assert ft.duration_total == pytest.approx(expected_total / ft.MULT)
assert len(timer.timings) == (N + 1) * 2
assert ft.running_fraction is not None
check_fraction(timer, ft)
for _ in range(N * 10):
ft.start_timing()
ft.stop_timing()
check_fraction(timer, ft)
@contextlib.contextmanager
def enable_gc_diagnosis_and_log(diag, level="INFO"):
disable_gc_diagnosis(force=True) # just in case
if gc.callbacks:
print("Unexpected gc.callbacks", gc.callbacks)
with captured_logger("distributed.gc", level=level, propagate=False) as sio:
gc.disable()
gc.collect() # drain any leftover from previous tests
diag.enable()
try:
yield sio
finally:
diag.disable()
gc.enable()
# @pytest.mark.slow
def test_gc_diagnosis_cpu_time():
diag = GCDiagnosis(info_over_frac=0.75)
diag.N_SAMPLES = 3 # shorten tests
with enable_gc_diagnosis_and_log(diag, level="INFO") as sio:
# Spend some CPU time doing only full GCs
for _ in range(diag.N_SAMPLES):
gc.collect()
assert not sio.getvalue()
gc.collect()
gc.collect()
lines = sio.getvalue().splitlines()
assert len(lines) == 1
# Between 80% and 100%
assert re.match(
r"full garbage collections took (100|[89][0-9])% " r"CPU time recently",
lines[0],
)
with enable_gc_diagnosis_and_log(diag, level="INFO") as sio:
# Spend half the CPU time doing full GCs
for _ in range(diag.N_SAMPLES + 1):
t1 = thread_time()
gc.collect()
dt = thread_time() - t1
run_for(dt, timer=thread_time)
# Less than 75% so nothing printed
assert not sio.getvalue()
@pytest.mark.xfail(reason="flaky and re-fails on rerun")
def test_gc_diagnosis_rss_win():
diag = GCDiagnosis(info_over_rss_win=10e6)
def make_refcycle(nbytes):
l = [b"x" * nbytes]
l.append(l)
return
with enable_gc_diagnosis_and_log(diag) as sio:
make_refcycle(100 * 1024)
gc.collect()
# Too small, nothing printed
assert not sio.getvalue()
# NOTE: need to allocate a very large value to make sure RSS
# really shrinks (depending on the system memory allocator,
# "small" memory deallocations may keep the memory in the pool)
make_refcycle(200 * 1024 * 1024)
gc.collect()
lines = sio.getvalue().splitlines()
assert len(lines) == 1
# Several MB released, and at least 1 reference cycles
assert re.match(
r"full garbage collection released [\d\.]+ MB "
r"from [1-9]\d* reference cycles",
lines[0],
)
| RandomTimer |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 42168,
"end": 42649
} | class ____(VOWarning, ValueError):
"""
From VOTable 1.1 and later, ``FIELD`` and ``PARAM`` elements must have
a ``datatype`` field.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:FIELD>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#elem:FIELD>`__
"""
message_template = "'datatype' attribute required on all '{}' elements"
default_args = ("FIELD",)
| E10 |
python | scikit-learn__scikit-learn | sklearn/tests/metadata_routing_common.py | {
"start": 4779,
"end": 5240
} | class ____(list):
# This list is used to get a reference to the sub-estimators, which are not
# necessarily stored on the metaestimator. We need to override __deepcopy__
# because the sub-estimators are probably cloned, which would result in a
# new copy of the list, but we need copy and deep copy both to return the
# same instance.
def __deepcopy__(self, memo):
return self
def __copy__(self):
return self
| _Registry |
python | modin-project__modin | asv_bench/benchmarks/io/parquet.py | {
"start": 923,
"end": 1776
} | class ____:
shapes = get_benchmark_shapes("TimeReadParquet")
data_type = "str_int"
param_names = ["shape"]
params = [
shapes,
]
# test data file should be created only once
def setup_cache(self, test_filename="io_test_file"):
test_filenames = prepare_io_data_parquet(
test_filename, self.data_type, get_benchmark_shapes(self.__class__.__name__)
)
return test_filenames
def setup(self, test_filenames, shape):
# ray init
if ASV_USE_IMPL == "modin":
IMPL.DataFrame([])
self.shape_id = get_shape_id(shape)
def time_read_parquet(self, test_filenames, shape):
execute(
IMPL.read_parquet(
test_filenames[self.shape_id],
)
)
from ..utils import setup # noqa: E402, F401
| TimeReadParquet |
python | bokeh__bokeh | src/bokeh/document/events.py | {
"start": 4847,
"end": 4960
} | class ____:
def _session_callback_added(self, event: SessionCallbackAdded) -> None: ...
| SessionCallbackAddedMixin |
python | coleifer__peewee | tests/fields.py | {
"start": 1362,
"end": 1614
} | class ____(TestModel):
F_STICKY = 1
F_FAVORITE = 2
F_MINIMIZED = 4
flags = BitField()
is_sticky = flags.flag(F_STICKY)
is_favorite = flags.flag(F_FAVORITE)
is_minimized = flags.flag(F_MINIMIZED)
data = BigBitField()
| Bits |
python | PrefectHQ__prefect | src/prefect/server/orchestration/core_policy.py | {
"start": 57673,
"end": 59528
} | class ____(GenericOrchestrationRule):
"""
Prevents transitions to PENDING.
This rule is only used for flow runs.
This is intended to prevent race conditions during duplicate submissions of runs.
Before a run is submitted to its execution environment, it should be placed in a
PENDING state. If two workers attempt to submit the same run, one of them should
encounter a PENDING -> PENDING transition and abort orchestration of the run.
Similarly, if the execution environment starts quickly the run may be in a RUNNING
state when the second worker attempts the PENDING transition. We deny these state
changes as well to prevent duplicate submission. If a run has transitioned to a
RUNNING state a worker should not attempt to submit it again unless it has moved
into a terminal state.
CANCELLING and CANCELLED runs should not be allowed to transition to PENDING.
For re-runs of deployed runs, they should transition to SCHEDULED first.
For re-runs of ad-hoc runs, they should transition directly to RUNNING.
"""
FROM_STATES = {
StateType.PENDING,
StateType.CANCELLING,
StateType.RUNNING,
StateType.CANCELLED,
}
TO_STATES = {StateType.PENDING}
async def before_transition(
self,
initial_state: states.State[Any] | None,
proposed_state: states.State[Any] | None,
context: OrchestrationContext[
orm_models.Run, Union[core.FlowRunPolicy, core.TaskRunPolicy]
],
) -> None:
if initial_state is None or proposed_state is None:
return
await self.abort_transition(
reason=(
f"This run is in a {initial_state.type.name} state and cannot"
" transition to a PENDING state."
)
)
| PreventPendingTransitions |
python | ray-project__ray | python/ray/llm/tests/serve/gpu/deployments/llm/prefill_decode_disagg/test_prefill_decode_disagg_gpu.py | {
"start": 225,
"end": 1267
} | class ____:
"""Test vLLM engine under PD disagg."""
@pytest.mark.asyncio
@pytest.mark.parametrize("kv_connector", ["NixlConnector", "LMCacheConnectorV1"])
async def test_pd_disagg_vllm_engine(
self,
# llm_config is a fixture defined in serve.tests.conftest.py
llm_config: LLMConfig,
kv_connector: str,
monkeypatch,
):
"""Test vLLM engine under PD disagg."""
if kv_connector == "LMCacheConnectorV1":
lmcache_mock = MagicMock()
monkeypatch.setitem(sys.modules, "lmcache", lmcache_mock)
llm_config = llm_config.model_copy(deep=True)
llm_config.engine_kwargs.update(
{
"kv_transfer_config": dict(
kv_connector=kv_connector,
kv_role="kv_both",
),
}
)
vllm_engine = VLLMEngine(llm_config)
assert vllm_engine is not None
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| TestPDDisaggVLLMEngine |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-ollama/llama_index/llms/ollama/base.py | {
"start": 2059,
"end": 29842
} | class ____(FunctionCallingLLM):
"""
Ollama LLM.
Visit https://ollama.com/ to download and install Ollama.
Run `ollama serve` to start a server.
Run `ollama pull <name>` to download a model to run.
Examples:
`pip install llama-index-llms-ollama`
```python
from llama_index.llms.ollama import Ollama
llm = Ollama(model="llama2", request_timeout=60.0)
response = llm.complete("What is the capital of France?")
print(response)
```
"""
base_url: str = Field(
default="http://localhost:11434",
description="Base url the model is hosted under.",
)
model: str = Field(description="The Ollama model to use.")
temperature: Optional[float] = Field(
default=None,
description="The temperature to use for sampling.",
)
context_window: int = Field(
default=-1,
description="The maximum number of context tokens for the model.",
)
request_timeout: float = Field(
default=DEFAULT_REQUEST_TIMEOUT,
description="The timeout for making http request to Ollama API server",
)
prompt_key: str = Field(
default="prompt", description="The key to use for the prompt in API calls."
)
json_mode: bool = Field(
default=False,
description="Whether to use JSON mode for the Ollama API.",
)
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict,
description="Additional model parameters for the Ollama API.",
)
is_function_calling_model: bool = Field(
default=True,
description="Whether the model is a function calling model.",
)
keep_alive: Optional[Union[float, str]] = Field(
default="5m",
description="controls how long the model will stay loaded into memory following the request(default: 5m)",
)
thinking: Optional[bool] = Field(
default=None,
description="Whether to enable or disable thinking in the model.",
)
_client: Optional[Client] = PrivateAttr()
_async_client: Optional[AsyncClient] = PrivateAttr()
def __init__(
self,
model: str,
base_url: str = "http://localhost:11434",
temperature: Optional[float] = None,
context_window: int = -1,
request_timeout: Optional[float] = DEFAULT_REQUEST_TIMEOUT,
prompt_key: str = "prompt",
json_mode: bool = False,
additional_kwargs: Optional[Dict[str, Any]] = None,
client: Optional[Client] = None,
async_client: Optional[AsyncClient] = None,
is_function_calling_model: bool = True,
keep_alive: Optional[Union[float, str]] = None,
thinking: Optional[bool] = None,
**kwargs: Any,
) -> None:
super().__init__(
model=model,
base_url=base_url,
temperature=temperature,
context_window=context_window,
request_timeout=request_timeout,
prompt_key=prompt_key,
json_mode=json_mode,
additional_kwargs=additional_kwargs or {},
is_function_calling_model=is_function_calling_model,
keep_alive=keep_alive,
thinking=thinking,
**kwargs,
)
self._client = client
self._async_client = async_client
@classmethod
def class_name(cls) -> str:
return "Ollama_llm"
@property
def metadata(self) -> LLMMetadata:
"""LLM metadata."""
return LLMMetadata(
context_window=self.get_context_window(),
num_output=DEFAULT_NUM_OUTPUTS,
model_name=self.model,
is_chat_model=True, # Ollama supports chat API for all models
# TODO: Detect if selected model is a function calling model?
is_function_calling_model=self.is_function_calling_model,
)
@property
def client(self) -> Client:
if self._client is None:
self._client = Client(host=self.base_url, timeout=self.request_timeout)
return self._client
@property
def async_client(self) -> AsyncClient:
if self._async_client is None:
self._async_client = AsyncClient(
host=self.base_url, timeout=self.request_timeout
)
return self._async_client
@property
def _model_kwargs(self) -> Dict[str, Any]:
base_kwargs = {
"temperature": self.temperature,
"num_ctx": self.get_context_window(),
}
return {
**base_kwargs,
**self.additional_kwargs,
}
def get_context_window(self) -> int:
if self.context_window == -1:
# Try to get the context window from the model info if not set
info = self.client.show(self.model).modelinfo
for key, value in info.items():
if "context_length" in key:
self.context_window = int(value)
break
# If the context window is still -1, use the default context window
return (
self.context_window if self.context_window != -1 else DEFAULT_CONTEXT_WINDOW
)
def _convert_to_ollama_messages(self, messages: Sequence[ChatMessage]) -> Dict:
ollama_messages = []
unique_tool_calls = []
for message in messages:
cur_ollama_message = {
"role": message.role.value,
"content": "",
}
for block in message.blocks:
if isinstance(block, TextBlock):
cur_ollama_message["content"] += block.text
elif isinstance(block, ImageBlock):
if "images" not in cur_ollama_message:
cur_ollama_message["images"] = []
cur_ollama_message["images"].append(
block.resolve_image(as_base64=True).read().decode("utf-8")
)
elif isinstance(block, ThinkingBlock):
if block.content:
cur_ollama_message["thinking"] = block.content
elif isinstance(block, ToolCallBlock):
if "tool_calls" not in cur_ollama_message:
cur_ollama_message["tool_calls"] = [
{
"function": {
"name": block.tool_name,
"arguments": block.tool_kwargs,
}
}
]
else:
cur_ollama_message["tool_calls"].extend(
[
{
"function": {
"name": block.tool_name,
"arguments": block.tool_kwargs,
}
}
]
)
unique_tool_calls.append((block.tool_name, str(block.tool_kwargs)))
else:
raise ValueError(f"Unsupported block type: {type(block)}")
# keep this code for compatibility with older chat histories
if "tool_calls" in message.additional_kwargs:
if (
"tool_calls" not in cur_ollama_message
or cur_ollama_message["tool_calls"] == []
):
cur_ollama_message["tool_calls"] = message.additional_kwargs[
"tool_calls"
]
else:
for tool_call in message.additional_kwargs["tool_calls"]:
if (
tool_call.get("name", ""),
str(tool_call.get("arguments", {})),
) not in unique_tool_calls:
cur_ollama_message["tool_calls"].append(tool_call)
ollama_messages.append(cur_ollama_message)
return ollama_messages
def _get_response_token_counts(self, raw_response: dict) -> dict:
"""Get the token usage reported by the response."""
try:
prompt_tokens = raw_response["prompt_eval_count"]
completion_tokens = raw_response["eval_count"]
total_tokens = prompt_tokens + completion_tokens
except KeyError:
return {}
except TypeError:
return {}
return {
"prompt_tokens": prompt_tokens,
"completion_tokens": completion_tokens,
"total_tokens": total_tokens,
}
def _prepare_chat_with_tools(
self,
tools: List["BaseTool"],
user_msg: Optional[Union[str, ChatMessage]] = None,
chat_history: Optional[List[ChatMessage]] = None,
verbose: bool = False,
allow_parallel_tool_calls: bool = False, # doesn't appear to be supported by Ollama
tool_required: bool = False, # not yet supported https://github.com/ollama/ollama/blob/main/docs/openai.md#supported-request-fields
**kwargs: Any,
) -> Dict[str, Any]:
tool_specs = [
tool.metadata.to_openai_tool(skip_length_check=True) for tool in tools
]
if isinstance(user_msg, str):
user_msg = ChatMessage(role=MessageRole.USER, content=user_msg)
messages = chat_history or []
if user_msg:
messages.append(user_msg)
return {
"messages": messages,
"tools": tool_specs or None,
}
def _validate_chat_with_tools_response(
self,
response: ChatResponse,
tools: List["BaseTool"],
allow_parallel_tool_calls: bool = False,
**kwargs: Any,
) -> ChatResponse:
"""Validate the response from chat_with_tools."""
if not allow_parallel_tool_calls:
force_single_tool_call(response)
return response
def get_tool_calls_from_response(
self,
response: "ChatResponse",
error_on_no_tool_call: bool = True,
) -> List[ToolSelection]:
"""Predict and call the tool."""
tool_calls = [
block
for block in response.message.blocks
if isinstance(block, ToolCallBlock)
]
if len(tool_calls) < 1:
if error_on_no_tool_call:
raise ValueError(
f"Expected at least one tool call, but got {len(tool_calls)} tool calls."
)
else:
return []
tool_selections = []
for tool_call in tool_calls:
argument_dict = tool_call.tool_kwargs
tool_selections.append(
ToolSelection(
# tool ids not provided by Ollama
tool_id=tool_call.tool_name,
tool_name=tool_call.tool_name,
tool_kwargs=cast(Dict[str, Any], argument_dict),
)
)
return tool_selections
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
ollama_messages = self._convert_to_ollama_messages(messages)
tools = kwargs.pop("tools", None)
think = kwargs.pop("think", None) or self.thinking
format = kwargs.pop("format", "json" if self.json_mode else None)
response = self.client.chat(
model=self.model,
messages=ollama_messages,
stream=False,
format=format,
tools=tools,
think=think,
options=self._model_kwargs,
keep_alive=self.keep_alive,
)
response = dict(response)
blocks: List[TextBlock | ThinkingBlock | ToolCallBlock] = []
tool_calls = response["message"].get("tool_calls", []) or []
thinking = response["message"].get("thinking", None)
if thinking:
blocks.append(ThinkingBlock(content=thinking))
blocks.append(TextBlock(text=response["message"].get("content", "")))
if tool_calls:
for tool_call in tool_calls:
blocks.append(
ToolCallBlock(
tool_name=str(tool_call.get("function", {}).get("name", "")),
tool_kwargs=tool_call.get("function", {}).get("arguments", {}),
)
)
token_counts = self._get_response_token_counts(response)
if token_counts:
response["usage"] = token_counts
return ChatResponse(
message=ChatMessage(
blocks=blocks,
role=response["message"].get("role", MessageRole.ASSISTANT),
),
raw=response,
)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
ollama_messages = self._convert_to_ollama_messages(messages)
tools = kwargs.pop("tools", None)
think = kwargs.pop("think", None) or self.thinking
format = kwargs.pop("format", "json" if self.json_mode else None)
def gen() -> ChatResponseGen:
response = self.client.chat(
model=self.model,
messages=ollama_messages,
stream=True,
format=format,
tools=tools,
think=think,
options=self._model_kwargs,
keep_alive=self.keep_alive,
)
response_txt = ""
thinking_txt = ""
seen_tool_calls = set()
all_tool_calls = []
for r in response:
if r["message"]["content"] is None:
continue
r = dict(r)
response_txt += r["message"].get("content", "") or ""
thinking_txt += r["message"].get("thinking", "") or ""
new_tool_calls = [dict(t) for t in r["message"].get("tool_calls") or []]
for tool_call in new_tool_calls:
if (
str(tool_call["function"]["name"]),
str(tool_call["function"]["arguments"]),
) in seen_tool_calls:
continue
seen_tool_calls.add(
(
str(tool_call["function"]["name"]),
str(tool_call["function"]["arguments"]),
)
)
all_tool_calls.append(tool_call)
token_counts = self._get_response_token_counts(r)
if token_counts:
r["usage"] = token_counts
output_blocks: List[ToolCallBlock | ThinkingBlock | TextBlock] = [
TextBlock(text=response_txt)
]
if thinking_txt:
output_blocks.insert(0, ThinkingBlock(content=thinking_txt))
if all_tool_calls:
for tool_call in all_tool_calls:
output_blocks.append(
ToolCallBlock(
tool_name=tool_call.get("function", {}).get("name", ""),
tool_kwargs=tool_call.get("function", {}).get(
"arguments", {}
),
)
)
yield ChatResponse(
message=ChatMessage(
blocks=output_blocks,
role=r["message"].get("role", MessageRole.ASSISTANT),
),
delta=r["message"].get("content", ""),
raw=r,
additional_kwargs={
"thinking_delta": r["message"].get("thinking", None),
},
)
return gen()
@llm_chat_callback()
async def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
ollama_messages = self._convert_to_ollama_messages(messages)
tools = kwargs.pop("tools", None)
think = kwargs.pop("think", None) or self.thinking
format = kwargs.pop("format", "json" if self.json_mode else None)
async def gen() -> ChatResponseAsyncGen:
response = await self.async_client.chat(
model=self.model,
messages=ollama_messages,
stream=True,
format=format,
tools=tools,
think=think,
options=self._model_kwargs,
keep_alive=self.keep_alive,
)
response_txt = ""
thinking_txt = ""
seen_tool_calls = set()
all_tool_calls = []
async for r in response:
if r["message"]["content"] is None:
continue
r = dict(r)
response_txt += r["message"].get("content", "") or ""
thinking_txt += r["message"].get("thinking", "") or ""
new_tool_calls = [dict(t) for t in r["message"].get("tool_calls") or []]
for tool_call in new_tool_calls:
if (
str(tool_call["function"]["name"]),
str(tool_call["function"]["arguments"]),
) in seen_tool_calls:
continue
seen_tool_calls.add(
(
str(tool_call["function"]["name"]),
str(tool_call["function"]["arguments"]),
)
)
all_tool_calls.append(tool_call)
token_counts = self._get_response_token_counts(r)
if token_counts:
r["usage"] = token_counts
output_blocks: List[ThinkingBlock | ToolCallBlock | TextBlock] = [
TextBlock(text=response_txt)
]
if thinking_txt:
output_blocks.insert(0, ThinkingBlock(content=thinking_txt))
if all_tool_calls:
for tool_call in all_tool_calls:
output_blocks.append(
ToolCallBlock(
tool_name=tool_call.get("function", {}).get("name", ""),
tool_kwargs=tool_call.get("function", {}).get(
"arguments", {}
),
)
)
yield ChatResponse(
message=ChatMessage(
blocks=output_blocks,
role=r["message"].get("role", MessageRole.ASSISTANT),
),
delta=r["message"].get("content", ""),
raw=r,
additional_kwargs={
"thinking_delta": r["message"].get("thinking", None),
},
)
return gen()
@llm_chat_callback()
async def achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
ollama_messages = self._convert_to_ollama_messages(messages)
tools = kwargs.pop("tools", None)
think = kwargs.pop("think", None) or self.thinking
format = kwargs.pop("format", "json" if self.json_mode else None)
response = await self.async_client.chat(
model=self.model,
messages=ollama_messages,
stream=False,
format=format,
tools=tools,
think=think,
options=self._model_kwargs,
keep_alive=self.keep_alive,
)
response = dict(response)
blocks: List[TextBlock | ThinkingBlock | ToolCallBlock] = []
tool_calls = response["message"].get("tool_calls", []) or []
thinking = response["message"].get("thinking", None)
if thinking:
blocks.append(ThinkingBlock(content=thinking))
blocks.append(TextBlock(text=response["message"].get("content", "")))
if tool_calls:
for tool_call in tool_calls:
blocks.append(
ToolCallBlock(
tool_name=tool_call.get("function", {}).get("name", ""),
tool_kwargs=tool_call.get("function", {}).get("arguments", {}),
)
)
token_counts = self._get_response_token_counts(response)
if token_counts:
response["usage"] = token_counts
return ChatResponse(
message=ChatMessage(
blocks=blocks,
role=response["message"].get("role", MessageRole.ASSISTANT),
),
raw=response,
)
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
return chat_to_completion_decorator(self.chat)(prompt, **kwargs)
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
return await achat_to_completion_decorator(self.achat)(prompt, **kwargs)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
return stream_chat_to_completion_decorator(self.stream_chat)(prompt, **kwargs)
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
return await astream_chat_to_completion_decorator(self.astream_chat)(
prompt, **kwargs
)
@dispatcher.span
def structured_predict(
self,
output_cls: Type[Model],
prompt: PromptTemplate,
llm_kwargs: Optional[Dict[str, Any]] = None,
**prompt_args: Any,
) -> Model:
if self.pydantic_program_mode == PydanticProgramMode.DEFAULT:
llm_kwargs = llm_kwargs or {}
llm_kwargs["format"] = output_cls.model_json_schema()
messages = prompt.format_messages(**prompt_args)
response = self.chat(messages, **llm_kwargs)
return output_cls.model_validate_json(response.message.content or "")
else:
return super().structured_predict(
output_cls, prompt, llm_kwargs, **prompt_args
)
@dispatcher.span
async def astructured_predict(
self,
output_cls: Type[Model],
prompt: PromptTemplate,
llm_kwargs: Optional[Dict[str, Any]] = None,
**prompt_args: Any,
) -> Model:
if self.pydantic_program_mode == PydanticProgramMode.DEFAULT:
llm_kwargs = llm_kwargs or {}
llm_kwargs["format"] = output_cls.model_json_schema()
messages = prompt.format_messages(**prompt_args)
response = await self.achat(messages, **llm_kwargs)
return output_cls.model_validate_json(response.message.content or "")
else:
return await super().astructured_predict(
output_cls, prompt, llm_kwargs, **prompt_args
)
@dispatcher.span
def stream_structured_predict(
self,
output_cls: Type[Model],
prompt: PromptTemplate,
llm_kwargs: Optional[Dict[str, Any]] = None,
**prompt_args: Any,
) -> Generator[Union[Model, FlexibleModel], None, None]:
"""
Stream structured predictions as they are generated.
Args:
output_cls: The Pydantic class to parse responses into
prompt: The prompt template to use
llm_kwargs: Optional kwargs for the LLM
**prompt_args: Args to format the prompt with
Returns:
Generator yielding partial objects as they are generated
"""
if self.pydantic_program_mode == PydanticProgramMode.DEFAULT:
def gen(
output_cls: Type[Model],
prompt: PromptTemplate,
llm_kwargs: Dict[str, Any],
prompt_args: Dict[str, Any],
) -> Generator[Union[Model, FlexibleModel], None, None]:
llm_kwargs = llm_kwargs or {}
llm_kwargs["format"] = output_cls.model_json_schema()
messages = prompt.format_messages(**prompt_args)
response_gen = self.stream_chat(messages, **llm_kwargs)
cur_objects = None
for response in response_gen:
try:
objects = process_streaming_objects(
response,
output_cls,
cur_objects=cur_objects,
allow_parallel_tool_calls=False,
flexible_mode=True,
)
cur_objects = (
objects if isinstance(objects, list) else [objects]
)
yield objects
except Exception:
continue
return gen(output_cls, prompt, llm_kwargs, prompt_args)
else:
return super().stream_structured_predict(
output_cls, prompt, llm_kwargs, **prompt_args
)
@dispatcher.span
async def astream_structured_predict(
self,
output_cls: Type[Model],
prompt: PromptTemplate,
llm_kwargs: Optional[Dict[str, Any]] = None,
**prompt_args: Any,
) -> AsyncGenerator[Union[Model, FlexibleModel], None]:
"""Async version of stream_structured_predict."""
if self.pydantic_program_mode == PydanticProgramMode.DEFAULT:
async def gen(
output_cls: Type[Model],
prompt: PromptTemplate,
llm_kwargs: Dict[str, Any],
prompt_args: Dict[str, Any],
) -> AsyncGenerator[Union[Model, FlexibleModel], None]:
llm_kwargs = llm_kwargs or {}
llm_kwargs["format"] = output_cls.model_json_schema()
messages = prompt.format_messages(**prompt_args)
response_gen = await self.astream_chat(messages, **llm_kwargs)
cur_objects = None
async for response in response_gen:
try:
objects = process_streaming_objects(
response,
output_cls,
cur_objects=cur_objects,
allow_parallel_tool_calls=False,
flexible_mode=True,
)
cur_objects = (
objects if isinstance(objects, list) else [objects]
)
yield objects
except Exception:
continue
return gen(output_cls, prompt, llm_kwargs, prompt_args)
else:
# Fall back to non-streaming structured predict
return await super().astream_structured_predict(
output_cls, prompt, llm_kwargs, **prompt_args
)
| Ollama |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride6.py | {
"start": 721,
"end": 929
} | class ____(Parent1[str]):
@overload
def m1(self, x: bool) -> int: ...
@overload
def m1(self, x: str) -> str: ...
def m1(self, x: bool | str) -> int | float | str:
return x
| Child1_3 |
python | scrapy__scrapy | tests/test_crawler.py | {
"start": 35544,
"end": 36908
} | class ____(TestCrawlerProcessSubprocessBase):
@property
def script_dir(self) -> Path:
return self.get_script_dir("AsyncCrawlerProcess")
def test_twisted_reactor_custom_settings_select(self):
log = self.run_script("twisted_reactor_custom_settings_select.py")
assert "Spider closed (finished)" not in log
assert (
"(twisted.internet.asyncioreactor.AsyncioSelectorReactor) "
"does not match the requested one "
"(twisted.internet.selectreactor.SelectReactor)"
) in log
@pytest.mark.requires_uvloop
def test_asyncio_enabled_reactor_same_loop(self):
log = self.run_script("asyncio_custom_loop_custom_settings_same.py")
assert "Spider closed (finished)" in log
assert (
"Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
in log
)
assert "Using asyncio event loop: uvloop.Loop" in log
@pytest.mark.requires_uvloop
def test_asyncio_enabled_reactor_different_loop(self):
log = self.run_script("asyncio_custom_loop_custom_settings_different.py")
assert "Spider closed (finished)" not in log
assert (
"does not match the one specified in the ASYNCIO_EVENT_LOOP "
"setting (uvloop.Loop)"
) in log
| TestAsyncCrawlerProcessSubprocess |
python | streamlit__streamlit | lib/tests/streamlit/elements/exception_test.py | {
"start": 12056,
"end": 12655
} | class ____(unittest.TestCase):
@parameterized.expand(
[
(["a", "b", "c", "-", "d", "e"], 3),
(["-", "a", "b", "c", "d", "e"], 0),
(["a", "b", "c", "d", "e", "-"], 5),
(["a", "b", "c", "d", "e", "f"], 100),
(["a", "-", "c", "d", "-", "f"], 1),
([], 100),
]
)
def test_split_list(self, input_list, split_index):
before, after = _split_list(input_list, split_point=lambda x: x == "-")
assert before == input_list[:split_index]
assert after == input_list[split_index:]
| SplitListTest |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 618707,
"end": 619076
} | class ____(ExprNode):
# Simple returns the module object
type = py_object_type
is_temp = False
subexprs = []
def analyse_types(self, env):
return self
def may_be_none(self):
return False
def calculate_result_code(self):
return Naming.module_cname
def generate_result_code(self, code):
pass
| ModuleRefNode |
python | tensorflow__tensorflow | tensorflow/python/distribute/distribute_coordinator.py | {
"start": 2131,
"end": 3336
} | class ____(object):
"""A reusable barrier class for worker synchronization."""
def __init__(self, num_participants):
"""Initializes the barrier object.
Args:
num_participants: an integer which is the expected number of calls of
`wait` pass to through this barrier.
"""
self._num_participants = num_participants
self._counter = 0
self._flag = False
self._local_sense = threading.local()
self._lock = threading.Lock()
self._condition = threading.Condition()
def wait(self):
"""Waits until all other callers reach the same wait call."""
self._local_sense.value = not self._flag
with self._lock:
self._counter += 1
if self._counter == self._num_participants:
self._counter = 0
self._flag = self._local_sense.value
with self._condition:
while self._flag != self._local_sense.value:
self._condition.wait()
self._condition.notify_all()
def _get_num_workers(cluster_spec):
"""Gets number of workers including chief."""
if not cluster_spec:
return 0
return len(cluster_spec.as_dict().get(_TaskType.WORKER, [])) + len(
cluster_spec.as_dict().get(_TaskType.CHIEF, []))
| _Barrier |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol6.py | {
"start": 538,
"end": 602
} | class ____:
species: str
attributes: list[bytes]
| Armadillo |
python | plotly__plotly.py | plotly/graph_objs/parcats/_labelfont.py | {
"start": 233,
"end": 9886
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "parcats"
_path_str = "parcats.labelfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Labelfont object
Sets the font for the `dimension` labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.parcats.Labelfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Labelfont
"""
super().__init__("labelfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.parcats.Labelfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.parcats.Labelfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Labelfont |
python | PrefectHQ__prefect | tests/server/models/test_block_documents.py | {
"start": 59698,
"end": 67867
} | class ____:
@pytest.fixture()
async def secret_block_type_and_schema(self, session):
class SecretBlockC(Block):
w: SecretDict
x: SecretStr
y: SecretBytes
z: str
secret_block_type = await models.block_types.create_block_type(
session=session, block_type=SecretBlockC._to_block_type()
)
secret_block_schema = await models.block_schemas.create_block_schema(
session=session,
block_schema=SecretBlockC._to_block_schema(
block_type_id=secret_block_type.id
),
)
await session.commit()
return secret_block_type, secret_block_schema
@pytest.fixture()
async def secret_block_document(self, session, secret_block_type_and_schema):
secret_block_type, secret_block_schema = secret_block_type_and_schema
block = await models.block_documents.create_block_document(
session=session,
block_document=schemas.actions.BlockDocumentCreate(
name="secret-block",
data=dict(w={"secret": W}, x=X, y=Y, z=Z),
block_type_id=secret_block_type.id,
block_schema_id=secret_block_schema.id,
),
)
await session.commit()
return block
async def test_create_secret_block_document_obfuscates_results(
self, session, secret_block_type_and_schema
):
secret_block_type, secret_block_schema = secret_block_type_and_schema
block = await models.block_documents.create_block_document(
session=session,
block_document=schemas.actions.BlockDocumentCreate(
name="secret-block",
data=dict(w={"secret": W}, x=X, y=Y, z=Z),
block_type_id=secret_block_type.id,
block_schema_id=secret_block_schema.id,
),
)
assert block.data["w"] == {"secret": obfuscate_string(W)}
assert block.data["x"] == obfuscate_string(X)
assert block.data["y"] == obfuscate_string(Y)
assert block.data["z"] == Z
async def test_read_secret_block_document_by_id_obfuscates_results(
self, session, secret_block_document
):
block = await models.block_documents.read_block_document_by_id(
session=session, block_document_id=secret_block_document.id
)
assert block.data["w"] == {"secret": obfuscate_string(W)}
assert block.data["x"] == obfuscate_string(X)
assert block.data["y"] == obfuscate_string(Y)
assert block.data["z"] == Z
async def test_read_secret_block_document_by_id_with_secrets(
self, session, secret_block_document
):
block = await models.block_documents.read_block_document_by_id(
session=session,
block_document_id=secret_block_document.id,
include_secrets=True,
)
assert block.data["w"] == {"secret": W}
assert block.data["x"] == X
assert block.data["y"] == Y
assert block.data["z"] == Z
async def test_read_secret_block_document_by_name_obfuscates_results(
self, session, secret_block_document
):
block = await models.block_documents.read_block_document_by_name(
session=session,
name=secret_block_document.name,
block_type_slug=secret_block_document.block_type.slug,
)
assert block.data["w"] == {"secret": obfuscate_string(W)}
assert block.data["x"] == obfuscate_string(X)
assert block.data["y"] == obfuscate_string(Y)
assert block.data["z"] == Z
async def test_read_secret_block_document_by_name_with_secrets(
self, session, secret_block_document
):
block = await models.block_documents.read_block_document_by_name(
session=session,
name=secret_block_document.name,
block_type_slug=secret_block_document.block_type.slug,
include_secrets=True,
)
assert block.data["w"] == {"secret": W}
assert block.data["x"] == X
assert block.data["y"] == Y
assert block.data["z"] == Z
async def test_read_secret_block_documents_obfuscates_results(
self, session, secret_block_document
):
blocks = await models.block_documents.read_block_documents(
session=session,
block_document_filter=schemas.filters.BlockDocumentFilter(
block_type_id=dict(any_=[secret_block_document.block_type_id])
),
)
assert len(blocks) == 1
assert blocks[0].data["w"] == {"secret": obfuscate_string(W)}
assert blocks[0].data["x"] == obfuscate_string(X)
assert blocks[0].data["y"] == obfuscate_string(Y)
assert blocks[0].data["z"] == Z
async def test_read_secret_block_documents_with_secrets(
self, session, secret_block_document
):
blocks = await models.block_documents.read_block_documents(
session=session,
block_document_filter=schemas.filters.BlockDocumentFilter(
block_type_id=dict(any_=[secret_block_document.block_type_id])
),
include_secrets=True,
)
assert len(blocks) == 1
assert blocks[0].data["w"] == {"secret": W}
assert blocks[0].data["x"] == X
assert blocks[0].data["y"] == Y
assert blocks[0].data["z"] == Z
@pytest.mark.parametrize(
"merge_existing_data",
[True, False],
)
async def test_updating_secret_block_document_with_obfuscated_result_is_ignored(
self, session, secret_block_document, merge_existing_data
):
block = await models.block_documents.read_block_document_by_id(
session=session,
block_document_id=secret_block_document.id,
include_secrets=False,
)
assert block.data["x"] == obfuscate_string(X)
# set X to the secret value
await models.block_documents.update_block_document(
session=session,
block_document_id=secret_block_document.id,
block_document=schemas.actions.BlockDocumentUpdate(
data=dict(x=obfuscate_string(X)),
merge_existing_data=merge_existing_data,
),
)
block2 = await models.block_documents.read_block_document_by_id(
session=session,
block_document_id=secret_block_document.id,
include_secrets=True,
)
# x was NOT overwritten
assert block2.data["x"] != obfuscate_string(X)
async def test_block_with_list_of_secrets(self, session, prefect_client):
class ListSecretBlock(Block):
x: List[SecretStr]
# save the block
orig_block = ListSecretBlock(x=["a", "b"])
await orig_block.save(name="list-secret", client=prefect_client)
# load the block
block = await ListSecretBlock.load("list-secret", client=prefect_client)
assert block.x[0].get_secret_value() == "a"
assert block.x[1].get_secret_value() == "b"
assert block.x[1].get_secret_value() == "b"
async def test_block_with_list_in_secret_dict(
self, session, secret_block_type_and_schema
):
secret_block_type, secret_block_schema = secret_block_type_and_schema
block = await models.block_documents.create_block_document(
session=session,
block_document=schemas.actions.BlockDocumentCreate(
name="secret-block",
data=dict(w={"secret": [W, W]}, x=X, y=Y, z=Z),
block_type_id=secret_block_type.id,
block_schema_id=secret_block_schema.id,
),
)
assert block.data["w"] == {"secret": obfuscate([W, W])}
block = await models.block_documents.read_block_document_by_name(
session=session,
name="secret-block",
block_type_slug=secret_block_type.slug,
include_secrets=True,
)
assert block.data["w"] == {"secret": [W, W]}
| TestSecretBlockDocuments |
python | readthedocs__readthedocs.org | readthedocs/projects/querysets.py | {
"start": 6600,
"end": 6690
} | class ____(SettingsOverrideObject):
_default_class = ProjectQuerySetBase
| ProjectQuerySet |
python | pytorch__pytorch | torch/_inductor/ops_handler.py | {
"start": 26139,
"end": 27485
} | class ____:
@staticmethod
def add(a, b):
return f"{a} + {b}"
@staticmethod
def sub(a, b):
return f"{a} - {b}"
@staticmethod
def mul(a, b):
return f"{a} * {b}"
@staticmethod
def floordiv(a, b):
return f"{a} // {b}"
@staticmethod
def truediv(a, b):
return f"{a} / {b}"
@staticmethod
def mod(a, b):
# careful, depending on target semantics varies
return f"{a} % {b}"
@staticmethod
def pow(a, b):
return f"{a} ** {b}"
@staticmethod
def lshift(a, b):
return f"{a} << {b}"
@staticmethod
def rshift(a, b):
return f"{a} >> {b}"
@staticmethod
def and_(a, b):
return f"{a} & {b}"
@staticmethod
def or_(a, b):
return f"{a} | {b}"
@staticmethod
def xor(a, b):
return f"{a} ^ {b}"
@staticmethod
def eq(a, b):
return f"{a} == {b}"
@staticmethod
def ne(a, b):
return f"{a} != {b}"
@staticmethod
def lt(a, b):
return f"{a} < {b}"
@staticmethod
def gt(a, b):
return f"{a} > {b}"
@staticmethod
def le(a, b):
return f"{a} <= {b}"
@staticmethod
def ge(a, b):
return f"{a} >= {b}"
@staticmethod
def neg(a):
return f"-{a}"
| BasicMathOpsMixin |
python | pydantic__pydantic | pydantic/experimental/pipeline.py | {
"start": 2873,
"end": 22753
} | class ____(Generic[_InT, _OutT]):
"""Abstract representation of a chain of validation, transformation, and parsing steps."""
_steps: tuple[_Step, ...]
def transform(
self,
func: Callable[[_OutT], _NewOutT],
) -> _Pipeline[_InT, _NewOutT]:
"""Transform the output of the previous step.
If used as the first step in a pipeline, the type of the field is used.
That is, the transformation is applied to after the value is parsed to the field's type.
"""
return _Pipeline[_InT, _NewOutT](self._steps + (_Transform(func),))
@overload
def validate_as(self, tp: type[_NewOutT], *, strict: bool = ...) -> _Pipeline[_InT, _NewOutT]: ...
@overload
def validate_as(self, tp: EllipsisType, *, strict: bool = ...) -> _Pipeline[_InT, Any]: # type: ignore
...
def validate_as(self, tp: type[_NewOutT] | EllipsisType, *, strict: bool = False) -> _Pipeline[_InT, Any]: # type: ignore
"""Validate / parse the input into a new type.
If no type is provided, the type of the field is used.
Types are parsed in Pydantic's `lax` mode by default,
but you can enable `strict` mode by passing `strict=True`.
"""
if isinstance(tp, EllipsisType):
return _Pipeline[_InT, Any](self._steps + (_ValidateAs(_FieldTypeMarker, strict=strict),))
return _Pipeline[_InT, _NewOutT](self._steps + (_ValidateAs(tp, strict=strict),))
def validate_as_deferred(self, func: Callable[[], type[_NewOutT]]) -> _Pipeline[_InT, _NewOutT]:
"""Parse the input into a new type, deferring resolution of the type until the current class
is fully defined.
This is useful when you need to reference the class in it's own type annotations.
"""
return _Pipeline[_InT, _NewOutT](self._steps + (_ValidateAsDefer(func),))
# constraints
@overload
def constrain(self: _Pipeline[_InT, _NewOutGe], constraint: annotated_types.Ge) -> _Pipeline[_InT, _NewOutGe]: ...
@overload
def constrain(self: _Pipeline[_InT, _NewOutGt], constraint: annotated_types.Gt) -> _Pipeline[_InT, _NewOutGt]: ...
@overload
def constrain(self: _Pipeline[_InT, _NewOutLe], constraint: annotated_types.Le) -> _Pipeline[_InT, _NewOutLe]: ...
@overload
def constrain(self: _Pipeline[_InT, _NewOutLt], constraint: annotated_types.Lt) -> _Pipeline[_InT, _NewOutLt]: ...
@overload
def constrain(
self: _Pipeline[_InT, _NewOutLen], constraint: annotated_types.Len
) -> _Pipeline[_InT, _NewOutLen]: ...
@overload
def constrain(
self: _Pipeline[_InT, _NewOutT], constraint: annotated_types.MultipleOf
) -> _Pipeline[_InT, _NewOutT]: ...
@overload
def constrain(
self: _Pipeline[_InT, _NewOutDatetime], constraint: annotated_types.Timezone
) -> _Pipeline[_InT, _NewOutDatetime]: ...
@overload
def constrain(self: _Pipeline[_InT, _OutT], constraint: annotated_types.Predicate) -> _Pipeline[_InT, _OutT]: ...
@overload
def constrain(
self: _Pipeline[_InT, _NewOutInterval], constraint: annotated_types.Interval
) -> _Pipeline[_InT, _NewOutInterval]: ...
@overload
def constrain(self: _Pipeline[_InT, _OutT], constraint: _Eq) -> _Pipeline[_InT, _OutT]: ...
@overload
def constrain(self: _Pipeline[_InT, _OutT], constraint: _NotEq) -> _Pipeline[_InT, _OutT]: ...
@overload
def constrain(self: _Pipeline[_InT, _OutT], constraint: _In) -> _Pipeline[_InT, _OutT]: ...
@overload
def constrain(self: _Pipeline[_InT, _OutT], constraint: _NotIn) -> _Pipeline[_InT, _OutT]: ...
@overload
def constrain(self: _Pipeline[_InT, _NewOutT], constraint: Pattern[str]) -> _Pipeline[_InT, _NewOutT]: ...
def constrain(self, constraint: _ConstraintAnnotation) -> Any:
"""Constrain a value to meet a certain condition.
We support most conditions from `annotated_types`, as well as regular expressions.
Most of the time you'll be calling a shortcut method like `gt`, `lt`, `len`, etc
so you don't need to call this directly.
"""
return _Pipeline[_InT, _OutT](self._steps + (_Constraint(constraint),))
def predicate(self: _Pipeline[_InT, _NewOutT], func: Callable[[_NewOutT], bool]) -> _Pipeline[_InT, _NewOutT]:
"""Constrain a value to meet a certain predicate."""
return self.constrain(annotated_types.Predicate(func))
def gt(self: _Pipeline[_InT, _NewOutGt], gt: _NewOutGt) -> _Pipeline[_InT, _NewOutGt]:
"""Constrain a value to be greater than a certain value."""
return self.constrain(annotated_types.Gt(gt))
def lt(self: _Pipeline[_InT, _NewOutLt], lt: _NewOutLt) -> _Pipeline[_InT, _NewOutLt]:
"""Constrain a value to be less than a certain value."""
return self.constrain(annotated_types.Lt(lt))
def ge(self: _Pipeline[_InT, _NewOutGe], ge: _NewOutGe) -> _Pipeline[_InT, _NewOutGe]:
"""Constrain a value to be greater than or equal to a certain value."""
return self.constrain(annotated_types.Ge(ge))
def le(self: _Pipeline[_InT, _NewOutLe], le: _NewOutLe) -> _Pipeline[_InT, _NewOutLe]:
"""Constrain a value to be less than or equal to a certain value."""
return self.constrain(annotated_types.Le(le))
def len(self: _Pipeline[_InT, _NewOutLen], min_len: int, max_len: int | None = None) -> _Pipeline[_InT, _NewOutLen]:
"""Constrain a value to have a certain length."""
return self.constrain(annotated_types.Len(min_len, max_len))
@overload
def multiple_of(self: _Pipeline[_InT, _NewOutDiv], multiple_of: _NewOutDiv) -> _Pipeline[_InT, _NewOutDiv]: ...
@overload
def multiple_of(self: _Pipeline[_InT, _NewOutMod], multiple_of: _NewOutMod) -> _Pipeline[_InT, _NewOutMod]: ...
def multiple_of(self: _Pipeline[_InT, Any], multiple_of: Any) -> _Pipeline[_InT, Any]:
"""Constrain a value to be a multiple of a certain number."""
return self.constrain(annotated_types.MultipleOf(multiple_of))
def eq(self: _Pipeline[_InT, _OutT], value: _OutT) -> _Pipeline[_InT, _OutT]:
"""Constrain a value to be equal to a certain value."""
return self.constrain(_Eq(value))
def not_eq(self: _Pipeline[_InT, _OutT], value: _OutT) -> _Pipeline[_InT, _OutT]:
"""Constrain a value to not be equal to a certain value."""
return self.constrain(_NotEq(value))
def in_(self: _Pipeline[_InT, _OutT], values: Container[_OutT]) -> _Pipeline[_InT, _OutT]:
"""Constrain a value to be in a certain set."""
return self.constrain(_In(values))
def not_in(self: _Pipeline[_InT, _OutT], values: Container[_OutT]) -> _Pipeline[_InT, _OutT]:
"""Constrain a value to not be in a certain set."""
return self.constrain(_NotIn(values))
# timezone methods
def datetime_tz_naive(self: _Pipeline[_InT, datetime.datetime]) -> _Pipeline[_InT, datetime.datetime]:
return self.constrain(annotated_types.Timezone(None))
def datetime_tz_aware(self: _Pipeline[_InT, datetime.datetime]) -> _Pipeline[_InT, datetime.datetime]:
return self.constrain(annotated_types.Timezone(...))
def datetime_tz(
self: _Pipeline[_InT, datetime.datetime], tz: datetime.tzinfo
) -> _Pipeline[_InT, datetime.datetime]:
return self.constrain(annotated_types.Timezone(tz)) # type: ignore
def datetime_with_tz(
self: _Pipeline[_InT, datetime.datetime], tz: datetime.tzinfo | None
) -> _Pipeline[_InT, datetime.datetime]:
return self.transform(partial(datetime.datetime.replace, tzinfo=tz))
# string methods
def str_lower(self: _Pipeline[_InT, str]) -> _Pipeline[_InT, str]:
return self.transform(str.lower)
def str_upper(self: _Pipeline[_InT, str]) -> _Pipeline[_InT, str]:
return self.transform(str.upper)
def str_title(self: _Pipeline[_InT, str]) -> _Pipeline[_InT, str]:
return self.transform(str.title)
def str_strip(self: _Pipeline[_InT, str]) -> _Pipeline[_InT, str]:
return self.transform(str.strip)
def str_pattern(self: _Pipeline[_InT, str], pattern: str) -> _Pipeline[_InT, str]:
return self.constrain(re.compile(pattern))
def str_contains(self: _Pipeline[_InT, str], substring: str) -> _Pipeline[_InT, str]:
return self.predicate(lambda v: substring in v)
def str_starts_with(self: _Pipeline[_InT, str], prefix: str) -> _Pipeline[_InT, str]:
return self.predicate(lambda v: v.startswith(prefix))
def str_ends_with(self: _Pipeline[_InT, str], suffix: str) -> _Pipeline[_InT, str]:
return self.predicate(lambda v: v.endswith(suffix))
# operators
def otherwise(self, other: _Pipeline[_OtherIn, _OtherOut]) -> _Pipeline[_InT | _OtherIn, _OutT | _OtherOut]:
"""Combine two validation chains, returning the result of the first chain if it succeeds, and the second chain if it fails."""
return _Pipeline((_PipelineOr(self, other),))
__or__ = otherwise
def then(self, other: _Pipeline[_OutT, _OtherOut]) -> _Pipeline[_InT, _OtherOut]:
"""Pipe the result of one validation chain into another."""
return _Pipeline((_PipelineAnd(self, other),))
__and__ = then
def __get_pydantic_core_schema__(self, source_type: Any, handler: GetCoreSchemaHandler) -> cs.CoreSchema:
queue = deque(self._steps)
s = None
while queue:
step = queue.popleft()
s = _apply_step(step, s, handler, source_type)
s = s or cs.any_schema()
return s
def __supports_type__(self, _: _OutT) -> bool:
raise NotImplementedError
validate_as = _Pipeline[Any, Any](()).validate_as
validate_as_deferred = _Pipeline[Any, Any](()).validate_as_deferred
transform = _Pipeline[Any, Any]((_ValidateAs(_FieldTypeMarker),)).transform
def _check_func(
func: Callable[[Any], bool], predicate_err: str | Callable[[], str], s: cs.CoreSchema | None
) -> cs.CoreSchema:
def handler(v: Any) -> Any:
if func(v):
return v
raise ValueError(f'Expected {predicate_err if isinstance(predicate_err, str) else predicate_err()}')
if s is None:
return cs.no_info_plain_validator_function(handler)
else:
return cs.no_info_after_validator_function(handler, s)
def _apply_step(step: _Step, s: cs.CoreSchema | None, handler: GetCoreSchemaHandler, source_type: Any) -> cs.CoreSchema:
    """Translate a single pipeline step into a core schema layered on top of *s*."""
    if isinstance(step, _ValidateAs):
        return _apply_parse(s, step.tp, step.strict, handler, source_type)
    if isinstance(step, _ValidateAsDefer):
        # Deferred validation is always lax.
        return _apply_parse(s, step.tp, False, handler, source_type)
    if isinstance(step, _Transform):
        return _apply_transform(s, step.func, handler)
    if isinstance(step, _Constraint):
        return _apply_constraint(s, step.constraint)
    if isinstance(step, _PipelineOr):
        return cs.union_schema([handler(step.left), handler(step.right)])
    # The only remaining step kind is a chained (then/&) pipeline.
    assert isinstance(step, _PipelineAnd)
    return cs.chain_schema([handler(step.left), handler(step.right)])
def _apply_parse(
    s: cs.CoreSchema | None,
    tp: type[Any],
    strict: bool,
    handler: GetCoreSchemaHandler,
    source_type: Any,
) -> cs.CoreSchema:
    """Build a schema that parses the value as *tp*, chained after *s* if present.

    ``_FieldTypeMarker`` means "the annotated field's own type"; in that case
    the schema is resolved from *source_type* instead of *tp*.
    """
    if tp is _FieldTypeMarker:
        return cs.chain_schema([s, handler(source_type)]) if s else handler(source_type)

    if strict:
        # Delegate strictness to the regular `Strict` annotation machinery.
        tp = Annotated[tp, Strict()]  # type: ignore

    if s and s['type'] == 'any':
        # Chaining after an `any` schema adds nothing — drop it.
        return handler(tp)
    else:
        return cs.chain_schema([s, handler(tp)]) if s else handler(tp)
def _apply_transform(
    s: cs.CoreSchema | None, func: Callable[[Any], Any], handler: GetCoreSchemaHandler
) -> cs.CoreSchema:
    """Append a transformation step, folding well-known ``str`` methods into schema flags."""
    if s is None:
        return cs.no_info_plain_validator_function(func)

    if s['type'] == 'str':
        # These builtins map directly onto native string-schema options;
        # identity comparison mirrors the behavior they replace.
        for known, flag in (
            (str.strip, 'strip_whitespace'),
            (str.lower, 'to_lower'),
            (str.upper, 'to_upper'),
        ):
            if func is known:
                updated = s.copy()
                updated[flag] = True
                return updated

    return cs.no_info_after_validator_function(func, s)
def _apply_constraint(  # noqa: C901
    s: cs.CoreSchema | None, constraint: _ConstraintAnnotation
) -> cs.CoreSchema:
    """Apply a single constraint to a schema.

    When the current schema natively supports the constraint (e.g. ``gt`` on an
    ``int`` schema), the bound is folded into the schema dict; otherwise a
    Python-level check is appended via ``_check_func``.
    """
    if isinstance(constraint, annotated_types.Gt):
        gt = constraint.gt
        if s and s['type'] in {'int', 'float', 'decimal'}:
            s = s.copy()
            if s['type'] == 'int' and isinstance(gt, int):
                s['gt'] = gt
            elif s['type'] == 'float' and isinstance(gt, float):
                s['gt'] = gt
            elif s['type'] == 'decimal' and isinstance(gt, Decimal):
                s['gt'] = gt
        else:

            def check_gt(v: Any) -> bool:
                return v > gt

            s = _check_func(check_gt, f'> {gt}', s)
    elif isinstance(constraint, annotated_types.Ge):
        ge = constraint.ge
        if s and s['type'] in {'int', 'float', 'decimal'}:
            s = s.copy()
            if s['type'] == 'int' and isinstance(ge, int):
                s['ge'] = ge
            elif s['type'] == 'float' and isinstance(ge, float):
                s['ge'] = ge
            elif s['type'] == 'decimal' and isinstance(ge, Decimal):
                s['ge'] = ge

        # NOTE: unlike Gt above, the Python-level check is appended even when
        # the bound was folded into the schema (redundant but harmless).
        def check_ge(v: Any) -> bool:
            return v >= ge

        s = _check_func(check_ge, f'>= {ge}', s)
    elif isinstance(constraint, annotated_types.Lt):
        lt = constraint.lt
        if s and s['type'] in {'int', 'float', 'decimal'}:
            s = s.copy()
            if s['type'] == 'int' and isinstance(lt, int):
                s['lt'] = lt
            elif s['type'] == 'float' and isinstance(lt, float):
                s['lt'] = lt
            elif s['type'] == 'decimal' and isinstance(lt, Decimal):
                s['lt'] = lt

        def check_lt(v: Any) -> bool:
            return v < lt

        s = _check_func(check_lt, f'< {lt}', s)
    elif isinstance(constraint, annotated_types.Le):
        le = constraint.le
        if s and s['type'] in {'int', 'float', 'decimal'}:
            s = s.copy()
            if s['type'] == 'int' and isinstance(le, int):
                s['le'] = le
            elif s['type'] == 'float' and isinstance(le, float):
                s['le'] = le
            elif s['type'] == 'decimal' and isinstance(le, Decimal):
                s['le'] = le

        def check_le(v: Any) -> bool:
            return v <= le

        s = _check_func(check_le, f'<= {le}', s)
    elif isinstance(constraint, annotated_types.Len):
        min_len = constraint.min_length
        max_len = constraint.max_length
        if s and s['type'] in {'str', 'list', 'tuple', 'set', 'frozenset', 'dict'}:
            assert (
                s['type'] == 'str'
                or s['type'] == 'list'
                or s['type'] == 'tuple'
                or s['type'] == 'set'
                or s['type'] == 'dict'
                or s['type'] == 'frozenset'
            )
            s = s.copy()
            if min_len != 0:
                s['min_length'] = min_len
            if max_len is not None:
                s['max_length'] = max_len

        def check_len(v: Any) -> bool:
            if max_len is not None:
                return (min_len <= len(v)) and (len(v) <= max_len)
            return min_len <= len(v)

        # BUG FIX: the failure message previously interpolated a garbage
        # constant ("72,466") in place of the actual upper bound.
        s = _check_func(check_len, f'length >= {min_len} and length <= {max_len}', s)
    elif isinstance(constraint, annotated_types.MultipleOf):
        multiple_of = constraint.multiple_of
        if s and s['type'] in {'int', 'float', 'decimal'}:
            s = s.copy()
            if s['type'] == 'int' and isinstance(multiple_of, int):
                s['multiple_of'] = multiple_of
            elif s['type'] == 'float' and isinstance(multiple_of, float):
                s['multiple_of'] = multiple_of
            elif s['type'] == 'decimal' and isinstance(multiple_of, Decimal):
                s['multiple_of'] = multiple_of

        def check_multiple_of(v: Any) -> bool:
            return v % multiple_of == 0

        s = _check_func(check_multiple_of, f'% {multiple_of} == 0', s)
    elif isinstance(constraint, annotated_types.Timezone):
        tz = constraint.tz

        if tz is ...:
            # `Timezone(...)` means "any timezone-aware datetime".
            if s and s['type'] == 'datetime':
                s = s.copy()
                s['tz_constraint'] = 'aware'
            else:

                def check_tz_aware(v: object) -> bool:
                    assert isinstance(v, datetime.datetime)
                    return v.tzinfo is not None

                s = _check_func(check_tz_aware, 'timezone aware', s)
        elif tz is None:
            if s and s['type'] == 'datetime':
                s = s.copy()
                s['tz_constraint'] = 'naive'
            else:

                def check_tz_naive(v: object) -> bool:
                    assert isinstance(v, datetime.datetime)
                    return v.tzinfo is None

                s = _check_func(check_tz_naive, 'timezone naive', s)
        else:
            raise NotImplementedError('Constraining to a specific timezone is not yet supported')
    elif isinstance(constraint, annotated_types.Interval):
        # An Interval is just a bundle of the four bound constraints.
        if constraint.ge:
            s = _apply_constraint(s, annotated_types.Ge(constraint.ge))
        if constraint.gt:
            s = _apply_constraint(s, annotated_types.Gt(constraint.gt))
        if constraint.le:
            s = _apply_constraint(s, annotated_types.Le(constraint.le))
        if constraint.lt:
            s = _apply_constraint(s, annotated_types.Lt(constraint.lt))
        assert s is not None
    elif isinstance(constraint, annotated_types.Predicate):
        func = constraint.func

        # Same logic as in `_known_annotated_metadata.apply_known_metadata()`:
        # surface the predicate's qualname in the error for debuggability.
        predicate_name = f'{func.__qualname__!r} ' if hasattr(func, '__qualname__') else ''

        def predicate_func(v: Any) -> Any:
            if not func(v):
                raise PydanticCustomError(
                    'predicate_failed',
                    f'Predicate {predicate_name}failed',  # pyright: ignore[reportArgumentType]
                )
            return v

        if s is None:
            s = cs.no_info_plain_validator_function(predicate_func)
        else:
            s = cs.no_info_after_validator_function(predicate_func, s)
    elif isinstance(constraint, _NotEq):
        value = constraint.value

        def check_not_eq(v: Any) -> bool:
            return operator.__ne__(v, value)

        s = _check_func(check_not_eq, f'!= {value}', s)
    elif isinstance(constraint, _Eq):
        value = constraint.value

        def check_eq(v: Any) -> bool:
            return operator.__eq__(v, value)

        s = _check_func(check_eq, f'== {value}', s)
    elif isinstance(constraint, _In):
        values = constraint.values

        def check_in(v: Any) -> bool:
            return operator.__contains__(values, v)

        s = _check_func(check_in, f'in {values}', s)
    elif isinstance(constraint, _NotIn):
        values = constraint.values

        def check_not_in(v: Any) -> bool:
            return operator.__not__(operator.__contains__(values, v))

        s = _check_func(check_not_in, f'not in {values}', s)
    else:
        # The only remaining constraint kind is a compiled regex pattern.
        assert isinstance(constraint, Pattern)
        if s and s['type'] == 'str':
            s = s.copy()
            s['pattern'] = constraint.pattern
        else:

            def check_pattern(v: object) -> bool:
                assert isinstance(v, str)
                return constraint.match(v) is not None

            s = _check_func(check_pattern, f'~ {constraint.pattern}', s)
    return s
| _Pipeline |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-retry-engine-weaviate/llama_index/packs/retry_engine_weaviate/base.py | {
"start": 645,
"end": 2764
} | class ____(BaseLlamaPack):
"""Weaviate Retry query engine pack."""
    def __init__(
        self,
        collection_name: str,
        vector_store_info: VectorStoreInfo,
        host: str,
        auth_client_secret: str,
        nodes: Optional[List[TextNode]] = None,
        **kwargs: Any,
    ) -> None:
        """Connect to Weaviate and build the retry query engine.

        Args:
            collection_name: Weaviate collection to use (created if missing).
            vector_store_info: metadata describing the vector store.
                NOTE(review): accepted but never used in this body — confirm
                whether it should be forwarded somewhere.
            host: Weaviate host URL.
            auth_client_secret: credential passed to the Weaviate client.
            nodes: optional nodes to index; if omitted, the existing
                collection contents are used.
            **kwargs: forwarded to ``VectorStoreIndex``.
        """
        from weaviate import Client

        self.client: Client = Client(host, auth_client_secret=auth_client_secret)
        weaviate_client = self.client
        # NOTE(review): `get_or_create_collection` is called on the raw
        # Weaviate client here — confirm this method exists on the client
        # version targeted by this pack.
        weaviate_collection = weaviate_client.get_or_create_collection(collection_name)

        self._vector_store = WeaviateVectorStore(
            weaviate_collection=weaviate_collection
        )

        if nodes is not None:
            # Fresh nodes supplied: build a new index over them.
            self._storage_context = StorageContext.from_defaults(
                vector_store=self._vector_store
            )
            self._index = VectorStoreIndex(
                nodes, storage_context=self._storage_context, **kwargs
            )
        else:
            # No nodes: attach to whatever the vector store already holds.
            self._index = VectorStoreIndex.from_vector_store(
                self._vector_store, **kwargs
            )
            self._storage_context = self._index.storage_context

        self.retriever = self._index.as_retriever()

        # Wrap the plain query engine so failed responses (per the default
        # guidelines) trigger a retry with a resynthesized query.
        base_query_engine = self._index.as_query_engine()
        guideline_eval = GuidelineEvaluator(guidelines=DEFAULT_GUIDELINES)
        self.query_engine = RetryGuidelineQueryEngine(
            base_query_engine, guideline_eval, resynthesize_query=True
        )
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"vector_store": self._vector_store,
"storage_context": self._storage_context,
"index": self._index,
"retriever": self.retriever,
"query_engine": self.query_engine,
}
    def retrieve(self, query_str: str) -> Any:
        """Retrieve nodes relevant to *query_str* via the underlying retriever."""
        return self.retriever.retrieve(query_str)

    def run(self, *args: Any, **kwargs: Any) -> Any:
        """Run the pipeline: forward everything to the retry query engine."""
        return self.query_engine.query(*args, **kwargs)
| WeaviateRetryEnginePack |
python | django__django | tests/admin_views/admin.py | {
"start": 3498,
"end": 3759
} | class ____(admin.ModelAdmin):
list_filter = (
"chap",
"chap__title",
"chap__book",
"chap__book__name",
"chap__book__promo",
"chap__book__promo__name",
"guest_author__promo__book",
)
| ChapterXtra1Admin |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-epsilla/llama_index/vector_stores/epsilla/base.py | {
"start": 678,
"end": 9973
} | class ____(BasePydanticVectorStore):
"""
The Epsilla Vector Store.
In this vector store we store the text, its embedding and
a few pieces of its metadata in a Epsilla collection. This implemnetation
allows the use of an already existing collection.
It also supports creating a new one if the collection does not
exist or if `overwrite` is set to True.
As a prerequisite, you need to install ``pyepsilla`` package
and have a running Epsilla vector database (for example, through our docker image)
See the following documentation for how to run an Epsilla vector database:
https://epsilla-inc.gitbook.io/epsilladb/quick-start
Args:
client (Any): Epsilla client to connect to.
collection_name (Optional[str]): Which collection to use.
Defaults to "llama_collection".
db_path (Optional[str]): The path where the database will be persisted.
Defaults to "/tmp/langchain-epsilla".
db_name (Optional[str]): Give a name to the loaded database.
Defaults to "langchain_store".
dimension (Optional[int]): The dimension of the embeddings. If not provided,
collection creation will be done on first insert. Defaults to None.
overwrite (Optional[bool]): Whether to overwrite existing collection with same
name. Defaults to False.
Returns:
EpsillaVectorStore: Vectorstore that supports add, delete, and query.
Examples:
`pip install llama-index-vector-stores-epsilla`
```python
from llama_index.vector_stores.epsilla import EpsillaVectorStore
from pyepsilla import vectordb
client = vectordb.Client()
vector_store = EpsillaVectorStore(client=client, db_path="/tmp/llamastore")
```
"""
stores_text: bool = True
flat_metadata: bool = False
_client: vectordb.Client = PrivateAttr()
_collection_name: str = PrivateAttr()
_collection_created: bool = PrivateAttr()
    def __init__(
        self,
        client: Any,
        collection_name: str = "llama_collection",
        db_path: Optional[str] = DEFAULT_PERSIST_DIR,  # sub folder
        db_name: Optional[str] = "llama_db",
        dimension: Optional[int] = None,
        overwrite: bool = False,
        **kwargs: Any,
    ) -> None:
        """Load/attach the Epsilla database and prepare the target collection.

        Raises:
            TypeError: if *client* is not a ``pyepsilla.vectordb.Client``.
            Exception: via ``_handle_error`` when the server reports a failure.
        """
        super().__init__()

        if not isinstance(client, vectordb.Client):
            raise TypeError(
                f"client should be an instance of pyepsilla.vectordb.Client, "
                f"got {type(client)}"
            )

        self._client: vectordb.Client = client
        self._collection_name = collection_name
        self._client.load_db(db_name, db_path)
        self._client.use_db(db_name)
        self._collection_created = False

        status_code, response = self._client.list_tables()
        if status_code != 200:
            self._handle_error(msg=response["message"])
        table_list = response["result"]

        # Existing collection, keep it: mark as ready.
        if self._collection_name in table_list and overwrite is False:
            self._collection_created = True

        # Existing collection, overwrite requested: drop, then (if the
        # embedding dimension is already known) recreate immediately.
        if self._collection_name in table_list and overwrite is True:
            status_code, response = self._client.drop_table(
                table_name=self._collection_name
            )
            if status_code != 200:
                self._handle_error(msg=response["message"])
            logger.debug(
                f"Successfully removed old collection: {self._collection_name}"
            )
            if dimension is not None:
                self._create_collection(dimension)

        # No collection yet: create it now only if the dimension is known;
        # otherwise creation is deferred to the first `add()` call.
        if self._collection_name not in table_list and dimension is not None:
            self._create_collection(dimension)
    @classmethod
    def class_name(cls) -> str:
        """Return the class name used for (de)serialization."""
        return "EpsillaVectorStore"

    @property
    def client(self) -> Any:
        """Return the underlying Epsilla client."""
        return self._client
    def _handle_error(self, msg: str) -> None:
        """Log the server error message and raise it as a generic ``Exception``.

        NOTE(review): the log prefix says "Failed to get records" but this
        helper is also used for create/insert/drop failures — confirm whether
        the wording is intentional.
        """
        logger.error(f"Failed to get records: {msg}")
        raise Exception(f"Error: {msg}.")
    def _create_collection(self, dimension: int) -> None:
        """
        Create collection.

        Args:
            dimension (int): The dimension of the embeddings.
        """
        # Fixed schema: id (primary key), source doc id, raw text, the
        # embedding vector, and a JSON blob for remaining metadata.
        fields: List[dict] = [
            {"name": "id", "dataType": "STRING", "primaryKey": True},
            {"name": DEFAULT_DOC_ID_KEY, "dataType": "STRING"},
            {"name": DEFAULT_TEXT_KEY, "dataType": "STRING"},
            {
                "name": DEFAULT_EMBEDDING_KEY,
                "dataType": "VECTOR_FLOAT",
                "dimensions": dimension,
            },
            {"name": "metadata", "dataType": "JSON"},
        ]
        status_code, response = self._client.create_table(
            table_name=self._collection_name, table_fields=fields
        )
        if status_code != 200:
            self._handle_error(msg=response["message"])
        # Remember the collection exists so later `add()` calls skip creation.
        self._collection_created = True
        logger.debug(f"Successfully created collection: {self._collection_name}")
    def add(
        self,
        nodes: List[BaseNode],
        **add_kwargs: Any,
    ) -> List[str]:
        """
        Add nodes to Epsilla vector store.

        Args:
            nodes: List[BaseNode]: list of nodes with embeddings

        Returns:
            List[str]: List of ids inserted.
        """
        # If the collection doesn't exist yet, create the collection
        # (its dimension is inferred from the first node's embedding).
        if not self._collection_created and len(nodes) > 0:
            dimension = len(nodes[0].get_embedding())
            self._create_collection(dimension)

        elif len(nodes) == 0:
            return []

        ids = []
        records = []
        for node in nodes:
            ids.append(node.node_id)
            text = node.get_content(metadata_mode=MetadataMode.NONE)
            # Text is stored in its own column, so strip it from the
            # serialized node content kept in `metadata`.
            metadata_dict = node_to_metadata_dict(node, remove_text=True)
            metadata = metadata_dict["_node_content"]
            record = {
                "id": node.node_id,
                DEFAULT_DOC_ID_KEY: node.ref_doc_id,
                DEFAULT_TEXT_KEY: text,
                DEFAULT_EMBEDDING_KEY: node.get_embedding(),
                "metadata": metadata,
            }
            records.append(record)

        status_code, response = self._client.insert(
            table_name=self._collection_name, records=records
        )
        if status_code != 200:
            self._handle_error(msg=response["message"])
        return ids
    def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
        """
        Delete nodes using with ref_doc_id.

        Args:
            ref_doc_id (str): The doc_id of the document to delete.

        Raises:
            NotImplementedError: always — filtered deletion is not yet
                supported by this integration.
        """
        raise NotImplementedError("Delete with filtering will be coming soon.")
    def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
        """
        Query index for top k most similar nodes.

        Args:
            query (VectorStoreQuery): query.

        Returns:
            Vector store query result.

        Raises:
            ValueError: if no collection has been created yet.
            NotImplementedError: for non-default query modes, metadata
                filters, or doc-id filters (unsupported by Epsilla here).
        """
        if not self._collection_created:
            raise ValueError("Please initialize a collection first.")

        if query.mode != VectorStoreQueryMode.DEFAULT:
            raise NotImplementedError(f"Epsilla does not support {query.mode} yet.")

        if query.filters is not None:
            raise NotImplementedError("Epsilla does not support Metadata filters yet.")

        if query.doc_ids is not None and len(query.doc_ids) > 0:
            raise NotImplementedError("Epsilla does not support filters yet.")

        status_code, response = self._client.query(
            table_name=self._collection_name,
            query_field=DEFAULT_EMBEDDING_KEY,
            query_vector=query.query_embedding,
            limit=query.similarity_top_k,
            with_distance=True,
        )
        if status_code != 200:
            self._handle_error(msg=response["message"])

        results = response["result"]
        logger.debug(
            f"Successfully searched embedding in collection: {self._collection_name}"
            f" Num Results: {len(results)}"
        )

        nodes = []
        similarities = []
        ids = []
        for res in results:
            try:
                # Preferred path: node content serialized under "metadata".
                node = metadata_dict_to_node({"_node_content": res["metadata"]})
                node.text = res[DEFAULT_TEXT_KEY]
            except Exception:
                # NOTE: deprecated legacy logic for backward compatibility
                metadata, node_info, relationships = legacy_metadata_dict_to_node(
                    res["metadata"]
                )
                node = TextNode(
                    id=res["id"],
                    text=res[DEFAULT_TEXT_KEY],
                    metadata=metadata,
                    start_char_idx=node_info.get("start", None),
                    end_char_idx=node_info.get("end", None),
                    relationships=relationships,
                )
            nodes.append(node)
            # "@distance" is Epsilla's similarity score for the match.
            similarities.append(res["@distance"])
            ids.append(res["id"])

        return VectorStoreQueryResult(nodes=nodes, similarities=similarities, ids=ids)
| EpsillaVectorStore |
python | pandas-dev__pandas | pandas/core/groupby/generic.py | {
"start": 4937,
"end": 54671
} | class ____(GroupBy[Series]):
    def _wrap_agged_manager(self, mgr: Manager) -> Series:
        # Rebuild a Series from the aggregated manager, carrying over the
        # original Series' name.
        out = self.obj._constructor_from_mgr(mgr, axes=mgr.axes)
        out._name = self.obj.name
        return out

    def _get_data_to_aggregate(
        self, *, numeric_only: bool = False, name: str | None = None
    ) -> SingleBlockManager:
        # Return the single-block manager backing the grouped Series.
        # `name` is only used to build the error message below.
        ser = self._obj_with_exclusions
        single = ser._mgr
        if numeric_only and not is_numeric_dtype(ser.dtype):
            # GH#41291 match Series behavior
            kwd_name = "numeric_only"
            raise TypeError(
                f"Cannot use {kwd_name}=True with "
                f"{type(self).__name__}.{name} and non-numeric dtypes."
            )
        return single
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.groupby([1, 1, 2, 2]).min()
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg('min')
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])
min max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.groupby([1, 1, 2, 2]).agg(
... minimum='min',
... maximum='max',
... )
minimum maximum
1 1 2
2 3 4
The resulting dtype will reflect the return value of the aggregating function.
>>> s.groupby([1, 1, 2, 2]).agg(lambda x: x.astype(float).min())
1 1.0
2 3.0
dtype: float64
"""
)
    def apply(self, func, *args, **kwargs) -> Series:
        """
        Apply function ``func`` group-wise and combine the results together.

        The function passed to ``apply`` must take a series as its first
        argument and return a DataFrame, Series or scalar. ``apply`` will
        then take care of combining the results back together into a single
        dataframe or series. ``apply`` is therefore a highly flexible
        grouping method.

        While ``apply`` is a very flexible method, its downside is that
        using it can be quite a bit slower than using more specific methods
        like ``agg`` or ``transform``. Pandas offers a wide range of method that will
        be much faster than using ``apply`` for their specific purposes, so try to
        use them before reaching for ``apply``.

        Parameters
        ----------
        func : callable
            A callable that takes a series as its first argument, and
            returns a dataframe, a series or a scalar. In addition the
            callable may take positional and keyword arguments.

        *args : tuple
            Optional positional arguments to pass to ``func``.

        **kwargs : dict
            Optional keyword arguments to pass to ``func``.

        Returns
        -------
        Series or DataFrame
            A pandas object with the result of applying ``func`` to each group.

        See Also
        --------
        pipe : Apply function to the full GroupBy object instead of to each
            group.
        aggregate : Apply aggregate function to the GroupBy object.
        transform : Apply function column-by-column to the GroupBy object.
        Series.apply : Apply a function to a Series.
        DataFrame.apply : Apply a function to each row or column of a DataFrame.

        Notes
        -----
        The resulting dtype will reflect the return value of the passed ``func``,
        see the examples below.

        Functions that mutate the passed object can produce unexpected
        behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
        for more details.

        Examples
        --------
        >>> s = pd.Series([0, 1, 2], index="a a b".split())
        >>> g1 = s.groupby(s.index, group_keys=False)
        >>> g2 = s.groupby(s.index, group_keys=True)

        From ``s`` above we can see that ``g`` has two groups, ``a`` and ``b``.
        Notice that ``g1`` have ``g2`` have two groups, ``a`` and ``b``, and only
        differ in their ``group_keys`` argument. Calling `apply` in various ways,
        we can get different grouping results:

        Example 1: The function passed to `apply` takes a Series as
        its argument and returns a Series.  `apply` combines the result for
        each group together into a new Series.

        The resulting dtype will reflect the return value of the passed ``func``.

        >>> g1.apply(lambda x: x * 2 if x.name == "a" else x / 2)
        a    0.0
        a    2.0
        b    1.0
        dtype: float64

        In the above, the groups are not part of the index. We can have them included
        by using ``g2`` where ``group_keys=True``:

        >>> g2.apply(lambda x: x * 2 if x.name == "a" else x / 2)
        a  a    0.0
           a    2.0
        b  b    1.0
        dtype: float64

        Example 2: The function passed to `apply` takes a Series as
        its argument and returns a scalar. `apply` combines the result for
        each group together into a Series, including setting the index as
        appropriate:

        >>> g1.apply(lambda x: x.max() - x.min())
        a    1
        b    0
        dtype: int64

        The ``group_keys`` argument has no effect here because the result is not
        like-indexed (i.e. :ref:`a transform <groupby.transform>`) when compared
        to the input.

        >>> g2.apply(lambda x: x.max() - x.min())
        a    1
        b    0
        dtype: int64
        """
        # Thin override: the shared implementation lives in GroupBy.apply;
        # this exists to carry the Series-specific docstring.
        return super().apply(func, *args, **kwargs)
    def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
        """
        Aggregate using one or more operations.

        The ``aggregate`` method enables flexible and efficient aggregation of grouped
        data using a variety of functions, including built-in, user-defined, and
        optimized JIT-compiled functions.

        Parameters
        ----------
        func : function, str, list, dict or None
            Function to use for aggregating the data. If a function, must either
            work when passed a Series or when passed to Series.apply.

            Accepted combinations are:

            - function
            - string function name
            - list of functions and/or function names, e.g. ``[np.sum, 'mean']``
            - None, in which case ``**kwargs`` are used with Named Aggregation. Here
              the output has one column for each element in ``**kwargs``. The name of
              the column is keyword, whereas the value determines the aggregation
              used to compute the values in the column.

              Can also accept a Numba JIT function with
              ``engine='numba'`` specified. Only passing a single function is supported
              with this engine.

              If the ``'numba'`` engine is chosen, the function must be
              a user defined function with ``values`` and ``index`` as the
              first and second arguments respectively in the function signature.
              Each group's index will be passed to the user defined function
              and optionally available for use.

            .. deprecated:: 2.1.0

                Passing a dictionary is deprecated and will raise in a future version
                of pandas. Pass a list of aggregations instead.
        *args
            Positional arguments to pass to func.
        engine : str, default None
            * ``'cython'`` : Runs the function through C-extensions from cython.
            * ``'numba'`` : Runs the function through JIT compiled code from numba.
            * ``None`` : Defaults to ``'cython'`` or globally setting
              ``compute.use_numba``
        engine_kwargs : dict, default None
            * For ``'cython'`` engine, there are no accepted ``engine_kwargs``
            * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
              and ``parallel`` dictionary keys. The values must either be ``True`` or
              ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
              ``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be
              applied to the function
        **kwargs
            * If ``func`` is None, ``**kwargs`` are used to define the output names and
              aggregations via Named Aggregation. See ``func`` entry.
            * Otherwise, keyword arguments to be passed into func.

        Returns
        -------
        Series
            Aggregated Series based on the grouping and the applied aggregation
            functions.

        See Also
        --------
        SeriesGroupBy.apply : Apply function func group-wise
            and combine the results together.
        SeriesGroupBy.transform : Transforms the Series on each group
            based on the given function.
        Series.aggregate : Aggregate using one or more operations.

        Notes
        -----
        When using ``engine='numba'``, there will be no "fall back" behavior internally.
        The group data and group index will be passed as numpy arrays to the JITed
        user defined function, and no alternative execution attempts will be tried.

        Functions that mutate the passed object can produce unexpected
        behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
        for more details.

        The resulting dtype will reflect the return value of the passed ``func``,
        see the examples below.

        Examples
        --------
        >>> s = pd.Series([1, 2, 3, 4])
        >>> s
        0    1
        1    2
        2    3
        3    4
        dtype: int64

        >>> s.groupby([1, 1, 2, 2]).min()
        1    1
        2    3
        dtype: int64

        >>> s.groupby([1, 1, 2, 2]).agg("min")
        1    1
        2    3
        dtype: int64

        >>> s.groupby([1, 1, 2, 2]).agg(["min", "max"])
           min  max
        1    1    2
        2    3    4

        The output column names can be controlled by passing
        the desired column names and aggregations as keyword arguments.

        >>> s.groupby([1, 1, 2, 2]).agg(
        ...     minimum="min",
        ...     maximum="max",
        ... )
           minimum  maximum
        1        1        2
        2        3        4

        The resulting dtype will reflect the return value of the aggregating
        function.

        >>> s.groupby([1, 1, 2, 2]).agg(lambda x: x.astype(float).min())
        1    1.0
        2    3.0
        dtype: float64
        """
        # func=None signals named aggregation: output names/aggfuncs come
        # from **kwargs instead.
        relabeling = func is None
        columns = None
        if relabeling:
            columns, func = validate_func_kwargs(kwargs)
            kwargs = {}

        if isinstance(func, str):
            if maybe_use_numba(engine) and engine is not None:
                # Not all agg functions support numba, only propagate numba kwargs
                # if user asks for numba, and engine is not None
                # (if engine is None, the called function will handle the case where
                # numba is requested via the global option)
                kwargs["engine"] = engine
            if engine_kwargs is not None:
                kwargs["engine_kwargs"] = engine_kwargs
            # String funcs dispatch to the named method, e.g. self.min().
            return getattr(self, func)(*args, **kwargs)

        elif isinstance(func, abc.Iterable):
            # Catch instances of lists / tuples
            # but not the class list / tuple itself.
            func = maybe_mangle_lambdas(func)
            kwargs["engine"] = engine
            kwargs["engine_kwargs"] = engine_kwargs
            ret = self._aggregate_multiple_funcs(func, *args, **kwargs)
            if relabeling:
                # columns is not narrowed by mypy from relabeling flag
                assert columns is not None  # for mypy
                ret.columns = columns
            if not self.as_index:
                ret = ret.reset_index()
            return ret

        else:
            if maybe_use_numba(engine):
                return self._aggregate_with_numba(
                    func, *args, engine_kwargs=engine_kwargs, **kwargs
                )

            if self.ngroups == 0:
                # e.g. test_evaluate_with_empty_groups without any groups to
                # iterate over, we have no output on which to do dtype
                # inference. We default to using the existing dtype.
                # xref GH#51445
                obj = self._obj_with_exclusions
                return self._wrap_aggregated_output(
                    self.obj._constructor(
                        [],
                        name=self.obj.name,
                        index=self._grouper.result_index,
                        dtype=obj.dtype,
                    )
                )
            return self._python_agg_general(func, *args, **kwargs)

    # `agg` is the documented alias for `aggregate`.
    agg = aggregate
def _python_agg_general(self, func, *args, **kwargs):
f = lambda x: func(x, *args, **kwargs)
obj = self._obj_with_exclusions
result = self._grouper.agg_series(obj, f)
res = obj._constructor(result, name=obj.name)
return self._wrap_aggregated_output(res)
    def _aggregate_multiple_funcs(self, arg, *args, **kwargs) -> DataFrame:
        # Apply each (label, func) pair and assemble the results into a
        # DataFrame with one column per aggregation.
        if isinstance(arg, dict):
            raise SpecificationError("nested renamer is not supported")

        if any(isinstance(x, (tuple, list)) for x in arg):
            # Normalize bare funcs to (name, func) pairs.
            arg = ((x, x) if not isinstance(x, (tuple, list)) else x for x in arg)
        else:
            # list of functions / function names
            columns = (com.get_callable_name(f) or f for f in arg)
            arg = zip(columns, arg, strict=True)

        results: dict[base.OutputKey, DataFrame | Series] = {}
        with com.temp_setattr(self, "as_index", True):
            # Combine results using the index, need to adjust index after
            # if as_index=False (GH#50724)
            for idx, (name, func) in enumerate(arg):
                key = base.OutputKey(label=name, position=idx)
                results[key] = self.aggregate(func, *args, **kwargs)

        if any(isinstance(x, DataFrame) for x in results.values()):
            from pandas import concat

            res_df = concat(
                results.values(), axis=1, keys=[key.label for key in results]
            )
            return res_df

        indexed_output = {key.position: val for key, val in results.items()}
        output = self.obj._constructor_expanddim(indexed_output, index=None)
        output.columns = Index(key.label for key in results)
        return output
    def _wrap_applied_output(
        self,
        data: Series,
        values: list[Any],
        not_indexed_same: bool = False,
        is_transform: bool = False,
    ) -> DataFrame | Series:
        """
        Wrap the output of SeriesGroupBy.apply into the expected result.

        Parameters
        ----------
        data : Series
            Input data for groupby operation.
        values : List[Any]
            Applied output for each group.
        not_indexed_same : bool, default False
            Whether the applied outputs are not indexed the same as the group axes.

        Returns
        -------
        DataFrame or Series
        """
        if len(values) == 0:
            # GH #6265
            if is_transform:
                # GH#47787 see test_group_on_empty_multiindex
                res_index = data.index
            elif not self.group_keys:
                res_index = None
            else:
                res_index = self._grouper.result_index

            return self.obj._constructor(
                [],
                name=self.obj.name,
                index=res_index,
                dtype=data.dtype,
            )
        assert values is not None

        if isinstance(values[0], dict):
            # GH #823 #24880
            # dict results: build a frame keyed by group, then stack it back
            # into a MultiIndexed Series.
            index = self._grouper.result_index
            res_df = self.obj._constructor_expanddim(values, index=index)
            # if self.observed is False,
            # keep all-NaN rows created while re-indexing
            res_ser = res_df.stack()
            res_ser.name = self.obj.name
            return res_ser
        elif isinstance(values[0], (Series, DataFrame)):
            result = self._concat_objects(
                values,
                not_indexed_same=not_indexed_same,
                is_transform=is_transform,
            )
            if isinstance(result, Series):
                result.name = self.obj.name
            if not self.as_index and not_indexed_same:
                result = self._insert_inaxis_grouper(result)
                result.index = default_index(len(result))
            return result.__finalize__(self.obj, method="groupby")
        else:
            # GH #6265 #24880
            # scalar results: one entry per group.
            result = self.obj._constructor(
                data=values, index=self._grouper.result_index, name=self.obj.name
            )
            if not self.as_index:
                result = self._insert_inaxis_grouper(result)
                result.index = default_index(len(result))
            return result.__finalize__(self.obj, method="groupby")
__examples_series_doc = dedent(
"""
>>> ser = pd.Series([390.0, 350.0, 30.0, 20.0],
... index=["Falcon", "Falcon", "Parrot", "Parrot"],
... name="Max Speed")
>>> grouped = ser.groupby([1, 1, 2, 2])
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
Falcon 0.707107
Falcon -0.707107
Parrot 0.707107
Parrot -0.707107
Name: Max Speed, dtype: float64
Broadcast result of the transformation
>>> grouped.transform(lambda x: x.max() - x.min())
Falcon 40.0
Falcon 40.0
Parrot 10.0
Parrot 10.0
Name: Max Speed, dtype: float64
>>> grouped.transform("mean")
Falcon 370.0
Falcon 370.0
Parrot 25.0
Parrot 25.0
Name: Max Speed, dtype: float64
The resulting dtype will reflect the return value of the passed ``func``,
for example:
>>> grouped.transform(lambda x: x.astype(int).max())
Falcon 390
Falcon 390
Parrot 30
Parrot 30
Name: Max Speed, dtype: int64
"""
)
    # __doc__ is assembled by the decorators from the shared transform
    # template plus the Series examples above; do not add a docstring here.
    @Substitution(klass="Series", example=__examples_series_doc)
    @Appender(_transform_template)
    def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
        return self._transform(
            func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
        )
    def _cython_transform(self, how: str, numeric_only: bool = False, **kwargs):
        # Run a named cython transform kernel (`how`) over the Series values
        # and rewrap as a like-indexed Series.
        obj = self._obj_with_exclusions
        try:
            result = self._grouper._cython_operation(
                "transform", obj._values, how, 0, **kwargs
            )
        except NotImplementedError as err:
            # e.g. test_groupby_raises_string
            raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err

        return obj._constructor(result, index=self.obj.index, name=obj.name)
    def _transform_general(
        self, func: Callable, engine, engine_kwargs, *args, **kwargs
    ) -> Series:
        """
        Transform with a callable `func`.

        Applies `func` to each group and concatenates the per-group results
        back into a Series aligned with the original ordering.
        """
        if maybe_use_numba(engine):
            return self._transform_with_numba(
                func, *args, engine_kwargs=engine_kwargs, **kwargs
            )
        assert callable(func)
        klass = type(self.obj)

        results = []
        for name, group in self._grouper.get_iterator(
            self._obj_with_exclusions,
        ):
            # this setattr is needed for test_transform_lambda_with_datetimetz
            object.__setattr__(group, "name", name)
            res = func(group, *args, **kwargs)

            results.append(klass(res, index=group.index))

        # check for empty "results" to avoid concat ValueError
        if results:
            from pandas.core.reshape.concat import concat

            concatenated = concat(results, ignore_index=True)
            result = self._set_result_index_ordered(concatenated)
        else:
            result = self.obj._constructor(dtype=np.float64)

        result.name = self.obj.name
        return result
def filter(self, func, dropna: bool = True, *args, **kwargs):
"""
Filter elements from groups that don't satisfy a criterion.
Elements from groups are filtered if they do not satisfy the
boolean criterion specified by func.
Parameters
----------
func : function
Criterion to apply to each group. Should return True or False.
dropna : bool, optional
Drop groups that do not pass the filter. True by default; if False,
groups that evaluate False are filled with NaNs.
*args : tuple
Optional positional arguments to pass to `func`.
**kwargs : dict
Optional keyword arguments to pass to `func`.
Returns
-------
Series
The filtered subset of the original Series.
See Also
--------
Series.filter: Filter elements of ungrouped Series.
DataFrameGroupBy.filter : Filter elements from groups base on criterion.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
for more details.
Examples
--------
>>> df = pd.DataFrame(
... {
... "A": ["foo", "bar", "foo", "bar", "foo", "bar"],
... "B": [1, 2, 3, 4, 5, 6],
... "C": [2.0, 5.0, 8.0, 1.0, 2.0, 9.0],
... }
... )
>>> grouped = df.groupby("A")
>>> df.groupby("A").B.filter(lambda x: x.mean() > 3.0)
1 2
3 4
5 6
Name: B, dtype: int64
"""
if isinstance(func, str):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x) -> bool:
b = wrapper(x)
return notna(b) and b
try:
indices = [
self._get_index(name)
for name, group in self._grouper.get_iterator(self._obj_with_exclusions)
if true_and_notna(group)
]
except (ValueError, TypeError) as err:
raise TypeError("the filter must return a boolean result") from err
filtered = self._apply_filter(indices, dropna)
return filtered
def nunique(self, dropna: bool = True) -> Series | DataFrame:
"""
Return number of unique elements in the group.
Parameters
----------
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
Series
Number of unique values within each group.
See Also
--------
core.resample.Resampler.nunique : Method nunique for Resampler.
Examples
--------
>>> lst = ["a", "a", "b", "b"]
>>> ser = pd.Series([1, 2, 3, 3], index=lst)
>>> ser
a 1
a 2
b 3
b 3
dtype: int64
>>> ser.groupby(level=0).nunique()
a 2
b 1
dtype: int64
"""
ids = self._grouper.ids
ngroups = self._grouper.ngroups
val = self.obj._values
codes, uniques = algorithms.factorize(val, use_na_sentinel=dropna, sort=False)
if self._grouper.has_dropped_na:
mask = ids >= 0
ids = ids[mask]
codes = codes[mask]
group_index = get_group_index(
labels=[ids, codes],
shape=(ngroups, len(uniques)),
sort=False,
xnull=dropna,
)
if dropna:
mask = group_index >= 0
if (~mask).any():
ids = ids[mask]
group_index = group_index[mask]
mask = duplicated(group_index, "first")
res = np.bincount(ids[~mask], minlength=ngroups)
res = ensure_int64(res)
ri = self._grouper.result_index
result: Series | DataFrame = self.obj._constructor(
res, index=ri, name=self.obj.name
)
if not self.as_index:
result = self._insert_inaxis_grouper(result)
result.index = default_index(len(result))
return result
@doc(Series.describe)
def describe(self, percentiles=None, include=None, exclude=None) -> Series:
return super().describe(
percentiles=percentiles, include=include, exclude=exclude
)
def value_counts(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins=None,
dropna: bool = True,
) -> Series | DataFrame:
"""
Return a Series or DataFrame containing counts of unique rows.
Parameters
----------
normalize : bool, default False
Return proportions rather than frequencies.
sort : bool, default True
Sort by frequencies.
ascending : bool, default False
Sort in ascending order.
bins : int or list of ints, optional
Rather than count values, group them into half-open bins,
a convenience for pd.cut, only works with numeric data.
dropna : bool, default True
Don't include counts of rows that contain NA values.
Returns
-------
Series or DataFrame
Series if the groupby ``as_index`` is True, otherwise DataFrame.
See Also
--------
Series.value_counts: Equivalent method on Series.
DataFrame.value_counts: Equivalent method on DataFrame.
DataFrameGroupBy.value_counts: Equivalent method on DataFrameGroupBy.
Notes
-----
- If the groupby ``as_index`` is True then the returned Series will have a
MultiIndex with one level per input column.
- If the groupby ``as_index`` is False then the returned DataFrame will have an
additional column with the value_counts. The column is labelled 'count' or
'proportion', depending on the ``normalize`` parameter.
By default, rows that contain any NA values are omitted from
the result.
By default, the result will be in descending order so that the
first element of each group is the most frequently-occurring row.
Examples
--------
>>> s = pd.Series(
... [1, 1, 2, 3, 2, 3, 3, 1, 1, 3, 3, 3],
... index=["A", "A", "A", "A", "A", "A", "B", "B", "B", "B", "B", "B"],
... )
>>> s
A 1
A 1
A 2
A 3
A 2
A 3
B 3
B 1
B 1
B 3
B 3
B 3
dtype: int64
>>> g1 = s.groupby(s.index)
>>> g1.value_counts(bins=2)
A (0.997, 2.0] 4
(2.0, 3.0] 2
B (2.0, 3.0] 4
(0.997, 2.0] 2
Name: count, dtype: int64
>>> g1.value_counts(normalize=True)
A 1 0.333333
2 0.333333
3 0.333333
B 3 0.666667
1 0.333333
Name: proportion, dtype: float64
"""
name = "proportion" if normalize else "count"
if bins is None:
result = self._value_counts(
normalize=normalize, sort=sort, ascending=ascending, dropna=dropna
)
result.name = name
return result
from pandas.core.reshape.merge import get_join_indexers
from pandas.core.reshape.tile import cut
ids = self._grouper.ids
val = self.obj._values
index_names = self._grouper.names + [self.obj.name]
if isinstance(val.dtype, CategoricalDtype) or (
bins is not None and not np.iterable(bins)
):
# scalar bins cannot be done at top level
# in a backward compatible way
# GH38672 relates to categorical dtype
ser = self.apply(
Series.value_counts,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
)
ser.name = name
ser.index.names = index_names
return ser
# groupby removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
lab: Index | np.ndarray
if bins is None:
lab, lev = algorithms.factorize(val, sort=True)
llab = lambda lab, inc: lab[inc]
else:
# lab is a Categorical with categories an IntervalIndex
cat_ser = cut(Series(val, copy=False), bins, include_lowest=True)
cat_obj = cast("Categorical", cat_ser._values)
lev = cat_obj.categories
lab = lev.take(
cat_obj.codes,
allow_fill=True,
fill_value=lev._na_value,
)
llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
if isinstance(lab.dtype, IntervalDtype):
# TODO: should we do this inside II?
lab_interval = cast(Interval, lab)
sorter = np.lexsort((lab_interval.left, lab_interval.right, ids))
else:
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0]
idx = np.r_[0, idchanges]
if not len(ids):
idx = idchanges
# new values are where sorted labels change
lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
inc = np.r_[True, lchanges]
if not len(val):
inc = lchanges
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
if isinstance(self._grouper.result_index, MultiIndex):
codes = list(self._grouper.result_index.codes)
else:
codes = [
algorithms.factorize(
self._grouper.result_index,
sort=self._grouper._sort,
use_na_sentinel=self._grouper.dropna,
)[0]
]
codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
levels = self._grouper.levels + [lev]
if dropna:
mask = codes[-1] != -1
if mask.all():
dropna = False
else:
out, codes = out[mask], [level_codes[mask] for level_codes in codes]
if normalize:
out = out.astype("float")
d = np.diff(np.r_[idx, len(ids)])
if dropna:
m = ids[lab == -1]
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if dropna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, codes[-1] = out[sorter], codes[-1][sorter]
if bins is not None:
# for compat. with libgroupby.value_counts need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(len(out), dtype="bool")
for level_codes in codes[:-1]:
diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]
ncat, nbin = diff.sum(), len(levels[-1])
left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
right = [diff.cumsum() - 1, codes[-1]]
# error: Argument 1 to "get_join_indexers" has incompatible type
# "List[ndarray[Any, Any]]"; expected "List[Union[Union[ExtensionArray,
# ndarray[Any, Any]], Index, Series]]
_, idx = get_join_indexers(
left, # type: ignore[arg-type]
right,
sort=False,
how="left",
)
if idx is not None:
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
def build_codes(lev_codes: np.ndarray) -> np.ndarray:
return np.repeat(lev_codes[diff], nbin)
codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
codes.append(left[-1])
mi = MultiIndex(
levels=levels, codes=codes, names=index_names, verify_integrity=False
)
if is_integer_dtype(out.dtype):
out = ensure_int64(out)
result = self.obj._constructor(out, index=mi, name=name)
if not self.as_index:
result = result.reset_index()
return result
def take(
self,
indices: TakeIndexer,
**kwargs,
) -> Series:
"""
Return the elements in the given *positional* indices in each group.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
If a requested index does not exist for some group, this method will raise.
To get similar behavior that ignores indices that don't exist, see
:meth:`.SeriesGroupBy.nth`.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take in each group.
**kwargs
For compatibility with :meth:`numpy.take`. Has no effect on the
output.
Returns
-------
Series
A Series containing the elements taken from each group.
See Also
--------
Series.take : Take elements from a Series along an axis.
Series.loc : Select a subset of a DataFrame by labels.
Series.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.
SeriesGroupBy.nth : Similar to take, won't raise if indices don't exist.
Examples
--------
>>> df = pd.DataFrame(
... [
... ("falcon", "bird", 389.0),
... ("parrot", "bird", 24.0),
... ("lion", "mammal", 80.5),
... ("monkey", "mammal", np.nan),
... ("rabbit", "mammal", 15.0),
... ],
... columns=["name", "class", "max_speed"],
... index=[4, 3, 2, 1, 0],
... )
>>> df
name class max_speed
4 falcon bird 389.0
3 parrot bird 24.0
2 lion mammal 80.5
1 monkey mammal NaN
0 rabbit mammal 15.0
>>> gb = df["name"].groupby([1, 1, 2, 2, 2])
Take elements at rows 0 and 1 in each group.
>>> gb.take([0, 1])
1 4 falcon
3 parrot
2 2 lion
1 monkey
Name: name, dtype: object
We may take elements using negative integers for positive indices,
starting from the end of the object, just like with Python lists.
>>> gb.take([-1, -2])
1 3 parrot
4 falcon
2 0 rabbit
1 monkey
Name: name, dtype: object
"""
result = self._op_via_apply("take", indices=indices, **kwargs)
return result
def skew(
self,
skipna: bool = True,
numeric_only: bool = False,
**kwargs,
) -> Series:
"""
Return unbiased skew within groups.
Normalized by N-1.
Parameters
----------
skipna : bool, default True
Exclude NA/null values when computing the result.
numeric_only : bool, default False
Include only float, int, boolean columns. Not implemented for Series.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
Series
Unbiased skew within groups.
See Also
--------
Series.skew : Return unbiased skew over requested axis.
Examples
--------
>>> ser = pd.Series(
... [390.0, 350.0, 357.0, np.nan, 22.0, 20.0, 30.0],
... index=[
... "Falcon",
... "Falcon",
... "Falcon",
... "Falcon",
... "Parrot",
... "Parrot",
... "Parrot",
... ],
... name="Max Speed",
... )
>>> ser
Falcon 390.0
Falcon 350.0
Falcon 357.0
Falcon NaN
Parrot 22.0
Parrot 20.0
Parrot 30.0
Name: Max Speed, dtype: float64
>>> ser.groupby(level=0).skew()
Falcon 1.525174
Parrot 1.457863
Name: Max Speed, dtype: float64
>>> ser.groupby(level=0).skew(skipna=False)
Falcon NaN
Parrot 1.457863
Name: Max Speed, dtype: float64
"""
return self._cython_agg_general(
"skew", alt=None, skipna=skipna, numeric_only=numeric_only, **kwargs
)
def kurt(
self,
skipna: bool = True,
numeric_only: bool = False,
**kwargs,
) -> Series:
"""
Return unbiased kurtosis within groups.
Parameters
----------
skipna : bool, default True
Exclude NA/null values when computing the result.
numeric_only : bool, default False
Include only float, int, boolean columns. Not implemented for Series.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
Series
Unbiased kurtosis within groups.
See Also
--------
Series.kurt : Return unbiased kurtosis over requested axis.
Examples
--------
>>> ser = pd.Series(
... [390.0, 350.0, 357.0, 333.0, np.nan, 22.0, 20.0, 30.0, 40.0, 41.0],
... index=[
... "Falcon",
... "Falcon",
... "Falcon",
... "Falcon",
... "Falcon",
... "Parrot",
... "Parrot",
... "Parrot",
... "Parrot",
... "Parrot",
... ],
... name="Max Speed",
... )
>>> ser
Falcon 390.0
Falcon 350.0
Falcon 357.0
Falcon 333.0
Falcon NaN
Parrot 22.0
Parrot 20.0
Parrot 30.0
Parrot 40.0
Parrot 41.0
Name: Max Speed, dtype: float64
>>> ser.groupby(level=0).kurt()
Falcon 1.622109
Parrot -2.878714
Name: Max Speed, dtype: float64
>>> ser.groupby(level=0).kurt(skipna=False)
Falcon NaN
Parrot -2.878714
Name: Max Speed, dtype: float64
"""
def alt(obj):
# This should not be reached since the cython path should raise
# TypeError and not NotImplementedError.
raise TypeError(f"'kurt' is not supported for dtype={obj.dtype}")
return self._cython_agg_general(
"kurt", alt=alt, skipna=skipna, numeric_only=numeric_only, **kwargs
)
@property
@doc(Series.plot.__doc__)
def plot(self) -> GroupByPlot:
result = GroupByPlot(self)
return result
@doc(Series.nlargest.__doc__)
def nlargest(
self, n: int = 5, keep: Literal["first", "last", "all"] = "first"
) -> Series:
f = partial(Series.nlargest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
# already ordered and n >= all group sizes.
result = self._python_apply_general(f, data, not_indexed_same=True)
return result
@doc(Series.nsmallest.__doc__)
def nsmallest(
self, n: int = 5, keep: Literal["first", "last", "all"] = "first"
) -> Series:
f = partial(Series.nsmallest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
# already ordered and n >= all group sizes.
result = self._python_apply_general(f, data, not_indexed_same=True)
return result
def idxmin(self, skipna: bool = True) -> Series:
"""
Return the row label of the minimum value.
If multiple values equal the minimum, the first row label with that
value is returned.
Parameters
----------
skipna : bool, default True
Exclude NA values.
Returns
-------
Series
Indexes of minima in each group.
Raises
------
ValueError
When there are no valid values for a group. Then can happen if:
* There is an unobserved group and ``observed=False``.
* All values for a group are NA.
* Some values for a group are NA and ``skipna=False``.
.. versionchanged:: 3.0.0
Previously if all values for a group are NA or some values for a group are
NA and ``skipna=False``, this method would return NA. Now it raises instead.
See Also
--------
numpy.argmin : Return indices of the minimum values
along the given axis.
DataFrame.idxmin : Return index of first occurrence of minimum
over requested axis.
Series.idxmax : Return index *label* of the first occurrence
of maximum of values.
Examples
--------
>>> ser = pd.Series(
... [1, 2, 3, 4],
... index=pd.DatetimeIndex(
... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
... ),
... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 4
dtype: int64
>>> ser.groupby(["a", "a", "b", "b"]).idxmin()
a 2023-01-01
b 2023-02-01
dtype: datetime64[us]
"""
return self._idxmax_idxmin("idxmin", skipna=skipna)
def idxmax(self, skipna: bool = True) -> Series:
"""
Return the row label of the maximum value.
If multiple values equal the maximum, the first row label with that
value is returned.
Parameters
----------
skipna : bool, default True
Exclude NA values.
Returns
-------
Series
Indexes of maxima in each group.
Raises
------
ValueError
When there are no valid values for a group. Then can happen if:
* There is an unobserved group and ``observed=False``.
* All values for a group are NA.
* Some values for a group are NA and ``skipna=False``.
.. versionchanged:: 3.0.0
Previously if all values for a group are NA or some values for a group are
NA and ``skipna=False``, this method would return NA. Now it raises instead.
See Also
--------
numpy.argmax : Return indices of the maximum values
along the given axis.
DataFrame.idxmax : Return index of first occurrence of maximum
over requested axis.
Series.idxmin : Return index *label* of the first occurrence
of minimum of values.
Examples
--------
>>> ser = pd.Series(
... [1, 2, 3, 4],
... index=pd.DatetimeIndex(
... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
... ),
... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 4
dtype: int64
>>> ser.groupby(["a", "a", "b", "b"]).idxmax()
a 2023-01-15
b 2023-02-15
dtype: datetime64[us]
"""
return self._idxmax_idxmin("idxmax", skipna=skipna)
@doc(Series.corr.__doc__)
def corr(
self,
other: Series,
method: CorrelationMethod = "pearson",
min_periods: int | None = None,
) -> Series:
result = self._op_via_apply(
"corr", other=other, method=method, min_periods=min_periods
)
return result
@doc(Series.cov.__doc__)
def cov(
self, other: Series, min_periods: int | None = None, ddof: int | None = 1
) -> Series:
result = self._op_via_apply(
"cov", other=other, min_periods=min_periods, ddof=ddof
)
return result
@property
def is_monotonic_increasing(self) -> Series:
"""
Return whether each group's values are monotonically increasing.
Returns
-------
Series
See Also
--------
SeriesGroupBy.is_monotonic_decreasing : Return whether each group's values
are monotonically decreasing.
Examples
--------
>>> s = pd.Series([2, 1, 3, 4], index=["Falcon", "Falcon", "Parrot", "Parrot"])
>>> s.groupby(level=0).is_monotonic_increasing
Falcon False
Parrot True
dtype: bool
"""
return self.apply(lambda ser: ser.is_monotonic_increasing)
@property
def is_monotonic_decreasing(self) -> Series:
"""
Return whether each group's values are monotonically decreasing.
Returns
-------
Series
See Also
--------
SeriesGroupBy.is_monotonic_increasing : Return whether each group's values
are monotonically increasing.
Examples
--------
>>> s = pd.Series([2, 1, 3, 4], index=["Falcon", "Falcon", "Parrot", "Parrot"])
>>> s.groupby(level=0).is_monotonic_decreasing
Falcon True
Parrot False
dtype: bool
"""
return self.apply(lambda ser: ser.is_monotonic_decreasing)
@doc(Series.hist.__doc__)
def hist(
self,
by=None,
ax=None,
grid: bool = True,
xlabelsize: int | None = None,
xrot: float | None = None,
ylabelsize: int | None = None,
yrot: float | None = None,
figsize: tuple[float, float] | None = None,
bins: int | Sequence[int] = 10,
backend: str | None = None,
legend: bool = False,
**kwargs,
):
result = self._op_via_apply(
"hist",
by=by,
ax=ax,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
figsize=figsize,
bins=bins,
backend=backend,
legend=legend,
**kwargs,
)
return result
@property
@doc(Series.dtype.__doc__)
def dtype(self) -> Series:
return self.apply(lambda ser: ser.dtype)
def unique(self) -> Series:
"""
Return unique values for each group.
It returns unique values for each of the grouped values. Returned in
order of appearance. Hash table-based unique, therefore does NOT sort.
Returns
-------
Series
Unique values for each of the grouped values.
See Also
--------
Series.unique : Return unique values of Series object.
Examples
--------
>>> df = pd.DataFrame(
... [
... ("Chihuahua", "dog", 6.1),
... ("Beagle", "dog", 15.2),
... ("Chihuahua", "dog", 6.9),
... ("Persian", "cat", 9.2),
... ("Chihuahua", "dog", 7),
... ("Persian", "cat", 8.8),
... ],
... columns=["breed", "animal", "height_in"],
... )
>>> df
breed animal height_in
0 Chihuahua dog 6.1
1 Beagle dog 15.2
2 Chihuahua dog 6.9
3 Persian cat 9.2
4 Chihuahua dog 7.0
5 Persian cat 8.8
>>> ser = df.groupby("animal")["breed"].unique()
>>> ser
animal
cat [Persian]
dog [Chihuahua, Beagle]
Name: breed, dtype: object
"""
result = self._op_via_apply("unique")
return result
@set_module("pandas.api.typing")
| SeriesGroupBy |
python | scrapy__scrapy | tests/test_http_response.py | {
"start": 33403,
"end": 36623
} | class ____(TestTextResponse):
response_class = XmlResponse
def test_xml_encoding(self):
body = b"<xml></xml>"
r1 = self.response_class("http://www.example.com", body=body)
self._assert_response_values(r1, self.response_class._DEFAULT_ENCODING, body)
body = b"""<?xml version="1.0" encoding="iso-8859-1"?><xml></xml>"""
r2 = self.response_class("http://www.example.com", body=body)
self._assert_response_values(r2, "iso-8859-1", body)
# make sure replace() preserves the explicit encoding passed in the __init__ method
body = b"""<?xml version="1.0" encoding="iso-8859-1"?><xml></xml>"""
r3 = self.response_class("http://www.example.com", body=body, encoding="utf-8")
body2 = b"New body"
r4 = r3.replace(body=body2)
self._assert_response_values(r4, "utf-8", body2)
def test_replace_encoding(self):
# make sure replace() keeps the previous encoding unless overridden explicitly
body = b"""<?xml version="1.0" encoding="iso-8859-1"?><xml></xml>"""
body2 = b"""<?xml version="1.0" encoding="utf-8"?><xml></xml>"""
r5 = self.response_class("http://www.example.com", body=body)
r6 = r5.replace(body=body2)
r7 = r5.replace(body=body2, encoding="utf-8")
self._assert_response_values(r5, "iso-8859-1", body)
self._assert_response_values(r6, "iso-8859-1", body2)
self._assert_response_values(r7, "utf-8", body2)
def test_selector(self):
body = b'<?xml version="1.0" encoding="utf-8"?><xml><elem>value</elem></xml>'
response = self.response_class("http://www.example.com", body=body)
assert isinstance(response.selector, Selector)
assert response.selector.type == "xml"
assert response.selector is response.selector # property is cached
assert response.selector.response is response
assert response.selector.xpath("//elem/text()").getall() == ["value"]
def test_selector_shortcuts(self):
body = b'<?xml version="1.0" encoding="utf-8"?><xml><elem>value</elem></xml>'
response = self.response_class("http://www.example.com", body=body)
assert (
response.xpath("//elem/text()").getall()
== response.selector.xpath("//elem/text()").getall()
)
def test_selector_shortcuts_kwargs(self):
body = b"""<?xml version="1.0" encoding="utf-8"?>
<xml xmlns:somens="http://scrapy.org">
<somens:elem>value</somens:elem>
</xml>"""
response = self.response_class("http://www.example.com", body=body)
assert (
response.xpath(
"//s:elem/text()", namespaces={"s": "http://scrapy.org"}
).getall()
== response.selector.xpath(
"//s:elem/text()", namespaces={"s": "http://scrapy.org"}
).getall()
)
response.selector.register_namespace("s2", "http://scrapy.org")
assert (
response.xpath(
"//s1:elem/text()", namespaces={"s1": "http://scrapy.org"}
).getall()
== response.selector.xpath("//s2:elem/text()").getall()
)
| TestXmlResponse |
python | un33k__django-uuslug | uuslug/tests/tests.py | {
"start": 4253,
"end": 7335
} | class ____(TestCase):
"""Tests for Slug - Unique"""
def test_manager(self):
name = "john"
# with PrintQueries("create first john"): # display the SQL queries
with self.assertNumQueries(2):
# 1. query: SELECT test, if slug 'john' exists
# 2. query: INSERT values
obj = CoolSlug.objects.create(name=name)
self.assertEqual(obj.slug, "john")
# with PrintQueries("create second john"): # display the SQL queries
with self.assertNumQueries(3):
# 1. query: SELECT test, if slug 'john' exists
# 2. query: SELECT test, if slug 'john-1' exists
# 3. query: INSERT values
obj = CoolSlug.objects.create(name=name)
self.assertEqual(obj.slug, "john-1")
def test_start_no(self):
name = 'Foo Bar'
# with PrintQueries("create first 'Foo Bar'"): # display the SQL queries
with self.assertNumQueries(2):
# 1. query: SELECT test, if slug 'foo-bar' exists
# 2. query: INSERT values
obj = AnotherSlug.objects.create(name=name)
self.assertEqual(obj.slug, "foo-bar")
# with PrintQueries("create second 'Foo Bar'"): # display the SQL queries
with self.assertNumQueries(3):
# 1. query: SELECT test, if slug 'foo-bar' exists
# 2. query: SELECT test, if slug 'foo-bar-2' exists
# 3. query: INSERT values
obj = AnotherSlug.objects.create(name=name)
self.assertEqual(obj.slug, "foo-bar-2")
# with PrintQueries("create third 'Foo Bar'"): # display the SQL queries
with self.assertNumQueries(4):
# 1. query: SELECT test, if slug 'foo-bar' exists
# 2. query: SELECT test, if slug 'foo-bar-2' exists
# 3. query: SELECT test, if slug 'foo-bar-3' exists
# 4. query: INSERT values
obj = AnotherSlug.objects.create(name=name)
self.assertEqual(obj.slug, "foo-bar-3")
def test_max_length(self):
name = 'jaja---lol-méméméoo--a'
obj = TruncatedSlug.objects.create(name=name)
self.assertEqual(obj.slug, "jaja-lol-mememeoo") # 17 is max_length
obj = TruncatedSlug.objects.create(name=name)
self.assertEqual(obj.slug, "jaja-lol-mememe-2") # 17 is max_length
obj = TruncatedSlug.objects.create(name=name)
self.assertEqual(obj.slug, "jaja-lol-mememe-3") # 17 is max_length
def test_max_length_exact_word_boundary(self):
name = 'jaja---lol-méméméoo--a'
obj = SmartTruncatedExactWordBoundarySlug.objects.create(name=name)
self.assertEqual(obj.slug, "jaja-lol-mememeoo-a") # 19 is max_length
obj = SmartTruncatedExactWordBoundarySlug.objects.create(name=name)
self.assertEqual(obj.slug, "jaja-lol-mememeoo-9") # 19 is max_length, start_no = 9
obj = SmartTruncatedExactWordBoundarySlug.objects.create(name=name)
self.assertEqual(obj.slug, "jaja-lol-mememeo-10") # 19 is max_length, readjust for "-10"
| SlugUniqueTestCase |
python | python-openxml__python-docx | src/docx/oxml/simpletypes.py | {
"start": 10075,
"end": 10466
} | class ____(XsdBoolean):
@classmethod
def convert_from_xml(cls, str_value: str) -> bool:
if str_value not in ("1", "0", "true", "false", "on", "off"):
raise InvalidXmlError(
"value must be one of '1', '0', 'true', 'false', 'on', or 'o"
"ff', got '%s'" % str_value
)
return str_value in ("1", "true", "on")
| ST_OnOff |
python | getsentry__sentry | src/sentry/auth/provider.py | {
"start": 743,
"end": 1492
} | class ____(namedtuple("MigratingIdentityId", ["id", "legacy_id"])):
"""
MigratingIdentityId may be used in the ``id`` field of an identity
dictionary to facilitate migrating user identities from one identifying id
to another.
Context - when google oauth was initially created, the auth_identity key was simply
the provider email. This can cause issues if the customer changes their domain name,
and now their email is different and they're locked out of their account.
This logic updates their id to the provider id instead.
NOTE: this should _only_ really be relevant for google oauth implementation
"""
__slots__ = ()
def __str__(self) -> str:
return force_str(self.id)
| MigratingIdentityId |
python | google__pytype | pytype/errors/error_types.py | {
"start": 5455,
"end": 5716
} | class ____(ProtocolError):
def __init__(self, left_type, other_type, attribute, actual, expected):
super().__init__(left_type, other_type)
self.attribute_name = attribute
self.actual_type = actual
self.expected_type = expected
| ProtocolTypeError |
python | openai__openai-python | src/openai/resources/models.py | {
"start": 9586,
"end": 9999
} | class ____:
def __init__(self, models: Models) -> None:
self._models = models
self.retrieve = _legacy_response.to_raw_response_wrapper(
models.retrieve,
)
self.list = _legacy_response.to_raw_response_wrapper(
models.list,
)
self.delete = _legacy_response.to_raw_response_wrapper(
models.delete,
)
| ModelsWithRawResponse |
python | boto__boto3 | tests/unit/dynamodb/test_conditions.py | {
"start": 14332,
"end": 21772
} | class ____(unittest.TestCase):
def setUp(self):
self.builder = ConditionExpressionBuilder()
def assert_condition_expression_build(
self,
condition,
ref_string,
ref_names,
ref_values,
is_key_condition=False,
):
exp_string, names, values = self.builder.build_expression(
condition, is_key_condition=is_key_condition
)
assert exp_string == ref_string
assert names == ref_names
assert values == ref_values
def test_bad_input(self):
a = Attr('myattr')
with pytest.raises(DynamoDBNeedsConditionError):
self.builder.build_expression(a)
def test_build_expression_eq(self):
a = Attr('myattr')
self.assert_condition_expression_build(
a.eq('foo'), '#n0 = :v0', {'#n0': 'myattr'}, {':v0': 'foo'}
)
def test_reset(self):
a = Attr('myattr')
self.assert_condition_expression_build(
a.eq('foo'), '#n0 = :v0', {'#n0': 'myattr'}, {':v0': 'foo'}
)
self.assert_condition_expression_build(
a.eq('foo'), '#n1 = :v1', {'#n1': 'myattr'}, {':v1': 'foo'}
)
self.builder.reset()
self.assert_condition_expression_build(
a.eq('foo'), '#n0 = :v0', {'#n0': 'myattr'}, {':v0': 'foo'}
)
def test_build_expression_lt(self):
a = Attr('myattr')
self.assert_condition_expression_build(
a.lt('foo'), '#n0 < :v0', {'#n0': 'myattr'}, {':v0': 'foo'}
)
def test_build_expression_lte(self):
a1 = Attr('myattr')
self.assert_condition_expression_build(
a1.lte('foo'), '#n0 <= :v0', {'#n0': 'myattr'}, {':v0': 'foo'}
)
def test_build_expression_gt(self):
a = Attr('myattr')
self.assert_condition_expression_build(
a.gt('foo'), '#n0 > :v0', {'#n0': 'myattr'}, {':v0': 'foo'}
)
def test_build_expression_gte(self):
a = Attr('myattr')
self.assert_condition_expression_build(
a.gte('foo'), '#n0 >= :v0', {'#n0': 'myattr'}, {':v0': 'foo'}
)
def test_build_expression_begins_with(self):
a = Attr('myattr')
self.assert_condition_expression_build(
a.begins_with('foo'),
'begins_with(#n0, :v0)',
{'#n0': 'myattr'},
{':v0': 'foo'},
)
def test_build_expression_between(self):
a = Attr('myattr')
self.assert_condition_expression_build(
a.between('foo', 'foo2'),
'#n0 BETWEEN :v0 AND :v1',
{'#n0': 'myattr'},
{':v0': 'foo', ':v1': 'foo2'},
)
def test_build_expression_ne(self):
a = Attr('myattr')
self.assert_condition_expression_build(
a.ne('foo'), '#n0 <> :v0', {'#n0': 'myattr'}, {':v0': 'foo'}
)
def test_build_expression_in(self):
a = Attr('myattr')
self.assert_condition_expression_build(
a.is_in([1, 2, 3]),
'#n0 IN (:v0, :v1, :v2)',
{'#n0': 'myattr'},
{':v0': 1, ':v1': 2, ':v2': 3},
)
def test_build_expression_exists(self):
a = Attr('myattr')
self.assert_condition_expression_build(
a.exists(), 'attribute_exists(#n0)', {'#n0': 'myattr'}, {}
)
def test_build_expression_not_exists(self):
a = Attr('myattr')
self.assert_condition_expression_build(
a.not_exists(), 'attribute_not_exists(#n0)', {'#n0': 'myattr'}, {}
)
def test_build_contains(self):
a = Attr('myattr')
self.assert_condition_expression_build(
a.contains('foo'),
'contains(#n0, :v0)',
{'#n0': 'myattr'},
{':v0': 'foo'},
)
def test_build_size(self):
a = Attr('myattr')
self.assert_condition_expression_build(
a.size(), 'size(#n0)', {'#n0': 'myattr'}, {}
)
def test_build_size_with_other_conditons(self):
a = Attr('myattr')
self.assert_condition_expression_build(
a.size().eq(5), 'size(#n0) = :v0', {'#n0': 'myattr'}, {':v0': 5}
)
def test_build_attribute_type(self):
a = Attr('myattr')
self.assert_condition_expression_build(
a.attribute_type('foo'),
'attribute_type(#n0, :v0)',
{'#n0': 'myattr'},
{':v0': 'foo'},
)
def test_build_and(self):
a = Attr('myattr')
a2 = Attr('myattr2')
self.assert_condition_expression_build(
a.eq('foo') & a2.eq('bar'),
'(#n0 = :v0 AND #n1 = :v1)',
{'#n0': 'myattr', '#n1': 'myattr2'},
{':v0': 'foo', ':v1': 'bar'},
)
def test_build_or(self):
a = Attr('myattr')
a2 = Attr('myattr2')
self.assert_condition_expression_build(
a.eq('foo') | a2.eq('bar'),
'(#n0 = :v0 OR #n1 = :v1)',
{'#n0': 'myattr', '#n1': 'myattr2'},
{':v0': 'foo', ':v1': 'bar'},
)
def test_build_not(self):
a = Attr('myattr')
self.assert_condition_expression_build(
~a.eq('foo'), '(NOT #n0 = :v0)', {'#n0': 'myattr'}, {':v0': 'foo'}
)
def test_build_attribute_with_attr_value(self):
a = Attr('myattr')
value = Attr('myreference')
self.assert_condition_expression_build(
a.eq(value),
'#n0 = #n1',
{'#n0': 'myattr', '#n1': 'myreference'},
{},
)
def test_build_with_is_key_condition(self):
k = Key('myattr')
self.assert_condition_expression_build(
k.eq('foo'),
'#n0 = :v0',
{'#n0': 'myattr'},
{':v0': 'foo'},
is_key_condition=True,
)
def test_build_with_is_key_condition_throws_error(self):
a = Attr('myattr')
with pytest.raises(DynamoDBNeedsKeyConditionError):
self.builder.build_expression(a.eq('foo'), is_key_condition=True)
def test_build_attr_map(self):
a = Attr('MyMap.MyKey')
self.assert_condition_expression_build(
a.eq('foo'),
'#n0.#n1 = :v0',
{'#n0': 'MyMap', '#n1': 'MyKey'},
{':v0': 'foo'},
)
def test_build_attr_list(self):
a = Attr('MyList[0]')
self.assert_condition_expression_build(
a.eq('foo'), '#n0[0] = :v0', {'#n0': 'MyList'}, {':v0': 'foo'}
)
def test_build_nested_attr_map_list(self):
a = Attr('MyMap.MyList[2].MyElement')
self.assert_condition_expression_build(
a.eq('foo'),
'#n0.#n1[2].#n2 = :v0',
{'#n0': 'MyMap', '#n1': 'MyList', '#n2': 'MyElement'},
{':v0': 'foo'},
)
def test_build_double_nested_and_or(self):
a = Attr('myattr')
a2 = Attr('myattr2')
self.assert_condition_expression_build(
(a.eq('foo') & a2.eq('foo2')) | (a.eq('bar') & a2.eq('bar2')),
'((#n0 = :v0 AND #n1 = :v1) OR (#n2 = :v2 AND #n3 = :v3))',
{
'#n0': 'myattr',
'#n1': 'myattr2',
'#n2': 'myattr',
'#n3': 'myattr2',
},
{':v0': 'foo', ':v1': 'foo2', ':v2': 'bar', ':v3': 'bar2'},
)
| TestConditionExpressionBuilder |
python | django__django | docs/_ext/github_links.py | {
"start": 67,
"end": 1847
} | class ____(ast.NodeVisitor):
def __init__(self):
super().__init__()
self.current_path = []
self.node_line_numbers = {}
self.import_locations = {}
@classmethod
def from_code(cls, code):
tree = ast.parse(code)
locator = cls()
locator.visit(tree)
return locator
def visit_node(self, node):
self.current_path.append(node.name)
self.node_line_numbers[".".join(self.current_path)] = node.lineno
self.generic_visit(node)
self.current_path.pop()
def visit_FunctionDef(self, node):
self.visit_node(node)
def visit_ClassDef(self, node):
self.visit_node(node)
def visit_ImportFrom(self, node):
for alias in node.names:
if alias.asname:
# Exclude linking aliases (`import x as y`) to avoid confusion
# when clicking a source link to a differently named entity.
continue
if alias.name == "*":
# Resolve wildcard imports.
file = module_name_to_file_path(node.module)
file_contents = file.read_text(encoding="utf-8")
locator = CodeLocator.from_code(file_contents)
self.import_locations.update(locator.import_locations)
self.import_locations.update(
{n: node.module for n in locator.node_line_numbers if "." not in n}
)
else:
self.import_locations[alias.name] = ("." * node.level) + (
node.module or ""
)
@functools.lru_cache(maxsize=1024)
def get_locator(file):
file_contents = file.read_text(encoding="utf-8")
return CodeLocator.from_code(file_contents)
| CodeLocator |
python | walkccc__LeetCode | solutions/3318. Find X-Sum of All K-Long Subarrays I/3318.py | {
"start": 42,
"end": 1315
} | class ____:
def findXSum(self, nums: list[int], k: int, x: int) -> list[int]:
ans = []
windowSum = 0
count = collections.Counter()
top = SortedList()
bot = SortedList()
def update(num: int, freq: int) -> None:
"""Updates the count of num by freq and the window sum accordingly."""
nonlocal windowSum
if count[num] > 0: # Clean up old values.
if [count[num], num] in bot:
bot.remove([count[num], num])
else:
top.remove([count[num], num])
windowSum -= num * count[num]
count[num] += freq
if count[num] > 0:
bot.add([count[num], num])
for i, num in enumerate(nums):
update(num, 1)
if i >= k:
update(nums[i - k], -1)
# Move the bottom element to the top if needed.
while bot and len(top) < x:
countB, b = bot.pop()
top.add([countB, b])
windowSum += b * countB
# Swap the bottom and top elements if needed.
while bot and bot[-1] > top[0]:
countB, b = bot.pop()
countT, t = top.pop(0)
bot.add([countT, t])
windowSum -= t * countT
top.add([countB, b])
windowSum += b * countB
if i >= k - 1:
ans.append(windowSum)
return ans
| Solution |
python | apache__airflow | devel-common/src/sphinx_exts/operators_and_hooks_ref.py | {
"start": 19289,
"end": 19605
} | class ____(BaseJinjaReferenceDirective):
"""Generate list of deprecated entities"""
def render_content(self, *, tags: set[str] | None, header_separator: str = DEFAULT_HEADER_SEPARATOR):
return _render_deprecations_content(
header_separator=header_separator,
)
| DeprecationsDirective |
python | sqlalchemy__sqlalchemy | test/orm/test_session.py | {
"start": 5372,
"end": 13135
} | class ____(_fixtures.FixtureTest):
run_inserts = None
__prefer_requires__ = ("independent_connections",)
def test_no_close_on_flush(self):
"""Flush() doesn't close a connection the session didn't open"""
User, users = self.classes.User, self.tables.users
c = testing.db.connect()
c.exec_driver_sql("select * from users")
self.mapper_registry.map_imperatively(User, users)
s = Session(bind=c)
s.add(User(name="first"))
s.flush()
c.exec_driver_sql("select * from users")
def test_close(self):
"""close() doesn't close a connection the session didn't open"""
User, users = self.classes.User, self.tables.users
c = testing.db.connect()
c.exec_driver_sql("select * from users")
self.mapper_registry.map_imperatively(User, users)
s = Session(bind=c)
s.add(User(name="first"))
s.flush()
c.exec_driver_sql("select * from users")
s.close()
c.exec_driver_sql("select * from users")
def test_autobegin_execute(self):
# test the new autobegin behavior introduced in #5074
s = Session(testing.db)
is_(s._transaction, None)
s.execute(select(1))
is_not(s._transaction, None)
s.commit()
is_(s._transaction, None)
s.execute(select(1))
is_not(s._transaction, None)
s.close()
is_(s._transaction, None)
s.execute(select(1))
is_not(s._transaction, None)
s.close()
is_(s._transaction, None)
def test_autobegin_flush(self):
# test the new autobegin behavior introduced in #5074
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
s = Session(testing.db)
is_(s._transaction, None)
# empty flush, nothing happens
s.flush()
is_(s._transaction, None)
s.add(User(id=1, name="name"))
s.flush()
is_not(s._transaction, None)
s.commit()
is_(s._transaction, None)
def test_autobegin_within_flush(self):
"""test :ticket:`6233`"""
s = Session(testing.db)
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
s.add(User(name="u1"))
s.commit()
u1 = s.query(User).first()
s.commit()
u1.name = "newname"
s.flush()
eq_(s.connection().scalar(select(User.name)), "newname")
assert s.in_transaction()
s.rollback()
assert not s.in_transaction()
eq_(s.connection().scalar(select(User.name)), "u1")
@testing.combinations(
"select1", "lazyload", "unitofwork", argnames="trigger"
)
@testing.combinations("commit", "close", "rollback", None, argnames="op")
def test_no_autobegin(self, op, trigger):
User, users = self.classes.User, self.tables.users
Address, addresses = self.classes.Address, self.tables.addresses
self.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
self.mapper_registry.map_imperatively(Address, addresses)
with Session(testing.db) as sess:
sess.add(User(name="u1"))
sess.commit()
s = Session(testing.db, autobegin=False)
orm_trigger = trigger == "lazyload" or trigger == "unitofwork"
with expect_raises_message(
exc.InvalidRequestError,
r"Autobegin is disabled on this Session; please call "
r"session.begin\(\) to start a new transaction",
):
if op or orm_trigger:
s.begin()
is_true(s.in_transaction())
if orm_trigger:
u1 = s.scalar(select(User).filter_by(name="u1"))
else:
eq_(s.scalar(select(1)), 1)
if op:
getattr(s, op)()
elif orm_trigger:
s.rollback()
is_false(s.in_transaction())
if trigger == "select1":
s.execute(select(1))
elif trigger == "lazyload":
if op == "close":
s.add(u1)
else:
u1.addresses
elif trigger == "unitofwork":
s.add(u1)
s.begin()
if trigger == "select1":
s.execute(select(1))
elif trigger == "lazyload":
if op == "close":
s.add(u1)
u1.addresses
is_true(s.in_transaction())
if op:
getattr(s, op)()
is_false(s.in_transaction())
def test_autobegin_begin_method(self):
s = Session(testing.db)
s.begin() # OK
assert_raises_message(
exc.InvalidRequestError,
"A transaction is already begun on this Session.",
s.begin,
)
@testing.combinations((True,), (False,), argnames="begin")
@testing.combinations((True,), (False,), argnames="expire_on_commit")
@testing.combinations((True,), (False,), argnames="modify_unconditional")
@testing.combinations(
("nothing",), ("modify",), ("add",), ("delete",), argnames="case_"
)
def test_autobegin_attr_change(
self, case_, begin, modify_unconditional, expire_on_commit
):
"""test :ticket:`6360`"""
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
s = Session(
testing.db,
expire_on_commit=expire_on_commit,
)
u = User(name="x")
u2 = User(name="d")
u3 = User(name="e")
s.add_all([u, u2, u3])
s.commit()
if begin:
s.begin()
if case_ == "add":
# this autobegins
s.add(User(name="q"))
elif case_ == "delete":
# this autobegins
s.delete(u2)
elif case_ == "modify":
# this autobegins
u3.name = "m"
if case_ == "nothing" and not begin:
assert not s._transaction
expect_expire = expire_on_commit
else:
assert s._transaction
expect_expire = True
if modify_unconditional:
# this autobegins
u.name = "y"
expect_expire = True
if not expect_expire:
assert not s._transaction
# test is that state is consistent after rollback()
s.rollback()
if not expect_expire:
assert "name" in u.__dict__
else:
assert "name" not in u.__dict__
eq_(u.name, "x")
@testing.requires.independent_connections
@engines.close_open_connections
def test_transaction(self):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
conn1 = testing.db.connect()
conn2 = testing.db.connect()
sess = Session(bind=conn1)
u = User(name="x")
sess.add(u)
sess.flush()
assert (
conn1.exec_driver_sql("select count(1) from users").scalar() == 1
)
assert (
conn2.exec_driver_sql("select count(1) from users").scalar() == 0
)
sess.commit()
assert (
conn1.exec_driver_sql("select count(1) from users").scalar() == 1
)
assert (
testing.db.connect()
.exec_driver_sql("select count(1) from users")
.scalar()
== 1
)
sess.close()
| TransScopingTest |
python | walkccc__LeetCode | solutions/3455. Shortest Matching Substring/3455.py | {
"start": 0,
"end": 1174
} | class ____:
def shortestMatchingSubstring(self, s: str, p: str) -> int:
n = len(s)
a, b, c = p.split('*')
lpsA = self._getLPS(a + '#' + s)[len(a) + 1:]
lpsB = self._getLPS(b + '#' + s)[len(b) + 1:]
lpsC = self._getLPS(c + '#' + s)[len(c) + 1:]
ans = math.inf
i = 0 # lpsA's index
j = 0 # lpsB's index
k = 0 # lpsC's index
while i + len(b) + len(c) < n:
while i < n and lpsA[i] != len(a):
i += 1
while j < n and (j < i + len(b) or lpsB[j] != len(b)):
j += 1
while k < n and (k < j + len(c) or lpsC[k] != len(c)):
k += 1
if k == n:
break
ans = min(ans, k - i + len(a))
i += 1
return -1 if ans == math.inf else ans
def _getLPS(self, pattern: str) -> list[int]:
"""
Returns the lps array, where lps[i] is the length of the longest prefix of
pattern[0..i] which is also a suffix of this substring.
"""
lps = [0] * len(pattern)
j = 0
for i in range(1, len(pattern)):
while j > 0 and pattern[j] != pattern[i]:
j = lps[j - 1]
if pattern[i] == pattern[j]:
lps[i] = j + 1
j += 1
return lps
| Solution |
python | pytorch__pytorch | test/quantization/jit/test_quantize_jit.py | {
"start": 111633,
"end": 123563
} | class ____(QuantizationTestCase):
def test_prepare_dynamic(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc = torch.nn.Linear(5, 5)
def forward(self, x):
return self.fc(x)
model = torch.jit.script(M())
for qconfig in [float16_dynamic_qconfig, default_dynamic_qconfig]:
m = prepare_dynamic_jit(model, {"": qconfig})
# observer for weight
assert len(attrs_with_prefix(m.fc, "_observer_")) == 1
if qconfig == float16_dynamic_qconfig:
observer_name = 'PlaceholderObserver = prim::GetAttr[name="_observer_'
FileCheck().check(observer_name).run(m.fc.graph)
else:
# for input of FC for dynamic quant
assert len(attrs_with_prefix(m, "_observer_")) == 1
observer_name = 'Observer = prim::GetAttr[name="_observer_'
FileCheck().check(observer_name).check(
'prim::GetAttr[name="fc"]'
).check("prim::CallMethod").check_not(observer_name).run(m.graph)
def test_prepare_dynamic_child_qconfig(self):
class Sub(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc = torch.nn.Linear(5, 5)
def forward(self, x):
return self.fc(x)
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(3, 5, 3)
self.sub = Sub()
def forward(self, x):
return self.sub(self.conv(x))
m = torch.jit.script(M())
# only quantize child module.
m = prepare_dynamic_jit(m, {"sub.fc": default_dynamic_qconfig})
# input of sub for dynamic quant
assert len(attrs_with_prefix(m, "_observer_")) == 1
# not quantized
assert len(attrs_with_prefix(m.conv, "_observer_")) == 0
# no observers since we observe in the outer most call site
assert len(attrs_with_prefix(m.sub, "_observer_")) == 0
# weight of linear
assert len(attrs_with_prefix(m.sub.fc, "_observer_")) == 1
FileCheck().check('prim::GetAttr[name="sub').check("prim::CallMethod").check(
'Observer = prim::GetAttr[name="_observer_'
).check("prim::CallMethod").check_not(
'Observer = prim::GetAttr[name="_observer_'
).run(m.graph)
def test_insert_quant_dequant_linear_dynamic(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc1 = torch.nn.Linear(5, 5).float()
self.fc2 = torch.nn.Linear(5, 5).float()
def forward(self, x):
x = self.fc1(x)
return self.fc2(x)
for is_per_channel in [True, False]:
m = torch.jit.script(M())
qconfig = (
per_channel_dynamic_qconfig
if is_per_channel is True
else default_dynamic_qconfig
)
m = quantize_dynamic_jit(m, {"": qconfig}, debug=True)
assert len(m._modules._c.items()) == 2, (
"Expected to have two submodule of linear"
)
wt_quant_func = (
"aten::quantize_per_channel"
if is_per_channel
else "aten::quantize_per_tensor"
)
act_quant_func = "aten::quantize_per_tensor"
# quantizing activations
FileCheck().check("aten::_choose_qparams_per_tensor").check_next(
act_quant_func
).check_next("aten::dequantize").check(
"aten::_choose_qparams_per_tensor"
).check_next(act_quant_func).check_next("aten::dequantize").check(
wt_quant_func
).check_next("aten::dequantize").check_not(wt_quant_func).check(
"return"
).run(m.graph)
@override_qengines
def test_dynamic_multi_op(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)
def forward(self, x):
x = x + 5
return self.fc1(x)
x = torch.randn(5, 5)
for tracing in [True, False]:
model = self.checkGraphModeOp(
M(), x, "quantized::linear_dynamic", tracing=tracing, dynamic=True
)
# add op is not dynamically quantized.
FileCheck().check("aten::add").run(model.graph)
@override_qengines
def test_dynamic_quant_multi_uses(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc = torch.nn.Linear(5, 5).float()
def forward(self, x):
size1 = x.size()
size2 = x.size()
return self.fc(x), size1, size2
x = torch.randn(5, 5)
for tracing in [True, False]:
model = self.checkGraphModeOp(
M(), x, "quantized::linear_dynamic", tracing=tracing, dynamic=True
)
FileCheck().check_not("aten::_choose_qparams_per_tensor").run(model.graph)
@override_qengines
def test_dynamic_shared_weights(self):
class myMod(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.linear = nn.Linear(5, 5)
self.linear.weight = weight
def forward(self, x):
return self.linear(x)
class DynamicModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight = torch.nn.Parameter(torch.ones(5, 5))
self.mod1 = myMod(self.weight)
def forward(self, x):
y = self.mod1(x)
z = torch.nn.functional.linear(y, self.weight)
return z
model = torch.jit.script(DynamicModel()).eval()
data = torch.randn(5, 5, dtype=torch.float)
quant_ops = ["mod1", ""]
counts = [1, 2]
for op, count in zip(quant_ops, counts):
qconfig_dict = {op: default_dynamic_qconfig}
m1 = quantize_dynamic_jit(model, qconfig_dict)
out_graph = m1(data)
FileCheck().check_count(
"quantized::linear_dynamic(", count, exactly=True
).check_not("aten::_choose_qparams_per_tensor").run(m1.graph)
# Explicitly call forward on model before convert
m2 = prepare_dynamic_jit(model, qconfig_dict)
m2(data)
m2 = convert_dynamic_jit(m2, debug=False)
out_ref = m2(data)
self.assertEqual(out_graph, out_ref)
@override_qengines
def test_dynamic_with_if(self):
class Res(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight = torch.nn.Parameter(torch.ones(5, 5))
def forward(self, x: torch.Tensor, cond: bool) -> torch.Tensor:
if cond:
return torch.nn.functional.linear(x, self.weight)
else:
return torch.nn.functional.linear(x, self.weight)
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.res1 = Res()
self.res2 = Res()
def forward(self, x):
x = self.res1(x, True)
x = self.res2(x, False)
return x
model = torch.jit.script(M()).eval()
data = torch.randn(5, 5, dtype=torch.float)
qconfig_dict = {"": default_dynamic_qconfig}
for tracing in [True, False]:
m1 = self.checkGraphModeOp(
M(), data, "quantized::linear_dynamic", tracing=tracing, dynamic=True
)
FileCheck().check_count(
"quantized::linear_dynamic(", 2, exactly=True
).check_not("aten::_choose_qparams_per_tensor").run(m1.graph)
# Check to make sure weight observers run correctly
ref_qparams = []
qconfig = script_qconfig(default_dynamic_qconfig)
wt_module = wrap_cpp_module(qconfig.weight)
for wt in [model.res1.weight, model.res2.weight]:
wt_module(wt)
qparams = wt_module.calculate_qparams()
ref_qparams.append((qparams[0].item(), qparams[1].item()))
m2 = quantize_dynamic_jit(model, qconfig_dict, debug=True)
graph_params = []
for x, obs in m2._modules._c.items():
if x == "res1":
graph_params.append(
(
obs.getattr("weight.2_scale_0"),
obs.getattr("weight.2_zero_point_0"),
)
)
elif x == "res2":
graph_params.append(
(
obs.getattr("weight.4_scale_0"),
obs.getattr("weight.4_zero_point_0"),
)
)
self.assertEqual(ref_qparams, graph_params)
def test_dynamic_weight_observer(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc = torch.nn.Linear(5, 5).float()
self.fc2 = torch.nn.Linear(5, 5).float()
def forward(self, x):
x = self.fc(x)
return self.fc2(x)
qconfig_dict = {"": default_dynamic_qconfig}
eager_model = M().eval()
for tracing in [True, False]:
x = torch.rand(5, 5)
model = get_script_module(eager_model, tracing, x)
ref_qparams = []
for wt in [model.fc.weight, model.fc2.weight]:
wt_module = default_dynamic_qconfig.weight()
wt_module(wt)
qparams = wt_module.calculate_qparams()
ref_qparams.append((qparams[0].item(), qparams[1].item()))
model = quantize_dynamic_jit(model, qconfig_dict, debug=True)
graph_qparams = []
for x, obs in model._modules._c.items():
n = 2 if x == "fc" and tracing else 1
graph_qparams.append(
(
obs.getattr(f"weight.{n}_scale_0"),
obs.getattr(f"weight.{n}_zero_point_0"),
)
)
self.assertEqual(ref_qparams, graph_qparams)
def test_convert_dynamic_fp16(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc = torch.nn.Linear(5, 5)
def forward(self, x):
return self.fc(x)
m = torch.jit.script(M())
m = quantize_dynamic_jit(m, {"": float16_dynamic_qconfig}, debug=True)
FileCheck().check("aten::_saturate_weight_to_fp16").check(
"aten::linear"
).check_not("aten::dequantize").check_not("aten::quantize").run(m.graph)
def test_quantize_dynamic_fp16(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc = torch.nn.Linear(5, 5)
def forward(self, x):
return self.fc(x)
m = torch.jit.script(M())
m = quantize_dynamic_jit(m, {"": float16_dynamic_qconfig})
FileCheck().check("quantized::linear_dynamic_fp16").check_not(
"aten::linear"
).check_not("aten::dequantize").check_not("aten::quantize").run(m.graph)
| TestQuantizeDynamicJitPasses |
python | apache__airflow | devel-common/src/tests_common/test_utils/asserts.py | {
"start": 2951,
"end": 6163
} | class ____:
"""
Counts the number of queries sent to Airflow Database in a given context.
Does not support multiple processes. When a new process is started in context, its queries will
not be included.
"""
def __init__(
self,
*,
stacklevel: int = 1,
stacklevel_from_module: str | None = None,
session: Session | None = None,
):
self.result: Counter[str] = Counter()
self.stacklevel = stacklevel
self.stacklevel_from_module = stacklevel_from_module
self.session = session
def __enter__(self):
if self.session:
event.listen(self.session, "do_orm_execute", self.after_cursor_execute)
else:
event.listen(airflow.settings.engine, "after_cursor_execute", self.after_cursor_execute)
return self.result
def __exit__(self, type_, value, tb):
if self.session:
event.remove(self.session, "do_orm_execute", self.after_cursor_execute)
else:
event.remove(airflow.settings.engine, "after_cursor_execute", self.after_cursor_execute)
log.debug("Queries count: %d", sum(self.result.values()))
def after_cursor_execute(self, *args, **kwargs):
stack = QueriesTraceInfo.from_traceback(traceback.extract_stack())
if not self.stacklevel_from_module:
stacklevel = self.stacklevel
else:
stacklevel = stack.module_level(self.stacklevel_from_module)
stack_info = " > ".join(map(str, stack.traces[-stacklevel:]))
self.result[stack_info] += 1
count_queries = CountQueries
@contextmanager
def assert_queries_count(
expected_count: int,
message_fmt: str | None = None,
margin: int = 0,
stacklevel: int = 5,
stacklevel_from_module: str | None = None,
session: Session | None = None,
):
"""
Assert that the number of queries is as expected with the margin applied.
The margin is helpful in case of complex cases where we do not want to change it every time we
changed queries, but we want to catch cases where we spin out of control
:param expected_count: expected number of queries
:param message_fmt: message printed optionally if the number is exceeded
:param margin: margin to add to expected number of calls
:param stacklevel: limits the output stack trace to that numbers of frame
:param stacklevel_from_module: Filter stack trace from specific module.
"""
with count_queries(
stacklevel=stacklevel, stacklevel_from_module=stacklevel_from_module, session=session
) as result:
yield None
count = sum(result.values())
if count > expected_count + margin:
message_fmt = (
message_fmt
or "The expected number of db queries is {expected_count} with extra margin: {margin}. "
"The current number is {current_count}.\n\n"
"Recorded query locations:"
)
message = message_fmt.format(current_count=count, expected_count=expected_count, margin=margin)
for location, count in result.items():
message += f"\n\t{location}:\t{count}"
raise AssertionError(message)
| CountQueries |
python | mkdocs__mkdocs | mkdocs/config/defaults.py | {
"start": 833,
"end": 1202
} | class ____(_LogLevel):
levels: Mapping[str, int] = {
**_LogLevel.levels,
"relative_to_docs": _AbsoluteLinksValidationValue.RELATIVE_TO_DOCS,
}
# NOTE: The order here is important. During validation some config options
# depend on others. So, if config option A depends on B, then A should be
# listed higher in the schema.
| _AbsoluteLinksValidation |
python | aio-libs__aiohttp | aiohttp/tracing.py | {
"start": 7925,
"end": 8144
} | class ____:
"""Parameters sent by the `on_request_exception` signal"""
method: str
url: URL
headers: "CIMultiDict[str]"
exception: BaseException
@frozen_dataclass_decorator
| TraceRequestExceptionParams |
python | gevent__gevent | src/gevent/tests/test___config.py | {
"start": 130,
"end": 2461
} | class ____(unittest.TestCase):
old_resolver = None
def setUp(self):
if 'GEVENT_RESOLVER' in os.environ:
self.old_resolver = os.environ['GEVENT_RESOLVER']
del os.environ['GEVENT_RESOLVER']
def tearDown(self):
if self.old_resolver:
os.environ['GEVENT_RESOLVER'] = self.old_resolver
def test_key(self):
self.assertEqual(_config.Resolver.environment_key, 'GEVENT_RESOLVER')
def test_default(self):
from gevent.resolver.thread import Resolver
conf = _config.Resolver()
self.assertEqual(conf.get(), Resolver)
def test_env(self):
from gevent.resolver.blocking import Resolver
os.environ['GEVENT_RESOLVER'] = 'foo,bar,block,dnspython'
conf = _config.Resolver()
self.assertEqual(conf.get(), Resolver)
os.environ['GEVENT_RESOLVER'] = 'dnspython'
# The existing value is unchanged
self.assertEqual(conf.get(), Resolver)
# A new object reflects it
try:
from gevent.resolver.dnspython import Resolver as DResolver
except ImportError: # pragma: no cover
# dnspython is optional; skip it.
import warnings
warnings.warn('dnspython not installed')
else:
conf = _config.Resolver()
self.assertEqual(conf.get(), DResolver)
def test_set_str_long(self):
from gevent.resolver.blocking import Resolver
conf = _config.Resolver()
conf.set('gevent.resolver.blocking.Resolver')
self.assertEqual(conf.get(), Resolver)
def test_set_str_short(self):
from gevent.resolver.blocking import Resolver
conf = _config.Resolver()
conf.set('block')
self.assertEqual(conf.get(), Resolver)
def test_set_class(self):
from gevent.resolver.blocking import Resolver
conf = _config.Resolver()
conf.set(Resolver)
self.assertEqual(conf.get(), Resolver)
def test_set_through_config(self):
from gevent.resolver.thread import Resolver as Default
from gevent.resolver.blocking import Resolver
conf = _config.Config()
self.assertEqual(conf.resolver, Default)
conf.resolver = 'block'
self.assertEqual(conf.resolver, Resolver)
| TestResolver |
python | getsentry__sentry | tests/sentry/notifications/notification_action/metric_alert_registry/test_discord_metric_alert_handler.py | {
"start": 1160,
"end": 8434
} | class ____(MetricAlertHandlerBase):
def setUp(self) -> None:
self.create_models()
self.action = self.create_action(
type=Action.Type.DISCORD,
integration_id=1234567890,
config={
"target_identifier": "channel123",
"target_type": ActionTarget.SPECIFIC,
},
)
self.handler = DiscordMetricAlertHandler()
@mock.patch("sentry.integrations.discord.actions.metric_alert.send_incident_alert_notification")
@freeze_time("2021-01-01 00:00:00")
def test_send_alert(self, mock_send_incident_alert_notification: mock.MagicMock) -> None:
notification_context = NotificationContext.from_action_model(self.action)
assert self.group_event.occurrence is not None
assert self.group_event.occurrence.priority is not None
alert_context = AlertContext.from_workflow_engine_models(
self.detector,
self.evidence_data,
self.group_event.group.status,
DetectorPriorityLevel(self.group_event.occurrence.priority),
)
metric_issue_context = MetricIssueContext.from_group_event(
self.group,
self.evidence_data,
DetectorPriorityLevel(self.group_event.occurrence.priority),
)
open_period_context = OpenPeriodContext.from_group(self.group)
notification_uuid = str(uuid.uuid4())
self.handler.send_alert(
notification_context=notification_context,
alert_context=alert_context,
metric_issue_context=metric_issue_context,
open_period_context=open_period_context,
trigger_status=TriggerStatus.ACTIVE,
project=self.detector.project,
organization=self.detector.project.organization,
notification_uuid=notification_uuid,
)
mock_send_incident_alert_notification.assert_called_once_with(
organization=self.detector.project.organization,
alert_context=alert_context,
notification_context=notification_context,
metric_issue_context=metric_issue_context,
open_period_context=open_period_context,
alert_rule_serialized_response=get_alert_rule_serializer(self.detector),
incident_serialized_response=get_detailed_incident_serializer(self.open_period),
detector_serialized_response=get_detector_serializer(self.detector),
notification_uuid=notification_uuid,
)
@mock.patch(
"sentry.notifications.notification_action.metric_alert_registry.DiscordMetricAlertHandler.send_alert"
)
@freeze_time("2021-01-01 00:00:00")
def test_invoke_legacy_registry(self, mock_send_alert: mock.MagicMock) -> None:
self.handler.invoke_legacy_registry(self.event_data, self.action, self.detector)
assert mock_send_alert.call_count == 1
(
notification_context,
alert_context,
metric_issue_context,
open_period_context,
organization,
notification_uuid,
) = self.unpack_kwargs(mock_send_alert)
self.assert_notification_context(
notification_context,
integration_id=1234567890,
target_identifier="channel123",
target_display=None,
sentry_app_config=None,
sentry_app_id=None,
)
self.assert_alert_context(
alert_context,
name=self.detector.name,
action_identifier_id=self.detector.id,
threshold_type=AlertRuleThresholdType.ABOVE,
detection_type=AlertRuleDetectionType.STATIC,
comparison_delta=None,
alert_threshold=self.evidence_data.conditions[0]["comparison"],
)
self.assert_metric_issue_context(
metric_issue_context,
open_period_identifier=self.open_period.id,
snuba_query=self.snuba_query,
new_status=IncidentStatus.CRITICAL,
metric_value=123.45,
group=self.group_event.group,
title=self.group_event.group.title,
subscription=self.subscription,
)
self.assert_open_period_context(
open_period_context,
id=self.open_period.id,
date_started=self.group_event.group.first_seen,
date_closed=None,
)
assert organization == self.detector.project.organization
assert isinstance(notification_uuid, str)
@mock.patch(
"sentry.notifications.notification_action.metric_alert_registry.DiscordMetricAlertHandler.send_alert"
)
@freeze_time("2021-01-01 00:00:00")
def test_invoke_legacy_registry_with_activity(self, mock_send_alert: mock.MagicMock) -> None:
# Create an Activity instance with evidence data and priority
activity_data = asdict(self.evidence_data)
activity = Activity(
project=self.project,
group=self.group,
type=ActivityType.SET_RESOLVED.value,
data=activity_data,
)
activity.save()
# Create event data with Activity instead of GroupEvent
event_data_with_activity = WorkflowEventData(
event=activity,
workflow_env=self.workflow.environment,
group=self.group,
)
self.handler.invoke_legacy_registry(event_data_with_activity, self.action, self.detector)
assert mock_send_alert.call_count == 1
(
notification_context,
alert_context,
metric_issue_context,
open_period_context,
organization,
notification_uuid,
) = self.unpack_kwargs(mock_send_alert)
# Verify that the same data is extracted from Activity.data as from GroupEvent.occurrence.evidence_data
self.assert_notification_context(
notification_context,
integration_id=1234567890,
target_identifier="channel123",
target_display=None,
sentry_app_config=None,
sentry_app_id=None,
)
self.assert_alert_context(
alert_context,
name=self.detector.name,
action_identifier_id=self.detector.id,
threshold_type=AlertRuleThresholdType.BELOW,
detection_type=AlertRuleDetectionType.STATIC,
comparison_delta=None,
alert_threshold=self.evidence_data.conditions[2]["comparison"],
)
self.assert_metric_issue_context(
metric_issue_context,
open_period_identifier=self.open_period.id,
snuba_query=self.snuba_query,
new_status=IncidentStatus.CLOSED,
metric_value=123.45,
group=self.group,
title=self.group.title,
subscription=self.subscription,
)
self.assert_open_period_context(
open_period_context,
id=self.open_period.id,
date_started=self.group.first_seen,
date_closed=None,
)
assert organization == self.detector.project.organization
assert isinstance(notification_uuid, str)
| TestDiscordMetricAlertHandler |
python | django__django | django/utils/functional.py | {
"start": 75,
"end": 1413
} | class ____:
"""
Decorator that converts a method with a single self argument into a
property cached on the instance.
A cached property can be made out of an existing method:
(e.g. ``url = cached_property(get_absolute_url)``).
"""
name = None
@staticmethod
def func(instance):
raise TypeError(
"Cannot use cached_property instance without calling "
"__set_name__() on it."
)
def __init__(self, func):
self.real_func = func
self.__doc__ = getattr(func, "__doc__")
def __set_name__(self, owner, name):
if self.name is None:
self.name = name
self.func = self.real_func
elif name != self.name:
raise TypeError(
"Cannot assign the same cached_property to two different names "
"(%r and %r)." % (self.name, name)
)
def __get__(self, instance, cls=None):
"""
Call the function and put the return value in instance.__dict__ so that
subsequent attribute access on the instance returns the cached value
instead of calling cached_property.__get__().
"""
if instance is None:
return self
res = instance.__dict__[self.name] = self.func(instance)
return res
| cached_property |
python | aimacode__aima-python | learning4e.py | {
"start": 24299,
"end": 29412
} | class ____:
def __init__(self, clf, decision_function='ovr'):
self.clf = clf
self.decision_function = decision_function
self.n_class, self.classifiers = 0, []
def fit(self, X, y):
"""
Trains n_class or n_class * (n_class - 1) / 2 classifiers
according to the training method, ovr or ovo respectively.
:param X: array of size [n_samples, n_features] holding the training samples
:param y: array of size [n_samples] holding the class labels
:return: array of classifiers
"""
labels = np.unique(y)
self.n_class = len(labels)
if self.decision_function == 'ovr': # one-vs-rest method
for label in labels:
y1 = np.array(y)
y1[y1 != label] = -1.0
y1[y1 == label] = 1.0
self.clf.fit(X, y1)
self.classifiers.append(copy.deepcopy(self.clf))
elif self.decision_function == 'ovo': # use one-vs-one method
n_labels = len(labels)
for i in range(n_labels):
for j in range(i + 1, n_labels):
neg_id, pos_id = y == labels[i], y == labels[j]
X1, y1 = np.r_[X[neg_id], X[pos_id]], np.r_[y[neg_id], y[pos_id]]
y1[y1 == labels[i]] = -1.0
y1[y1 == labels[j]] = 1.0
self.clf.fit(X1, y1)
self.classifiers.append(copy.deepcopy(self.clf))
else:
return ValueError("Decision function must be either 'ovr' or 'ovo'.")
return self
def predict(self, X):
"""
Predicts the class of a given example according to the training method.
"""
n_samples = len(X)
if self.decision_function == 'ovr': # one-vs-rest method
assert len(self.classifiers) == self.n_class
score = np.zeros((n_samples, self.n_class))
for i in range(self.n_class):
clf = self.classifiers[i]
score[:, i] = clf.predict_score(X)
return np.argmax(score, axis=1)
elif self.decision_function == 'ovo': # use one-vs-one method
assert len(self.classifiers) == self.n_class * (self.n_class - 1) / 2
vote = np.zeros((n_samples, self.n_class))
clf_id = 0
for i in range(self.n_class):
for j in range(i + 1, self.n_class):
res = self.classifiers[clf_id].predict(X)
vote[res < 0, i] += 1.0 # negative sample: class i
vote[res > 0, j] += 1.0 # positive sample: class j
clf_id += 1
return np.argmax(vote, axis=1)
else:
return ValueError("Decision function must be either 'ovr' or 'ovo'.")
def LinearLearner(dataset, learning_rate=0.01, epochs=100):
"""
[Section 18.6.3]
Linear classifier with hard threshold.
"""
idx_i = dataset.inputs
idx_t = dataset.target
examples = dataset.examples
num_examples = len(examples)
# X transpose
X_col = [dataset.values[i] for i in idx_i] # vertical columns of X
# add dummy
ones = [1 for _ in range(len(examples))]
X_col = [ones] + X_col
# initialize random weights
num_weights = len(idx_i) + 1
w = random_weights(min_value=-0.5, max_value=0.5, num_weights=num_weights)
for epoch in range(epochs):
err = []
# pass over all examples
for example in examples:
x = [1] + example
y = np.dot(w, x)
t = example[idx_t]
err.append(t - y)
# update weights
for i in range(len(w)):
w[i] = w[i] + learning_rate * (np.dot(err, X_col[i]) / num_examples)
def predict(example):
x = [1] + example
return np.dot(w, x)
return predict
def LogisticLinearLeaner(dataset, learning_rate=0.01, epochs=100):
"""
[Section 18.6.4]
Linear classifier with logistic regression.
"""
idx_i = dataset.inputs
idx_t = dataset.target
examples = dataset.examples
num_examples = len(examples)
# X transpose
X_col = [dataset.values[i] for i in idx_i] # vertical columns of X
# add dummy
ones = [1 for _ in range(len(examples))]
X_col = [ones] + X_col
# initialize random weights
num_weights = len(idx_i) + 1
w = random_weights(min_value=-0.5, max_value=0.5, num_weights=num_weights)
for epoch in range(epochs):
err = []
h = []
# pass over all examples
for example in examples:
x = [1] + example
y = Sigmoid()(np.dot(w, x))
h.append(Sigmoid().derivative(y))
t = example[idx_t]
err.append(t - y)
# update weights
for i in range(len(w)):
buffer = [x * y for x, y in zip(err, h)]
w[i] = w[i] + learning_rate * (np.dot(buffer, X_col[i]) / num_examples)
def predict(example):
x = [1] + example
return Sigmoid()(np.dot(w, x))
return predict
| MultiClassLearner |
python | FactoryBoy__factory_boy | factory/declarations.py | {
"start": 18278,
"end": 19073
} | class ____(utils.OrderedBase):
"""A complex parameter, to be used in a Factory.Params section.
Must implement:
- A "compute" function, performing the actual declaration override
- Optionally, a get_revdeps() function (to compute other parameters it may alter)
"""
def as_declarations(self, field_name, declarations):
"""Compute the overrides for this parameter.
Args:
- field_name (str): the field this parameter is installed at
- declarations (dict): the global factory declarations
Returns:
dict: the declarations to override
"""
raise NotImplementedError()
def get_revdeps(self, parameters):
"""Retrieve the list of other parameters modified by this one."""
return []
| Parameter |
python | django__django | tests/model_forms/tests.py | {
"start": 108545,
"end": 116075
} | class ____(TestCase):
def test_media_on_modelform(self):
# Similar to a regular Form class you can define custom media to be
# used on the ModelForm.
f = ModelFormWithMedia()
self.assertHTMLEqual(
str(f.media),
'<link href="/some/form/css" media="all" rel="stylesheet">'
'<script src="/some/form/javascript"></script>',
)
def test_choices_type(self):
# Choices on CharField and IntegerField
f = ArticleForm()
with self.assertRaises(ValidationError):
f.fields["status"].clean("42")
f = ArticleStatusForm()
with self.assertRaises(ValidationError):
f.fields["status"].clean("z")
def test_prefetch_related_queryset(self):
"""
ModelChoiceField should respect a prefetch_related() on its queryset.
"""
blue = Color.objects.create(name="blue")
red = Color.objects.create(name="red")
multicolor_item = ColorfulItem.objects.create()
multicolor_item.colors.add(blue, red)
red_item = ColorfulItem.objects.create()
red_item.colors.add(red)
class ColorModelChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return ", ".join(c.name for c in obj.colors.all())
field = ColorModelChoiceField(ColorfulItem.objects.prefetch_related("colors"))
# CPython < 3.14 calls ModelChoiceField.__len__() when coercing to
# tuple. PyPy and Python 3.14+ don't call __len__() and so .count()
# isn't called on the QuerySet. The following would trigger an extra
# query if prefetch were ignored.
with self.assertNumQueries(2 if PYPY or PY314 else 3):
self.assertEqual(
tuple(field.choices),
(
("", "---------"),
(multicolor_item.pk, "blue, red"),
(red_item.pk, "red"),
),
)
def test_foreignkeys_which_use_to_field(self):
apple = Inventory.objects.create(barcode=86, name="Apple")
pear = Inventory.objects.create(barcode=22, name="Pear")
core = Inventory.objects.create(barcode=87, name="Core", parent=apple)
field = forms.ModelChoiceField(Inventory.objects.all(), to_field_name="barcode")
self.assertEqual(
tuple(field.choices),
(("", "---------"), (86, "Apple"), (87, "Core"), (22, "Pear")),
)
form = InventoryForm(instance=core)
self.assertHTMLEqual(
str(form["parent"]),
"""<select name="parent" id="id_parent">
<option value="">---------</option>
<option value="86" selected>Apple</option>
<option value="87">Core</option>
<option value="22">Pear</option>
</select>""",
)
data = model_to_dict(core)
data["parent"] = "22"
form = InventoryForm(data=data, instance=core)
core = form.save()
self.assertEqual(core.parent.name, "Pear")
class CategoryForm(forms.ModelForm):
description = forms.CharField()
class Meta:
model = Category
fields = ["description", "url"]
self.assertEqual(list(CategoryForm.base_fields), ["description", "url"])
self.assertHTMLEqual(
str(CategoryForm()),
'<div><label for="id_description">Description:</label><input type="text" '
'name="description" required id="id_description"></div><div>'
'<label for="id_url">The URL:</label><input type="text" name="url" '
'maxlength="40" required id="id_url"></div>',
)
# to_field_name should also work on ModelMultipleChoiceField.
field = forms.ModelMultipleChoiceField(
Inventory.objects.all(), to_field_name="barcode"
)
self.assertEqual(
tuple(field.choices), ((86, "Apple"), (87, "Core"), (22, "Pear"))
)
self.assertSequenceEqual(field.clean([86]), [apple])
form = SelectInventoryForm({"items": [87, 22]})
self.assertTrue(form.is_valid())
self.assertEqual(len(form.cleaned_data), 1)
self.assertSequenceEqual(form.cleaned_data["items"], [core, pear])
def test_model_field_that_returns_none_to_exclude_itself_with_explicit_fields(self):
self.assertEqual(list(CustomFieldForExclusionForm.base_fields), ["name"])
self.assertHTMLEqual(
str(CustomFieldForExclusionForm()),
'<div><label for="id_name">Name:</label><input type="text" '
'name="name" maxlength="10" required id="id_name"></div>',
)
def test_iterable_model_m2m(self):
class ColorfulItemForm(forms.ModelForm):
class Meta:
model = ColorfulItem
fields = "__all__"
color = Color.objects.create(name="Blue")
form = ColorfulItemForm()
self.maxDiff = 1024
self.assertHTMLEqual(
form.as_p(),
"""
<p>
<label for="id_name">Name:</label>
<input id="id_name" type="text" name="name" maxlength="50" required></p>
<p><label for="id_colors">Colors:</label>
<select multiple name="colors" id="id_colors" required>
<option value="%(blue_pk)s">Blue</option>
</select></p>
"""
% {"blue_pk": color.pk},
)
def test_callable_field_default(self):
class PublicationDefaultsForm(forms.ModelForm):
class Meta:
model = PublicationDefaults
fields = ("title", "date_published", "mode", "category")
self.maxDiff = 2000
form = PublicationDefaultsForm()
today_str = str(datetime.date.today())
self.assertHTMLEqual(
form.as_p(),
"""
<p><label for="id_title">Title:</label>
<input id="id_title" maxlength="30" name="title" type="text" required>
</p>
<p><label for="id_date_published">Date published:</label>
<input id="id_date_published" name="date_published" type="text" value="{0}"
required>
<input id="initial-id_date_published" name="initial-date_published"
type="hidden" value="{0}">
</p>
<p><label for="id_mode">Mode:</label> <select id="id_mode" name="mode">
<option value="di" selected>direct</option>
<option value="de">delayed</option></select>
<input id="initial-id_mode" name="initial-mode" type="hidden" value="di">
</p>
<p>
<label for="id_category">Category:</label>
<select id="id_category" name="category">
<option value="1">Games</option>
<option value="2">Comics</option>
<option value="3" selected>Novel</option></select>
<input id="initial-id_category" name="initial-category" type="hidden"
value="3">
""".format(
today_str
),
)
empty_data = {
"title": "",
"date_published": today_str,
"initial-date_published": today_str,
"mode": "di",
"initial-mode": "di",
"category": "3",
"initial-category": "3",
}
bound_form = PublicationDefaultsForm(empty_data)
self.assertFalse(bound_form.has_changed())
| OtherModelFormTests |
python | ray-project__ray | release/cluster_tests/workloads/tune_scale_up_down.py | {
"start": 1482,
"end": 2412
} | class ____(tune.Callback):
def __init__(self):
self.node_counts = []
def on_step_begin(self, iteration, trials, **info):
node_count = len([n for n in ray.nodes() if n["Alive"]])
self.node_counts.append(node_count)
def main():
ray.init()
head_node_ip = ray.util.get_node_ip_address()
assert (
len([n for n in ray.nodes() if n["Alive"]]) == 1
), "Too many nodes available at start of script"
node_counter = NodeCountCallback()
tune.run(
train_fn,
num_samples=3,
config={"head_node_ip": head_node_ip},
callbacks=[node_counter],
resources_per_trial={"cpu": 4},
)
node_counts = Counter(node_counter.node_counts)
assert node_counts[3] > 0, "Cluster never scaled to 3 nodes"
assert node_counter.node_counts[-1] == 1, "Cluster didn't scale down to 1 node."
if __name__ == "__main__":
main()
| NodeCountCallback |
python | python__mypy | mypy/build.py | {
"start": 72405,
"end": 72512
} | class ____(Exception):
"""Control flow exception to signal that a module was not found."""
| ModuleNotFound |
python | ansible__ansible | lib/ansible/_internal/_wrapt.py | {
"start": 3921,
"end": 13913
} | class ____(with_metaclass(_ObjectProxyMetaType)):
__slots__ = '__wrapped__'
def __init__(self, wrapped):
object.__setattr__(self, '__wrapped__', wrapped)
# Python 3.2+ has the __qualname__ attribute, but it does not
# allow it to be overridden using a property and it must instead
# be an actual string object instead.
try:
object.__setattr__(self, '__qualname__', wrapped.__qualname__)
except AttributeError:
pass
# Python 3.10 onwards also does not allow itself to be overridden
# using a property and it must instead be set explicitly.
try:
object.__setattr__(self, '__annotations__', wrapped.__annotations__)
except AttributeError:
pass
def __self_setattr__(self, name, value):
object.__setattr__(self, name, value)
@property
def __name__(self):
return self.__wrapped__.__name__
@__name__.setter
def __name__(self, value):
self.__wrapped__.__name__ = value
@property
def __class__(self):
return self.__wrapped__.__class__
@__class__.setter
def __class__(self, value):
self.__wrapped__.__class__ = value
def __dir__(self):
return dir(self.__wrapped__)
def __str__(self):
return str(self.__wrapped__)
if not PY2:
def __bytes__(self):
return bytes(self.__wrapped__)
def __repr__(self):
return '<{} at 0x{:x} for {} at 0x{:x}>'.format(
type(self).__name__, id(self),
type(self.__wrapped__).__name__,
id(self.__wrapped__))
def __format__(self, format_spec):
return format(self.__wrapped__, format_spec)
def __reversed__(self):
return reversed(self.__wrapped__)
if not PY2:
def __round__(self, ndigits=None):
return round(self.__wrapped__, ndigits)
if sys.hexversion >= 0x03070000:
def __mro_entries__(self, bases):
return (self.__wrapped__,)
def __lt__(self, other):
return self.__wrapped__ < other
def __le__(self, other):
return self.__wrapped__ <= other
def __eq__(self, other):
return self.__wrapped__ == other
def __ne__(self, other):
return self.__wrapped__ != other
def __gt__(self, other):
return self.__wrapped__ > other
def __ge__(self, other):
return self.__wrapped__ >= other
def __hash__(self):
return hash(self.__wrapped__)
def __nonzero__(self):
return bool(self.__wrapped__)
def __bool__(self):
return bool(self.__wrapped__)
def __setattr__(self, name, value):
if name.startswith('_self_'):
object.__setattr__(self, name, value)
elif name == '__wrapped__':
object.__setattr__(self, name, value)
try:
object.__delattr__(self, '__qualname__')
except AttributeError:
pass
try:
object.__setattr__(self, '__qualname__', value.__qualname__)
except AttributeError:
pass
try:
object.__delattr__(self, '__annotations__')
except AttributeError:
pass
try:
object.__setattr__(self, '__annotations__', value.__annotations__)
except AttributeError:
pass
elif name == '__qualname__':
setattr(self.__wrapped__, name, value)
object.__setattr__(self, name, value)
elif name == '__annotations__':
setattr(self.__wrapped__, name, value)
object.__setattr__(self, name, value)
elif hasattr(type(self), name):
object.__setattr__(self, name, value)
else:
setattr(self.__wrapped__, name, value)
def __getattr__(self, name):
# If we are being to lookup '__wrapped__' then the
# '__init__()' method cannot have been called.
if name == '__wrapped__':
raise ValueError('wrapper has not been initialised')
return getattr(self.__wrapped__, name)
def __delattr__(self, name):
if name.startswith('_self_'):
object.__delattr__(self, name)
elif name == '__wrapped__':
raise TypeError('__wrapped__ must be an object')
elif name == '__qualname__':
object.__delattr__(self, name)
delattr(self.__wrapped__, name)
elif hasattr(type(self), name):
object.__delattr__(self, name)
else:
delattr(self.__wrapped__, name)
def __add__(self, other):
return self.__wrapped__ + other
def __sub__(self, other):
return self.__wrapped__ - other
def __mul__(self, other):
return self.__wrapped__ * other
def __div__(self, other):
return operator.div(self.__wrapped__, other)
def __truediv__(self, other):
return operator.truediv(self.__wrapped__, other)
def __floordiv__(self, other):
return self.__wrapped__ // other
def __mod__(self, other):
return self.__wrapped__ % other
def __divmod__(self, other):
return divmod(self.__wrapped__, other)
def __pow__(self, other, *args):
return pow(self.__wrapped__, other, *args)
def __lshift__(self, other):
return self.__wrapped__ << other
def __rshift__(self, other):
return self.__wrapped__ >> other
def __and__(self, other):
return self.__wrapped__ & other
def __xor__(self, other):
return self.__wrapped__ ^ other
def __or__(self, other):
return self.__wrapped__ | other
def __radd__(self, other):
return other + self.__wrapped__
def __rsub__(self, other):
return other - self.__wrapped__
def __rmul__(self, other):
return other * self.__wrapped__
def __rdiv__(self, other):
return operator.div(other, self.__wrapped__)
def __rtruediv__(self, other):
return operator.truediv(other, self.__wrapped__)
def __rfloordiv__(self, other):
return other // self.__wrapped__
def __rmod__(self, other):
return other % self.__wrapped__
def __rdivmod__(self, other):
return divmod(other, self.__wrapped__)
def __rpow__(self, other, *args):
return pow(other, self.__wrapped__, *args)
def __rlshift__(self, other):
return other << self.__wrapped__
def __rrshift__(self, other):
return other >> self.__wrapped__
def __rand__(self, other):
return other & self.__wrapped__
def __rxor__(self, other):
return other ^ self.__wrapped__
def __ror__(self, other):
return other | self.__wrapped__
def __iadd__(self, other):
self.__wrapped__ += other
return self
def __isub__(self, other):
self.__wrapped__ -= other
return self
def __imul__(self, other):
self.__wrapped__ *= other
return self
def __idiv__(self, other):
self.__wrapped__ = operator.idiv(self.__wrapped__, other)
return self
def __itruediv__(self, other):
self.__wrapped__ = operator.itruediv(self.__wrapped__, other)
return self
def __ifloordiv__(self, other):
self.__wrapped__ //= other
return self
def __imod__(self, other):
self.__wrapped__ %= other
return self
def __ipow__(self, other):
self.__wrapped__ **= other
return self
def __ilshift__(self, other):
self.__wrapped__ <<= other
return self
def __irshift__(self, other):
self.__wrapped__ >>= other
return self
def __iand__(self, other):
self.__wrapped__ &= other
return self
def __ixor__(self, other):
self.__wrapped__ ^= other
return self
def __ior__(self, other):
self.__wrapped__ |= other
return self
def __neg__(self):
return -self.__wrapped__
def __pos__(self):
return +self.__wrapped__
def __abs__(self):
return abs(self.__wrapped__)
def __invert__(self):
return ~self.__wrapped__
def __int__(self):
return int(self.__wrapped__)
def __long__(self):
return long(self.__wrapped__)
def __float__(self):
return float(self.__wrapped__)
def __complex__(self):
return complex(self.__wrapped__)
def __oct__(self):
return oct(self.__wrapped__)
def __hex__(self):
return hex(self.__wrapped__)
def __index__(self):
return operator.index(self.__wrapped__)
def __len__(self):
return len(self.__wrapped__)
def __contains__(self, value):
return value in self.__wrapped__
def __getitem__(self, key):
return self.__wrapped__[key]
def __setitem__(self, key, value):
self.__wrapped__[key] = value
def __delitem__(self, key):
del self.__wrapped__[key]
def __getslice__(self, i, j):
return self.__wrapped__[i:j]
def __setslice__(self, i, j, value):
self.__wrapped__[i:j] = value
def __delslice__(self, i, j):
del self.__wrapped__[i:j]
def __enter__(self):
return self.__wrapped__.__enter__()
def __exit__(self, *args, **kwargs):
return self.__wrapped__.__exit__(*args, **kwargs)
def __iter__(self):
return iter(self.__wrapped__)
def __copy__(self):
raise NotImplementedError('object proxy must define __copy__()')
def __deepcopy__(self, memo):
raise NotImplementedError('object proxy must define __deepcopy__()')
def __reduce__(self):
raise NotImplementedError(
'object proxy must define __reduce__()')
def __reduce_ex__(self, protocol):
raise NotImplementedError(
'object proxy must define __reduce_ex__()')
| ObjectProxy |
python | apache__airflow | airflow-core/src/airflow/models/asset.py | {
"start": 21482,
"end": 23344
} | class ____(Base):
"""References from a task to an asset that it updates / produces."""
asset_id: Mapped[int] = mapped_column(Integer, primary_key=True, nullable=False)
dag_id: Mapped[str] = mapped_column(StringID(), primary_key=True, nullable=False)
task_id: Mapped[str] = mapped_column(StringID(), primary_key=True, nullable=False)
created_at: Mapped[datetime] = mapped_column(UtcDateTime, default=timezone.utcnow, nullable=False)
updated_at: Mapped[datetime] = mapped_column(
UtcDateTime, default=timezone.utcnow, onupdate=timezone.utcnow, nullable=False
)
asset = relationship("AssetModel", back_populates="producing_tasks")
__tablename__ = "task_outlet_asset_reference"
__table_args__ = (
ForeignKeyConstraint(
(asset_id,),
["asset.id"],
name="toar_asset_fkey",
ondelete="CASCADE",
),
PrimaryKeyConstraint(asset_id, dag_id, task_id, name="toar_pkey"),
ForeignKeyConstraint(
columns=(dag_id,),
refcolumns=["dag.dag_id"],
name="toar_dag_id_fkey",
ondelete="CASCADE",
),
Index("idx_task_outlet_asset_reference_dag_id", dag_id),
)
def __eq__(self, other: object) -> bool:
if isinstance(other, self.__class__):
return (
self.asset_id == other.asset_id
and self.dag_id == other.dag_id
and self.task_id == other.task_id
)
return NotImplemented
def __hash__(self):
return hash(self.__mapper__.primary_key)
def __repr__(self):
args = []
for attr in [x.name for x in self.__mapper__.primary_key]:
args.append(f"{attr}={getattr(self, attr)!r}")
return f"{self.__class__.__name__}({', '.join(args)})"
| TaskOutletAssetReference |
python | gevent__gevent | src/gevent/monkey/_errors.py | {
"start": 425,
"end": 547
} | class ____(RuntimeWarning):
"""
The type of warnings we issue.
.. versionadded:: 1.3a2
"""
| MonkeyPatchWarning |
python | tensorflow__tensorflow | tensorflow/python/framework/ops_test.py | {
"start": 43833,
"end": 46479
} | class ____(test_util.TensorFlowTestCase):
def testNodeDefArgs(self):
g = ops.Graph()
op1 = g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
with g.device("/device:GPU:0"):
op2 = g.create_op(
"FloatOutputStringOutput", [], [dtypes.float32, dtypes.string], None,
name="myop2")
op3 = g.create_op(
"Foo3",
[list(op1.values())[0], list(op2.values())[1], list(op2.values())[0]],
[dtypes.float32, dtypes.int32],
None,
name="myop3")
self.assertDeviceEqual(None, op1.device)
self.assertDeviceEqual("/device:GPU:0", op2.device)
self.assertDeviceEqual(None, op3.device)
self.assertProtoEquals("name:'myop1' op:'FloatOutput'", op1.node_def)
self.assertProtoEquals(
"name:'myop2' op:'FloatOutputStringOutput' device:'/device:GPU:0'",
op2.node_def)
self.assertProtoEquals(
"name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'Foo3'",
op3.node_def)
def testReferenceInput(self):
g = ops.Graph()
op1 = g.create_op(
"RefOutputFloatOutput", [], [dtypes.float32_ref, dtypes.float32],
name="op1")
self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'", op1.node_def)
ref_t, nonref_t = op1.values()
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
op2 = g.create_op(
"RefInputFloatInput", [ref_t, nonref_t], [],
input_types=[dtypes.float32_ref, dtypes.float32],
name="op2")
self.assertProtoEquals(
"op:'RefInputFloatInput' name:'op2' input:'op1' input:'op1:1'",
op2.node_def)
op3 = g.create_op("TwoFloatInputs", [ref_t, nonref_t], [], name="op3")
self.assertProtoEquals(
"op:'TwoFloatInputs' name:'op3' input:'op1' input:'op1:1'",
op3.node_def)
def testFinalized(self):
g = ops.Graph()
g.finalize()
with self.assertRaises(RuntimeError):
g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
# Test unfinalize.
g._unsafe_unfinalize()
g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
# NOTE(skyewm): these cases test the private Graph._create_op_from_tf_operation
# method. Arguably we should only test the public APIs that depend on this
# method. However, this logic is complex and tricky, and it can be difficult to
# ascertain if we have adequate coverage (e.g. a graph may run successfully if
# the control flow context isn't set properly, but a more complicated use case
# that might not be obvious to test will fail). Thus we instead explicitly test
# the low-level behavior.
| CreateOpTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor30.py | {
"start": 370,
"end": 426
} | class ____(ABase): ...
TA = TypeVar("TA", bound=ABase)
| A |
python | html5lib__html5lib-python | html5lib/html5parser.py | {
"start": 99753,
"end": 101328
} | class ____(Phase):
__slots__ = tuple()
def processEOF(self):
# Stop parsing
pass
def processComment(self, token):
# This is needed because data is to be appended to the <html> element
# here and not to whatever is currently open.
self.tree.insertComment(token, self.tree.openElements[0])
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-body")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
def endTagHtml(self, name):
if self.parser.innerHTML:
self.parser.parseError("unexpected-end-tag-after-body-innerhtml")
else:
self.parser.phase = self.parser.phases["afterAfterBody"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
startTagHandler = _utils.MethodDispatcher([
("html", startTagHtml)
])
startTagHandler.default = startTagOther
endTagHandler = _utils.MethodDispatcher([("html", endTagHtml)])
endTagHandler.default = endTagOther
| AfterBodyPhase |
python | sympy__sympy | sympy/matrices/expressions/matexpr.py | {
"start": 1200,
"end": 18717
} | class ____(Expr):
"""Superclass for Matrix Expressions
MatrixExprs represent abstract matrices, linear transformations represented
within a particular basis.
Examples
========
>>> from sympy import MatrixSymbol
>>> A = MatrixSymbol('A', 3, 3)
>>> y = MatrixSymbol('y', 3, 1)
>>> x = (A.T*A).I * A * y
See Also
========
MatrixSymbol, MatAdd, MatMul, Transpose, Inverse
"""
__slots__: tuple[str, ...] = ()
# Should not be considered iterable by the
# sympy.utilities.iterables.iterable function. Subclass that actually are
# iterable (i.e., explicit matrices) should set this to True.
_iterable = False
_op_priority = 11.0
is_Matrix: bool = True
is_MatrixExpr: bool = True
is_Identity: FuzzyBool = None
is_Inverse = False
is_Transpose = False
is_ZeroMatrix = False
is_MatAdd = False
is_MatMul = False
is_commutative = False
is_number = False
is_symbol = False
is_scalar = False
kind: MatrixKind = MatrixKind()
def __new__(cls, *args, **kwargs):
args = map(_sympify, args)
return Basic.__new__(cls, *args, **kwargs)
# The following is adapted from the core Expr object
@property
def shape(self) -> tuple[Expr | int, Expr | int]:
raise NotImplementedError
@property
def _add_handler(self):
return MatAdd
@property
def _mul_handler(self):
return MatMul
def __neg__(self):
return MatMul(S.NegativeOne, self).doit()
def __abs__(self):
raise NotImplementedError
@_sympifyit('other', NotImplemented)
@call_highest_priority('__radd__')
def __add__(self, other):
return MatAdd(self, other).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__add__')
def __radd__(self, other):
return MatAdd(other, self).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rsub__')
def __sub__(self, other):
return MatAdd(self, -other).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__sub__')
def __rsub__(self, other):
return MatAdd(other, -self).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rmul__')
def __mul__(self, other):
return MatMul(self, other).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rmul__')
def __matmul__(self, other):
return MatMul(self, other).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__mul__')
def __rmul__(self, other):
return MatMul(other, self).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__mul__')
def __rmatmul__(self, other):
return MatMul(other, self).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rpow__')
def __pow__(self, other):
return MatPow(self, other).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__pow__')
def __rpow__(self, other):
raise NotImplementedError("Matrix Power not defined")
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rtruediv__')
def __truediv__(self, other):
return self * other**S.NegativeOne
@_sympifyit('other', NotImplemented)
@call_highest_priority('__truediv__')
def __rtruediv__(self, other):
raise NotImplementedError()
#return MatMul(other, Pow(self, S.NegativeOne))
@property
def rows(self):
return self.shape[0]
@property
def cols(self):
return self.shape[1]
@property
def is_square(self) -> bool | None:
rows, cols = self.shape
if isinstance(rows, Integer) and isinstance(cols, Integer):
return rows == cols
if rows == cols:
return True
return None
def _eval_conjugate(self):
from sympy.matrices.expressions.adjoint import Adjoint
return Adjoint(Transpose(self))
def as_real_imag(self, deep=True, **hints):
return self._eval_as_real_imag()
def _eval_as_real_imag(self):
real = S.Half * (self + self._eval_conjugate())
im = (self - self._eval_conjugate())/(2*S.ImaginaryUnit)
return (real, im)
def _eval_inverse(self):
return Inverse(self)
def _eval_determinant(self):
return Determinant(self)
def _eval_transpose(self):
return Transpose(self)
def _eval_trace(self):
return None
def _eval_power(self, exp):
"""
Override this in sub-classes to implement simplification of powers. The cases where the exponent
is -1, 0, 1 are already covered in MatPow.doit(), so implementations can exclude these cases.
"""
return MatPow(self, exp)
def _eval_simplify(self, **kwargs):
if self.is_Atom:
return self
else:
from sympy.simplify import simplify
return self.func(*[simplify(x, **kwargs) for x in self.args])
def _eval_adjoint(self):
from sympy.matrices.expressions.adjoint import Adjoint
return Adjoint(self)
def _eval_derivative_n_times(self, x, n):
return Basic._eval_derivative_n_times(self, x, n)
def _eval_derivative(self, x):
# `x` is a scalar:
if self.has(x) or (isinstance(x, MatrixElement) and self.has(x.parent)):
# See if there are other methods using it:
return super()._eval_derivative(x)
else:
return ZeroMatrix(*self.shape)
@classmethod
def _check_dim(cls, dim):
"""Helper function to check invalid matrix dimensions"""
ok = not dim.is_Float and check_assumptions(
dim, integer=True, nonnegative=True)
if ok is False:
raise ValueError(
"The dimension specification {} should be "
"a nonnegative integer.".format(dim))
def _entry(self, i, j, **kwargs):
raise NotImplementedError(
"Indexing not implemented for %s" % self.__class__.__name__)
def adjoint(self):
return adjoint(self)
def as_coeff_Mul(self, rational=False):
"""Efficiently extract the coefficient of a product."""
return S.One, self
def conjugate(self):
return conjugate(self)
def transpose(self):
from sympy.matrices.expressions.transpose import transpose
return transpose(self)
@property
def T(self):
'''Matrix transposition'''
return self.transpose()
def inverse(self):
if self.is_square is False:
raise NonSquareMatrixError('Inverse of non-square matrix')
return self._eval_inverse()
def inv(self):
return self.inverse()
def det(self):
from sympy.matrices.expressions.determinant import det
return det(self)
@property
def I(self):
return self.inverse()
def valid_index(self, i, j):
def is_valid(idx):
return isinstance(idx, (int, Integer, Symbol, Expr))
return (is_valid(i) and is_valid(j) and
(self.rows is None or
(i >= -self.rows) != False and (i < self.rows) != False) and
(j >= -self.cols) != False and (j < self.cols) != False)
def __getitem__(self, key):
if not isinstance(key, tuple) and isinstance(key, slice):
from sympy.matrices.expressions.slice import MatrixSlice
return MatrixSlice(self, key, (0, None, 1))
if isinstance(key, tuple) and len(key) == 2:
i, j = key
if isinstance(i, slice) or isinstance(j, slice):
from sympy.matrices.expressions.slice import MatrixSlice
return MatrixSlice(self, i, j)
i, j = _sympify(i), _sympify(j)
if self.valid_index(i, j) != False:
return self._entry(i, j)
else:
raise IndexError("Invalid indices (%s, %s)" % (i, j))
elif isinstance(key, (SYMPY_INTS, Integer)):
# row-wise decomposition of matrix
rows, cols = self.shape
# allow single indexing if number of columns is known
if not isinstance(cols, Integer):
raise IndexError(filldedent('''
Single indexing is only supported when the number
of columns is known.'''))
key = _sympify(key)
i = key // cols
j = key % cols
if self.valid_index(i, j) != False:
return self._entry(i, j)
else:
raise IndexError("Invalid index %s" % key)
elif isinstance(key, (Symbol, Expr)):
raise IndexError(filldedent('''
Only integers may be used when addressing the matrix
with a single index.'''))
raise IndexError("Invalid index, wanted %s[i,j]" % self)
def _is_shape_symbolic(self) -> bool:
return (not isinstance(self.rows, (SYMPY_INTS, Integer))
or not isinstance(self.cols, (SYMPY_INTS, Integer)))
def as_explicit(self):
"""
Returns a dense Matrix with elements represented explicitly
Returns an object of type ImmutableDenseMatrix.
Examples
========
>>> from sympy import Identity
>>> I = Identity(3)
>>> I
I
>>> I.as_explicit()
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
See Also
========
as_mutable: returns mutable Matrix type
"""
if self._is_shape_symbolic():
raise ValueError(
'Matrix with symbolic shape '
'cannot be represented explicitly.')
from sympy.matrices.immutable import ImmutableDenseMatrix
return ImmutableDenseMatrix([[self[i, j]
for j in range(self.cols)]
for i in range(self.rows)])
def as_mutable(self):
"""
Returns a dense, mutable matrix with elements represented explicitly
Examples
========
>>> from sympy import Identity
>>> I = Identity(3)
>>> I
I
>>> I.shape
(3, 3)
>>> I.as_mutable()
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
See Also
========
as_explicit: returns ImmutableDenseMatrix
"""
return self.as_explicit().as_mutable()
def __array__(self, dtype=object, copy=None):
if copy is not None and not copy:
raise TypeError("Cannot implement copy=False when converting Matrix to ndarray")
from numpy import empty
a = empty(self.shape, dtype=object)
for i in range(self.rows):
for j in range(self.cols):
a[i, j] = self[i, j]
return a
def equals(self, other):
"""
Test elementwise equality between matrices, potentially of different
types
>>> from sympy import Identity, eye
>>> Identity(3).equals(eye(3))
True
"""
return self.as_explicit().equals(other)
def canonicalize(self):
return self
def as_coeff_mmul(self):
return S.One, MatMul(self)
@staticmethod
def from_index_summation(expr, first_index=None, last_index=None, dimensions=None):
r"""
Parse expression of matrices with explicitly summed indices into a
matrix expression without indices, if possible.
This transformation expressed in mathematical notation:
`\sum_{j=0}^{N-1} A_{i,j} B_{j,k} \Longrightarrow \mathbf{A}\cdot \mathbf{B}`
Optional parameter ``first_index``: specify which free index to use as
the index starting the expression.
Examples
========
>>> from sympy import MatrixSymbol, MatrixExpr, Sum
>>> from sympy.abc import i, j, k, l, N
>>> A = MatrixSymbol("A", N, N)
>>> B = MatrixSymbol("B", N, N)
>>> expr = Sum(A[i, j]*B[j, k], (j, 0, N-1))
>>> MatrixExpr.from_index_summation(expr)
A*B
Transposition is detected:
>>> expr = Sum(A[j, i]*B[j, k], (j, 0, N-1))
>>> MatrixExpr.from_index_summation(expr)
A.T*B
Detect the trace:
>>> expr = Sum(A[i, i], (i, 0, N-1))
>>> MatrixExpr.from_index_summation(expr)
Trace(A)
More complicated expressions:
>>> expr = Sum(A[i, j]*B[k, j]*A[l, k], (j, 0, N-1), (k, 0, N-1))
>>> MatrixExpr.from_index_summation(expr)
A*B.T*A.T
"""
from sympy.tensor.array.expressions.from_indexed_to_array import convert_indexed_to_array
from sympy.tensor.array.expressions.from_array_to_matrix import convert_array_to_matrix
first_indices = []
if first_index is not None:
first_indices.append(first_index)
if last_index is not None:
first_indices.append(last_index)
arr = convert_indexed_to_array(expr, first_indices=first_indices)
return convert_array_to_matrix(arr)
def applyfunc(self, func):
from .applyfunc import ElementwiseApplyFunction
return ElementwiseApplyFunction(func, self)
@dispatch(MatrixExpr, Expr)
def _eval_is_eq(lhs, rhs): # noqa:F811
return False
@dispatch(MatrixExpr, MatrixExpr) # type: ignore
def _eval_is_eq(lhs, rhs): # noqa:F811
if lhs.shape != rhs.shape:
return False
if (lhs - rhs).is_ZeroMatrix:
return True
def get_postprocessor(cls):
def _postprocessor(expr):
# To avoid circular imports, we can't have MatMul/MatAdd on the top level
mat_class = {Mul: MatMul, Add: MatAdd}[cls]
nonmatrices = []
matrices = []
for term in expr.args:
if isinstance(term, MatrixExpr):
matrices.append(term)
else:
nonmatrices.append(term)
if not matrices:
return cls._from_args(nonmatrices)
if nonmatrices:
if cls == Mul:
for i in range(len(matrices)):
if not matrices[i].is_MatrixExpr:
# If one of the matrices explicit, absorb the scalar into it
# (doit will combine all explicit matrices into one, so it
# doesn't matter which)
matrices[i] = matrices[i].__mul__(cls._from_args(nonmatrices))
nonmatrices = []
break
else:
# Maintain the ability to create Add(scalar, matrix) without
# raising an exception. That way different algorithms can
# replace matrix expressions with non-commutative symbols to
# manipulate them like non-commutative scalars.
return cls._from_args(nonmatrices + [mat_class(*matrices).doit(deep=False)])
if mat_class == MatAdd:
return mat_class(*matrices).doit(deep=False)
return mat_class(cls._from_args(nonmatrices), *matrices).doit(deep=False)
return _postprocessor
Basic._constructor_postprocessor_mapping[MatrixExpr] = {
"Mul": [get_postprocessor(Mul)],
"Add": [get_postprocessor(Add)],
}
def _matrix_derivative(expr, x, old_algorithm=False):
if isinstance(expr, MatrixBase) or isinstance(x, MatrixBase):
# Do not use array expressions for explicit matrices:
old_algorithm = True
if old_algorithm:
return _matrix_derivative_old_algorithm(expr, x)
from sympy.tensor.array.expressions.from_matrix_to_array import convert_matrix_to_array
from sympy.tensor.array.expressions.arrayexpr_derivatives import array_derive
from sympy.tensor.array.expressions.from_array_to_matrix import convert_array_to_matrix
array_expr = convert_matrix_to_array(expr)
diff_array_expr = array_derive(array_expr, x)
diff_matrix_expr = convert_array_to_matrix(diff_array_expr)
return diff_matrix_expr
def _matrix_derivative_old_algorithm(expr, x):
from sympy.tensor.array.array_derivatives import ArrayDerivative
lines = expr._eval_derivative_matrix_lines(x)
parts = [i.build() for i in lines]
from sympy.tensor.array.expressions.from_array_to_matrix import convert_array_to_matrix
parts = [[convert_array_to_matrix(j) for j in i] for i in parts]
def _get_shape(elem):
if isinstance(elem, MatrixExpr):
return elem.shape
return 1, 1
def get_rank(parts):
return sum(j not in (1, None) for i in parts for j in _get_shape(i))
ranks = [get_rank(i) for i in parts]
rank = ranks[0]
def contract_one_dims(parts):
if len(parts) == 1:
return parts[0]
else:
p1, p2 = parts[:2]
if p2.is_Matrix:
p2 = p2.T
if p1 == Identity(1):
pbase = p2
elif p2 == Identity(1):
pbase = p1
else:
pbase = p1*p2
if len(parts) == 2:
return pbase
else: # len(parts) > 2
if pbase.is_Matrix:
raise ValueError("")
return pbase*Mul.fromiter(parts[2:])
if rank <= 2:
return Add.fromiter([contract_one_dims(i) for i in parts])
return ArrayDerivative(expr, x)
| MatrixExpr |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/roles.py | {
"start": 4454,
"end": 4531
} | class ____(ExpressionElementRole[_T]):
__slots__ = ()
| LabeledColumnExprRole |
python | getsentry__sentry | tests/sentry/taskworker/test_worker.py | {
"start": 4405,
"end": 24535
} | class ____(TestCase):
def test_tasks_exist(self) -> None:
import sentry.taskworker.tasks.examples as example_tasks
assert example_tasks.simple_task
assert example_tasks.retry_task
assert example_tasks.at_most_once_task
def test_fetch_task(self) -> None:
taskworker = TaskWorker(
app_module="sentry.taskworker.runtime:app",
broker_hosts=["127.0.0.1:50051"],
max_child_task_count=100,
process_type="fork",
)
with mock.patch.object(taskworker.client, "get_task") as mock_get:
mock_get.return_value = SIMPLE_TASK
task = taskworker.fetch_task()
mock_get.assert_called_once()
assert task
assert task.activation.id == SIMPLE_TASK.activation.id
def test_fetch_no_task(self) -> None:
taskworker = TaskWorker(
app_module="sentry.taskworker.runtime:app",
broker_hosts=["127.0.0.1:50051"],
max_child_task_count=100,
process_type="fork",
)
with mock.patch.object(taskworker.client, "get_task") as mock_get:
mock_get.return_value = None
task = taskworker.fetch_task()
mock_get.assert_called_once()
assert task is None
def test_run_once_no_next_task(self) -> None:
max_runtime = 5
taskworker = TaskWorker(
app_module="sentry.taskworker.runtime:app",
broker_hosts=["127.0.0.1:50051"],
max_child_task_count=1,
process_type="fork",
)
with mock.patch.object(taskworker, "client") as mock_client:
mock_client.get_task.return_value = SIMPLE_TASK
# No next_task returned
mock_client.update_task.return_value = None
taskworker.start_result_thread()
taskworker.start_spawn_children_thread()
start = time.time()
while True:
taskworker.run_once()
if mock_client.update_task.called:
break
if time.time() - start > max_runtime:
taskworker.shutdown()
raise AssertionError("Timeout waiting for update_task to be called")
taskworker.shutdown()
assert mock_client.get_task.called
assert mock_client.update_task.call_count == 1
assert mock_client.update_task.call_args.args[0].host == "localhost:50051"
assert mock_client.update_task.call_args.args[0].task_id == SIMPLE_TASK.activation.id
assert (
mock_client.update_task.call_args.args[0].status == TASK_ACTIVATION_STATUS_COMPLETE
)
assert mock_client.update_task.call_args.args[1] is None
def test_run_once_with_next_task(self) -> None:
# Cover the scenario where update_task returns the next task which should
# be processed.
max_runtime = 5
taskworker = TaskWorker(
app_module="sentry.taskworker.runtime:app",
broker_hosts=["127.0.0.1:50051"],
max_child_task_count=1,
process_type="fork",
)
with mock.patch.object(taskworker, "client") as mock_client:
def update_task_response(*args, **kwargs):
if mock_client.update_task.call_count >= 1:
return None
return SIMPLE_TASK
mock_client.update_task.side_effect = update_task_response
mock_client.get_task.return_value = SIMPLE_TASK
taskworker.start_result_thread()
taskworker.start_spawn_children_thread()
# Run until two tasks have been processed
start = time.time()
while True:
taskworker.run_once()
if mock_client.update_task.call_count >= 2:
break
if time.time() - start > max_runtime:
taskworker.shutdown()
raise AssertionError("Timeout waiting for get_task to be called")
taskworker.shutdown()
assert mock_client.get_task.called
assert mock_client.update_task.call_count == 2
assert mock_client.update_task.call_args.args[0].host == "localhost:50051"
assert mock_client.update_task.call_args.args[0].task_id == SIMPLE_TASK.activation.id
assert (
mock_client.update_task.call_args.args[0].status == TASK_ACTIVATION_STATUS_COMPLETE
)
assert mock_client.update_task.call_args.args[1] is None
@override_options({"taskworker.fetch_next.disabled_pools": ["testing"]})
def test_run_once_with_fetch_next_disabled(self) -> None:
# Cover the scenario where taskworker.fetch_next.disabled_pools is defined
max_runtime = 5
taskworker = TaskWorker(
app_module="sentry.taskworker.runtime:app",
broker_hosts=["127.0.0.1:50051"],
max_child_task_count=1,
process_type="fork",
processing_pool_name="testing",
)
with mock.patch.object(taskworker, "client") as mock_client:
mock_client.update_task.return_value = None
mock_client.get_task.return_value = SIMPLE_TASK
taskworker.start_result_thread()
taskworker.start_spawn_children_thread()
# Run until two tasks have been processed
start = time.time()
while True:
taskworker.run_once()
if mock_client.update_task.call_count >= 2:
break
if time.time() - start > max_runtime:
taskworker.shutdown()
raise AssertionError("Timeout waiting for update_task to be called")
taskworker.shutdown()
assert mock_client.get_task.called
assert mock_client.update_task.call_count == 2
assert mock_client.update_task.call_args.args[0].host == "localhost:50051"
assert mock_client.update_task.call_args.args[0].task_id == SIMPLE_TASK.activation.id
assert (
mock_client.update_task.call_args.args[0].status == TASK_ACTIVATION_STATUS_COMPLETE
)
assert mock_client.update_task.call_args.args[1] is None
def test_run_once_with_update_failure(self) -> None:
# Cover the scenario where update_task fails a few times in a row
# We should retain the result until RPC succeeds.
max_runtime = 5
taskworker = TaskWorker(
app_module="sentry.taskworker.runtime:app",
broker_hosts=["127.0.0.1:50051"],
max_child_task_count=1,
process_type="fork",
)
with mock.patch.object(taskworker, "client") as mock_client:
def update_task_response(*args, **kwargs):
if mock_client.update_task.call_count <= 2:
# Use setattr() because internally grpc uses _InactiveRpcError
# but it isn't exported.
err = grpc.RpcError("update task failed")
setattr(err, "code", lambda: grpc.StatusCode.UNAVAILABLE)
raise err
return None
def get_task_response(*args, **kwargs):
# Only one task that fails to update
if mock_client.get_task.call_count == 1:
return SIMPLE_TASK
return None
mock_client.update_task.side_effect = update_task_response
mock_client.get_task.side_effect = get_task_response
taskworker.start_result_thread()
taskworker.start_spawn_children_thread()
# Run until the update has 'completed'
start = time.time()
while True:
taskworker.run_once()
if mock_client.update_task.call_count >= 3:
break
if time.time() - start > max_runtime:
taskworker.shutdown()
raise AssertionError("Timeout waiting for get_task to be called")
taskworker.shutdown()
assert mock_client.get_task.called
assert mock_client.update_task.call_count == 3
def test_run_once_current_task_state(self) -> None:
# Run a task that uses retry_task() helper
# to raise and catch a NoRetriesRemainingError
max_runtime = 5
taskworker = TaskWorker(
app_module="sentry.taskworker.runtime:app",
broker_hosts=["127.0.0.1:50051"],
max_child_task_count=1,
process_type="fork",
)
with mock.patch.object(taskworker, "client") as mock_client:
def update_task_response(*args, **kwargs):
return None
mock_client.update_task.side_effect = update_task_response
mock_client.get_task.return_value = RETRY_STATE_TASK
taskworker.start_result_thread()
taskworker.start_spawn_children_thread()
# Run until two tasks have been processed
start = time.time()
while True:
taskworker.run_once()
if mock_client.update_task.call_count >= 1:
break
if time.time() - start > max_runtime:
taskworker.shutdown()
raise AssertionError("Timeout waiting for update_task to be called")
taskworker.shutdown()
assert mock_client.get_task.called
assert mock_client.update_task.call_count == 1
# status is complete, as retry_state task handles the NoRetriesRemainingError
assert mock_client.update_task.call_args.args[0].host == "localhost:50051"
assert (
mock_client.update_task.call_args.args[0].task_id == RETRY_STATE_TASK.activation.id
)
assert (
mock_client.update_task.call_args.args[0].status == TASK_ACTIVATION_STATUS_COMPLETE
)
redis = redis_clusters.get("default")
assert current_task() is None, "should clear current task on completion"
assert redis.get("no-retries-remaining"), "key should exist if except block was hit"
redis.delete("no-retries-remaining")
@pytest.mark.django_db
@mock.patch("sentry.taskworker.workerchild.capture_checkin")
def test_child_process_complete(mock_capture_checkin: mock.MagicMock) -> None:
todo: queue.Queue[InflightTaskActivation] = queue.Queue()
processed: queue.Queue[ProcessingResult] = queue.Queue()
shutdown = Event()
todo.put(SIMPLE_TASK)
child_process(
"sentry.taskworker.runtime:app",
todo,
processed,
shutdown,
max_task_count=1,
processing_pool_name="test",
process_type="fork",
)
assert todo.empty()
result = processed.get()
assert result.task_id == SIMPLE_TASK.activation.id
assert result.status == TASK_ACTIVATION_STATUS_COMPLETE
assert mock_capture_checkin.call_count == 0
@pytest.mark.django_db
def test_child_process_remove_start_time_kwargs() -> None:
activation = InflightTaskActivation(
host="localhost:50051",
receive_timestamp=0,
activation=TaskActivation(
id="6789",
taskname="examples.will_retry",
namespace="examples",
parameters='{"args": ["stuff"], "kwargs": {"__start_time": 123}}',
processing_deadline_duration=100000,
),
)
todo: queue.Queue[InflightTaskActivation] = queue.Queue()
processed: queue.Queue[ProcessingResult] = queue.Queue()
shutdown = Event()
todo.put(activation)
child_process(
"sentry.taskworker.runtime:app",
todo,
processed,
shutdown,
max_task_count=1,
processing_pool_name="test",
process_type="fork",
)
assert todo.empty()
result = processed.get()
assert result.task_id == activation.activation.id
assert result.status == TASK_ACTIVATION_STATUS_COMPLETE
@pytest.mark.django_db
def test_child_process_retry_task() -> None:
todo: queue.Queue[InflightTaskActivation] = queue.Queue()
processed: queue.Queue[ProcessingResult] = queue.Queue()
shutdown = Event()
todo.put(RETRY_TASK)
child_process(
"sentry.taskworker.runtime:app",
todo,
processed,
shutdown,
max_task_count=1,
processing_pool_name="test",
process_type="fork",
)
assert todo.empty()
result = processed.get()
assert result.task_id == RETRY_TASK.activation.id
assert result.status == TASK_ACTIVATION_STATUS_RETRY
@mock.patch("sentry.taskworker.workerchild.sentry_sdk.capture_exception")
@pytest.mark.django_db
def test_child_process_retry_task_max_attempts(mock_capture: mock.Mock) -> None:
# Create an activation that is on its final attempt and
# will raise an error again.
activation = InflightTaskActivation(
host="localhost:50051",
receive_timestamp=0,
activation=TaskActivation(
id="6789",
taskname="examples.will_retry",
namespace="examples",
parameters='{"args": ["raise"], "kwargs": {}}',
processing_deadline_duration=100000,
retry_state=RetryState(
attempts=2,
max_attempts=3,
),
),
)
todo: queue.Queue[InflightTaskActivation] = queue.Queue()
processed: queue.Queue[ProcessingResult] = queue.Queue()
shutdown = Event()
todo.put(activation)
child_process(
"sentry.taskworker.runtime:app",
todo,
processed,
shutdown,
max_task_count=1,
processing_pool_name="test",
process_type="fork",
)
assert todo.empty()
result = processed.get()
assert result.task_id == activation.activation.id
assert result.status == TASK_ACTIVATION_STATUS_FAILURE
assert mock_capture.call_count == 1
capture_call = mock_capture.call_args[0]
# Error type and chained error should be captured.
assert isinstance(capture_call[0], NoRetriesRemainingError)
assert isinstance(capture_call[0].__cause__, RuntimeError)
@pytest.mark.django_db
def test_child_process_failure_task() -> None:
todo: queue.Queue[InflightTaskActivation] = queue.Queue()
processed: queue.Queue[ProcessingResult] = queue.Queue()
shutdown = Event()
todo.put(FAIL_TASK)
child_process(
"sentry.taskworker.runtime:app",
todo,
processed,
shutdown,
max_task_count=1,
processing_pool_name="test",
process_type="fork",
)
assert todo.empty()
result = processed.get()
assert result.task_id == FAIL_TASK.activation.id
assert result.status == TASK_ACTIVATION_STATUS_FAILURE
@pytest.mark.django_db
def test_child_process_shutdown() -> None:
todo: queue.Queue[InflightTaskActivation] = queue.Queue()
processed: queue.Queue[ProcessingResult] = queue.Queue()
shutdown = Event()
shutdown.set()
todo.put(SIMPLE_TASK)
child_process(
"sentry.taskworker.runtime:app",
todo,
processed,
shutdown,
max_task_count=1,
processing_pool_name="test",
process_type="fork",
)
# When shutdown has been set, the child should not process more tasks.
assert todo.qsize() == 1
assert processed.qsize() == 0
@pytest.mark.django_db
def test_child_process_unknown_task() -> None:
todo: queue.Queue[InflightTaskActivation] = queue.Queue()
processed: queue.Queue[ProcessingResult] = queue.Queue()
shutdown = Event()
todo.put(UNDEFINED_TASK)
todo.put(SIMPLE_TASK)
child_process(
"sentry.taskworker.runtime:app",
todo,
processed,
shutdown,
max_task_count=1,
processing_pool_name="test",
process_type="fork",
)
result = processed.get()
assert result.task_id == UNDEFINED_TASK.activation.id
assert result.status == TASK_ACTIVATION_STATUS_FAILURE
result = processed.get()
assert result.task_id == SIMPLE_TASK.activation.id
assert result.status == TASK_ACTIVATION_STATUS_COMPLETE
@pytest.mark.django_db
def test_child_process_at_most_once() -> None:
todo: queue.Queue[InflightTaskActivation] = queue.Queue()
processed: queue.Queue[ProcessingResult] = queue.Queue()
shutdown = Event()
todo.put(AT_MOST_ONCE_TASK)
todo.put(AT_MOST_ONCE_TASK)
todo.put(SIMPLE_TASK)
child_process(
"sentry.taskworker.runtime:app",
todo,
processed,
shutdown,
max_task_count=2,
processing_pool_name="test",
process_type="fork",
)
assert todo.empty()
result = processed.get(block=False)
assert result.task_id == AT_MOST_ONCE_TASK.activation.id
assert result.status == TASK_ACTIVATION_STATUS_COMPLETE
result = processed.get(block=False)
assert result.task_id == SIMPLE_TASK.activation.id
assert result.status == TASK_ACTIVATION_STATUS_COMPLETE
@pytest.mark.django_db
@mock.patch("sentry.taskworker.workerchild.capture_checkin")
def test_child_process_record_checkin(mock_capture_checkin: mock.Mock) -> None:
todo: queue.Queue[InflightTaskActivation] = queue.Queue()
processed: queue.Queue[ProcessingResult] = queue.Queue()
shutdown = Event()
todo.put(SCHEDULED_TASK)
child_process(
"sentry.taskworker.runtime:app",
todo,
processed,
shutdown,
max_task_count=1,
processing_pool_name="test",
process_type="fork",
)
assert todo.empty()
result = processed.get()
assert result.task_id == SIMPLE_TASK.activation.id
assert result.status == TASK_ACTIVATION_STATUS_COMPLETE
assert mock_capture_checkin.call_count == 1
mock_capture_checkin.assert_called_with(
monitor_slug="simple-task",
check_in_id="abc123",
duration=mock.ANY,
status=MonitorStatus.OK,
)
@pytest.mark.django_db
@mock.patch("sentry.taskworker.workerchild.sentry_sdk.capture_exception")
def test_child_process_terminate_task(mock_capture: mock.Mock) -> None:
todo: queue.Queue[InflightTaskActivation] = queue.Queue()
processed: queue.Queue[ProcessingResult] = queue.Queue()
shutdown = Event()
sleepy = InflightTaskActivation(
host="localhost:50051",
receive_timestamp=0,
activation=TaskActivation(
id="111",
taskname="examples.timed",
namespace="examples",
parameters='{"args": [3], "kwargs": {}}',
processing_deadline_duration=1,
),
)
todo.put(sleepy)
child_process(
"sentry.taskworker.runtime:app",
todo,
processed,
shutdown,
max_task_count=1,
processing_pool_name="test",
process_type="fork",
)
assert todo.empty()
result = processed.get(block=False)
assert result.task_id == sleepy.activation.id
assert result.status == TASK_ACTIVATION_STATUS_FAILURE
assert mock_capture.call_count == 1
assert type(mock_capture.call_args.args[0]) is ProcessingDeadlineExceeded
@pytest.mark.django_db
@mock.patch("sentry.taskworker.workerchild.capture_checkin")
def test_child_process_decompression(mock_capture_checkin: mock.MagicMock) -> None:
todo: queue.Queue[InflightTaskActivation] = queue.Queue()
processed: queue.Queue[ProcessingResult] = queue.Queue()
shutdown = Event()
todo.put(COMPRESSED_TASK)
child_process(
"sentry.taskworker.runtime:app",
todo,
processed,
shutdown,
max_task_count=1,
processing_pool_name="test",
process_type="fork",
)
assert todo.empty()
result = processed.get()
assert result.task_id == COMPRESSED_TASK.activation.id
assert result.status == TASK_ACTIVATION_STATUS_COMPLETE
assert mock_capture_checkin.call_count == 0
| TestTaskWorker |
python | patrick-kidger__equinox | equinox/_jit.py | {
"start": 6206,
"end": 6442
} | class ____(logging.Filterer):
def filter(self, record: logging.LogRecord):
return not (
record.name == "jax._src.callback"
and record.getMessage() == "jax.pure_callback failed"
)
| _FilterCallback |
python | run-llama__llama_index | llama-index-core/tests/memory/blocks/test_vector.py | {
"start": 577,
"end": 2417
} | class ____(BasePydanticVectorStore):
"""Mock vector store for testing."""
stores_text: bool = True
is_embedding_query: bool = True
def __init__(self):
super().__init__()
self._nodes = {}
@property
def client(self) -> Any:
return self
@property
def nodes(self) -> Dict[str, BaseNode]:
return self._nodes
def add(self, nodes: Sequence[BaseNode], **kwargs: Any) -> List[str]:
"""Add nodes to vector store."""
ids = []
for node in nodes:
self._nodes[node.id_] = node
ids.append(node.id_)
return ids
async def async_add(self, nodes: Sequence[BaseNode], **kwargs: Any) -> List[str]:
"""Async add nodes to vector store."""
return self.add(nodes, **kwargs)
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete nodes with ref_doc_id."""
for node_id in list(self._nodes.keys()):
if self._nodes[node_id].ref_doc_id == ref_doc_id:
del self._nodes[node_id]
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Query vector store."""
# For simplicity, return all nodes
nodes = list(self._nodes.values())
if query.similarity_top_k and len(nodes) > query.similarity_top_k:
nodes = nodes[: query.similarity_top_k]
# Simulate similarity scores
similarities = [0.9 - 0.1 * i for i in range(len(nodes))]
ids = [node.id_ for node in nodes]
return VectorStoreQueryResult(nodes=nodes, similarities=similarities, ids=ids)
async def aquery(
self, query: VectorStoreQuery, **kwargs: Any
) -> VectorStoreQueryResult:
"""Async query vector store."""
return self.query(query, **kwargs)
| MockVectorStore |
python | PyCQA__pylint | tests/regrtest_data/max_inferable_limit_for_classes/nodes/roles.py | {
"start": 577,
"end": 635
} | class ____(ExpressionElementRole):
...
| BinaryElementRole |
python | kamyu104__LeetCode-Solutions | Python/unique-word-abbreviation.py | {
"start": 151,
"end": 851
} | class ____(object):
def __init__(self, dictionary):
"""
initialize your data structure here.
:type dictionary: List[str]
"""
self.lookup_ = collections.defaultdict(set)
for word in dictionary:
abbr = self.abbreviation(word)
self.lookup_[abbr].add(word)
def isUnique(self, word):
"""
check if a word is unique.
:type word: str
:rtype: bool
"""
abbr = self.abbreviation(word)
return self.lookup_[abbr] <= {word}
def abbreviation(self, word):
if len(word) <= 2:
return word
return word[0] + str(len(word)-2) + word[-1]
| ValidWordAbbr |
python | pytorch__pytorch | test/jit/test_freezing.py | {
"start": 66969,
"end": 118214
} | class ____(JitTestCase):
def setUp(self):
super().setUp()
self.default_dtype = torch.get_default_dtype()
torch.set_default_dtype(torch.double)
def tearDown(self):
torch.set_default_dtype(self.default_dtype)
super().tearDown()
def test_conv_bn_folding(self):
conv_bias = [True, False]
module_pairs = [
(nn.Conv1d, nn.BatchNorm1d),
(nn.Conv2d, nn.BatchNorm2d),
(nn.Conv3d, nn.BatchNorm3d),
]
use_tracing = [True, False]
bn_running_stats = [True, False]
for use_bias, modules, tracing, track_stats in product(
conv_bias, module_pairs, use_tracing, bn_running_stats
):
class ConvBN(torch.nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super().__init__()
self.conv = modules[0](
in_channels, out_channels, bias=use_bias, **kwargs
)
self.bn = modules[1](
out_channels, eps=0.001, track_running_stats=track_stats
)
def forward(self, x):
x = self.conv(x)
return self.bn(x)
mod_eager = ConvBN(3, 32, kernel_size=3, stride=2).eval()
inps = [4, 3, 4]
if modules[0] is nn.Conv2d:
inps.append(inps[-1])
if modules[0] is nn.Conv3d:
inps.append(inps[-1])
inps.append(inps[-1])
inp = torch.rand(inps)
if tracing:
scripted_mod = torch.jit.trace(mod_eager, (inp))
else:
scripted_mod = torch.jit.script(mod_eager)
self.run_pass("inline", scripted_mod.graph)
self.run_pass("peephole", scripted_mod.graph)
self.run_pass("constant_propagation", scripted_mod.graph)
FileCheck().check("conv").check("batch").run(scripted_mod.graph)
# successfully no-ops with non-const inputs
self.run_pass("fold_frozen_conv_bn", scripted_mod.graph)
FileCheck().check("conv").check("aten::batch_norm").run(scripted_mod.graph)
scripted_mod = torch.jit.freeze(scripted_mod)
self.run_pass("fold_frozen_conv_bn", scripted_mod.graph)
if track_stats:
FileCheck().check("conv").check_not("aten::batch_norm").run(
scripted_mod.graph
)
else:
FileCheck().check("conv").check("aten::batch_norm").run(
scripted_mod.graph
)
self.assertEqual(mod_eager(inp), scripted_mod(inp))
self.assertEqual(mod_eager(inp), scripted_mod(inp))
def test_conv_bn_folding_not_forward(self):
class ConvBN(torch.nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super().__init__()
self.conv = torch.nn.Conv2d(
in_channels, out_channels, bias=True, **kwargs
)
self.bn = torch.nn.BatchNorm2d(out_channels, eps=0.001)
self.amt = 3.2
def forward(self, x):
x = self.conv(x)
return self.bn(x)
@torch.jit.export
def make_prediction(self, x):
return self.forward(x) + self.amt
mod_eager = ConvBN(3, 32, kernel_size=3, stride=2).eval()
scripted_mod = torch.jit.script(mod_eager)
torch._C._jit_pass_inline(scripted_mod.make_prediction.graph)
FileCheck().check("conv").check("aten::batch_norm").run(
scripted_mod.make_prediction.graph
)
# _jit_pass_optimize_frozen_graph should not be called on non-method attributes (e.g. "amt")
scripted_mod = torch.jit.freeze(
scripted_mod, preserved_attrs=["make_prediction", "amt"]
)
FileCheck().check("conv").check_not("aten::batch_norm").run(
scripted_mod.make_prediction.graph
)
# During freezing this creates tensors constants that are attached to the frozen graph,
# which is then kept alive by the compilation unit (which causes a leak)
@skipCUDAMemoryLeakCheckIf(True)
@unittest.skipIf(not TEST_CUDA, "Optimization currently only run for GPU")
def test_conv_bn_folding_autocast_scenario_cuda(self):
# CUDA conv takes input tensors which must all be the same dtype,
# which can cause issues if folding produces inputs of different dtypes.
class ConvBN(torch.nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super().__init__()
self.conv = torch.nn.Conv2d(
in_channels, out_channels, bias=False, dtype=torch.half, **kwargs
)
self.bn = torch.nn.BatchNorm2d(
out_channels, eps=0.001, dtype=torch.float
)
def forward(self, x):
return self.bn(self.conv(x))
mod_eager = ConvBN(3, 32, kernel_size=3, stride=2).cuda().eval()
scripted_mod = torch.jit.script(mod_eager)
scripted_mod = torch.jit.freeze(scripted_mod)
FileCheck().check("conv").check_not("aten::batch_norm").run(scripted_mod.graph)
conv_node = scripted_mod.graph.findNode("aten::conv2d", True)
self.assertTrue(conv_node is not None)
bias_input = conv_node.namedInput("bias")
self.assertTrue(bias_input is not None)
self.assertTrue(bias_input.type().dtype() == torch.half)
x = torch.rand((3, 3, 32, 32), dtype=torch.half).cuda()
self.assertEqual(mod_eager(x), scripted_mod(x), atol=1e-2, rtol=1e-2)
self.assertEqual(mod_eager(x), scripted_mod(x), atol=1e-2, rtol=1e-2)
def test_conv_add_folding(self):
@torch.no_grad()
def test_conv_fusion(
use_bias, module, tracing, op, scalar, add_tensor, expect_success
):
class ConvOp(torch.nn.Module):
__constants__ = ["use_scalar"]
def __init__(self, in_channels, out_channels, tensor=None, **kwargs):
super().__init__()
self.conv = module(
in_channels, out_channels, bias=use_bias, **kwargs
)
self.conv2 = module(
in_channels, out_channels, bias=use_bias, **kwargs
)
self.use_scalar = scalar
tensor_size = [1 for _ in range(self.conv.weight.ndim)]
tensor_size[1] = self.conv.weight.size(0)
self.tensor = (
add_tensor
if add_tensor is not None
else torch.rand(tensor_size)
)
self.op = op
def forward(self, x):
x = self.conv(x)
if self.use_scalar:
return self.op(x, 2.0)
else:
return self.op(x, self.tensor)
mod_eager = ConvOp(3, 32, kernel_size=3, stride=2).eval()
inps = [4, 3, 4]
if module is nn.Conv2d:
inps.append(inps[-1])
if module is nn.Conv3d:
inps.append(inps[-1])
inps.append(inps[-1])
inp = torch.rand(inps)
if tracing:
scripted_mod = torch.jit.trace(mod_eager, (inp,))
else:
scripted_mod = torch.jit.script(mod_eager)
self.run_pass("inline", scripted_mod.graph)
op_str = "aten::" + op.__name__
FileCheck().check("conv").check(op_str).run(scripted_mod.graph)
# successively no-ops with non-const inputs
self.run_pass("fold_frozen_conv_mul_or_div", scripted_mod.graph)
self.run_pass("fold_frozen_conv_add_or_sub", scripted_mod.graph)
FileCheck().check("conv").check(op_str).run(scripted_mod.graph)
scripted_mod = torch.jit.freeze(scripted_mod)
self.run_pass("fold_frozen_conv_mul_or_div", scripted_mod.graph)
self.run_pass("fold_frozen_conv_add_or_sub", scripted_mod.graph)
if expect_success:
FileCheck().check("conv").check_not(op_str).run(scripted_mod.graph)
else:
FileCheck().check("conv").check(op_str).run(scripted_mod.graph)
self.assertEqual(mod_eager(inp), scripted_mod(inp))
self.assertEqual(mod_eager(inp), scripted_mod(inp))
conv_bias = [True, False]
modules = [nn.Conv1d, nn.Conv2d, nn.Conv3d]
use_tracing = [False, True]
use_scalar = [False, True]
ops = [torch.add, torch.sub, torch.mul, torch.div]
for use_bias, module, tracing, pytorch_op, scalar in product(
conv_bias, modules, use_tracing, ops, use_scalar
):
test_conv_fusion(
use_bias,
module,
tracing,
pytorch_op,
scalar,
add_tensor=None,
expect_success=True,
)
for use_bias, pytorch_op in product(conv_bias, ops):
# broadcasting add
test_conv_fusion(
use_bias,
nn.Conv2d,
False,
pytorch_op,
False,
add_tensor=torch.rand(32, 1, 32),
expect_success=False,
)
# broadcasting add
test_conv_fusion(
use_bias,
nn.Conv2d,
False,
pytorch_op,
False,
add_tensor=torch.rand(1, 1),
expect_success=True,
)
# add with different dtype
test_conv_fusion(
use_bias,
nn.Conv2d,
False,
pytorch_op,
False,
add_tensor=torch.tensor([2]).to(torch.int),
expect_success=True,
)
    def test_conv_mul_add_bn(self):
        """Conv -> mul(scalar) -> add(scalar) -> BatchNorm should fold into the conv
        weights/bias during freezing: neither batch_norm nor add remain in the graph,
        and numerics match the eager module."""
        class Conv_Mul_Add_Bn(nn.Module):
            def __init__(self, in_channels, out_channels, **kwargs):
                super().__init__()
                self.conv = nn.Conv2d(in_channels, out_channels, **kwargs)
                self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
                # Scalar tensors (float and int) interposed between conv and bn.
                self.tensor1 = torch.tensor(2.2)
                self.tensor2 = torch.tensor(2)
            def forward(self, x):
                return self.bn(
                    torch.add(torch.mul(self.conv(x), self.tensor1), self.tensor2)
                )
        input = torch.randn(8, 3, 64, 64)
        model = Conv_Mul_Add_Bn(3, 32, kernel_size=3, stride=1).eval()
        with torch.no_grad():
            result = model(input)
            traced_model = torch.jit.trace(model, input).eval()
            traced_model = torch.jit.freeze(traced_model)
            tresult = traced_model(input)
        self.assertEqual(result, tresult)
        # Folding must remove both the batch_norm and the intermediate add.
        FileCheck().check("conv").check_not("aten::batch_norm").run(
            traced_model.graph
        )
        FileCheck().check("conv").check_not("aten::add").run(traced_model.graph)
def test_linear_bn_folding(self):
module_pairs = [
(nn.Linear, nn.BatchNorm1d),
(nn.Linear, nn.BatchNorm2d),
(nn.Linear, nn.BatchNorm3d),
]
use_tracing = [True, False]
bn_running_stats = [True, False]
for modules, tracing, track_stats in product(
module_pairs, use_tracing, bn_running_stats
):
class LinearBN(torch.nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.linear = modules[0](in_features, out_features)
self.bn = modules[1](
out_features, eps=0.001, track_running_stats=track_stats
)
def forward(self, x):
x = self.linear(x)
return self.bn(x)
mod_eager = LinearBN(32, 32).eval()
inps = [3, 32]
if modules[1] is nn.BatchNorm2d:
inps.append(inps[-1])
inps.append(inps[-1])
if modules[1] is nn.BatchNorm3d:
inps.append(inps[-1])
inps.append(inps[-1])
inps.append(inps[-1])
inp = torch.rand(inps)
if tracing:
scripted_mod = torch.jit.trace(mod_eager, (inp))
else:
scripted_mod = torch.jit.script(mod_eager)
self.run_pass("inline", scripted_mod.graph)
self.run_pass("peephole", scripted_mod.graph)
self.run_pass("constant_propagation", scripted_mod.graph)
FileCheck().check("linear").check("batch").run(scripted_mod.graph)
# successfully no-ops with non-const inputs
self.run_pass("fold_frozen_linear_bn", scripted_mod.graph)
FileCheck().check("linear").check("aten::batch_norm").run(
scripted_mod.graph
)
scripted_mod = torch.jit.freeze(scripted_mod)
self.run_pass("fold_frozen_linear_bn", scripted_mod.graph)
if track_stats:
FileCheck().check("linear").check_not("aten::batch_norm").run(
scripted_mod.graph
)
else:
FileCheck().check("linear").check("aten::batch_norm").run(
scripted_mod.graph
)
self.assertEqual(mod_eager(inp), scripted_mod(inp))
self.assertEqual(mod_eager(inp), scripted_mod(inp))
def test_bn_not_broadcast_with_linear(self):
module_pairs = [
(nn.Linear, nn.BatchNorm1d),
(nn.Linear, nn.BatchNorm2d),
(nn.Linear, nn.BatchNorm3d),
]
use_tracing = [True, False]
linear_in = 3
# (linear_out, bn_in)
# case 1: linear_out < bn_in
# case 2: linear_out > bn_in
# case 3: linear_out != bn_in && linear_out = 1
dims = [(2, 4), (4, 2), (1, 2)]
for modules, tracing, dim in product(module_pairs, use_tracing, dims):
linear_out, bn_in = dim[0], dim[1]
linear = modules[0](linear_in, linear_out)
bn = modules[1](bn_in)
mod_eager = nn.Sequential(linear, bn).eval()
N, C = 3, bn_in
input_shape = [N, C]
if modules[1] is nn.BatchNorm1d:
H = linear_in
input_shape.append(H)
elif modules[1] is nn.BatchNorm2d:
H, W = 4, linear_in
input_shape.append(H)
input_shape.append(W)
elif modules[1] is nn.BatchNorm3d:
D, H, W = 4, 4, linear_in
input_shape.append(D)
input_shape.append(H)
input_shape.append(W)
inp = torch.rand(input_shape)
if tracing:
scripted_mod = torch.jit.trace(mod_eager, (inp))
else:
scripted_mod = torch.jit.script(mod_eager)
self.run_pass("inline", scripted_mod.graph)
self.run_pass("peephole", scripted_mod.graph)
self.run_pass("constant_propagation", scripted_mod.graph)
FileCheck().check("linear").check("batch").run(scripted_mod.graph)
self.run_pass("fold_frozen_linear_bn", scripted_mod.graph)
FileCheck().check("linear").check("aten::batch_norm").run(
scripted_mod.graph
)
frozen_mod = torch.jit.freeze(scripted_mod)
self.run_pass("fold_frozen_linear_bn", frozen_mod.graph)
# successfully skipped folding
FileCheck().check("linear").check("aten::batch_norm").run(frozen_mod.graph)
self.assertEqual(mod_eager(inp), frozen_mod(inp))
self.assertEqual(mod_eager(inp), frozen_mod(inp))
# successfully failed folding
with self.assertRaisesRegex(
AssertionError,
"To fuse, linear.out_features == bn.num_features or bn.num_features == 1",
):
nn.utils.fusion.fuse_linear_bn_eval(linear, bn)
@skipCUDAMemoryLeakCheckIf(True)
@unittest.skipIf(not TEST_CUDA, "Optimization currently only run for GPU")
def test_linear_bn_folding_autocast_scenario_cuda(self):
module_pairs = [
(nn.Linear, nn.BatchNorm1d),
(nn.Linear, nn.BatchNorm2d),
(nn.Linear, nn.BatchNorm3d),
]
use_tracing = [True, False]
bn_running_stats = [True, False]
for modules, tracing, track_stats in product(
module_pairs, use_tracing, bn_running_stats
):
class LinearBN(torch.nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.linear = modules[0](
in_features, out_features, bias=False, dtype=torch.half
)
self.bn = modules[1](out_features, eps=0.001, dtype=torch.float)
def forward(self, x):
x = self.linear(x)
return self.bn(x)
mod_eager = LinearBN(32, 32).cuda().eval()
inps = [3, 32]
if modules[1] is nn.BatchNorm2d:
inps.append(inps[-1])
inps.append(inps[-1])
if modules[1] is nn.BatchNorm3d:
inps.append(inps[-1])
inps.append(inps[-1])
inps.append(inps[-1])
x = torch.rand(inps, dtype=torch.half).cuda()
if tracing:
scripted_mod = torch.jit.trace(mod_eager, (x))
else:
scripted_mod = torch.jit.script(mod_eager)
scripted_mod = torch.jit.freeze(scripted_mod)
FileCheck().check("linear").check_not("aten::batch_norm").run(
scripted_mod.graph
)
lin_node = scripted_mod.graph.findNode("aten::linear", True)
self.assertTrue(lin_node is not None)
weight_input = lin_node.namedInput("weight")
bias_input = lin_node.namedInput("bias")
self.assertTrue(bias_input is not None)
self.assertTrue(weight_input.type().dtype() == torch.half)
self.assertTrue(bias_input.type().dtype() == torch.half)
self.assertEqual(mod_eager(x), scripted_mod(x), atol=1e-2, rtol=1e-2)
self.assertEqual(mod_eager(x), scripted_mod(x), atol=1e-2, rtol=1e-2)
    @unittest.skipIf(not TEST_CUDA, "Optimization currently only run for GPU")
    def test_linear_concat(self):
        """Two linears sharing the same input should be concatenated into one
        by the concat_frozen_linear pass (2 linears -> 1), for both equal and
        unequal output dimensions."""
        out_dimms = [[5, 10], [1, 5]]
        for w1_dim, w2_dim in out_dimms:
            class ModMultLinear(nn.Module):
                def __init__(self, w1_dim, w2_dim):
                    super().__init__()
                    self.w1 = nn.Parameter(torch.rand([w1_dim, 5]))
                    self.b1 = nn.Parameter(torch.rand([w1_dim]))
                    self.w2 = nn.Parameter(torch.rand([w2_dim, 5]))
                    self.b2 = nn.Parameter(torch.rand([w2_dim]))
                def forward(self, in_tensor1):
                    # Both linears consume the same input tensor -> concatenatable.
                    res1 = torch._C._nn.linear(in_tensor1, self.w1, self.b1)
                    res2 = torch._C._nn.linear(in_tensor1, self.w2, self.b2)
                    return res1, res2
            mod_eager = ModMultLinear(w1_dim, w2_dim).eval()
            test_val1 = torch.rand([50, 5])
            self.check_linear_optimizations(mod_eager, 2, 1, (test_val1,))
    @unittest.skipIf(not TEST_CUDA, "Optimization currently only run for GPU")
    def test_linear_concat_complex(self):
        """
        Testing that the interleaving of multiple optimizations does not
        cause errors, and gets optimized as expected
        """
        class ModMultLinear(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                w1_dim = 5
                w2_dim = 10
                self.w1 = nn.Parameter(torch.rand([w1_dim, 5]))
                self.b1 = nn.Parameter(torch.rand([w1_dim]))
                self.w2 = nn.Parameter(torch.rand([w2_dim, 5]))
                self.b2 = nn.Parameter(torch.rand([w2_dim]))
            def forward(self, in_tensor1):
                # Two groups share inputs: (res1, res2) use in_tensor1;
                # (res3, res4) use res1 -> 4 linears can concat down to 2.
                res1 = torch._C._nn.linear(in_tensor1, self.w1, self.b1)
                res3 = torch._C._nn.linear(res1, self.w2, self.b2)
                res2 = torch._C._nn.linear(in_tensor1, self.w2, self.b2)
                res4 = torch._C._nn.linear(res1, self.w1, self.b1)
                return res2, res3, res4
        mod_eager = ModMultLinear().eval()
        test_val1 = torch.rand([50, 5])
        self.check_linear_optimizations(mod_eager, 4, 2, (test_val1,))
    @unittest.skipIf(not TEST_CUDA, "Optimization currently only run for GPU")
    def test_linear_concat_different_input(self):
        """
        There should be no change to the graph due to the optimization pass
        due to the two input tensors being different
        """
        # Freezing requires that the graph be a module
        class ModMultLinear(nn.Module):
            def __init__(self, w1_dim, w2_dim):
                super().__init__()
                self.w1 = nn.Parameter(torch.rand([w1_dim, 5]))
                self.b1 = nn.Parameter(torch.rand([w1_dim]))
                self.w2 = nn.Parameter(torch.rand([w2_dim, 5]))
                self.b2 = nn.Parameter(torch.rand([w2_dim]))
            def forward(self, in_tensor1, in_tensor2):
                # Distinct inputs -> the concat pass must leave both linears alone.
                res1 = torch._C._nn.linear(in_tensor1, self.w1, self.b1)
                res2 = torch._C._nn.linear(in_tensor2, self.w2, self.b2)
                return res1, res2
        mod_eager = ModMultLinear(5, 5).eval()
        test_val1 = torch.rand([50, 5])
        test_val2 = torch.rand([50, 5])
        # Expect 2 linears before and after the pass (no fusion).
        self.check_linear_optimizations(mod_eager, 2, 2, (test_val1, test_val2))
    @unittest.skipIf(not TEST_CUDA, "Optimization currently only run for GPU")
    def test_linear_multiple_blocks(self):
        """Concat pass must respect block boundaries: linears inside an `if`
        block are not merged with linears outside it (4 linears -> 3)."""
        class ModMultLinear(nn.Module):
            def __init__(self, w1_dim, w2_dim):
                super().__init__()
                self.w1 = nn.Parameter(torch.rand([w1_dim, 5]))
                self.b1 = nn.Parameter(torch.rand([w1_dim]))
                self.w2 = nn.Parameter(torch.rand([w2_dim, 5]))
                self.b2 = nn.Parameter(torch.rand([w2_dim]))
            def forward(self, in_tensor1, in_tensor2, cond: bool):
                res1 = torch._C._nn.linear(in_tensor1, self.w1, self.b1)
                if cond:
                    # res3/res4 live inside a sub-block of the graph.
                    res3 = torch._C._nn.linear(in_tensor2, self.w2, self.b2)
                    res4 = torch._C._nn.linear(in_tensor1, self.w2, self.b1)
                else:
                    raise AssertionError
                res2 = torch._C._nn.linear(in_tensor1, self.w2, self.b1)
                return res1, res2, res3, res4
        mod_eager = ModMultLinear(5, 5).eval()
        test_val1 = torch.rand([50, 5])
        test_val2 = torch.rand([50, 5])
        self.check_linear_optimizations(mod_eager, 4, 3, (test_val1, test_val2, True))
    def check_linear_optimizations(
        self, eager_mod, orig_linears, new_linears, test_vals
    ):
        """Run the concat_frozen_linear pass on CPU and CUDA and verify linear counts.

        :param eager_mod: eager nn.Module under test (moved to CUDA for the GPU leg)
        :param orig_linears: expected aten::linear count before the pass / on CPU
        :param new_linears: expected aten::linear count after the pass on CUDA
        :param test_vals: tuple of forward() arguments (tensors are moved to CUDA)
        """
        for is_cuda in [False, True]:
            if is_cuda:
                mod_to_device = eager_mod.cuda()
                test_vals_to_device = [
                    t.cuda() if isinstance(t, torch.Tensor) else t for t in test_vals
                ]
            else:
                mod_to_device = eager_mod
                test_vals_to_device = test_vals
            script_mod = torch.jit.script(mod_to_device)
            op_graph = script_mod.graph
            FileCheck().check_count("aten::linear", orig_linears, exactly=True).run(
                op_graph
            )
            # successively no-ops with non-const inputs
            self.run_pass("concat_frozen_linear", op_graph)
            FileCheck().check_count("aten::linear", orig_linears, exactly=True).run(
                op_graph
            )
            script_mod = torch.jit.freeze(script_mod)
            op_graph = script_mod.graph
            self.run_pass("concat_frozen_linear", op_graph)
            # The optimization only fires on CUDA; CPU graphs stay unchanged.
            if is_cuda:
                FileCheck().check_count("aten::linear", new_linears, exactly=True).run(
                    op_graph
                )
            else:
                FileCheck().check_count("aten::linear", orig_linears, exactly=True).run(
                    op_graph
                )
            # Optimized module must still agree numerically with eager.
            self.assertEqual(
                mod_to_device(*test_vals_to_device), script_mod(*test_vals_to_device)
            )
    def test_optimize_freeze_module(self):
        """Verify the optimize_numerics knob on torch.jit.freeze: with it disabled
        batch_norm survives freezing until run_frozen_optimizations is called
        explicitly; with the default it is folded automatically."""
        in_channels, out_channels = 3, 32
        conv = torch.nn.Conv2d(
            in_channels, out_channels, kernel_size=3, stride=2, bias=True
        )
        bn = torch.nn.BatchNorm2d(out_channels, eps=0.001)
        mod = torch.nn.Sequential(conv, bn)
        # set optimize to False here, by default freezing runs run_frozen_optimizations
        frozen_mod = torch.jit.freeze(
            torch.jit.script(mod.eval()), optimize_numerics=False
        )
        # inspect frozen mod: batch_norm must still be present
        FileCheck().check("batch_norm").run(frozen_mod.graph)
        torch.jit.run_frozen_optimizations(frozen_mod)
        FileCheck().check_not("batch_norm").run(frozen_mod.graph)
        # run_frozen_optimizations should be run
        frozen_mod = torch.jit.freeze(torch.jit.script(mod.eval()))
        FileCheck().check_not("batch_norm").run(frozen_mod.graph)
    def test_freeze_remove_dropout(self):
        """Freezing an eval-mode module must remove aten::dropout (identity in
        eval) while preserving the module's output."""
        class Net(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.dropout = nn.Dropout(0.5)
            def forward(self, x):
                return self.dropout(x)
        mod = torch.jit.script(Net())
        # inspect mod: dropout present before freezing
        torch._C._jit_pass_inline(mod.graph)
        FileCheck().check("aten::dropout").run(mod.graph)
        frozen_mod = torch.jit.freeze(mod.eval())
        FileCheck().check_not("aten::dropout").run(frozen_mod.graph)
        input = torch.randn(2)
        output_s = mod.forward(input)
        output_f = frozen_mod.forward(input)
        # In eval mode dropout is the identity, so outputs must match exactly.
        self.assertEqual(output_s, output_f)
    def test_freeze_remove_feature_dropout(self):
        """Same as test_freeze_remove_dropout but for nn.Dropout2d, which lowers
        to aten::feature_dropout instead of aten::dropout."""
        class Net(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.dropout = nn.Dropout2d(0.5)
            def forward(self, x):
                return self.dropout(x)
        mod = torch.jit.script(Net().eval())
        # inspect mod: feature_dropout present before freezing
        torch._C._jit_pass_inline(mod.graph)
        FileCheck().check("aten::feature_dropout").run(mod.graph)
        frozen_mod = torch.jit.freeze(mod)
        FileCheck().check_not("aten::feature_dropout").run(frozen_mod.graph)
        input = torch.randn(2, 2, 1, 1)
        output_s = mod.forward(input)
        output_f = frozen_mod.forward(input)
        self.assertEqual(output_s, output_f)
    @unittest.skipIf(
        not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
    )
    # NOTE(review): method name looks like a typo for "mkldnn"; left as-is since
    # renaming would change which tests discovery picks up.
    def test_freeze_mkdlnn(self):
        """A conv converted to the MKL-DNN layout can be scripted and frozen, and
        must match the plain conv on dense input."""
        conv = torch.nn.Conv2d(3, 32, kernel_size=3, stride=2).eval().float()
        convmkl = mkldnn_utils.to_mkldnn(conv)
        out = torch.jit.freeze(torch.jit.script(convmkl.eval()))
        inp = torch.rand([4, 3, 4, 4]).float()
        # Frozen mkldnn module consumes/produces mkldnn tensors; compare densely.
        self.assertEqual(out(inp.to_mkldnn()).to_dense(), conv(inp))
@unittest.skipIf(
not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
)
def test_conv_to_mkldnn(self):
with set_default_dtype(torch.float):
for module, trace in product([nn.Conv2d, nn.Conv3d], [False, True]):
mod = module(3, 32, kernel_size=3, stride=2).eval()
inps = [4, 3, 4]
if module is nn.Conv2d:
inps.append(inps[-1])
if module is nn.Conv3d:
inps.append(inps[-1])
inps.append(inps[-1])
inp = torch.rand(inps)
if trace:
scripted_mod = torch.jit.script(mod)
else:
scripted_mod = torch.jit.trace(mod, (inp,))
self.run_pass("inline", scripted_mod.graph)
FileCheck().check("conv").run(scripted_mod.graph)
# successfully no-ops with non-const inputs
self.run_pass("convert_frozen_ops_to_mkldnn", scripted_mod.graph)
FileCheck().check_not("to_mkldnn").run(scripted_mod.graph)
scripted_mod = torch.jit.freeze(scripted_mod)
self.run_pass("convert_frozen_ops_to_mkldnn", scripted_mod.graph)
FileCheck().check("to_mkldnn").check("prim::mkldnn_convolution").check(
"to_dense"
).run(scripted_mod.graph)
self.assertEqual(mod(inp), scripted_mod(inp))
self.assertEqual(mod(inp), scripted_mod(inp))
    def test_linear_transpose(self):
        """transpose_frozen_linear should rewrite a linear with constant weight
        into a matmul form, removing all aten::linear nodes (1 -> 0)."""
        class ModLinear(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.bias = torch.nn.Parameter(torch.rand(30))
                self.weight = torch.nn.Parameter(torch.rand([30, 20]))
            def forward(self, x):
                return torch._C._nn.linear(x, self.weight, self.bias)
        mod_eager = ModLinear().eval()
        test_val = torch.rand([50, 20])
        self.check_linear_optimizations_2(
            mod_eager, 1, 0, "transpose_frozen_linear", (test_val,)
        )
    def test_linear_non_constant_weight(self):
        """transpose_frozen_linear must not fire when the weight is a runtime
        input rather than a frozen constant (1 linear stays 1)."""
        class ModLinear(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.bias = torch.nn.Parameter(torch.rand(30))
            def forward(self, x, weight):
                # weight comes in as an argument, so it is not constant-folded.
                return torch._C._nn.linear(x, weight, self.bias)
        mod_eager = ModLinear().eval()
        test_val = torch.rand([50, 20])
        test_weight = torch.rand([30, 20])
        self.check_linear_optimizations_2(
            mod_eager, 1, 1, "transpose_frozen_linear", (test_val, test_weight)
        )
    def check_linear_optimizations_2(
        self, eager_mod, orig_linears, new_linears, opt_pass, test_vals
    ):
        """CPU-only variant of check_linear_optimizations parameterized on the pass.

        :param eager_mod: eager nn.Module under test
        :param orig_linears: expected aten::linear count before freezing/pass
        :param new_linears: expected aten::linear count after freezing + pass
        :param opt_pass: name of the JIT pass handed to self.run_pass
        :param test_vals: tuple of forward() arguments
        """
        # TODO: merge with check_linear_optimizations once both diffs land
        mod_to_device = eager_mod
        test_vals_to_device = test_vals
        script_mod = torch.jit.script(mod_to_device)
        op_graph = script_mod.graph
        FileCheck().check_count("aten::linear", orig_linears, exactly=True).run(
            op_graph
        )
        # successively no-ops with non-const inputs
        self.run_pass(opt_pass, op_graph)
        FileCheck().check_count("aten::linear", orig_linears, exactly=True).run(
            op_graph
        )
        script_mod = torch.jit.freeze(script_mod)
        op_graph = script_mod.graph
        self.run_pass(opt_pass, op_graph)
        FileCheck().check_count("aten::linear", new_linears, exactly=True).run(op_graph)
        # Optimized module must still agree numerically with eager.
        self.assertEqual(
            mod_to_device(*test_vals_to_device), script_mod(*test_vals_to_device)
        )
    @staticmethod
    def conv():
        """Return a fresh 8->8 1x1 Conv2d used as a composable building block in tests."""
        # Generic composable conv for testing purposes
        return nn.Conv2d(8, 8, 1)
    @unittest.skipIf(
        not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
    )
    def test_collapse_adjacent_conversions(self):
        """Back-to-back mkldnn convs should share one to_mkldnn/to_dense pair:
        adjacent layout conversions between them are collapsed."""
        with set_default_dtype(torch.float):
            mod = nn.Sequential(self.conv(), self.conv()).eval()
            scripted_mod = torch.jit.script(mod)
            scripted_mod = torch.jit.freeze(scripted_mod)
            self.run_pass("convert_frozen_ops_to_mkldnn", scripted_mod.graph)
            FileCheck().check("to_mkldnn").check("prim::mkldnn_convolution").check(
                "prim::mkldnn_convolution"
            ).check("to_dense").run(scripted_mod.graph)
            # Exactly one conversion into mkldnn layout for the whole chain.
            FileCheck().check_count("to_mkldnn", 1, exactly=True).run(
                scripted_mod.graph
            )
            inp = torch.rand([1, 8, 8, 8])
            # Run twice to exercise any cached execution plan.
            self.assertEqual(scripted_mod(inp), mod(inp))
            self.assertEqual(scripted_mod(inp), mod(inp))
@unittest.skipIf(
not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
)
def test_mkldnn_fuser_broadcasting(self):
class Add(nn.Module):
def __init__(self, tensor):
super().__init__()
self.tensor = tensor
def forward(self, x):
return x + self.tensor
with set_default_dtype(torch.float):
for add_inp in [8], [8, 8, 1]:
mod = nn.Sequential(self.conv(), Add(torch.rand(add_inp))).eval()
scripted_mod = torch.jit.script(mod)
scripted_mod = torch.jit.freeze(scripted_mod)
self.run_pass("convert_frozen_ops_to_mkldnn", scripted_mod.graph)
FileCheck().check("prim::BroadcastMKLDNNTensors").run(
scripted_mod.graph
)
inp = torch.rand([1, 8, 8, 8])
self.assertEqual(scripted_mod(inp), mod(inp))
self.assertEqual(scripted_mod(inp), mod(inp))
# for good measure, check that broadcasting does not work without this op
# so we can remove the op if it ever gets supported
with self.assertRaisesRegex(RuntimeError, ""):
(
torch.rand([1, 8, 8, 8]).to_mkldnn()
+ torch.rand(add_inp).to_mkldnn()
)
    @unittest.skipIf(
        not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
    )
    def test_mkldnn_inplace_removal(self):
        """In-place add_/div_ after a conv must survive mkldnn conversion: the
        pass un-inplaces and then re-inplaces them in the mkldnn region."""
        class AddMul(nn.Module):
            def __init__(self, tensor):
                super().__init__()
                self.tensor = tensor
            def forward(self, x):
                return x.add_(self.tensor).div_(self.tensor) - 4
        with set_default_dtype(torch.float):
            mod = nn.Sequential(self.conv(), AddMul(torch.rand([8]))).eval()
            scripted_mod = torch.jit.script(mod)
            scripted_mod = torch.jit.freeze(scripted_mod)
            self.run_pass("convert_frozen_ops_to_mkldnn", scripted_mod.graph)
            # add gets uninplaced and reinplaced
            FileCheck().check("aten::to_mkldnn").check("aten::add_").check(
                "aten::div_"
            ).run(scripted_mod.graph)
            inp = torch.rand([1, 8, 8, 8])
            # Run twice to exercise any cached execution plan.
            self.assertEqual(scripted_mod(inp), mod(inp))
            self.assertEqual(scripted_mod(inp), mod(inp))
@unittest.skipIf(
not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
)
@skipIfNoTorchVision
def test_maxpool_mkldnn(self):
with set_default_dtype(torch.float):
model = torchvision.models.resnet18()
sub_model = torch.nn.Sequential(
model.conv1, model.bn1, model.relu, model.maxpool
)
mod = torch.jit.freeze(torch.jit.script(sub_model.eval()))
(
N,
C,
H,
W,
) = (
10,
3,
224,
224,
)
inp = torch.randn(N, C, H, W)
self.run_pass("convert_frozen_ops_to_mkldnn", mod.graph)
FileCheck().check("max_pool").check("to_dense").run(mod.graph)
FileCheck().check_count("to_dense", 1, exactly=True).run(mod.graph)
self.assertEqual(mod(inp), sub_model(inp))
    @unittest.skipIf(torch.backends.mkldnn.is_available(), "Testing no mkldnn")
    def test_conv_to_mkldnn_no_mkldnn(self):
        # test no error when mkldnn not available
        """The convert_frozen_ops_to_mkldnn pass must be a safe no-op on builds
        without MKL-DNN support."""
        with set_default_dtype(torch.float):
            mod = torch.jit.script(nn.Conv2d(3, 32, kernel_size=3, stride=2).eval())
            frozen = torch.jit.freeze(mod)
            self.run_pass("convert_frozen_ops_to_mkldnn", frozen.graph)
            inp = torch.rand([4, 3, 4, 4])
            self.assertEqual(frozen(inp), mod(inp))
@unittest.skipIf(not (TEST_CUDNN or TEST_WITH_ROCM), "requires CUDNN")
def test_freeze_conv_relu_fusion(self):
with set_default_dtype(torch.float):
conv_bias = [True, False]
conv_ops = [nn.Conv2d, nn.Conv3d]
use_add_z = [True, False]
use_tracing = [True, False]
for use_bias, conv, add_z, tracing in product(
conv_bias, conv_ops, use_add_z, use_tracing
):
class Net(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super().__init__()
self.conv = conv(
in_channels, out_channels, bias=use_bias, **kwargs
)
self.relu = nn.ReLU(inplace=True)
self.add_z = add_z
def forward(self, x):
z = self.conv(x)
out = self.conv(x)
if self.add_z:
out += z
out = self.relu(out)
return out
mod_eager = Net(3, 6, kernel_size=3, stride=2).eval().cuda()
inps = [5, 3, 4, 4]
if conv is nn.Conv3d:
inps.append(inps[-1])
inp = torch.rand(inps).cuda()
if tracing:
scripted_mod = torch.jit.trace(mod_eager, (inp))
else:
scripted_mod = torch.jit.script(mod_eager)
frozen_mod = torch.jit.optimize_for_inference(scripted_mod)
if TEST_WITH_ROCM:
if add_z:
FileCheck().check("aten::miopen_convolution_add_relu").run(
frozen_mod.graph
)
else:
FileCheck().check("aten::miopen_convolution_relu").run(
frozen_mod.graph
)
else:
if add_z:
FileCheck().check("aten::cudnn_convolution_add_relu").run(
frozen_mod.graph
)
else:
FileCheck().check("aten::cudnn_convolution_relu").run(
frozen_mod.graph
)
self.assertEqual(mod_eager(inp), frozen_mod(inp))
@unittest.skipIf(not (TEST_CUDNN or TEST_WITH_ROCM), "requires CUDNN")
def test_freeze_conv_relu_fusion_not_forward(self):
with set_default_dtype(torch.float):
class Net(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super().__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, bias=None, **kwargs
)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
z = self.conv(x)
out = self.conv(x)
out = self.relu(out)
return out
@torch.jit.export
def make_prediction(self, x):
return self.forward(x)
mod_eager = Net(3, 6, kernel_size=3, stride=2).eval().cuda()
inps = [5, 3, 4, 4]
inp = torch.rand(inps).cuda()
scripted_mod = torch.jit.script(mod_eager)
frozen_mod = torch.jit.freeze(
scripted_mod, preserved_attrs=["make_prediction"]
)
optimized_mod = torch.jit.optimize_for_inference(
frozen_mod, other_methods=["make_prediction"]
)
if TEST_WITH_ROCM:
FileCheck().check("aten::miopen_convolution_relu").run(
optimized_mod.make_prediction.graph
)
else:
FileCheck().check("aten::cudnn_convolution_relu").run(
optimized_mod.make_prediction.graph
)
self.assertEqual(
mod_eager.make_prediction(inp), optimized_mod.make_prediction(inp)
)
@unittest.skipIf(
not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
)
def test_numel_less_than_size_with_padding(self):
with set_default_dtype(torch.float):
class MyModule(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv2d(
1,
2,
kernel_size=(2, 4),
stride=2,
padding=2,
dilation=(2, 1),
)
def forward(self, i0):
x = self.conv1(i0)
o0 = torch.max(x, i0)
o1 = torch.clip(x, -1.5, 1.5)
return o0, o1
i0 = torch.zeros((1, 1, 1, 2), dtype=torch.float32)
mod = MyModule()
out = mod(i0)
exported = torch.jit.trace(mod, [i0])
exported = torch.jit.optimize_for_inference(exported)
eout = exported(i0)
self.assertTrue(all(torch.allclose(x, y) for x, y in zip(out, eout)))
@unittest.skipIf(
not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
)
def test_incompatible_perf_formats(self):
with set_default_dtype(torch.float):
class Mod(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(3, 64, 3, 2)
self.max_pool = torch.nn.MaxPool2d(111, 111)
def forward(self, x):
a = self.conv(x)
b = self.max_pool(a)
return a + b
model = Mod()
model.eval()
mod = torch.jit.freeze(torch.jit.script(model))
(
N,
C,
H,
W,
) = (
10,
3,
224,
224,
)
inp = torch.randn(N, C, H, W)
self.run_pass("convert_frozen_ops_to_mkldnn", mod.graph)
self.assertEqual(model(inp), mod(inp))
@unittest.skipIf(
not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
)
def test_pool2d_batchnorm(self):
with set_default_dtype(torch.float):
pooling_layers = [
torch.nn.AdaptiveAvgPool2d(4),
# torch.nn.AdaptiveMaxPool2d(4), # return tuples
torch.nn.MaxPool2d(4),
torch.nn.AvgPool2d(4),
torch.nn.BatchNorm2d(64).eval(),
]
for pl in pooling_layers:
sub_model = torch.nn.Sequential(
torch.nn.Conv2d(3, 64, 2, 2),
torch.nn.ReLU(),
pl,
torch.nn.Hardswish(),
)
sub_model.eval()
mod = torch.jit.freeze(torch.jit.script(sub_model))
(
N,
C,
H,
W,
) = (
10,
3,
224,
224,
)
inp = torch.randn(N, C, H, W)
# these two passes needed to remove
# a size check in BatchNorm2d
removeExceptions(mod.graph)
self.run_pass("dce", mod.graph)
self.run_pass("convert_frozen_ops_to_mkldnn", mod.graph)
FileCheck().check("aten::to_dense").check_next("return").run(mod.graph)
self.assertEqual(sub_model(inp), mod(inp))
@unittest.skipIf(
not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
)
def test_pool3d_batchnorm(self):
with set_default_dtype(torch.float):
pooling_layers = [
torch.nn.MaxPool3d(4),
# torch.nn.AdaptiveAvgPool3d(4), # no ideep bindings
# torch.nn.AdaptiveMaxPool3d(4), # return tuples
torch.nn.AvgPool3d(4),
torch.nn.BatchNorm3d(64).eval(),
]
for pl in pooling_layers:
sub_model = torch.nn.Sequential(
torch.nn.Conv3d(3, 64, 2, 2),
torch.nn.ReLU(),
pl,
torch.nn.Hardswish(),
)
sub_model.eval()
mod = torch.jit.freeze(torch.jit.script(sub_model))
N, C, H, W, D = 10, 3, 64, 64, 64
inp = torch.randn(N, C, D, H, W)
# these two passes needed to remove
# a size check in BatchNorm2d
removeExceptions(mod.graph)
self.run_pass("dce", mod.graph)
self.run_pass("convert_frozen_ops_to_mkldnn", mod.graph)
FileCheck().check("aten::to_dense").check_next("return").run(mod.graph)
self.assertEqual(sub_model(inp), mod(inp))
@unittest.skipIf(
not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
)
@skipIfNoTorchVision
def test_conv_hardswish(self):
with set_default_dtype(torch.float):
class Clamp(torch.nn.Module):
def __init__(self, min_val, max_val, **kwargs):
super().__init__()
self.min_val = min_val
self.max_val = max_val
def forward(self, x):
return torch.clamp(x, self.min_val, self.max_val)
(
N,
C,
H,
W,
) = (
10,
3,
224,
224,
)
activations = [
torch.nn.Hardswish(),
torch.nn.Hardsigmoid(),
torch.nn.ReLU6(),
torch.nn.Tanh(),
torch.nn.Hardtanh(0.0, 6.0),
torch.nn.Hardtanh(1.0, 100.0),
torch.nn.Hardtanh(-100.0, -1.0),
torch.nn.GELU(),
Clamp(-100.0, -1.0),
Clamp(1.0, 100.0),
Clamp(0.0, 6.0),
Clamp(-1.0, 0.0),
]
model = torchvision.models.resnet18()
for activation in activations:
sub_model = torch.nn.Sequential(model.conv1, activation)
sub_model.eval()
mod = torch.jit.freeze(torch.jit.script(sub_model))
inp = torch.randn(N, C, H, W)
self.run_pass("convert_frozen_ops_to_mkldnn", mod.graph)
FileCheck().check_count("aten::to_dense", 1, exactly=True).run(
mod.graph
)
self.assertEqual(sub_model(inp), mod(inp))
@unittest.skipIf(
not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
)
def test_hardswish_hardsigmoid(self):
with set_default_dtype(torch.float):
op_map = {
"prim::MKLDNNHardSwish": F.hardswish,
"prim::MKLDNNHardSigmoid": F.hardsigmoid,
}
input_sizes = ([0], [1], [3], [1, 3, 8, 8])
for mkldnn_opname, aten_op in op_map.items():
for size in input_sizes:
for inplace in (True, False):
inplace_str = "_" if inplace else ""
inplace_tgt = "%34" if inplace else "%35"
graph_str = f"""graph(%input.1 : Tensor):
%33 : None = prim::Constant()
%34 : Tensor = aten::to_mkldnn(%input.1, %33)
%35 : Tensor = {mkldnn_opname}{inplace_str}(%34)
return ({inplace_tgt})
"""
g = torch._C.parse_ir(graph_str)
m = self.createFunctionFromGraph(g)
x = torch.rand(size)
# `inplace=False` is intentional, otherwise we modify the input
# and we aren't testing aten impls anyways
self.assertEqual(aten_op(x, inplace=False), m(x).to_dense())
    @unittest.skipIf(
        not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
    )
    def test_scalar_mul(self):
        """Scalar multiplies in the mkldnn region become ScalarMul nodes; the
        first use of a1 must stay out-of-place while the last use can be
        rewritten in-place (ScalarMul_)."""
        with set_default_dtype(torch.float):
            class Mod(nn.Module):
                def __init__(self) -> None:
                    super().__init__()
                    self.mod = nn.Conv2d(8, 8, 1, padding=1)
                def forward(self, x):
                    a1 = self.mod(x) * 4
                    # a1 is used twice below, constraining in-place rewriting.
                    return a1 * 4 + a1 * 5.0
            mod = Mod().eval()
            scripted = torch.jit.freeze(torch.jit.script(mod))
            optimized = torch.jit.optimize_for_inference(scripted)
            inp = torch.rand([1, 8, 8, 8])
            # a1 can't be inplaced for first use, can for second
            FileCheck().check("ScalarMul(").check("ScalarMul_").run(optimized.graph)
            self.assertEqual(optimized(inp), mod(inp))
    def test_remove_detach(self):
        """Freezing should strip aten::detach when the detached value is only
        used for computation (no identity dependence)."""
        class Mod(nn.Module):
            def forward(self, x):
                y = x.detach()
                return y * y
        mod = Mod().eval()
        frozen_mod = torch.jit.freeze(torch.jit.script(mod))
        inp = torch.randn((2, 2))
        FileCheck().check_not("aten::detach").run(frozen_mod.graph)
        self.assertEqual(frozen_mod(inp), mod(inp))
    def test_remove_detach_not_applied(self):
        """aten::detach must be kept when the program observes object identity
        (`x is y` would change if detach were removed)."""
        class Mod(nn.Module):
            def forward(self, x):
                y = x.detach()
                return x is y
        mod = Mod().eval()
        frozen_mod = torch.jit.freeze(torch.jit.script(mod))
        inp = torch.randn((2, 2))
        FileCheck().check("aten::detach").run(frozen_mod.graph)
        self.assertEqual(frozen_mod(inp), mod(inp))
@skipIfTorchDynamo("somehow causing hanging during python shutdown")
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
| TestFrozenOptimizations |
python | pytorch__pytorch | torch/_export/serde/schema.py | {
"start": 2836,
"end": 3315
class ____(_Union):
    """Serialized union for a SymInt-valued argument: exactly one of the fields
    is set — ``as_name`` holds the name of a symbolic integer, ``as_int`` holds
    a concrete int (used e.g. in lists mixing symbolic and literal ints)."""
    as_name: Annotated[str, 10]
    as_int: Annotated[int, 20]
    # NOTE: the comment below documents the *following* (SymFloat) union class,
    # not this one.
    # In most cases we will use the "as_name" field to store arguments which are
    # SymFloats.
    # The "as_float" field is used in the case where we have a list containing a mix
    # of SymFloat and float (ex. [1.0, s0, ...]). We will serialize this type of list to
    # be List[SymFloatArgument] and map the SymFloats to the "as_name" field, and ints
    # to the "as_float" field.
@_union_dataclass
| SymIntArgument |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/dataflow.py | {
"start": 54969,
"end": 64746
} | class ____(GoogleBaseAsyncHook, DataflowJobTerminalStateHelper):
"""Async hook class for dataflow service."""
sync_hook_class = DataflowHook
async def initialize_client(self, client_class):
"""
Initialize object of the given class.
Method is used to initialize asynchronous client. Because of the big amount of the classes which are
used for Dataflow service it was decided to initialize them the same way with credentials which are
received from the method of the GoogleBaseHook class.
:param client_class: Class of the Google cloud SDK
"""
credentials = (await self.get_sync_hook()).get_credentials()
return client_class(
credentials=credentials,
)
async def get_project_id(self) -> str:
project_id = (await self.get_sync_hook()).project_id
return project_id
async def get_job(
self,
job_id: str,
project_id: str = PROVIDE_PROJECT_ID,
job_view: int = JobView.JOB_VIEW_SUMMARY,
location: str = DEFAULT_DATAFLOW_LOCATION,
) -> Job:
"""
Get the job with the specified Job ID.
:param job_id: Job ID to get.
:param project_id: the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param job_view: Optional. JobView object which determines representation of the returned data
:param location: Optional. The location of the Dataflow job (for example europe-west1). See:
https://cloud.google.com/dataflow/docs/concepts/regional-endpoints
"""
project_id = project_id or (await self.get_project_id())
client = await self.initialize_client(JobsV1Beta3AsyncClient)
request = GetJobRequest(
{
"project_id": project_id,
"job_id": job_id,
"view": job_view,
"location": location,
}
)
job = await client.get_job(
request=request,
)
return job
async def get_job_status(
self,
job_id: str,
project_id: str = PROVIDE_PROJECT_ID,
job_view: int = JobView.JOB_VIEW_SUMMARY,
location: str = DEFAULT_DATAFLOW_LOCATION,
) -> JobState:
"""
Get the job status with the specified Job ID.
:param job_id: Job ID to get.
:param project_id: the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param job_view: Optional. JobView object which determines representation of the returned data
:param location: Optional. The location of the Dataflow job (for example europe-west1). See:
https://cloud.google.com/dataflow/docs/concepts/regional-endpoints
"""
job = await self.get_job(
project_id=project_id,
job_id=job_id,
job_view=job_view,
location=location,
)
state = job.current_state
return state
async def list_jobs(
self,
jobs_filter: int | None = None,
project_id: str | None = PROVIDE_PROJECT_ID,
location: str | None = DEFAULT_DATAFLOW_LOCATION,
page_size: int | None = None,
page_token: str | None = None,
) -> ListJobsAsyncPager:
"""
List jobs.
For detail see:
https://cloud.google.com/python/docs/reference/dataflow/latest/google.cloud.dataflow_v1beta3.types.ListJobsRequest
:param jobs_filter: Optional. This field filters out and returns jobs in the specified job state.
:param project_id: Optional. The Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: Optional. The location of the Dataflow job (for example europe-west1).
:param page_size: Optional. If there are many jobs, limit response to at most this many.
:param page_token: Optional. Set this to the 'next_page_token' field of a previous response to request
additional results in a long list.
"""
project_id = project_id or (await self.get_project_id())
client = await self.initialize_client(JobsV1Beta3AsyncClient)
request: ListJobsRequest = ListJobsRequest(
{
"project_id": project_id,
"location": location,
"filter": jobs_filter,
"page_size": page_size,
"page_token": page_token,
}
)
page_result: ListJobsAsyncPager = await client.list_jobs(request=request)
return page_result
async def list_job_messages(
self,
job_id: str,
project_id: str | None = PROVIDE_PROJECT_ID,
minimum_importance: int = JobMessageImportance.JOB_MESSAGE_BASIC,
page_size: int | None = None,
page_token: str | None = None,
start_time: Timestamp | None = None,
end_time: Timestamp | None = None,
location: str | None = DEFAULT_DATAFLOW_LOCATION,
) -> ListJobMessagesAsyncPager:
"""
Return ListJobMessagesAsyncPager object from MessagesV1Beta3AsyncClient.
This method wraps around a similar method of MessagesV1Beta3AsyncClient. ListJobMessagesAsyncPager can be iterated
over to extract messages associated with a specific Job ID.
For more details see the MessagesV1Beta3AsyncClient method description at:
https://cloud.google.com/python/docs/reference/dataflow/latest/google.cloud.dataflow_v1beta3.services.messages_v1_beta3.MessagesV1Beta3AsyncClient
:param job_id: ID of the Dataflow job to get messages about.
:param project_id: Optional. The Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param minimum_importance: Optional. Filter to only get messages with importance >= level.
For more details see the description at:
https://cloud.google.com/python/docs/reference/dataflow/latest/google.cloud.dataflow_v1beta3.types.JobMessageImportance
:param page_size: Optional. If specified, determines the maximum number of messages to return.
If unspecified, the service may choose an appropriate default, or may return an arbitrarily large number of results.
:param page_token: Optional. If supplied, this should be the value of next_page_token returned by an earlier call.
This will cause the next page of results to be returned.
:param start_time: Optional. If specified, return only messages with timestamps >= start_time.
The default is the job creation time (i.e. beginning of messages).
:param end_time: Optional. If specified, return only messages with timestamps < end_time. The default is the current time.
:param location: Optional. The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains
the job specified by job_id.
"""
project_id = project_id or (await self.get_project_id())
client = await self.initialize_client(MessagesV1Beta3AsyncClient)
request = ListJobMessagesRequest(
{
"project_id": project_id,
"job_id": job_id,
"minimum_importance": minimum_importance,
"page_size": page_size,
"page_token": page_token,
"start_time": start_time,
"end_time": end_time,
"location": location,
}
)
page_results: ListJobMessagesAsyncPager = await client.list_job_messages(request=request)
return page_results
async def get_job_metrics(
self,
job_id: str,
project_id: str | None = PROVIDE_PROJECT_ID,
start_time: Timestamp | None = None,
location: str | None = DEFAULT_DATAFLOW_LOCATION,
) -> JobMetrics:
"""
Return JobMetrics object from MetricsV1Beta3AsyncClient.
This method wraps around a similar method of MetricsV1Beta3AsyncClient.
For more details see the MetricsV1Beta3AsyncClient method description at:
https://cloud.google.com/python/docs/reference/dataflow/latest/google.cloud.dataflow_v1beta3.services.metrics_v1_beta3.MetricsV1Beta3AsyncClient
:param job_id: ID of the Dataflow job to get metrics for.
:param project_id: Optional. The Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param start_time: Optional. Return only metric data that has changed since this time.
Default is to return all information about all metrics for the job.
:param location: Optional. The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains
the job specified by job_id.
"""
project_id = project_id or (await self.get_project_id())
client: MetricsV1Beta3AsyncClient = await self.initialize_client(MetricsV1Beta3AsyncClient)
request = GetJobMetricsRequest(
{
"project_id": project_id,
"job_id": job_id,
"start_time": start_time,
"location": location,
}
)
job_metrics: JobMetrics = await client.get_job_metrics(request=request)
return job_metrics
| AsyncDataflowHook |
python | sphinx-doc__sphinx | sphinx/directives/admonitions.py | {
"start": 1434,
"end": 1498
} | class ____(SphinxAdmonition):
node_class = nodes.danger
| Danger |
python | astropy__astropy | astropy/units/physical.py | {
"start": 5830,
"end": 22185
} | class ____:
"""
Represents the physical type(s) that are dimensionally compatible
with a set of units.
Instances of this class should be accessed through either
`get_physical_type` or by using the
`~astropy.units.core.UnitBase.physical_type` attribute of units.
This class is not intended to be instantiated directly in user code.
For a list of physical types, see `astropy.units.physical`.
Parameters
----------
unit : `~astropy.units.Unit`
The unit to be represented by the physical type.
physical_types : `str` or `set` of `str`
A `str` representing the name of the physical type of the unit,
or a `set` containing strings that represent one or more names
of physical types.
Notes
-----
A physical type will be considered equal to an equivalent
`PhysicalType` instance (recommended) or a string that contains a
name of the physical type. The latter method is not recommended
in packages, as the names of some physical types may change in the
future.
To maintain backwards compatibility, two physical type names may be
included in one string if they are separated with a slash (e.g.,
``"momentum/impulse"``). String representations of physical types
may include underscores instead of spaces.
Examples
--------
`PhysicalType` instances may be accessed via the
`~astropy.units.core.UnitBase.physical_type` attribute of units.
>>> import astropy.units as u
>>> u.meter.physical_type
PhysicalType('length')
`PhysicalType` instances may also be accessed by calling
`get_physical_type`. This function will accept a unit, a string
containing the name of a physical type, or the number one.
>>> u.get_physical_type(u.m ** -3)
PhysicalType('number density')
>>> u.get_physical_type("volume")
PhysicalType('volume')
>>> u.get_physical_type(1)
PhysicalType('dimensionless')
Some units are dimensionally compatible with multiple physical types.
A pascal is intended to represent pressure and stress, but the unit
decomposition is equivalent to that of energy density.
>>> pressure = u.get_physical_type("pressure")
>>> pressure
PhysicalType({'energy density', 'pressure', 'stress'})
>>> 'energy density' in pressure
True
Physical types can be tested for equality against other physical
type objects or against strings that may contain the name of a
physical type.
>>> area = (u.m ** 2).physical_type
>>> area == u.barn.physical_type
True
>>> area == "area"
True
Multiplication, division, and exponentiation are enabled so that
physical types may be used for dimensional analysis.
>>> length = u.pc.physical_type
>>> area = (u.cm ** 2).physical_type
>>> length * area
PhysicalType('volume')
>>> area / length
PhysicalType('length')
>>> length ** 3
PhysicalType('volume')
may also be performed using a string that contains the name of a
physical type.
>>> "length" * area
PhysicalType('volume')
>>> "area" / length
PhysicalType('length')
Unknown physical types are labelled as ``"unknown"``.
>>> (u.s ** 13).physical_type
PhysicalType('unknown')
Dimensional analysis may be performed for unknown physical types too.
>>> length_to_19th_power = (u.m ** 19).physical_type
>>> length_to_20th_power = (u.m ** 20).physical_type
>>> length_to_20th_power / length_to_19th_power
PhysicalType('length')
"""
def __init__(self, unit: core.UnitBase, physical_types: str | set[str]) -> None:
self._unit = _replace_temperatures_with_kelvin(unit)
self._physical_type = sorted(_standardize_physical_type_names(physical_types))
def __iter__(self) -> Iterator[str]:
yield from self._physical_type
def __eq__(self, other: object) -> bool:
"""
Return `True` if ``other`` represents a physical type that is
consistent with the physical type of the `PhysicalType` instance.
"""
if self is other:
return True
if isinstance(other, PhysicalType):
return self._unit._physical_type_id == other._unit._physical_type_id
elif isinstance(other, str):
other = _standardize_physical_type_names(other)
return other.issubset(self._physical_type)
else:
return NotImplemented
def __repr__(self) -> str:
if len(self._physical_type) == 1:
names = "'" + self._physical_type[0] + "'"
else:
names = "{" + str(self._physical_type)[1:-1] + "}"
return f"PhysicalType({names})"
def __str__(self) -> str:
return "/".join(self._physical_type)
@staticmethod
def _dimensionally_compatible_unit(obj: object) -> core.UnitBase | None:
"""
Return a unit that corresponds to the provided argument.
"""
if isinstance(obj, core.UnitBase):
return _replace_temperatures_with_kelvin(obj)
elif isinstance(obj, PhysicalType):
return obj._unit
elif isinstance(obj, numbers.Real) and obj == 1:
return core.dimensionless_unscaled
elif isinstance(obj, str):
return _physical_type_from_str(obj)._unit
return None
def __mul__(
self, other: Union["PhysicalType", core.UnitBase, numbers.Real, str]
) -> "PhysicalType":
if other_unit := self._dimensionally_compatible_unit(other):
return (self._unit * other_unit).physical_type
return NotImplemented
def __rmul__(
self, other: Union["PhysicalType", core.UnitBase, str]
) -> "PhysicalType":
return self.__mul__(other)
def __truediv__(
self, other: Union["PhysicalType", core.UnitBase, numbers.Real, str]
) -> "PhysicalType":
if other_unit := self._dimensionally_compatible_unit(other):
return (self._unit / other_unit).physical_type
return NotImplemented
def __rtruediv__(
self, other: Union["PhysicalType", core.UnitBase, numbers.Real, str]
) -> "PhysicalType":
if other_unit := self._dimensionally_compatible_unit(other):
return (other_unit / self._unit).physical_type
return NotImplemented
def __pow__(self, power: UnitPowerLike) -> "PhysicalType":
return (self._unit**power).physical_type
def __hash__(self) -> int:
return hash(self._unit._physical_type_id)
def __len__(self) -> int:
return len(self._physical_type)
# We need to prevent operations like where a Unit instance left
# multiplies a PhysicalType instance from returning a `Quantity`
# instance with a PhysicalType as the value. We can do this by
# preventing np.array from casting a PhysicalType instance as
# an object array.
__array__: Final = None
_physical_unit_mapping: Final[dict[PhysicalTypeID, PhysicalType]] = {}
_unit_physical_mapping: Final[dict[str, PhysicalTypeID]] = {}
_name_physical_mapping: Final[dict[str, PhysicalType]] = {}
# mapping from attribute-accessible name (no spaces, etc.) to the actual name.
_attrname_physical_mapping: Final[dict[str, PhysicalType]] = {}
def _physical_type_from_str(name: str) -> PhysicalType:
"""
Return the `PhysicalType` instance associated with the name of a
physical type.
"""
if name == "unknown":
raise ValueError("cannot uniquely identify an 'unknown' physical type.")
elif name in _attrname_physical_mapping:
return _attrname_physical_mapping[name] # convert attribute-accessible
elif name in _name_physical_mapping:
return _name_physical_mapping[name]
else:
raise ValueError(f"{name!r} is not a known physical type.")
def _replace_temperatures_with_kelvin(unit: core.UnitBase) -> core.UnitBase:
"""Replace °F, and °C in the bases of `unit` with K.
The Kelvin, Celsius and Fahrenheit scales have different zero points,
which is a problem for the unit conversion machinery (without the
`temperature` equivalency). Replacing °F, and °C with kelvin allows the
physical type to be treated consistently. The Rankine scale has the
same zero point as the Kelvin scale, so degrees Rankine do not have to
be special-cased.
"""
physical_type_id = unit._physical_type_id
physical_type_id_components = []
substitution_was_made = False
for base, power in physical_type_id:
if base in ["deg_F", "deg_C"]:
base = "K"
substitution_was_made = True
physical_type_id_components.append((base, power))
if substitution_was_made:
return core.Unit._from_physical_type_id(tuple(physical_type_id_components))
else:
return unit
def _standardize_physical_type_names(physical_type_input: str | set[str]) -> set[str]:
"""
Convert a string or `set` of strings into a `set` containing
string representations of physical types.
The strings provided in ``physical_type_input`` can each contain
multiple physical types that are separated by a regular slash.
Underscores are treated as spaces so that variable names could
be identical to physical type names.
"""
if isinstance(physical_type_input, str):
physical_type_input = {physical_type_input}
standardized_physical_types = set()
for ptype_input in physical_type_input:
if not isinstance(ptype_input, str):
raise ValueError(f"expecting a string, but got {ptype_input}")
input_set = set(ptype_input.split("/"))
processed_set = {s.strip().replace("_", " ") for s in input_set}
standardized_physical_types |= processed_set
return standardized_physical_types
def def_physical_type(unit: core.UnitBase, name: str | set[str]) -> None:
"""
Add a mapping between a unit and the corresponding physical type(s).
If a physical type already exists for a unit, add new physical type
names so long as those names are not already in use for other
physical types.
Parameters
----------
unit : `~astropy.units.Unit`
The unit to be represented by the physical type.
name : `str` or `set` of `str`
A `str` representing the name of the physical type of the unit,
or a `set` containing strings that represent one or more names
of physical types.
Raises
------
ValueError
If a physical type name is already in use for another unit, or
if attempting to name a unit as ``"unknown"``.
Notes
-----
For a list of physical types, see `astropy.units.physical`.
"""
physical_type_id = unit._physical_type_id
physical_type_names = _standardize_physical_type_names(name)
if "unknown" in physical_type_names:
raise ValueError("cannot uniquely define an unknown physical type")
names_for_other_units = set(_unit_physical_mapping.keys()).difference(
_physical_unit_mapping.get(physical_type_id, {})
)
names_already_in_use = physical_type_names & names_for_other_units
if names_already_in_use:
raise ValueError(
"the following physical type names are already in use: "
f"{names_already_in_use}."
)
unit_already_in_use = physical_type_id in _physical_unit_mapping
if unit_already_in_use:
physical_type = _physical_unit_mapping[physical_type_id]
physical_type._physical_type = sorted(physical_type_names | set(physical_type))
else:
physical_type = PhysicalType(unit, physical_type_names)
_physical_unit_mapping[physical_type_id] = physical_type
for ptype in physical_type:
_unit_physical_mapping[ptype] = physical_type_id
_name_physical_mapping[ptype] = physical_type
# attribute-accessible name
attr_name = ptype.replace(" ", "_").replace("(", "").replace(")", "")
_attrname_physical_mapping[attr_name] = physical_type
def get_physical_type(
obj: PhysicalType | str | core.UnitBase | QuantityLike,
) -> PhysicalType:
"""
Return the physical type that corresponds to a unit (or another
physical type representation).
Parameters
----------
obj : quantity-like or `~astropy.units.PhysicalType`-like
An object that (implicitly or explicitly) has a corresponding
physical type. This object may be a unit, a
`~astropy.units.Quantity`, an object that can be converted to a
`~astropy.units.Quantity` (such as a number or array), a string
that contains a name of a physical type, or a
`~astropy.units.PhysicalType` instance.
Returns
-------
`~astropy.units.PhysicalType`
A representation of the physical type(s) of the unit.
Notes
-----
For a list of physical types, see `astropy.units.physical`.
Examples
--------
The physical type may be retrieved from a unit or a
`~astropy.units.Quantity`.
>>> import astropy.units as u
>>> u.get_physical_type(u.meter ** -2)
PhysicalType('column density')
>>> u.get_physical_type(0.62 * u.barn * u.Mpc)
PhysicalType('volume')
The physical type may also be retrieved by providing a `str` that
contains the name of a physical type.
>>> u.get_physical_type("energy")
PhysicalType({'energy', 'torque', 'work'})
Numbers and arrays of numbers correspond to a dimensionless physical
type.
>>> u.get_physical_type(1)
PhysicalType('dimensionless')
"""
if isinstance(obj, PhysicalType):
return obj
if isinstance(obj, str):
return _physical_type_from_str(obj)
if isinstance(obj, core.UnitBase):
unit = obj
else:
try:
unit = quantity.Quantity(obj, copy=COPY_IF_NEEDED).unit
except TypeError as exc:
raise TypeError(f"{obj} does not correspond to a physical type.") from exc
unit = _replace_temperatures_with_kelvin(unit)
physical_type_id = unit._physical_type_id
unit_has_known_physical_type = physical_type_id in _physical_unit_mapping
if unit_has_known_physical_type:
return _physical_unit_mapping[physical_type_id]
else:
return PhysicalType(unit, "unknown")
# ------------------------------------------------------------------------------
# Script section creating the physical types and the documentation
# define the physical types
for unit, physical_type in _units_and_physical_types:
def_physical_type(unit, physical_type)
del unit, physical_type
# For getting the physical types.
def __getattr__(name):
"""Checks for physical types using lazy import.
This also allows user-defined physical types to be accessible from the
:mod:`astropy.units.physical` module.
See `PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_
Parameters
----------
name : str
The name of the attribute in this module. If it is already defined,
then this function is not called.
Returns
-------
ptype : `~astropy.units.physical.PhysicalType`
Raises
------
AttributeError
If the ``name`` does not correspond to a physical type
"""
if name in _attrname_physical_mapping:
return _attrname_physical_mapping[name]
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
def __dir__() -> list[str]:
"""Return contents directory (__all__ + all physical type names)."""
return list(set(__all__) | set(_attrname_physical_mapping.keys()))
# This generates a docstring addition for this module that describes all of the
# standard physical types defined here.
if __doc__ is not None:
doclines = [
".. list-table:: Defined Physical Types",
" :header-rows: 1",
" :widths: 30 10 50",
"",
" * - Physical type",
" - Unit",
" - Other physical type(s) with same unit",
]
for name in sorted(_name_physical_mapping.keys()):
ptype = _name_physical_mapping[name]
doclines += [
f" * - _`{name}`",
f" - :math:`{ptype._unit.to_string('latex')[1:-1]}`",
f" - {', '.join([n for n in ptype if n != name])}",
]
__doc__ += "\n\n" + "\n".join(doclines)
| PhysicalType |
python | ApeWorX__ape | src/ape/exceptions.py | {
"start": 19837,
"end": 19941
} | class ____(ApeException):
"""
Raised when issues occur in a query engine.
"""
| QueryEngineError |
python | pandas-dev__pandas | pandas/tests/io/formats/test_to_html.py | {
"start": 26547,
"end": 38439
} | class ____:
def test_html_repr_min_rows_default(self, datapath):
# gh-27991
# default setting no truncation even if above min_rows
df = DataFrame({"a": range(20)})
result = df._repr_html_()
expected = expected_html(datapath, "html_repr_min_rows_default_no_truncation")
assert result == expected
# default of max_rows 60 triggers truncation if above
df = DataFrame({"a": range(61)})
result = df._repr_html_()
expected = expected_html(datapath, "html_repr_min_rows_default_truncated")
assert result == expected
@pytest.mark.parametrize(
"max_rows,min_rows,expected",
[
# truncated after first two rows
(10, 4, "html_repr_max_rows_10_min_rows_4"),
# when set to None, follow value of max_rows
(12, None, "html_repr_max_rows_12_min_rows_None"),
# when set value higher as max_rows, use the minimum
(10, 12, "html_repr_max_rows_10_min_rows_12"),
# max_rows of None -> never truncate
(None, 12, "html_repr_max_rows_None_min_rows_12"),
],
)
def test_html_repr_min_rows(self, datapath, max_rows, min_rows, expected):
# gh-27991
df = DataFrame({"a": range(61)})
expected = expected_html(datapath, expected)
with option_context("display.max_rows", max_rows, "display.min_rows", min_rows):
result = df._repr_html_()
assert result == expected
def test_repr_html_ipython_config(self, ip):
code = textwrap.dedent(
"""\
from pandas import DataFrame
df = DataFrame({"A": [1, 2]})
df._repr_html_()
cfg = get_ipython().config
cfg['IPKernelApp']['parent_appname']
df._repr_html_()
"""
)
result = ip.run_cell(code, silent=True)
assert not result.error_in_exec
def test_info_repr_html(self):
max_rows = 60
max_cols = 20
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert r"<class" not in df._repr_html_()
with option_context("display.large_repr", "info"):
assert r"<class" in df._repr_html_()
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert "<class" not in df._repr_html_()
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert "<class" in df._repr_html_()
def test_fake_qtconsole_repr_html(self, float_frame):
df = float_frame
def get_ipython():
return {"config": {"KernelApp": {"parent_appname": "ipython-qtconsole"}}}
repstr = df._repr_html_()
assert repstr is not None
with option_context("display.max_rows", 5, "display.max_columns", 2):
repstr = df._repr_html_()
assert "class" in repstr # info fallback
def test_repr_html(self, float_frame):
df = float_frame
df._repr_html_()
with option_context("display.max_rows", 1, "display.max_columns", 1):
df._repr_html_()
with option_context("display.notebook_repr_html", False):
df._repr_html_()
df = DataFrame([[1, 2], [3, 4]])
with option_context("display.show_dimensions", True):
assert "2 rows" in df._repr_html_()
with option_context("display.show_dimensions", False):
assert "2 rows" not in df._repr_html_()
def test_repr_html_mathjax(self):
df = DataFrame([[1, 2], [3, 4]])
assert "tex2jax_ignore" not in df._repr_html_()
assert "mathjax_ignore" not in df._repr_html_()
with option_context("display.html.use_mathjax", False):
assert "tex2jax_ignore" in df._repr_html_()
assert "mathjax_ignore" in df._repr_html_()
def test_repr_html_wide(self):
max_cols = 20
df = DataFrame([["a" * 25] * (max_cols - 1)] * 10)
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
wide_df = DataFrame([["a" * 25] * (max_cols + 1)] * 10)
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in wide_df._repr_html_()
def test_repr_html_wide_multiindex_cols(self):
max_cols = 20
mcols = MultiIndex.from_product(
[np.arange(max_cols // 2), ["foo", "bar"]], names=["first", "second"]
)
df = DataFrame([["a" * 25] * len(mcols)] * 10, columns=mcols)
reg_repr = df._repr_html_()
assert "..." not in reg_repr
mcols = MultiIndex.from_product(
(np.arange(1 + (max_cols // 2)), ["foo", "bar"]), names=["first", "second"]
)
df = DataFrame([["a" * 25] * len(mcols)] * 10, columns=mcols)
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_repr_html_long(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert str(41 + max_rows // 2) in reg_repr
h = max_rows + 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
long_repr = df._repr_html_()
assert ".." in long_repr
assert str(41 + max_rows // 2) not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_float(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert f"<td>{40 + h}</td>" in reg_repr
h = max_rows + 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
long_repr = df._repr_html_()
assert ".." in long_repr
assert "<td>31</td>" not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_long_multiindex(self):
max_rows = 60
max_L1 = max_rows // 2
tuples = list(itertools.product(np.arange(max_L1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(
np.random.default_rng(2).standard_normal((max_L1 * 2, 2)),
index=idx,
columns=["A", "B"],
)
with option_context("display.max_rows", 60, "display.max_columns", 20):
reg_repr = df._repr_html_()
assert "..." not in reg_repr
tuples = list(itertools.product(np.arange(max_L1 + 1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(
np.random.default_rng(2).standard_normal(((max_L1 + 1) * 2, 2)),
index=idx,
columns=["A", "B"],
)
long_repr = df._repr_html_()
assert "..." in long_repr
def test_repr_html_long_and_wide(self):
max_cols = 20
max_rows = 60
h, w = max_rows - 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
h, w = max_rows + 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_to_html_multilevel(multiindex_year_month_day_dataframe_random_data):
ymd = multiindex_year_month_day_dataframe_random_data
ymd.columns.name = "foo"
ymd.to_html()
ymd.T.to_html()
@pytest.mark.parametrize("na_rep", ["NaN", "Ted"])
def test_to_html_na_rep_and_float_format(na_rep, datapath):
# https://github.com/pandas-dev/pandas/issues/13828
df = DataFrame(
[
["A", 1.2225],
["A", None],
],
columns=["Group", "Data"],
)
result = df.to_html(na_rep=na_rep, float_format="{:.2f}".format)
expected = expected_html(datapath, "gh13828_expected_output")
expected = expected.format(na_rep=na_rep)
assert result == expected
def test_to_html_na_rep_non_scalar_data(datapath):
# GH47103
df = DataFrame([{"a": 1, "b": [1, 2, 3]}])
result = df.to_html(na_rep="-")
expected = expected_html(datapath, "gh47103_expected_output")
assert result == expected
def test_to_html_float_format_object_col(datapath):
# GH#40024
df = DataFrame(data={"x": [1000.0, "test"]})
result = df.to_html(float_format=lambda x: f"{x:,.0f}")
expected = expected_html(datapath, "gh40024_expected_output")
assert result == expected
def test_to_html_multiindex_col_with_colspace():
# GH#53885
df = DataFrame([[1, 2]])
df.columns = MultiIndex.from_tuples([(1, 1), (2, 1)])
result = df.to_html(col_space=100)
expected = (
'<table border="1" class="dataframe">\n'
" <thead>\n"
" <tr>\n"
' <th style="min-width: 100px;"></th>\n'
' <th style="min-width: 100px;">1</th>\n'
' <th style="min-width: 100px;">2</th>\n'
" </tr>\n"
" <tr>\n"
' <th style="min-width: 100px;"></th>\n'
' <th style="min-width: 100px;">1</th>\n'
' <th style="min-width: 100px;">1</th>\n'
" </tr>\n"
" </thead>\n"
" <tbody>\n"
" <tr>\n"
" <th>0</th>\n"
" <td>1</td>\n"
" <td>2</td>\n"
" </tr>\n"
" </tbody>\n"
"</table>"
)
assert result == expected
def test_to_html_tuple_col_with_colspace():
# GH#53885
df = DataFrame({("a", "b"): [1], "b": [2]})
result = df.to_html(col_space=100)
expected = (
'<table border="1" class="dataframe">\n'
" <thead>\n"
' <tr style="text-align: right;">\n'
' <th style="min-width: 100px;"></th>\n'
' <th style="min-width: 100px;">(a, b)</th>\n'
' <th style="min-width: 100px;">b</th>\n'
" </tr>\n"
" </thead>\n"
" <tbody>\n"
" <tr>\n"
" <th>0</th>\n"
" <td>1</td>\n"
" <td>2</td>\n"
" </tr>\n"
" </tbody>\n"
"</table>"
)
assert result == expected
def test_to_html_empty_complex_array():
# GH#54167
df = DataFrame({"x": np.array([], dtype="complex")})
result = df.to_html(col_space=100)
expected = (
'<table border="1" class="dataframe">\n'
" <thead>\n"
' <tr style="text-align: right;">\n'
' <th style="min-width: 100px;"></th>\n'
' <th style="min-width: 100px;">x</th>\n'
" </tr>\n"
" </thead>\n"
" <tbody>\n"
" </tbody>\n"
"</table>"
)
assert result == expected
| TestReprHTML |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_bedrock.py | {
"start": 7113,
"end": 8884
} | class ____:
MODEL_ARN = "testProvisionedModelArn"
@pytest.fixture
def mock_conn(self) -> Generator[BaseAwsConnection, None, None]:
with mock.patch.object(BedrockHook, "conn") as _conn:
_conn.create_provisioned_model_throughput.return_value = {"provisionedModelArn": self.MODEL_ARN}
yield _conn
@pytest.fixture
def bedrock_hook(self) -> Generator[BedrockHook, None, None]:
with mock_aws():
hook = BedrockHook(aws_conn_id="aws_default")
yield hook
def setup_method(self):
self.operator = BedrockCreateProvisionedModelThroughputOperator(
task_id="provision_throughput",
model_units=1,
provisioned_model_name="testProvisionedModelName",
model_id="test_model_arn",
)
self.operator.defer = mock.MagicMock()
@pytest.mark.parametrize(
("wait_for_completion", "deferrable"),
[
pytest.param(False, False, id="no_wait"),
pytest.param(True, False, id="wait"),
pytest.param(False, True, id="defer"),
],
)
@mock.patch.object(BedrockHook, "get_waiter")
def test_provisioned_model_wait_combinations(
self, _, wait_for_completion, deferrable, mock_conn, bedrock_hook
):
self.operator.wait_for_completion = wait_for_completion
self.operator.deferrable = deferrable
response = self.operator.execute({})
assert response == self.MODEL_ARN
assert bedrock_hook.get_waiter.call_count == wait_for_completion
assert self.operator.defer.call_count == deferrable
def test_template_fields(self):
validate_template_fields(self.operator)
| TestBedrockCreateProvisionedModelThroughputOperator |
python | allegroai__clearml | clearml/backend_api/services/v2_20/auth.py | {
"start": 372,
"end": 2664
} | class ____(NonStrictDataModel):
"""
:param access_key: Credentials access key
:type access_key: str
:param secret_key: Credentials secret key
:type secret_key: str
:param label: Optional credentials label
:type label: str
"""
_schema = {
"properties": {
"access_key": {
"description": "Credentials access key",
"type": ["string", "null"],
},
"label": {
"description": "Optional credentials label",
"type": ["string", "null"],
},
"secret_key": {
"description": "Credentials secret key",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(
self,
access_key: Optional[str] = None,
secret_key: Optional[str] = None,
label: Optional[str] = None,
**kwargs: Any
) -> None:
super(Credentials, self).__init__(**kwargs)
self.access_key = access_key
self.secret_key = secret_key
self.label = label
@schema_property("access_key")
def access_key(self) -> Optional[str]:
return self._property_access_key
@access_key.setter
def access_key(self, value: Optional[str]) -> None:
if value is None:
self._property_access_key = None
return
self.assert_isinstance(value, "access_key", six.string_types)
self._property_access_key = value
@schema_property("secret_key")
def secret_key(self) -> Optional[str]:
return self._property_secret_key
@secret_key.setter
def secret_key(self, value: Optional[str]) -> None:
if value is None:
self._property_secret_key = None
return
self.assert_isinstance(value, "secret_key", six.string_types)
self._property_secret_key = value
@schema_property("label")
def label(self) -> Optional[str]:
return self._property_label
@label.setter
def label(self, value: Optional[str]) -> None:
if value is None:
self._property_label = None
return
self.assert_isinstance(value, "label", six.string_types)
self._property_label = value
| Credentials |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.