language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pyca__cryptography | src/cryptography/x509/base.py | {
"start": 3366,
"end": 3908
} | class ____:
def __init__(
self,
attributes: Iterable[Attribute],
) -> None:
self._attributes = list(attributes)
__len__, __iter__, __getitem__ = _make_sequence_methods("_attributes")
def __repr__(self) -> str:
return f"<Attributes({self._attributes})>"
def get_attribute_for_oid(self, oid: ObjectIdentifier) -> Attribute:
for attr in self:
if attr.oid == oid:
return attr
raise AttributeNotFound(f"No {oid} attribute was found", oid)
| Attributes |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/sanity/integration_aliases.py | {
"start": 16675,
"end": 16768
} | class ____:
"""Check results."""
comments: list[str]
labels: dict[str, bool]
| Results |
python | PrefectHQ__prefect | tests/server/models/deprecated/test_work_queues.py | {
"start": 2176,
"end": 17571
} | class ____:
@pytest.fixture
async def tb12_work_queue(self, session):
work_queue = await models.work_queues.create_work_queue(
session=session,
work_queue=schemas.actions.WorkQueueCreate(
name="TB12",
description="The GOAT",
filter=schemas.core.QueueFilter(tags=["tb12"]),
),
)
await session.commit()
return work_queue
@pytest.fixture
async def flow_run_1_id(self):
return uuid4()
@pytest.fixture
async def flow_run_2_id(self):
return uuid4()
@pytest.fixture
async def flow_run_3_id(self):
return uuid4()
@pytest.fixture
async def flow_run_4_id(self):
return uuid4()
@pytest.fixture
async def flow_run_5_id(self):
return uuid4()
@pytest.fixture
async def flow_run_6_id(self):
return uuid4()
@pytest.fixture
async def flow_run_7_id(self):
return uuid4()
@pytest.fixture(autouse=True)
async def flow_runs(
self,
session,
deployment,
flow_run_1_id,
flow_run_2_id,
flow_run_3_id,
flow_run_4_id,
flow_run_5_id,
flow_run_6_id,
flow_run_7_id,
):
# flow run 1 is in a SCHEDULED state 5 seconds ago
flow_run_1 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
id=flow_run_1_id,
flow_id=deployment.flow_id,
deployment_id=deployment.id,
flow_version="0.1",
),
)
current_time = now("UTC")
await models.flow_runs.set_flow_run_state(
session=session,
flow_run_id=flow_run_1.id,
state=schemas.states.State(
type=schemas.states.StateType.SCHEDULED,
timestamp=current_time - datetime.timedelta(seconds=5),
state_details=dict(
scheduled_time=current_time - datetime.timedelta(seconds=1)
),
),
)
# flow run 2 is in a SCHEDULED state 1 minute ago with tags ["tb12", "goat"]
flow_run_2 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
id=flow_run_2_id,
flow_id=deployment.flow_id,
deployment_id=deployment.id,
flow_version="0.1",
tags=["tb12", "goat"],
next_scheduled_start_time=current_time - datetime.timedelta(minutes=1),
),
)
await models.flow_runs.set_flow_run_state(
session=session,
flow_run_id=flow_run_2.id,
state=schemas.states.State(
type=schemas.states.StateType.SCHEDULED,
timestamp=current_time - datetime.timedelta(minutes=1),
state_details=dict(
scheduled_time=current_time - datetime.timedelta(minutes=1)
),
),
)
# flow run 3 is in a PENDING state with tags ["tb12", "goat"]
flow_run_3 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
id=flow_run_3_id,
flow_id=deployment.flow_id,
deployment_id=deployment.id,
flow_version="0.1",
tags=["tb12", "goat"],
),
)
await models.flow_runs.set_flow_run_state(
session=session,
flow_run_id=flow_run_3.id,
state=schemas.states.State(
type=schemas.states.StateType.SCHEDULED,
timestamp=current_time - datetime.timedelta(seconds=5),
state_details=dict(
scheduled_time=current_time - datetime.timedelta(seconds=1)
),
),
)
await models.flow_runs.set_flow_run_state(
session=session,
flow_run_id=flow_run_3.id,
state=schemas.states.Pending(),
)
# flow run 4 is in a RUNNING state with no tags
flow_run_4 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
id=flow_run_4_id,
flow_id=deployment.flow_id,
deployment_id=deployment.id,
flow_version="0.1",
),
)
await models.flow_runs.set_flow_run_state(
session=session,
flow_run_id=flow_run_4.id,
state=schemas.states.State(
type=schemas.states.StateType.SCHEDULED,
timestamp=current_time - datetime.timedelta(seconds=5),
state_details=dict(
scheduled_time=current_time - datetime.timedelta(seconds=1)
),
),
)
await models.flow_runs.set_flow_run_state(
session=session,
flow_run_id=flow_run_4.id,
state=schemas.states.Pending(),
)
await models.flow_runs.set_flow_run_state(
session=session,
flow_run_id=flow_run_4.id,
state=schemas.states.Running(),
)
# flow run 5 is in a SCHEDULED state 1 year in the future
flow_run_5 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
id=flow_run_5_id,
flow_id=deployment.flow_id,
deployment_id=deployment.id,
flow_version="0.1",
),
)
await models.flow_runs.set_flow_run_state(
session=session,
flow_run_id=flow_run_5.id,
state=schemas.states.State(
type=schemas.states.StateType.SCHEDULED,
timestamp=current_time - datetime.timedelta(seconds=5),
state_details=dict(
scheduled_time=current_time + datetime.timedelta(days=365)
),
),
)
# flow run 6 is in a SCHEDULED state 5 seconds ago but has no
# deployment_id, it should never be returned by the queue
flow_run_6 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
id=flow_run_6_id,
flow_id=deployment.flow_id,
flow_version="0.1",
),
)
await models.flow_runs.set_flow_run_state(
session=session,
flow_run_id=flow_run_6.id,
state=schemas.states.State(
type=schemas.states.StateType.SCHEDULED,
timestamp=current_time - datetime.timedelta(seconds=5),
state_details=dict(
scheduled_time=current_time - datetime.timedelta(seconds=1)
),
),
)
# flow run 7 is in a RUNNING state but has no
# deployment_id, it should never be returned by the queue
# or count against concurrency limits
flow_run_7 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
id=flow_run_7_id,
flow_id=deployment.flow_id,
flow_version="0.1",
),
)
await models.flow_runs.set_flow_run_state(
session=session,
flow_run_id=flow_run_7.id,
state=schemas.states.State(
type=schemas.states.StateType.SCHEDULED,
timestamp=current_time - datetime.timedelta(seconds=5),
state_details=dict(
scheduled_time=current_time - datetime.timedelta(seconds=1)
),
),
)
await models.flow_runs.set_flow_run_state(
session=session,
flow_run_id=flow_run_7.id,
state=schemas.states.Pending(),
)
await models.flow_runs.set_flow_run_state(
session=session,
flow_run_id=flow_run_7.id,
state=schemas.states.Running(),
)
await session.commit()
async def test_get_runs_in_work_queue_returns_scheduled_runs(
self,
session,
work_queue,
flow_run_1_id,
flow_run_2_id,
):
# should only return SCHEDULED runs before NOW with
# a deployment_id
current_time = now("UTC")
_, runs = await models.work_queues.get_runs_in_work_queue(
session=session,
work_queue_id=work_queue.id,
scheduled_before=current_time,
)
assert {run.id for run in runs} == {flow_run_1_id, flow_run_2_id}
# should respect limit param
_, limited_runs = await models.work_queues.get_runs_in_work_queue(
session=session,
work_queue_id=work_queue.id,
scheduled_before=current_time,
limit=1,
)
# flow run 2 is scheduled to start before flow run 1
assert {run.id for run in limited_runs} == {flow_run_2_id}
# should respect scheduled before param
_, runs_from_babylon = await models.work_queues.get_runs_in_work_queue(
session=session,
work_queue_id=work_queue.id,
scheduled_before=current_time - datetime.timedelta(days=365 * 2000),
limit=1,
)
assert runs_from_babylon == []
async def test_get_runs_in_work_queue_filters_on_tags(
self,
session,
tb12_work_queue,
flow_run_2_id,
):
# should only return SCHEDULED runs before NOW with
# a deployment_id and tags ["tb12"]
current_time = now("UTC")
_, runs = await models.work_queues.get_runs_in_work_queue(
session=session,
work_queue_id=tb12_work_queue.id,
scheduled_before=current_time,
)
assert {run.id for run in runs} == {flow_run_2_id}
async def test_get_runs_in_work_queue_filters_on_deployment_ids(
self,
session,
deployment,
flow_run_1_id,
flow_run_2_id,
):
# should only return SCHEDULED runs before NOW with
# the correct deployment_id
current_time = now("UTC")
deployment_work_queue = await models.work_queues.create_work_queue(
session=session,
work_queue=schemas.actions.WorkQueueCreate(
name=f"Work Queue for Deployment {deployment.name}",
filter=schemas.core.QueueFilter(
deployment_ids=[deployment.id, uuid4()]
),
),
)
_, runs = await models.work_queues.get_runs_in_work_queue(
session=session,
work_queue_id=deployment_work_queue.id,
scheduled_before=current_time,
)
assert {run.id for run in runs} == {flow_run_1_id, flow_run_2_id}
bad_deployment_work_queue = await models.work_queues.create_work_queue(
session=session,
work_queue=schemas.actions.WorkQueueCreate(
name="Work Queue for Deployment that doesn't exist",
filter=schemas.core.QueueFilter(deployment_ids=[uuid4()]),
),
)
_, runs = await models.work_queues.get_runs_in_work_queue(
session=session,
work_queue_id=bad_deployment_work_queue.id,
scheduled_before=current_time,
)
assert runs == []
async def test_get_runs_in_work_queue_uses_union_of_filter_criteria(self, session):
# tags "tb12" will match but the deployment ids should not match any flow runs
current_time = now("UTC")
conflicting_filter_work_queue = await models.work_queues.create_work_queue(
session=session,
work_queue=schemas.actions.WorkQueueCreate(
name="Work Queue for Deployment that doesn't exist",
filter=schemas.core.QueueFilter(
deployment_ids=[uuid4()], tags=["tb12"]
),
),
)
_, runs = await models.work_queues.get_runs_in_work_queue(
session=session,
work_queue_id=conflicting_filter_work_queue.id,
scheduled_before=current_time,
)
assert runs == []
async def test_get_runs_in_work_queue_respects_concurrency_limit(
self,
session,
work_queue,
flow_run_1_id,
flow_run_2_id,
):
_, runs = await models.work_queues.get_runs_in_work_queue(
session=session,
work_queue_id=work_queue.id,
scheduled_before=now("UTC"),
)
assert {run.id for run in runs} == {flow_run_1_id, flow_run_2_id}
# add a concurrency limit
await models.work_queues.update_work_queue(
session=session,
work_queue_id=work_queue.id,
work_queue=schemas.actions.WorkQueueUpdate(concurrency_limit=2),
)
# since there is one PENDING and one RUNNING flow run, no runs
# should be returned
_, runs = await models.work_queues.get_runs_in_work_queue(
session=session,
work_queue_id=work_queue.id,
scheduled_before=now("UTC"),
)
assert runs == []
# since there is one PENDING and one RUNNING flow run, no runs
# should be returned, even if a larger limit has been provided
_, runs = await models.work_queues.get_runs_in_work_queue(
session=session,
work_queue_id=work_queue.id,
scheduled_before=now("UTC"),
limit=9001,
)
assert runs == []
# increase the concurrency limit
await models.work_queues.update_work_queue(
session=session,
work_queue_id=work_queue.id,
work_queue=schemas.actions.WorkQueueUpdate(concurrency_limit=3),
)
# since there is one PENDING and one RUNNING flow run, one
# flow run should be returned
_, runs = await models.work_queues.get_runs_in_work_queue(
session=session,
work_queue_id=work_queue.id,
scheduled_before=now("UTC"),
)
assert {run.id for run in runs} == {flow_run_2_id}
async def test_get_runs_in_work_queue_respects_concurrency_limit_of_0(
self,
session,
work_queue,
):
# set concurrency limit to 0
await models.work_queues.update_work_queue(
session=session,
work_queue_id=work_queue.id,
work_queue=schemas.actions.WorkQueueUpdate(concurrency_limit=0),
)
_, runs = await models.work_queues.get_runs_in_work_queue(
session=session,
work_queue_id=work_queue.id,
scheduled_before=now("UTC"),
)
assert runs == []
async def test_get_runs_in_work_queue_raises_object_not_found_error(self, session):
with pytest.raises(ObjectNotFoundError):
await models.work_queues.get_runs_in_work_queue(
session=session,
work_queue_id=uuid4(),
scheduled_before=now("UTC"),
)
| TestGetRunsInWorkQueue |
python | keras-team__keras | keras/src/optimizers/base_optimizer.py | {
"start": 373,
"end": 49975
} | class ____(KerasSaveable):
"""Abstract optimizer base class.
If you intend to create your own optimization algorithm, please inherit from
this class and override the following methods:
- `build`: Create your optimizer-related variables, such as momentum
variables in the SGD optimizer.
- `update_step`: Implement your optimizer's variable updating logic.
- `get_config`: serialization of the optimizer.
Example:
```python
class SGD(Optimizer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.momentum = 0.9
def build(self, variables):
super().build(variables)
self.momentums = []
for variable in variables:
self.momentums.append(
self.add_variable_from_reference(
reference_variable=variable, name="momentum"
)
)
def update_step(self, gradient, variable, learning_rate):
learning_rate = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
m = self.momentums[self._get_variable_index(variable)]
self.assign(
m,
ops.subtract(
ops.multiply(m, ops.cast(self.momentum, variable.dtype)),
ops.multiply(gradient, learning_rate),
),
)
self.assign_add(variable, m)
def get_config(self):
config = super().get_config()
config.update(
{
"momentum": self.momentum,
"nesterov": self.nesterov,
}
)
return config
```
"""
def __init__(
self,
learning_rate,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name=None,
**kwargs,
):
self._lock = False
if kwargs.pop("decay", None) is not None:
warnings.warn(
"Argument `decay` is no longer supported and will be ignored."
)
if kwargs:
raise ValueError(f"Argument(s) not recognized: {kwargs}")
if name is None:
name = auto_name(self.__class__.__name__)
self.name = name
self.weight_decay = weight_decay
self.clipnorm = clipnorm
self.global_clipnorm = global_clipnorm
self.clipvalue = clipvalue
self.use_ema = use_ema
self.loss_scale_factor = loss_scale_factor
self.gradient_accumulation_steps = gradient_accumulation_steps
if gradient_accumulation_steps:
if not gradient_accumulation_steps >= 2:
raise ValueError(
"`gradient_accumulation_steps` must be an integer >= 2. "
"Received: gradient_accumulation_steps="
f"{gradient_accumulation_steps}"
)
if use_ema:
# Verify the arguments related to EMA.
if ema_momentum > 1 or ema_momentum < 0:
raise ValueError(
"`ema_momentum` must be in the range [0, 1]. "
f"Received: ema_momentum={ema_momentum}"
)
if ema_overwrite_frequency and (
not isinstance(ema_overwrite_frequency, int)
or ema_overwrite_frequency < 1
):
raise ValueError(
"`ema_overwrite_frequency` must be an integer >= 1 or "
"None. Received: ema_overwrite_frequency="
f"{ema_overwrite_frequency}"
)
self.ema_momentum = ema_momentum
self.ema_overwrite_frequency = ema_overwrite_frequency
clip_args_sum = sum(
a is not None for a in [clipnorm, clipvalue, global_clipnorm]
)
if clip_args_sum > 1:
raise ValueError(
"Only one of `clipnorm`, `clipvalue` and `global_clipnorm` can "
f"be set. Received: clipnorm={clipnorm}, "
f"clipvalue={clipvalue}, global_clipnorm={global_clipnorm}"
)
self.built = False
# Set up variable tracking.
self._variables = []
self._trainable_variables = []
self._tracker = tracking.Tracker(
{
"variables": (
lambda x: isinstance(x, backend.Variable),
self._variables,
),
}
)
self._trainable_variables_indices = {}
# Create iteration variable
# Note: dtype="int" will resolve to int32 in JAX
# (since int64 is disallowed in JAX) and to int64 in TF.
with backend.name_scope(self.name, caller=self):
iterations = backend.Variable(
0,
name="iteration",
dtype="int",
trainable=False,
aggregation="only_first_replica",
)
self._track_variable(iterations)
self._iterations = iterations
# Create learning rate (schedule or variable)
if isinstance(
learning_rate, learning_rate_schedule.LearningRateSchedule
):
self._learning_rate = learning_rate
elif callable(learning_rate):
self._learning_rate = learning_rate
else:
if not isinstance(learning_rate, float):
raise ValueError(
"Argument `learning_rate` should be float, or an instance "
"of LearningRateSchedule, or a callable "
"(that takes in the current iteration value "
"and returns the corresponding learning rate value). "
f"Received instead: learning_rate={learning_rate}"
)
with backend.name_scope(self.name, caller=self):
learning_rate = backend.Variable(
learning_rate,
name="learning_rate",
dtype=backend.floatx(),
trainable=False,
aggregation="only_first_replica",
)
self._track_variable(learning_rate)
self._learning_rate = learning_rate
@property
def iterations(self):
if self.gradient_accumulation_steps:
return ops.floor_divide(
self._iterations, self.gradient_accumulation_steps
)
return self._iterations
def _track_variable(self, variable):
self._tracker.add_to_store("variables", variable)
def _overwrite_variable_with_gradient(self, variable):
return getattr(variable, "overwrite_with_gradient", False)
@tracking.no_automatic_dependency_tracking
def build(self, variables):
if self.use_ema:
self._model_variables_moving_average = self.add_optimizer_variables(
variables, "average"
)
if self.gradient_accumulation_steps:
self._accumulated_gradients = []
for i, variable in enumerate(variables):
self._trainable_variables_indices[self._var_key(variable)] = i
if self.gradient_accumulation_steps:
self._accumulated_gradients.append(
self.add_variable_from_reference(
variable,
name="gradient_accumulator",
)
)
self._trainable_variables = variables[:]
self.built = True
def _var_key(self, variable):
# Helper function to get a stable ID and the variable instance mapping.
return id(variable)
@property
def variables(self):
return self._variables[:]
def _get_variable_index(self, variable):
return self._trainable_variables_indices[self._var_key(variable)]
def add_variable(
self,
shape,
initializer="zeros",
dtype=None,
aggregation="none",
layout=None,
name=None,
):
"""Add a variable to the optimizer.
Args:
shape: Shape tuple for the variable. Must be fully-defined
(no `None` entries).
initializer: Initializer object to use to populate the initial
variable value, or string name of a built-in initializer
(e.g. `"random_normal"`). Defaults to `"zeros"`.
dtype: Dtype of the variable to create, e.g. `"float32"`. If
unspecified, defaults to the `keras.backend.floatx()`.
aggregation: Optional string, one of `None`, `"none"`, `"mean"`,
`"sum"` or `"only_first_replica"`. Annotates the variable with
the type of multi-replica aggregation to be used for this
variable when writing custom data parallel training loops.
Defaults to `"none"`.
layout: Optional tensor layout. Defaults to `None`.
name: String name of the variable. Useful for debugging purposes.
Returns:
An optimizer variable, in the format of `keras.Variable`.
"""
self._check_super_called()
initializer = initializers.get(initializer)
with backend.name_scope(self.name, caller=self):
variable = backend.Variable(
initializer=initializer,
shape=shape,
dtype=dtype,
trainable=False,
aggregation=aggregation,
layout=layout,
name=name,
)
self._track_variable(variable)
return variable
def add_variable_from_reference(
self, reference_variable, name=None, initializer="zeros"
):
"""Add an optimizer variable from the model variable.
Create an optimizer variable based on the information of model variable.
For example, in SGD optimizer momemtum, for each model variable, a
corresponding momemtum variable is created of the same shape and dtype.
Args:
reference_variable: `keras.Variable`. The corresponding model
variable to the optimizer variable to be created.
name: Optional string. The name prefix of the optimizer variable to
be created. If not provided, it will be set to `"var"`. The
variable name will follow the pattern
`{variable_name}_{reference_variable.name}`,
e.g., `momemtum/dense_1`. Defaults to `None`.
initializer: Initializer object to use to populate the initial
variable value, or string name of a built-in initializer
(e.g. `"random_normal"`). If unspecified, defaults to
`"zeros"`.
Returns:
An optimizer variable, in the format of `keras.Variable`.
"""
name = name or "var"
if hasattr(reference_variable, "path"):
name = f"{reference_variable.path.replace('/', '_')}_{name}"
else:
sanitised_ref_name = (
str(reference_variable.name).replace("/", "_").replace(":", "_")
)
name = f"{sanitised_ref_name}_{name}"
return self.add_variable(
shape=reference_variable.shape,
initializer=initializer,
dtype=reference_variable.dtype,
name=name,
layout=getattr(reference_variable, "_layout", None),
)
def add_optimizer_variables(
self, trainable_variables, name, initializer="zeros"
):
"""Add optimizer variables from the list of trainable model variables.
Create an optimizer variable based on the information of the supplied
model variables. For example, in SGD optimizer momemtum, for each model
variable, a corresponding momemtum variable is created of the same shape
and dtype.
Note that trainable variables with `v.overwrite_with_gradient == True`
will insert `None`, into the output list, since the optimizer variable
will not be used anyways, and could be wasteful.
Args:
trainable_variables: `keras.Variable`, the corresponding model
variable to the optimizer variable to be created.
name: The name prefix(es) of the optimizer variable(s) to be
created. Can be a single string or list of strings. If a
list of strings, will create an optimizer variable for each
prefix. The variable name will follow the pattern
`{variable_name}_{trainable_variable.name}`, e.g.,
`momemtum/dense_1`.
initializer: Initializer object(s) to use to populate the initial
variable value(s), or string name of a built-in initializer
(e.g. `"random_normal"`). If unspecified, defaults to
`"zeros"`.
Returns:
A list of optimizer variables, in the format of `keras.Variable`s.
If multiple names are provide, returns a tuple of lists.
"""
name_list = name
initializer_list = initializer
if isinstance(name, str):
# Single name/initializer.
name_list = [name]
initializer_list = [initializer]
else:
# Multiple names/initializers.
# If there is only one initializer, use it for all names.
if isinstance(initializer, str) or isinstance(
initializer, initializers.Initializer
):
initializer_list = [initializer] * len(name_list)
if len(name_list) != len(initializer_list):
raise ValueError(
f"The number of provided names must match the number of "
f"provided initializers. Received name='{name}', "
f"initializer='{initializer}'"
)
# Build up lists of optimizer variables.
optimizer_variables = tuple([] for _ in name_list)
for variable in trainable_variables:
# Interleaves adding variables for backward-compatibility.
if not self._overwrite_variable_with_gradient(variable):
for i, (var_name, var_init) in enumerate(
zip(name_list, initializer_list)
):
optimizer_variables[i].append(
self.add_variable_from_reference(
variable,
name=var_name,
initializer=var_init,
)
)
else:
for i in range(len(name_list)):
optimizer_variables[i].append(None)
# If single input name, return the single list.
if isinstance(name, str):
return optimizer_variables[0]
return optimizer_variables
def _check_variables_are_known(self, variables):
for v in variables:
if self._var_key(v) not in self._trainable_variables_indices:
raise ValueError(
f"Unknown variable: {v}. This optimizer can only "
"be called for the variables it was originally built with. "
"When working with a new set of variables, you should "
"recreate a new optimizer instance."
)
def assign(self, variable, value):
"""Assign a value to a variable.
This should be used in optimizers instead of `variable.assign(value)` to
support backend specific optimizations.
Note that the variable can be a model variable or an optimizer variable;
it can be a backend native variable or a Keras variable.
Args:
variable: The variable to update.
value: The value to add to the variable.
"""
variable.assign(value)
def assign_add(self, variable, value):
"""Add a value to a variable.
This should be used in optimizers instead of
`variable.assign_add(value)` to support backend specific optimizations.
Note that the variable can be a model variable or an optimizer variable;
it can be a backend native variable or a Keras variable.
Args:
variable: The variable to update.
value: The value to add to the variable.
"""
variable.assign_add(value)
def assign_sub(self, variable, value):
"""Subtract a value from a variable.
This should be used in optimizers instead of
`variable.assign_sub(value)` to support backend specific optimizations.
Note that the variable can be a model variable or an optimizer variable;
it can be a backend native variable or a Keras variable.
Args:
variable: The variable to update.
value: The value to add to the variable.
"""
variable.assign_sub(value)
def update_step(self, gradient, variable, learning_rate):
raise NotImplementedError
def apply_gradients(self, grads_and_vars):
grads, trainable_variables = zip(*grads_and_vars)
self.apply(grads, trainable_variables)
# Return iterations for compat with tf.keras.
return self._iterations
def apply(self, grads, trainable_variables=None):
"""Update traininable variables according to provided gradient values.
`grads` should be a list of gradient tensors
with 1:1 mapping to the list of variables the optimizer was built with.
`trainable_variables` can be provided
on the first call to build the optimizer.
"""
if len(grads) == 0:
# It is possible that the grad is empty. In this case,
# `apply_gradients` is a no-op.
return
if trainable_variables is None:
if not self.built:
raise ValueError(
"When passing `grads` without `variables`, the optimizer "
"must already be built on a list of variables. "
"Call `optimizer.build(trainable_variables)` first. "
)
if len(grads) != len(self._trainable_variables_indices):
raise ValueError(
"When passing `grads` as a list of gradient tensors, the "
f"gradients must match `optimizer.variables` one-to-on. "
f"Received a list of {len(grads)} gradients, but the "
f"optimizer is tracking {len(self._trainable_variables)} "
"trainable variables."
)
trainable_variables = self._trainable_variables
else:
trainable_variables = list(trainable_variables)
# Optionally build optimizer.
if not self.built:
with backend.name_scope(self.name, caller=self):
self.build(trainable_variables)
self.built = True
self._check_variables_are_known(trainable_variables)
with backend.name_scope(self.name, caller=self):
# Filter empty gradients.
grads, trainable_variables = self._filter_empty_gradients(
grads, trainable_variables
)
# Overwrite targeted variables directly with their gradients if
# their `overwrite_with_gradient` is set.
grads, trainable_variables = (
self._overwrite_variables_directly_with_gradients(
grads, trainable_variables
)
)
if len(list(grads)) > 0:
# Unscale gradients.
scale = self.loss_scale_factor
if scale is not None:
grads = [g if g is None else g / scale for g in grads]
# Apply gradient updates.
self._backend_apply_gradients(grads, trainable_variables)
# Apply variable constraints after applying gradients.
for variable in trainable_variables:
if variable.constraint is not None:
variable.assign(variable.constraint(variable))
# Update iteration counter.
self._iterations.assign_add(1)
def _backend_apply_gradients(self, grads, trainable_variables):
"""Apply method that can be overridden by different backends.
JAX overrides it in order to deal with statelessness in gradient
accumulation and EMA handling.
The below implementation is intended to be generally backend-agnostic,
but may not work with all backends.
This method does 4 things:
- Call the optimizer's update_step() to update trainable variables
and optimizer variables.
- Update EMA variables, if EMA is configured.
- Update gradient accumulators, if gradient accumulation is configured.
- Update the iteration counter.
"""
if self.gradient_accumulation_steps:
is_update_step = (
self._iterations + 1
) % self.gradient_accumulation_steps == 0
# `trainable_variables` might have been filtered in previous
# processing steps, so we need to ensure the correct mapping between
# `self._accumulated_gradients` and `trainable_variables`
acc_grads = [
self._accumulated_gradients[self._get_variable_index(v)]
for v in trainable_variables
]
def _update_step_fn(grads, trainable_variables):
# Run update step with accumulated grads + reset accumulators
steps = self.gradient_accumulation_steps
grads = [
(g + acc_g) / steps for g, acc_g in zip(grads, acc_grads)
]
# Apply clipping and weight decay.
grads = self._clip_gradients(grads)
self._apply_weight_decay(trainable_variables)
self._backend_update_step(
grads, trainable_variables, self.learning_rate
)
self._backend_reset_gradient_accumulators()
ops.cond(
is_update_step,
lambda: _update_step_fn(grads, trainable_variables),
lambda: self._backend_increment_gradient_accumulators(
grads, acc_grads
),
)
else:
# Apply clipping and weight decay.
grads = self._clip_gradients(grads)
self._apply_weight_decay(trainable_variables)
# Run update step.
self._backend_update_step(
grads, trainable_variables, self.learning_rate
)
if self.use_ema:
self._update_model_variables_moving_average(
self._trainable_variables
)
if self.ema_overwrite_frequency:
# Only when self.ema_overwrite_frequency is not None, we
# overwrite the model variables.
should_overwrite_model_vars = (
self.iterations + 1
) % self.ema_overwrite_frequency == 0
ops.cond(
should_overwrite_model_vars,
lambda: self._overwrite_model_variables_with_average_value(
self._trainable_variables
),
lambda: None,
)
def _backend_update_step(self, grads, trainable_variables, learning_rate):
"""Collective update_step that can be overridden by the backend.
It is overridden by torch for performance reasons, and
by TF to support tf.distribute.
"""
for grad, var in zip(grads, trainable_variables):
self.update_step(grad, var, learning_rate)
def _backend_reset_gradient_accumulators(self):
for g_acc in self._accumulated_gradients:
if g_acc is not None:
g_acc.assign(ops.zeros(g_acc.shape, dtype=g_acc.dtype))
def _backend_increment_gradient_accumulators(self, grads, acc_grads):
new_g_accs = [(g + acc_g) for g, acc_g in zip(grads, acc_grads)]
for n_g_acc, g_acc in zip(new_g_accs, acc_grads):
g_acc.assign(n_g_acc)
def stateless_apply(self, optimizer_variables, grads, trainable_variables):
"""Stateless version of `apply` that returns modified variables.
Args:
optimizer_variables: list of tensors containing the current values
for the optimizer variables. These are native tensors and not
`keras.Variable`s.
grads: list of gradients to apply.
trainable_variables: list of tensors containing the current values
for the model variables. These are native tensors and not
`keras.Variable`s.
Returns: A tuple containing two list of tensors, the updated
`trainable_variables` and the updated `optimizer_variables`.
"""
self._check_super_called()
if not self.built:
raise ValueError(
f"To call `stateless_apply`, {self.__class__.__name__} "
"must be built (i.e. its variables must have been created). "
"You can build it via `optimizer.build(trainable_variables)`."
)
if len(optimizer_variables) != len(self.variables):
raise ValueError(
"Argument `optimizer_variables` must be a list of tensors "
f"corresponding 1:1 to {self.__class__.__name__}().variables. "
f"Received list with length {len(optimizer_variables)}, but "
f"expected {len(self.variables)} variables."
)
if len(trainable_variables) != len(self._trainable_variables):
raise ValueError(
"Argument `optimizer_variables` must be a list of tensors "
"corresponding 1:1 to the trainable variables list that "
"the optimizer was built with. Received "
f"len(trainable_variables) == {len(trainable_variables)} "
"whereas the optimizer was built with "
f"{len(self._trainable_variables)} variables."
)
# Gather variable mapping
mapping = list(
zip(self._trainable_variables, trainable_variables)
) + list(zip(self.variables, optimizer_variables))
# Call in stateless scope
with backend.StatelessScope(state_mapping=mapping) as scope:
self.apply(grads)
# Gather updated variables
trainable_variables = []
for v in self._trainable_variables:
new_v = scope.get_current_value(v)
if new_v is not None:
trainable_variables.append(new_v)
else:
trainable_variables.append(v)
optimizer_variables = []
for v in self.variables:
new_v = scope.get_current_value(v)
if new_v is not None:
optimizer_variables.append(new_v)
else:
optimizer_variables.append(v)
return trainable_variables, optimizer_variables
def scale_loss(self, loss):
"""Scale the loss before computing gradients.
Scales the loss before gradients are computed in a `train_step`. This
is primarily useful during mixed precision training to prevent numeric
underflow.
"""
if self.loss_scale_factor is not None:
return loss * self.loss_scale_factor
return loss
    @property
    def learning_rate(self):
        """Current learning rate, resolved from whichever form is stored
        (schedule, variable, callable, or plain number)."""
        return self._get_current_learning_rate()
    @learning_rate.setter
    def learning_rate(self, learning_rate):
        """Replace the learning rate with a schedule, callable, or number."""
        # Remember a previously tracked backing `Variable` so it can be
        # untracked below if the new learning rate is no longer
        # variable-backed.
        if isinstance(self._learning_rate, backend.Variable):
            prev_lr_var = self._learning_rate
        else:
            prev_lr_var = None
        if isinstance(
            learning_rate, learning_rate_schedule.LearningRateSchedule
        ):
            # Schedules and callables replace the stored value outright; they
            # are evaluated lazily by `_get_current_learning_rate`.
            self._learning_rate = learning_rate
        elif callable(learning_rate):
            self._learning_rate = learning_rate
        else:
            # Plain numbers are assigned into the existing backing variable,
            # which is only valid if the optimizer was not constructed with a
            # schedule.
            if isinstance(
                self._learning_rate, learning_rate_schedule.LearningRateSchedule
            ):
                raise TypeError(
                    "This optimizer was created with a `LearningRateSchedule`"
                    " object as its `learning_rate` constructor argument, "
                    "hence its learning rate is not settable. If you need the"
                    " learning rate to be settable, you should instantiate "
                    "the optimizer with a float `learning_rate` argument."
                )
            self._learning_rate.assign(learning_rate)
        if prev_lr_var is not None and not isinstance(
            self._learning_rate, backend.Variable
        ):
            # Untrack learning rate variable
            self._untrack_variable(prev_lr_var)
def set_weights(self, weights):
"""Set the weights of the optimizer."""
if not self.built:
raise ValueError(
"You are calling `set_weights()` on an optimizer that has not "
"yet been built. Please call "
"`optimizer.build(trainable_variables)` to create the "
"optimizer weights before calling `set_weights()`."
)
for variable, weight in zip(self._variables, weights):
if variable.shape != weight.shape:
raise ValueError(
f"Optimizer variable {self._var_key(variable)} has shape "
f"{str(variable.shape)} not compatible with provided "
f"weight shape {str(weight.shape)}."
)
variable.assign(weight)
def save_own_variables(self, store):
"""Get the state of this optimizer object."""
for i, variable in enumerate(self.variables):
store[str(i)] = variable.numpy()
def load_own_variables(self, store):
"""Set the state of this optimizer object."""
if len(store.keys()) != len(self.variables):
msg = (
f"Skipping variable loading for optimizer '{self.name}', "
f"because it has {len(self.variables)} variables whereas "
f"the saved optimizer has {len(store.keys())} variables. "
)
if len(self.variables) == 0:
msg += (
"This is likely because the optimizer has not been "
"called/built yet."
)
warnings.warn(msg, stacklevel=2)
return
for i, variable in enumerate(self.variables):
variable.assign(store[str(i)])
    def _get_current_learning_rate(self):
        """Resolve the stored learning rate into a concrete value.

        Supports four storage forms: a `LearningRateSchedule` (evaluated at
        the current iteration count), a backend `Variable`, a zero-argument
        callable, or a plain number.
        """
        # Branch order matters: a schedule is itself callable (and a backend
        # Variable presumably may be too), so the specific isinstance checks
        # must run before the generic `callable()` test.
        if isinstance(
            self._learning_rate, learning_rate_schedule.LearningRateSchedule
        ):
            return self._learning_rate(self._iterations)
        elif isinstance(self._learning_rate, backend.Variable):
            return self._learning_rate
        elif callable(self._learning_rate):
            return self._learning_rate()
        return self._learning_rate
    def _overwrite_variables_directly_with_gradients(self, grads, vars):
        """Overwrite the variables directly by their gradients.

        This method is designed for a special case where we want to overwrite
        the variable directly with its computed gradient. For example, in float8
        training, new `scale` and `amax_history` are computed as gradients, and
        we want to overwrite them directly instead of following the typical
        procedure such as gradient descent with a learning rate, gradient
        clipping and weight decaying.

        After the update, the processed pairs will be filtered out.

        Returns:
            A `(grads, vars)` pair with the overwrite-style pairs removed.
        """
        # Shortcut for `tf.Variable` because it doesn't have a
        # `overwrite_with_gradient` attr.
        if not any(self._overwrite_variable_with_gradient(v) for v in vars):
            return grads, vars
        # Shallow copies
        filtered_grads = list(grads)
        filtered_vars = list(vars)
        # Iterate from right to left for safe popping
        for i in range(len(filtered_grads) - 1, -1, -1):
            g, v = filtered_grads[i], filtered_vars[i]
            if self._overwrite_variable_with_gradient(v):
                if self.gradient_accumulation_steps:
                    # Utilize a stateless manner for JAX compatibility
                    steps = self.gradient_accumulation_steps
                    is_update_step = (self._iterations + 1) % steps == 0
                    acc_g = self._accumulated_gradients[
                        self._get_variable_index(v)
                    ]
                    # `ops.maximum` is utilized for gradient accumulation for
                    # `overwrite_with_gradient=True` variables
                    # NOTE: the lambdas close over this iteration's `g` and
                    # `acc_g` but are consumed by `ops.cond` immediately, so
                    # the usual loop/late-binding pitfall does not apply.
                    new_g_acc = ops.cond(
                        is_update_step,
                        lambda: ops.zeros(g.shape, dtype=g.dtype),
                        lambda: ops.maximum(g, acc_g),
                    )
                    new_g = ops.cond(
                        is_update_step,
                        lambda: ops.maximum(g, acc_g),
                        lambda: g,
                    )
                    new_v = ops.cond(
                        is_update_step, lambda: new_g, lambda: v.value
                    )
                    v.assign(new_v)
                    acc_g.assign(new_g_acc)
                else:
                    # No accumulation: the gradient simply becomes the new
                    # variable value.
                    v.assign(g)
                filtered_grads.pop(i)
                filtered_vars.pop(i)
        return filtered_grads, filtered_vars
def _filter_empty_gradients(self, grads, vars):
filtered_grads = list(grads)
filtered_vars = list(vars)
missing_grad_vars = []
# Iterate from right to left for safe popping
for i in range(len(filtered_grads) - 1, -1, -1):
if filtered_grads[i] is None:
filtered_grads.pop(i)
v = filtered_vars.pop(i)
try:
missing_grad_vars.append(v.path)
except AttributeError:
# `tf.Variable` doesn't have `path` attr.
missing_grad_vars.append(v.name)
if not filtered_grads:
raise ValueError("No gradients provided for any variable.")
if missing_grad_vars:
warnings.warn(
"Gradients do not exist for variables "
f"{list(reversed(missing_grad_vars))} when minimizing the loss."
" If using `model.compile()`, did you forget to provide a "
"`loss` argument?"
)
return filtered_grads, filtered_vars
def _clip_gradients(self, grads):
if self.clipnorm and self.clipnorm > 0:
return [
self._clip_by_norm(g) if g is not None else g for g in grads
]
elif self.global_clipnorm and self.global_clipnorm > 0:
return clip_by_global_norm(grads, self.global_clipnorm)
elif self.clipvalue and self.clipvalue > 0:
v = self.clipvalue
return [ops.clip(g, -v, v) if g is not None else g for g in grads]
else:
return grads
def exclude_from_weight_decay(self, var_list=None, var_names=None):
"""Exclude variables from weight decay.
This method must be called before the optimizer's `build` method is
called. You can set specific variables to exclude out, or set a list of
strings as the anchor words, if any of which appear in a variable's
name, then the variable is excluded.
Args:
var_list: A list of `Variable`s to exclude from weight decay.
var_names: A list of strings. If any string in `var_names` appear
in the model variable's name, then this model variable is
excluded from weight decay. For example, `var_names=['bias']`
excludes all bias variables from weight decay.
"""
if hasattr(self, "_built") and self._built:
raise ValueError(
"`exclude_from_weight_decay()` can only be configured before "
"the optimizer is built."
)
# Use a `set` for the ids of `var_list` to speed up the searching
if var_list:
self._exclude_from_weight_decay = set(
self._var_key(variable) for variable in var_list
)
else:
self._exclude_from_weight_decay = set()
# Precompile the pattern for `var_names` to speed up the searching
if var_names and len(var_names) > 0:
self._exclude_from_weight_decay_pattern = re.compile(
"|".join(set(var_names))
)
else:
self._exclude_from_weight_decay_pattern = None
# Reset cache
self._exclude_from_weight_decay_cache = dict()
def _use_weight_decay(self, variable):
variable_id = self._var_key(variable)
# Immediately return the value if `variable_id` hits the cache
if not hasattr(self, "_exclude_from_weight_decay_cache"):
self._exclude_from_weight_decay_cache = dict()
if variable_id in self._exclude_from_weight_decay_cache:
return self._exclude_from_weight_decay_cache[variable_id]
# Determine whether the variable should apply weight decay or not
exclude_from_weight_decay = getattr(
self, "_exclude_from_weight_decay", set()
)
exclude_from_weight_decay_pattern = getattr(
self, "_exclude_from_weight_decay_pattern", None
)
if variable_id in exclude_from_weight_decay:
self._exclude_from_weight_decay_cache[variable_id] = False
return False
if exclude_from_weight_decay_pattern is not None:
if (
re.search(exclude_from_weight_decay_pattern, variable.name)
is not None
):
self._exclude_from_weight_decay_cache[variable_id] = False
return False
self._exclude_from_weight_decay_cache[variable_id] = True
return True
def _apply_weight_decay(self, variables):
if self.weight_decay is None:
return
for variable in variables:
if self._use_weight_decay(variable):
lr = ops.cast(self.learning_rate, variable.dtype)
wd = ops.cast(self.weight_decay, variable.dtype)
variable.assign(variable - variable * wd * lr)
def _check_super_called(self):
if not hasattr(self, "_lock"):
raise RuntimeError(
f"In optimizer '{self.__class__.__name__}', you forgot to call "
"`super().__init__()` as the first statement "
"in the `__init__()` method. "
"Go add it!"
)
def _update_model_variables_moving_average(self, trainable_variables):
"""Update the stored moving average using the latest value."""
if self.use_ema:
for var, average in zip(
trainable_variables, self._model_variables_moving_average
):
if average is not None:
not_first_step = ops.not_equal(self.iterations, 0)
momentum = ops.multiply(
ops.cast(not_first_step, var.dtype), self.ema_momentum
)
average.assign(
ops.add(
ops.multiply(momentum, average),
ops.multiply(ops.subtract(1, momentum), var),
)
)
def _overwrite_model_variables_with_average_value(
self, trainable_variables
):
"""Overwrite model variables with its moving average."""
if len(trainable_variables) != len(
self._model_variables_moving_average
):
raise ValueError(
f"The length of model variables ({len(trainable_variables)}) "
"to override does not match the length of model variables "
"stored in the optimizer "
f"({len(self._model_variables_moving_average)}). Please "
"check if the optimizer was called on your model."
)
for var, average_var in zip(
trainable_variables, self._model_variables_moving_average
):
if average_var is not None:
var.assign(average_var)
def finalize_variable_values(self, var_list):
"""Set the final value of model's trainable variables.
Sometimes there are some extra steps before ending the variable updates,
such as overriding the model variables with its average value.
Args:
var_list: list of model variables.
"""
if self.use_ema:
# If the optimizer uses EMA, then when finalizing, we replace the
# model variable value with its moving average stored inside
# optimizer.
self._overwrite_model_variables_with_average_value(var_list)
    def _obj_type(self):
        # Category tag for this object; presumably consumed by Keras
        # serialization/saving machinery — not visible in this file.
        return "Optimizer"
    def get_config(self):
        """Returns the config of the optimizer.

        An optimizer config is a Python dictionary (serializable)
        containing the configuration of an optimizer.
        The same optimizer can be reinstantiated later
        (without any saved state) from this configuration.

        Subclass optimizer should override this method to include other
        hyperparameters.

        Returns:
            Python dictionary.
        """
        # Serialize the learning rate according to the form it is stored in:
        # schedule -> schedule config, variable/tensor -> float,
        # callable -> serialized object.
        if isinstance(
            self._learning_rate, learning_rate_schedule.LearningRateSchedule
        ):
            learning_rate = learning_rate_schedule.serialize(
                self._learning_rate
            )
        elif isinstance(self._learning_rate, backend.Variable):
            learning_rate = float(self._learning_rate.numpy())
        elif ops.is_tensor(self._learning_rate):
            learning_rate = float(self._learning_rate)
        elif callable(self._learning_rate):
            learning_rate = serialization_lib.serialize_keras_object(
                self._learning_rate
            )
        else:
            # NOTE(review): unexpected forms fall back to a hard-coded 0.5
            # rather than the stored value — confirm this is intentional.
            learning_rate = 0.5
        config = {
            "name": self.name,
            "learning_rate": learning_rate,
            "weight_decay": self.weight_decay,
            "clipnorm": self.clipnorm,
            "global_clipnorm": self.global_clipnorm,
            "clipvalue": self.clipvalue,
            "use_ema": self.use_ema,
            "ema_momentum": self.ema_momentum,
            "ema_overwrite_frequency": self.ema_overwrite_frequency,
            "loss_scale_factor": self.loss_scale_factor,
            "gradient_accumulation_steps": self.gradient_accumulation_steps,
        }
        return config
@classmethod
def from_config(cls, config, custom_objects=None):
"""Creates an optimizer from its config.
This method is the reverse of `get_config`, capable of instantiating the
same optimizer from the config dictionary.
Args:
config: A Python dictionary, typically the output of get_config.
custom_objects: A Python dictionary mapping names to additional
user-defined Python objects needed to recreate this optimizer.
Returns:
An optimizer instance.
"""
if "learning_rate" in config:
if isinstance(config["learning_rate"], dict):
config["learning_rate"] = (
serialization_lib.deserialize_keras_object(
config["learning_rate"], custom_objects=custom_objects
)
)
return cls(**config)
    def __setattr__(self, name, value):
        # Prevent users from attaching state to the
        # layer before `super()` is called -- since that
        # state would silently not be tracked.
        # `_lock` itself is exempt: it is the sentinel that
        # `_check_super_called` tests for, so it must be assignable first.
        if name != "_lock":
            self._check_super_called()
        # Track Variables.
        # We store whatever `track` returns (it may wrap `value`) so
        # tracking stays consistent with the attribute's contents.
        if hasattr(self, "_tracker"):
            value = self._tracker.track(value)
        return super().__setattr__(name, value)
    def _clip_by_norm(self, values, axes=None):
        """Clip `values` so its L2 norm (over `axes`) is at most
        `self.clipnorm`; values with a smaller norm pass through unchanged."""
        # Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm
        l2sum = ops.sum(ops.square(values), axes, keepdims=True)
        pred = l2sum > 0
        # Two-tap tf.where trick to bypass NaN gradients:
        # `sqrt` is only evaluated on strictly positive inputs; a zero sum
        # passes through as-is so its gradient never sees sqrt(0).
        l2sum_safe = ops.where(pred, l2sum, ops.ones_like(l2sum))
        l2norm = ops.where(pred, ops.sqrt(l2sum_safe), l2sum)
        # values * clipnorm / max(l2norm, clipnorm): identity when the norm
        # is already within bounds, otherwise rescales it down to clipnorm.
        intermediate = ops.multiply(values, self.clipnorm)
        values_clip = ops.convert_to_tensor(intermediate) / ops.maximum(
            l2norm, self.clipnorm
        )
        return values_clip
def _untrack_variable(self, variable):
previous_lock_state = self._tracker.locked
self._tracker.unlock()
self._tracker.untrack(variable)
if previous_lock_state is True:
self._tracker.lock()
base_optimizer_keyword_args = """name: String. The name to use
for momentum accumulator weights created by
the optimizer.
weight_decay: Float. If set, weight decay is applied.
clipnorm: Float. If set, the gradient of each weight is individually
clipped so that its norm is no higher than this value.
clipvalue: Float. If set, the gradient of each weight is clipped to be
no higher than this value.
global_clipnorm: Float. If set, the gradient of all weights is clipped
so that their global norm is no higher than this value.
use_ema: Boolean, defaults to `False`.
If `True`, exponential moving average
(EMA) is applied. EMA consists of computing an exponential moving
average of the weights of the model (as the weight values change
after each training batch), and periodically overwriting the
weights with their moving average.
ema_momentum: Float, defaults to 0.99. Only used if `use_ema=True`.
This is the momentum to use when computing
the EMA of the model's weights:
`new_average = ema_momentum * old_average + (1 - ema_momentum) *
current_variable_value`.
ema_overwrite_frequency: Int or None, defaults to None. Only used if
`use_ema=True`. Every `ema_overwrite_frequency` steps of iterations,
we overwrite the model variable by its moving average.
If None, the optimizer
does not overwrite model variables in the middle of training,
and you need to explicitly overwrite the variables
at the end of training by calling
`optimizer.finalize_variable_values()` (which updates the model
variables in-place). When using the built-in `fit()` training loop,
this happens automatically after the last epoch,
and you don't need to do anything.
loss_scale_factor: Float or `None`. If a float, the scale factor will
be multiplied the loss before computing gradients, and the inverse
of the scale factor will be multiplied by the gradients before
updating variables. Useful for preventing underflow during
mixed precision training. Alternately,
`keras.optimizers.LossScaleOptimizer` will
automatically set a loss scale factor.
gradient_accumulation_steps: Int or `None`. If an int, model & optimizer
variables will not be updated at every step; instead they will be
updated every `gradient_accumulation_steps` steps, using the average
value of the gradients since the last update. This is known as
"gradient accumulation". This can be useful
when your batch size is very small, in order to reduce gradient
noise at each update step. EMA frequency will look at "accumulated"
iterations value (optimizer steps // gradient_accumulation_steps).
Learning rate schedules will look at "real" iterations value
(optimizer steps).
"""
def global_norm(value_list):
    """Computes the global norm of multiple tensors.

    `None` entries are ignored; the result is
    sqrt(sum of squared elements over all remaining tensors).
    """
    per_tensor = [ops.sum(ops.square(t)) for t in value_list if t is not None]
    return ops.sqrt(ops.sum(ops.stack(per_tensor)))
def clip_by_global_norm(value_list, clip_norm):
    """Rescale all tensors in `value_list` so their global L2 norm is at
    most `clip_norm`; `None` entries pass through unchanged."""
    use_norm = global_norm(value_list)
    # Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm:
    # this is 1 when use_norm <= clip_norm, else clip_norm / use_norm.
    scale_for_finite = clip_norm * ops.minimum(1.0 / use_norm, 1.0 / clip_norm)
    # If use_norm is any finite number, this is a no-op. For inf/-inf/NaN,
    # this will make scale NaN (use_norm - use_norm is NaN for non-finite
    # norms), deliberately propagating the bad norm to every gradient.
    scale = scale_for_finite + (use_norm - use_norm)
    return [v * scale if v is not None else v for v in value_list]
| BaseOptimizer |
python | Textualize__textual | src/textual/css/_style_properties.py | {
"start": 21204,
"end": 22165
} | class ____:
"""Descriptor for getting and setting the split property.
The split property allows you to specify which edge you want to split.
"""
def __get__(
self, obj: StylesBase, objtype: type[StylesBase] | None = None
) -> DockEdge:
"""Get the Split property.
Args:
obj: The ``Styles`` object.
objtype: The ``Styles`` class.
Returns:
The edge name as a string. Returns "none" if unset or if "none" has been explicitly set.
"""
return obj.get_rule("split", "none") # type: ignore[return-value]
def __set__(self, obj: StylesBase, dock_name: str):
"""Set the Dock property.
Args:
obj: The ``Styles`` object.
dock_name: The name of the dock to attach this widget to.
"""
_rich_traceback_omit = True
if obj.set_rule("split", dock_name):
obj.refresh(layout=True)
| SplitProperty |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 6290,
"end": 6617
} | class ____(models.Model):
question = models.CharField(max_length=200)
pub_date = models.DateTimeField("date published")
places = models.ManyToManyField("Place")
history = HistoricalRecords(
m2m_fields=[places],
inherit=True,
)
class Meta:
abstract = True
| PollParentWithManyToMany |
python | PrefectHQ__prefect | tests/server/models/test_flow_runs.py | {
"start": 13809,
"end": 46518
} | class ____:
@pytest.fixture
async def flow_runs(self, flow, session, db):
await session.execute(sa.delete(db.FlowRun))
flow_2 = await models.flows.create_flow(
session=session,
flow=schemas.core.Flow(name="another-test"),
)
flow_run_1 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id),
)
flow_run_2 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id),
)
flow_run_3 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow_2.id),
)
await session.commit()
return [flow_run_1, flow_run_2, flow_run_3]
async def test_read_flow_runs(self, flow_runs, session):
read_flow_runs = await models.flow_runs.read_flow_runs(session=session)
assert len(read_flow_runs) == 3
async def test_read_flow_runs_applies_limit(self, flow_runs, session):
read_flow_runs = await models.flow_runs.read_flow_runs(session=session, limit=1)
assert len(read_flow_runs) == 1
async def test_read_flow_runs_returns_empty_list(self, session):
read_flow_runs = await models.flow_runs.read_flow_runs(session=session)
assert len(read_flow_runs) == 0
async def test_read_flow_runs_filters_by_ids(self, flow, session):
flow_run_1 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id),
)
flow_run_2 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id),
)
flow_run_3 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id),
)
# any_
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
id=schemas.filters.FlowRunFilterId(any_=[flow_run_1.id])
),
)
assert {res.id for res in result} == {flow_run_1.id}
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
id=schemas.filters.FlowRunFilterId(any_=[flow_run_1.id, flow_run_2.id])
),
)
assert {res.id for res in result} == {flow_run_1.id, flow_run_2.id}
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
id=schemas.filters.FlowRunFilterId(any_=[uuid4()])
),
)
assert len(result) == 0
# not_any_
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
id=schemas.filters.FlowRunFilterId(
not_any_=[flow_run_1.id, flow_run_2.id]
)
),
)
assert {res.id for res in result} == {flow_run_3.id}
async def test_read_flow_runs_filters_by_name(self, flow, session):
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id, name="my flow run 1"),
)
flow_run_2 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id, name="my flow run 2"),
)
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
name=schemas.filters.FlowRunFilterName(any_=["my flow run 2"])
),
)
assert {res.id for res in result} == {flow_run_2.id}
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
name=schemas.filters.FlowRunFilterName(any_=["adkljfldkajfkldjs"])
),
)
assert len(result) == 0
async def test_read_flow_runs_filters_by_tags(self, flow, session):
flow_run_1 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id, tags=["db", "blue"]),
)
flow_run_2 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id, tags=["db"]),
)
flow_run_3 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id),
)
# all_
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
tags=schemas.filters.FlowRunFilterTags(all_=["db", "blue"])
),
)
assert {res.id for res in result} == {flow_run_1.id}
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
tags=schemas.filters.FlowRunFilterTags(all_=["db"])
),
)
assert {res.id for res in result} == {flow_run_1.id, flow_run_2.id}
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
tags=schemas.filters.FlowRunFilterTags(all_=["green"])
),
)
assert len(result) == 0
# is_null_
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
tags=schemas.filters.FlowRunFilterTags(is_null_=True)
),
)
assert {res.id for res in result} == {flow_run_3.id}
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
tags=schemas.filters.FlowRunFilterTags(is_null_=False)
),
)
assert {res.id for res in result} == {flow_run_1.id, flow_run_2.id}
async def test_read_flow_runs_filters_by_states_any(self, flow, session):
flow_run_1 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
state=schemas.states.Running(name="My Running State"),
),
)
flow_run_2 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
state=schemas.states.Completed(name="My Completed State"),
),
)
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id, state=schemas.states.Failed(name="RIP")
),
)
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
state=dict(
type=schemas.filters.FlowRunFilterStateType(any_=["RUNNING"])
)
),
)
assert {res.id for res in result} == {flow_run_1.id}
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
state=dict(
type=schemas.filters.FlowRunFilterStateType(
any_=["RUNNING", "COMPLETED"]
)
)
),
)
assert {res.id for res in result} == {flow_run_1.id, flow_run_2.id}
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
state=dict(
type=schemas.filters.FlowRunFilterStateType(any_=["SCHEDULED"])
)
),
)
assert len(result) == 0
async def test_read_flow_runs_filters_by_flow_versions_any(self, flow, session):
flow_run_1 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id, flow_version="alpha"),
)
flow_run_2 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id, flow_version="beta"),
)
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id),
)
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
flow_version=schemas.filters.FlowRunFilterFlowVersion(any_=["alpha"])
),
)
assert {res.id for res in result} == {flow_run_1.id}
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
flow_version=schemas.filters.FlowRunFilterFlowVersion(
any_=["alpha", "beta"]
)
),
)
assert {res.id for res in result} == {flow_run_1.id, flow_run_2.id}
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
flow_version=schemas.filters.FlowRunFilterFlowVersion(any_=["omega"])
),
)
assert len(result) == 0
async def test_read_flow_runs_filters_by_start_time(self, flow, session):
now_dt = now()
flow_run_1 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
start_time=now_dt - datetime.timedelta(minutes=1),
state=schemas.states.State(
type="COMPLETED",
name="My Completed State 1",
),
),
)
flow_run_2 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
start_time=now_dt,
state=schemas.states.State(
type="COMPLETED",
name="My Completed State 2",
),
),
)
flow_run_3 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
start_time=now_dt + datetime.timedelta(minutes=1),
state=schemas.states.State(
type="COMPLETED",
name="My Completed State 3",
),
),
)
flow_run_4 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
state=schemas.states.State(
type="COMPLETED",
name="My Completed State 4",
),
),
)
# before_
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
start_time=schemas.filters.FlowRunFilterStartTime(
before_=now_dt - datetime.timedelta(seconds=1)
)
),
)
assert {res.id for res in result} == {flow_run_1.id}
# after_
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
start_time=schemas.filters.FlowRunFilterStartTime(after_=now_dt)
),
)
assert {res.id for res in result} == {
flow_run_2.id,
flow_run_3.id,
flow_run_4.id,
}
# before_ AND after_
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
start_time=schemas.filters.FlowRunFilterStartTime(
before_=now_dt + datetime.timedelta(minutes=10),
after_=now_dt + datetime.timedelta(seconds=1),
)
),
)
assert {res.id for res in result} == {flow_run_3.id}
# is_null_
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
start_time=schemas.filters.FlowRunFilterStartTime(is_null_=True)
),
)
assert {res.id for res in result} == {flow_run_4.id}
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
start_time=schemas.filters.FlowRunFilterStartTime(is_null_=False)
),
)
assert {res.id for res in result} == {
flow_run_1.id,
flow_run_2.id,
flow_run_3.id,
}
async def test_read_flow_runs_filters_by_next_scheduled_start_time(
self, flow, session
):
now_dt = now()
flow_run_1 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
next_scheduled_start_time=now_dt - datetime.timedelta(minutes=1),
state=schemas.states.State(
type="COMPLETED",
name="My Completed State",
),
),
)
flow_run_2 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
next_scheduled_start_time=now_dt,
state=schemas.states.State(
type="COMPLETED",
name="My Completed State",
),
),
)
flow_run_3 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
next_scheduled_start_time=now_dt + datetime.timedelta(minutes=1),
state=schemas.states.State(
type="COMPLETED",
name="My Completed State",
),
),
)
# before_
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
next_scheduled_start_time=schemas.filters.FlowRunFilterNextScheduledStartTime(
before_=now_dt - datetime.timedelta(seconds=1)
)
),
)
assert {res.id for res in result} == {flow_run_1.id}
# after_
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
next_scheduled_start_time=schemas.filters.FlowRunFilterNextScheduledStartTime(
after_=now_dt
)
),
)
assert {res.id for res in result} == {flow_run_2.id, flow_run_3.id}
# before_ AND after_
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
next_scheduled_start_time=schemas.filters.FlowRunFilterNextScheduledStartTime(
before_=now_dt + datetime.timedelta(minutes=10),
after_=now_dt + datetime.timedelta(seconds=1),
)
),
)
assert {res.id for res in result} == {flow_run_3.id}
async def test_read_flow_runs_filters_by_expected_start_time(self, flow, session):
now_dt = now()
flow_run_1 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
expected_start_time=now_dt - datetime.timedelta(minutes=1),
state=schemas.states.State(
type="COMPLETED",
name="My Completed State",
),
),
)
flow_run_2 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
expected_start_time=now_dt,
state=schemas.states.State(
type="COMPLETED",
name="My Completed State",
),
),
)
flow_run_3 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
expected_start_time=now_dt + datetime.timedelta(minutes=1),
state=schemas.states.State(
type="COMPLETED",
name="My Completed State",
),
),
)
# before_
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
expected_start_time=schemas.filters.FlowRunFilterExpectedStartTime(
before_=now_dt - datetime.timedelta(seconds=1)
)
),
)
assert {res.id for res in result} == {flow_run_1.id}
# after_
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
expected_start_time=schemas.filters.FlowRunFilterExpectedStartTime(
after_=now_dt
)
),
)
assert {res.id for res in result} == {flow_run_2.id, flow_run_3.id}
# before_ AND after_
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
expected_start_time=schemas.filters.FlowRunFilterExpectedStartTime(
before_=now_dt + datetime.timedelta(minutes=10),
after_=now_dt + datetime.timedelta(seconds=1),
)
),
)
assert len(result) == 1
assert result[0].id == flow_run_3.id
async def test_read_flows_filters_by_deployment_id(self, flow, session):
deployment = await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
name="",
flow_id=flow.id,
),
)
flow_run_1 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id, deployment_id=deployment.id),
)
flow_run_2 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id),
)
# test any_
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
deployment_id=schemas.filters.FlowRunFilterDeploymentId(
any_=[deployment.id]
)
),
)
assert {res.id for res in result} == {flow_run_1.id}
# test is_null_
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
deployment_id=schemas.filters.FlowRunFilterDeploymentId(is_null_=True)
),
)
assert {res.id for res in result} == {flow_run_2.id}
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
deployment_id=schemas.filters.FlowRunFilterDeploymentId(is_null_=False)
),
)
assert {res.id for res in result} == {flow_run_1.id}
async def test_read_flow_runs_filters_by_parent_task_run_ids(self, flow, session):
flow_run_1 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
),
)
task_run = await models.task_runs.create_task_run(
session=session,
task_run=schemas.actions.TaskRunCreate(
flow_run_id=flow_run_1.id, task_key="my-key", dynamic_key="0"
),
)
flow_run_2 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id, parent_task_run_id=task_run.id
),
)
# test any_
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
parent_task_run_id=schemas.filters.FlowRunFilterParentTaskRunId(
any_=[task_run.id]
)
),
)
assert {res.id for res in result} == {flow_run_2.id}
# test is_null_
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
parent_task_run_id=schemas.filters.FlowRunFilterParentTaskRunId(
is_null_=True
)
),
)
assert {res.id for res in result} == {flow_run_1.id}
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
parent_task_run_id=schemas.filters.FlowRunFilterParentTaskRunId(
is_null_=False
)
),
)
assert {res.id for res in result} == {flow_run_2.id}
async def test_read_flow_runs_filters_by_multiple_criteria(self, flow, session):
flow_run_1 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id, tags=["db", "blue"]),
)
flow_run_2 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id, tags=["db"]),
)
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
id=schemas.filters.FlowRunFilterId(any_=[flow_run_1.id]),
tags=schemas.filters.FlowRunFilterTags(all_=["db"]),
),
)
assert {res.id for res in result} == {flow_run_1.id}
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
id=schemas.filters.FlowRunFilterId(any_=[flow_run_2.id]),
tags=schemas.filters.FlowRunFilterTags(all_=["blue"]),
),
)
assert len(result) == 0
# filter using OR
result = await models.flow_runs.read_flow_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
operator="or_",
id=schemas.filters.FlowRunFilterId(any_=[flow_run_2.id]),
tags=schemas.filters.FlowRunFilterTags(all_=["blue"]),
),
)
assert {res.id for res in result} == {flow_run_1.id, flow_run_2.id}
async def test_read_flow_runs_filters_by_flow_criteria(self, flow, session):
flow_run_1 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id),
)
flow_run_2 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id),
)
result = await models.flow_runs.read_flow_runs(
session=session,
flow_filter=schemas.filters.FlowFilter(
id=schemas.filters.FlowFilterId(any_=[flow.id])
),
)
assert len(result) == 2
assert {res.id for res in result} == {flow_run_1.id, flow_run_2.id}
result = await models.flow_runs.read_flow_runs(
session=session,
flow_filter=schemas.filters.FlowFilter(
id=schemas.filters.FlowFilterId(any_=[uuid4()])
),
)
assert len(result) == 0
async def test_read_flow_runs_filters_by_deployment_criteria(
self, flow, deployment, session
):
flow_run_1 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
deployment_id=deployment.id,
state=schemas.states.State(
type="SCHEDULED",
),
),
)
result = await models.flow_runs.read_flow_runs(
session=session,
deployment_filter=schemas.filters.DeploymentFilter(
id=dict(any_=[deployment.id])
),
)
assert {res.id for res in result} == {flow_run_1.id}
result = await models.flow_runs.read_flow_runs(
session=session,
deployment_filter=schemas.filters.DeploymentFilter(id=dict(any_=[uuid4()])),
)
assert len(result) == 0
async def test_read_flow_runs_filters_by_flow_and_task_run_criteria(
self, flow, session
):
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id),
)
flow_run_2 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id),
)
task_run_1 = await models.task_runs.create_task_run(
session=session,
task_run=schemas.actions.TaskRunCreate(
flow_run_id=flow_run_2.id, task_key="my-key", dynamic_key="0"
),
)
result = await models.flow_runs.read_flow_runs(
session=session,
flow_filter=schemas.filters.FlowFilter(
id=schemas.filters.FlowFilterId(any_=[flow.id])
),
task_run_filter=schemas.filters.TaskRunFilter(
id=schemas.filters.TaskRunFilterId(any_=[task_run_1.id])
),
)
assert {res.id for res in result} == {flow_run_2.id}
result = await models.flow_runs.read_flow_runs(
session=session,
flow_filter=schemas.filters.FlowFilter(
id=schemas.filters.FlowFilterId(any_=[flow.id])
),
task_run_filter=schemas.filters.TaskRunFilter(
id=schemas.filters.TaskRunFilterId(any_=[uuid4()])
),
)
assert len(result) == 0
async def test_read_flow_runs_filters_by_work_pool_name(self, flow, session):
work_pool = await models.workers.create_work_pool(
session=session,
work_pool=schemas.actions.WorkPoolCreate(name="work-pool"),
)
work_queue = await models.workers.create_work_queue(
session=session,
work_pool_id=work_pool.id,
work_queue=schemas.actions.WorkQueueCreate(name="work-pool-queue"),
)
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id),
)
flow_run_2 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id, work_queue_id=work_queue.id),
)
result = await models.flow_runs.read_flow_runs(
session=session,
work_pool_filter=schemas.filters.WorkPoolFilter(
name=schemas.filters.WorkPoolFilterName(any_=[work_pool.name])
),
)
assert {res.id for res in result} == {flow_run_2.id}
async def test_read_flow_runs_filters_by_work_queue_id(self, session, flow):
work_pool = await models.workers.create_work_pool(
session=session,
work_pool=schemas.actions.WorkPoolCreate(name="work-pool"),
)
work_queue = await models.workers.create_work_queue(
session=session,
work_pool_id=work_pool.id,
work_queue=schemas.actions.WorkQueueCreate(name="work-pool-queue"),
)
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id),
)
flow_run_2 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id, work_queue_id=work_queue.id),
)
result = await models.flow_runs.read_flow_runs(
session=session,
work_queue_filter=schemas.filters.WorkQueueFilter(
id=schemas.filters.WorkQueueFilterId(any_=[work_queue.id])
),
)
assert {res.id for res in result} == {flow_run_2.id}
async def test_read_flow_runs_applies_sort(self, flow, session):
now_dt = now()
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
state=schemas.states.State(
type="SCHEDULED",
timestamp=now_dt - datetime.timedelta(minutes=1),
),
),
)
flow_run_2 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
state=schemas.states.State(
type="SCHEDULED",
timestamp=now_dt + datetime.timedelta(minutes=1),
),
),
)
await session.commit()
result = await models.flow_runs.read_flow_runs(
session=session,
sort=schemas.sorting.FlowRunSort.EXPECTED_START_TIME_DESC,
limit=1,
)
assert result[0].id == flow_run_2.id
@pytest.mark.filterwarnings(
# SQLAlchemy will create an unawaited coroutine on attribute access failure
"ignore:coroutine '.*' was never awaited"
)
async def test_read_flow_runs_with_only_one_column(self, flow_runs, db, session):
# clear the session to erase cached versions of these flow runs and
# force all data to be reloaded
session.expunge_all()
result = await models.flow_runs.read_flow_runs(
session=session, columns=[db.FlowRun.id]
)
assert {r.id for r in result} == {fr.id for fr in flow_runs}
# name and state_type were not loaded and raise an error
# because the async session is closed
for r in result:
with pytest.raises(sa.exc.MissingGreenlet):
r.name
with pytest.raises(sa.exc.MissingGreenlet):
r.state_type
@pytest.mark.filterwarnings(
# SQLAlchemy will create an unawaited coroutine on attribute access failure
"ignore:coroutine '.*' was never awaited"
)
async def test_read_flow_runs_with_only_two_columns(self, flow_runs, db, session):
# clear the session to erase cached versions of these flow runs and
# force all data to be reloaded
session.expunge_all()
result = await models.flow_runs.read_flow_runs(
session=session, columns=[db.FlowRun.id, db.FlowRun.name]
)
assert {r.id for r in result} == {fr.id for fr in flow_runs}
assert {r.name for r in result} == {fr.name for fr in flow_runs}
# state_type was not loaded and raises an error
# because the async session is closed
for r in result:
with pytest.raises(sa.exc.MissingGreenlet):
r.state_type
| TestReadFlowRuns |
python | ray-project__ray | python/ray/air/integrations/mlflow.py | {
"start": 7428,
"end": 13191
} | class ____(LoggerCallback):
"""MLflow Logger to automatically log Tune results and config to MLflow.
MLflow (https://mlflow.org) Tracking is an open source library for
recording and querying experiments. This Ray Tune ``LoggerCallback``
sends information (config parameters, training results & metrics,
and artifacts) to MLflow for automatic experiment tracking.
Keep in mind that the callback will open an MLflow session on the driver and
not on the trainable. Therefore, it is not possible to call MLflow functions
like ``mlflow.log_figure()`` inside the trainable as there is no MLflow session
on the trainable. For more fine grained control, use
:func:`ray.air.integrations.mlflow.setup_mlflow`.
Args:
tracking_uri: The tracking URI for where to manage experiments
and runs. This can either be a local file path or a remote server.
This arg gets passed directly to mlflow
initialization. When using Tune in a multi-node setting, make sure
to set this to a remote server and not a local file path.
registry_uri: The registry URI that gets passed directly to
mlflow initialization.
experiment_name: The experiment name to use for this Tune run.
If the experiment with the name already exists with MLflow,
it will be reused. If not, a new experiment will be created with
that name.
tags: An optional dictionary of string keys and values to set
as tags on the run
tracking_token: Tracking token used to authenticate with MLflow.
save_artifact: If set to True, automatically save the entire
contents of the Tune local_dir as an artifact to the
corresponding run in MlFlow.
log_params_on_trial_end: If set to True, log parameters to MLflow
at the end of the trial instead of at the beginning
Example:
.. code-block:: python
from ray.air.integrations.mlflow import MLflowLoggerCallback
tags = { "user_name" : "John",
"git_commit_hash" : "abc123"}
tune.run(
train_fn,
config={
# define search space here
"parameter_1": tune.choice([1, 2, 3]),
"parameter_2": tune.choice([4, 5, 6]),
},
callbacks=[MLflowLoggerCallback(
experiment_name="experiment1",
tags=tags,
save_artifact=True,
log_params_on_trial_end=True)])
"""
def __init__(
self,
tracking_uri: Optional[str] = None,
*,
registry_uri: Optional[str] = None,
experiment_name: Optional[str] = None,
tags: Optional[Dict] = None,
tracking_token: Optional[str] = None,
save_artifact: bool = False,
log_params_on_trial_end: bool = False,
):
self.tracking_uri = tracking_uri
self.registry_uri = registry_uri
self.experiment_name = experiment_name
self.tags = tags
self.tracking_token = tracking_token
self.should_save_artifact = save_artifact
self.log_params_on_trial_end = log_params_on_trial_end
self.mlflow_util = _MLflowLoggerUtil()
if ray.util.client.ray.is_connected():
logger.warning(
"When using MLflowLoggerCallback with Ray Client, "
"it is recommended to use a remote tracking "
"server. If you are using a MLflow tracking server "
"backed by the local filesystem, then it must be "
"setup on the server side and not on the client "
"side."
)
def setup(self, *args, **kwargs):
# Setup the mlflow logging util.
self.mlflow_util.setup_mlflow(
tracking_uri=self.tracking_uri,
registry_uri=self.registry_uri,
experiment_name=self.experiment_name,
tracking_token=self.tracking_token,
)
if self.tags is None:
# Create empty dictionary for tags if not given explicitly
self.tags = {}
self._trial_runs = {}
def log_trial_start(self, trial: "Trial"):
# Create run if not already exists.
if trial not in self._trial_runs:
# Set trial name in tags
tags = self.tags.copy()
tags["trial_name"] = str(trial)
run = self.mlflow_util.start_run(tags=tags, run_name=str(trial))
self._trial_runs[trial] = run.info.run_id
run_id = self._trial_runs[trial]
# Log the config parameters.
config = trial.config
if not self.log_params_on_trial_end:
self.mlflow_util.log_params(run_id=run_id, params_to_log=config)
def log_trial_result(self, iteration: int, trial: "Trial", result: Dict):
step = result.get(TIMESTEPS_TOTAL) or result[TRAINING_ITERATION]
run_id = self._trial_runs[trial]
self.mlflow_util.log_metrics(run_id=run_id, metrics_to_log=result, step=step)
def log_trial_end(self, trial: "Trial", failed: bool = False):
run_id = self._trial_runs[trial]
# Log the artifact if set_artifact is set to True.
if self.should_save_artifact:
self.mlflow_util.save_artifacts(run_id=run_id, dir=trial.local_path)
# Stop the run once trial finishes.
status = "FINISHED" if not failed else "FAILED"
# Log the config parameters.
config = trial.config
if self.log_params_on_trial_end:
self.mlflow_util.log_params(run_id=run_id, params_to_log=config)
self.mlflow_util.end_run(run_id=run_id, status=status)
| MLflowLoggerCallback |
python | huggingface__transformers | src/transformers/models/arcee/modeling_arcee.py | {
"start": 15297,
"end": 18456
} | class ____(ArceePreTrainedModel):
def __init__(self, config: ArceeConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[ArceeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = ArceeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = ArceeRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_embeddings=position_embeddings,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
@auto_docstring(checkpoint="arcee-ai/AFM-4.5B")
| ArceeModel |
python | nedbat__coveragepy | tests/mixins.py | {
"start": 3109,
"end": 4051
} | class ____:
"""Auto-restore the imported modules at the end of each test."""
@pytest.fixture(autouse=True)
def _module_saving(self) -> Iterable[None]:
"""Remove modules we imported during the test."""
self._sys_module_saver = SysModuleSaver()
try:
yield
finally:
self._sys_module_saver.restore()
def clean_local_file_imports(self) -> None:
"""Clean up the results of calls to `import_local_file`.
Use this if you need to `import_local_file` the same file twice in
one test.
"""
# So that we can re-import files, clean them out first.
self._sys_module_saver.restore()
# Also have to clean out the .pyc files, since the time stamp
# resolution is only one second, a changed file might not be
# picked up.
remove_tree("__pycache__")
importlib.invalidate_caches()
| RestoreModulesMixin |
python | keon__algorithms | algorithms/tree/bst/bst.py | {
"start": 286,
"end": 3095
} | class ____(object):
def __init__(self):
self.root = None
def get_root(self):
return self.root
"""
Get the number of elements
Using recursion. Complexity O(logN)
"""
def size(self):
return self.recur_size(self.root)
def recur_size(self, root):
if root is None:
return 0
else:
return 1 + self.recur_size(root.left) + self.recur_size(root.right)
"""
Search data in bst
Using recursion. Complexity O(logN)
"""
def search(self, data):
return self.recur_search(self.root, data)
def recur_search(self, root, data):
if root is None:
return False
if root.data == data:
return True
elif data > root.data: # Go to right root
return self.recur_search(root.right, data)
else: # Go to left root
return self.recur_search(root.left, data)
"""
Insert data in bst
Using recursion. Complexity O(logN)
"""
def insert(self, data):
if self.root:
return self.recur_insert(self.root, data)
else:
self.root = Node(data)
return True
def recur_insert(self, root, data):
if root.data == data: # The data is already there
return False
elif data < root.data: # Go to left root
if root.left: # If left root is a node
return self.recur_insert(root.left, data)
else: # left root is a None
root.left = Node(data)
return True
else: # Go to right root
if root.right: # If right root is a node
return self.recur_insert(root.right, data)
else:
root.right = Node(data)
return True
"""
Preorder, Postorder, Inorder traversal bst
"""
def preorder(self, root):
if root:
print(str(root.data), end = ' ')
self.preorder(root.left)
self.preorder(root.right)
def inorder(self, root):
if root:
self.inorder(root.left)
print(str(root.data), end = ' ')
self.inorder(root.right)
def postorder(self, root):
if root:
self.postorder(root.left)
self.postorder(root.right)
print(str(root.data), end = ' ')
"""
The tree is created for testing:
10
/ \
6 15
/ \ / \
4 9 12 24
/ / \
7 20 30
/
18
"""
| BST |
python | boto__boto3 | boto3/dynamodb/conditions.py | {
"start": 6705,
"end": 6742
} | class ____(AttributeBase):
pass
| Key |
python | chroma-core__chroma | chromadb/test/property/test_embeddings.py | {
"start": 13210,
"end": 53202
} | class ____(EmbeddingStateMachineBase):
embedding_ids: Bundle[ID] = Bundle("embedding_ids")
def __init__(self, client: ClientAPI, use_search: bool = False):
super().__init__(client, use_search)
@initialize(collection=collection_st) # type: ignore
def initialize(self, collection: strategies.Collection):
super().initialize(collection)
print(
"[test_embeddings][initialize] Initialize collection id ",
self.collection._model["id"],
" hypothesis generated collection id ",
collection.id,
)
self.log_operation_count = 0
self.unique_ids_in_log: Set[ID] = set()
self.collection_version = self.collection.get_model()["version"]
@precondition(
lambda self: not NOT_CLUSTER_ONLY
and self.log_operation_count > 10
and len(self.unique_ids_in_log) > 3
)
@rule()
def wait_for_compaction(self) -> None:
current_version = get_collection_version(self.client, self.collection.name)
assert current_version >= self.collection_version # type: ignore[operator]
# This means that there was a compaction from the last time this was
# invoked. Ok to start all over again.
if current_version > self.collection_version: # type: ignore[operator]
print(
"[test_embeddings][wait_for_compaction] collection version has changed, so reset to 0"
)
self.collection_version = current_version
# This is fine even if the log has some records right now
self.log_operation_count = 0
self.unique_ids_in_log = set()
else:
print(
"[test_embeddings][wait_for_compaction] wait for version to increase from current version ",
current_version,
)
new_version = wait_for_version_increase(
self.client,
self.collection.name,
current_version,
additional_time=VERSION_INCREASE_WAIT_TIME,
)
# Everything got compacted.
self.log_operation_count = 0
self.unique_ids_in_log = set()
self.collection_version = new_version
@rule(
target=embedding_ids,
record_set=strategies.recordsets(collection_st),
)
def add_embeddings(self, record_set: strategies.RecordSet) -> MultipleResults[ID]:
res = super().add_embeddings(record_set)
normalized_record_set: strategies.NormalizedRecordSet = invariants.wrap_all(
record_set
)
print(
"[test_embeddings][add] Non Intersection ids ",
normalized_record_set["ids"],
" len ",
len(normalized_record_set["ids"]),
)
self.log_operation_count += len(normalized_record_set["ids"])
for id in normalized_record_set["ids"]:
if id not in self.unique_ids_in_log:
self.unique_ids_in_log.add(id)
return res # type: ignore[return-value]
@rule(ids=st.lists(consumes(embedding_ids), min_size=1))
def delete_by_ids(self, ids: IDs) -> None:
super().delete_by_ids(ids)
print("[test_embeddings][delete] ids ", ids, " len ", len(ids))
self.log_operation_count += len(ids)
for id in ids:
if id in self.unique_ids_in_log:
self.unique_ids_in_log.remove(id)
# Removing the precondition causes the tests to frequently fail as "unsatisfiable"
# Using a value < 5 causes retries and lowers the number of valid samples
@precondition(lambda self: len(self.record_set_state["ids"]) >= 5)
@rule(
record_set=strategies.recordsets(
collection_strategy=collection_st,
id_strategy=embedding_ids,
min_size=1,
max_size=5,
),
)
def update_embeddings(self, record_set: strategies.RecordSet) -> None:
super().update_embeddings(record_set)
print(
"[test_embeddings][update] ids ",
record_set["ids"],
" len ",
len(invariants.wrap(record_set["ids"])),
)
self.log_operation_count += len(invariants.wrap(record_set["ids"]))
# Using a value < 3 causes more retries and lowers the number of valid samples
@precondition(lambda self: len(self.record_set_state["ids"]) >= 3)
@rule(
record_set=strategies.recordsets(
collection_strategy=collection_st,
id_strategy=st.one_of(embedding_ids, strategies.safe_text),
min_size=1,
max_size=5,
)
)
def upsert_embeddings(self, record_set: strategies.RecordSet) -> None:
super().upsert_embeddings(record_set)
print(
"[test_embeddings][upsert] ids ",
record_set["ids"],
" len ",
len(invariants.wrap(record_set["ids"])),
)
self.log_operation_count += len(invariants.wrap(record_set["ids"]))
for id in invariants.wrap(record_set["ids"]):
if id not in self.unique_ids_in_log:
self.unique_ids_in_log.add(id)
def test_embeddings_state(caplog: pytest.LogCaptureFixture, client: ClientAPI) -> None:
create_isolated_database(client)
caplog.set_level(logging.ERROR)
run_state_machine_as_test(
lambda: EmbeddingStateMachine(client),
settings=settings(
deadline=90000, suppress_health_check=[HealthCheck.filter_too_much]
),
) # type: ignore
print_traces()
@pytest.mark.skipif(
NOT_CLUSTER_ONLY,
reason="Search API only available in distributed mode"
)
def test_embeddings_state_with_search(caplog: pytest.LogCaptureFixture, client: ClientAPI) -> None:
"""Test embeddings state machine using search API instead of query."""
create_isolated_database(client)
caplog.set_level(logging.ERROR)
run_state_machine_as_test(
lambda: EmbeddingStateMachine(client, use_search=True),
settings=settings(
deadline=90000, suppress_health_check=[HealthCheck.filter_too_much]
),
) # type: ignore
print_traces()
def test_add_then_delete_n_minus_1(client: ClientAPI) -> None:
create_isolated_database(client)
state = EmbeddingStateMachine(client)
state.initialize(
collection=strategies.Collection(
name="A00",
metadata={
"hnsw:construction_ef": 128,
"hnsw:search_ef": 128,
"hnsw:M": 128,
},
embedding_function=None,
id=uuid.uuid4(),
dimension=2,
dtype=np.float16,
known_metadata_keys={},
known_document_keywords=[],
has_documents=False,
has_embeddings=True,
)
)
state.ann_accuracy()
state.count()
state.fields_match()
state.no_duplicates()
v1, v2, v3, v4, v5, v6 = state.add_embeddings( # type: ignore[misc]
record_set={
"ids": ["0", "1", "2", "3", "4", "5"],
"embeddings": [
[0.09765625, 0.430419921875],
[0.20556640625, 0.08978271484375],
[-0.1527099609375, 0.291748046875],
[-0.12481689453125, 0.78369140625],
[0.92724609375, -0.233154296875],
[0.92724609375, -0.233154296875],
],
"metadatas": [None, None, None, None, None, None],
"documents": None,
}
)
state.ann_accuracy()
state.count()
state.fields_match()
state.no_duplicates()
state.delete_by_ids(ids=[v1, v2, v3, v4, v5])
if not NOT_CLUSTER_ONLY:
state.wait_for_compaction()
state.ann_accuracy()
state.count()
state.fields_match()
state.no_duplicates()
state.teardown()
def test_embeddings_flake1(client: ClientAPI) -> None:
create_isolated_database(client)
state = EmbeddingStateMachine(client)
state.initialize(
collection=strategies.Collection(
name="fOIBy",
metadata={
"-7n": False,
"92WhVE_": "HtmY",
"J-sW": "RTip",
"wPGA8hY7uX": -171,
"4rA": "5KdoaYsUQ_EWStV4",
"hnsw:construction_ef": 128,
"hnsw:search_ef": 128,
"hnsw:M": 128,
},
embedding_function=None,
id=uuid.UUID("ff006990-82c3-494b-97d5-cbb05092c861"),
dimension=664,
dtype=np.float16,
known_metadata_keys={},
known_document_keywords=[],
has_documents=False,
has_embeddings=True,
)
)
state.ann_accuracy()
state.count()
state.fields_match()
state.no_duplicates()
(
embedding_ids_0,
embedding_ids_1,
embedding_ids_2,
embedding_ids_3,
embedding_ids_4,
embedding_ids_5,
) = state.add_embeddings(
record_set={
"ids": ["kgaT4d", "C2h2YoNSgUqRyE-Tmxf3MT", "ODI-yO", "t", "b", "vC"],
"embeddings": [
[0] * 664,
[0] * 664,
[0] * 664,
[0] * 664,
[0] * 664,
[0] * 664,
],
"metadatas": [
{
"s": False,
"d1wQJV-9": -2_021_928_494,
"hWf7gwQ": "5DkqA9o6",
"rbyHg": 0.0,
"Pe": 251,
"0r6qQ5XYxeq": -0.3333333432674408,
"PzXpiqB": "VT",
},
None,
{
"hqTZ6Ok767eCSwyvGEuig8a": -659321220,
"TRGxN": -0.3333333432674408,
"1h8I": "E",
},
{"ATRs": -0.3333333432674408, "KF0P": -23106},
{
"PcFwu": -14169,
"PS": 0.0,
"WCgx": -13116,
"EQt": False,
"upcOfhu": -1.5,
"e": "vReD",
"U": -2147,
"zI4tO": True,
"MfHM7uU58tW_muctZf": -22,
"SvOy": 2.220446049250313e-16,
},
{
"iuTAKznMg6IdUKxaPi": -58907,
"oy": "uDC",
"c0Zb3VTUktBu-uW": "OcywKhsi",
"6i": -42181,
"nn": 5.960464477539063e-08,
"bs": "-",
"om": -1000000.0,
"MXnpsEEE": True,
"Ful8JRj": -304752924,
"Hi7lrY": True,
},
],
"documents": None,
}
)
state.ann_accuracy()
# recall: 1.0, missing 0 out of 6, accuracy threshold 1e-06
state.count()
state.fields_match()
state.no_duplicates()
(embedding_ids_6,) = state.add_embeddings(
record_set={
"ids": "ua",
"embeddings": [[0] * 664],
"metadatas": None,
"documents": None,
}
)
state.ann_accuracy()
# recall: 1.0, missing 0 out of 7, accuracy threshold 1e-06
state.count()
state.fields_match()
state.no_duplicates()
embedding_ids_7, embedding_ids_8 = state.add_embeddings(
record_set={
"ids": ["K_", "yFsH"],
"embeddings": [[0] * 664, [0] * 664],
"metadatas": [
None,
{
"RiaaN9MNpq": -634040344,
"g9Wx": True,
"uexOH": -2.220446049250313e-16,
"h2": True,
},
],
"documents": None,
}
)
state.ann_accuracy()
# recall: 1.0, missing 0 out of 9, accuracy threshold 1e-06
state.count()
state.fields_match()
state.no_duplicates()
state.upsert_embeddings(
record_set={
"ids": ["SCeelWyLAWG_oHa", "lY", "3"],
"embeddings": [[0] * 664, [0] * 664, [0] * 664],
"metadatas": [
{
"0ZbYq40P": 448094799,
"OT9sTxkM": 9.999999747378752e-06,
"-j": 158,
"rqsBEfrELJctJoVeLqtsPZp": -100,
"5M4": 64676,
"XFt": 227,
"ii": 168135.75,
"ly": True,
},
{"Dy6": "q7LZUW"},
{
"fP": "KuQG8m-T",
"APtmt": False,
"xKb6": -2_147_483_647,
"C": "xGw",
"G18V": False,
"s": True,
"c-": "k",
"G92n": -7024,
"YTTBWs31rbM_L_PQDSCu": False,
"xOGzFeG": True,
"gh7cuT_ruA3mn": 883101.75,
},
],
"documents": None,
}
)
state.ann_accuracy()
# recall: 1.0, missing 0 out of 12, accuracy threshold 1e-06
state.count()
state.fields_match()
state.no_duplicates()
state.upsert_embeddings(
record_set={
"ids": [
"O3m3-X1",
"ZNt2PF6M5_q",
"Ij0Yh6",
embedding_ids_1,
embedding_ids_7,
],
"embeddings": [[0] * 664, [0] * 664, [0] * 664, [0] * 664, [0] * 664],
"metadatas": [
{
"2fDAuv7": -46139,
"4Et": 19926,
"5hqGH60G-yZ6PWyM1B": False,
"OkMjjG": "34oWsr93EUl",
"yTk": 999999.0,
"wZvpmS5HbTAI": -9.999999747378752e-06,
"bvq": "Xc80e",
"zPhL": "e-QXuDdnxYMd",
},
{
"WK": -9.999999747378752e-06,
"y": "g",
"GNZphPCKay88gsh3x_": 1.899999976158142,
},
{"_zVO2i-N": -40, "tWHxo": False, "ltu_E_fg": "JDc", "9yGpik": -153},
{
"otM8": "ZnQ3ALwA",
"EGeKm": 50,
"skf71O0UKT": True,
"S8Kc8-l95Rpc": True,
"4bGz1QmzbKVySN1yrXFl56CmDS08F": 1_284_815_517,
},
None,
],
"documents": None,
}
)
state.ann_accuracy()
# recall: 1.0, missing 0 out of 15, accuracy threshold 1e-06
state.count()
state.fields_match()
state.no_duplicates()
state.update_embeddings(
record_set={
"ids": [
embedding_ids_1,
embedding_ids_3,
embedding_ids_8,
embedding_ids_5,
embedding_ids_6,
],
"embeddings": [[0] * 664, [0] * 664, [0] * 664, [0] * 664, [0] * 664],
"metadatas": [
{
"hBFXAIA": False,
"Wx4dcB5": -35,
"8w": False,
"8": False,
"mwQ5": "c7",
"G9g2": "J",
"VY": True,
"VQGb_r-hzoA": -0.9999899864196777,
"M0lMig": True,
"F": True,
"J": 1.100000023841858,
"d": "R",
"DugrcoZv": False,
"45B": -2.0000100135803223,
"UG-sSV": False,
"cri4cT1G": -1_067_180_133,
"I": -4411,
"FqFWR__": False,
"4": -23,
"vwo4WERBljY3aWjWnqL": "xM0jUV4U2r",
"WF": "msuFYMwj_SXc",
},
None,
{"m": -49054, "f4": 239658268, "Ut": False, "V_NVCw": "5"},
{"VWuP": -9.999999747378752e-06, "7uF8": 127, "3": False},
{
"a1": -6.103515625e-05,
"ML_Zl2Ir85KolESaX": False,
"iJvA": -1.5,
"O8o": 1_287_175_929,
"rMS": 200,
"0": -1000000.0,
"5AeE": 9.999999747378752e-06,
"2q": True,
},
],
"documents": None,
}
)
state.ann_accuracy()
# recall: 1.0, missing 0 out of 15, accuracy threshold 1e-06
state.count()
state.fields_match()
state.no_duplicates()
state.update_embeddings(
record_set={
"ids": [embedding_ids_1, embedding_ids_2, embedding_ids_8, embedding_ids_3],
"embeddings": [[0] * 664, [0] * 664, [0] * 664, [0] * 664],
"metadatas": [
{"Yx": "6T9tEEC84", "lGe5GMX": 3054},
{
"UvsAljL5V5ELRv": True,
embedding_ids_3: False,
"yeLTrhAIq": 1.5,
"iP": -0.5,
},
{"C": "Ri"},
{
"pzHn2": -9.999999747378752e-06,
"YfdftMEd0C5ekByb7mhdb": 9735,
"LJCViu": 333447280,
"LT": True,
"5Y": False,
"OoVwE": False,
"vq": 1.899999976158142,
"8Wf6": False,
},
],
"documents": None,
}
)
state.ann_accuracy()
# recall: 1.0, missing 0 out of 15, accuracy threshold 1e-06
state.count()
state.fields_match()
state.no_duplicates()
state.update_embeddings(
record_set={
"ids": [embedding_ids_5],
"embeddings": [[0] * 664],
"metadatas": {
"C1KbOOlKkzzLo9CGU2": -1_379_550_593,
"NH": "d",
"M": "ebEKOx",
"fpu77F70Icl": True,
"dz6fI-Gpp": True,
"qVVW": -63204,
"Qrcq645F": 296029.46875,
},
"documents": None,
}
)
state.ann_accuracy()
# recall: 1.0, missing 0 out of 15, accuracy threshold 1e-06
state.count()
state.fields_match()
state.no_duplicates()
(
embedding_ids_9,
embedding_ids_10,
embedding_ids_11,
embedding_ids_12,
) = state.add_embeddings(
record_set={
"ids": ["F7", "Rig1", "RXi", "_nC8-"],
"embeddings": [[0] * 664, [0] * 664, [0] * 664, [0] * 664],
"metadatas": [
{
"FBtaPcQWV24v": -25365,
"ddLq1My3mbUL9I": 2019,
"fI": 908902.125,
"HLxuosT": False,
},
{"ATUP1": -1.5},
{"AhC": True, "wm9AwP": -0.9999899864196777},
{"K": -33427},
],
"documents": None,
}
)
state.ann_accuracy()
# recall: 1.0, missing 0 out of 19, accuracy threshold 1e-06
state.count()
state.fields_match()
state.no_duplicates()
state.upsert_embeddings(
record_set={
"ids": ["4GJ", "r", "Aunf5", embedding_ids_5],
"embeddings": [[0] * 664, [0] * 664, [0] * 664, [0] * 664],
"metadatas": [
{"J8O0R8VGaY": True},
{
"K2cCg": 5.960464477539063e-08,
"oObAcp": -2.0000100135803223,
"ax": "nK67g",
"afzp": 1000000.0,
"xnRCSPJUF4JZ2sKOIRDc": True,
"nBaQ6F1O38etVMhss2angu-": 158622.671875,
},
{
"UwbDWM2_": 9.999999747378752e-06,
"3": -452142.625,
"nfoovt": 214128.375,
"elaMLbhEvW": 1.100000023841858,
"0": "iSNcMrT",
"UO": True,
"I": 176,
"3ssGS4rSKXsKqRPFTBGrRPPsu": 1000000.0,
"Gw": False,
"V": True,
},
{"F": "tTw"},
],
"documents": None,
}
)
state.ann_accuracy()
# recall: 1.0, missing 0 out of 22, accuracy threshold 1e-06
state.count()
state.fields_match()
state.no_duplicates()
state.update_embeddings(
record_set={
"ids": [embedding_ids_1, embedding_ids_9],
"embeddings": [[0] * 664, [0] * 664],
"metadatas": [
{
"ei": -6.103515625e-05,
"_": "qscyRBC_",
"TP": "IXd",
"N0FG7Nta1": -745247.375,
"woD": 66,
"IV": "0L3xImGg",
"9N--JBl0uH_au_": -0.5,
"KVmhtcA": -9.999999747378752e-06,
"qr": False,
"NfL6": -0.9999899864196777,
"taIVpC": True,
"XJX": "l",
"5": 66,
"8YaEynJznB": True,
"k": -177,
"N": 671709.375,
"ebB": 53239,
"fJ": 65709.09375,
"QK8l3l4yP-": False,
"2": "cRl59jW_O",
"-XP899RRn": -999999.0,
"A9": 1.1754943508222875e-38,
"UlxNwmc": True,
"G": 128,
"1NoCd": False,
"WRn5cD": -175840.15625,
},
{
"zAbCKkEvE4s": True,
"hnFN": "HExeVM0iM",
"Uc9": False,
"v": 1_759_514_963,
"X": False,
"W": 1.100000023841858,
},
],
"documents": None,
}
)
state.ann_accuracy()
# recall: 1.0, missing 0 out of 22, accuracy threshold 1e-06
state.count()
state.fields_match()
state.no_duplicates()
state.update_embeddings(
record_set={
"ids": [embedding_ids_2],
"embeddings": [[0] * 664],
"metadatas": None,
"documents": None,
}
)
state.ann_accuracy()
# recall: 1.0, missing 0 out of 22, accuracy threshold 1e-06
state.count()
state.fields_match()
state.no_duplicates()
state.update_embeddings(
record_set={
"ids": [
embedding_ids_10,
embedding_ids_2,
embedding_ids_4,
embedding_ids_12,
embedding_ids_3,
],
"embeddings": [[0] * 664, [0] * 664, [0] * 664, [0] * 664, [0] * 664],
"metadatas": [
{"Y": "-iRt8"},
{"55m28": "8MxYq", "krQsTFdqMhYjhF": False},
None,
{
"9SnviLf": -6.103515625e-05,
"Y0Jw4pLTwr": -184,
"v3E": 6.103515625e-05,
"Fx3jsbcdqy": "VG7E7xm",
"H": 9071,
"-U": "1xXUHLklmIVSVgQd7EHUCu5wa",
"S": "kl6",
},
{
"U": -12,
"Qfm_6duL": False,
"Sh0LkduZt5qsRJrF": "sB",
"8DM": -64114,
"MZ": "xtLNrNyRo2",
"lY": -922831.5,
"7": False,
},
],
"documents": None,
}
)
state.ann_accuracy()
# recall: 1.0, missing 0 out of 22, accuracy threshold 1e-06
state.count()
state.fields_match()
state.no_duplicates()
state.upsert_embeddings(
record_set={
"ids": [embedding_ids_0, embedding_ids_7, "Oia", "iD", embedding_ids_5],
"embeddings": [[0] * 664, [0] * 664, [0] * 664, [0] * 664, [0] * 664],
"metadatas": [
None,
{
"tVs": True,
"B": "4eK",
"zTR": True,
"bq6VslBBo2_12hgyKNPddxify34-np-": -22311,
"F7FcZpODwCTHg91o4mKTjBL": False,
"1Zjfys": -13897,
"lg3": -866314519,
},
{
"1qr": "_TG-YhAQ",
"TKV": "Q",
"8tLu": 1000000.0,
"QHsxa": 1.100000023841858,
"F": True,
},
{
"p": True,
"rR": "UepiV6K_",
"UDZ_uR": -1.5,
"fFG6cZvICaGc": True,
"unTbxz0qd2-AV1": -332950.25,
},
{
"EXXVBZU": 2_147_483_647,
"tJMO": "C9OePg",
"4o": False,
"F8g8n": -999999.0,
"5": "aBY",
"hv3i": -48091,
},
],
"documents": None,
}
)
state.ann_accuracy()
# recall: 1.0, missing 0 out of 24, accuracy threshold 1e-06
state.count()
state.fields_match()
state.no_duplicates()
state.teardown()
def test_update_none(caplog: pytest.LogCaptureFixture, client: ClientAPI) -> None:
create_isolated_database(client)
state = EmbeddingStateMachine(client)
state.initialize(
collection=strategies.Collection(
name="A00",
metadata={
"hnsw:construction_ef": 128,
"hnsw:search_ef": 128,
"hnsw:M": 128,
},
embedding_function=None,
id=uuid.UUID("2fb0c945-b877-42ab-9417-bfe0f6b172af"),
dimension=2,
dtype=np.float16,
known_metadata_keys={},
known_document_keywords=[],
has_documents=False,
has_embeddings=True,
)
)
state.ann_accuracy()
state.count()
state.fields_match()
state.no_duplicates()
v1, v2, v3, v4, v5 = state.add_embeddings( # type: ignore[misc]
record_set={
"ids": ["0", "1", "2", "3", "4"],
"embeddings": [
[0.09765625, 0.430419921875],
[0.20556640625, 0.08978271484375],
[-0.1527099609375, 0.291748046875],
[-0.12481689453125, 0.78369140625],
[0.92724609375, -0.233154296875],
],
"metadatas": [None, None, None, None, None],
"documents": None,
}
)
state.ann_accuracy()
state.count()
state.fields_match()
state.no_duplicates()
state.update_embeddings(
record_set={
"ids": [v5],
"embeddings": [[0.58349609375, 0.05780029296875]],
"metadatas": [{v1: v1}],
"documents": None,
}
)
state.ann_accuracy()
state.teardown()
def test_add_delete_add(client: ClientAPI) -> None:
create_isolated_database(client)
state = EmbeddingStateMachine(client)
state.initialize(
collection=strategies.Collection(
name="KR3cf",
metadata={
"Ufmxsi3": 999999.0,
"bMMvvrqM4MKmp5CJB8A": 62921,
"-": True,
"37PNi": "Vkn",
"5KZfkpod3ND5soL_": True,
"KA4zcZL9lRN9": 142,
"Oc8G7ysXmE8lp4Hos_": "POQe8Unz1uJ",
"BI930U": 31,
"te": False,
"tyM": -0.5,
"R0ZiZ": True,
"m": True,
"IOw": -25725,
"hnsw:construction_ef": 128,
"hnsw:search_ef": 128,
"hnsw:M": 128,
},
embedding_function=None,
id=uuid.UUID("284b6e99-b19e-49b2-96a4-a2a93a95447d"),
dimension=3,
dtype=np.float32,
known_metadata_keys={},
known_document_keywords=[],
has_documents=False,
has_embeddings=True,
)
)
state.ann_accuracy()
state.count()
state.fields_match()
state.no_duplicates()
embeddings = state.add_embeddings(
record_set={
"ids": ["255", "l", "3-", "i", "Nk", "9yPvT"],
"embeddings": [
[1.2, 2.3, 1.5],
[4.5, 6.0, 2],
[1, 2, 3],
[4, 5, 6],
[8.9, 9.0, 7],
[4.5, 6.0, 5.6],
],
"metadatas": None,
"documents": None,
}
)
i = 0
emb_list = {}
for embedding in embeddings:
emb_list[i] = embedding
i += 1
state.ann_accuracy()
state.count()
state.fields_match()
state.no_duplicates()
state.upsert_embeddings(
record_set={
"ids": [
emb_list[0],
emb_list[4],
"KWcDaHUVD6MxEiJ",
emb_list[5],
"PdlP1d6w",
],
"embeddings": [[1, 23, 4], [3, 5, 9], [9, 3, 5], [3, 9, 8], [1, 5, 4]],
"documents": None,
"metadatas": None,
}
)
state.ann_accuracy()
state.count()
state.fields_match()
state.no_duplicates()
if not NOT_CLUSTER_ONLY:
state.wait_for_compaction()
state.ann_accuracy()
state.count()
state.fields_match()
state.no_duplicates()
state.upsert_embeddings(
record_set={
"ids": ["TpjiboLSuYWBJDbRW1zeNmC", emb_list[0], emb_list[4]],
"embeddings": [[4, 6, 7], [7, 9, 3], [1, 3, 6]],
"metadatas": None,
"documents": None,
}
)
state.ann_accuracy()
state.count()
state.fields_match()
state.no_duplicates()
state.delete_by_ids(
ids=[emb_list[2], emb_list[1], emb_list[5], emb_list[4], emb_list[3]]
)
state.ann_accuracy()
state.count()
state.fields_match()
state.no_duplicates()
embeddings = state.add_embeddings(
record_set={
"ids": ["o", "D3V84", "Rt", "TDwlc9C8_evn", emb_list[1]],
"embeddings": [
[9, 5.4, 3.22],
[1.33, 3.44, 5.66],
[9.90, 9.8, 1.3],
[9.7, 5.6, 4.5],
[3.4, 5.6, 9.65],
],
"documents": None,
"metadatas": None,
}
)
i = 6
for embedding in embeddings:
emb_list[i] = embedding
i += 1
state.ann_accuracy()
state.count()
state.fields_match()
if not NOT_CLUSTER_ONLY:
state.wait_for_compaction()
def test_multi_add(client: ClientAPI) -> None:
create_isolated_database(client)
coll = client.create_collection(name="foo")
coll.add(ids=["a"], embeddings=[[0.0]]) # type: ignore[arg-type]
assert coll.count() == 1
# after the sqlite refactor - add silently ignores duplicates, no exception is raised
# partial adds are supported - i.e we will add whatever we can in the request
coll.add(ids=["a"], embeddings=[[0.0]]) # type: ignore[arg-type]
assert coll.count() == 1
results = coll.get()
assert results["ids"] == ["a"]
coll.delete(ids=["a"])
assert coll.count() == 0
def test_dup_add(client: ClientAPI) -> None:
create_isolated_database(client)
coll = client.create_collection(name="foo")
with pytest.raises(errors.DuplicateIDError):
coll.add(ids=["a", "a"], embeddings=[[0.0], [1.1]]) # type: ignore[arg-type]
with pytest.raises(errors.DuplicateIDError):
coll.upsert(ids=["a", "a"], embeddings=[[0.0], [1.1]]) # type: ignore[arg-type]
def test_query_without_add(client: ClientAPI) -> None:
create_isolated_database(client)
coll = client.create_collection(name="foo")
fields: Include = ["documents", "metadatas", "embeddings", "distances"] # type: ignore[list-item]
N = np.random.randint(1, 2000)
K = np.random.randint(1, 100)
query_embeddings = np.random.random((N, K)).tolist()
results = coll.query(
query_embeddings=cast(Embeddings, query_embeddings), include=fields
)
for field in fields:
field_results = results[field] # type: ignore[literal-required]
assert field_results is not None
assert all([len(result) == 0 for result in field_results])
def test_get_non_existent(client: ClientAPI) -> None:
create_isolated_database(client)
coll = client.create_collection(name="foo")
result = coll.get(ids=["a"], include=["documents", "metadatas", "embeddings"]) # type: ignore[list-item]
assert len(result["ids"]) == 0
assert len(result["metadatas"]) == 0 # type: ignore[arg-type]
assert len(result["documents"]) == 0 # type: ignore[arg-type]
assert len(result["embeddings"]) == 0 # type: ignore[arg-type]
# TODO: Use SQL escaping correctly internally
@pytest.mark.xfail(reason="We don't properly escape SQL internally, causing problems")
def test_escape_chars_in_ids(client: ClientAPI) -> None:
create_isolated_database(client)
id = "\x1f"
coll = client.create_collection(name="foo")
coll.add(ids=[id], embeddings=[[0.0]]) # type: ignore[arg-type]
assert coll.count() == 1
coll.delete(ids=[id])
assert coll.count() == 0
def test_delete_empty_fails(client: ClientAPI) -> None:
create_isolated_database(client)
coll = client.create_collection(name="foo")
with pytest.raises(ValueError):
coll.delete()
@pytest.mark.parametrize(
"kwargs",
[
{"ids": ["foo"]},
{"where": {"foo": "bar"}},
{"where_document": {"$contains": "bar"}},
{"ids": ["foo"], "where": {"foo": "bar"}},
{"ids": ["foo"], "where_document": {"$contains": "bar"}},
{
"ids": ["foo"],
"where": {"foo": "bar"},
"where_document": {"$contains": "bar"},
},
],
)
def test_delete_success(client: ClientAPI, kwargs: Any) -> None:
create_isolated_database(client)
coll = client.create_collection(name="foo")
# Should not raise
coll.delete(**kwargs)
@given(supported_types=st.sampled_from([np.float32, np.int32, np.int64, int, float]))
def test_autocasting_validate_embeddings_for_compatible_types(
supported_types: List[Any],
) -> None:
embds = strategies.create_embeddings(10, 10, supported_types)
validated_embeddings = validate_embeddings(
cast(
Embeddings,
normalize_embeddings(embds),
)
)
assert all(
[
isinstance(value, np.ndarray)
and (
value.dtype == np.float32
or value.dtype == np.float64
or value.dtype == np.int32
or value.dtype == np.int64
)
for value in validated_embeddings
]
)
@given(supported_types=st.sampled_from([np.float32, np.int32, np.int64, int, float]))
def test_autocasting_validate_embeddings_with_ndarray(
supported_types: List[Any],
) -> None:
embds = strategies.create_embeddings_ndarray(10, 10, supported_types)
validated_embeddings = validate_embeddings(
cast(Embeddings, normalize_embeddings(embds))
)
assert all(
[
isinstance(value, np.ndarray)
and (
value.dtype == np.float32
or value.dtype == np.float64
or value.dtype == np.int32
or value.dtype == np.int64
)
for value in validated_embeddings
]
)
@given(unsupported_types=st.sampled_from([str, bool]))
def test_autocasting_validate_embeddings_incompatible_types(
unsupported_types: List[Any],
) -> None:
embds = strategies.create_embeddings(10, 10, unsupported_types)
with pytest.raises(ValueError) as e:
validate_embeddings(cast(Embeddings, normalize_embeddings(embds)))
assert (
"Expected embeddings to be a list of floats or ints, a list of lists, a numpy array, or a list of numpy arrays, got "
in str(e.value)
)
def test_0dim_embedding_validation() -> None:
embds: Embeddings = [np.array([])]
with pytest.raises(ValueError) as e:
validate_embeddings(embds)
assert (
"Expected each embedding in the embeddings to be a 1-dimensional numpy array with at least 1 int/float value. Got a 1-dimensional numpy array with no values at pos"
in str(e)
)
def test_no_op_compaction(client: ClientAPI) -> None:
create_isolated_database(client)
coll = client.create_collection(name="noop")
initial_version = get_collection_version(client, coll.name)
for batch in range(0, 5000, 100):
coll.delete(ids=[str(i) for i in range(batch, batch + 100)])
if not NOT_CLUSTER_ONLY:
wait_for_version_increase(
client, coll.name, initial_version, VERSION_INCREASE_WAIT_TIME
)
def test_add_then_purge(client: ClientAPI) -> None:
create_isolated_database(client)
record_count = 5000
batch_count = 100
coll = client.create_collection(name="add_then_purge")
witness_version = get_collection_version(client, coll.name)
# Add records and wait for compaction
for batch in range(0, record_count, batch_count):
record_id_vals = [i for i in range(batch, batch + batch_count)]
record_ids = [str(i) for i in record_id_vals]
coll.add(
ids=record_ids, embeddings=[[2 * i, 2 * i + 1] for i in record_id_vals]
)
if not NOT_CLUSTER_ONLY:
wait_for_version_increase(
client, coll.name, witness_version, VERSION_INCREASE_WAIT_TIME
)
# Purge records and wait for compaction
witness_version = get_collection_version(client, coll.name)
for batch in range(0, record_count, batch_count):
record_id_vals = [i for i in range(batch, batch + batch_count)]
record_ids = [str(i) for i in record_id_vals]
coll.delete(ids=record_ids)
if not NOT_CLUSTER_ONLY:
wait_for_version_increase(
client, coll.name, witness_version, VERSION_INCREASE_WAIT_TIME
)
# There should be no records left
assert len(coll.get()["ids"]) == 0
def test_encompassing_delete(client: ClientAPI) -> None:
create_isolated_database(client)
col = client.create_collection("encompassing_delete")
initial_version = get_collection_version(client, col.name)
id_start = 0
# Add and then Delete 6 records
ids = [str(i) for i in range(id_start, id_start + 6)]
embeddings = [[i * 1.0, i * 1.0] for i in range(id_start, id_start + 6)]
id_start = id_start + 6
col.add(ids=ids, embeddings=embeddings) # type: ignore[arg-type]
col.delete(ids=ids)
if not NOT_CLUSTER_ONLY:
wait_for_version_increase(
client, col.name, initial_version, VERSION_INCREASE_WAIT_TIME
)
initial_version = get_collection_version(client, col.name)
# Add and then delete and then add 16
len_to_add = 16
ids = [str(i) for i in range(id_start, id_start + len_to_add)]
embeddings = [[i * 1.0, i * 1.0] for i in range(id_start, id_start + len_to_add)]
col.add(ids=ids, embeddings=embeddings) # type: ignore[arg-type]
col.delete(ids=ids)
col.add(ids=ids, embeddings=embeddings) # type: ignore[arg-type]
if not NOT_CLUSTER_ONLY:
wait_for_version_increase(
client, col.name, initial_version, VERSION_INCREASE_WAIT_TIME
)
# Ensure we can get all
get_results = col.get()
assert len(get_results["ids"]) == len_to_add
for id in ids:
assert id in get_results["ids"]
| EmbeddingStateMachine |
python | facebookresearch__faiss | faiss/gpu/test/test_gpu_index.py | {
"start": 17882,
"end": 18952
} | class ____(unittest.TestCase):
@staticmethod
def eval_codec(q, xb):
codes = q.compute_codes(xb)
decoded = q.decode(codes)
return ((xb - decoded) ** 2).sum()
def subtest_gpu_encoding(self, ngpus):
"""check that the error is in the same as cpu."""
ds = datasets.SyntheticDataset(32, 1000, 1000, 0)
xt = ds.get_train()
xb = ds.get_database()
M = 4
nbits = 8
lsq = faiss.LocalSearchQuantizer(ds.d, M, nbits)
lsq.train(xt)
err_cpu = self.eval_codec(lsq, xb)
lsq = faiss.LocalSearchQuantizer(ds.d, M, nbits)
lsq.train(xt)
lsq.icm_encoder_factory = faiss.GpuIcmEncoderFactory(ngpus)
err_gpu = self.eval_codec(lsq, xb)
# 13804.411 vs 13814.794, 1 gpu
print(err_gpu, err_cpu)
self.assertLess(err_gpu, err_cpu * 1.05)
def test_one_gpu(self):
self.subtest_gpu_encoding(1)
def test_multiple_gpu(self):
ngpu = faiss.get_num_gpus()
self.subtest_gpu_encoding(ngpu)
| TestLSQIcmEncoder |
python | scrapy__scrapy | tests/test_exporters.py | {
"start": 18096,
"end": 21218
} | class ____(TestJsonLinesItemExporter):
_expected_nested = [TestJsonLinesItemExporter._expected_nested]
def _get_exporter(self, **kwargs):
return JsonItemExporter(self.output, **kwargs)
def _check_output(self):
exported = json.loads(to_unicode(self.output.getvalue().strip()))
assert exported == [ItemAdapter(self.i).asdict()]
def assertTwoItemsExported(self, item):
self.ie.start_exporting()
self.ie.export_item(item)
self.ie.export_item(item)
self.ie.finish_exporting()
del self.ie # See the first “del self.ie” in this file for context.
exported = json.loads(to_unicode(self.output.getvalue()))
assert exported == [ItemAdapter(item).asdict(), ItemAdapter(item).asdict()]
def test_two_items(self):
self.assertTwoItemsExported(self.i)
def test_two_dict_items(self):
self.assertTwoItemsExported(ItemAdapter(self.i).asdict())
def test_two_items_with_failure_between(self):
i1 = MyItem(name="Joseph\xa3", age="22")
i2 = MyItem(
name="Maria", age=1j
) # Invalid datetimes didn't consistently fail between Python versions
i3 = MyItem(name="Jesus", age="44")
self.ie.start_exporting()
self.ie.export_item(i1)
with pytest.raises(TypeError):
self.ie.export_item(i2)
self.ie.export_item(i3)
self.ie.finish_exporting()
exported = json.loads(to_unicode(self.output.getvalue()))
assert exported == [dict(i1), dict(i3)]
def test_nested_item(self):
i1 = self.item_class(name="Joseph\xa3", age="22")
i2 = self.item_class(name="Maria", age=i1)
i3 = self.item_class(name="Jesus", age=i2)
self.ie.start_exporting()
self.ie.export_item(i3)
self.ie.finish_exporting()
del self.ie # See the first “del self.ie” in this file for context.
exported = json.loads(to_unicode(self.output.getvalue()))
expected = {
"name": "Jesus",
"age": {"name": "Maria", "age": ItemAdapter(i1).asdict()},
}
assert exported == [expected]
def test_nested_dict_item(self):
i1 = {"name": "Joseph\xa3", "age": "22"}
i2 = self.item_class(name="Maria", age=i1)
i3 = {"name": "Jesus", "age": i2}
self.ie.start_exporting()
self.ie.export_item(i3)
self.ie.finish_exporting()
del self.ie # See the first “del self.ie” in this file for context.
exported = json.loads(to_unicode(self.output.getvalue()))
expected = {"name": "Jesus", "age": {"name": "Maria", "age": i1}}
assert exported == [expected]
def test_nonstring_types_item(self):
item = self._get_nonstring_types_item()
self.ie.start_exporting()
self.ie.export_item(item)
self.ie.finish_exporting()
del self.ie # See the first “del self.ie” in this file for context.
exported = json.loads(to_unicode(self.output.getvalue()))
item["time"] = str(item["time"])
assert exported == [item]
| TestJsonItemExporter |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 160349,
"end": 160984
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of CloseDiscussion"""
__schema__ = github_schema
__field_names__ = ("discussion_id", "reason", "client_mutation_id")
discussion_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="discussionId")
"""ID of the discussion to be closed."""
reason = sgqlc.types.Field(DiscussionCloseReason, graphql_name="reason")
"""The reason why the discussion is being closed."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| CloseDiscussionInput |
python | pandas-dev__pandas | asv_bench/benchmarks/frame_methods.py | {
"start": 13915,
"end": 14417
} | class ____:
params = [0, 1]
param_names = ["axis"]
def setup(self, axis):
self.df = DataFrame(np.random.randn(10000, 1000))
self.df.iloc[50:1000, 20:50] = np.nan
self.df.iloc[2000:3000] = np.nan
self.df.iloc[:, 60:70] = np.nan
self.df_mixed = self.df.copy()
self.df_mixed["foo"] = "bar"
def time_count(self, axis):
self.df.count(axis=axis)
def time_count_mixed_dtypes(self, axis):
self.df_mixed.count(axis=axis)
| Count |
python | pola-rs__polars | py-polars/src/polars/io/partition.py | {
"start": 13378,
"end": 16524
} | class ____(_SinkDirectory):
"""
Partitioning scheme to split parted dataframes.
This is a specialized version of :class:`PartitionByKey`. Where as
:class:`PartitionByKey` accepts data in any order, this scheme expects the input
data to be pre-grouped or pre-sorted. This scheme suffers a lot less overhead than
:class:`PartitionByKey`, but may not be always applicable.
Each new value of the key expressions starts a new partition, therefore repeating
the same value multiple times may overwrite previous partitions.
.. warning::
This functionality is currently considered **unstable**. It may be
changed at any point without it being considered a breaking change.
Parameters
----------
base_path
The base path for the output files.
Use the `mkdir` option on the `sink_*` methods to ensure directories in
the path are created.
file_path
A callback to register or modify the output path for each partition
relative to the `base_path`.The callback provides a
:class:`polars.io.partition.KeyedPartitionContext` that contains information
about the partition.
If no callback is given, it defaults to
`{ctx.keys.hive_dirs()}/{ctx.in_part_idx}.{EXT}`.
by
The expressions to partition by.
include_key : bool
Whether to include the key columns in the output files.
per_partition_sort_by
Columns or expressions to sort over within each partition.
Note that this might increase the memory consumption needed for each partition.
finish_callback
A callback that gets called when the query finishes successfully.
For parquet files, the callback is given a dataframe with metrics about all
files written files.
Examples
--------
Split a parquet file by a column `year` into CSV files:
>>> pl.scan_parquet("/path/to/file.parquet").sink_csv(
... pl.PartitionParted("./out/", by="year"),
... mkdir=True,
... ) # doctest: +SKIP
See Also
--------
PartitionMaxSize
PartitionByKey
polars.io.partition.KeyedPartitionContext
"""
def __init__(
self,
base_path: str | Path,
*,
file_path: Callable[[KeyedPartitionContext], Path | str | IO[bytes] | IO[str]]
| None = None,
by: str | Expr | Sequence[str | Expr] | Mapping[str, Expr],
include_key: bool = True,
per_partition_sort_by: str | Expr | Sequence[str | Expr] | None = None,
finish_callback: Callable[[DataFrame], None] | None = None,
) -> None:
issue_unstable_warning("partitioning strategies are considered unstable.")
super().__init__(
base_path=base_path,
file_path_provider=_cast_keyed_file_path_cb(file_path),
partition_by=by,
partition_keys_sorted=True,
include_keys=include_key,
per_partition_sort_by=per_partition_sort_by,
finish_callback=finish_callback,
)
# TODO: Add `kw_only=True` after 3.9 support dropped
@dataclass
| PartitionParted |
python | cython__cython | Cython/Debugger/libcython.py | {
"start": 45711,
"end": 46210
} | class ____(CythonCommand, libpython.PyExec, EvaluateOrExecuteCodeMixin):
"""
Execute Python code in the nearest Python or Cython frame.
"""
name = '-cy-exec'
command_class = gdb.COMMAND_STACK
completer_class = gdb.COMPLETE_NONE
@libpython.dont_suppress_errors
def invoke(self, expr, from_tty):
expr, input_type = self.readcode(expr)
executor = libpython.PythonCodeExecutor()
executor.xdecref(self.evalcode(expr, executor.Py_file_input))
| CyExec |
python | dask__distributed | distributed/pytest_resourceleaks.py | {
"start": 4407,
"end": 4987
} | class ____(ResourceChecker, name="fds"):
def measure(self) -> int:
# Note: WINDOWS constant doesn't work with `mypy --platform win32`
if sys.platform == "win32":
# Don't use num_handles(); you'll get tens of thousands of reported leaks
return 0
else:
return psutil.Process().num_fds()
def has_leak(self, before: int, after: int) -> bool:
return after > before
def format(self, before: int, after: int) -> str:
return f"leaked {after - before} file descriptor(s) ({before}->{after})"
| FDChecker |
python | walkccc__LeetCode | solutions/3256. Maximum Value Sum by Placing Three Rooks I/3256.py | {
"start": 0,
"end": 717
} | class ____:
def maximumValueSum(self, board: list[list[int]]) -> int:
rows = [heapq.nlargest(3, [(val, i, j)
for j, val in enumerate(row)])
for i, row in enumerate(board)]
cols = [heapq.nlargest(3, [(val, i, j)
for i, val in enumerate(col)])
for j, col in enumerate(zip(*board))]
topNine = heapq.nlargest(9,
set(itertools.chain(*rows)) &
set(itertools.chain(*cols)))
return max(
(val1 + val2 + val3 for
(val1, i1, j1),
(val2, i2, j2),
(val3, i3, j3) in (itertools.combinations(topNine, 3))
if len({i1, i2, i3}) == 3 and len({j1, j2, j3}) == 3))
| Solution |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/emr.py | {
"start": 38161,
"end": 42311
} | class ____(AwsBaseOperator[EmrHook]):
"""
Operator to terminate EMR JobFlows.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EmrTerminateJobFlowOperator`
:param job_flow_id: id of the JobFlow to terminate. (templated)
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param waiter_delay: Time (in seconds) to wait between two consecutive calls to check JobFlow status
:param waiter_max_attempts: The maximum number of times to poll for JobFlow status.
:param deferrable: If True, the operator will wait asynchronously for the crawl to complete.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
"""
aws_hook_class = EmrHook
template_fields: Sequence[str] = aws_template_fields(
"job_flow_id",
)
template_ext: Sequence[str] = ()
ui_color = "#f9c915"
operator_extra_links = (
EmrClusterLink(),
EmrLogsLink(),
)
def __init__(
self,
*,
job_flow_id: str,
waiter_delay: int = 60,
waiter_max_attempts: int = 20,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(**kwargs)
self.job_flow_id = job_flow_id
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.deferrable = deferrable
def execute(self, context: Context) -> None:
EmrClusterLink.persist(
context=context,
operator=self,
region_name=self.hook.conn_region_name,
aws_partition=self.hook.conn_partition,
job_flow_id=self.job_flow_id,
)
EmrLogsLink.persist(
context=context,
operator=self,
region_name=self.hook.conn_region_name,
aws_partition=self.hook.conn_partition,
job_flow_id=self.job_flow_id,
log_uri=get_log_uri(emr_client=self.hook.conn, job_flow_id=self.job_flow_id),
)
self.log.info("Terminating JobFlow %s", self.job_flow_id)
response = self.hook.conn.terminate_job_flows(JobFlowIds=[self.job_flow_id])
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise AirflowException(f"JobFlow termination failed: {response}")
self.log.info("Terminating JobFlow with id %s", self.job_flow_id)
if self.deferrable:
self.defer(
trigger=EmrTerminateJobFlowTrigger(
job_flow_id=self.job_flow_id,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
# timeout is set to ensure that if a trigger dies, the timeout does not restart
# 60 seconds is added to allow the trigger to exit gracefully (i.e. yield TriggerEvent)
timeout=timedelta(seconds=self.waiter_max_attempts * self.waiter_delay + 60),
)
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> None:
validated_event = validate_execute_complete_event(event)
if validated_event["status"] != "success":
raise AirflowException(f"Error terminating JobFlow: {validated_event}")
self.log.info("Jobflow terminated successfully.")
| EmrTerminateJobFlowOperator |
python | joke2k__faker | faker/providers/job/tr_TR/__init__.py | {
"start": 42,
"end": 16176
} | class ____(BaseProvider):
"""
Source: https://www.turkcebilgi.com/meslekler_listesi
"""
jobs = [
"Acentacı",
"Acil durum yönetmeni",
"Adli tabip",
"Agronomist",
"Ağ yöneticisi",
"Aşçı",
"Aşçıbaşı",
"Ahşap tekne yapımcısı",
"Aile hekimi",
"Aktar",
"Akortçu",
"Aktör",
"Aktüer",
"Aktris",
"Akustikçi",
"Albay",
"Ambalajcı",
"Ambarcı",
"Ambulans şoförü",
"Amiral",
"Anahtarcı",
"Anestezi uzmanı",
"Anestezi teknikeri",
"Animatör",
"Antika satıcısı",
"Antropolog",
"Apartman yöneticisi",
"Araba satıcısı",
"Araba yıkayıcısı",
"Arabacı",
"Arabulucu",
"Araştırmacı",
"Arıcı",
"Arkeolog",
"Armatör",
"Arpist",
"Arşivci",
"Artist",
"Asansörcü",
"Asistan",
"Asker",
"Astrofizikçi",
"Astrolog",
"Astronom",
"Astronot",
"Astsubay",
"Atlet",
"Av bekçisi",
"Avcı",
"Avizeci",
"Avukat",
"Ayakçı (otogar, lokanta)",
"Ayakkabı boyacısı",
"Ayakkabı tamircisi",
"Ayakkabıcı",
"Ayı oynatıcısı",
"Araba tamircisi",
"Bacacı",
"Badanacı",
"Baharatçı",
"Bahçe bitkileri uzmanı",
"Bahçıvan",
"Bakan",
"Bakıcı",
"Bakırcı",
"Bakkal",
"Bakteriyolog",
"Balıkçı",
"Balerin",
"Balon pilotu",
"Bankacı",
"Banker",
"Barmen",
"Barmeyd",
"Basketbolcu",
"Başbakan",
"Başçavuş",
"Başdümenci",
"Başhemşire",
"Başkan",
"Başkomiser",
"Başpiskopos",
"Başrahip",
"Belediye başkanı",
"Belediye meclisi üyesi",
"Benzinci",
"Berber",
"Besteci",
"Biletçi",
"Bilgisayar mühendisi",
"Bilgisayar programcısı",
"Bilgisayar tamircisi",
"Bilim insanı",
"Bilirkişi",
"Binicilik",
"Biracı",
"Bisikletçi",
"Biyografi yazarı",
"Biyolog",
"Biyomedikal Mühendisi",
"Bobinajcı",
"Bombacı",
"Bomba imhacı",
"Borsacı",
"Borucu",
"Botanikçi",
"Boyacı",
"Bozacı",
"Böcekbilimci",
"Börekçi",
"Bulaşıkçı",
"Buldozer operatörü",
"Bütçe uzmanı",
"Büyükelçi",
"Besicilik",
"Bilgi İşlemci",
"Camcı",
"Cerrah",
"Celep",
"Cellat",
"Cost Control",
"Cillopçu",
"Cumhurbaşkanı",
"Çamaşırcı",
"Çantacı",
"Çarkçı",
"Çatıcı",
"Çaycı",
"Çevirmen",
"Çevrebilimci",
"Çevre mühendisi",
"Çeyizci",
"Çıkıkçı",
"Çıkrıkçı",
"Çiçekçi",
"Çiftçi",
"Çiftlik işletici",
"Çikolatacı",
"Çilingir",
"Çinici",
"Çitçi",
"Çoban",
"Çocuk doktoru",
"Çorapçı",
"Çöp işçisi",
"Çöpçü",
"Çırak",
"Çevik Kuvvet",
"Dadı",
"Daktilograf",
"Dalgıç",
"Damıtıcı",
"Danışman",
"Dansöz",
"Davulcu",
"Debbağ",
"Dedektif",
"Değirmen işçisi",
"Değirmenci",
"Demirci",
"Demiryolu işçisi",
"Denetçi",
"Denetleyici",
"Denizci",
"Depocu",
"Derici",
"Desinatör",
"Devlet memuru",
"Dilci",
"Dilenci",
"Diplomat",
"Diş hekimi",
"Diyetisyen",
"Dizgici",
"Doğalgazcı",
"Doğramacı",
"Doğum uzmanı",
"Dok işçisi",
"Dokumacı",
"Doktor",
"Dondurmacı",
"Dökümcü",
"Döşemeci",
"Dövizci",
"Dublajcı",
"Duvarcı",
"Dümenci",
"Diş teknisyeni",
"Ebe",
"Eczacı",
"Eczacı kalfası",
"Editör",
"Eğitimci",
"Eğitmen",
"Ekonomist",
"Elektrik mühendisi",
"Elektronik mühendisi",
"Elektrik-Elektronik mühendisi",
"Elektronik ve Haberleşme mühendisi",
"Elektrikçi",
"Eleştirmen",
"Embriyolog",
"Emlakçı",
"Emniyet amiri",
"Emniyet genel müdürü",
"Endüstri mühendisi",
"Endüstri sistemleri mühendisi",
"Enstrüman imalatçısı",
"Ergonomist",
"Eskici",
"Esnaf",
"Estetisyen",
"Etolojist",
"Etimolog",
"Etnolog",
"Ev hanımı",
"Fabrika işçisi",
"Fahişe",
"Falcı",
"Fermantasyon işçisi",
"Fıçıcı",
"Fırıncı",
"Figüran",
"Film yapımcısı",
"Film yönetmeni",
"Filozof",
"Finansör",
"Fizikçi",
"Fizyonomist",
"Fizyoterapist",
"Acil tıp teknisyeni",
"Fon yöneticisi",
"Forklift operatörü",
"Fotoğrafçı",
"Futbolcu",
"Gardiyan",
"Galerici",
"Garson",
"Gazete dağıtıcısı",
"Gazete satıcısı",
"Gazeteci",
"Gelir uzmanı",
"Gelir uzman yardımcısı",
"Gemici",
"General",
"Genetik mühendisi",
"Geyşa",
"Gezgin",
"Gezici vaiz",
"Gıda mühendisi",
"Gitarist",
"Gondolcu",
"Gökbilimci",
"Göz doktoru",
"Gözetmen",
"Gözlükçü",
"Grafiker",
"Gramer uzmanı",
"Greyder operatörü",
"Guru",
"Güfteci",
"Gümrük memuru",
"Gümrük müşaviri",
"Gümrük müşavir yardımcısı",
"Gümrük uzmanı",
"Gündelikçi",
"Güzellik uzmanı",
"Haberci",
"Haddeci",
"Haham",
"Hakem",
"Halıcı",
"Halkbilimci",
"Hamal",
"Hamamcı",
"Hamurkâr",
"Hareket memuru",
"Haritacı",
"Harita mühendisi",
"Hastabakıcı",
"Hattat",
"Hava trafikçisi",
"Havacı",
"Haydut",
"Hayvan bakıcısı",
"Hayvan terbiyecisi",
"Hemşire",
"Hesap uzmanı",
"Heykeltıraş",
"Hırdavatçı",
"Hırsız",
"Hidrolikçi",
"Hizmetçi",
"Hokkabaz",
"Host",
"Hostes",
"Hukukçu",
"Hurdacı",
"İcra memuru",
"İç mimar",
"İğneci",
"İhracatçı",
"İktisatçı",
"İlahiyatçı",
"İllüzyonist",
"İmam",
"İnsan kaynakları uzmanı",
"İnşaat mühendisi",
"İnşaatçı",
"İpçi",
"İplikçi",
"İstatistikçi",
"İstihkâmcı",
"İşaretçi",
"İşçi",
"İşletmeci",
"İşletme mühendisi",
"İşportacı",
"İş ve Uğraşı Terapisti",
"İtfaiyeci",
"İthalatçı",
"Jeofizik mühendisi",
"Jeoloji mühendisi",
"Jeolog",
"Jeomorfolog",
"Jinekolog",
"Jimnastikçi",
"Jokey",
"Kabin görevlisi",
"Kabuk soyucusu",
"Kadın berberi",
"Kadın terzisi",
"Kâğıtçı",
"Kahveci",
"Kâhya",
"Kalaycı",
"Kalıpçı",
"Kaloriferci",
"Kamarot",
"Kameraman",
"Kamyoncu",
"Kapı satıcısı",
"Kapıcı",
"Kaplamacı",
"Kaportacı",
"Kaptan",
"Kardinal",
"Kardiyolog",
"Karikatürist",
"Karoserci",
"Karpuzcu",
"Kasap",
"Kasiyer",
"Kat görevlisi",
"Kâtip",
"Kayıkçı",
"Kaymakam",
"Kaynakçı",
"Kazıcı",
"Kebapçı",
"Kemancı",
"Kesimci",
"Keskin Nişancı",
"Kırtasiyeci",
"Kimyager",
"Kimya mühendisi",
"Kitapçı",
"Klarnetçi",
"Koleksiyoncu",
"Komedyen",
"Komisyoncu",
"Komiser",
"Konserveci",
"Konsolos",
"Konsomatris",
"Kontrolör",
"Konveyör operatörü",
"Kopyalayıcı",
"Koreograf",
"Korgeneral",
"Koramiral",
"Korsan",
"Koruma görevlisi",
"Komiser",
"Komiser yardımcısı",
"Kozmolog",
"Köfteci",
"Kömürcü",
"Köpek eğiticisi",
"Köşe yazarı",
"Kuaför",
"Kuşçu",
"Kumarbaz",
"Kumaşçı",
"Kumcu",
"Kuru temizlemeci",
"Kuruyemişçi",
"Kurye",
"Kuşbilimci",
"Kuyumcu",
"Kürkçü",
"Kütüphaneci",
"Krupiye",
"Laborant",
"Laboratuvar işçisi",
"Lahmacuncu",
"Lehimci",
"Levazımcı",
"Lobici",
"Lokantacı",
"Lokomotifçi",
"Lostromo",
"Lostracı",
"Lokman",
"Madenci",
"Makasçı",
"Makastar",
"Maketçi",
"Makinist",
"Makine mühendisi",
"Makine zabiti",
"Makyajcı",
"Mali hizmetler uzmanı",
"Manastır baş rahibesi",
"Manav",
"Manifaturacı",
"Manikürcü",
"Manken",
"Marangoz",
"Masör",
"Masöz",
"Matador",
"Matbaacı",
"Matematikçi",
"Matkapçı",
"Medya Planlama Uzmanı",
"Memur",
"Menajer",
"Mermerci",
"Metalurji mühendisi",
"Meteoroloji uzmanı",
"Metin yazarı",
"Mevsimlik işçi",
"Meydancı",
"Meyhaneci",
"Mezarcı",
"Midyeci",
"Mikrobiyolog",
"Milletvekili",
"Mimar",
"Misyoner",
"Mobilyacı",
"Modacı",
"Model",
"Modelci",
"Modelist",
"Montajcı",
"Montör",
"Motor tamircisi",
"Motorcu",
"Muhabbet tellalı",
"Muhabir",
"Muhafız",
"Muhasebeci",
"Muhtar",
"Mumyalayıcı",
"Muzcu",
"Mübaşir",
"Müdür",
"Müezzin",
"Müfettiş",
"Müşavir",
"Mühendis",
"Müneccim",
"Mürebbiye",
"Müsteşar",
"Müteahhit",
"Mütercim",
"Müze müdürü",
"Müzik yönetmeni",
"Müzisyen",
"Nalıncı",
"Nakışçı",
"Nakliyeci",
"Nalbant",
"Nalbur",
"Noter",
"Obuacı",
"Ocakçı",
"Odacı",
"Oduncu",
"Okçu",
"Okul müdürü",
"Okutman",
"Operatör",
"Opera sanatçısı",
"Orgcu",
"Orgeneral",
"Orman mühendisi",
"Ornitolog",
"Otelci",
"Oto elektrikçisi",
"Oto lastik tamircisi",
"Oto tamircisi",
"Oto yedek parçacı",
"Overlokçu",
"Oymacı",
"Oyuncu",
"Oyun hostesi",
"Oyun yazarı",
"Oyuncakçı",
"Öğretmen",
"Öğretim elemanı",
"Öğretim görevlisi",
"Öğretim üyesi",
"Örmeci",
"Ön muhasebeci",
"Ön muhasebe sorumlusu",
"Ön muhasebe yardımcı elemanı",
"Ön büro elemanı",
"Özel şoför",
"Paketleyici",
"Palyaço",
"Pandomimci",
"Pansiyoncu",
"Pansumancı",
"Papa",
"Papaz",
"Paralı asker",
"Park bekçisi",
"Pastörizör",
"Patolog",
"Peçeteci",
"Pencereci",
"Perukçu",
"Peyzaj mimarı",
"Peyzaj teknikeri",
"Pideci",
"Pilavcı",
"Pilot",
"Piskopos",
"Piyade",
"Piyango satıcısı",
"Piyanist",
"Polis memuru",
"Polis şefi",
"Polisajcı",
"Pompacı",
"Postacı",
"Profesör",
"Proktolog",
"Protokol görevlisi",
"Psikiyatr",
"Psikolog",
"Psikolojik danışmanlık ve rehberlik",
"Paramedik",
"Radyolog",
"Redaktör",
"Rehber",
"Rejisör",
"Reklamcı",
"Rektör",
"Rektör yardımcısı",
"Remayözcü",
"Ressam",
"Resepsiyon memuru",
"Rot balansçı",
"Radyoloji teknisyeni/teknikeri",
"Saat tamircisi",
"Saatçi",
"Sağlık teknisyeni",
"Sahil koruma",
"Saksofoncu",
"Salepçi",
"Sanat yönetmeni",
"Sanayici",
"Sansürcü",
"Santral memuru",
"Saraç",
"Sarraf",
"Satış elemanı",
"Savcı",
"Saz şairi",
"Sekreter",
"Senarist",
"Sepetçi",
"Serbest muhasebeci mali müşavir",
"Ses teknisyeni",
"Seyis",
"Sınırlı baş makinist",
"Sicil memuru",
"Sigortacı",
"Sihirbaz",
"Silahçı",
"Silindir operatörü",
"Simitçi",
"Simyacı",
"Sistem mühendisi",
"Sistem yöneticisi",
"Siyasetçi",
"Soğuk demirci",
"Sokak çalgıcısı",
"Sokak satıcısı",
"Son ütücü",
"Sorgu hâkimi",
"Sosyal hizmet uzmanı",
"Sosyolog",
"Spiker",
"Stenograf",
"Stilist",
"Striptizci",
"Su tesisatçısı",
"Subay",
"Sucu",
"Suflör",
"Sulh hâkimi",
"Sunucu",
"Susuz araç yıkama",
"Sünnetçi",
"Sürveyan",
"Sütanne",
"Sütçü",
"Şahinci",
"Şair",
"Şapel papazı",
"Şapkacı",
"Şarap üreticisi",
"Şarkıcı",
"Şarkı sözü yazarı",
"Şarküter",
"Şekerci",
"Şemsiyeci",
"Şifre çözümleyici",
"Şimşirci",
"Şoför",
"Tabakçı",
"Tabelacı",
"Tahsildar",
"Taksici",
"Tarım işçisi",
"Tarihçi",
"Tasarımcı",
"Taşçı",
"Taşlayıcı",
"Tatlıcı",
"Tavukçu",
"Tayfa",
"Tefeci",
"Teğmen",
"Tekniker",
"Teknisyen",
"Teknoloji uzmani",
"Telefon operatörü",
"Telekız",
"Televizyon tamircisi",
"Tellal",
"Temizlikçi",
"Temsilci",
"Terapist",
"Tercüman",
"Terzi",
"Tesgahtar",
"Tesisatçı",
"Tesviyeci",
"Test mühendisi",
"Test pilotu",
"Teşrifatçı",
"Tiyatro yönetmeni",
"Tombalacı",
"Topçu",
"Tornacı",
"Turizmci",
"Tuğgeneral",
"Tuhafiyeci",
"Turşucu",
"Tuzcu",
"Tümamiral",
"Tümgeneral",
"Uçuş teknisyeni",
"Ulaşım sorumlusu",
"Ustabaşı",
"Uydu antenci",
"Uzay mühendisi",
"Uzay bilimcisi",
"Uzman Jandarma",
"Uzman Çavuş",
"Üretici",
"Ürolog",
"Ütücü",
"Vaiz",
"Vali",
"Vergi denetmeni",
"Vergi müfettişi",
"Vergi tahakkuk memuru",
"Veritabanı yöneticisi",
"Veri hazırlama ve kontrol işletmeni",
"Vestiyerci",
"Veteriner hekim",
"Veteriner sağlık teknikeri",
"Veteriner sağlık teknisyeni",
"Veznedar",
"Video editörü",
"Vinç operatörü",
"Vitrinci",
"Viyolonselci",
"Yarbay",
"Yardımcı hakem",
"Yardımcı hizmetli",
"Yardımcı pilot",
"Yargıç",
"Yatırım uzmanı",
"Yayıncı",
"Yazar",
"Yazı işleri müdürü",
"Yazılım mühendisi",
"Yelkenci",
"Yeminli mali müşavir",
"Yeminli tercüman",
"Yer gösterici",
"Yer teknisyeni",
"Yerölçmeci",
"Yoğurtçu",
"Yol bekçisi",
"Yorgancı",
"Yorumcu",
"Yönetici",
"Yüzücü",
"Yönetmen",
]
| Provider |
python | python-openxml__python-docx | src/docx/oxml/styles.py | {
"start": 2263,
"end": 3256
} | class ____(BaseOxmlElement):
"""``<w:lsdException>`` element, defining override visibility behaviors for a named
latent style."""
locked = OptionalAttribute("w:locked", ST_OnOff)
name = RequiredAttribute("w:name", ST_String)
qFormat = OptionalAttribute("w:qFormat", ST_OnOff)
semiHidden = OptionalAttribute("w:semiHidden", ST_OnOff)
uiPriority = OptionalAttribute("w:uiPriority", ST_DecimalNumber)
unhideWhenUsed = OptionalAttribute("w:unhideWhenUsed", ST_OnOff)
def delete(self):
"""Remove this `w:lsdException` element from the XML document."""
self.getparent().remove(self)
def on_off_prop(self, attr_name):
"""Return the boolean value of the attribute having `attr_name`, or |None| if
not present."""
return getattr(self, attr_name)
def set_on_off_prop(self, attr_name, value):
"""Set the on/off attribute having `attr_name` to `value`."""
setattr(self, attr_name, value)
| CT_LsdException |
python | getsentry__sentry | src/sentry/migrations/0980_integrations_json_field.py | {
"start": 244,
"end": 2356
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "0979_add_apiapplication_version"),
]
operations = [
migrations.SeparateDatabaseAndState(
database_operations=[
mod.to_jsonb("sentry_docintegration", "metadata"),
mod.to_jsonb("sentry_integration", "metadata"),
mod.to_jsonb("sentry_organizationintegration", "config"),
],
state_operations=[
migrations.AlterField(
model_name="docintegration",
name="metadata",
field=models.JSONField(default=dict, null=True),
),
migrations.AlterField(
model_name="integration",
name="metadata",
field=models.JSONField(default=dict),
),
migrations.AlterField(
model_name="organizationintegration",
name="config",
field=models.JSONField(default=dict),
),
],
)
]
| Migration |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 875965,
"end": 876176
} | class ____(VegaLiteSchema):
"""Position2Def schema wrapper."""
_schema = {"$ref": "#/definitions/Position2Def"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| Position2Def |
python | PrefectHQ__prefect | src/integrations/prefect-aws/prefect_aws/lambda_function.py | {
"start": 1441,
"end": 8292
} | class ____(Block):
"""Invoke a Lambda function. This block is part of the prefect-aws
collection. Install prefect-aws with `pip install prefect-aws` to use this
block.
Attributes:
function_name: The name, ARN, or partial ARN of the Lambda function to
run. This must be the name of a function that is already deployed
to AWS Lambda.
qualifier: The version or alias of the Lambda function to use when
invoked. If not specified, the latest (unqualified) version of the
Lambda function will be used.
aws_credentials: The AWS credentials to use to connect to AWS Lambda
with a default factory of AwsCredentials.
"""
_block_type_name = "Lambda Function"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/d74b16fe84ce626345adf235a47008fea2869a60-225x225.png" # noqa
_documentation_url = "https://docs.prefect.io/integrations/prefect-aws" # noqa
function_name: str = Field(
title="Function Name",
description=(
"The name, ARN, or partial ARN of the Lambda function to run. This"
" must be the name of a function that is already deployed to AWS"
" Lambda."
),
)
qualifier: Optional[str] = Field(
default=None,
title="Qualifier",
description=(
"The version or alias of the Lambda function to use when invoked. "
"If not specified, the latest (unqualified) version of the Lambda "
"function will be used."
),
)
aws_credentials: AwsCredentials = Field(
title="AWS Credentials",
default_factory=AwsCredentials,
description="The AWS credentials to invoke the Lambda with.",
)
def _get_lambda_client(self):
"""
Retrieve a boto3 session and Lambda client
"""
boto_session = self.aws_credentials.get_boto3_session()
lambda_client = boto_session.client("lambda")
return lambda_client
async def ainvoke(
self,
payload: Optional[dict] = None,
invocation_type: Literal[
"RequestResponse", "Event", "DryRun"
] = "RequestResponse",
tail: bool = False,
client_context: Optional[dict] = None,
) -> dict:
"""
Asynchronously invoke the Lambda function with the given payload.
Args:
payload: The payload to send to the Lambda function.
invocation_type: The invocation type of the Lambda function. This
can be one of "RequestResponse", "Event", or "DryRun". Uses
"RequestResponse" by default.
tail: If True, the response will include the base64-encoded last 4
KB of log data produced by the Lambda function.
client_context: The client context to send to the Lambda function.
Limited to 3583 bytes.
Returns:
The response from the Lambda function.
Examples:
```python
from prefect import flow
from prefect_aws.lambda_function import LambdaFunction
from prefect_aws.credentials import AwsCredentials
@flow
async def example_flow():
credentials = AwsCredentials()
lambda_function = LambdaFunction(
function_name="test_lambda_function",
aws_credentials=credentials,
)
response = await lambda_function.ainvoke(
payload={"foo": "bar"},
invocation_type="RequestResponse",
)
return response["Payload"].read()
```
"""
# Add invocation arguments
kwargs: dict[str, Any] = dict(FunctionName=self.function_name)
if payload:
kwargs["Payload"] = to_json(payload)
# Let boto handle invalid invocation types
kwargs["InvocationType"] = invocation_type
if self.qualifier is not None:
kwargs["Qualifier"] = self.qualifier
if tail:
kwargs["LogType"] = "Tail"
if client_context is not None:
# For some reason this is string, but payload is bytes
kwargs["ClientContext"] = json.dumps(client_context)
# Get client and invoke
lambda_client = await run_sync_in_worker_thread(self._get_lambda_client)
return await run_sync_in_worker_thread(lambda_client.invoke, **kwargs)
@async_dispatch(ainvoke)
def invoke(
self,
payload: Optional[dict] = None,
invocation_type: Literal[
"RequestResponse", "Event", "DryRun"
] = "RequestResponse",
tail: bool = False,
client_context: Optional[dict] = None,
) -> dict:
"""
Invoke the Lambda function with the given payload.
Args:
payload: The payload to send to the Lambda function.
invocation_type: The invocation type of the Lambda function. This
can be one of "RequestResponse", "Event", or "DryRun". Uses
"RequestResponse" by default.
tail: If True, the response will include the base64-encoded last 4
KB of log data produced by the Lambda function.
client_context: The client context to send to the Lambda function.
Limited to 3583 bytes.
Returns:
The response from the Lambda function.
Examples:
```python
from prefect_aws.lambda_function import LambdaFunction
from prefect_aws.credentials import AwsCredentials
credentials = AwsCredentials()
lambda_function = LambdaFunction(
function_name="test_lambda_function",
aws_credentials=credentials,
)
response = lambda_function.invoke(
payload={"foo": "bar"},
invocation_type="RequestResponse",
)
response["Payload"].read()
```
"""
# Add invocation arguments
kwargs: dict[str, Any] = dict(FunctionName=self.function_name)
if payload:
kwargs["Payload"] = to_json(payload)
# Let boto handle invalid invocation types
kwargs["InvocationType"] = invocation_type
if self.qualifier is not None:
kwargs["Qualifier"] = self.qualifier
if tail:
kwargs["LogType"] = "Tail"
if client_context is not None:
# For some reason this is string, but payload is bytes
kwargs["ClientContext"] = json.dumps(client_context)
# Get client and invoke
lambda_client = self._get_lambda_client()
return lambda_client.invoke(**kwargs)
| LambdaFunction |
python | huggingface__transformers | src/transformers/processing_utils.py | {
"start": 2755,
"end": 4359
} | class ____(dict):
"""
Lazy dictionary to avoid circular imports.
The mapping names are only imported when accessed.
"""
_MAPPING_NAMES = {
"image_processor": ("transformers.models.auto.image_processing_auto", "AutoImageProcessor"),
"video_processor": ("transformers.models.auto.video_processing_auto", "AutoVideoProcessor"),
"feature_extractor": ("transformers.models.auto.feature_extraction_auto", "AutoFeatureExtractor"),
"audio_processor": ("transformers.models.auto.feature_extraction_auto", "AutoFeatureExtractor"),
"tokenizer": ("transformers.models.auto.tokenization_auto", "AutoTokenizer"),
}
def __getitem__(self, key):
if key not in self._MAPPING_NAMES:
raise KeyError(key)
module_name, attr_name = self._MAPPING_NAMES[key]
module = __import__(module_name, fromlist=[attr_name])
return getattr(module, attr_name)
def __contains__(self, key):
return key in self._MAPPING_NAMES
def keys(self):
return self._MAPPING_NAMES.keys()
MODALITY_TO_AUTOPROCESSOR_MAPPING = _LazyAutoProcessorMapping()
MODALITY_TO_BASE_CLASS_MAPPING = {
"audio_tokenizer": "DacModel",
"audio_processor": "FeatureExtractionMixin",
"tokenizer": ("PreTrainedTokenizerBase", "MistralCommonBackend"),
"feature_extractor": "FeatureExtractionMixin",
"image_processor": "ImageProcessingMixin",
"video_processor": "BaseVideoProcessor",
}
if sys.version_info >= (3, 11):
Unpack = typing.Unpack
else:
Unpack = typing_extensions.Unpack
| _LazyAutoProcessorMapping |
python | ansible__ansible | lib/ansible/parsing/vault/__init__.py | {
"start": 14003,
"end": 15600
} | class ____(VaultSecret):
def __init__(self, filename=None, encoding=None, loader=None):
super(FileVaultSecret, self).__init__()
self.filename = filename
self.loader = loader
self.encoding = encoding or 'utf8'
# We could load from file here, but that is eventually a pain to test
self._bytes = None
self._text = None
@property
def bytes(self):
if self._bytes:
return self._bytes
if self._text:
return self._text.encode(self.encoding)
return None
def load(self):
self._bytes = self._read_file(self.filename)
def _read_file(self, filename):
"""
Read a vault password from a file or if executable, execute the script and
retrieve password from STDOUT
"""
# TODO: replace with use of self.loader
try:
with open(filename, "rb") as f:
vault_pass = f.read().strip()
except OSError as ex:
raise AnsibleError(f"Could not read vault password file {filename!r}.") from ex
b_vault_data, dummy = self.loader._decrypt_if_vault_data(vault_pass)
vault_pass = b_vault_data.strip(b'\r\n')
verify_secret_is_not_empty(vault_pass,
msg='Invalid vault password was provided from file (%s)' % filename)
return vault_pass
def __repr__(self):
if self.filename:
return "%s(filename='%s')" % (self.__class__.__name__, self.filename)
return "%s()" % (self.__class__.__name__)
| FileVaultSecret |
python | streamlit__streamlit | lib/tests/streamlit/components_test.py | {
"start": 27021,
"end": 28065
} | class ____(unittest.TestCase):
"""Test alternative component registry initialization."""
class AlternativeComponentRegistry(BaseComponentRegistry):
def __init__(self):
"""Dummy implementation"""
pass
def register_component(self, component: BaseCustomComponent) -> None:
return None
def get_component_path(self, name: str) -> str | None:
return None
def get_module_name(self, name: str) -> str | None:
return None
def get_component(self, name: str) -> BaseCustomComponent | None:
return None
def get_components(self) -> list[BaseCustomComponent]:
return []
def setUp(self) -> None:
super().setUp()
registry = AlternativeComponentRegistryTest.AlternativeComponentRegistry()
assert ComponentRegistry.instance() == registry
assert isinstance(
registry, AlternativeComponentRegistryTest.AlternativeComponentRegistry
)
| AlternativeComponentRegistryTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 943410,
"end": 943830
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("RepositoryRulesetBypassActor", graphql_name="node")
"""The item at the end of the edge."""
| RepositoryRulesetBypassActorEdge |
python | openai__gym | gym/envs/mujoco/walker2d_v3.py | {
"start": 249,
"end": 4942
} | class ____(MuJocoPyEnv, utils.EzPickle):
metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
],
"render_fps": 125,
}
def __init__(
self,
xml_file="walker2d.xml",
forward_reward_weight=1.0,
ctrl_cost_weight=1e-3,
healthy_reward=1.0,
terminate_when_unhealthy=True,
healthy_z_range=(0.8, 2.0),
healthy_angle_range=(-1.0, 1.0),
reset_noise_scale=5e-3,
exclude_current_positions_from_observation=True,
**kwargs
):
utils.EzPickle.__init__(
self,
xml_file,
forward_reward_weight,
ctrl_cost_weight,
healthy_reward,
terminate_when_unhealthy,
healthy_z_range,
healthy_angle_range,
reset_noise_scale,
exclude_current_positions_from_observation,
**kwargs
)
self._forward_reward_weight = forward_reward_weight
self._ctrl_cost_weight = ctrl_cost_weight
self._healthy_reward = healthy_reward
self._terminate_when_unhealthy = terminate_when_unhealthy
self._healthy_z_range = healthy_z_range
self._healthy_angle_range = healthy_angle_range
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation
)
if exclude_current_positions_from_observation:
observation_space = Box(
low=-np.inf, high=np.inf, shape=(17,), dtype=np.float64
)
else:
observation_space = Box(
low=-np.inf, high=np.inf, shape=(18,), dtype=np.float64
)
MuJocoPyEnv.__init__(
self, xml_file, 4, observation_space=observation_space, **kwargs
)
@property
def healthy_reward(self):
return (
float(self.is_healthy or self._terminate_when_unhealthy)
* self._healthy_reward
)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
@property
def is_healthy(self):
z, angle = self.sim.data.qpos[1:3]
min_z, max_z = self._healthy_z_range
min_angle, max_angle = self._healthy_angle_range
healthy_z = min_z < z < max_z
healthy_angle = min_angle < angle < max_angle
is_healthy = healthy_z and healthy_angle
return is_healthy
@property
def terminated(self):
terminated = not self.is_healthy if self._terminate_when_unhealthy else False
return terminated
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = np.clip(self.sim.data.qvel.flat.copy(), -10, 10)
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def step(self, action):
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = (x_position_after - x_position_before) / self.dt
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
healthy_reward = self.healthy_reward
rewards = forward_reward + healthy_reward
costs = ctrl_cost
observation = self._get_obs()
reward = rewards - costs
terminated = self.terminated
info = {
"x_position": x_position_after,
"x_velocity": x_velocity,
}
if self.render_mode == "human":
self.render()
return observation, reward, terminated, False, info
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq
)
qvel = self.init_qvel + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nv
)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
assert self.viewer is not None
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
| Walker2dEnv |
python | Pylons__pyramid | src/pyramid/testing.py | {
"start": 7938,
"end": 8820
} | class ____(dict):
created = None
new = True
def changed(self):
pass
def invalidate(self):
self.clear()
def flash(self, msg, queue='', allow_duplicate=True):
storage = self.setdefault('_f_' + queue, [])
if allow_duplicate or (msg not in storage):
storage.append(msg)
def pop_flash(self, queue=''):
storage = self.pop('_f_' + queue, [])
return storage
def peek_flash(self, queue=''):
storage = self.get('_f_' + queue, [])
return storage
def new_csrf_token(self):
token = '0123456789012345678901234567890123456789'
self['_csrft_'] = token
return token
def get_csrf_token(self):
token = self.get('_csrft_', None)
if token is None:
token = self.new_csrf_token()
return token
@implementer(IRequest)
| DummySession |
python | getsentry__sentry | tests/sentry/incidents/models/test_alert_rule.py | {
"start": 11201,
"end": 11326
} | class ____(AlertRuleTriggerActionActivateBaseTest, unittest.TestCase):
method = "resolve"
| AlertRuleTriggerActionResolveTest |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 535516,
"end": 535802
} | class ____(BatchRequest):
"""
Updates a batch of tasks.
Headers
Content type should be 'application/json-lines'.
"""
_service = "tasks"
_action = "update_batch"
_version = "2.23"
_batched_request_cls = UpdateRequest
| UpdateBatchRequest |
python | openai__openai-python | src/openai/types/realtime/realtime_tools_config_union.py | {
"start": 2033,
"end": 2481
} | class ____(BaseModel):
always: Optional[McpRequireApprovalMcpToolApprovalFilterAlways] = None
"""A filter object to specify which tools are allowed."""
never: Optional[McpRequireApprovalMcpToolApprovalFilterNever] = None
"""A filter object to specify which tools are allowed."""
McpRequireApproval: TypeAlias = Union[McpRequireApprovalMcpToolApprovalFilter, Literal["always", "never"], None]
| McpRequireApprovalMcpToolApprovalFilter |
python | scipy__scipy | scipy/integrate/tests/test_integrate.py | {
"start": 1834,
"end": 2999
} | class ____:
ode_class = None # Set in subclass.
def _do_problem(self, problem, integrator, method='adams'):
# ode has callback arguments in different order than odeint
def f(t, z):
return problem.f(z, t)
jac = None
if hasattr(problem, 'jac'):
def jac(t, z):
return problem.jac(z, t)
integrator_params = {}
if problem.lband is not None or problem.uband is not None:
integrator_params['uband'] = problem.uband
integrator_params['lband'] = problem.lband
ig = self.ode_class(f, jac)
ig.set_integrator(integrator,
atol=problem.atol/10,
rtol=problem.rtol/10,
method=method,
**integrator_params)
ig.set_initial_value(problem.z0, t=0.0)
z = ig.integrate(problem.stop_t)
assert_array_equal(z, ig.y)
assert_(ig.successful(), (problem, method))
assert_(ig.get_return_code() > 0, (problem, method))
assert_(problem.verify(array([z]), problem.stop_t), (problem, method))
| TestODEClass |
python | walkccc__LeetCode | solutions/2417. Closest Fair Integer/2417.py | {
"start": 0,
"end": 758
} | class ____:
def closestFair(self, n: int) -> int:
digitsCount = len(str(n))
return (self._getEvenDigits(n) if digitsCount % 2 == 0 else
self._getOddDigits(digitsCount))
def _getOddDigits(self, digitsCount: int) -> int:
zeros = (digitsCount + 1) // 2
ones = (digitsCount - 1) // 2
return int('1' + '0' * zeros + '1' * ones)
def _getEvenDigits(self, n: int) -> int:
digitsCount = len(str(n))
maxNum = int('1' + '0' * digitsCount)
for num in range(n, maxNum):
if self._isValidNum(num):
return num
return self._getOddDigits(digitsCount + 1)
def _isValidNum(self, num: int) -> bool:
count = 0
for c in str(num):
count += 1 if int(c) % 2 == 0 else -1
return count == 0
| Solution |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py | {
"start": 55058,
"end": 55564
} | class ____(BaseModel):
type: Literal["RecordSelector"]
extractor: Union[CustomRecordExtractor, DpathExtractor]
record_filter: Optional[Union[CustomRecordFilter, RecordFilter]] = Field(
None,
description="Responsible for filtering records to be emitted by the Source.",
title="Record Filter",
)
schema_normalization: Optional[SchemaNormalization] = SchemaNormalization.None_
parameters: Optional[Dict[str, Any]] = Field(None, alias="$parameters")
| RecordSelector |
python | keras-team__keras | keras/src/ops/math_test.py | {
"start": 55245,
"end": 57737
} | class ____(testing.TestCase):
def test_view_as_complex_basic(self):
real_imag = np.array([[1.0, 2.0], [3.0, 4.0]])
expected = np.array([1.0 + 2.0j, 3.0 + 4.0j], dtype=np.complex64)
result = kmath.view_as_complex(real_imag)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(standardize_dtype(result.dtype), expected.dtype)
self.assertAllClose(result, expected)
def test_view_as_real_basic(self):
complex_tensor = np.array([1 + 2j, 3 + 4j], dtype=np.complex64)
expected = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
result = kmath.view_as_real(complex_tensor)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(standardize_dtype(result.dtype), expected.dtype)
self.assertAllClose(result, expected)
def test_view_as_complex_invalid_shape(self):
bad_input = np.array([1.0, 2.0, 3.0]) # Last dimension not size 2
with self.assertRaisesRegex(
ValueError, "Last dimension of input must be size 2"
):
kmath.view_as_complex(bad_input)
def test_view_as_complex_symbolic_input(self):
x = KerasTensor(shape=(None, 2), dtype="float32")
result = kmath.view_as_complex(x)
self.assertEqual(result.shape, (None,))
self.assertEqual(standardize_dtype(result.dtype), "complex64")
def test_view_as_real_symbolic_input(self):
x = KerasTensor(shape=(None,), dtype="complex64")
result = kmath.view_as_real(x)
self.assertEqual(result.shape, (None, 2))
self.assertEqual(standardize_dtype(result.dtype), "float32")
def test_view_as_complex_multi_dimensional(self):
x = np.array([[[1.0, 2.0], [3.0, 4.0]]], dtype=np.float32)
expected = np.array([[1 + 2j, 3 + 4j]], dtype=np.complex64)
result = kmath.view_as_complex(x)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(standardize_dtype(result.dtype), expected.dtype)
self.assertAllClose(result, expected)
def test_view_as_real_multi_dimensional(self):
x = np.array([[1 + 2j, 3 + 4j]], dtype=np.complex64)
expected = np.array([[[1.0, 2.0], [3.0, 4.0]]], dtype=np.float32)
result = kmath.view_as_real(x)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(standardize_dtype(result.dtype), expected.dtype)
self.assertAllClose(result, expected)
| ViewAsComplexRealTest |
python | ray-project__ray | rllib/callbacks/tests/test_multicallback.py | {
"start": 129,
"end": 4870
} | class ____(unittest.TestCase):
"""A tests suite to test the `MultiCallback`."""
@classmethod
def setUp(cls) -> None:
ray.init()
@classmethod
def tearDown(cls) -> None:
ray.shutdown()
def test_multicallback_with_custom_callback_function(self):
"""Tests if callbacks in `MultiCallback` get executed.
This also tests, if multiple callbacks from different sources, i.e.
`callback_class` and `on_episode_step` run correctly.
"""
# Define two standard `RLlibCallback`.
class TestRLlibCallback1(RLlibCallback):
def on_episode_step(
self,
*,
episode,
env_runner=None,
metrics_logger=None,
env=None,
env_index,
rl_module=None,
worker=None,
base_env=None,
policies=None,
**kwargs
):
metrics_logger.log_value(
"callback_1", 1, reduce="mean", clear_on_reduce=True
)
class TestRLlibCallback2(RLlibCallback):
def on_episode_step(
self,
*,
episode,
env_runner=None,
metrics_logger=None,
env=None,
env_index,
rl_module=None,
worker=None,
base_env=None,
policies=None,
**kwargs
):
metrics_logger.log_value(
"callback_2", 2, reduce="mean", clear_on_reduce=True
)
# Define a custom callback function.
def custom_on_episode_step_callback(
episode,
env_runner=None,
metrics_logger=None,
env=None,
env_index=None,
rl_module=None,
worker=None,
base_env=None,
policies=None,
**kwargs
):
metrics_logger.log_value(
"custom_callback", 3, reduce="mean", clear_on_reduce=True
)
# Configure the algorithm.
config = (
PPOConfig()
.environment("CartPole-v1")
.api_stack(
enable_env_runner_and_connector_v2=True,
enable_rl_module_and_learner=True,
)
# Use the callbacks and callback function.
.callbacks(
callbacks_class=[TestRLlibCallback1, TestRLlibCallback2],
on_episode_step=custom_on_episode_step_callback,
)
)
# Build the algorithm. At this stage, callbacks get already validated.
algo = config.build()
# Run 10 training iteration and check, if the metrics defined in the
# callbacks made it into the results. Furthermore, check, if the values are correct.
for _ in range(10):
results = algo.train()
self.assertIn("callback_1", results["env_runners"])
self.assertIn("callback_2", results["env_runners"])
self.assertIn("custom_callback", results["env_runners"])
self.assertAlmostEqual(results["env_runners"]["callback_1"], 1)
self.assertAlmostEqual(results["env_runners"]["callback_2"], 2)
self.assertAlmostEqual(results["env_runners"]["custom_callback"], 3)
algo.stop()
def test_multicallback_validation_error(self):
"""Check, if the validation safeguard catches wrong `MultiCallback`s."""
with self.assertRaises(ValueError):
(
PPOConfig()
.environment("CartPole-v1")
.api_stack(
enable_env_runner_and_connector_v2=True,
enable_rl_module_and_learner=True,
)
# This is wrong b/c it needs callables.
.callbacks(callbacks_class=["TestRLlibCallback1", "TestRLlibCallback2"])
)
def test_single_callback_validation_error(self):
"""Tests if the validation safeguard catches wrong `RLlibCallback`s."""
with self.assertRaises(ValueError):
(
PPOConfig()
.environment("CartPole-v1")
.api_stack(
enable_env_runner_and_connector_v2=True,
enable_rl_module_and_learner=True,
)
# This is wrong b/c it needs callables.
.callbacks(callbacks_class="TestRLlibCallback")
)
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| TestMultiCallback |
python | keras-team__keras | keras/src/utils/dtype_utils_test.py | {
"start": 1936,
"end": 5986
} | class ____(test_case.TestCase):
def test_cast_to_common_dtype_float32_float64(self):
tensor1 = KerasTensor([1, 2, 3], dtype="float32")
tensor2 = KerasTensor([4, 5, 6], dtype="float64")
casted_tensors = dtype_utils.cast_to_common_dtype([tensor1, tensor2])
for tensor in casted_tensors:
self.assertEqual(tensor.dtype, "float64")
def test_cast_to_common_dtype_float16_float32_float64(self):
tensor1 = KerasTensor([1, 2, 3], dtype="float16")
tensor2 = KerasTensor([4, 5, 6], dtype="float32")
tensor3 = KerasTensor([7, 8, 9], dtype="float64")
casted_tensors = dtype_utils.cast_to_common_dtype(
[tensor1, tensor2, tensor3]
)
for tensor in casted_tensors:
self.assertEqual(tensor.dtype, "float64")
def test_cast_to_common_dtype_float16_int16_float32(self):
tensor1 = KerasTensor([1, 2, 3], dtype="float16")
tensor2 = KerasTensor([4, 5, 6], dtype="int16")
tensor3 = KerasTensor([7, 8, 9], dtype="float32")
casted_tensors = dtype_utils.cast_to_common_dtype(
[tensor1, tensor2, tensor3]
)
for tensor in casted_tensors:
self.assertEqual(tensor.dtype, "float32")
def test_cast_to_common_dtype_all_float32(self):
tensor1 = KerasTensor([1, 2, 3], dtype="float32")
tensor2 = KerasTensor([4, 5, 6], dtype="float32")
tensor3 = KerasTensor([7, 8, 9], dtype="float32")
casted_tensors = dtype_utils.cast_to_common_dtype(
[tensor1, tensor2, tensor3]
)
for tensor in casted_tensors:
self.assertEqual(tensor.dtype, "float32")
def test_cast_to_common_dtype_float16_bfloat16(self):
tensor1 = KerasTensor([1, 2, 3], dtype="float16")
tensor2 = KerasTensor([4, 5, 6], dtype="bfloat16")
casted_tensors = dtype_utils.cast_to_common_dtype([tensor1, tensor2])
for tensor in casted_tensors:
self.assertEqual(tensor.dtype, "float16")
def test_cast_to_common_dtype_float16_uint8(self):
tensor1 = KerasTensor([1, 2, 3], dtype="float16")
tensor2 = KerasTensor([4, 5, 6], dtype="uint8")
casted_tensors = dtype_utils.cast_to_common_dtype([tensor1, tensor2])
for tensor in casted_tensors:
self.assertEqual(tensor.dtype, "float16")
def test_cast_to_common_dtype_mixed_types(self):
tensor1 = KerasTensor([1, 2, 3], dtype="float32")
tensor2 = KerasTensor([4, 5, 6], dtype="int32")
tensor3 = KerasTensor([7, 8, 9], dtype="bool")
casted_tensors = dtype_utils.cast_to_common_dtype(
[tensor1, tensor2, tensor3]
)
for tensor in casted_tensors:
self.assertEqual(tensor.dtype, "float32")
def test_cast_to_common_dtype_no_float(self):
tensor1 = KerasTensor([1, 2, 3], dtype="int32")
tensor2 = KerasTensor([4, 5, 6], dtype="uint8")
casted_tensors = dtype_utils.cast_to_common_dtype([tensor1, tensor2])
self.assertEqual(casted_tensors[0].dtype, "int32")
self.assertEqual(casted_tensors[1].dtype, "uint8")
def test_cast_to_common_dtype_float16_bfloat16_promotion(self):
tensor1 = KerasTensor([4, 5, 6], dtype="bfloat16")
tensor2 = KerasTensor([1, 2, 3], dtype="float16")
casted_tensors = dtype_utils.cast_to_common_dtype([tensor1, tensor2])
for tensor in casted_tensors:
self.assertEqual(tensor.dtype, "float32")
# TODO failed AssertionError: 'float16' != 'float32'
# The order of the tensors matters in the current logic
# of the cast_to_common_dtype function
# def test_cast_to_common_dtype_bfloat16_float16_promotion(self):
# tensor1 = KerasTensor([1, 2, 3], dtype="float16")
# tensor2 = KerasTensor([4, 5, 6], dtype="bfloat16")
# casted_tensors = dtype_utils.cast_to_common_dtype([tensor1, tensor2])
# for tensor in casted_tensors:
# self.assertEqual(tensor.dtype, "float32")
| CastToCommonDtype |
python | tox-dev__tox | src/tox/session/env_select.py | {
"start": 5435,
"end": 21691
} | class ____:
def __init__(self, state: State) -> None:
# needs core to load the default tox environment list
# to load the package environments of a run environments we need the run environment builder
# to load labels we need core + the run environment
self.on_empty_fallback_py = True
self._warned_about: set[str] = set() #: shared set of skipped environments that were already warned about
self._state = state
self._defined_envs_: dict[str, _ToxEnvInfo] | None = None
self._pkg_env_counter: Counter[str] = Counter()
from tox.plugin.manager import MANAGER # noqa: PLC0415
self._manager = MANAGER
self._log_handler = self._state._options.log_handler # noqa: SLF001
self._journal = self._state._journal # noqa: SLF001
self._provision: tuple[bool, str] | None = None
self._state.conf.core.add_config("labels", dict[str, EnvList], {}, "core labels")
tox_env_filter_regex = getattr(state.conf.options, "skip_env", "").strip()
self._filter_re = re.compile(tox_env_filter_regex) if tox_env_filter_regex else None
@property
def _cli_envs(self) -> CliEnv | None:
return getattr(self._state.conf.options, "env", None)
def _collect_names(self) -> Iterator[tuple[Iterable[str], bool]]:
""":return: sources of tox environments defined with name and if is marked as target to run"""
if self._provision is not None: # pragma: no branch
yield (self._provision[1],), False
env_list, everything_active = self._state.conf.core["env_list"], False
if self._cli_envs is None or self._cli_envs.is_default_list:
yield env_list, True
elif self._cli_envs.is_all:
everything_active = True
else:
self._ensure_envs_valid()
yield self._cli_envs, True
yield self._state.conf, everything_active
label_envs = dict.fromkeys(chain.from_iterable(self._state.conf.core["labels"].values()))
if label_envs:
yield label_envs.keys(), False
def _ensure_envs_valid(self) -> None:
valid_factors = set(chain.from_iterable(env.split("-") for env in self._state.conf))
valid_factors.add(".pkg") # packaging factor
invalid_envs: dict[str, str | None] = {}
for env in self._cli_envs or []:
if env.startswith(".pkg_external"): # external package
continue
factors: dict[str, str | None] = dict.fromkeys(env.split("-"))
found_factors: set[str] = set()
for factor in factors:
if (
_DYNAMIC_ENV_FACTORS.fullmatch(factor)
or _PY_PRE_RELEASE_FACTOR.fullmatch(factor)
or factor in valid_factors
):
found_factors.add(factor)
else:
closest = get_close_matches(factor, valid_factors, n=1)
factors[factor] = closest[0] if closest else None
if set(factors) - found_factors:
invalid_envs[env] = (
None
if any(i is None for i in factors.values())
else "-".join(cast("Iterable[str]", factors.values()))
)
if invalid_envs:
msg = "provided environments not found in configuration file:\n"
first = True
for env, suggestion in invalid_envs.items():
if not first:
msg += "\n"
first = False
msg += env
if suggestion:
msg += f" - did you mean {suggestion}?"
raise HandledError(msg)
def _env_name_to_active(self) -> dict[str, bool]:
env_name_to_active_map = {}
for a_collection, is_active in self._collect_names():
for name in a_collection:
if name not in env_name_to_active_map:
env_name_to_active_map[name] = is_active
# for factor/label selection update the active flag
if (
not (getattr(self._state.conf.options, "labels", []) or getattr(self._state.conf.options, "factors", []))
# if no active environment is defined fallback to py
and self.on_empty_fallback_py
and not any(env_name_to_active_map.values())
):
env_name_to_active_map["py"] = True
return env_name_to_active_map
@property
def _defined_envs(self) -> dict[str, _ToxEnvInfo]: # noqa: C901, PLR0912
# The problem of classifying run/package environments:
# There can be two type of tox environments: run or package. Given a tox environment name there's no easy way to
# find out which it is. Intuitively, a run environment is any environment not used for packaging by another run
# environment. To find out what are the packaging environments for a run environment, you have to first
# construct it. This implies a two-phase solution: construct all environments and query their packaging
# environments. The run environments are the ones not marked as of packaging type. This requires being able to
# change tox environments types, if it was earlier discovered as a run environment and is marked as packaging,
# we need to redefine it. E.g., when it shows up in config as [testenv:.package] and afterward by a run env is
# marked as package_env.
if self._defined_envs_ is None: # noqa: PLR1702
self._defined_envs_ = {}
failed: dict[str, Exception] = {}
env_name_to_active = self._env_name_to_active()
for name, is_active in env_name_to_active.items():
if name in self._pkg_env_counter: # already marked as packaging, nothing to do here
continue
with self._log_handler.with_context(name):
run_env = self._build_run_env(name)
if run_env is None:
continue
self._defined_envs_[name] = _ToxEnvInfo(run_env, is_active)
pkg_name_type = run_env.get_package_env_types()
if pkg_name_type is not None:
# build package env and assign it, then register the run environment which can trigger generation
# of additional run environments
start_package_env_use_counter = self._pkg_env_counter.copy()
try:
run_env.package_env = self._build_pkg_env(pkg_name_type, name, env_name_to_active)
except Exception as exception: # noqa: BLE001
# if it's not a run environment, wait to see if ends up being a packaging one -> rollback
failed[name] = exception
for key in self._pkg_env_counter - start_package_env_use_counter:
del self._defined_envs_[key]
self._state.conf.clear_env(key)
self._pkg_env_counter = start_package_env_use_counter
del self._defined_envs_[name]
self._state.conf.clear_env(name)
else:
try:
for env in run_env.package_envs:
# check if any packaging envs are already run and remove them
other_env_info = self._defined_envs_.get(env.name)
if other_env_info is not None and isinstance(other_env_info.env, RunToxEnv):
del self._defined_envs_[env.name] # pragma: no cover
for _pkg_env in other_env_info.env.package_envs: # pragma: no cover
self._pkg_env_counter[_pkg_env.name] -= 1 # pragma: no cover
except Exception: # noqa: BLE001
assert self._defined_envs_[name].package_skip is not None # noqa: S101
failed_to_create = failed.keys() - self._defined_envs_.keys()
if failed_to_create:
raise failed[next(iter(failed_to_create))]
for name, count in self._pkg_env_counter.items():
if not count:
self._defined_envs_.pop(name) # pragma: no cover
# reorder to as defined rather as found
order = chain(env_name_to_active, (i for i in self._defined_envs_ if i not in env_name_to_active))
self._defined_envs_ = {name: self._defined_envs_[name] for name in order if name in self._defined_envs_}
self._finalize_config()
self._mark_active()
return self._defined_envs_
def _finalize_config(self) -> None:
assert self._defined_envs_ is not None # noqa: S101
for tox_env in self._defined_envs_.values():
tox_env.env.conf.mark_finalized()
self._state.conf.core.mark_finalized()
def _build_run_env(self, name: str) -> RunToxEnv | None:
if self._provision is not None and self._provision[0] is False and name == self._provision[1]:
# ignore provision env unless this is a provision run
return None
if self._provision is not None and self._provision[0] and name != self._provision[1]:
# ignore other envs when this is a provision run
return None
env_conf = self._state.conf.get_env(name, package=False)
desc = "the tox execute used to evaluate this environment"
env_conf.add_config(keys="runner", desc=desc, of_type=str, default=self._state.conf.options.default_runner)
runner = REGISTER.runner(cast("str", env_conf["runner"]))
journal = self._journal.get_env_journal(name)
args = ToxEnvCreateArgs(env_conf, self._state.conf.core, self._state.conf.options, journal, self._log_handler)
run_env = runner(args)
run_env.register_config()
self._manager.tox_add_env_config(env_conf, self._state)
return run_env
def _build_pkg_env(self, name_type: tuple[str, str], run_env_name: str, active: dict[str, bool]) -> PackageToxEnv:
name, core_type = name_type
with self._log_handler.with_context(name):
if run_env_name == name:
msg = f"{run_env_name} cannot self-package"
raise HandledError(msg)
missing_active = self._cli_envs is not None and self._cli_envs.is_all
try:
package_tox_env = self._get_package_env(core_type, name, active.get(name, missing_active))
self._pkg_env_counter[name] += 1
run_env: RunToxEnv = self._defined_envs_[run_env_name].env # type: ignore[index,assignment]
child_package_envs = package_tox_env.register_run_env(run_env)
try:
name_type = next(child_package_envs)
while True:
child_pkg_env = self._build_pkg_env(name_type, run_env_name, active)
self._pkg_env_counter[name_type[0]] += 1
name_type = child_package_envs.send(child_pkg_env)
except StopIteration:
pass
except Skip as exception:
assert self._defined_envs_ is not None # noqa: S101
self._defined_envs_[run_env_name].package_skip = (name_type[0], exception)
return package_tox_env
def _get_package_env(self, packager: str, name: str, is_active: bool) -> PackageToxEnv: # noqa: FBT001
assert self._defined_envs_ is not None # noqa: S101
if name in self._defined_envs_:
env = self._defined_envs_[name].env
if isinstance(env, PackageToxEnv):
if env.id() != packager: # pragma: no branch # same env name is used by different packaging
msg = f"{name} is already defined as a {env.id()}, cannot be {packager} too" # pragma: no cover
raise HandledError(msg) # pragma: no cover
return env
self._state.conf.clear_env(name)
package_type = REGISTER.package(packager)
pkg_conf = self._state.conf.get_env(name, package=True)
journal = self._journal.get_env_journal(name)
args = ToxEnvCreateArgs(pkg_conf, self._state.conf.core, self._state.conf.options, journal, self._log_handler)
pkg_env: PackageToxEnv = package_type(args)
pkg_env.register_config()
self._defined_envs_[name] = _ToxEnvInfo(pkg_env, is_active)
self._manager.tox_add_env_config(pkg_conf, self._state)
return pkg_env
def _parse_factors(self) -> tuple[set[str], ...]:
# factors is a list of lists, from the combination of nargs="+" and action="append"
# also parse hyphenated factors into lists of factors
# so that `-f foo-bar` and `-f foo bar` are treated equivalently
raw_factors = getattr(self._state.conf.options, "factors", [])
return tuple({f for factor in factor_list for f in factor.split("-")} for factor_list in raw_factors)
def _mark_active(self) -> None: # noqa: C901
labels = set(getattr(self._state.conf.options, "labels", []))
factors = self._parse_factors()
assert self._defined_envs_ is not None # noqa: S101
if labels or factors:
for env_info in self._defined_envs_.values():
env_info.is_active = False # if any was selected reset
# ignore labels when provisioning will occur
if labels and (self._provision is None or not self._provision[0]):
for label in labels:
for env_name in self._state.conf.core["labels"].get(label, []):
self._defined_envs_[env_name].is_active = True
for env_info in self._defined_envs_.values():
if labels.intersection(env_info.env.conf["labels"]):
env_info.is_active = True
if factors: # if matches mark it active
for name, env_info in self._defined_envs_.items():
for factor_set in factors:
if factor_set.issubset(set(name.split("-"))):
env_info.is_active = True
break
def __getitem__(self, item: str) -> RunToxEnv | PackageToxEnv:
"""
:param item: the name of the environment
:return: the tox environment
"""
return self._defined_envs[item].env
def iter(
self,
*,
only_active: bool = True,
package: bool = False,
) -> Iterator[str]:
"""
Get tox environments.
:param only_active: active environments are marked to be executed in the current target
:param package: return package environments
:return: an iteration of tox environments
"""
for name, env_info in self._defined_envs.items():
if only_active and not env_info.is_active:
continue
if not package and not isinstance(env_info.env, RunToxEnv):
continue
if self._filter_re is not None and self._filter_re.match(name):
if name not in self._warned_about:
self._warned_about.add(name)
LOGGER.warning("skip environment %s, matches filter %r", name, self._filter_re.pattern)
continue
yield name
def ensure_only_run_env_is_active(self) -> None:
envs, active = self._defined_envs, self._env_name_to_active()
invalid = [n for n, a in active.items() if a and isinstance(envs[n].env, PackageToxEnv)]
if invalid:
msg = f"cannot run packaging environment(s) {','.join(invalid)}"
raise HandledError(msg)
def _mark_provision(self, on: bool, provision_tox_env: str) -> None: # noqa: FBT001
self._provision = on, provision_tox_env
__all__ = [
"CliEnv",
"EnvSelector",
"register_env_select_flags",
]
| EnvSelector |
python | doocs__leetcode | solution/1700-1799/1763.Longest Nice Substring/Solution2.py | {
"start": 0,
"end": 483
} | class ____:
def longestNiceSubstring(self, s: str) -> str:
n = len(s)
ans = ''
for i in range(n):
lower = upper = 0
for j in range(i, n):
if s[j].islower():
lower |= 1 << (ord(s[j]) - ord('a'))
else:
upper |= 1 << (ord(s[j]) - ord('A'))
if lower == upper and len(ans) < j - i + 1:
ans = s[i : j + 1]
return ans
| Solution |
python | sympy__sympy | sympy/combinatorics/permutations.py | {
"start": 5574,
"end": 12027
} | class ____(dict):
"""
Wrapper around dict which provides the functionality of a disjoint cycle.
Explanation
===========
A cycle shows the rule to use to move subsets of elements to obtain
a permutation. The Cycle class is more flexible than Permutation in
that 1) all elements need not be present in order to investigate how
multiple cycles act in sequence and 2) it can contain singletons:
>>> from sympy.combinatorics.permutations import Perm, Cycle
A Cycle will automatically parse a cycle given as a tuple on the rhs:
>>> Cycle(1, 2)(2, 3)
(1 3 2)
The identity cycle, Cycle(), can be used to start a product:
>>> Cycle()(1, 2)(2, 3)
(1 3 2)
The array form of a Cycle can be obtained by calling the list
method (or passing it to the list function) and all elements from
0 will be shown:
>>> a = Cycle(1, 2)
>>> a.list()
[0, 2, 1]
>>> list(a)
[0, 2, 1]
If a larger (or smaller) range is desired use the list method and
provide the desired size -- but the Cycle cannot be truncated to
a size smaller than the largest element that is out of place:
>>> b = Cycle(2, 4)(1, 2)(3, 1, 4)(1, 3)
>>> b.list()
[0, 2, 1, 3, 4]
>>> b.list(b.size + 1)
[0, 2, 1, 3, 4, 5]
>>> b.list(-1)
[0, 2, 1]
Singletons are not shown when printing with one exception: the largest
element is always shown -- as a singleton if necessary:
>>> Cycle(1, 4, 10)(4, 5)
(1 5 4 10)
>>> Cycle(1, 2)(4)(5)(10)
(1 2)(10)
The array form can be used to instantiate a Permutation so other
properties of the permutation can be investigated:
>>> Perm(Cycle(1, 2)(3, 4).list()).transpositions()
[(1, 2), (3, 4)]
Notes
=====
The underlying structure of the Cycle is a dictionary and although
the __iter__ method has been redefined to give the array form of the
cycle, the underlying dictionary items are still available with the
such methods as items():
>>> list(Cycle(1, 2).items())
[(1, 2), (2, 1)]
See Also
========
Permutation
"""
def __missing__(self, arg):
"""Enter arg into dictionary and return arg."""
return as_int(arg)
def __iter__(self):
yield from self.list()
def __call__(self, *other):
"""Return product of cycles processed from R to L.
Examples
========
>>> from sympy.combinatorics import Cycle
>>> Cycle(1, 2)(2, 3)
(1 3 2)
An instance of a Cycle will automatically parse list-like
objects and Permutations that are on the right. It is more
flexible than the Permutation in that all elements need not
be present:
>>> a = Cycle(1, 2)
>>> a(2, 3)
(1 3 2)
>>> a(2, 3)(4, 5)
(1 3 2)(4 5)
"""
rv = Cycle(*other)
for k, v in zip(list(self.keys()), [rv[self[k]] for k in self.keys()]):
rv[k] = v
return rv
def list(self, size=None):
"""Return the cycles as an explicit list starting from 0 up
to the greater of the largest value in the cycles and size.
Truncation of trailing unmoved items will occur when size
is less than the maximum element in the cycle; if this is
desired, setting ``size=-1`` will guarantee such trimming.
Examples
========
>>> from sympy.combinatorics import Cycle
>>> p = Cycle(2, 3)(4, 5)
>>> p.list()
[0, 1, 3, 2, 5, 4]
>>> p.list(10)
[0, 1, 3, 2, 5, 4, 6, 7, 8, 9]
Passing a length too small will trim trailing, unchanged elements
in the permutation:
>>> Cycle(2, 4)(1, 2, 4).list(-1)
[0, 2, 1]
"""
if not self and size is None:
raise ValueError('must give size for empty Cycle')
if size is not None:
big = max([i for i in self.keys() if self[i] != i] + [-1])
size = max(size, big + 1)
else:
size = self.size
return [self[i] for i in range(size)]
def __repr__(self):
"""We want it to print as a Cycle, not as a dict.
Examples
========
>>> from sympy.combinatorics import Cycle
>>> Cycle(1, 2)
(1 2)
>>> print(_)
(1 2)
>>> list(Cycle(1, 2).items())
[(1, 2), (2, 1)]
"""
if not self:
return 'Cycle()'
cycles = Permutation(self).cyclic_form
s = ''.join(str(tuple(c)) for c in cycles)
big = self.size - 1
if not any(i == big for c in cycles for i in c):
s += '(%s)' % big
return 'Cycle%s' % s
def __str__(self):
"""We want it to be printed in a Cycle notation with no
comma in-between.
Examples
========
>>> from sympy.combinatorics import Cycle
>>> Cycle(1, 2)
(1 2)
>>> Cycle(1, 2, 4)(5, 6)
(1 2 4)(5 6)
"""
if not self:
return '()'
cycles = Permutation(self).cyclic_form
s = ''.join(str(tuple(c)) for c in cycles)
big = self.size - 1
if not any(i == big for c in cycles for i in c):
s += '(%s)' % big
s = s.replace(',', '')
return s
def __init__(self, *args):
"""Load up a Cycle instance with the values for the cycle.
Examples
========
>>> from sympy.combinatorics import Cycle
>>> Cycle(1, 2, 6)
(1 2 6)
"""
if not args:
return
if len(args) == 1:
if isinstance(args[0], Permutation):
for c in args[0].cyclic_form:
self.update(self(*c))
return
elif isinstance(args[0], Cycle):
for k, v in args[0].items():
self[k] = v
return
args = [as_int(a) for a in args]
if any(i < 0 for i in args):
raise ValueError('negative integers are not allowed in a cycle.')
if has_dups(args):
raise ValueError('All elements must be unique in a cycle.')
for i in range(-len(args), 0):
self[args[i]] = args[i + 1]
@property
def size(self):
if not self:
return 0
return max(self.keys()) + 1
def copy(self):
return Cycle(self)
| Cycle |
python | getsentry__sentry | src/sentry/api/serializers/models/organization.py | {
"start": 5774,
"end": 7603
} | class ____(serializers.Serializer):
name = serializers.CharField(max_length=64)
# XXX: Sentry org slugs are different from other resource slugs. See
# SentrySlugField for the full regex pattern. In short, they differ b/c
# 1. cannot contain underscores
# 2. must start with a number or letter
# 3. cannot end with a dash
slug = SentrySerializerSlugField(
org_slug=True,
max_length=DEFAULT_SLUG_MAX_LENGTH,
)
def validate_slug(self, value: str) -> str:
# Historically, the only check just made sure there was more than 1
# character for the slug, but since then, there are many slugs that
# fit within this new imposed limit. We're not fixing existing, but
# just preventing new bad values.
if len(value) < 3:
raise serializers.ValidationError(
f'This slug "{value}" is too short. Minimum of 3 characters.'
)
if value.lower() != value:
raise serializers.ValidationError(
f'This slug "{value}" should not contain uppercase symbols.'
)
if value in RESERVED_ORGANIZATION_SLUGS:
raise serializers.ValidationError(f'This slug "{value}" is reserved and not allowed.')
qs = Organization.objects.filter(slug=value)
if "organization" in self.context:
qs = qs.exclude(id=self.context["organization"].id)
if qs.exists():
raise serializers.ValidationError(f'The slug "{value}" is already in use.')
contains_whitespace = any(c.isspace() for c in self.initial_data["slug"])
if contains_whitespace:
raise serializers.ValidationError(
f'The slug "{value}" should not contain any whitespace.'
)
return value
| BaseOrganizationSerializer |
python | apache__airflow | providers/microsoft/azure/tests/unit/microsoft/azure/hooks/test_msgraph.py | {
"start": 12822,
"end": 16280
} | class ____:
def test_default_response_handler_when_json(self):
users = load_json_from_resources(dirname(__file__), "..", "resources", "users.json")
response = mock_json_response(200, users)
actual = asyncio.run(DefaultResponseHandler().handle_response_async(response, None))
assert isinstance(actual, dict)
assert actual == users
def test_default_response_handler_when_not_json(self):
response = mock_json_response(200, JSONDecodeError("", "", 0))
actual = asyncio.run(DefaultResponseHandler().handle_response_async(response, None))
assert actual == {}
def test_default_response_handler_when_content(self):
users = load_file_from_resources(dirname(__file__), "..", "resources", "users.json").encode()
response = mock_response(200, users)
actual = asyncio.run(DefaultResponseHandler().handle_response_async(response, None))
assert isinstance(actual, bytes)
assert actual == users
def test_default_response_handler_when_no_content_but_headers(self):
response = mock_response(200, headers={"RequestId": "ffb6096e-d409-4826-aaeb-b5d4b165dc4d"})
actual = asyncio.run(DefaultResponseHandler().handle_response_async(response, None))
assert isinstance(actual, dict)
assert actual["requestid"] == "ffb6096e-d409-4826-aaeb-b5d4b165dc4d"
def test_handle_response_async_when_bad_request(self):
response = mock_json_response(400, {})
with pytest.raises(AirflowBadRequest):
asyncio.run(DefaultResponseHandler().handle_response_async(response, None))
def test_handle_response_async_when_not_found(self):
response = mock_json_response(404, {})
with pytest.raises(AirflowNotFoundException):
asyncio.run(DefaultResponseHandler().handle_response_async(response, None))
def test_handle_response_async_when_internal_server_error(self):
response = mock_json_response(500, {})
with pytest.raises(AirflowException):
asyncio.run(DefaultResponseHandler().handle_response_async(response, None))
# TODO: Elad: review this after merging the bump 2.10 PR
# We should not have specific provider test block the release
@pytest.mark.xfail(reason="TODO: Remove")
def test_when_provider_min_airflow_version_is_2_10_or_higher_remove_obsolete_code(self):
"""
Once this test starts failing due to the fact that the minimum Airflow version is now 2.10.0 or higher
for this provider, you should remove the obsolete code in the get_proxies method of the
KiotaRequestAdapterHook and remove this test. This test was added to make sure to not forget to
remove the fallback code for backward compatibility with Airflow 2.9.x which isn't need anymore once
this provider depends on Airflow 2.10.0 or higher.
"""
min_airflow_version = get_provider_min_airflow_version("apache-airflow-providers-microsoft-azure")
# Check if the current Airflow version is 2.10.0 or higher
if min_airflow_version[0] >= 3 or (min_airflow_version[0] >= 2 and min_airflow_version[1] >= 10):
method_source = inspect.getsource(KiotaRequestAdapterHook.get_proxies)
raise AirflowProviderDeprecationWarning(
f"Check TODO's to remove obsolete code in get_proxies method:\n\r\n\r\t\t\t{method_source}"
)
| TestResponseHandler |
python | MorvanZhou__Reinforcement-learning-with-tensorflow | experiments/Solve_BipedalWalker/A3C.py | {
"start": 4926,
"end": 8586
} | class ____(object):
def __init__(self, name, globalAC):
self.env = gym.make(GAME)
self.name = name
self.AC = ACNet(name, globalAC)
def work(self):
global GLOBAL_RUNNING_R, GLOBAL_EP
total_step = 1
buffer_s, buffer_a, buffer_r = [], [], []
while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
s = self.env.reset()
ep_r = 0
while True:
if self.name == 'W_0' and total_step % 30 == 0:
self.env.render()
a = self.AC.choose_action(s)
s_, r, done, info = self.env.step(a)
if r == -100: r = -2
ep_r += r
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append(r)
if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
if done:
v_s_ = 0 # terminal
else:
v_s_ = SESS.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
buffer_v_target = []
for r in buffer_r[::-1]: # reverse buffer r
v_s_ = r + GAMMA * v_s_
buffer_v_target.append(v_s_)
buffer_v_target.reverse()
buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)
feed_dict = {
self.AC.s: buffer_s,
self.AC.a_his: buffer_a,
self.AC.v_target: buffer_v_target,
}
test = self.AC.update_global(feed_dict)
buffer_s, buffer_a, buffer_r = [], [], []
self.AC.pull_global()
s = s_
total_step += 1
if done:
achieve = '| Achieve' if self.env.unwrapped.hull.position[0] >= 88 else '| -------'
if len(GLOBAL_RUNNING_R) == 0: # record running episode reward
GLOBAL_RUNNING_R.append(ep_r)
else:
GLOBAL_RUNNING_R.append(0.95 * GLOBAL_RUNNING_R[-1] + 0.05 * ep_r)
print(
self.name,
"Ep:", GLOBAL_EP,
achieve,
"| Pos: %i" % self.env.unwrapped.hull.position[0],
"| RR: %.1f" % GLOBAL_RUNNING_R[-1],
'| EpR: %.1f' % ep_r,
'| var:', test,
)
GLOBAL_EP += 1
break
if __name__ == "__main__":
SESS = tf.Session()
with tf.device("/cpu:0"):
OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE) # we only need its params
workers = []
# Create worker
for i in range(N_WORKERS):
i_name = 'W_%i' % i # worker name
workers.append(Worker(i_name, GLOBAL_AC))
COORD = tf.train.Coordinator()
SESS.run(tf.global_variables_initializer())
worker_threads = []
for worker in workers:
job = lambda: worker.work()
t = threading.Thread(target=job)
t.start()
worker_threads.append(t)
COORD.join(worker_threads)
import matplotlib.pyplot as plt
plt.plot(GLOBAL_RUNNING_R)
plt.xlabel('episode')
plt.ylabel('global running reward')
plt.show()
| Worker |
python | pandas-dev__pandas | pandas/tests/extension/test_masked.py | {
"start": 3941,
"end": 12746
} | class ____(base.ExtensionTests):
_combine_le_expected_dtype = "boolean"
@pytest.fixture(autouse=True)
def skip_if_doesnt_support_2d(self, dtype, request):
# Override the fixture so that we run these tests.
assert not dtype._supports_2d
# If dtype._supports_2d is ever changed to True, then this fixture
# override becomes unnecessary.
@pytest.mark.parametrize("na_action", [None, "ignore"])
def test_map(self, data_missing, na_action, using_nan_is_na):
result = data_missing.map(lambda x: x, na_action=na_action)
if data_missing.dtype == Float32Dtype() and using_nan_is_na:
# map roundtrips through objects, which converts to float64
expected = data_missing.to_numpy(dtype="float64", na_value=np.nan)
else:
expected = data_missing.to_numpy()
tm.assert_numpy_array_equal(result, expected)
def test_map_na_action_ignore(self, data_missing_for_sorting, using_nan_is_na):
zero = data_missing_for_sorting[2]
result = data_missing_for_sorting.map(lambda x: zero, na_action="ignore")
if data_missing_for_sorting.dtype.kind == "b":
expected = np.array([False, pd.NA, False], dtype=object)
elif not using_nan_is_na:
# TODO: would we prefer to get NaN in this case to get a non-object?
expected = np.array([zero, pd.NA, zero], dtype=object)
else:
expected = np.array([zero, np.nan, zero])
tm.assert_numpy_array_equal(result, expected)
def _get_expected_exception(self, op_name, obj, other):
try:
dtype = tm.get_dtype(obj)
except AttributeError:
# passed arguments reversed
dtype = tm.get_dtype(other)
if dtype.kind == "b":
if op_name.strip("_").lstrip("r") in ["pow", "truediv", "floordiv"]:
# match behavior with non-masked bool dtype
return NotImplementedError
elif op_name in ["__sub__", "__rsub__"]:
# exception message would include "numpy boolean subtract""
return TypeError
return None
return None
def _cast_pointwise_result(self, op_name: str, obj, other, pointwise_result):
sdtype = tm.get_dtype(obj)
expected = pointwise_result
if sdtype.kind == "b":
if op_name in (
"__mod__",
"__rmod__",
):
# combine keeps boolean type
expected = expected.astype("Int8")
return expected
def test_divmod_series_array(self, data, data_for_twos, request):
if data.dtype.kind == "b":
mark = pytest.mark.xfail(
reason="Inconsistency between floordiv and divmod; we raise for "
"floordiv but not for divmod. This matches what we do for "
"non-masked bool dtype."
)
request.applymarker(mark)
super().test_divmod_series_array(data, data_for_twos)
def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:
if op_name in ["any", "all"] and ser.dtype.kind != "b":
pytest.skip(reason="Tested in tests/reductions/test_reductions.py")
return True
def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):
# overwrite to ensure pd.NA is tested instead of np.nan
# https://github.com/pandas-dev/pandas/issues/30958
cmp_dtype = "int64"
if ser.dtype.kind == "f":
# Item "dtype[Any]" of "Union[dtype[Any], ExtensionDtype]" has
# no attribute "numpy_dtype"
cmp_dtype = ser.dtype.numpy_dtype # type: ignore[union-attr]
elif ser.dtype.kind == "b":
if op_name in ["min", "max"]:
cmp_dtype = "bool"
# TODO: prod with integer dtypes does *not* match the result we would
# get if we used object for cmp_dtype. In that cae the object result
# is a large integer while the non-object case overflows and returns 0
alt = ser.dropna().astype(cmp_dtype)
if op_name == "count":
result = getattr(ser, op_name)()
expected = getattr(alt, op_name)()
else:
result = getattr(ser, op_name)(skipna=skipna)
expected = getattr(alt, op_name)(skipna=skipna)
if not skipna and ser.isna().any() and op_name not in ["any", "all"]:
expected = pd.NA
tm.assert_almost_equal(result, expected)
def _get_expected_reduction_dtype(self, arr, op_name: str, skipna: bool):
if is_float_dtype(arr.dtype):
cmp_dtype = arr.dtype.name
elif op_name in ["mean", "median", "var", "std", "skew", "kurt", "sem"]:
cmp_dtype = "Float64"
elif op_name in ["max", "min"]:
cmp_dtype = arr.dtype.name
elif arr.dtype in ["Int64", "UInt64"]:
cmp_dtype = arr.dtype.name
elif is_signed_integer_dtype(arr.dtype):
# TODO: Why does Window Numpy 2.0 dtype depend on skipna?
cmp_dtype = (
"Int32"
if (is_platform_windows() and (not np_version_gt2 or not skipna))
or not IS64
else "Int64"
)
elif is_unsigned_integer_dtype(arr.dtype):
cmp_dtype = (
"UInt32"
if (is_platform_windows() and (not np_version_gt2 or not skipna))
or not IS64
else "UInt64"
)
elif arr.dtype.kind == "b":
if op_name in ["min", "max"]:
cmp_dtype = "boolean"
elif op_name in ["sum", "prod"]:
cmp_dtype = (
"Int32"
if (is_platform_windows() and (not np_version_gt2 or not skipna))
or not IS64
else "Int64"
)
else:
raise TypeError("not supposed to reach this")
else:
raise TypeError("not supposed to reach this")
return cmp_dtype
def _supports_accumulation(self, ser: pd.Series, op_name: str) -> bool:
return True
def check_accumulate(self, ser: pd.Series, op_name: str, skipna: bool):
# overwrite to ensure pd.NA is tested instead of np.nan
# https://github.com/pandas-dev/pandas/issues/30958
length = 64
if is_windows_or_32bit:
# Item "ExtensionDtype" of "Union[dtype[Any], ExtensionDtype]" has
# no attribute "itemsize"
if not ser.dtype.itemsize == 8: # type: ignore[union-attr]
length = 32
if ser.dtype.name.startswith("U"):
expected_dtype = f"UInt{length}"
elif ser.dtype.name.startswith("I"):
expected_dtype = f"Int{length}"
elif ser.dtype.name.startswith("F"):
# Incompatible types in assignment (expression has type
# "Union[dtype[Any], ExtensionDtype]", variable has type "str")
expected_dtype = ser.dtype # type: ignore[assignment]
elif ser.dtype.kind == "b":
if op_name in ("cummin", "cummax"):
expected_dtype = "boolean"
else:
expected_dtype = f"Int{length}"
if expected_dtype == "Float32" and op_name == "cumprod" and skipna:
# TODO: xfail?
pytest.skip(
f"Float32 precision lead to large differences with op {op_name} "
f"and skipna={skipna}"
)
if op_name == "cumsum":
pass
elif op_name in ["cummax", "cummin"]:
expected_dtype = ser.dtype # type: ignore[assignment]
elif op_name == "cumprod":
ser = ser[:12]
else:
raise NotImplementedError(f"{op_name} not supported")
result = getattr(ser, op_name)(skipna=skipna)
expected = pd.Series(
pd.array(
getattr(ser.astype("float64"), op_name)(skipna=skipna),
dtype="Float64",
)
)
expected[np.isnan(expected)] = pd.NA
expected = expected.astype(expected_dtype)
tm.assert_series_equal(result, expected)
def test_loc_setitem_with_expansion_preserves_ea_index_dtype(self, data, request):
super().test_loc_setitem_with_expansion_preserves_ea_index_dtype(data)
@pytest.mark.parametrize(
"arr", [pd.array([True, False]), pd.array([1, 2]), pd.array([1.0, 2.0])]
)
def test_cast_pointwise_result_all_na_respects_original_dtype(arr):
# GH#62344
values = [pd.NA, pd.NA]
result = arr._cast_pointwise_result(values)
assert result.dtype == arr.dtype
assert all(x is pd.NA for x in result)
| TestMaskedArrays |
python | allegroai__clearml | clearml/task_parameters.py | {
"start": 3712,
"end": 3977
} | class ____(type):
def __new__(mcs: Type["_AttrsMeta"], name: str, bases: Tuple[Any], dct: Dict[str, Any]) -> type:
new_class = super(_AttrsMeta, mcs).__new__(mcs, name, bases, dct)
return attr.s(new_class)
@six.add_metaclass(_AttrsMeta)
| _AttrsMeta |
python | tensorflow__tensorflow | tensorflow/python/training/basic_session_run_hooks_test.py | {
"start": 55602,
"end": 57039
} | class ____(test.TestCase):
def setUp(self):
test.TestCase.setUp(self)
self.log_dir = 'log/dir'
self.summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir)
var = variable_scope.get_variable('var', initializer=0.0, use_resource=True)
tensor = state_ops.assign_add(var, 1.0)
self.summary_op = summary_lib.scalar('my_summary', tensor)
with variable_scope.variable_scope('foo', use_resource=True):
training_util.create_global_step()
self.train_op = training_util._increment_global_step(1)
def test_save_steps(self):
hook = basic_session_run_hooks.SummarySaverHook(
save_steps=8,
summary_writer=self.summary_writer,
summary_op=self.summary_op)
with self.cached_session() as sess:
hook.begin()
self.evaluate(variables_lib.global_variables_initializer())
mon_sess = monitored_session._HookedSession(sess, [hook])
for _ in range(30):
mon_sess.run(self.train_op)
hook.end(sess)
self.summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_summaries={
1: {
'my_summary': 1.0
},
9: {
'my_summary': 2.0
},
17: {
'my_summary': 3.0
},
25: {
'my_summary': 4.0
},
})
| ResourceSummarySaverHookTest |
python | pyqtgraph__pyqtgraph | pyqtgraph/parametertree/parameterTypes/progress.py | {
"start": 107,
"end": 343
} | class ____(WidgetParameterItem):
def makeWidget(self):
w = QtWidgets.QProgressBar()
w.setMaximumHeight(20)
w.sigChanged = w.valueChanged
self.hideWidget = False
return w
| ProgressBarParameterItem |
python | numba__llvmlite | llvmlite/binding/ffi.py | {
"start": 6664,
"end": 8854
} | class ____(object):
"""Wraps and duck-types a ctypes.CFUNCTYPE to provide
automatic locking when the wrapped function is called.
TODO: we can add methods to mark the function as threadsafe
and remove the locking-step on call when marked.
"""
__slots__ = ['_lock', '_cfn']
def __init__(self, lock, cfn):
self._lock = lock
self._cfn = cfn
@property
def argtypes(self):
return self._cfn.argtypes
@argtypes.setter
def argtypes(self, argtypes):
self._cfn.argtypes = argtypes
@property
def restype(self):
return self._cfn.restype
@restype.setter
def restype(self, restype):
self._cfn.restype = restype
def __call__(self, *args, **kwargs):
with self._lock:
return self._cfn(*args, **kwargs)
def _importlib_resources_path_repl(package, resource):
"""Replacement implementation of `import.resources.path` to avoid
deprecation warning following code at importlib_resources/_legacy.py
as suggested by https://importlib-resources.readthedocs.io/en/latest/using.html#migrating-from-legacy
Notes on differences from importlib.resources implementation:
The `_common.normalize_path(resource)` call is skipped because it is an
internal API and it is unnecessary for the use here. What it does is
ensuring `resource` is a str and that it does not contain path separators.
""" # noqa E501
return _impres.as_file(_impres.files(package) / resource)
_importlib_resources_path = (_importlib_resources_path_repl
if sys.version_info[:2] >= (3, 10)
else _impres.path)
lib = _lib_wrapper()
def register_lock_callback(acq_fn, rel_fn):
"""Register callback functions for lock acquire and release.
*acq_fn* and *rel_fn* are callables that take no arguments.
"""
lib._lock.register(acq_fn, rel_fn)
def unregister_lock_callback(acq_fn, rel_fn):
"""Remove the registered callback functions for lock acquire and release.
The arguments are the same as used in `register_lock_callback()`.
"""
lib._lock.unregister(acq_fn, rel_fn)
| _lib_fn_wrapper |
python | google__pytype | pytype/overlays/enum_overlay.py | {
"start": 33524,
"end": 35321
} | class ____(abstract.SimpleFunction):
"""Implements the functionality of __getitem__ for enums."""
def __init__(self, ctx):
sig = function.Signature(
name="__getitem__",
param_names=("cls", "name"),
posonly_count=0,
varargs_name=None,
kwonly_params=(),
kwargs_name=None,
defaults={},
annotations={"name": ctx.convert.str_type},
)
super().__init__(sig, ctx)
def _get_member_by_name(
self, enum: EnumInstance | abstract.PyTDClass, name: str
) -> cfg.Variable | None:
if isinstance(enum, EnumInstance):
return enum.members.get(name)
else:
if name in enum:
enum.load_lazy_attribute(name)
return enum.members[name]
def call(self, node, func, args, alias_map=None):
_, argmap = self.match_and_map_args(node, args, alias_map)
cls_var = argmap["cls"]
name_var = argmap["name"]
try:
cls = abstract_utils.get_atomic_value(cls_var)
except abstract_utils.ConversionError:
return node, self.ctx.new_unsolvable(node)
# We may have been given an instance of the class, such as if pytype is
# analyzing this method due to a super() call in a subclass.
if isinstance(cls, abstract.Instance):
cls = cls.cls
# If we can't get a concrete name, treat it like it matches and return a
# canonical enum member.
try:
name = abstract_utils.get_atomic_python_constant(name_var, str)
except abstract_utils.ConversionError:
return node, cls.instantiate(node)
inst = self._get_member_by_name(cls, name)
if inst:
return node, inst
else:
self.ctx.errorlog.attribute_error(
self.ctx.vm.frames, cls_var.bindings[0], name
)
return node, self.ctx.new_unsolvable(node)
| EnumMetaGetItem |
python | PrefectHQ__prefect | tests/test_transactions.py | {
"start": 14488,
"end": 18294
} | class ____:
@pytest.mark.parametrize("txn_class", [Transaction, AsyncTransaction])
@pytest.mark.parametrize("state", TransactionState.__members__.values())
def test_state_and_methods_are_consistent(
self, txn_class: type[BaseTransaction], state: TransactionState
):
"Not the best test, but it does the job"
txn = txn_class(state=state)
assert txn.is_active() == (txn.state.name == "ACTIVE")
assert txn.is_pending() == (txn.state.name == "PENDING")
assert txn.is_committed() == (txn.state.name == "COMMITTED")
assert txn.is_staged() == (txn.state.name == "STAGED")
assert txn.is_rolled_back() == (txn.state.name == "ROLLED_BACK")
class TestTransaction:
def test_happy_state_lifecycle(self):
txn = Transaction()
assert txn.is_pending()
with txn:
assert txn.is_active()
assert txn.is_committed()
def test_unhappy_state_lifecycle(self):
txn = Transaction()
assert txn.is_pending()
with pytest.raises(ValueError, match="foo"):
with txn:
assert txn.is_active()
raise ValueError("foo")
assert txn.is_rolled_back()
class TestAsyncTransaction:
async def test_happy_state_lifecycle(self):
txn = AsyncTransaction()
assert txn.is_pending()
async with txn:
assert txn.is_active()
assert txn.is_committed()
async with txn:
assert txn.is_active()
assert txn.is_committed()
async def test_unhappy_state_lifecycle(self):
txn = AsyncTransaction()
assert txn.is_pending()
with pytest.raises(ValueError, match="foo"):
async with txn:
assert txn.is_active()
raise ValueError("foo")
assert txn.is_rolled_back()
def test_overwrite_ignores_existing_record():
class Store(ResultStore):
def exists(self, key: str) -> bool:
return True
def read(self, key: str, holder: str | None = None) -> Any:
return "done"
def write(
self,
obj: Any,
key: str | None = None,
expiration: datetime.datetime | None = None,
holder: str | None = None,
) -> None:
pass
def supports_isolation_level(self, level: IsolationLevel) -> bool:
return True
with Transaction(
key="test_overwrite_ignores_existing_record", store=Store()
) as txn:
assert txn.is_committed()
with Transaction(
key="test_overwrite_ignores_existing_record", store=Store(), overwrite=True
) as txn:
assert not txn.is_committed()
async def test_overwrite_ignores_existing_record_async():
class Store(ResultStore):
async def aexists(self, key: str) -> bool:
return True
async def aread(self, key: str, holder: str | None = None) -> Any:
return "done"
async def awrite(
self,
obj: Any,
key: str | None = None,
expiration: datetime.datetime | None = None,
holder: str | None = None,
) -> None:
pass
def supports_isolation_level(self, level: IsolationLevel) -> bool:
return True
async with AsyncTransaction(
key="test_overwrite_ignores_existing_record", store=Store()
) as txn:
assert txn.is_committed()
async with AsyncTransaction(
key="test_overwrite_ignores_existing_record", store=Store(), overwrite=True
) as txn:
assert not txn.is_committed()
| TestTransactionState |
python | scipy__scipy | scipy/stats/tests/test_stats.py | {
"start": 271733,
"end": 272299
} | class ____(NormalityTests):
test_name = 'normaltest'
case_ref = (3.92371918158185551, 0.14059672529747502) # statistic, pvalue
def test_too_few_observations(self, xp):
stats.normaltest(xp.arange(8.))
# 1D sample has too few observations -> warning / NaN output
with eager_warns(SmallSampleWarning, match=too_small_1d_not_omit, xp=xp):
res = stats.normaltest(xp.arange(7.))
NaN = xp.asarray(xp.nan)
xp_assert_equal(res.statistic, NaN)
xp_assert_equal(res.pvalue, NaN)
| TestNormalTest |
python | spack__spack | lib/spack/spack/test/conftest.py | {
"start": 41555,
"end": 42239
} | class ____:
def __init__(self, root):
self.root = root
def path_for_spec(self, spec):
return os.path.sep.join([self.root, spec.name + "-" + spec.dag_hash()])
def ensure_installed(self, spec):
pass
@pytest.fixture()
def gen_mock_layout(tmp_path: Path):
# Generate a MockLayout in a temporary directory. In general the prefixes
# specified by MockLayout should never be written to, but this ensures
# that even if they are, that it causes no harm
def create_layout(root):
subroot = tmp_path / root
subroot.mkdir(parents=True, exist_ok=True)
return MockLayout(str(subroot))
yield create_layout
| MockLayout |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/enums.py | {
"start": 2966,
"end": 3242
} | class ____(ToUpperCase, MemberType, _ParentEnum, enum.Enum):
"""this is enum class"""
x = 'x'
def isupper(self):
"""overridden"""
return False
def __str__(self):
"""overridden"""
return super().__str__()
| EnumClassWithParentEnum |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_internal/commands/index.py | {
"start": 846,
"end": 4731
} | class ____(IndexGroupCommand):
"""
Inspect information available from package indexes.
"""
ignore_require_venv = True
usage = """
%prog versions <package>
"""
def add_options(self) -> None:
cmdoptions.add_target_python_options(self.cmd_opts)
self.cmd_opts.add_option(cmdoptions.ignore_requires_python())
self.cmd_opts.add_option(cmdoptions.pre())
self.cmd_opts.add_option(cmdoptions.no_binary())
self.cmd_opts.add_option(cmdoptions.only_binary())
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options: Values, args: List[str]) -> int:
handlers = {
"versions": self.get_available_package_versions,
}
logger.warning(
"pip index is currently an experimental command. "
"It may be removed/changed in a future release "
"without prior warning."
)
# Determine action
if not args or args[0] not in handlers:
logger.error(
"Need an action (%s) to perform.",
", ".join(sorted(handlers)),
)
return ERROR
action = args[0]
# Error handling happens here, not in the action-handlers.
try:
handlers[action](options, args[1:])
except PipError as e:
logger.error(e.args[0])
return ERROR
return SUCCESS
def _build_package_finder(
self,
options: Values,
session: PipSession,
target_python: Optional[TargetPython] = None,
ignore_requires_python: Optional[bool] = None,
) -> PackageFinder:
"""
Create a package finder appropriate to the index command.
"""
link_collector = LinkCollector.create(session, options=options)
# Pass allow_yanked=False to ignore yanked versions.
selection_prefs = SelectionPreferences(
allow_yanked=False,
allow_all_prereleases=options.pre,
ignore_requires_python=ignore_requires_python,
)
return PackageFinder.create(
link_collector=link_collector,
selection_prefs=selection_prefs,
target_python=target_python,
)
def get_available_package_versions(self, options: Values, args: List[Any]) -> None:
if len(args) != 1:
raise CommandError("You need to specify exactly one argument")
target_python = cmdoptions.make_target_python(options)
query = args[0]
with self._build_session(options) as session:
finder = self._build_package_finder(
options=options,
session=session,
target_python=target_python,
ignore_requires_python=options.ignore_requires_python,
)
versions: Iterable[Version] = (
candidate.version for candidate in finder.find_all_candidates(query)
)
if not options.pre:
# Remove prereleases
versions = (
version for version in versions if not version.is_prerelease
)
versions = set(versions)
if not versions:
raise DistributionNotFound(
f"No matching distribution found for {query}"
)
formatted_versions = [str(ver) for ver in sorted(versions, reverse=True)]
latest = formatted_versions[0]
write_output(f"{query} ({latest})")
write_output("Available versions: {}".format(", ".join(formatted_versions)))
print_dist_installation_info(query, latest)
| IndexCommand |
python | python-pillow__Pillow | src/PIL/ImageDraw2.py | {
"start": 1081,
"end": 1391
} | class ____:
"""Stores a TrueType font and color"""
def __init__(
self, color: str, file: StrOrBytesPath | BinaryIO, size: float = 12
) -> None:
# FIXME: add support for bitmap fonts
self.color = ImageColor.getrgb(color)
self.font = ImageFont.truetype(file, size)
| Font |
python | redis__redis-py | tests/test_scenario/test_active_active.py | {
"start": 1399,
"end": 15080
} | class ____:
def teardown_method(self, method):
# Timeout so the cluster could recover from network failure.
sleep(10)
@pytest.mark.parametrize(
"r_multi_db",
[
{"client_class": Redis, "min_num_failures": 2},
{"client_class": RedisCluster, "min_num_failures": 2},
],
ids=["standalone", "cluster"],
indirect=True,
)
@pytest.mark.timeout(100)
def test_multi_db_client_failover_to_another_db(
self, r_multi_db, fault_injector_client
):
r_multi_db, listener, config = r_multi_db
# Handle unavailable databases from previous test.
retry = Retry(
supported_errors=(TemporaryUnavailableException,),
retries=DEFAULT_FAILOVER_ATTEMPTS,
backoff=ConstantBackoff(backoff=DEFAULT_FAILOVER_DELAY),
)
event = threading.Event()
thread = threading.Thread(
target=trigger_network_failure_action,
daemon=True,
args=(fault_injector_client, config, event),
)
# Client initialized on the first command.
retry.call_with_retry(
lambda: r_multi_db.set("key", "value"), lambda _: dummy_fail()
)
thread.start()
# Execute commands before network failure
while not event.is_set():
assert (
retry.call_with_retry(
lambda: r_multi_db.get("key"), lambda _: dummy_fail()
)
== "value"
)
sleep(0.5)
# Execute commands until database failover
while not listener.is_changed_flag:
assert (
retry.call_with_retry(
lambda: r_multi_db.get("key"), lambda _: dummy_fail()
)
== "value"
)
sleep(0.5)
@pytest.mark.parametrize(
"r_multi_db",
[
{"client_class": Redis, "min_num_failures": 2, "health_check_interval": 20},
{
"client_class": RedisCluster,
"min_num_failures": 2,
"health_check_interval": 20,
},
],
ids=["standalone", "cluster"],
indirect=True,
)
@pytest.mark.timeout(100)
def test_multi_db_client_uses_lag_aware_health_check(
self, r_multi_db, fault_injector_client
):
r_multi_db, listener, config = r_multi_db
retry = Retry(
supported_errors=(TemporaryUnavailableException,),
retries=DEFAULT_FAILOVER_ATTEMPTS,
backoff=ConstantBackoff(backoff=DEFAULT_FAILOVER_DELAY),
)
event = threading.Event()
thread = threading.Thread(
target=trigger_network_failure_action,
daemon=True,
args=(fault_injector_client, config, event),
)
env0_username = os.getenv("ENV0_USERNAME")
env0_password = os.getenv("ENV0_PASSWORD")
# Adding additional health check to the client.
r_multi_db.add_health_check(
LagAwareHealthCheck(
verify_tls=False,
auth_basic=(env0_username, env0_password),
lag_aware_tolerance=10000,
)
)
# Client initialized on the first command.
retry.call_with_retry(
lambda: r_multi_db.set("key", "value"), lambda _: dummy_fail()
)
thread.start()
# Execute commands before network failure
while not event.is_set():
assert (
retry.call_with_retry(
lambda: r_multi_db.get("key"), lambda _: dummy_fail()
)
== "value"
)
sleep(0.5)
# Execute commands after network failure
while not listener.is_changed_flag:
assert (
retry.call_with_retry(
lambda: r_multi_db.get("key"), lambda _: dummy_fail()
)
== "value"
)
sleep(0.5)
@pytest.mark.parametrize(
"r_multi_db",
[
{"client_class": Redis, "min_num_failures": 2},
{"client_class": RedisCluster, "min_num_failures": 2},
],
ids=["standalone", "cluster"],
indirect=True,
)
@pytest.mark.timeout(100)
def test_context_manager_pipeline_failover_to_another_db(
self, r_multi_db, fault_injector_client
):
r_multi_db, listener, config = r_multi_db
retry = Retry(
supported_errors=(TemporaryUnavailableException,),
retries=DEFAULT_FAILOVER_ATTEMPTS,
backoff=ConstantBackoff(backoff=DEFAULT_FAILOVER_DELAY),
)
event = threading.Event()
thread = threading.Thread(
target=trigger_network_failure_action,
daemon=True,
args=(fault_injector_client, config, event),
)
def callback():
with r_multi_db.pipeline() as pipe:
pipe.set("{hash}key1", "value1")
pipe.set("{hash}key2", "value2")
pipe.set("{hash}key3", "value3")
pipe.get("{hash}key1")
pipe.get("{hash}key2")
pipe.get("{hash}key3")
assert pipe.execute() == [
True,
True,
True,
"value1",
"value2",
"value3",
]
# Client initialized on first pipe execution.
retry.call_with_retry(lambda: callback(), lambda _: dummy_fail())
thread.start()
# Execute pipeline before network failure
while not event.is_set():
retry.call_with_retry(lambda: callback(), lambda _: dummy_fail())
sleep(0.5)
# Execute pipeline until database failover
for _ in range(5):
retry.call_with_retry(lambda: callback(), lambda _: dummy_fail())
sleep(0.5)
@pytest.mark.parametrize(
"r_multi_db",
[
{"client_class": Redis, "min_num_failures": 2},
{"client_class": RedisCluster, "min_num_failures": 2},
],
ids=["standalone", "cluster"],
indirect=True,
)
@pytest.mark.timeout(100)
def test_chaining_pipeline_failover_to_another_db(
self, r_multi_db, fault_injector_client
):
r_multi_db, listener, config = r_multi_db
retry = Retry(
supported_errors=(TemporaryUnavailableException,),
retries=DEFAULT_FAILOVER_ATTEMPTS,
backoff=ConstantBackoff(backoff=DEFAULT_FAILOVER_DELAY),
)
event = threading.Event()
thread = threading.Thread(
target=trigger_network_failure_action,
daemon=True,
args=(fault_injector_client, config, event),
)
def callback():
pipe = r_multi_db.pipeline()
pipe.set("{hash}key1", "value1")
pipe.set("{hash}key2", "value2")
pipe.set("{hash}key3", "value3")
pipe.get("{hash}key1")
pipe.get("{hash}key2")
pipe.get("{hash}key3")
assert pipe.execute() == [True, True, True, "value1", "value2", "value3"]
# Client initialized on first pipe execution.
retry.call_with_retry(lambda: callback(), lambda _: dummy_fail())
thread.start()
# Execute pipeline before network failure
while not event.is_set():
retry.call_with_retry(lambda: callback(), lambda _: dummy_fail())
sleep(0.5)
# Execute pipeline until database failover
for _ in range(5):
retry.call_with_retry(lambda: callback(), lambda _: dummy_fail())
sleep(0.5)
@pytest.mark.parametrize(
"r_multi_db",
[
{"client_class": Redis, "min_num_failures": 2},
{"client_class": RedisCluster, "min_num_failures": 2},
],
ids=["standalone", "cluster"],
indirect=True,
)
@pytest.mark.timeout(100)
def test_transaction_failover_to_another_db(
self, r_multi_db, fault_injector_client
):
r_multi_db, listener, config = r_multi_db
retry = Retry(
supported_errors=(TemporaryUnavailableException,),
retries=DEFAULT_FAILOVER_ATTEMPTS,
backoff=ConstantBackoff(backoff=DEFAULT_FAILOVER_DELAY),
)
event = threading.Event()
thread = threading.Thread(
target=trigger_network_failure_action,
daemon=True,
args=(fault_injector_client, config, event),
)
def callback(pipe: Pipeline):
pipe.set("{hash}key1", "value1")
pipe.set("{hash}key2", "value2")
pipe.set("{hash}key3", "value3")
pipe.get("{hash}key1")
pipe.get("{hash}key2")
pipe.get("{hash}key3")
# Client initialized on first transaction execution.
retry.call_with_retry(
lambda: r_multi_db.transaction(callback), lambda _: dummy_fail()
)
thread.start()
# Execute transaction before network failure
while not event.is_set():
retry.call_with_retry(
lambda: r_multi_db.transaction(callback), lambda _: dummy_fail()
)
sleep(0.5)
# Execute transaction until database failover
while not listener.is_changed_flag:
retry.call_with_retry(
lambda: r_multi_db.transaction(callback), lambda _: dummy_fail()
)
sleep(0.5)
@pytest.mark.parametrize(
"r_multi_db",
[
{"client_class": Redis, "min_num_failures": 2},
{"client_class": RedisCluster, "min_num_failures": 2},
],
ids=["standalone", "cluster"],
indirect=True,
)
@pytest.mark.timeout(100)
def test_pubsub_failover_to_another_db(self, r_multi_db, fault_injector_client):
r_multi_db, listener, config = r_multi_db
retry = Retry(
supported_errors=(TemporaryUnavailableException,),
retries=DEFAULT_FAILOVER_ATTEMPTS,
backoff=ConstantBackoff(backoff=DEFAULT_FAILOVER_DELAY),
)
event = threading.Event()
thread = threading.Thread(
target=trigger_network_failure_action,
daemon=True,
args=(fault_injector_client, config, event),
)
data = json.dumps({"message": "test"})
messages_count = 0
def handler(message):
nonlocal messages_count
messages_count += 1
pubsub = r_multi_db.pubsub()
# Assign a handler and run in a separate thread.
retry.call_with_retry(
lambda: pubsub.subscribe(**{"test-channel": handler}),
lambda _: dummy_fail(),
)
pubsub_thread = pubsub.run_in_thread(sleep_time=0.1, daemon=True)
thread.start()
# Execute publish before network failure
while not event.is_set():
retry.call_with_retry(
lambda: r_multi_db.publish("test-channel", data), lambda _: dummy_fail()
)
sleep(0.5)
# Execute publish until database failover
while not listener.is_changed_flag:
retry.call_with_retry(
lambda: r_multi_db.publish("test-channel", data), lambda _: dummy_fail()
)
sleep(0.5)
pubsub_thread.stop()
assert messages_count > 2
@pytest.mark.parametrize(
"r_multi_db",
[
{"client_class": Redis, "min_num_failures": 2},
{"client_class": RedisCluster, "min_num_failures": 2},
],
ids=["standalone", "cluster"],
indirect=True,
)
@pytest.mark.timeout(100)
def test_sharded_pubsub_failover_to_another_db(
self, r_multi_db, fault_injector_client
):
r_multi_db, listener, config = r_multi_db
retry = Retry(
supported_errors=(TemporaryUnavailableException,),
retries=DEFAULT_FAILOVER_ATTEMPTS,
backoff=ConstantBackoff(backoff=DEFAULT_FAILOVER_DELAY),
)
event = threading.Event()
thread = threading.Thread(
target=trigger_network_failure_action,
daemon=True,
args=(fault_injector_client, config, event),
)
data = json.dumps({"message": "test"})
messages_count = 0
def handler(message):
nonlocal messages_count
messages_count += 1
pubsub = r_multi_db.pubsub()
# Assign a handler and run in a separate thread.
retry.call_with_retry(
lambda: pubsub.ssubscribe(**{"test-channel": handler}),
lambda _: dummy_fail(),
)
pubsub_thread = pubsub.run_in_thread(
sleep_time=0.1, daemon=True, sharded_pubsub=True
)
thread.start()
# Execute publish before network failure
while not event.is_set():
retry.call_with_retry(
lambda: r_multi_db.spublish("test-channel", data),
lambda _: dummy_fail(),
)
sleep(0.5)
# Execute publish until database failover
while not listener.is_changed_flag:
retry.call_with_retry(
lambda: r_multi_db.spublish("test-channel", data),
lambda _: dummy_fail(),
)
sleep(0.5)
pubsub_thread.stop()
assert messages_count > 2
| TestActiveActive |
python | kamyu104__LeetCode-Solutions | Python/execution-of-all-suffix-instructions-staying-in-a-grid.py | {
"start": 50,
"end": 1212
} | class ____(object):
def executeInstructions(self, n, startPos, s):
"""
:type n: int
:type startPos: List[int]
:type s: str
:rtype: List[int]
"""
directions = {'U':(-1, 0), 'R':(0, 1), 'D':(1, 0), 'L':(0, -1)}
(x0, y0), (x, y) = startPos, (0, 0)
result = range(len(s), 0, -1)
lookup_x = collections.defaultdict(list)
lookup_y = collections.defaultdict(list)
lookup_x[x0-x].append(0)
lookup_y[y0-y].append(0)
for i, d in enumerate(s):
dx, dy = directions[d]
x, y = x+dx, y+dy
for k in n-x, -x-1:
if k not in lookup_x:
continue
for j in lookup_x[k]:
result[j] = min(result[j], i-j)
lookup_x[k] = []
for k in n-y, -y-1:
if k not in lookup_y:
continue
for j in lookup_y[k]:
result[j] = min(result[j], i-j)
lookup_y[k] = []
lookup_x[x0-x].append(i+1)
lookup_y[y0-y].append(i+1)
return result
| Solution |
python | PyCQA__flake8 | src/flake8/exceptions.py | {
"start": 980,
"end": 1629
} | class ____(Flake8Exception):
"""The plugin requested unknown parameters."""
FORMAT = '"%(name)s" requested unknown parameters causing %(exc)s'
def __init__(self, plugin_name: str, exception: Exception) -> None:
"""Pop certain keyword arguments for initialization."""
self.plugin_name = plugin_name
self.original_exception = exception
super().__init__(plugin_name, exception)
def __str__(self) -> str:
"""Format our exception message."""
return self.FORMAT % {
"name": self.plugin_name,
"exc": self.original_exception,
}
| PluginRequestedUnknownParameters |
python | aio-libs__aiohttp | tests/test_web_response.py | {
"start": 44723,
"end": 48893
} | class ____:
def test_content_type_is_application_json_by_default(self) -> None:
resp = web.json_response("")
assert "application/json" == resp.content_type
def test_passing_text_only(self) -> None:
resp = web.json_response(text=json.dumps("jaysawn"))
assert resp.text == json.dumps("jaysawn")
def test_data_and_text_raises_value_error(self) -> None:
with pytest.raises(ValueError) as excinfo:
web.json_response(data="foo", text="bar")
expected_message = "only one of data, text, or body should be specified"
assert expected_message == excinfo.value.args[0]
def test_data_and_body_raises_value_error(self) -> None:
with pytest.raises(ValueError) as excinfo:
web.json_response(data="foo", body=b"bar")
expected_message = "only one of data, text, or body should be specified"
assert expected_message == excinfo.value.args[0]
def test_text_is_json_encoded(self) -> None:
resp = web.json_response({"foo": 42})
assert json.dumps({"foo": 42}) == resp.text
def test_content_type_is_overrideable(self) -> None:
resp = web.json_response({"foo": 42}, content_type="application/vnd.json+api")
assert "application/vnd.json+api" == resp.content_type
@pytest.mark.dev_mode
async def test_no_warn_small_cookie(
buf: bytearray, writer: AbstractStreamWriter
) -> None:
resp = web.Response()
resp.set_cookie("foo", "ÿ" + "8" * 4064, max_age=2600) # No warning
req = make_request("GET", "/", writer=writer)
await resp.prepare(req)
await resp.write_eof()
match = re.search(b"Set-Cookie: (.*?)\r\n", buf)
assert match is not None
cookie = match.group(1)
assert len(cookie) == 4096
@pytest.mark.dev_mode
async def test_warn_large_cookie(buf: bytearray, writer: AbstractStreamWriter) -> None:
resp = web.Response()
with pytest.warns(
UserWarning,
match="The size of is too large, it might get ignored by the client.",
):
resp.set_cookie("foo", "ÿ" + "8" * 4065, max_age=2600)
req = make_request("GET", "/", writer=writer)
await resp.prepare(req)
await resp.write_eof()
match = re.search(b"Set-Cookie: (.*?)\r\n", buf)
assert match is not None
cookie = match.group(1)
assert len(cookie) == 4097
@pytest.mark.parametrize("loose_header_type", (MultiDict, CIMultiDict, dict))
async def test_passing_cimultidict_to_web_response_not_mutated(
loose_header_type: type,
) -> None:
req = make_request("GET", "/")
headers = loose_header_type({})
resp = web.Response(body=b"answer", headers=headers)
await resp.prepare(req)
assert resp.content_length == 6
assert not headers
async def test_stream_response_sends_headers_immediately() -> None:
"""Test that StreamResponse sends headers immediately."""
writer = mock.create_autospec(StreamWriter, spec_set=True)
writer.write_headers = mock.AsyncMock()
writer.send_headers = mock.Mock()
writer.write_eof = mock.AsyncMock()
req = make_request("GET", "/", writer=writer)
resp = web.StreamResponse()
# StreamResponse should have _send_headers_immediately = True
assert resp._send_headers_immediately is True
# Prepare the response
await resp.prepare(req)
# Headers should be sent immediately
writer.send_headers.assert_called_once()
async def test_response_buffers_headers() -> None:
"""Test that Response buffers headers for packet coalescing."""
writer = mock.create_autospec(StreamWriter, spec_set=True)
writer.write_headers = mock.AsyncMock()
writer.send_headers = mock.Mock()
writer.write_eof = mock.AsyncMock()
req = make_request("GET", "/", writer=writer)
resp = web.Response(body=b"hello")
# Response should have _send_headers_immediately = False
assert resp._send_headers_immediately is False
# Prepare the response
await resp.prepare(req)
# Headers should NOT be sent immediately
writer.send_headers.assert_not_called()
# But write_headers should have been called
writer.write_headers.assert_called_once()
| TestJSONResponse |
python | django__django | tests/apps/query_performing_app/apps.py | {
"start": 1203,
"end": 1295
} | class ____(CursorQueryAppConfig):
database = "default"
| QueryDefaultDatabaseCursorAppConfig |
python | streamlit__streamlit | lib/tests/streamlit/elements/help_test.py | {
"start": 10532,
"end": 14403
} | class ____(unittest.TestCase):
def test_st_help_no_arg(self):
actual = _get_variable_name_from_code_str("st.help()")
assert actual is None
def test_variable_should_match_own_name(self):
tests = [
"a",
"a_b",
"a.b",
"a[b]",
"a[0]",
"a[0].c",
"a[0].c.foo()",
]
for test in tests:
for st_call in st_calls:
# Wrap test in an st call.
code = st_call.format(test)
actual = _get_variable_name_from_code_str(code)
assert actual == test
def test_constant_should_have_no_name(self):
tests = [
"None",
"0",
"1",
"123",
"False",
"True",
"'some string'",
"b'some bytes'",
"...",
]
for test in tests:
for st_call in st_calls:
# Wrap test in an st call.
code = st_call.format(test)
actual = _get_variable_name_from_code_str(code)
assert actual is None
def test_walrus_should_return_var_name(self):
for st_call in st_calls:
# Wrap test in an st call.
code = st_call.format("a := 123")
actual = _get_variable_name_from_code_str(code)
assert actual == "a"
def test_magic_should_just_echo(self):
tests = [
"a",
"a_b",
"a.b",
"a[b]",
"a[0]",
"a[0].c",
"a[0].c.foo()",
"None",
"0",
"1",
"123",
"False",
"True",
"'some string'",
"b'some bytes'",
"...",
"f'some {f} string'",
"[x for x in range(10)]",
"(x for x in range(10))",
"{x: None for x in range(10)}",
]
for code in tests:
actual = _get_variable_name_from_code_str(code)
assert actual == code
# Testing with comma at the end
tests += [
"foo()",
]
for code in tests:
actual = _get_variable_name_from_code_str(code + ",")
assert actual == code
def test_if_dont_know_just_echo(self):
tests = [
(
"foo()",
"foo()",
),
(
"[x for x in range(10)]",
"[x for x in range(10)]",
),
(
"(x for x in range(10))",
"(x for x in range(10))",
),
(
"x for x in range(10)",
# Python >= 3.8 has its own bug here (because of course) where the
# column offsets are off by one in different directions, leading to parentheses
# appearing around the generator expression. This leads to syntactically correct
# code, though, so not so bad!
"(x for x in range(10))",
),
(
"{x: None for x in range(10)}",
"{x: None for x in range(10)}",
),
]
for test, expected in tests:
for st_call in st_calls:
# Wrap test in an st call.
code = st_call.format(test)
actual = _get_variable_name_from_code_str(code)
assert actual == expected
def test_multiline_gets_linearized(self):
test = """foo(
"bar"
)"""
for st_call in st_calls:
# Wrap test in an st call.
code = st_call.format(test)
actual = _get_variable_name_from_code_str(code)
assert actual == "foo("
| GetVariableNameFromCodeStrTest |
python | scipy__scipy | scipy/cluster/tests/test_vq.py | {
"start": 3947,
"end": 6585
} | class ____:
def test_whiten(self, xp):
desired = xp.asarray([[5.08738849, 2.97091878],
[3.19909255, 0.69660580],
[4.51041982, 0.02640918],
[4.38567074, 0.95120889],
[2.32191480, 1.63195503]])
obs = xp.asarray([[0.98744510, 0.82766775],
[0.62093317, 0.19406729],
[0.87545741, 0.00735733],
[0.85124403, 0.26499712],
[0.45067590, 0.45464607]])
xp_assert_close(whiten(obs), desired, rtol=1e-5)
def test_whiten_zero_std(self, xp):
desired = xp.asarray([[0., 1.0, 2.86666544],
[0., 1.0, 1.32460034],
[0., 1.0, 3.74382172]])
obs = xp.asarray([[0., 1., 0.74109533],
[0., 1., 0.34243798],
[0., 1., 0.96785929]])
with eager_warns(RuntimeWarning, match="Some columns have standard...", xp=xp):
actual = whiten(obs)
xp_assert_close(actual, desired, rtol=1e-5)
@pytest.mark.filterwarnings("ignore:invalid value encountered:RuntimeWarning:dask")
@pytest.mark.parametrize("bad_value", [math.nan, math.inf, -math.inf])
def test_whiten_not_finite(self, bad_value, xp):
obs = xp.asarray([[0.98744510, bad_value],
[0.62093317, 0.19406729],
[0.87545741, 0.00735733],
[0.85124403, 0.26499712],
[0.45067590, 0.45464607]])
if is_lazy_array(obs):
desired = xp.asarray([[5.08738849, math.nan],
[3.19909255, math.nan],
[4.51041982, math.nan],
[4.38567074, math.nan],
[2.32191480, math.nan]])
xp_assert_close(whiten(obs), desired, rtol=1e-5)
else:
assert_raises(ValueError, whiten, obs)
@pytest.mark.skipif(SCIPY_ARRAY_API,
reason='`np.matrix` unsupported in array API mode')
def test_whiten_not_finite_matrix(self):
for bad_value in np.nan, np.inf, -np.inf:
obs = matrix([[0.98744510, bad_value],
[0.62093317, 0.19406729],
[0.87545741, 0.00735733],
[0.85124403, 0.26499712],
[0.45067590, 0.45464607]])
assert_raises(ValueError, whiten, obs)
@make_xp_test_case(vq)
| TestWhiten |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels45.py | {
"start": 315,
"end": 1844
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_data_labels45.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [70329856, 71246976]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
[10, 20, 30, 40, 50],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
worksheet.write_column("D1", data[3])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$5",
"data_labels": {
"value": 1,
"custom": [
{
"value": "=Sheet1!$D$1",
"border": {"color": "red"},
"fill": {"color": "#00B050"},
}
],
},
}
)
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | scipy__scipy | scipy/optimize/tests/test_lsq_linear.py | {
"start": 9334,
"end": 9430
} | class ____(BaseMixin, SparseMixin):
method = 'trf'
lsq_solvers = ['exact', 'lsmr']
| TestTRF |
python | django__django | tests/auth_tests/test_admin_multidb.py | {
"start": 685,
"end": 2642
} | class ____(TestCase):
databases = {"default", "other"}
READ_ONLY_METHODS = {"get", "options", "head", "trace"}
@classmethod
def setUpTestData(cls):
cls.superusers = {}
for db in cls.databases:
Router.target_db = db
cls.superusers[db] = User.objects.create_superuser(
username="admin",
password="something",
email="test@test.org",
)
def tearDown(self):
# Reset the routers' state between each test.
Router.target_db = None
@mock.patch("django.contrib.auth.admin.transaction")
def test_add_view(self, mock):
for db in self.databases:
with self.subTest(db_connection=db):
Router.target_db = db
self.client.force_login(self.superusers[db])
response = self.client.post(
reverse("test_adminsite:auth_user_add"),
{
"username": "some_user",
"password1": "helloworld",
"password2": "helloworld",
},
)
self.assertEqual(response.status_code, 302)
mock.atomic.assert_called_with(using=db)
@mock.patch("django.contrib.auth.admin.transaction")
def test_read_only_methods_add_view(self, mock):
for db in self.databases:
for method in self.READ_ONLY_METHODS:
with self.subTest(db_connection=db, method=method):
mock.mock_reset()
Router.target_db = db
self.client.force_login(self.superusers[db])
response = getattr(self.client, method)(
reverse("test_adminsite:auth_user_add")
)
self.assertEqual(response.status_code, 200)
mock.atomic.assert_not_called()
| MultiDatabaseTests |
python | psf__black | tests/data/cases/class_methods_new_line.py | {
"start": 786,
"end": 846
} | class ____:
class Inner:
pass
| ClassSimplestWithInner |
python | celery__celery | t/unit/tasks/test_result.py | {
"start": 1809,
"end": 16673
} | class ____:
def setup_method(self):
self.app.conf.result_cache_max = 100
self.app.conf.result_serializer = 'pickle'
self.app.conf.result_extended = True
self.task1 = mock_task('task1', states.SUCCESS, 'the')
self.task2 = mock_task('task2', states.SUCCESS, 'quick')
self.task3 = mock_task('task3', states.FAILURE, KeyError('brown'))
self.task4 = mock_task('task3', states.RETRY, KeyError('red'))
self.task5 = mock_task(
'task3', states.FAILURE, KeyError('blue'), PYTRACEBACK,
)
self.task6 = mock_task('task6', states.SUCCESS, None)
for task in (self.task1, self.task2,
self.task3, self.task4, self.task5, self.task6):
save_result(self.app, task)
@self.app.task(shared=False)
def mytask():
pass
self.mytask = mytask
def test_forget(self):
first = Mock()
second = self.app.AsyncResult(self.task1['id'], parent=first)
third = self.app.AsyncResult(self.task2['id'], parent=second)
last = self.app.AsyncResult(self.task3['id'], parent=third)
last.forget()
first.forget.assert_called_once()
assert last.result is None
assert second.result is None
def test_ignored_getter(self):
result = self.app.AsyncResult(uuid())
assert result.ignored is False
result.__delattr__('_ignored')
assert result.ignored is False
@patch('celery.result.task_join_will_block')
def test_assert_will_not_block(self, task_join_will_block):
task_join_will_block.return_value = True
with pytest.raises(RuntimeError):
assert_will_not_block()
task_join_will_block.return_value = False
assert_will_not_block()
@patch('celery.result.task_join_will_block')
def test_get_sync_subtask_option(self, task_join_will_block):
task_join_will_block.return_value = True
tid = uuid()
backend = _MockBackend()
res_subtask_async = AsyncResult(tid, backend=backend)
with pytest.raises(RuntimeError):
res_subtask_async.get()
res_subtask_async.get(disable_sync_subtasks=False)
def test_without_id(self):
with pytest.raises(ValueError):
AsyncResult(None, app=self.app)
def test_compat_properties(self):
x = self.app.AsyncResult('1')
assert x.task_id == x.id
x.task_id = '2'
assert x.id == '2'
@pytest.mark.usefixtures('depends_on_current_app')
def test_reduce_direct(self):
x = AsyncResult('1', app=self.app)
fun, args = x.__reduce__()
assert fun(*args) == x
def test_children(self):
x = self.app.AsyncResult('1')
children = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)]
x._cache = {'children': children, 'status': states.SUCCESS}
x.backend = Mock()
assert x.children
assert len(x.children) == 3
def test_propagates_for_parent(self):
x = self.app.AsyncResult(uuid())
x.backend = Mock(name='backend')
x.backend.get_task_meta.return_value = {}
x.backend.wait_for_pending.return_value = 84
x.parent = EagerResult(uuid(), KeyError('foo'), states.FAILURE)
with pytest.raises(KeyError):
x.get(propagate=True)
x.backend.wait_for_pending.assert_not_called()
x.parent = EagerResult(uuid(), 42, states.SUCCESS)
assert x.get(propagate=True) == 84
x.backend.wait_for_pending.assert_called()
def test_get_children(self):
tid = uuid()
x = self.app.AsyncResult(tid)
child = [self.app.AsyncResult(uuid()).as_tuple()
for i in range(10)]
x._cache = {'children': child}
assert x.children
assert len(x.children) == 10
x._cache = {'status': states.SUCCESS}
x.backend._cache[tid] = {'result': None}
assert x.children is None
def test_build_graph_get_leaf_collect(self):
x = self.app.AsyncResult('1')
x.backend._cache['1'] = {'status': states.SUCCESS, 'result': None}
c = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)]
x.iterdeps = Mock()
x.iterdeps.return_value = (
(None, x),
(x, c[0]),
(c[0], c[1]),
(c[1], c[2])
)
x.backend.READY_STATES = states.READY_STATES
assert x.graph
assert x.get_leaf() == 2
it = x.collect()
assert list(it) == [
(x, None),
(c[0], 0),
(c[1], 1),
(c[2], 2),
]
def test_iterdeps(self):
x = self.app.AsyncResult('1')
c = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)]
x._cache = {'status': states.SUCCESS, 'result': None, 'children': c}
for child in c:
child.backend = Mock()
child.backend.get_children.return_value = []
it = x.iterdeps()
assert list(it) == [
(None, x),
(x, c[0]),
(x, c[1]),
(x, c[2]),
]
x._cache = None
x.ready = Mock()
x.ready.return_value = False
with pytest.raises(IncompleteStream):
list(x.iterdeps())
list(x.iterdeps(intermediate=True))
def test_eq_not_implemented(self):
assert self.app.AsyncResult('1') != object()
@pytest.mark.usefixtures('depends_on_current_app')
def test_reduce(self):
a1 = self.app.AsyncResult('uuid')
restored = pickle.loads(pickle.dumps(a1))
assert restored.id == 'uuid'
a2 = self.app.AsyncResult('uuid')
assert pickle.loads(pickle.dumps(a2)).id == 'uuid'
def test_maybe_set_cache_empty(self):
self.app.AsyncResult('uuid')._maybe_set_cache(None)
def test_set_cache__children(self):
r1 = self.app.AsyncResult('id1')
r2 = self.app.AsyncResult('id2')
r1._set_cache({'children': [r2.as_tuple()]})
assert r2 in r1.children
def test_successful(self):
ok_res = self.app.AsyncResult(self.task1['id'])
nok_res = self.app.AsyncResult(self.task3['id'])
nok_res2 = self.app.AsyncResult(self.task4['id'])
assert ok_res.successful()
assert not nok_res.successful()
assert not nok_res2.successful()
pending_res = self.app.AsyncResult(uuid())
assert not pending_res.successful()
def test_raising(self):
notb = self.app.AsyncResult(self.task3['id'])
withtb = self.app.AsyncResult(self.task5['id'])
with pytest.raises(KeyError):
notb.get()
with pytest.raises(KeyError) as excinfo:
withtb.get()
tb = [t.strip() for t in traceback.format_tb(excinfo.tb)]
assert 'File "foo.py", line 2, in foofunc' not in tb
assert 'File "bar.py", line 3, in barfunc' not in tb
assert excinfo.value.args[0] == 'blue'
assert excinfo.typename == 'KeyError'
def test_raising_remote_tracebacks(self):
pytest.importorskip('tblib')
withtb = self.app.AsyncResult(self.task5['id'])
self.app.conf.task_remote_tracebacks = True
with pytest.raises(KeyError) as excinfo:
withtb.get()
tb = [t.strip() for t in traceback.format_tb(excinfo.tb)]
assert 'File "foo.py", line 2, in foofunc' in tb
assert 'File "bar.py", line 3, in barfunc' in tb
assert excinfo.value.args[0] == 'blue'
assert excinfo.typename == 'KeyError'
def test_str(self):
ok_res = self.app.AsyncResult(self.task1['id'])
ok2_res = self.app.AsyncResult(self.task2['id'])
nok_res = self.app.AsyncResult(self.task3['id'])
assert str(ok_res) == self.task1['id']
assert str(ok2_res) == self.task2['id']
assert str(nok_res) == self.task3['id']
pending_id = uuid()
pending_res = self.app.AsyncResult(pending_id)
assert str(pending_res) == pending_id
def test_repr(self):
ok_res = self.app.AsyncResult(self.task1['id'])
ok2_res = self.app.AsyncResult(self.task2['id'])
nok_res = self.app.AsyncResult(self.task3['id'])
assert repr(ok_res) == f"<AsyncResult: {self.task1['id']}>"
assert repr(ok2_res) == f"<AsyncResult: {self.task2['id']}>"
assert repr(nok_res) == f"<AsyncResult: {self.task3['id']}>"
pending_id = uuid()
pending_res = self.app.AsyncResult(pending_id)
assert repr(pending_res) == f'<AsyncResult: {pending_id}>'
def test_hash(self):
assert (hash(self.app.AsyncResult('x0w991')) ==
hash(self.app.AsyncResult('x0w991')))
assert (hash(self.app.AsyncResult('x0w991')) !=
hash(self.app.AsyncResult('x1w991')))
def test_get_traceback(self):
ok_res = self.app.AsyncResult(self.task1['id'])
nok_res = self.app.AsyncResult(self.task3['id'])
nok_res2 = self.app.AsyncResult(self.task4['id'])
assert not ok_res.traceback
assert nok_res.traceback
assert nok_res2.traceback
pending_res = self.app.AsyncResult(uuid())
assert not pending_res.traceback
def test_get__backend_gives_None(self):
res = self.app.AsyncResult(self.task1['id'])
res.backend.wait_for = Mock(name='wait_for')
res.backend.wait_for.return_value = None
assert res.get() is None
def test_get(self):
ok_res = self.app.AsyncResult(self.task1['id'])
ok2_res = self.app.AsyncResult(self.task2['id'])
nok_res = self.app.AsyncResult(self.task3['id'])
nok2_res = self.app.AsyncResult(self.task4['id'])
none_res = self.app.AsyncResult(self.task6['id'])
callback = Mock(name='callback')
assert ok_res.get(callback=callback) == 'the'
callback.assert_called_with(ok_res.id, 'the')
assert ok2_res.get() == 'quick'
with pytest.raises(KeyError):
nok_res.get()
assert nok_res.get(propagate=False)
assert isinstance(nok2_res.result, KeyError)
assert ok_res.info == 'the'
assert none_res.get() is None
assert none_res.state == states.SUCCESS
def test_get_when_ignored(self):
result = self.app.AsyncResult(uuid())
result.ignored = True
# Does not block
assert result.get() is None
def test_eq_ne(self):
r1 = self.app.AsyncResult(self.task1['id'])
r2 = self.app.AsyncResult(self.task1['id'])
r3 = self.app.AsyncResult(self.task2['id'])
assert r1 == r2
assert r1 != r3
assert r1 == r2.id
assert r1 != r3.id
@pytest.mark.usefixtures('depends_on_current_app')
def test_reduce_restore(self):
r1 = self.app.AsyncResult(self.task1['id'])
fun, args = r1.__reduce__()
assert fun(*args) == r1
def test_get_timeout(self):
res = self.app.AsyncResult(self.task4['id']) # has RETRY state
with pytest.raises(TimeoutError):
res.get(timeout=0.001)
pending_res = self.app.AsyncResult(uuid())
with patch('celery.result.time') as _time:
with pytest.raises(TimeoutError):
pending_res.get(timeout=0.001, interval=0.001)
_time.sleep.assert_called_with(0.001)
def test_get_timeout_longer(self):
res = self.app.AsyncResult(self.task4['id']) # has RETRY state
with patch('celery.result.time') as _time:
with pytest.raises(TimeoutError):
res.get(timeout=1, interval=1)
_time.sleep.assert_called_with(1)
def test_ready(self):
oks = (self.app.AsyncResult(self.task1['id']),
self.app.AsyncResult(self.task2['id']),
self.app.AsyncResult(self.task3['id']))
assert all(result.ready() for result in oks)
assert not self.app.AsyncResult(self.task4['id']).ready()
assert not self.app.AsyncResult(uuid()).ready()
@pytest.mark.skipif(
platform.python_implementation() == "PyPy",
reason="Mocking here doesn't play well with PyPy",
)
def test_del(self):
with patch('celery.result.AsyncResult.backend') as backend:
result = self.app.AsyncResult(self.task1['id'])
result.backend = backend
result_clone = copy.copy(result)
del result
backend.remove_pending_result.assert_called_once_with(
result_clone
)
result = self.app.AsyncResult(self.task1['id'])
result.backend = None
del result
def test_get_request_meta(self):
x = self.app.AsyncResult('1')
request = Context(
task='foo',
children=None,
args=['one', 'two'],
kwargs={'kwarg1': 'three'},
hostname="foo",
retries=1,
delivery_info={'routing_key': 'celery'}
)
x.backend.store_result(task_id="1", result='foo', state=states.SUCCESS,
traceback=None, request=request)
assert x.name == 'foo'
assert x.args == ['one', 'two']
assert x.kwargs == {'kwarg1': 'three'}
assert x.worker == 'foo'
assert x.retries == 1
assert x.queue == 'celery'
assert isinstance(x.date_done, datetime.datetime)
assert x.task_id == "1"
assert x.state == "SUCCESS"
result = self.app.AsyncResult(self.task4['id'])
assert result.date_done is None
@patch('celery.app.base.to_utc')
@pytest.mark.parametrize('timezone, date', [
("UTC", "2024-08-24T00:00:00+00:00"),
("America/Los_Angeles", "2024-08-23T17:00:00-07:00"),
("Pacific/Kwajalein", "2024-08-24T12:00:00+12:00"),
("Europe/Berlin", "2024-08-24T02:00:00+02:00"),
])
def test_date_done(self, utc_datetime_mock, timezone, date):
utc_datetime_mock.return_value = datetime.datetime(2024, 8, 24, 0, 0, 0, 0, datetime.timezone.utc)
self.app.conf.timezone = timezone
del self.app.timezone # reset cached timezone
result = Backend(app=self.app)._get_result_meta(None, states.SUCCESS, None, None)
assert result.get('date_done') == date
def test_forget_remove_pending_result(self):
with patch('celery.result.AsyncResult.backend') as backend:
result = self.app.AsyncResult(self.task1['id'])
result.backend = backend
result_clone = copy.copy(result)
result.forget()
backend.remove_pending_result.assert_called_once_with(
result_clone
)
result = self.app.AsyncResult(self.task1['id'])
result.backend = None
del result
| test_AsyncResult |
python | pytorch__pytorch | test/distributed/test_composability.py | {
"start": 1896,
"end": 2863
} | class ____(torch.nn.Module):
def __init__(self, d_hid: int):
super().__init__()
self.net1 = nn.Linear(d_hid, d_hid)
self.net2 = nn.Linear(d_hid, d_hid)
self.net3 = nn.Linear(d_hid, d_hid * 2)
self.init_weights()
def init_weights(self):
torch.nn.init.kaiming_uniform_(
self.net1.weight, mode="fan_in", nonlinearity="relu"
)
torch.nn.init.kaiming_uniform_(
self.net2.weight, mode="fan_in", nonlinearity="relu"
)
torch.nn.init.kaiming_uniform_(
self.net3.weight, mode="fan_in", nonlinearity="relu"
)
def forward(self, x):
x = F.relu(self.net1(x))
x = F.relu(self.net2(x))
x = F.relu(self.net3(x))
return x
def loss_fn(y, target, scale=1e-4):
# Scale the loss to simulate a small learning rate and avoid exploding grads
return torch.nn.functional.cross_entropy(y, target) * scale
| MLPModuleEven |
python | fluentpython__example-code | 06-dp-1class-func/strategy.py | {
"start": 1300,
"end": 2623
} | class ____: # the Context
def __init__(self, customer, cart, promotion=None):
self.customer = customer
self.cart = list(cart)
self.promotion = promotion
def total(self):
if not hasattr(self, '__total'):
self.__total = sum(item.total() for item in self.cart)
return self.__total
def due(self):
if self.promotion is None:
discount = 0
else:
discount = self.promotion(self) # <1>
return self.total() - discount
def __repr__(self):
fmt = '<Order total: {:.2f} due: {:.2f}>'
return fmt.format(self.total(), self.due())
# <2>
def fidelity_promo(order): # <3>
"""5% discount for customers with 1000 or more fidelity points"""
return order.total() * .05 if order.customer.fidelity >= 1000 else 0
def bulk_item_promo(order):
"""10% discount for each LineItem with 20 or more units"""
discount = 0
for item in order.cart:
if item.quantity >= 20:
discount += item.total() * .1
return discount
def large_order_promo(order):
"""7% discount for orders with 10 or more distinct items"""
distinct_items = {item.product for item in order.cart}
if len(distinct_items) >= 10:
return order.total() * .07
return 0
# END STRATEGY
| Order |
python | justquick__django-activity-stream | actstream/feeds.py | {
"start": 10897,
"end": 11161
} | class ____(ObjectActivityFeed):
"""
Atom feed of Activity for a given object (where actions involve the given object as any of the entities).
"""
feed_type = ActivityStreamsAtomFeed
subtitle = ObjectActivityFeed.description
| AtomObjectActivityFeed |
python | wandb__wandb | wandb/sdk/data_types/html.py | {
"start": 397,
"end": 4900
} | class ____(BatchableMedia):
"""W&B class for logging HTML content to W&B."""
_log_type = "html-file"
def __init__(
self,
data: Union[str, pathlib.Path, "TextIO"],
inject: bool = True,
data_is_not_path: bool = False,
) -> None:
"""Creates a W&B HTML object.
Args:
data:
A string that is a path to a file with the extension ".html",
or a string or IO object containing literal HTML.
inject: Add a stylesheet to the HTML object. If set
to False the HTML will pass through unchanged.
data_is_not_path: If set to False, the data will be
treated as a path to a file.
Examples:
It can be initialized by providing a path to a file:
```python
with wandb.init() as run:
run.log({"html": wandb.Html("./index.html")})
```
Alternatively, it can be initialized by providing literal HTML,
in either a string or IO object:
```python
with wandb.init() as run:
run.log({"html": wandb.Html("<h1>Hello, world!</h1>")})
```
"""
super().__init__()
data_is_path = (
isinstance(data, (str, pathlib.Path))
and os.path.isfile(data)
and os.path.splitext(data)[1] == ".html"
) and not data_is_not_path
data_path = ""
if data_is_path:
data_path = str(data)
with open(data_path, encoding="utf-8") as file:
self.html = file.read()
elif isinstance(data, str):
self.html = data
elif hasattr(data, "read"):
if hasattr(data, "seek"):
data.seek(0)
self.html = data.read()
else:
raise ValueError("data must be a string or an io object")
if inject:
self.inject_head()
if inject or not data_is_path:
tmp_path = os.path.join(MEDIA_TMP.name, runid.generate_id() + ".html")
with open(tmp_path, "w", encoding="utf-8") as out:
out.write(self.html)
self._set_file(tmp_path, is_tmp=True)
else:
self._set_file(data_path, is_tmp=False)
def inject_head(self) -> None:
"""Inject a <head> tag into the HTML.
<!-- lazydoc-ignore: internal -->
"""
join = ""
if "<head>" in self.html:
parts = self.html.split("<head>", 1)
parts[0] = parts[0] + "<head>"
elif "<html>" in self.html:
parts = self.html.split("<html>", 1)
parts[0] = parts[0] + "<html><head>"
parts[1] = "</head>" + parts[1]
else:
parts = ["", self.html]
parts.insert(
1,
'<base target="_blank"><link rel="stylesheet" type="text/css" href="https://app.wandb.ai/normalize.css" />',
)
self.html = join.join(parts).strip()
@classmethod
def get_media_subdir(cls: Type["Html"]) -> str:
"""Get media subdirectory.
"<!-- lazydoc-ignore-classmethod: internal -->
"""
return os.path.join("media", "html")
def to_json(self, run_or_artifact: Union["LocalRun", "Artifact"]) -> dict:
"""Returns the JSON representation expected by the backend.
<!-- lazydoc-ignore: internal -->
"""
json_dict = super().to_json(run_or_artifact)
json_dict["_type"] = self._log_type
return json_dict
@classmethod
def from_json(
cls: Type["Html"], json_obj: dict, source_artifact: "Artifact"
) -> "Html":
"""Deserialize a JSON object into it's class representation.
"<!-- lazydoc-ignore-classmethod: internal -->
"""
return cls(source_artifact.get_entry(json_obj["path"]).download(), inject=False)
@classmethod
def seq_to_json(
cls: Type["Html"],
seq: Sequence["BatchableMedia"],
run: "LocalRun",
key: str,
step: Union[int, str],
) -> dict:
"""Convert a sequence of HTML objects to a JSON representation.
"<!-- lazydoc-ignore-classmethod: internal -->
"""
base_path = os.path.join(run.dir, cls.get_media_subdir())
filesystem.mkdir_exists_ok(base_path)
meta = {
"_type": "html",
"count": len(seq),
"html": [h.to_json(run) for h in seq],
}
return meta
| Html |
python | django__django | tests/middleware_exceptions/tests.py | {
"start": 9054,
"end": 13186
} | class ____(SimpleTestCase):
@override_settings(
MIDDLEWARE=[
"middleware_exceptions.middleware.PaymentMiddleware",
]
)
def test_sync_middleware(self):
response = self.client.get("/middleware_exceptions/view/")
self.assertEqual(response.status_code, 402)
@override_settings(
MIDDLEWARE=[
"middleware_exceptions.middleware.DecoratedPaymentMiddleware",
]
)
def test_sync_decorated_middleware(self):
response = self.client.get("/middleware_exceptions/view/")
self.assertEqual(response.status_code, 402)
@override_settings(
MIDDLEWARE=[
"middleware_exceptions.middleware.async_payment_middleware",
]
)
def test_async_middleware(self):
with self.assertLogs("django.request", "DEBUG") as cm:
response = self.client.get("/middleware_exceptions/view/")
self.assertEqual(response.status_code, 402)
self.assertEqual(
cm.records[0].getMessage(),
"Synchronous handler adapted for middleware "
"middleware_exceptions.middleware.async_payment_middleware.",
)
@override_settings(
MIDDLEWARE=[
"middleware_exceptions.middleware.NotSyncOrAsyncMiddleware",
]
)
def test_not_sync_or_async_middleware(self):
msg = (
"Middleware "
"middleware_exceptions.middleware.NotSyncOrAsyncMiddleware must "
"have at least one of sync_capable/async_capable set to True."
)
with self.assertRaisesMessage(RuntimeError, msg):
self.client.get("/middleware_exceptions/view/")
@override_settings(
MIDDLEWARE=[
"middleware_exceptions.middleware.PaymentMiddleware",
]
)
async def test_sync_middleware_async(self):
with self.assertLogs("django.request", "DEBUG") as cm:
response = await self.async_client.get("/middleware_exceptions/view/")
self.assertEqual(response.status_code, 402)
self.assertEqual(
cm.records[0].getMessage(),
"Asynchronous handler adapted for middleware "
"middleware_exceptions.middleware.PaymentMiddleware.",
)
@override_settings(
MIDDLEWARE=[
"middleware_exceptions.middleware.async_payment_middleware",
]
)
async def test_async_middleware_async(self):
with self.assertLogs("django.request", "WARNING") as cm:
response = await self.async_client.get("/middleware_exceptions/view/")
self.assertEqual(response.status_code, 402)
self.assertEqual(
cm.records[0].getMessage(),
"Payment Required: /middleware_exceptions/view/",
)
@override_settings(
DEBUG=False,
MIDDLEWARE=[
"middleware_exceptions.middleware.AsyncNoTemplateResponseMiddleware",
],
)
def test_async_process_template_response_returns_none_with_sync_client(self):
msg = (
"AsyncNoTemplateResponseMiddleware.process_template_response "
"didn't return an HttpResponse object."
)
with self.assertRaisesMessage(ValueError, msg):
self.client.get("/middleware_exceptions/template_response/")
@override_settings(
MIDDLEWARE=[
"middleware_exceptions.middleware.SyncAndAsyncMiddleware",
]
)
async def test_async_and_sync_middleware_async_call(self):
response = await self.async_client.get("/middleware_exceptions/view/")
self.assertEqual(response.content, b"OK")
self.assertEqual(response.status_code, 200)
@override_settings(
MIDDLEWARE=[
"middleware_exceptions.middleware.SyncAndAsyncMiddleware",
]
)
def test_async_and_sync_middleware_sync_call(self):
response = self.client.get("/middleware_exceptions/view/")
self.assertEqual(response.content, b"OK")
self.assertEqual(response.status_code, 200)
@override_settings(ROOT_URLCONF="middleware_exceptions.urls")
| MiddlewareSyncAsyncTests |
python | django__django | django/core/validators.py | {
"start": 12685,
"end": 13747
} | class ____:
message = _("Ensure this value is %(limit_value)s (it is %(show_value)s).")
code = "limit_value"
def __init__(self, limit_value, message=None):
self.limit_value = limit_value
if message:
self.message = message
def __call__(self, value):
cleaned = self.clean(value)
limit_value = (
self.limit_value() if callable(self.limit_value) else self.limit_value
)
params = {"limit_value": limit_value, "show_value": cleaned, "value": value}
if self.compare(cleaned, limit_value):
raise ValidationError(self.message, code=self.code, params=params)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return (
self.limit_value == other.limit_value
and self.message == other.message
and self.code == other.code
)
def compare(self, a, b):
return a is not b
def clean(self, x):
return x
@deconstructible
| BaseValidator |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_bedrock.py | {
"start": 989,
"end": 1632
} | class ____:
@pytest.mark.parametrize(
("test_hook", "service_name"),
[
pytest.param(BedrockHook(), "bedrock", id="bedrock"),
pytest.param(BedrockRuntimeHook(), "bedrock-runtime", id="bedrock-runtime"),
pytest.param(BedrockAgentHook(), "bedrock-agent", id="bedrock-agent"),
pytest.param(BedrockAgentRuntimeHook(), "bedrock-agent-runtime", id="bedrock-agent-runtime"),
],
)
def test_bedrock_hooks(self, test_hook, service_name):
assert test_hook.conn is not None
assert test_hook.conn.meta.service_model.service_name == service_name
| TestBedrockHooks |
python | prabhupant__python-ds | algorithms/dynamic_programming/Z_Algorithm.py | {
"start": 0,
"end": 975
} | class ____():
"""
return all occurences of string s in text, returns its indexes starting from zero
delimeter should be charachter which will not occur neither is S nor in text
by default its '$'
"""
@staticmethod
def find_occurrences(s:str , text:str , delimeter = '$'):
return Z_Algorithm.z_function(s + delimeter + text , len(s))
@staticmethod
def z_function(text:str , size:int):
l = 0
r = 0
z = [0] * len(text)
for i in range(1 , len(text)):
if i <= r:
z[i] = min(r - i + 1 , z[i - l])
while i + z[i] < len(text) and text[z[i]] == text[i + z[i]]:
z[i] += 1
if i + z[i] - 1 > r:
l = i
r = i + z[i] - 1
res = []
for i in range(size , len(text)):
if z[i] == size:
res.append(i - size - 1)
return res | Z_Algorithm |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/temporal_context.py | {
"start": 104,
"end": 1286
} | class ____:
"""TemporalContext represents an effective time, used for business logic, and last_event_id
which is used to identify that state of the event log at some point in time. Put another way,
the value of a TemporalContext represents a point in time and a snapshot of the event log.
Effective time: This is the effective time of the computation in terms of business logic,
and it impacts the behavior of partitioning and partition mapping. For example,
the "last" partition window of a given partitions definition, it is with
respect to the effective time.
Last event id: Our event log has a monotonically increasing event id. This is used to
cursor the event log. This event_id is also propogated to derived tables to indicate
when that record is valid. This allows us to query the state of the event log
at a given point in time.
Note that insertion time of the last_event_id is not the same as the effective time.
A last_event_id of None indicates that the reads will be volatile and will immediately
reflect any subsequent writes.
"""
effective_dt: datetime
last_event_id: Optional[int]
| TemporalContext |
python | mlflow__mlflow | mlflow/genai/datasets/databricks_evaluation_dataset_source.py | {
"start": 79,
"end": 2407
} | class ____(DatasetSource):
"""
Represents a Databricks Evaluation Dataset source.
This source is used for datasets managed by the Databricks agents SDK.
"""
def __init__(self, table_name: str, dataset_id: str):
"""
Args:
table_name: The three-level UC table name of the dataset
dataset_id: The unique identifier of the dataset
"""
self._table_name = table_name
self._dataset_id = dataset_id
@property
def table_name(self) -> str:
"""The UC table name of the dataset."""
return self._table_name
@property
def dataset_id(self) -> str:
"""The unique identifier of the dataset."""
return self._dataset_id
@staticmethod
def _get_source_type() -> str:
return "databricks_evaluation_dataset"
def load(self, **kwargs) -> Any:
"""
Loads the dataset from the source.
This method is not implemented as the dataset should be loaded through
the databricks.agents.datasets API.
"""
raise NotImplementedError(
"Loading a Databricks Evaluation Dataset from source is not supported"
)
@staticmethod
def _can_resolve(raw_source: dict[str, Any]) -> bool:
"""
Determines whether the source can be resolved from a dictionary representation.
"""
# Resolution from a dictionary representation is not supported for Databricks Evaluation
# Datasets
return False
@classmethod
def _resolve(cls, raw_source: dict[str, Any]):
"""
Resolves the source from a dictionary representation.
"""
raise NotImplementedError("Resolution from a source dictionary is not supported")
def to_dict(self) -> dict[str, Any]:
"""
Returns a dictionary representation of the source.
"""
return {
"table_name": self._table_name,
"dataset_id": self._dataset_id,
}
@classmethod
def from_dict(cls, source_dict: dict[str, Any]) -> "DatabricksEvaluationDatasetSource":
"""
Creates an instance from a dictionary representation.
"""
return cls(table_name=source_dict["table_name"], dataset_id=source_dict["dataset_id"])
| DatabricksEvaluationDatasetSource |
python | realpython__materials | python-property/product.py | {
"start": 0,
"end": 187
} | class ____:
def __init__(self, name, price):
self._name = name
self._price = float(price)
@property
def price(self):
return f"${self._price:,.2f}"
| Product |
python | pytorch__pytorch | torch/distributed/fsdp/_fully_shard/_fsdp_api.py | {
"start": 4425,
"end": 4608
} | class ____:
"""
This base class represents the policy of no offloading and is only used as
the default value for the ``offload_policy`` arg.
"""
@dataclass
| OffloadPolicy |
python | getsentry__sentry | src/sentry/prevent/migrations/0001_create_prevent_ai_configuration.py | {
"start": 325,
"end": 2749
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
initial = True
dependencies = [
("sentry", "0999_add_extrapolation_mode_to_snuba_query"),
]
operations = [
migrations.CreateModel(
name="PreventAIConfiguration",
fields=[
(
"id",
sentry.db.models.fields.bounded.BoundedBigAutoField(
primary_key=True, serialize=False
),
),
("date_updated", models.DateTimeField(auto_now=True)),
("date_added", models.DateTimeField(auto_now_add=True)),
(
"integration_id",
sentry.db.models.fields.hybrid_cloud_foreign_key.HybridCloudForeignKey(
"sentry.Integration", db_index=True, null=True, on_delete="CASCADE"
),
),
("data", models.JSONField(default=dict)),
(
"organization",
sentry.db.models.fields.foreignkey.FlexibleForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="sentry.organization"
),
),
],
options={
"db_table": "prevent_ai_configuration",
"unique_together": {("organization_id", "integration_id")},
},
),
]
| Migration |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/hooks/data_lake.py | {
"start": 1689,
"end": 10320
} | class ____(BaseHook):
"""
Integration with Azure Data Lake.
AzureDataLakeHook communicates via a REST API compatible with WebHDFS. Make
sure that a Airflow connection of type ``azure_data_lake`` exists.
Authorization can be done by supplying a *login* (=Client ID), *password*
(=Client Secret), and extra fields *tenant* (Tenant) and *account_name*
(Account Name). See connection ``azure_data_lake_default`` for an example.
Client ID and secret should be in user and password parameters.
Tenant and account name should be extra field as
``{"tenant": "<TENANT>", "account_name": "ACCOUNT_NAME"}``.
:param azure_data_lake_conn_id: Reference to
:ref:`Azure Data Lake connection<howto/connection:adl>`.
"""
conn_name_attr = "azure_data_lake_conn_id"
default_conn_name = "azure_data_lake_default"
conn_type = "azure_data_lake"
hook_name = "Azure Data Lake"
@classmethod
@add_managed_identity_connection_widgets
def get_connection_form_widgets(cls) -> dict[str, Any]:
"""Return connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import StringField
return {
"tenant": StringField(lazy_gettext("Azure Tenant ID"), widget=BS3TextFieldWidget()),
"account_name": StringField(
lazy_gettext("Azure DataLake Store Name"), widget=BS3TextFieldWidget()
),
}
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
"""Return custom field behaviour."""
return {
"hidden_fields": ["schema", "port", "host", "extra"],
"relabeling": {
"login": "Azure Client ID",
"password": "Azure Client Secret",
},
"placeholders": {
"login": "client id",
"password": "secret",
"tenant": "tenant id",
"account_name": "datalake store",
},
}
def __init__(self, azure_data_lake_conn_id: str = default_conn_name) -> None:
super().__init__()
self.conn_id = azure_data_lake_conn_id
self._conn: core.AzureDLFileSystem | None = None
self.account_name: str | None = None
def _get_field(self, extras, name):
return get_field(
conn_id=self.conn_id,
conn_type=self.conn_type,
extras=extras,
field_name=name,
)
def get_conn(self) -> core.AzureDLFileSystem:
"""Return a AzureDLFileSystem object."""
if not self._conn:
conn = self.get_connection(self.conn_id)
extras = conn.extra_dejson
self.account_name = self._get_field(extras, "account_name")
credential: Credentials
tenant = self._get_field(extras, "tenant")
if tenant:
credential = lib.auth(tenant_id=tenant, client_secret=conn.password, client_id=conn.login)
else:
managed_identity_client_id = self._get_field(extras, "managed_identity_client_id")
workload_identity_tenant_id = self._get_field(extras, "workload_identity_tenant_id")
credential = AzureIdentityCredentialAdapter(
managed_identity_client_id=managed_identity_client_id,
workload_identity_tenant_id=workload_identity_tenant_id,
)
self._conn = core.AzureDLFileSystem(credential, store_name=self.account_name)
self._conn.connect()
return self._conn
def check_for_file(self, file_path: str) -> bool:
"""
Check if a file exists on Azure Data Lake.
:param file_path: Path and name of the file.
:return: True if the file exists, False otherwise.
"""
try:
files = self.get_conn().glob(file_path, details=False, invalidate_cache=True)
return len(files) == 1
except FileNotFoundError:
return False
def upload_file(
self,
local_path: str,
remote_path: str,
nthreads: int = 64,
overwrite: bool = True,
buffersize: int = 4194304,
blocksize: int = 4194304,
**kwargs,
) -> None:
"""
Upload a file to Azure Data Lake.
:param local_path: local path. Can be single file, directory (in which case,
upload recursively) or glob pattern. Recursive glob patterns using `**`
are not supported.
:param remote_path: Remote path to upload to; if multiple files, this is the
directory root to write within.
:param nthreads: Number of threads to use. If None, uses the number of cores.
:param overwrite: Whether to forcibly overwrite existing files/directories.
If False and remote path is a directory, will quit regardless if any files
would be overwritten or not. If True, only matching filenames are actually
overwritten.
:param buffersize: int [2**22]
Number of bytes for internal buffer. This block cannot be bigger than
a chunk and cannot be smaller than a block.
:param blocksize: int [2**22]
Number of bytes for a block. Within each chunk, we write a smaller
block for each API call. This block cannot be bigger than a chunk.
"""
multithread.ADLUploader(
self.get_conn(),
lpath=local_path,
rpath=remote_path,
nthreads=nthreads,
overwrite=overwrite,
buffersize=buffersize,
blocksize=blocksize,
**kwargs,
)
def download_file(
self,
local_path: str,
remote_path: str,
nthreads: int = 64,
overwrite: bool = True,
buffersize: int = 4194304,
blocksize: int = 4194304,
**kwargs,
) -> None:
"""
Download a file from Azure Blob Storage.
:param local_path: local path. If downloading a single file, will write to this
specific file, unless it is an existing directory, in which case a file is
created within it. If downloading multiple files, this is the root
directory to write within. Will create directories as required.
:param remote_path: remote path/globstring to use to find remote files.
Recursive glob patterns using `**` are not supported.
:param nthreads: Number of threads to use. If None, uses the number of cores.
:param overwrite: Whether to forcibly overwrite existing files/directories.
If False and remote path is a directory, will quit regardless if any files
would be overwritten or not. If True, only matching filenames are actually
overwritten.
:param buffersize: int [2**22]
Number of bytes for internal buffer. This block cannot be bigger than
a chunk and cannot be smaller than a block.
:param blocksize: int [2**22]
Number of bytes for a block. Within each chunk, we write a smaller
block for each API call. This block cannot be bigger than a chunk.
"""
multithread.ADLDownloader(
self.get_conn(),
lpath=local_path,
rpath=remote_path,
nthreads=nthreads,
overwrite=overwrite,
buffersize=buffersize,
blocksize=blocksize,
**kwargs,
)
def list(self, path: str) -> list:
"""
List files in Azure Data Lake Storage.
:param path: full path/globstring to use to list files in ADLS
"""
if "*" in path:
return self.get_conn().glob(path)
return self.get_conn().walk(path)
def remove(self, path: str, recursive: bool = False, ignore_not_found: bool = True) -> None:
"""
Remove files in Azure Data Lake Storage.
:param path: A directory or file to remove in ADLS
:param recursive: Whether to loop into directories in the location and remove the files
:param ignore_not_found: Whether to raise error if file to delete is not found
"""
try:
self.get_conn().remove(path=path, recursive=recursive)
except FileNotFoundError:
if ignore_not_found:
self.log.info("File %s not found", path)
else:
raise AirflowException(f"File {path} not found")
| AzureDataLakeHook |
python | simonw__datasette | datasette/permissions.py | {
"start": 6352,
"end": 6734
} | class ____:
name: str
abbr: str | None
description: str | None
takes_database: bool
takes_resource: bool
default: bool
# This is deliberately undocumented: it's considered an internal
# implementation detail for view-table/view-database and should
# not be used by plugins as it may change in the future.
implies_can_view: bool = False
| Permission |
python | Textualize__textual | tests/test_path.py | {
"start": 432,
"end": 1095
} | class ____(App[None]):
CSS_PATH = ["test.tcss", Path("/another/path.tcss")]
@pytest.mark.parametrize(
"app_class,expected_css_path_attribute",
[
(RelativePathObjectApp, [APP_DIR / "test.tcss"]),
(RelativePathStrApp, [APP_DIR / "test.tcss"]),
(AbsolutePathObjectApp, [Path("/tmp/test.tcss")]),
(AbsolutePathStrApp, [Path("/tmp/test.tcss")]),
(ListPathApp, [APP_DIR / "test.tcss", Path("/another/path.tcss")]),
],
)
def test_css_paths_of_various_types(app_class, expected_css_path_attribute):
app = app_class()
assert app.css_path == [path.absolute() for path in expected_css_path_attribute]
| ListPathApp |
python | matplotlib__matplotlib | lib/matplotlib/widgets.py | {
"start": 75749,
"end": 89152
} | class ____(AxesWidget):
def __init__(self, ax, onselect=None, useblit=False, button=None,
state_modifier_keys=None, use_data_coordinates=False):
super().__init__(ax)
self._visible = True
if onselect is None:
self.onselect = lambda *args: None
else:
self.onselect = onselect
self.useblit = useblit and self.canvas.supports_blit
self.connect_default_events()
self._state_modifier_keys = dict(move=' ', clear='escape',
square='shift', center='control',
rotate='r')
self._state_modifier_keys.update(state_modifier_keys or {})
self._use_data_coordinates = use_data_coordinates
self.background = None
if isinstance(button, Integral):
self.validButtons = [button]
else:
self.validButtons = button
# Set to True when a selection is completed, otherwise is False
self._selection_completed = False
# will save the data (position at mouseclick)
self._eventpress = None
# will save the data (pos. at mouserelease)
self._eventrelease = None
self._prev_event = None
self._state = set()
def set_active(self, active):
super().set_active(active)
if active:
self.update_background(None)
def _get_animated_artists(self):
"""
Convenience method to get all animated artists of the figure containing
this widget, excluding those already present in self.artists.
The returned tuple is not sorted by 'z_order': z_order sorting is
valid only when considering all artists and not only a subset of all
artists.
"""
return tuple(a for ax_ in self.ax.get_figure().get_axes()
for a in ax_.get_children()
if a.get_animated() and a not in self.artists)
def update_background(self, event):
"""Force an update of the background."""
# If you add a call to `ignore` here, you'll want to check edge case:
# `release` can call a draw event even when `ignore` is True.
if not self.useblit:
return
if self.canvas.is_saving():
return # saving does not use blitting
# Make sure that widget artists don't get accidentally included in the
# background, by re-rendering the background if needed (and then
# re-re-rendering the canvas with the visible widget artists).
# We need to remove all artists which will be drawn when updating
# the selector: if we have animated artists in the figure, it is safer
# to redrawn by default, in case they have updated by the callback
# zorder needs to be respected when redrawing
artists = sorted(self.artists + self._get_animated_artists(),
key=lambda a: a.get_zorder())
needs_redraw = any(artist.get_visible() for artist in artists)
with ExitStack() as stack:
if needs_redraw:
for artist in artists:
stack.enter_context(artist._cm_set(visible=False))
self.canvas.draw()
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
if needs_redraw:
for artist in artists:
self.ax.draw_artist(artist)
def connect_default_events(self):
"""Connect the major canvas events to methods."""
self.connect_event('motion_notify_event', self.onmove)
self.connect_event('button_press_event', self.press)
self.connect_event('button_release_event', self.release)
self.connect_event('draw_event', self.update_background)
self.connect_event('key_press_event', self.on_key_press)
self.connect_event('key_release_event', self.on_key_release)
self.connect_event('scroll_event', self.on_scroll)
def ignore(self, event):
# docstring inherited
if super().ignore(event):
return True
if not self.ax.get_visible():
return True
# If canvas was locked
if not self.canvas.widgetlock.available(self):
return True
if not hasattr(event, 'button'):
event.button = None
# Only do rectangle selection if event was triggered
# with a desired button
if (self.validButtons is not None
and event.button not in self.validButtons):
return True
# If no button was pressed yet ignore the event if it was out of the Axes.
if self._eventpress is None:
return not self.ax.contains(event)[0]
# If a button was pressed, check if the release-button is the same.
if event.button == self._eventpress.button:
return False
# If a button was pressed, check if the release-button is the same.
return (not self.ax.contains(event)[0] or
event.button != self._eventpress.button)
def update(self):
"""Draw using blit() or draw_idle(), depending on ``self.useblit``."""
if (not self.ax.get_visible() or
self.ax.get_figure(root=True)._get_renderer() is None):
return
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
else:
self.update_background(None)
# We need to draw all artists, which are not included in the
# background, therefore we also draw self._get_animated_artists()
# and we make sure that we respect z_order
artists = sorted(self.artists + self._get_animated_artists(),
key=lambda a: a.get_zorder())
for artist in artists:
self.ax.draw_artist(artist)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
def _get_data(self, event):
"""Get the xdata and ydata for event, with limits."""
if event.xdata is None:
return None, None
xdata, ydata = self._get_data_coords(event)
xdata = np.clip(xdata, *self.ax.get_xbound())
ydata = np.clip(ydata, *self.ax.get_ybound())
return xdata, ydata
def _clean_event(self, event):
"""
Preprocess an event:
- Replace *event* by the previous event if *event* has no ``xdata``.
- Get ``xdata`` and ``ydata`` from this widget's Axes, and clip them to the axes
limits.
- Update the previous event.
"""
if event.xdata is None:
event = self._prev_event
else:
event = copy.copy(event)
event.xdata, event.ydata = self._get_data(event)
self._prev_event = event
return event
def press(self, event):
"""Button press handler and validator."""
if not self.ignore(event):
event = self._clean_event(event)
self._eventpress = event
self._prev_event = event
key = event.key or ''
key = key.replace('ctrl', 'control')
# move state is locked in on a button press
if key == self._state_modifier_keys['move']:
self._state.add('move')
self._press(event)
return True
return False
def _press(self, event):
"""Button press event handler."""
def release(self, event):
"""Button release event handler and validator."""
if not self.ignore(event) and self._eventpress:
event = self._clean_event(event)
self._eventrelease = event
self._release(event)
self._eventpress = None
self._eventrelease = None
self._state.discard('move')
return True
return False
def _release(self, event):
"""Button release event handler."""
def onmove(self, event):
"""Cursor move event handler and validator."""
if not self.ignore(event) and self._eventpress:
event = self._clean_event(event)
self._onmove(event)
return True
return False
def _onmove(self, event):
"""Cursor move event handler."""
def on_scroll(self, event):
"""Mouse scroll event handler and validator."""
if not self.ignore(event):
self._on_scroll(event)
def _on_scroll(self, event):
"""Mouse scroll event handler."""
def on_key_press(self, event):
"""Key press event handler and validator for all selection widgets."""
if self.active:
key = event.key or ''
key = key.replace('ctrl', 'control')
if key == self._state_modifier_keys['clear']:
self.clear()
return
for (state, modifier) in self._state_modifier_keys.items():
if modifier in key.split('+'):
# 'rotate' is changing _state on press and is not removed
# from _state when releasing
if state == 'rotate':
if state in self._state:
self._state.discard(state)
else:
self._state.add(state)
else:
self._state.add(state)
self._on_key_press(event)
def _on_key_press(self, event):
"""Key press event handler - for widget-specific key press actions."""
def on_key_release(self, event):
"""Key release event handler and validator."""
if self.active:
key = event.key or ''
for (state, modifier) in self._state_modifier_keys.items():
# 'rotate' is changing _state on press and is not removed
# from _state when releasing
if modifier in key.split('+') and state != 'rotate':
self._state.discard(state)
self._on_key_release(event)
def _on_key_release(self, event):
"""Key release event handler."""
def set_visible(self, visible):
"""Set the visibility of the selector artists."""
self._visible = visible
for artist in self.artists:
artist.set_visible(visible)
def get_visible(self):
"""Get the visibility of the selector artists."""
return self._visible
def clear(self):
"""Clear the selection and set the selector ready to make a new one."""
self._clear_without_update()
self.update()
def _clear_without_update(self):
self._selection_completed = False
self.set_visible(False)
@property
def artists(self):
"""Tuple of the artists of the selector."""
handles_artists = getattr(self, '_handles_artists', ())
return (self._selection_artist,) + handles_artists
def set_props(self, **props):
"""
Set the properties of the selector artist.
See the *props* argument in the selector docstring to know which properties are
supported.
"""
artist = self._selection_artist
props = cbook.normalize_kwargs(props, artist)
artist.set(**props)
if self.useblit:
self.update()
def set_handle_props(self, **handle_props):
"""
Set the properties of the handles selector artist. See the
`handle_props` argument in the selector docstring to know which
properties are supported.
"""
if not hasattr(self, '_handles_artists'):
raise NotImplementedError("This selector doesn't have handles.")
artist = self._handles_artists[0]
handle_props = cbook.normalize_kwargs(handle_props, artist)
for handle in self._handles_artists:
handle.set(**handle_props)
if self.useblit:
self.update()
self._handle_props.update(handle_props)
def _validate_state(self, state):
supported_state = [
key for key, value in self._state_modifier_keys.items()
if key != 'clear' and value != 'not-applicable'
]
_api.check_in_list(supported_state, state=state)
def add_state(self, state):
"""
Add a state to define the widget's behavior. See the
`state_modifier_keys` parameters for details.
Parameters
----------
state : str
Must be a supported state of the selector. See the
`state_modifier_keys` parameters for details.
Raises
------
ValueError
When the state is not supported by the selector.
"""
self._validate_state(state)
self._state.add(state)
def remove_state(self, state):
"""
Remove a state to define the widget's behavior. See the
`state_modifier_keys` parameters for details.
Parameters
----------
state : str
Must be a supported state of the selector. See the
`state_modifier_keys` parameters for details.
Raises
------
ValueError
When the state is not supported by the selector.
"""
self._validate_state(state)
self._state.remove(state)
| _SelectorWidget |
python | ray-project__ray | python/ray/experimental/shuffle.py | {
"start": 2278,
"end": 3098
} | class ____(ObjectStoreWriter):
def __init__(self):
self.results = []
def add(self, item: InType) -> None:
self.results.append(item)
def finish(self) -> List[Any]:
return self.results
def round_robin_partitioner(
input_stream: Iterable[InType], num_partitions: int
) -> Iterable[Tuple[PartitionID, InType]]:
"""Round robin partitions items from the input reader.
You can write custom partitioning functions for your use case.
Args:
input_stream: Iterator over items from the input reader.
num_partitions: Number of output partitions.
Yields:
Tuples of (partition id, input item).
"""
i = 0
for item in input_stream:
yield (i, item)
i += 1
i %= num_partitions
@ray.remote
| ObjectStoreWriterNonStreaming |
python | openai__openai-python | src/openai/types/beta/chatkit/session_create_params.py | {
"start": 514,
"end": 1274
} | class ____(TypedDict, total=False):
user: Required[str]
"""
A free-form string that identifies your end user; ensures this Session can
access other objects that have the same `user` scope.
"""
workflow: Required[ChatSessionWorkflowParam]
"""Workflow that powers the session."""
chatkit_configuration: ChatSessionChatKitConfigurationParam
"""Optional overrides for ChatKit runtime configuration features"""
expires_after: ChatSessionExpiresAfterParam
"""Optional override for session expiration timing in seconds from creation.
Defaults to 10 minutes.
"""
rate_limits: ChatSessionRateLimitsParam
"""Optional override for per-minute request limits. When omitted, defaults to 10."""
| SessionCreateParams |
python | scipy__scipy | scipy/optimize/_shgo_lib/_complex.py | {
"start": 233,
"end": 50263
} | class ____:
"""
Base class for a simplicial complex described as a cache of vertices
together with their connections.
Important methods:
Domain triangulation:
Complex.triangulate, Complex.split_generation
Triangulating arbitrary points (must be traingulable,
may exist outside domain):
Complex.triangulate(sample_set)
Converting another simplicial complex structure data type to the
structure used in Complex (ex. OBJ wavefront)
Complex.convert(datatype, data)
Important objects:
HC.V: The cache of vertices and their connection
HC.H: Storage structure of all vertex groups
Parameters
----------
dim : int
Spatial dimensionality of the complex R^dim
domain : list of tuples, optional
The bounds [x_l, x_u]^dim of the hyperrectangle space
ex. The default domain is the hyperrectangle [0, 1]^dim
Note: The domain must be convex, non-convex spaces can be cut
away from this domain using the non-linear
g_cons functions to define any arbitrary domain
(these domains may also be disconnected from each other)
sfield :
A scalar function defined in the associated domain f: R^dim --> R
sfield_args : tuple
Additional arguments to be passed to `sfield`
vfield :
A scalar function defined in the associated domain
f: R^dim --> R^m
(for example a gradient function of the scalar field)
vfield_args : tuple
Additional arguments to be passed to vfield
symmetry : None or list
Specify if the objective function contains symmetric variables.
The search space (and therefore performance) is decreased by up to
O(n!) times in the fully symmetric case.
E.g. f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2
In this equation x_2 and x_3 are symmetric to x_1, while x_5 and
x_6 are symmetric to x_4, this can be specified to the solver as:
symmetry = [0, # Variable 1
0, # symmetric to variable 1
0, # symmetric to variable 1
3, # Variable 4
3, # symmetric to variable 4
3, # symmetric to variable 4
]
constraints : dict or sequence of dict, optional
Constraints definition.
Function(s) ``R**n`` in the form::
g(x) <= 0 applied as g : R^n -> R^m
h(x) == 0 applied as h : R^n -> R^p
Each constraint is defined in a dictionary with fields:
type : str
Constraint type: 'eq' for equality, 'ineq' for inequality.
fun : callable
The function defining the constraint.
jac : callable, optional
The Jacobian of `fun` (only for SLSQP).
args : sequence, optional
Extra arguments to be passed to the function and Jacobian.
Equality constraint means that the constraint function result is to
be zero whereas inequality means that it is to be
non-negative.constraints : dict or sequence of dict, optional
Constraints definition.
Function(s) ``R**n`` in the form::
g(x) <= 0 applied as g : R^n -> R^m
h(x) == 0 applied as h : R^n -> R^p
Each constraint is defined in a dictionary with fields:
type : str
Constraint type: 'eq' for equality, 'ineq' for inequality.
fun : callable
The function defining the constraint.
jac : callable, optional
The Jacobian of `fun` (unused).
args : sequence, optional
Extra arguments to be passed to the function and Jacobian.
Equality constraint means that the constraint function result is to
be zero whereas inequality means that it is to be non-negative.
workers : int optional
Uses `multiprocessing.Pool <multiprocessing>`) to compute the field
functions in parallel.
"""
def __init__(self, dim, domain=None, sfield=None, sfield_args=(),
symmetry=None, constraints=None, workers=1):
self.dim = dim
# Domains
self.domain = domain
if domain is None:
self.bounds = [(0.0, 1.0), ] * dim
else:
self.bounds = domain
self.symmetry = symmetry
# here in init to avoid if checks
# Field functions
self.sfield = sfield
self.sfield_args = sfield_args
# Process constraints
# Constraints
# Process constraint dict sequence:
if constraints is not None:
self.min_cons = constraints
self.g_cons = []
self.g_args = []
if not isinstance(constraints, tuple | list):
constraints = (constraints,)
for cons in constraints:
if cons['type'] in ('ineq'):
self.g_cons.append(cons['fun'])
try:
self.g_args.append(cons['args'])
except KeyError:
self.g_args.append(())
self.g_cons = tuple(self.g_cons)
self.g_args = tuple(self.g_args)
else:
self.g_cons = None
self.g_args = None
# Homology properties
self.gen = 0
self.perm_cycle = 0
# Every cell is stored in a list of its generation,
# ex. the initial cell is stored in self.H[0]
# 1st get new cells are stored in self.H[1] etc.
# When a cell is sub-generated it is removed from this list
self.H = [] # Storage structure of vertex groups
# Cache of all vertices
if (sfield is not None) or (self.g_cons is not None):
# Initiate a vertex cache and an associated field cache, note that
# the field case is always initiated inside the vertex cache if an
# associated field scalar field is defined:
if sfield is not None:
self.V = VertexCacheField(field=sfield, field_args=sfield_args,
g_cons=self.g_cons,
g_cons_args=self.g_args,
workers=workers)
elif self.g_cons is not None:
self.V = VertexCacheField(field=sfield, field_args=sfield_args,
g_cons=self.g_cons,
g_cons_args=self.g_args,
workers=workers)
else:
self.V = VertexCacheIndex()
self.V_non_symm = [] # List of non-symmetric vertices
self.split_edge = cache(self._split_edge)
def __call__(self):
return self.H
# %% Triangulation methods
def cyclic_product(self, bounds, origin, supremum, centroid=True):
"""Generate initial triangulation using cyclic product"""
# Define current hyperrectangle
vot = tuple(origin)
vut = tuple(supremum) # Hyperrectangle supremum
self.V[vot]
vo = self.V[vot]
yield vo.x
self.V[vut].connect(self.V[vot])
yield vut
# Cyclic group approach with second x_l --- x_u operation.
# These containers store the "lower" and "upper" vertices
# corresponding to the origin or supremum of every C2 group.
# It has the structure of `dim` times embedded lists each containing
# these vertices as the entire complex grows. Bounds[0] has to be done
# outside the loops before we have symmetric containers.
# NOTE: This means that bounds[0][1] must always exist
C0x = [[self.V[vot]]]
a_vo = copy.copy(list(origin))
a_vo[0] = vut[0] # Update aN Origin
a_vo = self.V[tuple(a_vo)]
# self.V[vot].connect(self.V[tuple(a_vo)])
self.V[vot].connect(a_vo)
yield a_vo.x
C1x = [[a_vo]]
# C1x = [[self.V[tuple(a_vo)]]]
ab_C = [] # Container for a + b operations
# Loop over remaining bounds
for i, x in enumerate(bounds[1:]):
# Update lower and upper containers
C0x.append([])
C1x.append([])
# try to access a second bound (if not, C1 is symmetric)
try:
# Early try so that we don't have to copy the cache before
# moving on to next C1/C2: Try to add the operation of a new
# C2 product by accessing the upper bound
x[1]
# Copy lists for iteration
cC0x = [x[:] for x in C0x[:i + 1]]
cC1x = [x[:] for x in C1x[:i + 1]]
for j, (VL, VU) in enumerate(zip(cC0x, cC1x)):
for k, (vl, vu) in enumerate(zip(VL, VU)):
# Build aN vertices for each lower-upper pair in N:
a_vl = list(vl.x)
a_vu = list(vu.x)
a_vl[i + 1] = vut[i + 1]
a_vu[i + 1] = vut[i + 1]
a_vl = self.V[tuple(a_vl)]
# Connect vertices in N to corresponding vertices
# in aN:
vl.connect(a_vl)
yield a_vl.x
a_vu = self.V[tuple(a_vu)]
# Connect vertices in N to corresponding vertices
# in aN:
vu.connect(a_vu)
# Connect new vertex pair in aN:
a_vl.connect(a_vu)
# Connect lower pair to upper (triangulation
# operation of a + b (two arbitrary operations):
vl.connect(a_vu)
ab_C.append((vl, a_vu))
# Update the containers
C0x[i + 1].append(vl)
C0x[i + 1].append(vu)
C1x[i + 1].append(a_vl)
C1x[i + 1].append(a_vu)
# Update old containers
C0x[j].append(a_vl)
C1x[j].append(a_vu)
# Yield new points
yield a_vu.x
# Try to connect aN lower source of previous a + b
# operation with a aN vertex
ab_Cc = copy.copy(ab_C)
for vp in ab_Cc:
b_v = list(vp[0].x)
ab_v = list(vp[1].x)
b_v[i + 1] = vut[i + 1]
ab_v[i + 1] = vut[i + 1]
b_v = self.V[tuple(b_v)] # b + vl
ab_v = self.V[tuple(ab_v)] # b + a_vl
# Note o---o is already connected
vp[0].connect(ab_v) # o-s
b_v.connect(ab_v) # s-s
# Add new list of cross pairs
ab_C.append((vp[0], ab_v))
ab_C.append((b_v, ab_v))
except IndexError:
cC0x = C0x[i]
cC1x = C1x[i]
VL, VU = cC0x, cC1x
for k, (vl, vu) in enumerate(zip(VL, VU)):
# Build aN vertices for each lower-upper pair in N:
a_vu = list(vu.x)
a_vu[i + 1] = vut[i + 1]
# Connect vertices in N to corresponding vertices
# in aN:
a_vu = self.V[tuple(a_vu)]
# Connect vertices in N to corresponding vertices
# in aN:
vu.connect(a_vu)
# Connect new vertex pair in aN:
# a_vl.connect(a_vu)
# Connect lower pair to upper (triangulation
# operation of a + b (two arbitrary operations):
vl.connect(a_vu)
ab_C.append((vl, a_vu))
C0x[i + 1].append(vu)
C1x[i + 1].append(a_vu)
# Yield new points
a_vu.connect(self.V[vut])
yield a_vu.x
ab_Cc = copy.copy(ab_C)
for vp in ab_Cc:
if vp[1].x[i] == vut[i]:
ab_v = list(vp[1].x)
ab_v[i + 1] = vut[i + 1]
ab_v = self.V[tuple(ab_v)] # b + a_vl
# Note o---o is already connected
vp[0].connect(ab_v) # o-s
# Add new list of cross pairs
ab_C.append((vp[0], ab_v))
# Clean class trash
try:
del C0x
del cC0x
del C1x
del cC1x
del ab_C
del ab_Cc
except UnboundLocalError:
pass
# Extra yield to ensure that the triangulation is completed
if centroid:
vo = self.V[vot]
vs = self.V[vut]
# Disconnect the origin and supremum
vo.disconnect(vs)
# Build centroid
vc = self.split_edge(vot, vut)
for v in vo.nn:
v.connect(vc)
yield vc.x
return vc.x
else:
yield vut
return vut
def triangulate(self, n=None, symmetry=None, centroid=True,
printout=False):
"""
Triangulate the initial domain, if n is not None then a limited number
of points will be generated
Parameters
----------
n : int, Number of points to be sampled.
symmetry :
Ex. Dictionary/hashtable
f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2
symmetry = symmetry[0]: 0, # Variable 1
symmetry[1]: 0, # symmetric to variable 1
symmetry[2]: 0, # symmetric to variable 1
symmetry[3]: 3, # Variable 4
symmetry[4]: 3, # symmetric to variable 4
symmetry[5]: 3, # symmetric to variable 4
}
centroid : bool, if True add a central point to the hypercube
printout : bool, if True print out results
NOTES:
------
Rather than using the combinatorial algorithm to connect vertices we
make the following observation:
The bound pairs are similar a C2 cyclic group and the structure is
formed using the cartesian product:
H = C2 x C2 x C2 ... x C2 (dim times)
So construct any normal subgroup N and consider H/N first, we connect
all vertices within N (ex. N is C2 (the first dimension), then we move
to a left coset aN (an operation moving around the defined H/N group by
for example moving from the lower bound in C2 (dimension 2) to the
higher bound in C2. During this operation connection all the vertices.
Now repeat the N connections. Note that these elements can be connected
in parallel.
"""
# Inherit class arguments
if symmetry is None:
symmetry = self.symmetry
# Build origin and supremum vectors
origin = [i[0] for i in self.bounds]
self.origin = origin
supremum = [i[1] for i in self.bounds]
self.supremum = supremum
if symmetry is None:
cbounds = self.bounds
else:
cbounds = copy.copy(self.bounds)
for i, j in enumerate(symmetry):
if i is not j:
# pop second entry on second symmetry vars
cbounds[i] = [self.bounds[symmetry[i]][0]]
# Sole (first) entry is the sup value and there is no
# origin:
cbounds[i] = [self.bounds[symmetry[i]][1]]
if (self.bounds[symmetry[i]] is not
self.bounds[symmetry[j]]):
logging.warning(f"Variable {i} was specified as "
f"symmetric to variable {j}, however"
f", the bounds {i} ="
f" {self.bounds[symmetry[i]]} and {j}"
f" ="
f" {self.bounds[symmetry[j]]} do not "
f"match, the mismatch was ignored in "
f"the initial triangulation.")
cbounds[i] = self.bounds[symmetry[j]]
if n is None:
# Build generator
self.cp = self.cyclic_product(cbounds, origin, supremum, centroid)
for i in self.cp:
i
try:
self.triangulated_vectors.append((tuple(self.origin),
tuple(self.supremum)))
except (AttributeError, KeyError):
self.triangulated_vectors = [(tuple(self.origin),
tuple(self.supremum))]
else:
# Check if generator already exists
try:
self.cp
except (AttributeError, KeyError):
self.cp = self.cyclic_product(cbounds, origin, supremum,
centroid)
try:
while len(self.V.cache) < n:
next(self.cp)
except StopIteration:
try:
self.triangulated_vectors.append((tuple(self.origin),
tuple(self.supremum)))
except (AttributeError, KeyError):
self.triangulated_vectors = [(tuple(self.origin),
tuple(self.supremum))]
if printout:
# for v in self.C0():
# v.print_out()
for v in self.V.cache:
self.V[v].print_out()
return
def refine(self, n=1):
if n is None:
try:
self.triangulated_vectors
self.refine_all()
return
except AttributeError as ae:
if str(ae) == "'Complex' object has no attribute " \
"'triangulated_vectors'":
self.triangulate(symmetry=self.symmetry)
return
else:
raise
nt = len(self.V.cache) + n # Target number of total vertices
# In the outer while loop we iterate until we have added an extra `n`
# vertices to the complex:
while len(self.V.cache) < nt: # while loop 1
try: # try 1
# Try to access triangulated_vectors, this should only be
# defined if an initial triangulation has already been
# performed:
self.triangulated_vectors
# Try a usual iteration of the current generator, if it
# does not exist or is exhausted then produce a new generator
try: # try 2
next(self.rls)
except (AttributeError, StopIteration, KeyError):
vp = self.triangulated_vectors[0]
self.rls = self.refine_local_space(*vp, bounds=self.bounds)
next(self.rls)
except (AttributeError, KeyError):
# If an initial triangulation has not been completed, then
# we start/continue the initial triangulation targeting `nt`
# vertices, if nt is greater than the initial number of
# vertices then the `refine` routine will move back to try 1.
self.triangulate(nt, self.symmetry)
return
def refine_all(self, centroids=True):
"""Refine the entire domain of the current complex."""
try:
self.triangulated_vectors
tvs = copy.copy(self.triangulated_vectors)
for i, vp in enumerate(tvs):
self.rls = self.refine_local_space(*vp, bounds=self.bounds)
for i in self.rls:
i
except AttributeError as ae:
if str(ae) == "'Complex' object has no attribute " \
"'triangulated_vectors'":
self.triangulate(symmetry=self.symmetry, centroid=centroids)
else:
raise
# This adds a centroid to every new sub-domain generated and defined
# by self.triangulated_vectors, in addition the vertices ! to complete
# the triangulation
return
def refine_local_space(self, origin, supremum, bounds, centroid=1):
# Copy for later removal
origin_c = copy.copy(origin)
supremum_c = copy.copy(supremum)
# Initiate local variables redefined in later inner `for` loop:
vl, vu, a_vu = None, None, None
# Change the vector orientation so that it is only increasing
s_ov = list(origin)
s_origin = list(origin)
s_sv = list(supremum)
s_supremum = list(supremum)
for i, vi in enumerate(s_origin):
if s_ov[i] > s_sv[i]:
s_origin[i] = s_sv[i]
s_supremum[i] = s_ov[i]
vot = tuple(s_origin)
vut = tuple(s_supremum) # Hyperrectangle supremum
vo = self.V[vot] # initiate if doesn't exist yet
vs = self.V[vut]
# Start by finding the old centroid of the new space:
vco = self.split_edge(vo.x, vs.x) # Split in case not centroid arg
# Find set of extreme vertices in current local space
sup_set = copy.copy(vco.nn)
# Cyclic group approach with second x_l --- x_u operation.
# These containers store the "lower" and "upper" vertices
# corresponding to the origin or supremum of every C2 group.
# It has the structure of `dim` times embedded lists each containing
# these vertices as the entire complex grows. Bounds[0] has to be done
# outside the loops before we have symmetric containers.
# NOTE: This means that bounds[0][1] must always exist
a_vl = copy.copy(list(vot))
a_vl[0] = vut[0] # Update aN Origin
if tuple(a_vl) not in self.V.cache:
vo = self.V[vot] # initiate if doesn't exist yet
vs = self.V[vut]
# Start by finding the old centroid of the new space:
vco = self.split_edge(vo.x, vs.x) # Split in case not centroid arg
# Find set of extreme vertices in current local space
sup_set = copy.copy(vco.nn)
a_vl = copy.copy(list(vot))
a_vl[0] = vut[0] # Update aN Origin
a_vl = self.V[tuple(a_vl)]
else:
a_vl = self.V[tuple(a_vl)]
c_v = self.split_edge(vo.x, a_vl.x)
c_v.connect(vco)
yield c_v.x
Cox = [[vo]]
Ccx = [[c_v]]
Cux = [[a_vl]]
ab_C = [] # Container for a + b operations
s_ab_C = [] # Container for symmetric a + b operations
# Loop over remaining bounds
for i, x in enumerate(bounds[1:]):
# Update lower and upper containers
Cox.append([])
Ccx.append([])
Cux.append([])
# try to access a second bound (if not, C1 is symmetric)
try:
t_a_vl = list(vot)
t_a_vl[i + 1] = vut[i + 1]
# New: lists are used anyway, so copy all
# %%
# Copy lists for iteration
cCox = [x[:] for x in Cox[:i + 1]]
cCcx = [x[:] for x in Ccx[:i + 1]]
cCux = [x[:] for x in Cux[:i + 1]]
# Try to connect aN lower source of previous a + b
# operation with a aN vertex
ab_Cc = copy.copy(ab_C) # NOTE: We append ab_C in the
# (VL, VC, VU) for-loop, but we use the copy of the list in the
# ab_Cc for-loop.
s_ab_Cc = copy.copy(s_ab_C)
# Early try so that we don't have to copy the cache before
# moving on to next C1/C2: Try to add the operation of a new
# C2 product by accessing the upper bound
if tuple(t_a_vl) not in self.V.cache:
# Raise error to continue symmetric refine
raise IndexError
t_a_vu = list(vut)
t_a_vu[i + 1] = vut[i + 1]
if tuple(t_a_vu) not in self.V.cache:
# Raise error to continue symmetric refine:
raise IndexError
for vectors in s_ab_Cc:
# s_ab_C.append([c_vc, vl, vu, a_vu])
bc_vc = list(vectors[0].x)
b_vl = list(vectors[1].x)
b_vu = list(vectors[2].x)
ba_vu = list(vectors[3].x)
bc_vc[i + 1] = vut[i + 1]
b_vl[i + 1] = vut[i + 1]
b_vu[i + 1] = vut[i + 1]
ba_vu[i + 1] = vut[i + 1]
bc_vc = self.V[tuple(bc_vc)]
bc_vc.connect(vco) # NOTE: Unneeded?
yield bc_vc
# Split to centre, call this centre group "d = 0.5*a"
d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x)
d_bc_vc.connect(bc_vc)
d_bc_vc.connect(vectors[1]) # Connect all to centroid
d_bc_vc.connect(vectors[2]) # Connect all to centroid
d_bc_vc.connect(vectors[3]) # Connect all to centroid
yield d_bc_vc.x
b_vl = self.V[tuple(b_vl)]
bc_vc.connect(b_vl) # Connect aN cross pairs
d_bc_vc.connect(b_vl) # Connect all to centroid
yield b_vl
b_vu = self.V[tuple(b_vu)]
bc_vc.connect(b_vu) # Connect aN cross pairs
d_bc_vc.connect(b_vu) # Connect all to centroid
b_vl_c = self.split_edge(b_vu.x, b_vl.x)
bc_vc.connect(b_vl_c)
yield b_vu
ba_vu = self.V[tuple(ba_vu)]
bc_vc.connect(ba_vu) # Connect aN cross pairs
d_bc_vc.connect(ba_vu) # Connect all to centroid
# Split the a + b edge of the initial triangulation:
os_v = self.split_edge(vectors[1].x, ba_vu.x) # o-s
ss_v = self.split_edge(b_vl.x, ba_vu.x) # s-s
b_vu_c = self.split_edge(b_vu.x, ba_vu.x)
bc_vc.connect(b_vu_c)
yield os_v.x # often equal to vco, but not always
yield ss_v.x # often equal to bc_vu, but not always
yield ba_vu
# Split remaining to centre, call this centre group
# "d = 0.5*a"
d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x)
d_bc_vc.connect(vco) # NOTE: Unneeded?
yield d_bc_vc.x
d_b_vl = self.split_edge(vectors[1].x, b_vl.x)
d_bc_vc.connect(vco) # NOTE: Unneeded?
d_bc_vc.connect(d_b_vl) # Connect dN cross pairs
yield d_b_vl.x
d_b_vu = self.split_edge(vectors[2].x, b_vu.x)
d_bc_vc.connect(vco) # NOTE: Unneeded?
d_bc_vc.connect(d_b_vu) # Connect dN cross pairs
yield d_b_vu.x
d_ba_vu = self.split_edge(vectors[3].x, ba_vu.x)
d_bc_vc.connect(vco) # NOTE: Unneeded?
d_bc_vc.connect(d_ba_vu) # Connect dN cross pairs
yield d_ba_vu
# comb = [c_vc, vl, vu, a_vl, a_vu,
# bc_vc, b_vl, b_vu, ba_vl, ba_vu]
comb = [vl, vu, a_vu,
b_vl, b_vu, ba_vu]
comb_iter = itertools.combinations(comb, 2)
for vecs in comb_iter:
self.split_edge(vecs[0].x, vecs[1].x)
# Add new list of cross pairs
ab_C.append((d_bc_vc, vectors[1], b_vl, a_vu, ba_vu))
ab_C.append((d_bc_vc, vl, b_vl, a_vu, ba_vu)) # = prev
for vectors in ab_Cc:
bc_vc = list(vectors[0].x)
b_vl = list(vectors[1].x)
b_vu = list(vectors[2].x)
ba_vl = list(vectors[3].x)
ba_vu = list(vectors[4].x)
bc_vc[i + 1] = vut[i + 1]
b_vl[i + 1] = vut[i + 1]
b_vu[i + 1] = vut[i + 1]
ba_vl[i + 1] = vut[i + 1]
ba_vu[i + 1] = vut[i + 1]
bc_vc = self.V[tuple(bc_vc)]
bc_vc.connect(vco) # NOTE: Unneeded?
yield bc_vc
# Split to centre, call this centre group "d = 0.5*a"
d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x)
d_bc_vc.connect(bc_vc)
d_bc_vc.connect(vectors[1]) # Connect all to centroid
d_bc_vc.connect(vectors[2]) # Connect all to centroid
d_bc_vc.connect(vectors[3]) # Connect all to centroid
d_bc_vc.connect(vectors[4]) # Connect all to centroid
yield d_bc_vc.x
b_vl = self.V[tuple(b_vl)]
bc_vc.connect(b_vl) # Connect aN cross pairs
d_bc_vc.connect(b_vl) # Connect all to centroid
yield b_vl
b_vu = self.V[tuple(b_vu)]
bc_vc.connect(b_vu) # Connect aN cross pairs
d_bc_vc.connect(b_vu) # Connect all to centroid
yield b_vu
ba_vl = self.V[tuple(ba_vl)]
bc_vc.connect(ba_vl) # Connect aN cross pairs
d_bc_vc.connect(ba_vl) # Connect all to centroid
self.split_edge(b_vu.x, ba_vl.x)
yield ba_vl
ba_vu = self.V[tuple(ba_vu)]
bc_vc.connect(ba_vu) # Connect aN cross pairs
d_bc_vc.connect(ba_vu) # Connect all to centroid
# Split the a + b edge of the initial triangulation:
os_v = self.split_edge(vectors[1].x, ba_vu.x) # o-s
ss_v = self.split_edge(b_vl.x, ba_vu.x) # s-s
yield os_v.x # often equal to vco, but not always
yield ss_v.x # often equal to bc_vu, but not always
yield ba_vu
# Split remaining to centre, call this centre group
# "d = 0.5*a"
d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x)
d_bc_vc.connect(vco) # NOTE: Unneeded?
yield d_bc_vc.x
d_b_vl = self.split_edge(vectors[1].x, b_vl.x)
d_bc_vc.connect(vco) # NOTE: Unneeded?
d_bc_vc.connect(d_b_vl) # Connect dN cross pairs
yield d_b_vl.x
d_b_vu = self.split_edge(vectors[2].x, b_vu.x)
d_bc_vc.connect(vco) # NOTE: Unneeded?
d_bc_vc.connect(d_b_vu) # Connect dN cross pairs
yield d_b_vu.x
d_ba_vl = self.split_edge(vectors[3].x, ba_vl.x)
d_bc_vc.connect(vco) # NOTE: Unneeded?
d_bc_vc.connect(d_ba_vl) # Connect dN cross pairs
yield d_ba_vl
d_ba_vu = self.split_edge(vectors[4].x, ba_vu.x)
d_bc_vc.connect(vco) # NOTE: Unneeded?
d_bc_vc.connect(d_ba_vu) # Connect dN cross pairs
yield d_ba_vu
c_vc, vl, vu, a_vl, a_vu = vectors
comb = [vl, vu, a_vl, a_vu,
b_vl, b_vu, ba_vl, ba_vu]
comb_iter = itertools.combinations(comb, 2)
for vecs in comb_iter:
self.split_edge(vecs[0].x, vecs[1].x)
# Add new list of cross pairs
ab_C.append((bc_vc, b_vl, b_vu, ba_vl, ba_vu))
ab_C.append((d_bc_vc, d_b_vl, d_b_vu, d_ba_vl, d_ba_vu))
ab_C.append((d_bc_vc, vectors[1], b_vl, a_vu, ba_vu))
ab_C.append((d_bc_vc, vu, b_vu, a_vl, ba_vl))
for j, (VL, VC, VU) in enumerate(zip(cCox, cCcx, cCux)):
for k, (vl, vc, vu) in enumerate(zip(VL, VC, VU)):
# Build aN vertices for each lower-upper C3 group in N:
a_vl = list(vl.x)
a_vu = list(vu.x)
a_vl[i + 1] = vut[i + 1]
a_vu[i + 1] = vut[i + 1]
a_vl = self.V[tuple(a_vl)]
a_vu = self.V[tuple(a_vu)]
# Note, build (a + vc) later for consistent yields
# Split the a + b edge of the initial triangulation:
c_vc = self.split_edge(vl.x, a_vu.x)
self.split_edge(vl.x, vu.x) # Equal to vc
# Build cN vertices for each lower-upper C3 group in N:
c_vc.connect(vco)
c_vc.connect(vc)
c_vc.connect(vl) # Connect c + ac operations
c_vc.connect(vu) # Connect c + ac operations
c_vc.connect(a_vl) # Connect c + ac operations
c_vc.connect(a_vu) # Connect c + ac operations
yield c_vc.x
c_vl = self.split_edge(vl.x, a_vl.x)
c_vl.connect(vco)
c_vc.connect(c_vl) # Connect cN group vertices
yield c_vl.x
# yield at end of loop:
c_vu = self.split_edge(vu.x, a_vu.x)
c_vu.connect(vco)
# Connect remaining cN group vertices
c_vc.connect(c_vu) # Connect cN group vertices
yield c_vu.x
a_vc = self.split_edge(a_vl.x, a_vu.x) # is (a + vc) ?
a_vc.connect(vco)
a_vc.connect(c_vc)
# Storage for connecting c + ac operations:
ab_C.append((c_vc, vl, vu, a_vl, a_vu))
# Update the containers
Cox[i + 1].append(vl)
Cox[i + 1].append(vc)
Cox[i + 1].append(vu)
Ccx[i + 1].append(c_vl)
Ccx[i + 1].append(c_vc)
Ccx[i + 1].append(c_vu)
Cux[i + 1].append(a_vl)
Cux[i + 1].append(a_vc)
Cux[i + 1].append(a_vu)
# Update old containers
Cox[j].append(c_vl) # !
Cox[j].append(a_vl)
Ccx[j].append(c_vc) # !
Ccx[j].append(a_vc) # !
Cux[j].append(c_vu) # !
Cux[j].append(a_vu)
# Yield new points
yield a_vc.x
except IndexError:
for vectors in ab_Cc:
ba_vl = list(vectors[3].x)
ba_vu = list(vectors[4].x)
ba_vl[i + 1] = vut[i + 1]
ba_vu[i + 1] = vut[i + 1]
ba_vu = self.V[tuple(ba_vu)]
yield ba_vu
d_bc_vc = self.split_edge(vectors[1].x, ba_vu.x) # o-s
yield ba_vu
d_bc_vc.connect(vectors[1]) # Connect all to centroid
d_bc_vc.connect(vectors[2]) # Connect all to centroid
d_bc_vc.connect(vectors[3]) # Connect all to centroid
d_bc_vc.connect(vectors[4]) # Connect all to centroid
yield d_bc_vc.x
ba_vl = self.V[tuple(ba_vl)]
yield ba_vl
d_ba_vl = self.split_edge(vectors[3].x, ba_vl.x)
d_ba_vu = self.split_edge(vectors[4].x, ba_vu.x)
d_ba_vc = self.split_edge(d_ba_vl.x, d_ba_vu.x)
yield d_ba_vl
yield d_ba_vu
yield d_ba_vc
c_vc, vl, vu, a_vl, a_vu = vectors
comb = [vl, vu, a_vl, a_vu,
ba_vl,
ba_vu]
comb_iter = itertools.combinations(comb, 2)
for vecs in comb_iter:
self.split_edge(vecs[0].x, vecs[1].x)
# Copy lists for iteration
cCox = Cox[i]
cCcx = Ccx[i]
cCux = Cux[i]
VL, VC, VU = cCox, cCcx, cCux
for k, (vl, vc, vu) in enumerate(zip(VL, VC, VU)):
# Build aN vertices for each lower-upper pair in N:
a_vu = list(vu.x)
a_vu[i + 1] = vut[i + 1]
# Connect vertices in N to corresponding vertices
# in aN:
a_vu = self.V[tuple(a_vu)]
yield a_vl.x
# Split the a + b edge of the initial triangulation:
c_vc = self.split_edge(vl.x, a_vu.x)
self.split_edge(vl.x, vu.x) # Equal to vc
c_vc.connect(vco)
c_vc.connect(vc)
c_vc.connect(vl) # Connect c + ac operations
c_vc.connect(vu) # Connect c + ac operations
c_vc.connect(a_vu) # Connect c + ac operations
yield (c_vc.x)
c_vu = self.split_edge(vu.x,
a_vu.x) # yield at end of loop
c_vu.connect(vco)
# Connect remaining cN group vertices
c_vc.connect(c_vu) # Connect cN group vertices
yield (c_vu.x)
# Update the containers
Cox[i + 1].append(vu)
Ccx[i + 1].append(c_vu)
Cux[i + 1].append(a_vu)
# Update old containers
s_ab_C.append([c_vc, vl, vu, a_vu])
yield a_vu.x
# Clean class trash
try:
del Cox
del Ccx
del Cux
del ab_C
del ab_Cc
except UnboundLocalError:
pass
try:
self.triangulated_vectors.remove((tuple(origin_c),
tuple(supremum_c)))
except ValueError:
# Turn this into a logging warning?
pass
# Add newly triangulated vectors:
for vs in sup_set:
self.triangulated_vectors.append((tuple(vco.x), tuple(vs.x)))
# Extra yield to ensure that the triangulation is completed
if centroid:
vcn_set = set()
c_nn_lists = []
for vs in sup_set:
# Build centroid
c_nn = self.vpool(vco.x, vs.x)
try:
c_nn.remove(vcn_set)
except KeyError:
pass
c_nn_lists.append(c_nn)
for c_nn in c_nn_lists:
try:
c_nn.remove(vcn_set)
except KeyError:
pass
for vs, c_nn in zip(sup_set, c_nn_lists):
# Build centroid
vcn = self.split_edge(vco.x, vs.x)
vcn_set.add(vcn)
try: # Shouldn't be needed?
c_nn.remove(vcn_set)
except KeyError:
pass
for vnn in c_nn:
vcn.connect(vnn)
yield vcn.x
else:
pass
yield vut
return
def refine_star(self, v):
"""Refine the star domain of a vertex `v`."""
# Copy lists before iteration
vnn = copy.copy(v.nn)
v1nn = []
d_v0v1_set = set()
for v1 in vnn:
v1nn.append(copy.copy(v1.nn))
for v1, v1nn in zip(vnn, v1nn):
vnnu = v1nn.intersection(vnn)
d_v0v1 = self.split_edge(v.x, v1.x)
for o_d_v0v1 in d_v0v1_set:
d_v0v1.connect(o_d_v0v1)
d_v0v1_set.add(d_v0v1)
for v2 in vnnu:
d_v1v2 = self.split_edge(v1.x, v2.x)
d_v0v1.connect(d_v1v2)
return
def _split_edge(self, v1, v2):
v1 = self.V[v1]
v2 = self.V[v2]
# Destroy original edge, if it exists:
v1.disconnect(v2)
# Compute vertex on centre of edge:
try:
vct = (v2.x_a - v1.x_a) / 2.0 + v1.x_a
except TypeError: # Allow for decimal operations
vct = (v2.x_a - v1.x_a) / decimal.Decimal(2.0) + v1.x_a
vc = self.V[tuple(vct)]
# Connect to original 2 vertices to the new centre vertex
vc.connect(v1)
vc.connect(v2)
return vc
def vpool(self, origin, supremum):
vot = tuple(origin)
vst = tuple(supremum)
# Initiate vertices in case they don't exist
vo = self.V[vot]
vs = self.V[vst]
# Remove origin - supremum disconnect
# Find the lower/upper bounds of the refinement hyperrectangle
bl = list(vot)
bu = list(vst)
for i, (voi, vsi) in enumerate(zip(vot, vst)):
if bl[i] > vsi:
bl[i] = vsi
if bu[i] < voi:
bu[i] = voi
# NOTE: This is mostly done with sets/lists because we aren't sure
# how well the numpy arrays will scale to thousands of
# dimensions.
vn_pool = set()
vn_pool.update(vo.nn)
vn_pool.update(vs.nn)
cvn_pool = copy.copy(vn_pool)
for vn in cvn_pool:
for i, xi in enumerate(vn.x):
if bl[i] <= xi <= bu[i]:
pass
else:
try:
vn_pool.remove(vn)
except KeyError:
pass # NOTE: Not all neighbours are in initial pool
return vn_pool
def vf_to_vv(self, vertices, simplices):
"""
Convert a vertex-face mesh to a vertex-vertex mesh used by this class
Parameters
----------
vertices : list
Vertices
simplices : list
Simplices
"""
if self.dim > 1:
for s in simplices:
edges = itertools.combinations(s, self.dim)
for e in edges:
self.V[tuple(vertices[e[0]])].connect(
self.V[tuple(vertices[e[1]])])
else:
for e in simplices:
self.V[tuple(vertices[e[0]])].connect(
self.V[tuple(vertices[e[1]])])
return
def connect_vertex_non_symm(self, v_x, near=None):
"""
Adds a vertex at coords v_x to the complex that is not symmetric to the
initial triangulation and sub-triangulation.
If near is specified (for example; a star domain or collections of
cells known to contain v) then only those simplices containd in near
will be searched, this greatly speeds up the process.
If near is not specified this method will search the entire simplicial
complex structure.
Parameters
----------
v_x : tuple
Coordinates of non-symmetric vertex
near : set or list
List of vertices, these are points near v to check for
"""
if near is None:
star = self.V
else:
star = near
# Create the vertex origin
if tuple(v_x) in self.V.cache:
if self.V[v_x] in self.V_non_symm:
pass
else:
return
self.V[v_x]
found_nn = False
S_rows = []
for v in star:
S_rows.append(v.x)
S_rows = np.array(S_rows)
A = np.array(S_rows) - np.array(v_x)
# Iterate through all the possible simplices of S_rows
for s_i in itertools.combinations(range(S_rows.shape[0]),
r=self.dim + 1):
# Check if connected, else s_i is not a simplex
valid_simplex = True
for i in itertools.combinations(s_i, r=2):
# Every combination of vertices must be connected, we check of
# the current iteration of all combinations of s_i are
# connected we break the loop if it is not.
if ((self.V[tuple(S_rows[i[1]])] not in
self.V[tuple(S_rows[i[0]])].nn)
and (self.V[tuple(S_rows[i[0]])] not in
self.V[tuple(S_rows[i[1]])].nn)):
valid_simplex = False
break
S = S_rows[tuple([s_i])]
if valid_simplex:
if self.deg_simplex(S, proj=None):
valid_simplex = False
# If s_i is a valid simplex we can test if v_x is inside si
if valid_simplex:
# Find the A_j0 value from the precalculated values
A_j0 = A[tuple([s_i])]
if self.in_simplex(S, v_x, A_j0):
found_nn = True
# breaks the main for loop, s_i is the target simplex:
break
# Connect the simplex to point
if found_nn:
for i in s_i:
self.V[v_x].connect(self.V[tuple(S_rows[i])])
# Attached the simplex to storage for all non-symmetric vertices
self.V_non_symm.append(self.V[v_x])
# this bool value indicates a successful connection if True:
return found_nn
def in_simplex(self, S, v_x, A_j0=None):
    """Check whether the point `v_x` lies inside simplex `S`.

    Uses the classic sign-of-determinant test: `v_x` is interior iff the
    orientation of every facet-replaced simplex agrees with the reference
    orientation of `S` itself.

    Parameters
    ----------
    S : array_like
        Array whose rows are the simplex vertices.
    v_x :
        Candidate vertex coordinates.
    A_j0 : array, optional
        Pre-computed `S - v_x`, to avoid recomputation.

    Returns
    -------
    res : boolean
        True if `v_x` is in `S`.
    """
    # Reference orientation of the simplex.
    edges = np.delete(S, 0, 0) - S[0]
    ref_sign = np.sign(np.linalg.det(edges))
    if ref_sign == 0:
        # Degenerate reference simplex; fall back to an arbitrary sign.
        # TODO confirm: choosing another pivot row j may be more robust —
        # this fallback is unlikely to work in many cases.
        ref_sign = -1
    if A_j0 is None:
        A_j0 = S - v_x
    for j in range(self.dim + 1):
        expected = (-1) ** j * ref_sign
        # TODO: scipy determinants might be faster as an optional dependency.
        actual = np.sign(np.linalg.det(np.delete(A_j0, j, 0)))
        # NOTE(review): actual == 0 means v_x is coplanar with a facet —
        # perhaps that should count as inside; currently it returns False.
        if expected != actual:
            return False
    return True
def deg_simplex(self, S, proj=None):
    """Test a simplex S for degeneracy (linear dependence in R^dim).

    Parameters
    ----------
    S : np.array
        Simplex with rows as vertex vectors.
    proj : array, optional
        If the projection S[1:] - S[0] is already computed it can be
        passed in to avoid recomputation.
    """
    # A zero determinant of the edge matrix means the edge vectors are
    # linearly dependent, i.e. all vertices lie on a common hyperplane.
    # TODO: is one vertex's projection against the others sufficient above
    # dimension 2? Literature suggests proj.T, unclear why.
    if proj is None:
        proj = S[1:] - S[0]
    # TODO: replace the exact comparison with a tolerance?
    return bool(np.linalg.det(proj) == 0.0)
| Complex |
python | spyder-ide__spyder | spyder/widgets/printer.py | {
"start": 1537,
"end": 4437
class SpyderPrintPreviewDialog(QPrintPreviewDialog):
    """
    Subclass to make the default Qt dialog conform to the style and icons used
    in Spyder.
    """

    def __init__(self, printer, parent=None):
        super().__init__(printer, parent)
        self.toolbar = self.findChildren(QToolBar)[0]
        self.adjust_toolbar_style()
        self.make_tooltips_translatable()

    def adjust_toolbar_style(self):
        """Make toolbar to follow Spyder style."""
        self.toolbar.setStyleSheet(str(PANES_TOOLBAR_STYLESHEET))
        self.toolbar.setMovable(False)

        actions = self.toolbar.actions()

        # Spyder icons for the buttons we keep, keyed by the index Qt gives
        # each action in the default print-preview toolbar.
        spyder_icons = [
            (0, 'print.fit_width'),
            (1, 'print.fit_page'),
            (4, 'zoom_out'),
            (5, 'zoom_in'),
            (7, 'portrait'),
            (8, 'landscape'),
            (10, 'first_page'),
            (11, 'previous_page'),
            (13, 'next_page'),
            (14, 'last_page'),
            (16, 'print.single_page'),
            (18, 'print.all_pages'),
            (20, 'print.page_setup'),
            (21, 'print'),
        ]
        for index, icon_name in spyder_icons:
            actions[index].setIcon(ima.icon(icon_name))

        # Hide the separators (2, 6, 9, 15, 19) and action 17, which has no
        # icon in Material design.
        for index in (2, 6, 9, 15, 17, 19):
            actions[index].setVisible(False)

    def make_tooltips_translatable(self):
        """Make toolbar button tooltips translatable."""
        # These are the tooltips shown by default by Qt, re-declared here so
        # they go through gettext. The number on the left is the
        # corresponding action index in the toolbar.
        translatable_tooltips = [
            (0, _('Fit width')),
            (1, _('Fit page')),
            (4, _('Zoom out')),
            (5, _('Zoom in')),
            (7, _('Portrait')),
            (8, _('Landscape')),
            (10, _('First page')),
            (11, _('Previous page')),
            (13, _('Next page')),
            (14, _('Last page')),
            (16, _('Show single page')),
            (18, _('Show overview of all pages')),
            (20, _('Page setup')),
            (21, _('Print')),
        ]
        actions = self.toolbar.actions()
        for index, tooltip in translatable_tooltips:
            actions[index].setText(tooltip)
            actions[index].setToolTip(tooltip)

    def showEvent(self, event):
        """
        Give focus to the toolbar to avoid giving focus to the combobox that
        shows the page percentage size, which is odd.
        """
        super().showEvent(event)
        self.toolbar.setFocus()
| SpyderPrintPreviewDialog |
python | apache__airflow | airflow-ctl/tests/airflow_ctl/api/test_operations.py | {
"start": 13870,
"end": 18258
class TestBackfillOperations:
    """Exercise every backfill client operation against a mocked transport."""

    backfill_id: NonNegativeInt = 1
    backfill_body = BackfillPostBody(
        dag_id="dag_id",
        from_date=datetime.datetime(2024, 12, 31, 23, 59, 59),
        to_date=datetime.datetime(2025, 1, 1, 0, 0, 0),
        run_backwards=False,
        dag_run_conf={},
        reprocess_behavior=ReprocessBehavior.COMPLETED,
        max_active_runs=1,
    )
    backfill_response = BackfillResponse(
        id=backfill_id,
        dag_id="dag_id",
        from_date=datetime.datetime(2024, 12, 31, 23, 59, 59),
        to_date=datetime.datetime(2025, 1, 1, 0, 0, 0),
        dag_run_conf={},
        is_paused=False,
        reprocess_behavior=ReprocessBehavior.COMPLETED,
        max_active_runs=1,
        created_at=datetime.datetime(2024, 12, 31, 23, 59, 59),
        completed_at=datetime.datetime(2025, 1, 1, 0, 0, 0),
        updated_at=datetime.datetime(2025, 1, 1, 0, 0, 0),
        dag_display_name="TEST_DAG_1",
    )
    backfills_collection_response = BackfillCollectionResponse(
        backfills=[backfill_response],
        total_entries=1,
    )

    def _client_returning(self, expected_path, payload):
        """Client whose transport asserts the request path and echoes payload."""
        def handler(request: httpx.Request) -> httpx.Response:
            assert request.url.path == expected_path
            return httpx.Response(200, json=json.loads(payload.model_dump_json()))

        return make_api_client(transport=httpx.MockTransport(handler))

    def test_create(self):
        client = self._client_returning("/api/v2/backfills", self.backfill_response)
        assert client.backfills.create(backfill=self.backfill_body) == self.backfill_response

    def test_create_dry_run(self):
        client = self._client_returning("/api/v2/backfills/dry_run", self.backfill_response)
        assert client.backfills.create_dry_run(backfill=self.backfill_body) == self.backfill_response

    def test_get(self):
        client = self._client_returning(f"/api/v2/backfills/{self.backfill_id}", self.backfill_response)
        assert client.backfills.get(self.backfill_id) == self.backfill_response

    def test_list(self):
        client = self._client_returning("/api/v2/backfills", self.backfills_collection_response)
        assert client.backfills.list(dag_id="dag_id") == self.backfills_collection_response

    def test_pause(self):
        client = self._client_returning(f"/api/v2/backfills/{self.backfill_id}/pause", self.backfill_response)
        assert client.backfills.pause(self.backfill_id) == self.backfill_response

    def test_unpause(self):
        client = self._client_returning(f"/api/v2/backfills/{self.backfill_id}/unpause", self.backfill_response)
        assert client.backfills.unpause(self.backfill_id) == self.backfill_response

    def test_cancel(self):
        client = self._client_returning(f"/api/v2/backfills/{self.backfill_id}/cancel", self.backfill_response)
        assert client.backfills.cancel(self.backfill_id) == self.backfill_response
| TestBackfillOperations |
python | getsentry__sentry | src/sentry/api/endpoints/project_servicehooks.py | {
"start": 790,
"end": 5006
class ProjectServiceHooksEndpoint(ProjectEndpoint):
    owner = ApiOwner.INTEGRATIONS
    publish_status = {
        "GET": ApiPublishStatus.UNKNOWN,
        "POST": ApiPublishStatus.UNKNOWN,
    }

    def has_feature(self, request: Request, project):
        # Service hooks are gated behind a per-project feature flag.
        return features.has("projects:servicehooks", project=project, actor=request.user)

    def _missing_feature_response(self):
        # Shared 403 payload returned when the feature flag is off.
        return self.respond(
            {
                "error_type": "unavailable_feature",
                "detail": ["You do not have that feature enabled"],
            },
            status=403,
        )

    def get(self, request: Request, project) -> Response:
        """
        List a Project's Service Hooks
        ``````````````````````````````

        Return a list of service hooks bound to a project.

        This endpoint requires the 'servicehooks' feature to
        be enabled for your project.

        :pparam string organization_id_or_slug: the id or slug of the organization the
                                                client keys belong to.
        :pparam string project_id_or_slug: the id or slug of the project the client keys
                                           belong to.
        :auth: required
        """
        if not self.has_feature(request, project):
            return self._missing_feature_response()

        queryset = ServiceHook.objects.filter(project_id=project.id)

        # Optional ?status= filter; any unrecognized value yields no results.
        # (Named status_filter to avoid shadowing the DRF `status` module.)
        status_filter = request.GET.get("status")
        if status_filter == "active":
            queryset = queryset.filter(status=ObjectStatus.ACTIVE)
        elif status_filter == "disabled":
            queryset = queryset.filter(status=ObjectStatus.DISABLED)
        elif status_filter:
            queryset = queryset.none()

        return self.paginate(
            request=request,
            queryset=queryset,
            order_by="-id",
            on_results=lambda hooks: serialize(hooks, request.user, ServiceHookSerializer()),
        )

    def post(self, request: Request, project) -> Response:
        """
        Register a new Service Hook
        ```````````````````````````

        Register a new service hook on a project.

        Events include:

        - event.alert: An alert is generated for an event (via rules).
        - event.created: A new event has been processed.

        This endpoint requires the 'servicehooks' feature to
        be enabled for your project.

        :pparam string organization_id_or_slug: the id or slug of the organization the
                                                client keys belong to.
        :pparam string project_id_or_slug: the id or slug of the project the client keys
                                           belong to.
        :param string url: the url for the webhook
        :param array[string] events: the events to subscribe to
        :auth: required
        """
        if not request.user.is_authenticated:
            return self.respond(status=401)
        if not self.has_feature(request, project):
            return self._missing_feature_response()

        validator = ServiceHookValidator(data=request.data)
        if not validator.is_valid():
            return self.respond(validator.errors, status=status.HTTP_400_BAD_REQUEST)
        result = validator.validated_data

        # Only present when authenticating as an application token.
        app_id: int | None = getattr(request.auth, "application_id", None)

        hook = hook_service.create_service_hook(
            project_ids=[project.id],
            organization_id=project.organization.id,
            url=result["url"],
            actor_id=request.user.id,
            events=result.get("events"),
            application_id=app_id,
            installation_id=None,  # Just being explicit here.
        )
        self.create_audit_entry(
            request=request,
            organization=project.organization,
            target_object=hook.id,
            event=audit_log.get_event_id("SERVICEHOOK_ADD"),
            data=hook.get_audit_log_data(),
        )
        return self.respond(
            serialize(ServiceHook.objects.get(id=hook.id), request.user, ServiceHookSerializer()),
            status=201,
        )
| ProjectServiceHooksEndpoint |
python | joke2k__faker | tests/providers/test_company.py | {
"start": 6517,
"end": 7331
class TestHyAm:
    """Test hy_AM company provider methods"""

    def test_bs(self, faker, num_samples):
        for _ in range(num_samples):
            assert isinstance(faker.bs(), str)

    def test_catch_phrase(self, faker, num_samples):
        for _ in range(num_samples):
            assert isinstance(faker.catch_phrase(), str)

    def test_company(self, faker, num_samples):
        for _ in range(num_samples):
            assert isinstance(faker.company(), str)

    def test_company_suffix(self, faker, num_samples):
        for _ in range(num_samples):
            suffix = faker.company_suffix()
            assert isinstance(suffix, str)
            # Suffixes must come from the locale provider's canonical list.
            assert suffix in HyAmCompanyProvider.company_suffixes
| TestHyAm |
python | huggingface__transformers | src/transformers/models/minimax/modeling_minimax.py | {
"start": 17957,
"end": 21049
class MiniMaxAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: MiniMaxConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        # Fall back to hidden_size // num_heads when head_dim is unset.
        self.head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        q_out = config.num_attention_heads * self.head_dim
        kv_out = config.num_key_value_heads * self.head_dim
        self.q_proj = nn.Linear(config.hidden_size, q_out, bias=False)
        self.k_proj = nn.Linear(config.hidden_size, kv_out, bias=False)
        self.v_proj = nn.Linear(config.hidden_size, kv_out, bias=False)
        self.o_proj = nn.Linear(q_out, config.hidden_size, bias=False)
        self.rotary_fn = apply_rotary_pos_emb

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        leading_shape = hidden_states.shape[:-1]
        head_split_shape = (*leading_shape, -1, self.head_dim)

        # Project, split into heads, and move the head axis before sequence.
        queries = self.q_proj(hidden_states).view(head_split_shape).transpose(1, 2)
        keys = self.k_proj(hidden_states).view(head_split_shape).transpose(1, 2)
        values = self.v_proj(hidden_states).view(head_split_shape).transpose(1, 2)

        cos, sin = position_embeddings
        queries, keys = apply_rotary_pos_emb(queries, keys, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            keys, values = past_key_values.update(keys, values, self.layer_idx, cache_kwargs)

        # Dispatch to the configured attention backend (eager by default).
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            queries,
            keys,
            values,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=getattr(self.config, "sliding_window", None),  # main diff with Llama
            **kwargs,
        )

        # Merge the heads back and project to the model dimension.
        attn_output = attn_output.reshape(*leading_shape, -1).contiguous()
        return self.o_proj(attn_output), attn_weights
| MiniMaxAttention |
python | walkccc__LeetCode | solutions/698. Partition to K Equal Sum Subsets/698.py | {
"start": 0,
"end": 812
class Solution:
  def canPartitionKSubsets(self, nums: list[int], k: int) -> bool:
    """Return True if nums can be split into k subsets of equal sum.

    Backtracks over a bitmask of used elements; sorting descending first
    lets impossible branches fail fast.
    """
    total = sum(nums)
    if total % k != 0:
      return False

    target = total // k  # the sum every subset must reach
    if any(value > target for value in nums):
      return False

    def backtrack(start: int, groups_left: int, running: int, mask: int) -> bool:
      # All groups filled -> valid partition found.
      if groups_left == 0:
        return True
      if running > target:
        return False
      if running == target:
        # Current group complete; start the next one from scratch.
        return backtrack(0, groups_left - 1, 0, mask)
      for i in range(start, len(nums)):
        if mask >> i & 1:
          continue  # element already taken
        if backtrack(i + 1, groups_left, running + nums[i], mask | 1 << i):
          return True
      return False

    nums.sort(reverse=True)
    return backtrack(0, k, 0, 0)
| Solution |
python | huggingface__transformers | src/transformers/models/janus/modular_janus.py | {
"start": 7234,
"end": 11339
class JanusVQVAEConfig(ChameleonVQVAEConfig):
    r"""
    Configuration class for a [`JanusVQVAEModel`]; it is used to instantiate the
    model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults yields a configuration
    similar to the VQModel of
    [deepseek-community/Janus-Pro-1B](https://huggingface.co/deepseek-community/Janus-Pro-1B).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to
    control the model outputs. Read the documentation from [`PreTrainedConfig`]
    for more information.

    Args:
        embed_dim (`int`, *optional*, defaults to 8):
            Dimensionality of each embedding vector.
        num_embeddings (`int`, *optional*, defaults to 16384):
            Number of codebook embeddings.
        double_latent (`bool`, *optional*, defaults to `False`):
            Whether to use double z channels.
        latent_channels (`int`, *optional*, defaults to 256):
            Number of channels for the latent space.
        num_patches (`int`, *optional*, defaults to 32):
            Number of patches the input images can be divided into.
        in_channels (`int`, *optional*, defaults to 3):
            Number of input channels.
        out_channels (`int`, *optional*, defaults to 3):
            Number of output channels.
        base_channels (`int`, *optional*, defaults to 128):
            Base channel count.
        channel_multiplier (`list[int]`, *optional*, defaults to `[1, 1, 2, 2, 4]`):
            Channel multipliers for each resolution.
        num_res_blocks (`int`, *optional*, defaults to 2):
            Number of residual blocks.
        dropout (`float`, *optional*, defaults to 0.0):
            Dropout rate.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for
            initializing all weight matrices.
        projection_dim (`int`, *optional*, defaults to 2048):
            Dimensionality of the MLP projection head.
        num_hidden_layers (`int`, *optional*, defaults to 2):
            Number of hidden layers in the VQVAE MLP connector module.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the
            encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and
            `"gelu_new"` are supported.
        image_token_embed_dim (`int`, *optional*, defaults to 2048):
            Dimension of image embeddings. It should be the same as the
            dimensionality of text embeddings.
    """

    def __init__(
        self,
        embed_dim: int = 8,
        num_embeddings: int = 16384,
        double_latent: bool = False,
        latent_channels: int = 256,
        num_patches: int = 32,
        in_channels: int = 3,
        out_channels: int = 3,
        base_channels: int = 128,
        channel_multiplier: list[int] = [1, 1, 2, 2, 4],
        num_res_blocks: int = 2,
        dropout: float = 0.0,
        initializer_range=0.02,
        projection_dim=2048,
        num_hidden_layers=2,
        hidden_act="gelu",
        image_token_embed_dim=2048,
        **kwargs,
    ):
        # Let the Chameleon parent handle the attributes it shares with us.
        super().__init__(
            embed_dim=embed_dim,
            num_embeddings=num_embeddings,
            double_latent=double_latent,
            latent_channels=latent_channels,
            in_channels=in_channels,
            base_channels=base_channels,
            channel_multiplier=channel_multiplier,
            num_res_blocks=num_res_blocks,
            dropout=dropout,
            initializer_range=initializer_range,
            **kwargs,
        )
        # Janus-specific attributes not covered by the parent config.
        self.num_patches = num_patches
        self.out_channels = out_channels
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.hidden_act = hidden_act
        self.image_token_embed_dim = image_token_embed_dim
        # Parent attributes that do not apply to the Janus VQ-VAE.
        del self.resolution
        del self.attn_resolutions
        del self.attn_type
| JanusVQVAEConfig |
python | ansible__ansible | test/lib/ansible_test/_internal/config.py | {
"start": 8701,
"end": 9302
class SanityConfig(TestConfig):
    """Configuration for the sanity command."""

    def __init__(self, args: t.Any) -> None:
        super().__init__(args, 'sanity')

        # Test selection and behavior flags taken straight from the CLI args.
        self.test: list[str] = args.test
        self.skip_test: list[str] = args.skip_test
        self.list_tests: bool = args.list_tests
        self.allow_disabled: bool = args.allow_disabled
        self.enable_optional_errors: bool = args.enable_optional_errors
        self.prime_venvs: bool = args.prime_venvs
        # `fix` is only defined for invocations that support it.
        self.fix: bool = getattr(args, 'fix', False)

        self.display_stderr = self.lint or self.list_tests
| SanityConfig |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.