language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
ray-project__ray
|
python/ray/train/torch/torch_detection_predictor.py
|
{
"start": 291,
"end": 2931
}
|
class ____(TorchPredictor):
"""A predictor for TorchVision detection models.
Unlike other Torch models, instance segmentation models return
`List[Dict[str, Tensor]]`. This predictor extends :class:`TorchPredictor` to support
the non-standard outputs.
To learn more about instance segmentation models, read
`Instance segmentation models <https://pytorch.org/vision/main/auto_examples/plot_visualization_utils.html#instance-seg-output>`_.
Example:
.. testcode::
import numpy as np
from torchvision import models
from ray.train.torch import TorchDetectionPredictor
model = models.detection.fasterrcnn_resnet50_fpn_v2(pretrained=True)
predictor = TorchDetectionPredictor(model=model)
predictions = predictor.predict(np.zeros((4, 3, 32, 32), dtype=np.float32))
print(predictions.keys())
.. testoutput::
dict_keys(['pred_boxes', 'pred_labels', 'pred_scores'])
""" # noqa: E501
def _predict_numpy(
self,
data: Union[np.ndarray, Dict[str, np.ndarray]],
dtype: Optional[Union[TensorDtype, Dict[str, TensorDtype]]],
) -> Dict[str, np.ndarray]:
if isinstance(data, dict) and len(data) != 1:
raise ValueError(
f"""Expected input to contain one key, but got {len(data)} instead."""
)
if dtype is not None and not isinstance(dtype, torch.dtype):
raise ValueError(
"Expected `dtype` to be a `torch.dtype`, but got a "
f"{type(dtype).__name__} instead."
)
if isinstance(data, dict):
images = next(iter(data.values()))
else:
images = data
inputs = [
torch.as_tensor(image, dtype=dtype).to(self.device) for image in images
]
outputs = self.call_model(inputs)
outputs = _convert_outputs_to_batch(outputs)
outputs = {"pred_" + key: value for key, value in outputs.items()}
return outputs
def _convert_outputs_to_batch(
outputs: List[Dict[str, torch.Tensor]],
) -> Dict[str, List[torch.Tensor]]:
"""Batch detection model outputs.
TorchVision detection models return `List[Dict[Tensor]]`. Each `Dict` contain
'boxes', 'labels, and 'scores'.
This function batches values and returns a `Dict[str, List[Tensor]]`.
""" # noqa: E501
batch = collections.defaultdict(list)
for output in outputs:
for key, value in output.items():
batch[key].append(value.cpu().detach())
return batch
|
TorchDetectionPredictor
|
python
|
great-expectations__great_expectations
|
great_expectations/expectations/row_conditions.py
|
{
"start": 872,
"end": 1064
}
|
class ____(ValueError):
"""Raised when an AND group contains OR conditions."""
def __init__(self):
super().__init__("AND groups cannot contain OR conditions")
|
AndContainsOrError
|
python
|
weaviate__weaviate-python-client
|
integration/test_batch_v4.py
|
{
"start": 1536,
"end": 1677
}
|
class ____:
"""Handles pandas and polars series."""
array: list
def to_list(self) -> list:
return self.array
|
MockDFSeries
|
python
|
tensorflow__tensorflow
|
tensorflow/python/training/basic_session_run_hooks.py
|
{
"start": 25284,
"end": 28862
}
|
class ____(session_run_hook.SessionRunHook):
"""Hook that counts steps per second."""
def __init__(self,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
if (every_n_steps is None) == (every_n_secs is None):
raise ValueError(
"exactly one of every_n_steps and every_n_secs should be provided.")
self._timer = SecondOrStepTimer(
every_steps=every_n_steps, every_secs=every_n_secs)
self._summary_writer = summary_writer
self._output_dir = output_dir
self._last_global_step = None
self._steps_per_run = 1
def _set_steps_per_run(self, steps_per_run):
self._steps_per_run = steps_per_run
def begin(self):
if self._summary_writer is None and self._output_dir:
self._summary_writer = SummaryWriterCache.get(self._output_dir)
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use StepCounterHook.")
self._summary_tag = training_util.get_global_step().op.name + "/sec"
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
steps_per_sec = elapsed_steps / elapsed_time
if self._summary_writer is not None:
summary = Summary(value=[
Summary.Value(tag=self._summary_tag, simple_value=steps_per_sec)
])
self._summary_writer.add_summary(summary, global_step)
logging.info("%s: %g", self._summary_tag, steps_per_sec)
def after_run(self, run_context, run_values):
_ = run_context
stale_global_step = run_values.results
if self._timer.should_trigger_for_step(stale_global_step +
self._steps_per_run):
# get the real value after train op.
global_step = run_context.session.run(self._global_step_tensor)
if self._timer.should_trigger_for_step(global_step):
elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(
global_step)
if elapsed_time is not None:
self._log_and_record(elapsed_steps, elapsed_time, global_step)
# Check whether the global step has been increased. Here, we do not use the
# timer.last_triggered_step as the timer might record a different global
# step value such that the comparison could be unreliable. For simplicity,
# we just compare the stale_global_step with previously recorded version.
if stale_global_step == self._last_global_step:
# Here, we give a warning in the first 5 times if we have observed that
# the global step has not been increased. For some Optimizers, the global
# step is not increased each time by design. For example,
# SyncReplicaOptimizer doesn't increase the global step in worker's main
# train step.
logging.log_first_n(
logging.WARN,
"It seems that global step (tf.train.get_global_step) has not "
"been increased. Current value (could be stable): %s vs previous "
"value: %s. You could increase the global step by passing "
"tf.train.get_global_step() to Optimizer.apply_gradients or "
"Optimizer.minimize.", 5, stale_global_step, self._last_global_step)
self._last_global_step = stale_global_step
@tf_export(v1=["train.NanLossDuringTrainingError"])
|
StepCounterHook
|
python
|
tensorflow__tensorflow
|
tensorflow/compiler/tests/pooling_ops_test.py
|
{
"start": 9806,
"end": 21361
}
|
class ____(xla_test.XLATestCase):
CPU_DEVICE = "/job:localhost/replica:0/task:0/cpu:0"
def _VerifyOneTest(self,
pool_func,
pool_grad_func,
input_sizes,
ksize,
strides,
padding,
data_format,
pool_grad_grad_func=None):
"""Verifies the output values of the pooling gradient function.
Args:
pool_func: Forward pooling function
pool_grad_func: Pooling gradient function for pool_grad_func
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
data_format: The data format we use to run the pooling operation.
pool_grad_grad_func: Second-order gradient function, if available.
"""
total_size = np.prod(input_sizes)
# TODO(b/73062247): MaxPoolGradGrad can confuse gradients when x is equally
# maximal at 16 bits. Switch to np.random.randn when resolved.
x = np.arange(1, total_size + 1, dtype=np.float32)
x *= (np.random.randint(2, size=total_size) * 2 - 1) # Flip signs randomly
# Verify some specifically interesting values...
x[np.random.choice(total_size)] = np.inf
x[np.random.choice(total_size)] = -np.inf
# TODO(b/74222344): Fix nan handling for max pool grad.
# x[np.random.choice(total_size)] = np.nan
x = x.reshape(input_sizes)
with self.session() as sess:
# Use the forward pool function to compute some corresponding outputs
# (needed for the CPU device, and we need the shape in both cases).
with ops.device(self.CPU_DEVICE):
inputs = array_ops.placeholder(dtypes.float32, shape=input_sizes)
outputs = pool_func(
inputs,
ksize=ksize,
strides=strides,
padding=padding,
data_format="NHWC")
output_vals = np.array(sess.run(outputs, {inputs: x}))
output_gradient_vals = np.arange(
1, output_vals.size + 1, dtype=np.float32)
output_gradient_vals = output_gradient_vals.reshape(output_vals.shape)
output_grad_grad_vals = np.arange(1, x.size + 1, dtype=np.float32)
output_grad_grad_vals = output_grad_grad_vals.reshape(x.shape)
# Use the Tensorflow CPU pooling gradient to compute the expected input
# gradients.
with ops.device(self.CPU_DEVICE):
output_gradients = array_ops.placeholder(
dtypes.float32, shape=output_vals.shape)
expected_input_gradients = pool_grad_func(
inputs,
outputs,
output_gradients,
ksize=ksize,
strides=strides,
padding=padding,
data_format="NHWC")
expected_input_gradient_vals = sess.run(
expected_input_gradients,
{inputs: x,
output_gradients: output_gradient_vals})
output_grad_gradients = array_ops.placeholder(
dtypes.float32, shape=expected_input_gradient_vals.shape)
if pool_grad_grad_func is not None:
expected_grad_gradients = pool_grad_grad_func(
inputs,
outputs,
output_grad_gradients,
ksize=ksize,
strides=strides,
padding=padding,
data_format="NHWC")
expected_grad_gradients_vals = sess.run(expected_grad_gradients, {
inputs: x,
output_grad_gradients: output_grad_grad_vals
})
# Run the gradient op on the XLA device
with self.test_scope():
outputs = array_ops.placeholder(dtypes.float32, shape=output_vals.shape)
xla_inputs = inputs
xla_outputs = outputs
xla_output_gradients = output_gradients
xla_output_grad_gradients = output_grad_gradients
xla_ksize = ksize
xla_strides = strides
if data_format == "NCHW":
xla_inputs = NHWCToNCHW(inputs)
xla_outputs = NHWCToNCHW(outputs)
xla_output_gradients = NHWCToNCHW(output_gradients)
xla_output_grad_gradients = NHWCToNCHW(output_grad_gradients)
xla_ksize = NHWCToNCHW(ksize)
xla_strides = NHWCToNCHW(strides)
actual_input_gradients = pool_grad_func(
xla_inputs,
xla_outputs,
xla_output_gradients,
ksize=xla_ksize,
strides=xla_strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
actual_input_gradients = NCHWToNHWC(actual_input_gradients)
if pool_grad_grad_func is not None:
actual_grad_gradients = pool_grad_grad_func(
xla_inputs,
xla_outputs,
xla_output_grad_gradients,
ksize=xla_ksize,
strides=xla_strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
actual_grad_gradients = NCHWToNHWC(actual_grad_gradients)
actual_input_gradients_vals = sess.run(actual_input_gradients, {
inputs: x,
outputs: output_vals,
output_gradients: output_gradient_vals
})
# Compare the Tensorflow and XLA results.
self.assertAllClose(
expected_input_gradient_vals,
actual_input_gradients_vals,
rtol=1e-4,
atol=1e-6)
self.assertShapeEqual(actual_input_gradients_vals, inputs)
if pool_grad_grad_func is not None:
actual_grad_gradients_vals = sess.run(
actual_grad_gradients, {
inputs: x,
outputs: output_vals,
output_grad_gradients: output_grad_grad_vals
})
# Compare the Tensorflow and XLA results.
self.assertAllClose(
expected_grad_gradients_vals,
actual_grad_gradients_vals,
rtol=1e-4,
atol=1e-6)
self.assertShapeEqual(actual_grad_gradients_vals, outputs)
def _VerifyValues(self,
pool_func,
pool_grad_func,
input_sizes,
ksize,
strides,
padding,
pool_grad_grad_func=None):
"""Verifies the output values of the pooling function.
Args:
pool_func: Pooling function to be called, e.g., tf.nn.max_pool2d
pool_grad_func: Corresponding pooling gradient function.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
pool_grad_grad_func: Second-order gradient function, if available.
"""
for data_format in GetTestConfigs():
self._VerifyOneTest(
pool_func,
pool_grad_func,
input_sizes,
ksize,
strides,
padding,
data_format,
pool_grad_grad_func=pool_grad_grad_func)
def _TestPooling(self, forward_op, backward_op, pool_grad_grad_func=None):
# VALID padding
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
pool_grad_grad_func=pool_grad_grad_func)
# SAME padding
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
pool_grad_grad_func=pool_grad_grad_func)
# SAME padding, non square window
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
pool_grad_grad_func=pool_grad_grad_func)
# VALID padding, uneven stride
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 1, 2, 1],
padding="VALID",
pool_grad_grad_func=pool_grad_grad_func)
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 2, 1, 1],
padding="VALID",
pool_grad_grad_func=pool_grad_grad_func)
# SAME padding, size 4 input
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
pool_grad_grad_func=pool_grad_grad_func)
# SAME padding, size 8 input
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
pool_grad_grad_func=pool_grad_grad_func)
def testMaxPool(self):
self._TestPooling(
nn_ops.max_pool,
gen_nn_ops.max_pool_grad,
pool_grad_grad_func=gen_nn_ops.max_pool_grad_grad)
def testAvgPool(self):
# Wrapper around AvgPoolGrad that ignores extra arguments needed by
# MaxPoolGrad.
def AvgPoolGrad(inputs, outputs, output_gradients, ksize, strides, padding,
data_format):
del outputs # Unused by average-pooling gradients.
return gen_nn_ops.avg_pool_grad(
inputs.get_shape().as_list(),
output_gradients,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format)
self._TestPooling(nn_ops.avg_pool, AvgPoolGrad)
@test_util.disable_mlir_bridge(
"TODO(b/266613412): investigate FPE in AvgPoolGrad for TPU"
)
def testAvgPoolGradSamePaddingZeroStrideZeroSize(self):
output_gradient_vals = np.array([0.39117979], dtype=np.float32)
output_gradient_vals = output_gradient_vals.reshape([1, 1, 1, 1])
with self.session() as sess:
with self.test_scope():
output_gradients = array_ops.placeholder(
dtypes.float32, shape=output_gradient_vals.shape
)
t = gen_nn_ops.avg_pool_grad(
orig_input_shape=[1, 0, 0, 0],
grad=output_gradients,
ksize=[1, 0, 0, 0],
strides=[1, 0, 0, 0],
padding="SAME",
data_format="NCHW",
)
with self.assertRaisesRegex(
errors.InvalidArgumentError,
(
"Sliding window ksize field for dimension 1 must be positive but"
" is 0"
),
):
sess.run(t, {output_gradients: output_gradient_vals})
# The CPU implementation of AvgPoolGrad doesn't accept kernels smaller than
# the stride size, so we only run the following tests on MaxPoolGrad.
def testMaxPoolKernelSmallerThanStrideValid(self):
self._VerifyValues(
nn_ops.max_pool,
gen_nn_ops.max_pool_grad,
input_sizes=[1, 7, 7, 1],
ksize=[1, 2, 2, 1],
strides=[1, 3, 3, 1],
padding="VALID")
def testMaxPoolKernelSmallerThanStrideSame(self):
self._VerifyValues(
nn_ops.max_pool,
gen_nn_ops.max_pool_grad,
input_sizes=[1, 3, 3, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME")
self._VerifyValues(
nn_ops.max_pool,
gen_nn_ops.max_pool_grad,
input_sizes=[1, 4, 4, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME")
if __name__ == "__main__":
googletest.main()
|
PoolGradTest
|
python
|
jazzband__django-polymorphic
|
src/polymorphic/tests/models.py
|
{
"start": 12429,
"end": 12573
}
|
class ____(SubclassSelectorProxyModel):
concrete_field = models.CharField(max_length=30, default="test_cf")
|
SubclassSelectorProxyConcreteModel
|
python
|
kamyu104__LeetCode-Solutions
|
Python/chalkboard-xor-game.py
|
{
"start": 85,
"end": 288
}
|
class ____(object):
def xorGame(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
return reduce(xor, nums) == 0 or \
len(nums) % 2 == 0
|
Solution
|
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_values.py
|
{
"start": 793,
"end": 24621
}
|
class ____(fixtures.TablesTest, AssertsCompiledSQL):
__dialect__ = default.DefaultDialect(supports_native_boolean=True)
run_setup_bind = None
run_create_tables = None
@classmethod
def define_tables(cls, metadata):
Table(
"people",
metadata,
Column("people_id", Integer, primary_key=True),
Column("age", Integer),
Column("name", String(30)),
)
Table(
"bookcases",
metadata,
Column("bookcase_id", Integer, primary_key=True),
Column(
"bookcase_owner_id", Integer, ForeignKey("people.people_id")
),
Column("bookcase_shelves", Integer),
Column("bookcase_width", Integer),
)
Table(
"books",
metadata,
Column("book_id", Integer, primary_key=True),
Column(
"bookcase_id", Integer, ForeignKey("bookcases.bookcase_id")
),
Column("book_owner_id", Integer, ForeignKey("people.people_id")),
Column("book_weight", Integer),
)
def test_wrong_number_of_elements(self):
v1 = values(
column("CaseSensitive", Integer),
column("has spaces", String),
name="Spaces and Cases",
).data([(1, "textA", 99), (2, "textB", 88)])
with expect_raises_message(
exc.ArgumentError,
r"Wrong number of elements for 2-tuple: \(1, 'textA', 99\)",
):
str(v1)
@testing.fixture
def _auto_proxy_fixture(self):
c1 = column("q", Integer)
c2 = column("p", Integer)
t = table("t", c1) # noqa: F841
v1 = values(c1, c2).data([(1, 2), (3, 4)])
return c1, c2, t, v1
def test_auto_proxy_col_ownership(self, _auto_proxy_fixture):
"""test #10280"""
c1, c2, t, v1 = _auto_proxy_fixture
is_(c2, v1.c.p)
is_not(c1, v1.c.q)
def test_auto_proxy_select_c_col(self, _auto_proxy_fixture):
"""test #10280"""
c1, c2, t, v1 = _auto_proxy_fixture
self.assert_compile(select(t.c.q), "SELECT t.q FROM t")
self.assert_compile(
select(v1.c.q),
"SELECT q FROM (VALUES (:param_1, :param_2), "
"(:param_3, :param_4))",
checkparams={
"param_1": 1,
"param_2": 2,
"param_3": 3,
"param_4": 4,
},
)
def test_auto_proxy_select_direct_col(self, _auto_proxy_fixture):
"""test #10280"""
c1, c2, t, v1 = _auto_proxy_fixture
self.assert_compile(select(c1), "SELECT t.q FROM t")
# for VALUES, the column does not have its set_parent called up front.
# this is to make the construction of values() faster, as the values.c
# use case is not required in order to use the construct
self.assert_compile(select(c2), "SELECT p")
# once we call v.c, then it's set up.
# patch for #10280 added an extra step to make sure this works
# even after the previous compile is called.
# is this how it should work? not sure, just testing how it is
# right now
v1.c.p
self.assert_compile(
select(c2),
"SELECT p FROM (VALUES (:param_1, :param_2), "
"(:param_3, :param_4))",
checkparams={
"param_1": 1,
"param_2": 2,
"param_3": 3,
"param_4": 4,
},
)
def test_auto_proxy_make_new_values(self, _auto_proxy_fixture):
"""test #10280"""
c1, c2, t, v1 = _auto_proxy_fixture
self.assert_compile(
select(v1.c.p),
"SELECT p FROM (VALUES (:param_1, :param_2), "
"(:param_3, :param_4))",
checkparams={
"param_1": 1,
"param_2": 2,
"param_3": 3,
"param_4": 4,
},
)
v2 = values(c1, c2).data([(5, 6)])
self.assert_compile(
select(v2.c.p),
"SELECT p FROM (VALUES (:param_1, :param_2))",
checkparams={"param_1": 5, "param_2": 6},
)
def test_column_quoting(self):
v1 = values(
column("CaseSensitive", Integer),
column("has spaces", String),
column("number", Integer),
name="Spaces and Cases",
).data([(1, "textA", 99), (2, "textB", 88)])
self.assert_compile(
select(v1),
'SELECT "Spaces and Cases"."CaseSensitive", '
'"Spaces and Cases"."has spaces", "Spaces and Cases".number FROM '
"(VALUES (:param_1, :param_2, :param_3), "
"(:param_4, :param_5, :param_6)) "
'AS "Spaces and Cases" ("CaseSensitive", "has spaces", number)',
)
def test_values_in_scalar_subq(self):
"""test #9772"""
people = self.tables.people
table_value_constructor = values(
Column("v1", Integer), name="tvc"
).data(
[
(people.c.people_id,),
(people.c.age,),
(people.c.name,),
]
)
maximum = select(func.max(table_value_constructor.c.v1))
maximum_subquery = maximum.scalar_subquery()
query = select(people.c.people_id, maximum_subquery)
self.assert_compile(
query,
"SELECT people.people_id, "
"(SELECT max(tvc.v1) AS max_1 FROM "
"(VALUES (people.people_id), (people.age), (people.name)) "
"AS tvc (v1)) AS anon_1 FROM people",
)
def test_values_in_select_cte_params(self):
cte1 = select(
values(
column("col1", String),
column("col2", Integer),
name="temp_table",
).data([("a", 2), ("b", 3)])
).cte("cte1")
cte2 = select(cte1.c.col1).where(cte1.c.col1 == "q").cte("cte2")
stmt = select(cte2.c.col1)
dialect = default.DefaultDialect()
dialect.positional = True
dialect.paramstyle = "numeric"
self.assert_compile(
stmt,
"WITH cte1 AS (SELECT temp_table.col1 AS col1, "
"temp_table.col2 AS col2 FROM (VALUES (:1, :2), (:3, :4)) AS "
"temp_table (col1, col2)), "
"cte2 AS "
"(SELECT cte1.col1 AS col1 FROM cte1 WHERE cte1.col1 = :5) "
"SELECT cte2.col1 FROM cte2",
checkpositional=("a", 2, "b", 3, "q"),
dialect=dialect,
)
self.assert_compile(
stmt,
"WITH cte1 AS (SELECT temp_table.col1 AS col1, "
"temp_table.col2 AS col2 FROM (VALUES ('a', 2), ('b', 3)) "
"AS temp_table (col1, col2)), "
"cte2 AS "
"(SELECT cte1.col1 AS col1 FROM cte1 WHERE cte1.col1 = 'q') "
"SELECT cte2.col1 FROM cte2",
literal_binds=True,
dialect=dialect,
)
def test_values_in_select_cte_literal_binds(self):
cte1 = select(
values(
column("col1", String),
column("col2", Integer),
name="temp_table",
literal_binds=True,
).data([("a", 2), ("b", 3)])
).cte("cte1")
cte2 = select(cte1.c.col1).where(cte1.c.col1 == "q").cte("cte2")
stmt = select(cte2.c.col1)
self.assert_compile(
stmt,
"WITH cte1 AS (SELECT temp_table.col1 AS col1, "
"temp_table.col2 AS col2 FROM (VALUES ('a', 2), ('b', 3)) "
"AS temp_table (col1, col2)), "
"cte2 AS "
"(SELECT cte1.col1 AS col1 FROM cte1 WHERE cte1.col1 = :col1_1) "
"SELECT cte2.col1 FROM cte2",
checkparams={"col1_1": "q"},
)
@testing.variation("values_named", [True, False])
@testing.variation("cte_named", [True, False])
@testing.variation("literal_binds", [True, False])
@testing.variation("recursive", [True, False])
def test_direct_cte(
self, values_named, cte_named, literal_binds, recursive
):
"""test #12734"""
cte1 = (
values(
column("col1", String),
column("col2", Integer),
literal_binds=bool(literal_binds),
name="some name" if values_named else None,
)
.data([("a", 2), ("b", 3)])
.cte("cte1" if cte_named else None, recursive=bool(recursive))
)
stmt = select(cte1.c.col1)
if cte_named:
cte_name = "cte1"
elif values_named:
cte_name = "some_name_1"
else:
cte_name = "anon_1"
if literal_binds:
params = "('a', 2), ('b', 3)"
else:
params = "(:param_1, :param_2), (:param_3, :param_4)"
recursive_str = "RECURSIVE " if recursive else ""
self.assert_compile(
stmt,
f"WITH {recursive_str}{cte_name}(col1, col2) AS "
f"(VALUES {params}) "
f"SELECT {cte_name}.col1 FROM {cte_name}",
checkparams=(
{
"param_1": "a",
"param_2": 2,
"param_3": "b",
"param_4": 3,
}
if not literal_binds
else {}
),
)
def test_add_cte_one(self):
cte1 = (
values(
column("col1", String),
column("col2", Integer),
name="some_name",
).data([("a", 2), ("b", 3)])
).add_cte(select(1).cte())
self.assert_compile(
cte1.select(),
"WITH anon_1 AS (SELECT 1) "
"SELECT some_name.col1, some_name.col2 FROM "
"(VALUES (:param_1, :param_2), "
"(:param_3, :param_4)) AS some_name (col1, col2)",
)
def test_add_cte_two(self):
cte1 = (
(
values(
column("col1", String),
column("col2", Integer),
name="some_name",
).data([("a", 2), ("b", 3)])
)
.add_cte(select(1).cte())
.cte()
)
self.assert_compile(
cte1.select(),
"WITH anon_1 AS (SELECT 1), some_name_1(col1, col2) AS "
"(VALUES (:param_1, :param_2), (:param_3, :param_4)) "
"SELECT some_name_1.col1, some_name_1.col2 FROM some_name_1",
)
def test_no_cte_with_lateral(self):
values_ = (
values(
column("col1", String),
column("col2", Integer),
name="some_name",
)
.data([("a", 2), ("b", 3)])
.lateral()
)
cte = values_.cte()
with expect_raises_message(
exc.CompileError,
"Can't use a LATERAL VALUES expression inside of a CTE",
):
cte.select().compile()
@testing.fixture
def literal_parameter_fixture(self):
def go(literal_binds, omit=None):
cols = [
column("mykey", Integer),
column("mytext", String),
column("myint", Integer),
]
if omit:
for idx in omit:
cols[idx] = column(cols[idx].name)
return values(
*cols, name="myvalues", literal_binds=literal_binds
).data([(1, "textA", 99), (2, "textB", 88)])
return go
@testing.fixture
def tricky_types_parameter_fixture(self):
class SomeEnum:
# Implements PEP 435 in the minimal fashion needed by SQLAlchemy
__members__ = OrderedDict()
def __init__(self, name, value, alias=None):
self.name = name
self.value = value
self.__members__[name] = self
setattr(self.__class__, name, self)
if alias:
self.__members__[alias] = self
setattr(self.__class__, alias, self)
one = SomeEnum("one", 1)
two = SomeEnum("two", 2)
class MumPyString(str):
"""some kind of string, can't imagine where such a thing might
be found
"""
class MumPyNumber(int):
"""some kind of int, can't imagine where such a thing might
be found
"""
def go(literal_binds, omit=None):
cols = [
column("mykey", Integer),
column("mytext", String),
column("myenum", Enum(SomeEnum)),
]
if omit:
for idx in omit:
cols[idx] = column(cols[idx].name)
return values(
*cols, name="myvalues", literal_binds=literal_binds
).data(
[
(MumPyNumber(1), MumPyString("textA"), one),
(MumPyNumber(2), MumPyString("textB"), two),
]
)
return go
def test_bound_parameters(self, literal_parameter_fixture):
literal_parameter_fixture = literal_parameter_fixture(False)
stmt = select(literal_parameter_fixture)
self.assert_compile(
stmt,
"SELECT myvalues.mykey, myvalues.mytext, myvalues.myint FROM "
"(VALUES (:param_1, :param_2, :param_3), "
"(:param_4, :param_5, :param_6)"
") AS myvalues (mykey, mytext, myint)",
checkparams={
"param_1": 1,
"param_2": "textA",
"param_3": 99,
"param_4": 2,
"param_5": "textB",
"param_6": 88,
},
)
def test_literal_parameters(self, literal_parameter_fixture):
literal_parameter_fixture = literal_parameter_fixture(True)
stmt = select(literal_parameter_fixture)
self.assert_compile(
stmt,
"SELECT myvalues.mykey, myvalues.mytext, myvalues.myint FROM "
"(VALUES (1, 'textA', 99), (2, 'textB', 88)"
") AS myvalues (mykey, mytext, myint)",
checkparams={},
)
def test_literal_parameters_not_every_type_given(
self, literal_parameter_fixture
):
literal_parameter_fixture = literal_parameter_fixture(True, omit=(1,))
stmt = select(literal_parameter_fixture)
self.assert_compile(
stmt,
"SELECT myvalues.mykey, myvalues.mytext, myvalues.myint FROM "
"(VALUES (1, 'textA', 99), (2, 'textB', 88)"
") AS myvalues (mykey, mytext, myint)",
checkparams={},
)
def test_use_cols_tricky_not_every_type_given(
self, tricky_types_parameter_fixture
):
literal_parameter_fixture = tricky_types_parameter_fixture(
True, omit=(1,)
)
stmt = select(literal_parameter_fixture)
with expect_raises_message(
exc.CompileError,
r"No literal value renderer is available for literal "
r"value \"'textA'\" with datatype NULL",
):
str(stmt)
def test_use_cols_for_types(self, tricky_types_parameter_fixture):
literal_parameter_fixture = tricky_types_parameter_fixture(True)
stmt = select(literal_parameter_fixture)
self.assert_compile(
stmt,
"SELECT myvalues.mykey, myvalues.mytext, myvalues.myenum FROM "
"(VALUES (1, 'textA', 'one'), (2, 'textB', 'two')"
") AS myvalues (mykey, mytext, myenum)",
checkparams={},
)
def test_anon_alias(self):
people = self.tables.people
values_ = (
values(
column("bookcase_id", Integer),
column("bookcase_owner_id", Integer),
)
.data([(1, 1), (2, 1), (3, 2), (3, 3)])
.alias()
)
stmt = select(people, values_).select_from(
people.join(
values_, values_.c.bookcase_owner_id == people.c.people_id
)
)
self.assert_compile(
stmt,
"SELECT people.people_id, people.age, people.name, "
"anon_1.bookcase_id, anon_1.bookcase_owner_id FROM people "
"JOIN (VALUES (:param_1, :param_2), (:param_3, :param_4), "
"(:param_5, :param_6), (:param_7, :param_8)) AS anon_1 "
"(bookcase_id, bookcase_owner_id) "
"ON people.people_id = anon_1.bookcase_owner_id",
)
def test_with_join_unnamed(self):
people = self.tables.people
values_ = values(
column("column1", Integer),
column("column2", Integer),
).data([(1, 1), (2, 1), (3, 2), (3, 3)])
stmt = select(people, values_).select_from(
people.join(values_, values_.c.column2 == people.c.people_id)
)
self.assert_compile(
stmt,
"SELECT people.people_id, people.age, people.name, column1, "
"column2 FROM people JOIN (VALUES (:param_1, :param_2), "
"(:param_3, :param_4), (:param_5, :param_6), "
"(:param_7, :param_8)) "
"ON people.people_id = column2",
checkparams={
"param_1": 1,
"param_2": 1,
"param_3": 2,
"param_4": 1,
"param_5": 3,
"param_6": 2,
"param_7": 3,
"param_8": 3,
},
)
def test_with_join_named(self):
people = self.tables.people
values_ = values(
column("bookcase_id", Integer),
column("bookcase_owner_id", Integer),
name="bookcases",
).data([(1, 1), (2, 1), (3, 2), (3, 3)])
stmt = select(people, values_).select_from(
people.join(
values_, values_.c.bookcase_owner_id == people.c.people_id
)
)
self.assert_compile(
stmt,
"SELECT people.people_id, people.age, people.name, "
"bookcases.bookcase_id, bookcases.bookcase_owner_id FROM people "
"JOIN (VALUES (:param_1, :param_2), (:param_3, :param_4), "
"(:param_5, :param_6), (:param_7, :param_8)) AS bookcases "
"(bookcase_id, bookcase_owner_id) "
"ON people.people_id = bookcases.bookcase_owner_id",
checkparams={
"param_1": 1,
"param_2": 1,
"param_3": 2,
"param_4": 1,
"param_5": 3,
"param_6": 2,
"param_7": 3,
"param_8": 3,
},
)
def test_with_aliased_join(self):
people = self.tables.people
values_ = (
values(
column("bookcase_id", Integer),
column("bookcase_owner_id", Integer),
)
.data([(1, 1), (2, 1), (3, 2), (3, 3)])
.alias("bookcases")
)
stmt = select(people, values_).select_from(
people.join(
values_, values_.c.bookcase_owner_id == people.c.people_id
)
)
self.assert_compile(
stmt,
"SELECT people.people_id, people.age, people.name, "
"bookcases.bookcase_id, bookcases.bookcase_owner_id FROM people "
"JOIN (VALUES (:param_1, :param_2), (:param_3, :param_4), "
"(:param_5, :param_6), (:param_7, :param_8)) AS bookcases "
"(bookcase_id, bookcase_owner_id) "
"ON people.people_id = bookcases.bookcase_owner_id",
checkparams={
"param_1": 1,
"param_2": 1,
"param_3": 2,
"param_4": 1,
"param_5": 3,
"param_6": 2,
"param_7": 3,
"param_8": 3,
},
)
def test_with_standalone_aliased_join(self):
people = self.tables.people
values_ = values(
column("bookcase_id", Integer),
column("bookcase_owner_id", Integer),
).data([(1, 1), (2, 1), (3, 2), (3, 3)])
values_ = alias(values_, "bookcases")
stmt = select(people, values_).select_from(
people.join(
values_, values_.c.bookcase_owner_id == people.c.people_id
)
)
self.assert_compile(
stmt,
"SELECT people.people_id, people.age, people.name, "
"bookcases.bookcase_id, bookcases.bookcase_owner_id FROM people "
"JOIN (VALUES (:param_1, :param_2), (:param_3, :param_4), "
"(:param_5, :param_6), (:param_7, :param_8)) AS bookcases "
"(bookcase_id, bookcase_owner_id) "
"ON people.people_id = bookcases.bookcase_owner_id",
checkparams={
"param_1": 1,
"param_2": 1,
"param_3": 2,
"param_4": 1,
"param_5": 3,
"param_6": 2,
"param_7": 3,
"param_8": 3,
},
)
def test_lateral(self):
people = self.tables.people
values_ = (
values(
column("bookcase_id", Integer),
column("bookcase_owner_id", Integer),
name="bookcases",
)
.data([(1, 1), (2, 1), (3, 2), (3, 3)])
.lateral()
)
stmt = select(people, values_).select_from(
people.join(values_, true())
)
self.assert_compile(
stmt,
"SELECT people.people_id, people.age, people.name, "
"bookcases.bookcase_id, bookcases.bookcase_owner_id FROM people "
"JOIN LATERAL (VALUES (:param_1, :param_2), (:param_3, :param_4), "
"(:param_5, :param_6), (:param_7, :param_8)) AS bookcases "
"(bookcase_id, bookcase_owner_id) "
"ON true",
checkparams={
"param_1": 1,
"param_2": 1,
"param_3": 2,
"param_4": 1,
"param_5": 3,
"param_6": 2,
"param_7": 3,
"param_8": 3,
},
)
def test_from_linting_named(self):
people = self.tables.people
values_ = values(
column("bookcase_id", Integer),
column("bookcase_owner_id", Integer),
name="bookcases",
).data([(1, 1), (2, 1), (3, 2), (3, 3)])
stmt = select(people, values_)
with testing.expect_warnings(
r"SELECT statement has a cartesian product between FROM "
r'element\(s\) "(?:bookcases|people)" and '
r'FROM element "(?:people|bookcases)"'
):
stmt.compile(linting=FROM_LINTING)
def test_from_linting_unnamed(self):
people = self.tables.people
values_ = values(
column("bookcase_id", Integer),
column("bookcase_owner_id", Integer),
).data([(1, 1), (2, 1), (3, 2), (3, 3)])
stmt = select(people, values_)
with testing.expect_warnings(
r"SELECT statement has a cartesian product between FROM "
r'element\(s\) "(?:\(unnamed VALUES element\)|people)" and '
r'FROM element "(?:people|\(unnamed VALUES element\))"'
):
stmt.compile(linting=FROM_LINTING)
|
ValuesTest
|
python
|
numba__numba
|
numba/core/typing/builtins.py
|
{
"start": 1622,
"end": 2754
}
|
class ____(ConcreteTemplate):
cases = [
signature(types.slice2_type, types.intp),
signature(types.slice2_type, types.none),
signature(types.slice2_type, types.none, types.none),
signature(types.slice2_type, types.none, types.intp),
signature(types.slice2_type, types.intp, types.none),
signature(types.slice2_type, types.intp, types.intp),
signature(types.slice3_type, types.intp, types.intp, types.intp),
signature(types.slice3_type, types.none, types.intp, types.intp),
signature(types.slice3_type, types.intp, types.none, types.intp),
signature(types.slice3_type, types.intp, types.intp, types.none),
signature(types.slice3_type, types.intp, types.none, types.none),
signature(types.slice3_type, types.none, types.intp, types.none),
signature(types.slice3_type, types.none, types.none, types.intp),
signature(types.slice3_type, types.none, types.none, types.none),
]
@infer_global(range, typing_key=range)
@infer_global(prange, typing_key=prange)
@infer_global(internal_prange, typing_key=internal_prange)
|
Slice
|
python
|
django-compressor__django-compressor
|
compressor/offline/jinja2.py
|
{
"start": 450,
"end": 1478
}
|
class ____(Extension):
"""
Functional "spaceless" extension equivalent to Django's.
See: https://github.com/django/django/blob/master/django/template/defaulttags.py
"""
tags = set(["spaceless"])
def parse(self, parser):
lineno = next(parser.stream).lineno
body = parser.parse_statements(["name:endspaceless"], drop_needle=True)
return nodes.CallBlock(
self.call_method("_spaceless", []), [], [], body
).set_lineno(lineno)
def _spaceless(self, caller):
from django.utils.html import strip_spaces_between_tags
return strip_spaces_between_tags(caller().strip())
def url_for(mod, filename):
"""
Incomplete emulation of Flask's url_for.
"""
try:
from django.contrib.staticfiles.templatetags import staticfiles
except ImportError:
# Django 3.0+
import django.templatetags.static as staticfiles
if mod == "static":
return staticfiles.static(filename)
return ""
|
SpacelessExtension
|
python
|
ray-project__ray
|
release/ray_release/reporter/db.py
|
{
"start": 300,
"end": 2329
}
|
class ____(Reporter):
def __init__(self):
self.firehose = boto3.client("firehose", config=Config(region_name="us-west-2"))
def report_result(self, test: Test, result: Result):
logger.info("Persisting result to the databricks delta lake...")
# Prometheus metrics are saved as buildkite artifacts
# and can be obtained using buildkite API.
result_json = {
"_table": "release_test_result",
"report_timestamp_ms": int(time.time() * 1000),
"status": result.status or "",
"branch": os.environ.get("BUILDKITE_BRANCH", ""),
"commit": os.environ.get("BUILDKITE_COMMIT", ""),
"results": result.results or {},
"name": test.get("name", ""),
"group": test.get("group", ""),
"team": test.get("team", ""),
"frequency": test.get("frequency", ""),
"cluster_url": result.cluster_url or "",
"job_id": result.job_id or "",
"job_url": result.job_url or "",
"cluster_id": result.cluster_id or "",
"buildkite_url": result.buildkite_url or "",
"buildkite_job_id": result.buildkite_job_id or "",
"runtime": result.runtime or -1.0,
"stable": result.stable,
"return_code": result.return_code,
"smoke_test": result.smoke_test,
"extra_tags": result.extra_tags or {},
"crash_pattern": LogAggregator(
result.last_logs or ""
).compute_crash_pattern(),
}
logger.debug(f"Result json: {json.dumps(result_json)}")
try:
self.firehose.put_record(
DeliveryStreamName="ray-ci-results",
Record={"Data": json.dumps(result_json)},
)
except Exception:
logger.exception("Failed to persist result to the databricks delta lake")
else:
logger.info("Result has been persisted to the databricks delta lake")
|
DBReporter
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/dotnet.py
|
{
"start": 21757,
"end": 27668
}
|
class ____(RegexLexer):
"""
For the F# language (version 3.0).
AAAAACK Strings
http://research.microsoft.com/en-us/um/cambridge/projects/fsharp/manual/spec.html#_Toc335818775
.. versionadded:: 1.5
"""
name = 'FSharp'
aliases = ['fsharp']
filenames = ['*.fs', '*.fsi']
mimetypes = ['text/x-fsharp']
keywords = [
'abstract', 'as', 'assert', 'base', 'begin', 'class', 'default',
'delegate', 'do!', 'do', 'done', 'downcast', 'downto', 'elif', 'else',
'end', 'exception', 'extern', 'false', 'finally', 'for', 'function',
'fun', 'global', 'if', 'inherit', 'inline', 'interface', 'internal',
'in', 'lazy', 'let!', 'let', 'match', 'member', 'module', 'mutable',
'namespace', 'new', 'null', 'of', 'open', 'override', 'private', 'public',
'rec', 'return!', 'return', 'select', 'static', 'struct', 'then', 'to',
'true', 'try', 'type', 'upcast', 'use!', 'use', 'val', 'void', 'when',
'while', 'with', 'yield!', 'yield',
]
# Reserved words; cannot hurt to color them as keywords too.
keywords += [
'atomic', 'break', 'checked', 'component', 'const', 'constraint',
'constructor', 'continue', 'eager', 'event', 'external', 'fixed',
'functor', 'include', 'method', 'mixin', 'object', 'parallel',
'process', 'protected', 'pure', 'sealed', 'tailcall', 'trait',
'virtual', 'volatile',
]
keyopts = [
'!=', '#', '&&', '&', '\(', '\)', '\*', '\+', ',', '-\.',
'->', '-', '\.\.', '\.', '::', ':=', ':>', ':', ';;', ';', '<-',
'<\]', '<', '>\]', '>', '\?\?', '\?', '\[<', '\[\|', '\[', '\]',
'_', '`', '\{', '\|\]', '\|', '\}', '~', '<@@', '<@', '=', '@>', '@@>',
]
operators = r'[!$%&*+\./:<=>?@^|~-]'
word_operators = ['and', 'or', 'not']
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
primitives = [
'sbyte', 'byte', 'char', 'nativeint', 'unativeint', 'float32', 'single',
'float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32',
'uint32', 'int64', 'uint64', 'decimal', 'unit', 'bool', 'string',
'list', 'exn', 'obj', 'enum',
]
# See http://msdn.microsoft.com/en-us/library/dd233181.aspx and/or
# http://fsharp.org/about/files/spec.pdf for reference. Good luck.
tokens = {
'escape-sequence': [
(r'\\[\\"\'ntbrafv]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
(r'\\u[0-9a-fA-F]{4}', String.Escape),
(r'\\U[0-9a-fA-F]{8}', String.Escape),
],
'root': [
(r'\s+', Text),
(r'\(\)|\[\]', Name.Builtin.Pseudo),
(r'\b(?<!\.)([A-Z][\w\']*)(?=\s*\.)',
Name.Namespace, 'dotted'),
(r'\b([A-Z][\w\']*)', Name),
(r'///.*?\n', String.Doc),
(r'//.*?\n', Comment.Single),
(r'\(\*(?!\))', Comment, 'comment'),
(r'@"', String, 'lstring'),
(r'"""', String, 'tqs'),
(r'"', String, 'string'),
(r'\b(open|module)(\s+)([\w.]+)',
bygroups(Keyword, Text, Name.Namespace)),
(r'\b(let!?)(\s+)(\w+)',
bygroups(Keyword, Text, Name.Variable)),
(r'\b(type)(\s+)(\w+)',
bygroups(Keyword, Text, Name.Class)),
(r'\b(member|override)(\s+)(\w+)(\.)(\w+)',
bygroups(Keyword, Text, Name, Punctuation, Name.Function)),
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
(r'``([^`\n\r\t]|`[^`\n\r\t])+``', Name),
(r'(%s)' % '|'.join(keyopts), Operator),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
(r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
(r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
(r'#[ \t]*(if|endif|else|line|nowarn|light|\d+)\b.*?\n',
Comment.Preproc),
(r"[^\W\d][\w']*", Name),
(r'\d[\d_]*[uU]?[yslLnQRZINGmM]?', Number.Integer),
(r'0[xX][\da-fA-F][\da-fA-F_]*[uU]?[yslLn]?[fF]?', Number.Hex),
(r'0[oO][0-7][0-7_]*[uU]?[yslLn]?', Number.Oct),
(r'0[bB][01][01_]*[uU]?[yslLn]?', Number.Bin),
(r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)[fFmM]?',
Number.Float),
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'B?",
String.Char),
(r"'.'", String.Char),
(r"'", Keyword), # a stray quote is another syntax element
(r'@?"', String.Double, 'string'),
(r'[~?][a-z][\w\']*:', Name.Variable),
],
'dotted': [
(r'\s+', Text),
(r'\.', Punctuation),
(r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
(r'[A-Z][\w\']*', Name, '#pop'),
(r'[a-z_][\w\']*', Name, '#pop'),
# e.g. dictionary index access
default('#pop'),
],
'comment': [
(r'[^(*)@"]+', Comment),
(r'\(\*', Comment, '#push'),
(r'\*\)', Comment, '#pop'),
# comments cannot be closed within strings in comments
(r'@"', String, 'lstring'),
(r'"""', String, 'tqs'),
(r'"', String, 'string'),
(r'[(*)@]', Comment),
],
'string': [
(r'[^\\"]+', String),
include('escape-sequence'),
(r'\\\n', String),
(r'\n', String), # newlines are allowed in any string
(r'"B?', String, '#pop'),
],
'lstring': [
(r'[^"]+', String),
(r'\n', String),
(r'""', String),
(r'"B?', String, '#pop'),
],
'tqs': [
(r'[^"]+', String),
(r'\n', String),
(r'"""B?', String, '#pop'),
(r'"', String),
],
}
|
FSharpLexer
|
python
|
keon__algorithms
|
tests/test_backtrack.py
|
{
"start": 3534,
"end": 4865
}
|
class ____(unittest.TestCase):
def test_get_factors(self):
target1 = 32
answer1 = [
[2, 16],
[2, 2, 8],
[2, 2, 2, 4],
[2, 2, 2, 2, 2],
[2, 4, 4],
[4, 8]
]
self.assertEqual(sorted(get_factors(target1)), sorted(answer1))
target2 = 12
answer2 = [
[2, 6],
[2, 2, 3],
[3, 4]
]
self.assertEqual(sorted(get_factors(target2)), sorted(answer2))
self.assertEqual(sorted(get_factors(1)), [])
self.assertEqual(sorted(get_factors(37)), [])
def test_recursive_get_factors(self):
target1 = 32
answer1 = [
[2, 16],
[2, 2, 8],
[2, 2, 2, 4],
[2, 2, 2, 2, 2],
[2, 4, 4],
[4, 8]
]
self.assertEqual(sorted(recursive_get_factors(target1)),
sorted(answer1))
target2 = 12
answer2 = [
[2, 6],
[2, 2, 3],
[3, 4]
]
self.assertEqual(sorted(recursive_get_factors(target2)),
sorted(answer2))
self.assertEqual(sorted(recursive_get_factors(1)), [])
self.assertEqual(sorted(recursive_get_factors(37)), [])
|
TestFactorCombinations
|
python
|
Lightning-AI__lightning
|
tests/parity_fabric/models.py
|
{
"start": 804,
"end": 1283
}
|
class ____(ABC, nn.Module):
"""Defines the interface for a model in a Fabric-PyTorch parity test."""
# Benchmarking parameters that should be model-specific
batch_size = 1
num_steps = 1
@abstractmethod
def get_optimizer(self, *args, **kwargs) -> Optimizer:
pass
@abstractmethod
def get_dataloader(self, *args, **kwargs) -> DataLoader:
pass
@abstractmethod
def get_loss_function(self) -> Callable:
pass
|
ParityModel
|
python
|
keras-team__keras
|
keras/src/distribution/distribution_lib_test.py
|
{
"start": 13745,
"end": 20259
}
|
class ____(testing.TestCase):
def setUp(self):
super().setUp()
self.devices = [f"cpu:{i}" for i in range(8)]
shape = (4, 2)
axis_names = ["data", "model"]
self.device_mesh = distribution_lib.DeviceMesh(
shape, axis_names, self.devices
)
self.sharded_2d = distribution_lib.TensorLayout([None, "model"])
self.sharded_1d = distribution_lib.TensorLayout(["model"])
self.replicated_2d = distribution_lib.TensorLayout([None, None])
self.replicated_1d = distribution_lib.TensorLayout([None])
def test_add(self):
layout_map = distribution_lib.LayoutMap(self.device_mesh)
layout_map["dense/kernel"] = self.sharded_2d
layout_map["dense/bias"] = self.sharded_1d
# Test for adding list/tuple as shortcut for TensorLayout
layout_map["conv/bias"] = ("model",)
# Make there are two items in the map, and we access them via the
# underlying container at layout_map._layout_map
self.assertLen(layout_map, 3)
kernel_layout = layout_map["dense/kernel"]
self.assertEqual(kernel_layout.axes, (None, "model"))
self.assertIs(kernel_layout.device_mesh, self.device_mesh)
bias_layout = layout_map["dense/bias"]
self.assertEqual(bias_layout.axes, ("model",))
self.assertIs(bias_layout.device_mesh, self.device_mesh)
conv_bias_layout = layout_map["conv/bias"]
self.assertEqual(conv_bias_layout.axes, ("model",))
self.assertIs(bias_layout.device_mesh, self.device_mesh)
with self.assertRaisesRegex(ValueError, "dense/kernel already exist"):
layout_map["dense/kernel"] = self.sharded_2d
with self.assertRaisesRegex(ValueError, "should be a TensorLayout"):
layout_map["conv.kernel"] = ["a", "b"]
def test_get(self):
layout_map = distribution_lib.LayoutMap(self.device_mesh)
layout_map["dense/kernel"] = self.sharded_2d
layout_map["dense/bias"] = self.sharded_1d
layout_map["dense.*kernel"] = self.replicated_2d
layout_map["dense.*bias"] = self.replicated_1d
layout_map["bias"] = self.sharded_1d
self.assertEqual(layout_map["dense/kernel"], self.sharded_2d)
self.assertEqual(layout_map["dense/bias"], self.sharded_1d)
self.assertEqual(layout_map["dense_2/kernel"], self.replicated_2d)
# Map against the wildcard bias rule for dense. This will cause a
# ValueError
with self.assertRaisesRegex(
ValueError, "Path 'dense_2/bias' matches multiple layout"
):
layout_map["dense_2/bias"]
self.assertIsNone(layout_map["conv2d/kernel"])
self.assertEqual(layout_map["conv2d/bias"], self.sharded_1d)
def test_delete(self):
layout_map = distribution_lib.LayoutMap(self.device_mesh)
layout_map["dense/kernel"] = self.sharded_2d
layout_map["dense/bias"] = self.sharded_1d
self.assertEqual(layout_map.pop("dense/kernel"), self.sharded_2d)
# Make sure to match against the exact string, not the regex
with self.assertRaises(KeyError):
layout_map.pop(".*bias")
# Make sure del also works
del layout_map["dense/bias"]
self.assertLen(layout_map, 0)
def test_len(self):
layout_map = distribution_lib.LayoutMap(self.device_mesh)
self.assertLen(layout_map, 0)
layout_map["dense/kernel"] = self.sharded_2d
layout_map["dense/bias"] = self.sharded_1d
self.assertLen(layout_map, 2)
def test_iter(self):
layout_map = distribution_lib.LayoutMap(self.device_mesh)
layout_map["dense/kernel"] = self.sharded_2d
layout_map["dense/bias"] = self.sharded_1d
# Make sure the items are ordered based on the insertion order.
self.assertEqual(
list(layout_map.keys()), ["dense/kernel", "dense/bias"]
)
keys = []
values = []
for k, v in layout_map.items():
keys.append(k)
values.append(v)
self.assertEqual(keys, ["dense/kernel", "dense/bias"])
self.assertEqual(values, [self.sharded_2d, self.sharded_1d])
# @pytest.mark.skipif(
# backend.backend() != "tensorflow",
# reason="Backend specific test",
# )
# class TensorflowDistributionLibTest(testing.TestCase):
# def setUp(self):
# super().setUp()
# # Config virtual devices for testing.
# cpus = tf.config.list_physical_devices("cpu")
# context._reset_context()
# tf.config.set_logical_device_configuration(
# cpus[0], [tf.config.LogicalDeviceConfiguration()] * 8
# )
# dtensor.initialize_accelerator_system("cpu")
# def tearDown(self) -> None:
# super().tearDown()
# dtensor.shutdown_accelerator_system()
# def test_list_devices(self):
# self.assertEqual(len(distribution_lib.list_devices()), 8)
# self.assertEqual(len(distribution_lib.list_devices("cpu")), 8)
# self.assertEqual(len(distribution_lib.list_devices("cpu")), 8)
# def test_to_dtensor_mesh(self):
# devices = [f"cpu:{i}" for i in range(8)]
# shape = (4, 2)
# axis_names = ["batch", "model"]
# mesh = distribution_lib.DeviceMesh(shape, axis_names, devices)
# dtensor_mesh = backend_dlib._to_dtensor_mesh(mesh)
# self.assertIsInstance(dtensor_mesh, dtensor.Mesh)
# self.assertEqual(dtensor_mesh.shape(), list(shape))
# self.assertEqual(dtensor_mesh.dim_names, axis_names)
# def test_to_dtensor_layout(self):
# axes = ["data", None]
# mesh = distribution_lib.DeviceMesh(
# (4, 2), ["data", "model"], [f"cpu:{i}" for i in range(8)]
# )
# layout = distribution_lib.TensorLayout(axes, mesh)
# dtensor_layout = backend_dlib._to_dtensor_layout(layout)
# dtensor_mesh = backend_dlib._to_dtensor_mesh(mesh)
# self.assertEqual(
# dtensor_layout,
# dtensor.Layout(["data", dtensor.UNSHARDED], dtensor_mesh),
# )
# def test_validation_for_device_mesh(self):
# axes = ["data", None]
# layout = distribution_lib.TensorLayout(axes, device_mesh=None)
# with self.assertRaisesRegex(
# ValueError, "Cannot create sharding when device mesh is not set"
# ):
# backend_dlib._to_dtensor_layout(layout)
|
LayoutMapTest
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_9/events.py
|
{
"start": 65145,
"end": 68512
}
|
class ____(Response):
"""
Response of events.get_task_events endpoint.
:param events: Events list
:type events: Sequence[dict]
:param returned: Number of results returned
:type returned: int
:param total: Total number of results available for this query
:type total: float
:param scroll_id: Scroll ID for getting more results
:type scroll_id: str
"""
_service = "events"
_action = "get_task_events"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"events": {
"description": "Events list",
"items": {"type": "object"},
"type": ["array", "null"],
},
"returned": {
"description": "Number of results returned",
"type": ["integer", "null"],
},
"scroll_id": {
"description": "Scroll ID for getting more results",
"type": ["string", "null"],
},
"total": {
"description": "Total number of results available for this query",
"type": ["number", "null"],
},
},
"type": "object",
}
def __init__(
self,
events: Optional[List[dict]] = None,
returned: Optional[int] = None,
total: Optional[float] = None,
scroll_id: Optional[str] = None,
**kwargs: Any
) -> None:
super(GetTaskEventsResponse, self).__init__(**kwargs)
self.events = events
self.returned = returned
self.total = total
self.scroll_id = scroll_id
@schema_property("events")
def events(self) -> Optional[List[dict]]:
return self._property_events
@events.setter
def events(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_events = None
return
self.assert_isinstance(value, "events", (list, tuple))
self.assert_isinstance(value, "events", (dict,), is_array=True)
self._property_events = value
@schema_property("returned")
def returned(self) -> Optional[int]:
return self._property_returned
@returned.setter
def returned(self, value: Optional[int]) -> None:
if value is None:
self._property_returned = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "returned", six.integer_types)
self._property_returned = value
@schema_property("total")
def total(self) -> Optional[float]:
return self._property_total
@total.setter
def total(self, value: Optional[float]) -> None:
if value is None:
self._property_total = None
return
self.assert_isinstance(value, "total", six.integer_types + (float,))
self._property_total = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
|
GetTaskEventsResponse
|
python
|
cython__cython
|
tests/run/pep557_dataclasses.py
|
{
"start": 108,
"end": 540
}
|
class ____:
"""
>>> list(Color.__dataclass_fields__.keys())
['red', 'green', 'blue', 'alpha']
>>> Color(1, 2, 3)
Color(red=1, green=2, blue=3, alpha=255)
>>> Color(1, 2, 3, 4)
Color(red=1, green=2, blue=3, alpha=4)
>>> Color(green=1, blue=2, red=3, alpha=40)
Color(red=3, green=1, blue=2, alpha=40)
"""
red: int
green: int
blue: int
alpha: int = 255
@dataclasses.dataclass
|
Color
|
python
|
jina-ai__jina
|
tests/unit/jaml/parsers/executors/test_legacy.py
|
{
"start": 419,
"end": 542
}
|
class ____(A, B, C):
def __init__(self, d, *args, **kwargs):
super.__init__(*args, **kwargs)
self.d = d
|
D
|
python
|
getsentry__sentry
|
src/sentry/preprod/analytics.py
|
{
"start": 122,
"end": 309
}
|
class ____(analytics.Event):
organization_id: int
project_id: int
user_id: int | None = None
@analytics.eventclass("preprod_artifact.api.update")
|
PreprodArtifactApiAssembleEvent
|
python
|
ray-project__ray
|
release/ray_release/exception.py
|
{
"start": 1268,
"end": 1361
}
|
class ____(ReleaseTestPackageError):
exit_code = ExitCode.SETUP_ERROR
|
ReleaseTestSetupError
|
python
|
encode__django-rest-framework
|
tests/test_generics.py
|
{
"start": 1771,
"end": 1979
}
|
class ____(InstanceView):
"""
A model with a slug-field.
"""
queryset = SlugBasedModel.objects.all()
serializer_class = SlugSerializer
lookup_field = 'slug'
# Tests
|
SlugBasedInstanceView
|
python
|
realpython__materials
|
inheritance-and-composition/inheritance/productivity.py
|
{
"start": 623,
"end": 727
}
|
class ____:
def work(self, hours):
return f"manufactures gadgets for {hours} hours."
|
FactoryRole
|
python
|
astropy__astropy
|
astropy/io/votable/exceptions.py
|
{
"start": 22369,
"end": 22722
}
|
class ____(IOWarning):
"""
Raised when the VO service database can not be updated (possibly
due to a network outage). This is only a warning, since an older
and possible out-of-date VO service database was available
locally.
"""
message_template = "Unable to update service information for '{}'"
default_args = ("x",)
|
W23
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-dg-cli/dagster_dg_cli_tests/cli_tests/api_tests/run_tests/test_schema_only.py
|
{
"start": 270,
"end": 3067
}
|
class ____:
"""Test the Run schema model."""
def test_run_creation_minimal(self):
"""Test creating run with minimal required fields."""
run = DgApiRun(
id="test-run-123",
status=DgApiRunStatus.QUEUED,
created_at=1705311000.0, # 2024-01-15T10:30:00Z
)
assert run.id == "test-run-123"
assert run.status == DgApiRunStatus.QUEUED
assert run.created_at == 1705311000.0
assert run.started_at is None
assert run.ended_at is None
assert run.job_name is None
def test_run_creation_complete(self):
"""Test creating run with all fields."""
run = DgApiRun(
id="complete-run-456",
status=DgApiRunStatus.SUCCESS,
created_at=1705311000.0, # 2024-01-15T10:30:00Z
started_at=1705311060.0, # 2024-01-15T10:31:00Z
ended_at=1705311900.0, # 2024-01-15T10:45:00Z
job_name="my_pipeline",
)
assert run.id == "complete-run-456"
assert run.status == DgApiRunStatus.SUCCESS
assert run.created_at == 1705311000.0
assert run.started_at == 1705311060.0
assert run.ended_at == 1705311900.0
assert run.job_name == "my_pipeline"
def test_run_json_serialization(self):
"""Test that Run can be serialized to JSON."""
run = DgApiRun(
id="json-test-789",
status=DgApiRunStatus.FAILURE,
created_at=1705311000.0, # 2024-01-15T10:30:00Z
started_at=1705311060.0, # 2024-01-15T10:31:00Z
ended_at=1705311180.0, # 2024-01-15T10:33:00Z
job_name="failing_pipeline",
)
json_str = run.model_dump_json()
parsed = json.loads(json_str)
assert parsed["id"] == "json-test-789"
assert parsed["status"] == "FAILURE"
assert parsed["created_at"] == 1705311000.0
assert parsed["started_at"] == 1705311060.0
assert parsed["ended_at"] == 1705311180.0
assert parsed["job_name"] == "failing_pipeline"
def test_run_json_deserialization(self):
"""Test that Run can be created from JSON."""
json_data = {
"id": "from-json-abc",
"status": "STARTED",
"created_at": 1705311000.0, # 2024-01-15T10:30:00Z
"started_at": 1705311060.0, # 2024-01-15T10:31:00Z
"ended_at": None,
"job_name": "json_pipeline",
}
run = DgApiRun(**json_data)
assert run.id == "from-json-abc"
assert run.status == DgApiRunStatus.STARTED
assert run.created_at == 1705311000.0
assert run.started_at == 1705311060.0
assert run.ended_at is None
assert run.job_name == "json_pipeline"
|
TestRunSchema
|
python
|
astropy__astropy
|
astropy/coordinates/angles/errors.py
|
{
"start": 2247,
"end": 2818
}
|
class ____(AstropyWarning):
"""
Raised when a minute value is 60.
Parameters
----------
minute : int, float
"""
def __init__(self, minute, alternativeactionstr=None):
self.minute = minute
self.alternativeactionstr = alternativeactionstr
def __str__(self):
message = (
f"'minute' was found to be '{self.minute}', which is not in range [0,60)."
)
if self.alternativeactionstr is not None:
message += " " + self.alternativeactionstr
return message
|
IllegalMinuteWarning
|
python
|
Pylons__pyramid
|
src/pyramid/predicates.py
|
{
"start": 7048,
"end": 7558
}
|
class ____:
def __init__(self, val, config):
if is_nonstr_iter(val):
self.val = tuple(val)
else:
val = tuple(filter(None, val.split('/')))
self.val = ('',) + val
def text(self):
return f'physical_path = {self.val}'
phash = text
def __call__(self, context, request):
if getattr(context, '__name__', _marker) is not _marker:
return resource_path_tuple(context) == self.val
return False
|
PhysicalPathPredicate
|
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/pyct/static_analysis/reaching_definitions.py
|
{
"start": 1751,
"end": 3262
}
|
class ____(object):
"""Abstraction for the state of the CFG walk for reaching definition analysis.
This is a value type. Only implements the strictly necessary operators.
Attributes:
value: Dict[qual_names.QN, Set[Definition, ...]], the defined symbols and
their possible definitions
"""
def __init__(self, init_from=None):
if init_from:
if isinstance(init_from, _NodeState):
self.value = {
s: set(other_infos) for s, other_infos in init_from.value.items()
}
elif isinstance(init_from, dict):
self.value = {s: set((init_from[s],)) for s in init_from}
else:
assert False, init_from
else:
self.value = {}
def __eq__(self, other):
if frozenset(self.value.keys()) != frozenset(other.value.keys()):
return False
ret = all(self.value[s] == other.value[s] for s in self.value)
return ret
def __ne__(self, other):
return not self.__eq__(other)
def __or__(self, other):
assert isinstance(other, _NodeState)
result = _NodeState(self)
for s, other_infos in other.value.items():
if s in result.value:
result.value[s].update(other_infos)
else:
result.value[s] = set(other_infos)
return result
def __sub__(self, other):
assert isinstance(other, set)
result = _NodeState(self)
for s in other:
result.value.pop(s, None)
return result
def __repr__(self):
return 'NodeState[%s]=%s' % (id(self), repr(self.value))
|
_NodeState
|
python
|
getsentry__sentry
|
src/sentry/analytics/events/sentry_app_installation_token_created.py
|
{
"start": 94,
"end": 306
}
|
class ____(analytics.Event):
user_id: int
organization_id: int
sentry_app_installation_id: int
sentry_app: str
analytics.register(SentryAppInstallationTokenCreated)
|
SentryAppInstallationTokenCreated
|
python
|
joke2k__faker
|
faker/providers/person/ar_PS/__init__.py
|
{
"start": 55,
"end": 884
}
|
class ____(ArabicPersonProvider):
last_names = (
"أبو اسنينة",
"أبو شقدم",
"أبو شلبك",
"أبو غليون",
"أبو قمر",
"أستيتية",
"الأدغم",
"الإغباري",
"البرغوثي",
"التركمان",
"التميمي",
"الجنيدي",
"الحسيني",
"الحنبلي",
"الخازن",
"الخماش",
"الخياط",
"الزيتاوي",
"الزيدانية",
"السكاكيني",
"الصالحي",
"النشاشيبي",
"النعنيش",
"بدر",
"ترابين",
"جرار",
"جزار",
"حمامي",
"حوسة",
"خوري",
"دغمش",
"دلاشة",
"شاهين",
"صليبا",
"طوقان",
"فطاير",
"قرادة",
"كسواني",
"مرمش",
"مهيار",
"نسيبة",
"هاشم",
)
|
Provider
|
python
|
pypa__pip
|
tests/unit/test_options.py
|
{
"start": 7213,
"end": 8507
}
|
class ____(AddFakeCommandMixin):
def test_general_option_after_subcommand(self) -> None:
# FakeCommand intentionally returns the wrong type.
options, args = cast(
tuple[Values, list[str]], main(["fake", "--timeout", "-1"])
)
assert options.timeout == -1
def test_option_after_subcommand_arg(self) -> None:
# FakeCommand intentionally returns the wrong type.
options, args = cast(
tuple[Values, list[str]], main(["fake", "arg", "--timeout", "-1"])
)
assert options.timeout == -1
def test_additive_before_after_subcommand(self) -> None:
# FakeCommand intentionally returns the wrong type.
options, args = cast(tuple[Values, list[str]], main(["-v", "fake", "-v"]))
assert options.verbose == 2
def test_subcommand_option_before_subcommand_fails(self) -> None:
with pytest.raises(SystemExit):
main(["--find-links", "F1", "fake"])
@contextmanager
def tmpconfig(option: str, value: Any, section: str = "global") -> Iterator[str]:
with NamedTemporaryFile(mode="w", delete=False) as f:
f.write(f"[{section}]\n{option}={value}\n")
name = f.name
try:
yield name
finally:
os.unlink(name)
|
TestOptionsInterspersed
|
python
|
ray-project__ray
|
python/ray/experimental/raysort/types.py
|
{
"start": 145,
"end": 303
}
|
class ____(NamedTuple):
part_id: PartId
node: NodeAddress
path: Path
def __repr__(self):
return f"Part({self.node}:{self.path})"
|
PartInfo
|
python
|
pytorch__pytorch
|
torch/_jit_internal.py
|
{
"start": 27404,
"end": 51307
}
|
class ____(contextlib.AbstractContextManager):
def __init__(self, **kwargs):
pass
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
pass
def ignore(drop=False, **kwargs):
"""
This decorator indicates to the compiler that a function or method should
be ignored and left as a Python function. This allows you to leave code in
your model that is not yet TorchScript compatible. If called from TorchScript,
ignored functions will dispatch the call to the Python interpreter. Models with ignored
functions cannot be exported; use :func:`@torch.jit.unused <torch.jit.unused>` instead.
Example (using ``@torch.jit.ignore`` on a method)::
import torch
import torch.nn as nn
class MyModule(nn.Module):
@torch.jit.ignore
def debugger(self, x):
import pdb
pdb.set_trace()
def forward(self, x):
x += 10
# The compiler would normally try to compile `debugger`,
# but since it is `@ignore`d, it will be left as a call
# to Python
self.debugger(x)
return x
m = torch.jit.script(MyModule())
# Error! The call `debugger` cannot be saved since it calls into Python
m.save("m.pt")
Example (using ``@torch.jit.ignore(drop=True)`` on a method):
.. testcode::
import torch
import torch.nn as nn
class MyModule(nn.Module):
@torch.jit.ignore(drop=True)
def training_method(self, x):
import pdb
pdb.set_trace()
def forward(self, x):
if self.training:
self.training_method(x)
return x
m = torch.jit.script(MyModule())
# This is OK since `training_method` is not saved, the call is replaced
# with a `raise`.
m.save("m.pt")
.. testcleanup::
import os
os.remove('m.pt')
"""
if callable(drop):
# used without any args, so drop is actually a function
# @torch.jit.ignore
# def fn(...):
fn = drop
# pyrefly: ignore [missing-attribute]
fn._torchscript_modifier = FunctionModifiers.IGNORE
return fn
if not isinstance(drop, bool):
raise RuntimeError(
f"Argument to @torch.jit.ignore must be a bool or a function but got {drop}"
)
# for backwards compat
drop_on_export = kwargs.pop("drop_on_export", None)
if drop_on_export:
warnings.warn(
"ignore(drop_on_export=True) has been deprecated. TorchScript will now drop the function "
"call on compilation. Use torch.jit.unused now. {}",
stacklevel=2,
category=FutureWarning,
)
drop = drop_on_export
elif drop:
warnings.warn(
"ignore(True) has been deprecated. TorchScript will now drop the function "
"call on compilation. Use torch.jit.unused now. {}",
stacklevel=2,
category=FutureWarning,
)
def decorator(fn):
if drop:
fn._torchscript_modifier = FunctionModifiers.UNUSED
else:
fn._torchscript_modifier = FunctionModifiers.IGNORE
return fn
return decorator
def _drop(fn: Callable[_P, _R]) -> Callable[_P, _R]:
fn._torchscript_modifier = FunctionModifiers._DROP # type: ignore[attr-defined]
return fn
def _copy_to_script_wrapper(fn: Callable[_P, _R]) -> Callable[_P, _R]:
fn._torchscript_modifier = FunctionModifiers.COPY_TO_SCRIPT_WRAPPER # type: ignore[attr-defined]
return fn
def module_has_exports(mod):
for name in dir(mod):
if hasattr(mod, name):
item = getattr(mod, name)
if callable(item):
if get_torchscript_modifier(item) is FunctionModifiers.EXPORT:
return True
return False
# WARNING: should_drop is currently being used by our JIT code coverage plug-in to mark JIT'd code as covered. If you
# rename this function, please update references in tools/coverage_plugins_package/src/coverage_plugins/jit_plugin.py to
# allow JIT'd code to still be covered.
def should_drop(fn) -> bool:
attr = get_torchscript_modifier(fn)
if attr is None:
return False
return attr is FunctionModifiers.UNUSED or attr is FunctionModifiers._DROP
def is_ignored_fn(fn) -> bool:
mod = get_torchscript_modifier(fn)
return (
mod is FunctionModifiers.UNUSED
or mod is FunctionModifiers.IGNORE
or mod is FunctionModifiers._DROP
)
def _is_drop_fn(fn) -> bool:
mod = get_torchscript_modifier(fn)
return mod is FunctionModifiers._DROP
def is_static_fn(cls, fn) -> bool:
    """True iff attribute *fn* on *cls* was declared as a staticmethod."""
    static_attr = inspect.getattr_static(cls, fn, default=None)
    return isinstance(static_attr, staticmethod)
def get_static_fn(cls, fn):
    """Return the plain function wrapped by staticmethod *fn* on *cls*."""
    static_member = inspect.getattr_static(cls, fn)
    return static_member.__func__
def get_torchscript_modifier(fn):
    """Return the TorchScript modifier attached to *fn*, or None if *fn* is not callable.

    Bound methods are unwrapped to the underlying function first; callables
    without an explicit modifier report FunctionModifiers.DEFAULT.
    """
    if not callable(fn):
        return None
    target = getattr(fn, "__func__", fn)
    return getattr(target, "_torchscript_modifier", FunctionModifiers.DEFAULT)
def copy_torchscript_modifier(orig, new) -> None:
    """Copy the TorchScript modifier from *orig* onto *new*, if *orig* has one."""
    modifier = get_torchscript_modifier(orig)
    if modifier is not None:
        new._torchscript_modifier = modifier
# overloading registration
# overloads get registered in this file, and compiled in torch/jit/__init__.py
# so that they can be imported in nn/functional.py without an import cycle

# Process-global mutable registry: qualified_name => list[overload_functions]
_overloaded_fns: dict[str, list[Callable]] = {}  # noqa: T484
_OVERLOAD_EXAMPLE = """
Example usage of overload function:
@torch.jit._overload
def my_function(x: type0) -> type0: # decl 1
pass
@torch.jit._overload
def my_function(x: type1) -> type1: # decl 2
pass
def my_function(x): # implementation
if isinstance(x, type0):
return x
elif isinstance(x, type1):
return x
"""
def get_overload_no_implementation_error_message(kind, obj):
    """Build the error shown when overload declarations for *obj* have no implementation.

    kind: a human-readable noun ("function" / "method") for the message.
    obj: the overloaded callable; its source location is included in the error.
    """
    sourcelines, file_lineno, filename = get_source_lines_and_file(obj)
    # Bug fix: `filename` was unpacked but unused and the message hard-coded
    # "(unknown)"; point the user at the actual source file instead.
    return (
        f'Implementation for the {kind} "{_qualified_name(obj)}" is missing. Please make '
        f"sure a definition is provided and defined after all overload declarations.\n"
        f'File "{filename}", line {file_lineno}:\n'
        + "".join(sourcelines)
        + "\n"
        + _OVERLOAD_EXAMPLE
    )
def _check_overload_body(func):
    """Validate that an @torch.jit._overload declaration body is only `pass` or `...`.

    Warns (instead of failing) when the source cannot be retrieved; raises
    RuntimeError when the declaration has a real body.
    """
    try:
        parsed = parse_def(func)
    except OSError:
        # Source may be unavailable; this is only a best-effort early check.
        warnings.warn(
            f"Unable to retrieve source for @torch.jit._overload function: {func}.",
            stacklevel=2,
        )
        return

    statements = parsed.ast.body[0].body

    def _is_trivial(stmt):
        # Accept a bare `pass` or a lone `...` expression, nothing else.
        if isinstance(stmt, ast.Pass):
            return True
        return (
            isinstance(stmt, ast.Expr)
            and isinstance(stmt.value, ast.Constant)
            and stmt.value.value is Ellipsis
        )

    if len(statements) != 1 or not _is_trivial(statements[0]):
        msg = (
            "Only `pass` statement or `...` can be the body of overload declaration:\n"
        )
        msg += "\n".join(parsed.source.split("\n")[:3])
        msg += " <- Expecting `pass` or `...` here!\n" + _OVERLOAD_EXAMPLE
        raise RuntimeError(msg)
def _overload(func):
    """Register *func* as an overload declaration; the list is compiled later by torch.jit."""
    _check_overload_body(func)
    qual_name = _qualified_name(func)
    _overloaded_fns.setdefault(qual_name, []).append(func)
    return func
def _get_fn_overloads(qual_name):
    """Return the overload declarations registered under *qual_name*, or None."""
    return _overloaded_fns.get(qual_name)
def _clear_fn_overloads(qual_name) -> None:
    """Drop all overloads registered under *qual_name* (KeyError if none exist)."""
    _overloaded_fns.pop(qual_name)
def get_class_name_lineno(method) -> tuple[str, int]:
    """Return (enclosing class name, class first line number) for a method being decorated.

    Walks exactly two frames up the call stack — one for this call, one for the
    _overload_method call — landing in the class body being defined. Only valid
    when invoked from a decorator at class-definition time.
    """
    current_frame = inspect.currentframe()
    # one for the get_class_name call, one for _overload_method call
    for _ in range(2):
        assert (
            current_frame is not None
        )  # assert current frame is not an Optional[FrameType]
        current_frame = current_frame.f_back
    assert current_frame is not None  # same here
    class_name = current_frame.f_code.co_name
    line_no = current_frame.f_code.co_firstlineno
    return class_name, line_no
# At the point the decorator is applied to class methods the method
# has no reference to its owning class. _qualified_name would not include
# the class it is defined in, so any methods with the same name in the same file
# would have the same _qualified_name, even if they were defined in different
# classes. This problem only exists in python 2.
# We get around this problem by looking at the stack frame and identifying
# the class name, and throwing an error whenever overloads are used
# when modules of the same name are in the same file

# Process-global registry of method overloads:
# qualified_name => class name => list[overload_functions]
_overloaded_methods: dict[str, dict[str, list[Callable]]] = {}  # noqa: T484

# (qualified_name, class name) => first line number of the class body; used to
# detect two same-named classes in one file declaring overloads.
_overloaded_method_class_fileno: dict[tuple[str, str], int] = {}
def _overload_method(func):
    """Register *func* as a method overload declaration.

    Overloads are grouped by the method's qualified name and then by the
    enclosing class name (recovered from the call stack via
    get_class_name_lineno). Raises RuntimeError when two same-named classes in
    the same module both declare overloads for the same method name, since
    their registrations would collide.
    """
    _check_overload_body(func)
    qual_name = _qualified_name(func)
    global _overloaded_methods
    class_name_map = _overloaded_methods.get(qual_name)
    if class_name_map is None:
        class_name_map = {}
        _overloaded_methods[qual_name] = class_name_map
    class_name, line_no = get_class_name_lineno(func)
    method_overloads = class_name_map.get(class_name)
    if method_overloads is None:
        method_overloads = []
        class_name_map[class_name] = method_overloads
        # Remember which class body (by line number) owns these overloads.
        _overloaded_method_class_fileno[(qual_name, class_name)] = line_no
    else:
        existing_lineno = _overloaded_method_class_fileno[(qual_name, class_name)]
        if existing_lineno != line_no:
            # Same class name, different definition site => ambiguous registry.
            raise RuntimeError(
                "Cannot currently overload the same method name in two different"
                " classes with the same name in the same module"
            )
    method_overloads.append(func)
    return func
def _get_overloaded_methods(method, mod_class):
    """Return the overload declarations registered for *method* on *mod_class*.

    Returns None when the method has no __name__, no registered overloads, or
    no overloads for this particular class. Raises AssertionError when the
    method is defined outside the class body (module redeclared in the file),
    because overload lookup would be ambiguous.
    """
    # TODO: __name__ not set for submodules in recursive script
    if not hasattr(method, "__name__"):
        return None
    qual_name = _qualified_name(method)
    class_name_map = _overloaded_methods.get(qual_name)
    if class_name_map is None:
        return None
    overloads = class_name_map.get(mod_class.__name__, None)
    if overloads is None:
        return None
    method_line_no = get_source_lines_and_file(method)[1]
    # Fetch the class source once (was fetched twice before).
    class_sourcelines, mod_class_fileno, _ = get_source_lines_and_file(mod_class)
    mod_end_fileno = mod_class_fileno + len(class_sourcelines)
    if not (mod_class_fileno <= method_line_no <= mod_end_fileno):
        raise AssertionError(
            "Overloads are not usable when a module is redeclared within the same file: "
            + str(method)
        )
    return overloads
def is_tuple(ann) -> bool:
    """True if *ann* is a parameterized tuple annotation (typing.Tuple[...] or tuple[...])."""
    # Bare typing.Tuple (no args) is an error; bare `tuple` is fine.
    if ann is typing.Tuple:  # noqa: UP006
        raise_error_container_parameter_missing("Tuple")
    # For some reason Python 3.7 violates the Type[A, B].__origin__ == Type rule
    if not hasattr(ann, "__module__"):
        return False
    return get_origin(ann) is tuple and ann.__module__ in ("builtins", "typing")
def is_list(ann) -> bool:
    """True if *ann* is a parameterized list annotation (typing.List[...] or list[...])."""
    # Bare typing.List (no args) is an error; bare `list` is fine.
    if ann is typing.List:  # noqa: UP006
        raise_error_container_parameter_missing("List")
    if not hasattr(ann, "__module__"):
        return False
    return get_origin(ann) is list and ann.__module__ in ("builtins", "typing")
def is_dict(ann) -> bool:
    """True if *ann* is a parameterized dict annotation (typing.Dict[...] or dict[...])."""
    # Bare typing.Dict (no args) is an error; bare `dict` is fine.
    if ann is typing.Dict:  # noqa: UP006
        raise_error_container_parameter_missing("Dict")
    if not hasattr(ann, "__module__"):
        return False
    return get_origin(ann) is dict and ann.__module__ in ("builtins", "typing")
def is_union(ann):
    """True for typing.Union[...] annotations and PEP 604 `X | Y` unions."""
    # Bare Union (no args) is an error.
    if ann is Union:
        raise_error_container_parameter_missing("Union")
    if isinstance(ann, BuiltinUnionType):
        return True
    return (
        hasattr(ann, "__module__")
        and ann.__module__ == "typing"
        and get_origin(ann) is Union
    )
def is_optional(ann):
    """True if *ann* is Optional[...] or an equivalent two-member union with None."""
    # Bare Optional (no args) is an error.
    if ann is Optional:
        raise_error_container_parameter_missing("Optional")

    def _spelled_as_optional(a):
        return (
            hasattr(a, "__module__")
            and a.__module__ == "typing"
            and get_origin(a) is Optional
        )

    def _union_with_none(a):
        args = get_args(a)
        return len(args) == 2 and (None in args or type(None) in args)

    return _spelled_as_optional(ann) or (is_union(ann) and _union_with_none(ann))
def is_future(ann) -> bool:
    """True if *ann* is a parameterized Future[...] annotation (bare Future is an error)."""
    if ann is Future:
        raise RuntimeError(
            "Attempted to use Future without a "
            "contained type. Please add a contained type, e.g. "
            "Future[int]"
        )
    origin = get_origin(ann)
    return origin is Future
def is_await(ann) -> bool:
    """True if *ann* is the _Await type itself or a parameterized _Await[...]."""
    return ann is _Await or get_origin(ann) is _Await
if torch.distributed.rpc.is_available():
    from torch._C._distributed_rpc import PyRRef
    from torch.distributed.rpc import RRef

    def is_rref(ann) -> bool:
        """True if *ann* is a parameterized RRef[...] annotation (bare RRef is an error)."""
        if ann is RRef:
            raise RuntimeError(
                "Attempted to use RRef without a "
                "contained type. Please add a contained type, e.g. "
                "RRef[int]"
            )
        return get_origin(ann) is RRef

    def is_rref_instance(obj) -> bool:
        """True if *obj* is a live RRef (PyRRef) instance."""
        return isinstance(obj, PyRRef)

else:
    # NOTE(review): only `is_rref_instance` has a fallback here; referencing
    # `is_rref` on a build without RPC would raise NameError — confirm all
    # call sites of `is_rref` are themselves RPC-gated.
    def is_rref_instance(obj) -> bool:
        # If the RPC module doesn't exist then RRefs don't exist either.
        return False
def _try_get_dispatched_fn(fn):
    """Return the boolean-dispatch record for *fn*, or None if absent/not callable."""
    return boolean_dispatched.get(fn) if callable(fn) else None
def _get_named_tuple_properties(
    obj,
    loc: Optional[torch._C._jit_tree_views.SourceRange] = None,
    rcb=None,
):
    """Extract (name, fields, field types, defaults) from a NamedTuple class.

    obj: a NamedTuple class (asserted to be a tuple subclass with _fields).
    loc: source range used in error messages; defaults to a fake range.
    rcb: resolution callback used to resolve string/ForwardRef annotations.

    Fields without annotations get an inferred TensorType.
    """
    if loc is None:
        loc = fake_range()
    assert issubclass(obj, tuple) and hasattr(obj, "_fields")
    if hasattr(obj, "_field_defaults"):
        # Collect defaults in declared field order.
        defaults = [
            obj._field_defaults[field]
            for field in obj._fields
            if field in obj._field_defaults
        ]
    else:
        defaults = []
    obj_annotations = inspect.get_annotations(obj)
    # Fall back to the base class annotations when the subclass declares none.
    if len(obj_annotations) == 0 and hasattr(obj, "__base__"):
        obj_annotations = inspect.get_annotations(
            # pyrefly: ignore [bad-argument-type]
            obj.__base__
        )
    annotations = []
    for field in obj._fields:
        if field in obj_annotations:
            field_type = obj_annotations[field]
            # [Note: ForwardRef annotations in NamedTuple attributes]
            # NamedTuple types are slightly different from normal types.
            #
            # Normally, annotations are evaluated like this (during jit.script):
            # 1. Load strings of python code into c++ and parse.
            # 2. Get annotations as strings
            # 3. Use the PythonResolver's resolution callback (rcb) to convert
            #    the string into a python object
            # 4. We call into annotations.py:ann_to_type to convert python obj
            #    from step 3 into a type that torchscript understands.
            #
            # NamedTuples are more complicated, because it has sub-types.
            # Normally, once we have the NamedTuple type object from #3,
            # we can just look at the annotation literal values and use
            # ann_to_type directly on them.
            #
            # But sometimes, users will annotate with string literals, e.g.
            #    x: 'int'
            # This also happens with PEP563 (from __forward__ import annotations)
            #
            # These annotations appear in the annotation dict as ForwardRef('int').
            #
            # Then, we need to convert the string into a python object. This
            # requires having local context for custom objects or imported types.
            # rcb() is what gives us this. So, we plumb rcb through the stack so
            # it can be used in this context for the if block below.
            #
            # FAQ:
            # - Why do we need this special handling for NamedTuple but string
            #   annotations work fine for normal types? Normally, we parse the
            #   string directly and then call rcb() directly from C++.
            # - Why not use ForwardRef._evaluate? For that, we need globals()
            #   and locals() for the local context where the NamedTuple was defined.
            #   rcb is what lets us look up into these. So, basically rcb does the
            #   hard work for us.
            if isinstance(field_type, ForwardRef) and rcb is not None:
                rcb_type = rcb(field_type.__forward_arg__)
                # rcb returns None if it can't find anything.
                if rcb_type is None:
                    raise ValueError(
                        f"Unknown type annotation: '{field_type}' in NamedTuple {obj.__name__}."
                        f" Likely due to partial support for ForwardRef parameters in NamedTuples, see #95858."
                        f" Issue occurred at {loc.highlight()}"
                    )
                field_type = rcb_type
            the_type = torch.jit.annotations.ann_to_type(field_type, loc, rcb)
            annotations.append(the_type)
        else:
            # Unannotated field: let TorchScript infer a tensor type.
            annotations.append(torch._C.TensorType.getInferred())
    # NOTE(review): type(obj).__name__ is the *metaclass* name for a class
    # object — confirm callers expect this rather than obj.__name__.
    return type(obj).__name__, obj._fields, annotations, defaults
def _create_named_tuple(
t,
unqual_name: str,
field_names: list[str],
defaults: tuple[Any, ...],
):
TupleType = collections.namedtuple(unqual_name, field_names, defaults=defaults) # type: ignore[call-arg, no-redef, misc]
return TupleType(*t)
@contextlib.contextmanager
def _disable_emit_hooks():
    """Context manager: temporarily clear the JIT emit hooks, restoring them on exit."""
    # Save the current (module_hook, function_hook) pair before clearing.
    hooks = torch._C._jit_get_emit_hooks()
    torch._C._jit_set_emit_hooks(None, None)
    try:
        yield
    finally:
        # Always restore, even if the body raised.
        torch._C._jit_set_emit_hooks(hooks[0], hooks[1])
def _disable_emit_hooks_decorator(_DecoratorContextManager) -> None:  # noqa: F811
    # NOTE(review): these inner functions are defined but never attached to the
    # argument or returned, so this function appears to have no effect — confirm
    # whether it is dead code kept for backwards compatibility.
    # noqa: F841
    def __enter__(self) -> None:
        self.hooks = torch._C._jit_get_emit_hooks()
        torch._C._jit_set_emit_hooks(None, None)

    def __exit__(self, *args) -> None:
        torch._C._jit_set_emit_hooks(self.hooks[0], self.hooks[1])
def _is_exception(obj) -> bool:
if not inspect.isclass(obj):
return False
return issubclass(obj, Exception)
def raise_error_container_parameter_missing(target_type) -> None:
    """Raise RuntimeError for a bare container annotation missing its type args."""
    # Dict-like names (ending in "ict") take two type parameters in the example.
    if target_type.endswith("ict"):
        message = (
            f"Attempted to use {target_type} without "
            "contained types. Please add contained type, e.g. "
            f"{target_type}[int, int]"
        )
    else:
        message = (
            f"Attempted to use {target_type} without a "
            "contained type. Please add a contained type, e.g. "
            f"{target_type}[int]"
        )
    raise RuntimeError(message)
# Maps bare (unparameterized) container annotations to the display name used
# in the "missing contained type" error message.
_RAW_TYPE_NAME_MAPPING = {
    dict: "dict",
    list: "list",
    tuple: "tuple",
    typing.Dict: "Dict",  # noqa: UP006
    typing.List: "List",  # noqa: UP006
    typing.Optional: "Optional",
    typing.Tuple: "Tuple",  # noqa: UP006
}
def check_args_exist(target_type) -> None:
    """Raise if *target_type* is a bare typing/builtin container without type args."""
    mapped_name = _RAW_TYPE_NAME_MAPPING.get(target_type)
    if mapped_name:
        raise_error_container_parameter_missing(mapped_name)
def check_empty_containers(obj) -> None:
    """Warn that eager torch.jit.isinstance cannot see element types of an empty container."""
    # Membership test uses ==, matching the original `obj == [] or ...` checks.
    if obj in ([], {}, ()):
        warnings.warn(
            "The inner type of a container is lost when "
            "calling torch.jit.isinstance in eager mode. For "
            "example, List[int] would become list and "
            "therefore falsely return True for List[float] or"
            " List[str].",
            stacklevel=2,
        )
# supports List/Dict/Tuple and Optional types
# TODO support future
def container_checker(obj, target_type) -> bool:
origin_type = get_origin(target_type)
check_args_exist(target_type)
if origin_type is None:
return False
elif origin_type is list or origin_type is typing.List: # noqa: UP006
check_empty_containers(obj)
if not isinstance(obj, list):
return False
arg_type = get_args(target_type)[0]
arg_origin = get_origin(arg_type)
for el in obj:
# check if nested container, ex: List[List[str]]
if arg_origin: # processes nested container, ex: List[List[str]]
if not container_checker(el, arg_type):
return False
elif not isinstance(el, arg_type):
return False
return True
elif origin_type is typing.Dict or origin_type is dict: # noqa: UP006
check_empty_containers(obj)
if not isinstance(obj, dict):
return False
key_type = get_args(target_type)[0]
val_type = get_args(target_type)[1]
for key, val in obj.items():
# check if keys are of right type
if not isinstance(key, key_type):
return False
val_origin = get_origin(val_type)
if val_origin:
if not container_checker(val, val_type):
return False
elif not isinstance(val, val_type):
return False
return True
elif origin_type is typing.Tuple or origin_type is tuple: # noqa: UP006
check_empty_containers(obj)
if not isinstance(obj, tuple):
return False
arg_types = get_args(target_type)
if len(obj) != len(arg_types):
return False
for el, el_type in zip(obj, arg_types):
el_origin = get_origin(el_type)
if el_origin:
if not container_checker(el, el_type):
return False
elif not isinstance(el, el_type):
return False
return True
elif origin_type is Union or issubclass(
# pyrefly: ignore [bad-argument-type]
origin_type,
BuiltinUnionType,
): # also handles Optional
if obj is None: # check before recursion because None is always fine
return True
inner_types = get_args(target_type)
for t in inner_types:
t_origin = get_origin(t)
if t_origin:
return container_checker(obj, t)
elif isinstance(obj, t):
return True
return False
def _isinstance(obj, target_type) -> bool:
    """Eager-mode replacement for isinstance that understands typing generics."""
    if isinstance(target_type, collections.abc.Container):
        # Only a tuple of types is accepted, mirroring builtin isinstance.
        if not isinstance(target_type, tuple):
            raise RuntimeError(
                "The second argument to "
                "`torch.jit.isinstance` must be a type "
                "or a tuple of types"
            )
        return any(_isinstance(obj, candidate) for candidate in target_type)

    if get_origin(target_type):
        return container_checker(obj, target_type)

    # Check to handle non-typed optional origin returns as none instead
    # of as optional in 3.7-3.8
    check_args_exist(target_type)

    # handle non-containers
    return isinstance(obj, target_type)
|
_IgnoreContextManager
|
python
|
getsentry__sentry
|
src/sentry/models/releaseprojectenvironment.py
|
{
"start": 453,
"end": 590
}
|
class ____(str, Enum):
ADOPTED = "adopted"
LOW_ADOPTION = "low_adoption"
REPLACED = "replaced"
@region_silo_model
|
ReleaseStages
|
python
|
modin-project__modin
|
modin/config/envvars.py
|
{
"start": 41283,
"end": 41492
}
|
class ____(EnvironmentVariable, type=int):
"""Maximum number of rows which can be processed using local, native, pandas."""
varname = "MODIN_NATIVE_MAX_ROWS"
default = 10_000_000
|
NativePandasMaxRows
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pyupgrade/UP042.py
|
{
"start": 50,
"end": 76
}
|
class ____(Enum, str): ...
|
B
|
python
|
doocs__leetcode
|
solution/3200-3299/3270.Find the Key of the Numbers/Solution.py
|
{
"start": 0,
"end": 264
}
|
class ____:
def generateKey(self, num1: int, num2: int, num3: int) -> int:
ans, k = 0, 1
for _ in range(4):
x = min(num1 // k % 10, num2 // k % 10, num3 // k % 10)
ans += x * k
k *= 10
return ans
|
Solution
|
python
|
walkccc__LeetCode
|
solutions/2029. Stone Game IX/2029.py
|
{
"start": 0,
"end": 238
}
|
class ____:
def stoneGameIX(self, stones: list[int]) -> bool:
count = collections.Counter(stone % 3 for stone in stones)
if count[0] % 2 == 0:
return min(count[1], count[2]) > 0
return abs(count[1] - count[2]) > 2
|
Solution
|
python
|
django__django
|
tests/foreign_object/models/article.py
|
{
"start": 1779,
"end": 2475
}
|
class ____(models.Model):
active_translation = ActiveTranslationField(
"ArticleTranslation",
from_fields=["id"],
to_fields=["article"],
related_name="+",
on_delete=models.CASCADE,
null=True,
)
active_translation_q = ActiveTranslationFieldWithQ(
"ArticleTranslation",
from_fields=["id"],
to_fields=["article"],
related_name="+",
on_delete=models.CASCADE,
null=True,
)
pub_date = models.DateField()
def __str__(self):
try:
return self.active_translation.title
except ArticleTranslation.DoesNotExist:
return "[No translation found]"
|
Article
|
python
|
google__jax
|
jax/_src/pallas/fuser/block_spec.py
|
{
"start": 4524,
"end": 5490
}
|
class ____:
scalar_prefetch: Any | None
program_ids: tuple[int | jax.Array, ...] | None
avals_in: tuple[core.AbstractValue, ...] | None
avals_out: tuple[core.AbstractValue, ...] | None
in_block_specs: tuple[pallas_core.BlockSpec, ...]
out_block_specs: tuple[pallas_core.BlockSpec, ...]
grid: tuple[int | jax.Array, ...] | None
scalar_prefetch_handler: Any | None
out_usages: tuple[set[Usage], ...] | None
def get_program_ids(self):
if self.program_ids is None:
raise ValueError('Program ids not available.')
return self.program_ids
def get_in_block_indices(self):
with _sp_context(*self.scalar_prefetch):
return tuple(
bs.index_map(*self.program_ids) for bs in self.in_block_specs
)
def get_out_block_indices(self):
with _sp_context(*self.scalar_prefetch):
return tuple(
bs.index_map(*self.program_ids) for bs in self.out_block_specs
)
_illegal = object()
|
KernelEvalContext
|
python
|
pytorch__pytorch
|
test/distributed/checkpoint/test_fsspec.py
|
{
"start": 6239,
"end": 7071
}
|
class ____(TestCase):
@with_temp_dir
def test_remove_on_fail(self):
fs = FileSystem()
path = fs.init_path(self.temp_dir)
write_file = fs.concat_path(path, "writeable")
with self.assertRaises(OSError):
with fs.create_stream(write_file, "w") as s:
s.write("aaa")
raise OSError("fail")
self.assertFalse(fs.exists(write_file))
read_file = fs.concat_path(path, "readable")
with fs.create_stream(read_file, "w") as s:
s.write("bbb")
self.assertTrue(fs.exists(read_file))
with self.assertRaises(OSError):
with fs.create_stream(read_file, "r") as s:
raise OSError("fail")
self.assertTrue(fs.exists(read_file))
if __name__ == "__main__":
run_tests()
|
TestFileSystem
|
python
|
PrefectHQ__prefect
|
src/prefect/server/events/schemas/events.py
|
{
"start": 6671,
"end": 8720
}
|
class ____(Event):
"""The server-side view of an event that has happened to a Resource after it has
been received by the server"""
model_config: ClassVar[ConfigDict] = ConfigDict(
extra="ignore", from_attributes=True
)
received: prefect.types._datetime.DateTime = Field(
default_factory=lambda: prefect.types._datetime.now("UTC"),
description="When the event was received by Prefect Cloud",
)
def as_database_row(self) -> dict[str, Any]:
row = self.model_dump()
row["resource_id"] = self.resource.id
row["recorded"] = prefect.types._datetime.now("UTC")
row["related_resource_ids"] = [related.id for related in self.related]
return row
def as_database_resource_rows(self) -> List[Dict[str, Any]]:
def without_id_and_role(resource: Resource) -> Dict[str, str]:
d: Dict[str, str] = resource.root.copy()
d.pop("prefect.resource.id", None)
d.pop("prefect.resource.role", None)
return d
return [
{
"occurred": self.occurred,
"resource_id": resource.id,
"resource_role": (
resource.role if isinstance(resource, RelatedResource) else ""
),
"resource": without_id_and_role(resource),
"event_id": self.id,
}
for resource in [self.resource, *self.related]
]
def matches(expected: str, value: Optional[str]) -> bool:
"""Returns true if the given value matches the expected string, which may
include a a negation prefix ("!this-value") or a wildcard suffix
("any-value-starting-with*")"""
if value is None:
return False
positive = True
if expected.startswith("!"):
expected = expected[1:]
positive = False
if expected.endswith("*"):
match = value.startswith(expected[:-1])
else:
match = value == expected
return match if positive else not match
|
ReceivedEvent
|
python
|
langchain-ai__langchain
|
libs/langchain/tests/unit_tests/callbacks/fake_callback_handler.py
|
{
"start": 6642,
"end": 9397
}
|
class ____(AsyncCallbackHandler, BaseFakeCallbackHandlerMixin):
"""Fake async callback handler for testing."""
@property
def ignore_llm(self) -> bool:
"""Whether to ignore LLM callbacks."""
return self.ignore_llm_
@property
def ignore_chain(self) -> bool:
"""Whether to ignore chain callbacks."""
return self.ignore_chain_
@property
def ignore_agent(self) -> bool:
"""Whether to ignore agent callbacks."""
return self.ignore_agent_
@override
async def on_retry(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_retry_common()
@override
async def on_llm_start(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_llm_start_common()
@override
async def on_llm_new_token(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_llm_new_token_common()
@override
async def on_llm_end(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_llm_end_common()
@override
async def on_llm_error(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_llm_error_common()
@override
async def on_chain_start(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_chain_start_common()
@override
async def on_chain_end(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_chain_end_common()
@override
async def on_chain_error(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_chain_error_common()
@override
async def on_tool_start(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_tool_start_common()
@override
async def on_tool_end(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_tool_end_common()
@override
async def on_tool_error(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_tool_error_common()
@override
async def on_agent_action(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_agent_action_common()
@override
async def on_agent_finish(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_agent_finish_common()
@override
async def on_text(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_text_common()
def __deepcopy__(self, memo: dict) -> "FakeAsyncCallbackHandler": # type: ignore[override]
return self
|
FakeAsyncCallbackHandler
|
python
|
django-haystack__django-haystack
|
test_haystack/discovery/search_indexes.py
|
{
"start": 83,
"end": 249
}
|
class ____(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, model_attr="body")
def get_model(self):
return Foo
|
FooIndex
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-the-k-beauty-of-a-number.py
|
{
"start": 52,
"end": 512
}
|
class ____(object):
def divisorSubstrings(self, num, k):
"""
:type num: int
:type k: int
:rtype: int
"""
result = curr = 0
s = map(int, str(num))
base = 10**(k-1)
for i, x in enumerate(s):
if i-k >= 0:
curr -= s[i-k]*base
curr = curr*10+x
if i+1 >= k:
result += int(curr and num%curr == 0)
return result
|
Solution
|
python
|
plotly__plotly.py
|
plotly/graph_objs/sunburst/_legendgrouptitle.py
|
{
"start": 233,
"end": 2946
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "sunburst"
_path_str = "sunburst.legendgrouptitle"
_valid_props = {"font", "text"}
@property
def font(self):
"""
Sets this legend group's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.sunburst.legendgrouptitle.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.sunburst.legendgrouptitle.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def text(self):
"""
Sets the title of the legend group.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this legend group's title font.
text
Sets the title of the legend group.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Legendgrouptitle object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.sunburst.Legendgrouptitle`
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
Legendgrouptitle
"""
super().__init__("legendgrouptitle")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.sunburst.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sunburst.Legendgrouptitle`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Legendgrouptitle
|
python
|
walkccc__LeetCode
|
solutions/1656. Design an Ordered Stream/1656.py
|
{
"start": 0,
"end": 412
}
|
class ____:
def __init__(self, n: int):
self.values = [''] * n
self.i = 0 # self.values' index (0-indexed)
def insert(self, idKey: int, value: str) -> list[str]:
idKey -= 1 # Converts to 0-indexed.
self.values[idKey] = value
if idKey > self.i:
return []
while self.i < len(self.values) and self.values[self.i]:
self.i += 1
return self.values[idKey:self.i]
|
OrderedStream
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/links/data_loss_prevention.py
|
{
"start": 3993,
"end": 4269
}
|
class ____(BaseGoogleLink):
"""Helper class for constructing Cloud Data Loss Prevention link."""
name = "Cloud DLP Inspect Templates List"
key = "cloud_dlp_inspect_templates_list_key"
format_str = DLP_INSPECT_TEMPLATES_LIST_LINK
|
CloudDLPInspectTemplatesListLink
|
python
|
getsentry__sentry
|
src/sentry/api/serializers/rest_framework/groupsearchview.py
|
{
"start": 610,
"end": 1928
}
|
class ____(serializers.Serializer):
id = serializers.CharField(required=False)
name = serializers.CharField(required=True)
query = serializers.CharField(required=True, allow_blank=True)
querySort = serializers.ChoiceField(
required=False, choices=SortOptions.as_choices(), default=SortOptions.DATE
)
projects = serializers.ListField(required=True, allow_empty=True)
environments = serializers.ListField(required=True, allow_empty=True)
timeFilters = serializers.DictField(required=True, allow_empty=False)
def validate_projects(self, value):
if value != [-1]:
project_ids = set(value)
existing_project_ids = set(
Project.objects.filter(
id__in=project_ids,
organization=self.context["organization"],
).values_list("id", flat=True)
)
if project_ids != existing_project_ids:
raise ValidationError(detail="One or more projects do not exist")
return value
def validate(self, data) -> GroupSearchViewValidatorResponse:
if data["projects"] == [-1]:
data["projects"] = []
data["isAllProjects"] = True
else:
data["isAllProjects"] = False
return data
|
ViewValidator
|
python
|
pytorch__pytorch
|
torch/ao/quantization/pt2e/_affine_quantization.py
|
{
"start": 29917,
"end": 33819
}
|
class ____(AffineQuantizedObserverBase):
def __init__(
self,
mapping_type: MappingType,
target_dtype: torch.dtype,
granularity: Granularity,
averaging_constant=0.01,
quant_min: int | None = None,
quant_max: int | None = None,
eps: float | None = None,
is_dynamic=False,
scale_dtype: torch.dtype | None = None,
zero_point_dtype: torch.dtype | None = None,
preserve_zero: bool = True,
zero_point_domain: ZeroPointDomain | None = ZeroPointDomain.INT,
# there could be some extra args that's ignored
**kwargs,
):
self.is_dynamic = is_dynamic
self.averaging_constant = averaging_constant
if is_dynamic and self.averaging_constant != 1:
raise NotImplementedError(
"MovingAverageMinMaxObserver doesn't support dynamic quantization for "
f"averaging constant of {self.averaging_constant}"
)
super().__init__(
mapping_type=mapping_type,
target_dtype=target_dtype,
granularity=granularity,
quant_min=quant_min,
quant_max=quant_max,
eps=eps,
scale_dtype=scale_dtype,
zero_point_dtype=zero_point_dtype,
preserve_zero=preserve_zero,
zero_point_domain=zero_point_domain,
)
def forward(self, input: torch.Tensor):
if input.numel() == 0:
return input
input_detached = input.detach()
self.original_dtype = input_detached.dtype
if self.granularity is None:
raise AssertionError("granularity is None")
self.block_size = get_block_size(input_detached.shape, self.granularity)
shape_for_reduction, reduction_dims = _get_reduction_params(
self.block_size, input_detached.size()
)
input_detached = input_detached.view(shape_for_reduction)
min_val = torch.amin(input_detached, dim=reduction_dims, keepdim=False)
max_val = torch.amax(input_detached, dim=reduction_dims, keepdim=False)
if not hasattr(self, "min_val") or not hasattr(self, "max_val"):
self.min_val = min_val
self.max_val = max_val
else:
if self.min_val.shape != min_val.shape:
raise AssertionError(
f"Can't update existing min_val - shape mismatch, self.min_val:{self.min_val.shape} != min_val:{min_val.shape}"
)
if self.max_val.shape != max_val.shape:
raise AssertionError(
f"Can't update existing max_val - shape mismatch, self.max_val {self.max_val.shape} != max_val:{max_val.shape}"
)
min_val = self.min_val + self.averaging_constant * (min_val - self.min_val)
max_val = self.max_val + self.averaging_constant * (max_val - self.max_val)
self.min_val.copy_(min_val)
self.max_val.copy_(max_val)
# returning original input
return input
def calculate_qparams(self) -> tuple[torch.Tensor, torch.Tensor]:
if not (hasattr(self, "min_val") and hasattr(self, "max_val")):
raise AssertionError(
"Expecting the observer has min_val and max_val, please run the observer before calling calculate_qparams"
)
return choose_qparams_affine_with_min_max(
self.min_val,
self.max_val,
self.mapping_type,
[], # BlockSize is not needed because the min/max are already reduced
self.target_dtype,
self.quant_min,
self.quant_max,
self.eps,
self.scale_dtype,
self.zero_point_dtype,
self.preserve_zero,
self.zero_point_domain,
)
|
AffineQuantizedMovingAverageMinMaxObserver
|
python
|
FactoryBoy__factory_boy
|
tests/test_alchemy.py
|
{
"start": 575,
"end": 774
}
|
class ____(SQLAlchemyModelFactory):
class Meta:
model = models.NonIntegerPk
sqlalchemy_session = models.session
id = factory.Sequence(lambda n: 'foo%d' % n)
|
NonIntegerPkFactory
|
python
|
scrapy__scrapy
|
tests/test_exporters.py
|
{
"start": 12591,
"end": 16222
}
|
class ____(TestBaseItemExporter):
def _get_exporter(self, **kwargs):
return XmlItemExporter(self.output, **kwargs)
def assertXmlEquivalent(self, first, second, msg=None):
def xmltuple(elem):
children = list(elem.iterchildren())
if children:
return [(child.tag, sorted(xmltuple(child))) for child in children]
return [(elem.tag, [(elem.text, ())])]
def xmlsplit(xmlcontent):
doc = lxml.etree.fromstring(xmlcontent)
return xmltuple(doc)
assert xmlsplit(first) == xmlsplit(second), msg
def assertExportResult(self, item, expected_value):
fp = BytesIO()
ie = XmlItemExporter(fp)
ie.start_exporting()
ie.export_item(item)
ie.finish_exporting()
del ie # See the first “del self.ie” in this file for context.
self.assertXmlEquivalent(fp.getvalue(), expected_value)
def _check_output(self):
expected_value = (
b'<?xml version="1.0" encoding="utf-8"?>\n'
b"<items><item><age>22</age><name>John\xc2\xa3</name></item></items>"
)
self.assertXmlEquivalent(self.output.getvalue(), expected_value)
def test_multivalued_fields(self):
self.assertExportResult(
self.item_class(name=["John\xa3", "Doe"], age=[1, 2, 3]),
b"""<?xml version="1.0" encoding="utf-8"?>\n
<items>
<item>
<name><value>John\xc2\xa3</value><value>Doe</value></name>
<age><value>1</value><value>2</value><value>3</value></age>
</item>
</items>
""",
)
def test_nested_item(self):
i1 = {"name": "foo\xa3hoo", "age": "22"}
i2 = {"name": "bar", "age": i1}
i3 = self.item_class(name="buz", age=i2)
self.assertExportResult(
i3,
b"""<?xml version="1.0" encoding="utf-8"?>\n
<items>
<item>
<age>
<age>
<age>22</age>
<name>foo\xc2\xa3hoo</name>
</age>
<name>bar</name>
</age>
<name>buz</name>
</item>
</items>
""",
)
def test_nested_list_item(self):
i1 = {"name": "foo"}
i2 = {"name": "bar", "v2": {"egg": ["spam"]}}
i3 = self.item_class(name="buz", age=[i1, i2])
self.assertExportResult(
i3,
b"""<?xml version="1.0" encoding="utf-8"?>\n
<items>
<item>
<age>
<value><name>foo</name></value>
<value><name>bar</name><v2><egg><value>spam</value></egg></v2></value>
</age>
<name>buz</name>
</item>
</items>
""",
)
def test_nonstring_types_item(self):
item = self._get_nonstring_types_item()
self.assertExportResult(
item,
b"""<?xml version="1.0" encoding="utf-8"?>\n
<items>
<item>
<float>3.14</float>
<boolean>False</boolean>
<number>22</number>
<time>2015-01-01 01:01:01</time>
</item>
</items>
""",
)
|
TestXmlItemExporter
|
python
|
numba__numba
|
numba/tests/test_fastmath.py
|
{
"start": 162,
"end": 4564
}
|
class ____(unittest.TestCase):
def test_jit(self):
def foo(x):
return x + math.sin(x)
fastfoo = njit(fastmath=True)(foo)
slowfoo = njit(foo)
self.assertEqual(fastfoo(0.5), slowfoo(0.5))
fastllvm = fastfoo.inspect_llvm(fastfoo.signatures[0])
slowllvm = slowfoo.inspect_llvm(slowfoo.signatures[0])
# Ensure fast attribute in fast version only
self.assertIn('fadd fast', fastllvm)
self.assertIn('call fast', fastllvm)
self.assertNotIn('fadd fast', slowllvm)
self.assertNotIn('call fast', slowllvm)
def test_jit_subset_behaviour(self):
def foo(x, y):
return (x - y) + y
fastfoo = njit(fastmath={'reassoc', 'nsz'})(foo)
slowfoo = njit(fastmath={'reassoc'})(foo)
self.assertEqual(fastfoo(0.5, np.inf), 0.5)
self.assertTrue(np.isnan(slowfoo(0.5, np.inf)))
def test_jit_subset_code(self):
def foo(x):
return x + math.sin(x)
fastfoo = njit(fastmath={'reassoc', 'nsz'})(foo)
slowfoo = njit()(foo)
self.assertEqual(fastfoo(0.5), slowfoo(0.5))
fastllvm = fastfoo.inspect_llvm(fastfoo.signatures[0])
slowllvm = slowfoo.inspect_llvm(slowfoo.signatures[0])
# Ensure fast attributes in fast version only
self.assertNotIn('fadd fast', slowllvm)
self.assertNotIn('call fast', slowllvm)
self.assertNotIn('fadd reassoc nsz', slowllvm)
self.assertNotIn('call reassoc nsz', slowllvm)
self.assertNotIn('fadd nsz reassoc', slowllvm)
self.assertNotIn('call nsz reassoc', slowllvm)
self.assertTrue(
('fadd nsz reassoc' in fastllvm) or
('fadd reassoc nsz' in fastllvm),
fastllvm
)
self.assertTrue(
('call nsz reassoc' in fastllvm) or
('call reassoc nsz' in fastllvm),
fastllvm
)
def test_jit_subset_errors(self):
with self.assertRaises(ValueError) as raises:
njit(fastmath={'spqr'})(lambda x: x + 1)(1)
self.assertIn(
"Unrecognized fastmath flags:",
str(raises.exception),
)
with self.assertRaises(ValueError) as raises:
njit(fastmath={'spqr': False})(lambda x: x + 1)(1)
self.assertIn(
'Unrecognized fastmath flags:',
str(raises.exception),
)
with self.assertRaises(ValueError) as raises:
njit(fastmath=1337)(lambda x: x + 1)(1)
self.assertIn(
'Expected fastmath option(s) to be',
str(raises.exception),
)
def test_vectorize(self):
def foo(x):
return x + math.sin(x)
fastfoo = vectorize(fastmath=True)(foo)
slowfoo = vectorize(foo)
x = np.random.random(8).astype(np.float32)
# capture the optimized llvm to check for fast flag
with override_config('DUMP_OPTIMIZED', True):
with captured_stdout() as slow_cap:
expect = slowfoo(x)
slowllvm = slow_cap.getvalue()
with captured_stdout() as fast_cap:
got = fastfoo(x)
fastllvm = fast_cap.getvalue()
np.testing.assert_almost_equal(expect, got)
self.assertIn('fadd fast', fastllvm)
self.assertIn('call fast', fastllvm)
self.assertNotIn('fadd fast', slowllvm)
self.assertNotIn('call fast', slowllvm)
def test_guvectorize(self):
def foo(x, out):
out[0] = x + math.sin(x)
x = np.random.random(8).astype(np.float32)
with override_config('DUMP_OPTIMIZED', True):
types = ['(float32, float32[:])']
sig = '()->()'
with captured_stdout() as fast_cap:
fastfoo = guvectorize(types, sig, fastmath=True)(foo)
fastllvm = fast_cap.getvalue()
with captured_stdout() as slow_cap:
slowfoo = guvectorize(types, sig)(foo)
slowllvm = slow_cap.getvalue()
expect = slowfoo(x)
got = fastfoo(x)
np.testing.assert_almost_equal(expect, got)
self.assertIn('fadd fast', fastllvm)
self.assertIn('call fast', fastllvm)
self.assertNotIn('fadd fast', slowllvm)
self.assertNotIn('call fast', slowllvm)
if __name__ == '__main__':
unittest.main()
|
TestFastMath
|
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-number-of-steps-to-make-two-strings-anagram-ii.py
|
{
"start": 63,
"end": 346
}
|
class ____(object):
def minSteps(self, s, t):
"""
:type s: str
:type t: str
:rtype: int
"""
cnt1, cnt2 = collections.Counter(s), collections.Counter(t)
return sum((cnt1-cnt2).itervalues())+sum((cnt2-cnt1).itervalues())
|
Solution
|
python
|
google__jax
|
jax/_src/pallas/core.py
|
{
"start": 3425,
"end": 3561
}
|
class ____(semaphore_dtype):
"""Regular semaphore dtype.
Like its superclass, this class should never be instantiated.
"""
|
semaphore
|
python
|
google__jax
|
examples/ffi/tests/cpu_examples_test.py
|
{
"start": 2994,
"end": 3460
}
|
class ____(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if not jtu.test_device_matches(["cpu"]):
self.skipTest("Unsupported platform")
def test_basic_array(self):
x = jnp.linspace(0, 0.5, 10)
self.assertAllClose(cpu_examples.aliasing(x), x)
def test_basic_scalar(self):
x = jnp.int32(6)
self.assertAllClose(cpu_examples.aliasing(x), x)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
AliasingTests
|
python
|
celery__celery
|
examples/eventlet/bulk_task_producer.py
|
{
"start": 529,
"end": 1673
}
|
class ____:
"""Usage::
>>> app = Celery(broker='amqp://')
>>> ProducerPool(app)
"""
Receipt = Receipt
def __init__(self, app, size=20):
self.app = app
self.size = size
self.inqueue = LightQueue()
self._running = None
self._producers = None
def apply_async(self, task, args, kwargs, callback=None, **options):
if self._running is None:
self._running = spawn_n(self._run)
receipt = self.Receipt(callback)
self.inqueue.put((task, args, kwargs, options, receipt))
return receipt
def _run(self):
self._producers = [
spawn_n(self._producer) for _ in range(self.size)
]
def _producer(self):
inqueue = self.inqueue
with self.app.producer_or_acquire() as producer:
while 1:
task, args, kwargs, options, receipt = inqueue.get()
result = task.apply_async(args, kwargs,
producer=producer,
**options)
receipt.finished(result)
|
ProducerPool
|
python
|
fluentpython__example-code-2e
|
22-dyn-attr-prop/bulkfood/bulkfood_v2b.py
|
{
"start": 911,
"end": 1441
}
|
class ____:
def __init__(self, description, weight, price):
self.description = description
self.weight = weight
self.price = price
def subtotal(self):
return self.weight * self.price
def get_weight(self): # <1>
return self.__weight
def set_weight(self, value): # <2>
if value > 0:
self.__weight = value
else:
raise ValueError('value must be > 0')
weight = property(get_weight, set_weight) # <3>
# end::LINEITEM_V2B[]
|
LineItem
|
python
|
PyCQA__pyflakes
|
pyflakes/checker.py
|
{
"start": 6782,
"end": 7048
}
|
class ____(Binding):
"""
A binding that defines a function or a class.
"""
def redefines(self, other):
return (
super().redefines(other) or
(isinstance(other, Assignment) and self.name == other.name)
)
|
Definition
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/sensors/test_cloud_formation.py
|
{
"start": 1214,
"end": 3271
}
|
class ____:
@mock_aws
def setup_method(self, method):
self.client = boto3.client("cloudformation", region_name="us-east-1")
def test_init(self):
sensor = CloudFormationCreateStackSensor(
task_id="cf_create_stack_init",
stack_name="fake-stack",
# Generic hooks parameters
aws_conn_id="fake-conn-id",
region_name="eu-central-1",
verify=False,
botocore_config={"read_timeout": 42},
)
assert sensor.hook.client_type == "cloudformation"
assert sensor.hook.resource_type is None
assert sensor.hook.aws_conn_id == "fake-conn-id"
assert sensor.hook._region_name == "eu-central-1"
assert sensor.hook._verify is False
assert sensor.hook._config is not None
assert sensor.hook._config.read_timeout == 42
sensor = CloudFormationCreateStackSensor(task_id="cf_create_stack_init", stack_name="fake-stack")
assert sensor.hook.aws_conn_id == "aws_default"
assert sensor.hook._region_name is None
assert sensor.hook._verify is None
assert sensor.hook._config is None
@mock_aws
def test_poke(self):
self.client.create_stack(StackName="foobar", TemplateBody='{"Resources": {}}')
op = CloudFormationCreateStackSensor(task_id="task", stack_name="foobar")
assert op.poke({})
def test_poke_false(self, mocked_hook_client):
mocked_hook_client.describe_stacks.return_value = {"Stacks": [{"StackStatus": "CREATE_IN_PROGRESS"}]}
op = CloudFormationCreateStackSensor(task_id="task", stack_name="foo")
assert not op.poke({})
def test_poke_stack_in_unsuccessful_state(self, mocked_hook_client):
mocked_hook_client.describe_stacks.return_value = {"Stacks": [{"StackStatus": "bar"}]}
op = CloudFormationCreateStackSensor(task_id="task", stack_name="foo")
with pytest.raises(ValueError, match="Stack foo in bad state: bar"):
op.poke({})
|
TestCloudFormationCreateStackSensor
|
python
|
openai__openai-python
|
src/openai/types/beta/realtime/response_output_item_added_event.py
|
{
"start": 254,
"end": 713
}
|
class ____(BaseModel):
event_id: str
"""The unique ID of the server event."""
item: ConversationItem
"""The item to add to the conversation."""
output_index: int
"""The index of the output item in the Response."""
response_id: str
"""The ID of the Response to which the item belongs."""
type: Literal["response.output_item.added"]
"""The event type, must be `response.output_item.added`."""
|
ResponseOutputItemAddedEvent
|
python
|
wandb__wandb
|
wandb/sdk/wandb_run.py
|
{
"start": 14680,
"end": 14844
}
|
class ____:
sync_items_total: int = field(default=0)
sync_items_pending: int = field(default=0)
sync_time: datetime | None = field(default=None)
|
RunStatus
|
python
|
allegroai__clearml
|
clearml/binding/frameworks/tensorflow_bind.py
|
{
"start": 10606,
"end": 41792
}
|
class ____(object):
"""
TF SummaryWriter implementation that converts the tensorboard's summary into
ClearML events and reports the events (metrics) for an ClearML task (logger).
"""
_current_task = None
__report_hparams = True
_add_lock = threading.RLock()
_series_name_lookup = {}
# store all the created tensorboard writers in the system
# this allows us to as weather a certain tile/series already exist on some EventWriter
# and if it does, then we add to the series name the last token from the logdir
# (so we can differentiate between the two)
# key, value: key=hash(title, graph), value=EventTrainsWriter._id
_title_series_writers_lookup = {}
_event_writers_id_to_logdir = {}
# Protect against step (iteration) reuse, for example,
# steps counter inside an epoch, but wrapping around when epoch ends
# i.e. step = 0..100 then epoch ends and again step = 0..100
# We store the first report per title/series combination, and if wraparound occurs
# we synthetically continue to increase the step/iteration based on the previous epoch counter
# example: _title_series_wraparound_counter[('title', 'series')] =
# {'first_step':None, 'last_step':None, 'adjust_counter':0,}
_title_series_wraparound_counter = {}
@property
def variants(self) -> defaultdict:
return self._variants
def prepare_report(self) -> Dict[str, Any]:
return self.variants.copy()
def tag_splitter(
self,
tag: str,
num_split_parts: int,
split_char: str = "/",
join_char: str = "_",
default_title: str = "variant",
logdir_header: str = "series",
auto_reduce_num_split: bool = False,
force_add_prefix: str = None,
) -> (str, str):
"""
Split a tf.summary tag line to variant and metric.
Variant is the first part of the split tag, metric is the second.
:param str tag:
:param int num_split_parts:
:param str split_char: a character to split the tag on
:param str join_char: a character to join the splits
:param str default_title: variant to use in case no variant can be inferred automatically
:param str logdir_header: if 'series_last' then series=header: series, if 'series then series=series :header,
if 'title_last' then title=header title, if 'title' then title=title header
:param bool auto_reduce_num_split: if True and the tag is split for less parts then requested,
then requested number of split parts is adjusted.
:param str force_add_prefix: always add the prefix to the series name
:return: (str, str) variant and metric
"""
splitted_tag = tag.split(split_char)
if auto_reduce_num_split and num_split_parts > len(splitted_tag) - 1:
num_split_parts = max(1, len(splitted_tag) - 1)
series = join_char.join(splitted_tag[-num_split_parts:])
title = join_char.join(splitted_tag[:-num_split_parts]) or default_title
if force_add_prefix:
series = str(force_add_prefix) + series
# check if we already decided that we need to change the title/series
graph_id = hash((title, series))
if graph_id in self._graph_name_lookup:
return self._graph_name_lookup[graph_id]
# check if someone other than us used this combination
with self._add_lock:
event_writer_id = self._title_series_writers_lookup.get(graph_id, None)
if not event_writer_id:
# put us there
self._title_series_writers_lookup[graph_id] = self._id
elif event_writer_id != self._id:
# if there is someone else, change our series name and store us
org_series = series
org_title = title
other_logdir = self._event_writers_id_to_logdir[event_writer_id]
split_logddir = self._logdir.split("/")
unique_logdir = set(split_logddir) - set(other_logdir.split("/"))
header = "/".join(s for s in split_logddir if s in unique_logdir)
if logdir_header == "series_last":
series = header + ": " + series
elif logdir_header == "series":
series = series + " :" + header
elif logdir_header == "title":
title = title + " " + header
else: # logdir_header == 'title_last':
title = header + " " + title
graph_id = hash((title, series))
# check if for some reason the new series is already occupied
new_event_writer_id = self._title_series_writers_lookup.get(graph_id)
if new_event_writer_id is not None and new_event_writer_id != self._id:
# well that's about it, nothing else we could do
if logdir_header == "series_last":
series = str(self._logdir) + ": " + org_series
elif logdir_header == "series":
series = org_series + " :" + str(self._logdir)
elif logdir_header == "title":
title = org_title + " " + str(self._logdir)
else: # logdir_header == 'title_last':
title = str(self._logdir) + " " + org_title
graph_id = hash((title, series))
self._title_series_writers_lookup[graph_id] = self._id
# store for next time
self._graph_name_lookup[graph_id] = (title, series)
return title, series
def __init__(
self,
logger: Any,
logdir: Union[str, None] = None,
report_freq: int = 100,
image_report_freq: Union[int, None] = None,
histogram_update_freq_multiplier: int = 10,
histogram_granularity: int = 50,
max_keep_images: Union[int, None] = None,
) -> None:
"""
Create a compatible ClearML backend to the TensorFlow SummaryToEventTransformer
Everything will be serialized directly to the ClearML backend, instead of to the standard TF FileWriter
:param logger: The task.logger to use for sending the metrics (def: task.get_logger())
:param report_freq: How often to update the statistics values
:param image_report_freq: How often to upload images (step % image_update_freq == 0)
:param histogram_update_freq_multiplier: How often to upload histogram
(step//update_freq) % histogram_update_freq_multiplier == 0
:param histogram_granularity: How many histograms (lines) to display in the 3d histogram plot
:param max_keep_images: Maximum number of images to save before starting to reuse files (per title/metric pair)
"""
# We are the events_writer, so that's what we'll pass
IsTensorboardInit.set_tensorboard_used()
self._logdir = logdir or ("unknown %d" % len(self._event_writers_id_to_logdir))
# conform directory structure to unix
if os.path.sep == "\\":
self._logdir = self._logdir.replace("\\", "/")
self._id = hash(self._logdir)
self._event_writers_id_to_logdir[self._id] = self._logdir
self.max_keep_images = max_keep_images
self.report_freq = report_freq
self.image_report_freq = image_report_freq if image_report_freq else report_freq
self.histogram_granularity = histogram_granularity
self.histogram_update_freq_multiplier = histogram_update_freq_multiplier
self._histogram_update_call_counter = 0
self._logger = logger
self._visualization_mode = "RGB" # 'BGR'
self._variants = defaultdict(lambda: ())
self._scalar_report_cache = {}
self._hist_report_cache = {}
self._hist_x_granularity = 50
self._max_step = 0
self._graph_name_lookup = {}
self._generic_tensor_type_name_lookup = {}
self._grad_helper = WeightsGradientHistHelper(
logger=logger,
report_freq=report_freq,
histogram_update_freq_multiplier=histogram_update_freq_multiplier,
histogram_granularity=histogram_granularity,
)
def _decode_image(
self,
img_str: Union[str, bytes],
width: Optional[int] = None,
height: Optional[int] = None,
color_channels: Optional[int] = None,
) -> Union[str, np.ndarray, None]:
# noinspection PyBroadException
try:
if isinstance(img_str, bytes):
imdata = img_str
else:
imdata = base64.b64decode(img_str)
output = BytesIO(imdata)
im = Image.open(output)
image = np.asarray(im)
# if this is a GIF store as is
if getattr(im, "is_animated", None):
output.close()
fd, temp_file = mkstemp(
suffix=guess_extension(im.get_format_mimetype())
if hasattr(im, "get_format_mimetype")
else ".{}".format(str(im.format).lower())
)
with open(fd, "wb") as f:
f.write(imdata)
return temp_file
output.close()
if height is not None and height > 0 and width is not None and width > 0:
val = image.reshape((height, width, -1)).astype(np.uint8)
else:
val = image.astype(np.uint8)
if val.ndim == 3 and val.shape[2] == 3:
if self._visualization_mode == "BGR":
val = val[:, :, [2, 1, 0]]
else:
val = val
elif (val.ndim == 2) or (val.ndim == 3 and val.shape[2] == 1):
val = np.tile(np.atleast_3d(val), (1, 1, 3))
elif val.ndim == 3 and val.shape[2] == 4:
if self._visualization_mode == "BGR":
val = val[:, :, [2, 1, 0]]
else:
val = val[:, :, [0, 1, 2]]
except KeyboardInterrupt:
raise
except Exception as e:
logger = LoggerRoot.get_base_logger(TensorflowBinding)
logger.warning("Failed decoding debug image [%s, %s, %s]" % (width, height, color_channels))
logger.warning("Error: %s" % e)
val = None
return val
def _add_image_numpy(
self,
tag: str,
step: int,
img_data_np: Union[None, np.ndarray, str],
max_keep_images: int = None,
) -> None:
# only report images every specific interval
if step % self.image_report_freq != 0:
return None
if img_data_np is None:
return
# noinspection PyProtectedMember
title, series = self.tag_splitter(
tag,
num_split_parts=3,
default_title="Images",
logdir_header="title",
auto_reduce_num_split=True,
force_add_prefix=self._logger._get_tensorboard_series_prefix(),
)
step = self._fix_step_counter(title, series, step)
# check if this is a local temp file
if isinstance(img_data_np, str):
self._logger.report_image(
title=title,
series=series,
iteration=step,
local_path=img_data_np,
delete_after_upload=True,
max_image_history=self.max_keep_images if max_keep_images is None else max_keep_images,
)
return
if img_data_np.dtype != np.uint8:
# assume scale 0-1
img_data_np = (img_data_np * 255).astype(np.uint8)
# if 3d, pack into one big image
if img_data_np.ndim == 4:
dims = img_data_np.shape
stack_dim = int(np.sqrt(dims[0]))
# noinspection PyArgumentList
res = img_data_np.reshape(stack_dim, stack_dim, *dims[1:]).transpose((0, 2, 1, 3, 4))
tile_size_h = res.shape[0] * res.shape[1]
tile_size_w = res.shape[2] * res.shape[3]
img_data_np = res.reshape(tile_size_h, tile_size_w, -1)
self._logger.report_image(
title=title,
series=series,
iteration=step,
image=img_data_np,
max_image_history=self.max_keep_images if max_keep_images is None else max_keep_images,
)
def _add_image(self, tag: str, step: int, img_data: dict) -> None:
# only report images every specific interval
if step % self.image_report_freq != 0:
return None
width = img_data.get("width")
height = img_data.get("height")
colorspace = img_data.get("colorspace")
img_str = img_data["encodedImageString"]
matrix = self._decode_image(img_str, width=width, height=height, color_channels=colorspace)
if matrix is None:
return
return self._add_image_numpy(tag=tag, step=step, img_data_np=matrix)
def _add_scalar(self, tag: str, step: int, scalar_data: Union[float, str]) -> None:
default_title = tag if not self._logger._get_tensorboard_auto_group_scalars() else "Scalars"
series_per_graph = self._logger._get_tensorboard_single_series_per_graph()
# noinspection PyProtectedMember
title, series = self.tag_splitter(
tag,
num_split_parts=1,
default_title=default_title,
logdir_header="title" if series_per_graph else "series_last",
force_add_prefix=self._logger._get_tensorboard_series_prefix(),
)
step = self._fix_step_counter(title, series, step)
tag = self._get_add_scalars_event_tag(default_title)
possible_title = tag if series_per_graph else None
possible_tag = None if series_per_graph else tag
title = title + possible_title if possible_title else title
series = possible_tag or series
# update scalar cache
num, value = self._scalar_report_cache.get((title, series), (0, 0))
# nan outputs is a string, it's probably a NaN
if isinstance(scalar_data, six.string_types):
# noinspection PyBroadException
try:
scalar_data = float(scalar_data)
except Exception:
scalar_data = float("nan")
# nan outputs nan
self._scalar_report_cache[(title, series)] = (
num + 1,
(value + scalar_data) if scalar_data == scalar_data else scalar_data,
)
# only report images every specific interval
if step % self.report_freq != 0:
return None
# calculate mean and zero cache
num, value = self._scalar_report_cache.get((title, series), (0, 0))
scalar_data = value / num
self._scalar_report_cache[(title, series)] = (0, 0)
self._logger.report_scalar(
title=title,
series=series,
iteration=step,
value=scalar_data,
)
def _add_histogram(self, tag: str, step: int, hist_data: Union[dict, np.ndarray]) -> None:
# noinspection PyProtectedMember
title, series = self.tag_splitter(
tag,
num_split_parts=1,
default_title="Histograms",
logdir_header="series",
force_add_prefix=self._logger._get_tensorboard_series_prefix(),
)
self._grad_helper.add_histogram(title=title, series=series, step=step, hist_data=hist_data)
def _add_plot(self, tag: str, step: int, values: dict, vdict: dict) -> None:
# noinspection PyBroadException
try:
if values.get("floatVal"):
plot_values = np.array(values.get("floatVal"), dtype=np.float32)
else:
plot_values = np.frombuffer(
base64.b64decode(values["tensorContent"].encode("utf-8")),
dtype=np.float32,
)
plot_values = plot_values.reshape(
(
int(values["tensorShape"]["dim"][0]["size"]),
int(values["tensorShape"]["dim"][1]["size"]),
)
)
if "metadata" in vdict:
if tag not in self._series_name_lookup:
self._series_name_lookup[tag] = [
(
tag,
vdict["metadata"].get("displayName", ""),
vdict["metadata"]["pluginData"]["pluginName"],
)
]
else:
# this should not happen, maybe it's another run, let increase the value
self._series_name_lookup[tag] += [
(
tag + "_%d" % (len(self._series_name_lookup[tag]) + 1),
vdict["metadata"].get("displayName", ""),
vdict["metadata"]["pluginData"]["pluginName"],
)
]
tag, series, plugin_name = self._series_name_lookup.get(tag, [(tag, tag, "")])[-1]
if "pr_curve" in plugin_name:
# our thresholds are evenly distributed, in that
# width = 1.0 / (num_thresholds - 1)
# thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
num_thresholds = plot_values.shape[1]
width = 1.0 / num_thresholds
thresholds = np.arange(0.0, 1.0, width, dtype=plot_values.dtype)
data_points = [
"Threshold ",
"TP ",
"FP ",
"TN ",
"FN ",
"Precision ",
" Recall",
]
series = [
{
"name": series,
"data": np.vstack((plot_values[-1], plot_values[-2])).T,
"labels": [
"".join(data_points)
+ "<br> {:.3f} ".format(thresholds[j])
+ " ".join(["%-3.2f" % v for v in plot_values[:, j]])
for j in range(num_thresholds)
],
}
]
reverse_xaxis = False
else:
reverse_xaxis = False
series = [{"name": series, "data": plot_values}]
self._logger.report_line_plot(
title=tag,
series=series,
xaxis="",
yaxis="",
iteration=step,
reverse_xaxis=reverse_xaxis,
)
except Exception:
pass
def _add_audio(
self,
tag: str,
step: int,
values: Union[None, dict],
audio_data: Union[None, bytes] = None,
) -> None:
# only report images every specific interval
if step % self.image_report_freq != 0:
return None
if values:
audio_str = values["encodedAudioString"]
audio_data = base64.b64decode(audio_str)
if audio_data is None:
return
# noinspection PyProtectedMember
title, series = self.tag_splitter(
tag,
num_split_parts=3,
default_title="Audio",
logdir_header="title",
auto_reduce_num_split=True,
force_add_prefix=self._logger._get_tensorboard_series_prefix(),
)
step = self._fix_step_counter(title, series, step)
stream = BytesIO(audio_data)
if values:
file_extension = guess_extension(values["contentType"]) or ".{}".format(
values["contentType"].split("/")[-1]
)
else:
# assume wav as default
file_extension = ".wav"
self._logger.report_media(
title=title,
series=series,
iteration=step,
stream=stream,
file_extension=file_extension,
max_history=self.max_keep_images,
)
def _add_hparams(self, hparams_metadata: Any) -> None:
if not EventTrainsWriter.__report_hparams:
return
# noinspection PyBroadException
try:
from tensorboard.plugins.hparams.metadata import (
parse_session_start_info_plugin_data,
)
content = hparams_metadata["metadata"]["pluginData"]["content"]
content = base64.b64decode(content)
session_start_info = parse_session_start_info_plugin_data(content)
session_start_info = MessageToDict(session_start_info)
hparams = session_start_info["hparams"]
EventTrainsWriter._current_task.update_parameters(
{"TB_hparams/{}".format(k): v for k, v in hparams.items()}
)
except Exception:
pass
def _add_text(self, tag: str, step: int, tensor_bytes: bytes) -> None:
# noinspection PyProtectedMember
title, series = self.tag_splitter(
tag,
num_split_parts=3,
default_title="Text",
logdir_header="title",
auto_reduce_num_split=True,
force_add_prefix=self._logger._get_tensorboard_series_prefix(),
)
step = self._fix_step_counter(title, series, step)
text = tensor_bytes.decode("utf-8", errors="replace")
self._logger.report_media(
title=title,
series=series,
iteration=step,
stream=six.StringIO(text),
file_extension=".txt",
max_history=self.max_keep_images,
)
@staticmethod
def _fix_step_counter(title: str, series: str, step: int) -> int:
key = (title, series)
if key not in EventTrainsWriter._title_series_wraparound_counter:
EventTrainsWriter._title_series_wraparound_counter[key] = {
"first_step": step,
"last_step": step,
"adjust_counter": 0,
}
return step
wraparound_counter = EventTrainsWriter._title_series_wraparound_counter[key]
# we decide on wrap around if the current step is less than 10% of the previous step
# notice since counter is int and we want to avoid rounding error, we have double check in the if
if step < wraparound_counter["last_step"] and step < 0.9 * wraparound_counter["last_step"]:
# adjust step base line
wraparound_counter["adjust_counter"] += wraparound_counter["last_step"] + (1 if step <= 0 else step)
# return adjusted step
wraparound_counter["last_step"] = step
return step + wraparound_counter["adjust_counter"]
def add_event(self, event: Any, step: Union[int, None] = None, walltime: Union[int, None] = None, **_: Any) -> None:
supported_metrics = {"simpleValue", "image", "histo", "tensor", "audio"}
def get_data(value_dict: Dict[str, Any], metric_search_order: Iterable[str]) -> Tuple[str, Any]:
data = None
metric_type = "Unsupported"
for variant in metric_search_order:
data = value_dict.get(variant)
if data is not None:
metric_type = variant
break
return metric_type, data
# Support multiple threads accessing this instance (i.e. let TF/Keras do what they need)
with self._add_lock:
# TODO: add report frequency threshold (i.e. if we are sending too much data, increase the report_freq)
# we should measure reports per second and throttle back the reporting details accordingly
msg_dict = MessageToDict(event)
summary = msg_dict.get("summary")
if summary is None:
msg_dict.pop("step", None)
msg_dict.pop("wallTime", None)
keys_list = [key for key in msg_dict.keys() if len(key) > 0]
keys_list = ", ".join(keys_list)
LoggerRoot.get_base_logger(TensorflowBinding).debug(
"event summary not found, message type unsupported: %s" % keys_list
)
return
value_dicts = summary.get("value")
# noinspection PyUnusedLocal
walltime = walltime or msg_dict.get("step")
step = step or msg_dict.get("step")
if step is None:
# when we start a new epoch there is no step in the msg_dict,
# we have to extract it manually
if hasattr(event, "step"):
step = int(event.step)
else:
step = 0
LoggerRoot.get_base_logger(TensorflowBinding).debug(
"Received event without step, assuming step = {}".format(step)
)
else:
step = int(step)
step = tweak_step(step)
self._max_step = max(self._max_step, step)
if value_dicts is None:
LoggerRoot.get_base_logger(TensorflowBinding).debug("Summary arrived without 'value'")
return
for vdict in value_dicts:
tag = vdict.pop("tag", None)
if tag is None:
# we should not get here
LoggerRoot.get_base_logger(TensorflowBinding).debug(
"No tag for 'value' existing keys %s" % ", ".join(vdict.keys())
)
continue
# noinspection PyBroadException
try:
from tensorboard.plugins.hparams.metadata import (
SESSION_START_INFO_TAG,
)
if tag == SESSION_START_INFO_TAG:
self._add_hparams(vdict)
continue
except Exception:
pass
metric, values = get_data(vdict, supported_metrics)
if metric == "simpleValue":
self._add_scalar(tag=tag, step=step, scalar_data=values)
elif metric == "histo":
self._add_histogram(tag=tag, step=step, hist_data=values)
elif metric == "image":
self._add_image(tag=tag, step=step, img_data=values)
elif metric == "audio":
self._add_audio(tag, step, values)
elif metric == "tensor" and values.get("dtype") == "DT_STRING":
# generic tensor
tensor_bytes = base64.b64decode("\n".join(values["stringVal"]))
plugin_type = (
self._generic_tensor_type_name_lookup.get(tag)
or vdict.get("metadata", {}).get("pluginData", {}).get("pluginName", "").lower()
)
if plugin_type == "audio":
self._generic_tensor_type_name_lookup[tag] = plugin_type
self._add_audio(tag, step, None, tensor_bytes)
elif plugin_type == "text":
self._generic_tensor_type_name_lookup[tag] = plugin_type
self._add_text(tag, step, tensor_bytes)
else:
# we do not support it
pass
elif metric == "tensor" and values.get("dtype") == "DT_FLOAT":
self._add_plot(tag, step, values, vdict)
else:
LoggerRoot.get_base_logger(TensorflowBinding).debug(
"Event unsupported. tag = %s, vdict keys [%s]" % (tag, ", ".join(vdict.keys()))
)
continue
def get_logdir(self) -> str:
    """Return a placeholder log directory, for FileWriter API compatibility.

    The directory is never actually used by this writer.

    :return: '.'
    """
    return "."
def flush(self) -> None:
    """Write all pending events to disk.

    Call this to make sure every event recorded so far has been handed to
    the underlying logger's flush.
    """
    self._logger.flush()
def close(self) -> None:
    """Flush pending events; call when the summary writer is no longer needed.

    NOTE(review): despite the name, this only flushes — the underlying
    logger is not actually closed here (the original behaved the same way).
    """
    self._logger.flush()
def reopen(self) -> None:
    """No-op kept for EventFileWriter API compatibility.

    A real EventFileWriter can be reopened after ``close`` to start a new
    events file in the same directory; this writer never really closes,
    so there is nothing to do here.
    """
    return None
def _get_add_scalars_event_tag(self, title_prefix: str) -> str:
    """
    Recover the original ``add_scalars`` tag encoded in this writer's logdir.

    :param str title_prefix: the table title prefix that was added to the series.
    :return: str same as tensorboard use; empty string when the logdir does not
        look like an ``add_scalars`` sub-run (no ``_`` right after the prefix).
    """
    # HACK - this is tensorboard Summary util function, original path:
    # ~/torch/utils/tensorboard/summary.py
    def _clean_tag(name: str) -> str:
        # Sanitize the tag exactly the way tensorboard does, so the
        # round-trip back to the original tag matches.
        import re as _re
        # noinspection RegExpRedundantEscape
        _INVALID_TAG_CHARACTERS = _re.compile(r"[^-/\w\.]")
        if name is not None:
            new_name = _INVALID_TAG_CHARACTERS.sub("_", name)
            new_name = new_name.lstrip("/")  # Remove leading slashes
            if new_name != name:
                LoggerRoot.get_base_logger(TensorflowBinding).debug(
                    "Summary name %s is illegal; using %s instead." % (name, new_name)
                )
                name = new_name
        return name
    main_path = self._logdir
    # noinspection PyBroadException
    try:
        main_path = _clean_tag(main_path)
        # tensorboard stores add_scalars runs under "<logdir>/<main_tag>_<tag>":
        # take the last path component and drop the title prefix once.
        origin_tag = main_path.rpartition("/")[2].replace(title_prefix, "", 1)
        if title_prefix and origin_tag[0] == "_":  # add_scalars tag
            origin_tag = origin_tag[1:]  # Remove the first "_" that was added by the main_tag in tensorboard
        else:
            return ""
    except Exception:
        # Broad on purpose: any malformed logdir just means "not an add_scalars tag".
        origin_tag = ""
    return origin_tag
@classmethod
def update_current_task(cls, task: Any, **kwargs: Any) -> None:
    # Bind this writer class to a (possibly new) task, resetting every
    # class-level lookup cache when the task actually changes.
    cls.__report_hparams = kwargs.get("report_hparams", False)
    if cls._current_task != task:
        # Guard the cache reset so concurrent writers cannot observe a
        # half-cleared state.
        with cls._add_lock:
            cls._series_name_lookup = {}
            cls._title_series_writers_lookup = {}
            cls._event_writers_id_to_logdir = {}
            cls._title_series_wraparound_counter = {}
            cls._current_task = task
@property
def current_task(self) -> Any:
    # Return annotation corrected from `None`: this returns the task object
    # last registered via `update_current_task` (may itself be None).
    """Return the task most recently bound to this writer class."""
    return self._current_task
# noinspection PyCallingNonCallable
|
EventTrainsWriter
|
python
|
huggingface__transformers
|
src/transformers/models/univnet/feature_extraction_univnet.py
|
{
"start": 1036,
"end": 22813
}
|
class ____(SequenceFeatureExtractor):
r"""
Constructs a UnivNet feature extractor.
This class extracts log-mel-filter bank features from raw speech using the short time Fourier Transform (STFT). The
STFT implementation follows that of TacoTron 2 and Hifi-GAN.
This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
most of the main methods. Users should refer to this superclass for more information regarding those methods.
Args:
feature_size (`int`, *optional*, defaults to 1):
The feature dimension of the extracted features.
sampling_rate (`int`, *optional*, defaults to 24000):
The sampling rate at which the audio files should be digitalized expressed in hertz (Hz).
padding_value (`float`, *optional*, defaults to 0.0):
The value to pad with when applying the padding strategy defined by the `padding` argument to
[`UnivNetFeatureExtractor.__call__`]. Should correspond to audio silence. The `pad_end` argument to
`__call__` will also use this padding value.
do_normalize (`bool`, *optional*, defaults to `False`):
Whether to perform Tacotron 2 normalization on the input. Normalizing can help to significantly improve the
performance for some models.
num_mel_bins (`int`, *optional*, defaults to 100):
The number of mel-frequency bins in the extracted spectrogram features. This should match
`UnivNetModel.config.num_mel_bins`.
hop_length (`int`, *optional*, defaults to 256):
The direct number of samples between sliding windows. Otherwise referred to as "shift" in many papers. Note
that this is different from other audio feature extractors such as [`SpeechT5FeatureExtractor`] which take
the `hop_length` in ms.
win_length (`int`, *optional*, defaults to 1024):
The direct number of samples for each sliding window. Note that this is different from other audio feature
extractors such as [`SpeechT5FeatureExtractor`] which take the `win_length` in ms.
win_function (`str`, *optional*, defaults to `"hann_window"`):
Name for the window function used for windowing, must be accessible via `torch.{win_function}`
filter_length (`int`, *optional*, defaults to 1024):
The number of FFT components to use. If `None`, this is determined using
`transformers.audio_utils.optimal_fft_length`.
max_length_s (`int`, *optional*, defaults to 10):
The maximum input length of the model in seconds. This is used to pad the audio.
fmin (`float`, *optional*, defaults to 0.0):
Minimum mel frequency in Hz.
fmax (`float`, *optional*):
Maximum mel frequency in Hz. If not set, defaults to `sampling_rate / 2`.
mel_floor (`float`, *optional*, defaults to 1e-09):
Minimum value of mel frequency banks. Note that the way [`UnivNetFeatureExtractor`] uses `mel_floor` is
different than in [`transformers.audio_utils.spectrogram`].
center (`bool`, *optional*, defaults to `False`):
Whether to pad the waveform so that frame `t` is centered around time `t * hop_length`. If `False`, frame
`t` will start at time `t * hop_length`.
compression_factor (`float`, *optional*, defaults to 1.0):
The multiplicative compression factor for dynamic range compression during spectral normalization.
compression_clip_val (`float`, *optional*, defaults to 1e-05):
The clip value applied to the waveform before applying dynamic range compression during spectral
normalization.
normalize_min (`float`, *optional*, defaults to -11.512925148010254):
The min value used for Tacotron 2-style linear normalization. The default is the original value from the
Tacotron 2 implementation.
normalize_max (`float`, *optional*, defaults to 2.3143386840820312):
The max value used for Tacotron 2-style linear normalization. The default is the original value from the
Tacotron 2 implementation.
model_in_channels (`int`, *optional*, defaults to 64):
The number of input channels to the [`UnivNetModel`] model. This should match
`UnivNetModel.config.model_in_channels`.
pad_end_length (`int`, *optional*, defaults to 10):
If padding the end of each waveform, the number of spectrogram frames worth of samples to append. The
number of appended samples will be `pad_end_length * hop_length`.
return_attention_mask (`bool`, *optional*, defaults to `True`):
Whether or not [`~UnivNetFeatureExtractor.__call__`] should return `attention_mask`.
"""
model_input_names = ["input_features", "noise_sequence", "padding_mask"]
def __init__(
    self,
    feature_size: int = 1,
    sampling_rate: int = 24000,
    padding_value: float = 0.0,
    do_normalize: bool = False,
    num_mel_bins: int = 100,
    hop_length: int = 256,
    win_length: int = 1024,
    win_function: str = "hann_window",
    filter_length: Optional[int] = 1024,
    max_length_s: int = 10,
    fmin: float = 0.0,
    fmax: Optional[float] = None,
    mel_floor: float = 1e-9,
    center: bool = False,
    compression_factor: float = 1.0,
    compression_clip_val: float = 1e-5,
    normalize_min: float = -11.512925148010254,
    normalize_max: float = 2.3143386840820312,
    model_in_channels: int = 64,
    pad_end_length: int = 10,
    return_attention_mask=True,
    **kwargs,
):
    # Parameters are documented in the class docstring; this method only
    # stores them and precomputes the derived STFT/mel machinery.
    super().__init__(
        feature_size=feature_size,
        sampling_rate=sampling_rate,
        padding_value=padding_value,
        return_attention_mask=return_attention_mask,
        **kwargs,
    )
    self.do_normalize = do_normalize
    self.num_mel_bins = num_mel_bins
    self.hop_length = hop_length
    self.win_length = win_length
    self.win_function = win_function
    self.filter_length = filter_length
    self.fmin = fmin
    if fmax is None:
        # Follows the librosa.filters.mel implementation
        fmax = float(sampling_rate) / 2
    self.fmax = fmax
    self.mel_floor = mel_floor
    self.max_length_s = max_length_s
    # Maximum model input length in samples; default padding/truncation
    # target for __call__.
    self.num_max_samples = max_length_s * sampling_rate
    if self.filter_length is None:
        # No explicit FFT size: pick an efficient one for the window length.
        self.n_fft = optimal_fft_length(self.win_length)
    else:
        self.n_fft = self.filter_length
    # Number of frequency bins produced by a real-input FFT of size n_fft.
    self.n_freqs = (self.n_fft // 2) + 1
    self.window = window_function(window_length=self.win_length, name=self.win_function, periodic=True)
    self.mel_filters = mel_filter_bank(
        num_frequency_bins=self.n_freqs,
        num_mel_filters=self.num_mel_bins,
        min_frequency=self.fmin,
        max_frequency=self.fmax,
        sampling_rate=self.sampling_rate,
        norm="slaney",
        mel_scale="slaney",
    )
    self.center = center
    self.compression_factor = compression_factor
    self.compression_clip_val = compression_clip_val
    self.normalize_min = normalize_min
    self.normalize_max = normalize_max
    self.model_in_channels = model_in_channels
    self.pad_end_length = pad_end_length
def normalize(self, spectrogram):
    """Map a spectrogram linearly from [normalize_min, normalize_max] to [-1, 1]
    (Tacotron 2-style normalization)."""
    span = self.normalize_max - self.normalize_min
    return 2 * ((spectrogram - self.normalize_min) / span) - 1
def denormalize(self, spectrogram):
    """Invert ``normalize``: map values from [-1, 1] back to
    [normalize_min, normalize_max]."""
    span = self.normalize_max - self.normalize_min
    return self.normalize_min + span * ((spectrogram + 1) / 2)
def mel_spectrogram(self, waveform: np.ndarray) -> np.ndarray:
    """
    Calculates log MEL spectrograms from a batch of waveforms. Note that the input waveform(s) will be padded by
    `int(self.n_fft - self.hop_length) / 2` on both sides using the `reflect` padding mode.

    Args:
        waveform (`np.ndarray` of shape `(length,)`):
            The input waveform. This must be a single real-valued, mono waveform.

    Returns:
        `numpy.ndarray`: Array containing a log-mel spectrogram of shape `(num_frames, num_mel_bins)`.
    """
    # Do custom padding based on the official MelGAN and Hifi-GAN implementations
    # See https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/utils/stft.py#L84-L86
    waveform = np.pad(
        waveform,
        (int((self.n_fft - self.hop_length) / 2), int((self.n_fft - self.hop_length) / 2)),
        mode="reflect",
    )
    # Get the complex spectrogram.
    # Note: waveform must be unbatched currently due to the implementation of spectrogram(...).
    complex_spectrogram = spectrogram(
        waveform,
        window=self.window,
        frame_length=self.n_fft,
        hop_length=self.hop_length,
        fft_length=self.n_fft,
        power=None,
        center=self.center,
        mel_filters=None,
        mel_floor=None,
    )
    # Apply the MEL filter bank and MEL floor manually since UnivNet uses a slightly different implementation:
    # mel_floor is added *inside* the square root rather than used as a post-hoc clamp.
    amplitude_spectrogram = np.sqrt(
        np.real(complex_spectrogram) ** 2 + np.imag(complex_spectrogram) ** 2 + self.mel_floor
    )
    mel_spectrogram = np.matmul(self.mel_filters.T, amplitude_spectrogram)
    # Perform spectral normalization to get the log mel spectrogram.
    log_mel_spectrogram = np.log(
        np.clip(mel_spectrogram, a_min=self.compression_clip_val, a_max=None) * self.compression_factor
    )
    # Return spectrogram with num_mel_bins last
    return log_mel_spectrogram.T
def generate_noise(
    self,
    noise_length: int,
    generator: Optional[np.random.Generator] = None,
) -> np.ndarray:
    """
    Generates a random noise sequence of standard Gaussian noise for use in the `noise_sequence` argument of
    [`UnivNetModel.forward`].

    Args:
        noise_length (`int`):
            The length (dim 0) of the generated noise.
        generator (`numpy.random.Generator`, *optional*, defaults to `None`):
            An optional `numpy.random.Generator` random number generator to control noise generation. If not set, a
            new generator with fresh entropy will be created.

    Returns:
        `numpy.ndarray`: Array containing random standard Gaussian noise of shape `(noise_length,
        self.model_in_channels)`.
    """
    # Docstring fixed: it previously documented parameters `spectrogram_length`
    # and `model_in_channels` that do not exist in this signature.
    if generator is None:
        generator = np.random.default_rng()
    noise_shape = (noise_length, self.model_in_channels)
    noise = generator.standard_normal(noise_shape, dtype=np.float32)
    return noise
def batch_decode(self, waveforms, waveform_lengths=None) -> list[np.ndarray]:
    r"""
    Strip padding from audio generated by [`UnivNetModel.forward`]. The result is a
    ragged list of 1D waveform arrays (not a single tensor) because, in general, the
    waveforms have different lengths once padding is removed.

    Args:
        waveforms (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            The batched output waveforms from the [`UnivNetModel`].
        waveform_lengths (`torch.FloatTensor` of shape `(batch_size,)`, *optional*):
            The batched lengths of each waveform before padding.

    Returns:
        `list[np.ndarray]`: A ragged list of 1D waveform arrays with padding removed.
    """
    decoded = []
    for batch_idx, waveform in enumerate(waveforms):
        # Detach from the graph and copy to host memory before converting.
        array = waveform.detach().to(device="cpu", copy=True).numpy()
        if waveform_lengths is not None:
            array = array[: waveform_lengths[batch_idx]]
        decoded.append(array)
    return decoded
def __call__(
    self,
    raw_speech: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]],
    sampling_rate: Optional[int] = None,
    padding: Union[bool, str, PaddingStrategy] = True,
    max_length: Optional[int] = None,
    truncation: bool = True,
    pad_to_multiple_of: Optional[int] = None,
    return_noise: bool = True,
    generator: Optional[np.random.Generator] = None,
    pad_end: bool = False,
    pad_length: Optional[int] = None,
    do_normalize: Optional[bool] = None,
    return_attention_mask: Optional[bool] = None,
    return_tensors: Optional[Union[str, TensorType]] = None,
) -> BatchFeature:
    """
    Main method to featurize and prepare for the model one or several sequence(s).

    Args:
        raw_speech (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`):
            The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
            values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
            stereo, i.e. single float per timestep.
        sampling_rate (`int`, *optional*):
            The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
            `sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition
            pipeline.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
            Select a strategy to pad the input `raw_speech` waveforms (according to the model's padding side and
            padding index) among:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).

            If `pad_end = True`, that padding will occur before the `padding` strategy is applied.
        max_length (`int`, *optional*):
            Maximum length of the returned list and optionally padding length (see above).
        truncation (`bool`, *optional*, defaults to `True`):
            Activates truncation to cut input sequences longer than `max_length` to `max_length`.
        pad_to_multiple_of (`int`, *optional*):
            If set will pad the sequence to a multiple of the provided value.

            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
            `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
        return_noise (`bool`, *optional*, defaults to `True`):
            Whether to generate and return a noise waveform for use in [`UnivNetModel.forward`].
        generator (`numpy.random.Generator`, *optional*, defaults to `None`):
            An optional `numpy.random.Generator` random number generator to use when generating noise.
        pad_end (`bool`, *optional*, defaults to `False`):
            Whether to pad the end of each waveform with silence. This can help reduce artifacts at the end of the
            generated audio sample; see https://github.com/seungwonpark/melgan/issues/8 for more details. This
            padding will be done before the padding strategy specified in `padding` is performed.
        pad_length (`int`, *optional*, defaults to `None`):
            If padding the end of each waveform, the length of the padding in spectrogram frames. If not set, this
            will default to `self.config.pad_end_length`.
        do_normalize (`bool`, *optional*):
            Whether to perform Tacotron 2 normalization on the input. Normalizing can help to significantly improve
            the performance for some models. If not set, this will default to `self.config.do_normalize`.
        return_attention_mask (`bool`, *optional*):
            Whether to return the attention mask. If left to the default, will return the attention mask according
            to the specific feature_extractor's default.

            [What are attention masks?](../glossary#attention-mask)
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'pt'`: Return PyTorch `torch.np.array` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
    """
    # Annotation for `do_normalize` corrected from Optional[str] — it is a flag.
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    if sampling_rate is not None:
        if sampling_rate != self.sampling_rate:
            raise ValueError(
                f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                f" was sampled with {self.sampling_rate} and not {sampling_rate}."
            )
    else:
        logger.warning(
            f"It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. "
            "Failing to do so can result in silent errors that might be hard to debug."
        )
    # A 2D numpy array is treated as an already-batched set of mono waveforms;
    # 3D (multi-channel) input is rejected.
    is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
    if is_batched_numpy and len(raw_speech.shape) > 2:
        raise ValueError(f"Only mono-channel audio is supported for input to {self}")
    is_batched = is_batched_numpy or (
        isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
    )
    if is_batched:
        raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
    elif not is_batched and not isinstance(raw_speech, np.ndarray):
        raw_speech = np.asarray(raw_speech, dtype=np.float32)
    elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
        raw_speech = raw_speech.astype(np.float32)
    # always return batch
    if not is_batched:
        raw_speech = [np.asarray(raw_speech, dtype=np.float32)]
    # Pad end to reduce artifacts
    if pad_end:
        pad_length = pad_length if pad_length is not None else self.pad_end_length
        raw_speech = [
            np.pad(waveform, (0, pad_length * self.hop_length), constant_values=self.padding_value)
            for waveform in raw_speech
        ]
    batched_speech = BatchFeature({"input_features": raw_speech})
    padded_inputs = self.pad(
        batched_speech,
        padding=padding,
        max_length=max_length if max_length is not None else self.num_max_samples,
        truncation=truncation,
        pad_to_multiple_of=pad_to_multiple_of,
        return_attention_mask=return_attention_mask,
    )
    # make sure list is in array format
    # input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
    input_features = padded_inputs.get("input_features")
    # Convert each padded waveform to its log-mel spectrogram.
    mel_spectrograms = [self.mel_spectrogram(waveform) for waveform in input_features]
    if isinstance(input_features[0], list):
        batched_speech["input_features"] = [np.asarray(mel, dtype=np.float32) for mel in mel_spectrograms]
    else:
        batched_speech["input_features"] = [mel.astype(np.float32) for mel in mel_spectrograms]
    # convert attention_mask to correct format
    attention_mask = padded_inputs.get("attention_mask")
    if attention_mask is not None:
        batched_speech["padding_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
    if return_noise:
        # One noise sequence per spectrogram, matching its frame count (dim 0).
        noise = [
            self.generate_noise(spectrogram.shape[0], generator)
            for spectrogram in batched_speech["input_features"]
        ]
        batched_speech["noise_sequence"] = noise
    if do_normalize:
        batched_speech["input_features"] = [
            self.normalize(spectrogram) for spectrogram in batched_speech["input_features"]
        ]
    if return_tensors is not None:
        batched_speech = batched_speech.convert_to_tensors(return_tensors)
    return batched_speech
def to_dict(self) -> dict[str, Any]:
    """Serialize this feature extractor's configuration, dropping derived attributes."""
    serialized = super().to_dict()
    # These are recomputed in __init__ from the other properties, so they are
    # not part of the persisted configuration.
    for derived in ("window", "mel_filters", "n_fft", "n_freqs", "num_max_samples"):
        serialized.pop(derived, None)
    return serialized
__all__ = ["UnivNetFeatureExtractor"]
|
UnivNetFeatureExtractor
|
python
|
doocs__leetcode
|
solution/3500-3599/3590.Kth Smallest Path XOR Sum/Solution.py
|
{
"start": 1528,
"end": 3048
}
|
class ____:
    def kthSmallest(
        self, par: List[int], vals: List[int], queries: List[List[int]]
    ) -> List[int]:
        """For each query ``(u, k)``, return the k-th smallest *distinct*
        root-to-node XOR sum among nodes in the subtree of ``u``, or ``-1``
        when that subtree holds fewer than ``k`` distinct sums.

        Fix: removed the unused junk alias ``narvetholi = path_xor``.
        """
        n = len(par)
        tree = [[] for _ in range(n)]
        for i in range(1, n):
            tree[par[i]].append(i)
        # path_xor[v] = XOR of vals along the root -> v path (filled in below).
        path_xor = vals[:]

        # NOTE(review): both traversals below are recursive; very deep trees
        # may hit Python's default recursion limit.
        def compute_xor(node, acc):
            path_xor[node] ^= acc
            for child in tree[node]:
                compute_xor(child, path_xor[node])

        compute_xor(0, 0)
        node_queries = defaultdict(list)
        for idx, (u, k) in enumerate(queries):
            node_queries[u].append((k, idx))
        trie_pool = {}
        result = [0] * len(queries)

        def dfs(node):
            trie_pool[node] = BinarySumTrie()
            trie_pool[node].add(path_xor[node], 1)
            for child in tree[node]:
                dfs(child)
                # Small-to-large: keep the bigger trie and fold the smaller into it.
                if trie_pool[node].count < trie_pool[child].count:
                    trie_pool[node], trie_pool[child] = (
                        trie_pool[child],
                        trie_pool[node],
                    )
                for val in trie_pool[child].collect():
                    if not trie_pool[node].exists(val):
                        trie_pool[node].add(val, 1)
            # Answer all queries anchored at this node once its trie is complete.
            for k, idx in node_queries[node]:
                if trie_pool[node].count < k:
                    result[idx] = -1
                else:
                    result[idx] = trie_pool[node].find_kth(k)

        dfs(0)
        return result
|
Solution
|
python
|
viewflow__viewflow
|
tests/_cases/test_workflow_undo_tasks.py
|
{
"start": 289,
"end": 2670
}
|
class ____(TestCase):
    """End-to-end check that workflow tasks can be undone, revived and cancelled."""

    @classmethod
    def setUpTestData(cls):
        # Superuser reused by every test via self.client.login below.
        cls.admin = User.objects.create_superuser(username="admin", password="admin")

    def reverse(self, flow_task, name):
        # Resolve a flow-task URL (e.g. "assign" / "execute") for the single
        # task instance of the given flow node.
        task = TestUndoFlow.task_class.objects.get(flow_task=flow_task)
        return flow_task.reverse(name, args=[task.process_id, task.pk])

    def test_undo_human_tasks(self):
        # forward flow: start the process, then assign and execute the human task
        self.assertTrue(self.client.login(username="admin", password="admin"))
        self.assertRedirects(
            self.client.post(
                "/workflow/start/",
                {
                    "text": "Hello, world",
                    "_viewflow_activation-started": "2000-01-01",
                    "_continue": 1,
                },
            ),
            self.reverse(TestUndoFlow.task, "index"),
            fetch_redirect_response=False,
        )
        self.assertRedirects(
            self.client.post(
                self.reverse(TestUndoFlow.task, "assign"), {"_continue": 1}
            ),
            self.reverse(TestUndoFlow.task, "index"),
            fetch_redirect_response=False,
        )
        self.assertEqual(
            self.client.post(
                self.reverse(TestUndoFlow.task, "execute"),
                {"_viewflow_activation-started": "2000-01-01"},
            ).status_code,
            302,
        )
        # undo process finale
        process = TestUndoFlow.process_class.objects.get()
        end_task = process.task_set.get(flow_task=TestUndoFlow.end)
        with end_task.activation() as activation:
            activation.undo()
        # undo task
        task = process.task_set.get(flow_task=TestUndoFlow.task)
        with task.activation() as activation:
            activation.undo()
        # revive task
        with task.activation() as activation:
            activation.revive()
        # cancel task (revive created a fresh NEW task instance)
        task = process.task_set.get(flow_task=TestUndoFlow.task, status=STATUS.NEW)
        with task.activation() as activation:
            activation.cancel()
        # undo process start
        start_task = process.task_set.get(flow_task=TestUndoFlow.start)
        with start_task.activation() as activation:
            activation.undo()
        # Undoing the start node finishes the process in the CANCELED state.
        process.refresh_from_db()
        self.assertTrue(process.finished)
        self.assertEqual(PROCESS.CANCELED, process.status)
|
Test
|
python
|
pytorch__pytorch
|
benchmarks/operator_benchmark/pt/index_add__test.py
|
{
"start": 554,
"end": 1589
}
|
class ____(op_bench.TorchBenchmarkBase):
    """Microbenchmark for in-place `Tensor.index_add_` over configurable shapes."""

    def init(self, M, N, K, dim, dtype, device):
        # creating the original tensor
        tensor = torch.rand(M, N, K, dtype=dtype, device=device)
        # creating index
        index_max_len = tensor.shape[dim]
        # Random index count in [1, index_max_len]; sampled without replacement
        # so every target position is unique.
        # NOTE(review): unseeded RNG, so the index length varies run-to-run.
        index_len = numpy.random.randint(1, index_max_len + 1)
        index = torch.tensor(
            numpy.random.choice(index_max_len, index_len, replace=False), device=device
        )
        # `source` must match `tensor` except along `dim`, where it matches the
        # number of selected indices.
        src_dims = [M, N, K]
        src_dims[dim] = index_len
        source = torch.rand(*src_dims, dtype=dtype, device=device)
        self.inputs = {
            "tensor": tensor,
            "dim": dim,
            "index": index,
            "source": source,
        }
        self.set_module_name("index_add_")

    def forward(self, tensor, dim, index, source):
        # Accumulate rows of `source` into `tensor` at `index` along `dim`, in place.
        return tensor.index_add_(dim, index, source)
# Register one benchmark case per configuration and run the suite when this
# file is executed as a script.
op_bench.generate_pt_test(configs_short + configs_long, IndexAddBenchmark)
if __name__ == "__main__":
    op_bench.benchmark_runner.main()
|
IndexAddBenchmark
|
python
|
fastai__fastai
|
fastai/text/models/core.py
|
{
"start": 1414,
"end": 2380
}
|
class ____(Module):
    "To go on top of a RNNCore module and create a Language Model."
    # Decoder weights are initialised uniformly in [-initrange, initrange].
    initrange=0.1
    def __init__(self,
        n_out:int, # Number of output channels
        n_hid:int, # Number of features in encoder last layer output
        output_p:float=0.1, # Input dropout probability
        tie_encoder:nn.Module=None, # If module is supplied will tie decoder weight to `tie_encoder.weight`
        bias:bool=True # If `False` the layer will not learn additive bias
    ):
        # NOTE(review): no explicit super().__init__() call — fastai's `Module`
        # base is presumably handling nn.Module initialisation; confirm against
        # fastai's Module implementation.
        self.decoder = nn.Linear(n_hid, n_out, bias=bias)
        self.decoder.weight.data.uniform_(-self.initrange, self.initrange)
        self.output_dp = RNNDropout(output_p)
        if bias: self.decoder.bias.data.zero_()
        # Weight tying: share the encoder's embedding matrix with the decoder.
        if tie_encoder: self.decoder.weight = tie_encoder.weight
    def forward(self, input):
        dp_inp = self.output_dp(input)
        # Returns (decoded output, raw input, dropped-out input).
        # NOTE(review): the two extra outputs look intended for downstream
        # regularisation callbacks — confirm against callers.
        return self.decoder(dp_inp), input, dp_inp
# %% ../../../nbs/33_text.models.core.ipynb 10
|
LinearDecoder
|
python
|
fastapi__sqlmodel
|
sqlmodel/sql/sqltypes.py
|
{
"start": 110,
"end": 558
}
|
class ____(types.TypeDecorator):  # type: ignore
    """String type that falls back to VARCHAR(255) on MySQL when no length is set."""

    impl = types.String
    cache_ok = True
    mysql_default_length = 255

    def load_dialect_impl(self, dialect: Dialect) -> "types.TypeEngine[Any]":
        base = cast(types.String, self.impl)
        # MySQL cannot create an unbounded VARCHAR column, so give it a length.
        if dialect.name == "mysql" and base.length is None:
            return dialect.type_descriptor(types.String(self.mysql_default_length))
        return super().load_dialect_impl(dialect)
|
AutoString
|
python
|
spyder-ide__spyder
|
spyder/widgets/dock.py
|
{
"start": 902,
"end": 3208
}
|
class ____(QObject, SpyderConfigurationAccessor):
    """Filter event attached to each DockWidget QTabBar."""
    CONF_SECTION = 'main'

    def __init__(self, dock_tabbar, main):
        QObject.__init__(self)
        self.dock_tabbar: QTabBar = dock_tabbar
        self.main = main
        # Index of the tab under the cursor at the last mouse press.
        self.from_index = None
        self._set_tabbar_stylesheet()
        self.dock_tabbar.setElideMode(Qt.ElideNone)
        self.dock_tabbar.setUsesScrollButtons(True)

    def eventFilter(self, obj, event):
        """Filter mouse press events.

        Events that are captured and not propagated return True. Events that
        are not captured and are propagated return False.
        """
        # Note: both paths return False, so every event is always propagated;
        # presses are only observed here, never consumed.
        event_type = event.type()
        if event_type == QEvent.MouseButtonPress:
            self.tab_pressed(event)
            return False
        return False

    def tab_pressed(self, event):
        """Method called when a tab from a QTabBar has been pressed."""
        self.from_index = self.dock_tabbar.tabAt(event.pos())
        self.dock_tabbar.setCurrentIndex(self.from_index)
        try:
            if event.button() == Qt.RightButton:
                # tabAt returns -1 when the press landed outside any tab.
                if self.from_index == -1:
                    self.show_nontab_menu(event)
                else:
                    self.show_tab_menu(event)
        except AttributeError:
            # Needed to avoid an error when generating the
            # context menu on top of the tab.
            # See spyder-ide/spyder#11226
            pass

    def show_tab_menu(self, event):
        """Show the context menu assigned to tabs."""
        self.show_nontab_menu(event)

    def show_nontab_menu(self, event):
        """Show the context menu assigned to nontabs section."""
        menu = self.main.createPopupMenu()
        menu.exec_(self.dock_tabbar.mapToGlobal(event.pos()))

    def _set_tabbar_stylesheet(self):
        # Stylesheet follows the user's vertical-tabs preference from config.
        if self.get_conf('vertical_tabs'):
            self.dock_tabbar.setStyleSheet(
                str(VERTICAL_DOCK_TABBAR_STYLESHEET))
        else:
            self.dock_tabbar.setStyleSheet(
                str(HORIZONTAL_DOCK_TABBAR_STYLESHEET))
# =============================================================================
# Title bar
# =============================================================================
|
TabFilter
|
python
|
great-expectations__great_expectations
|
docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py
|
{
"start": 1554,
"end": 4983
}
|
class ____(ColumnMapMetricProvider):
    # </snippet>
    """Map metric provider: True exactly where a column's values equal 3.

    The snippet comments below are load-bearing — they are extracted into the
    documentation site — so they must be kept intact.
    """
    # This is the id string that will be used to reference your metric.
    # <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py metric_name">
    condition_metric_name = "column_values.equal_three"
    # </snippet>
    # This method implements the core logic for the PandasExecutionEngine
    # <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py pandas">
    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        return column == 3
    # </snippet>
    # <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py spark_definition">
    @metric_partial(
        engine=SparkDFExecutionEngine,
        partial_fn_type=MetricPartialFunctionTypes.MAP_CONDITION_FN,
        domain_type=MetricDomainTypes.COLUMN,
    )
    def _spark(
        cls,
        execution_engine: SparkDFExecutionEngine,
        metric_domain_kwargs,
        metric_value_kwargs,
        metrics,
        runtime_configuration,
    ):
        # </snippet>
        # <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py spark_selectable">
        (
            _,
            compute_domain_kwargs,
            accessor_domain_kwargs,
        ) = execution_engine.get_compute_domain(
            metric_domain_kwargs, MetricDomainTypes.COLUMN
        )
        column_name = accessor_domain_kwargs["column"]
        column = F.col(column_name)
        # </snippet>
        # <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py spark_query">
        query = F.when(column == 3, F.lit(False)).otherwise(F.lit(True))
        return (query, compute_domain_kwargs, accessor_domain_kwargs)
        # </snippet>

    # <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py sqlalchemy">
    @column_condition_partial(engine=SqlAlchemyExecutionEngine)
    def _sqlalchemy(cls, column, **kwargs):
        return column.in_([3])
    # </snippet>

    @classmethod
    def _get_evaluation_dependencies(
        cls,
        metric: MetricConfiguration,
        configuration: Optional[ExpectationConfiguration] = None,
        execution_engine: Optional[ExecutionEngine] = None,
        runtime_configuration: Optional[Dict] = None,
    ):
        """Returns a dictionary of given metric names and their corresponding configuration, specifying the metric
        types and their respective domains"""
        dependencies: Dict = super()._get_evaluation_dependencies(
            metric=metric,
            configuration=configuration,
            execution_engine=execution_engine,
            runtime_configuration=runtime_configuration,
        )
        # Also require column-type information at the table level (minus the
        # "column" key, since table.column_types is a table-domain metric).
        table_domain_kwargs: Dict = {
            k: v for k, v in metric.metric_domain_kwargs.items() if k != "column"
        }
        dependencies["table.column_types"] = MetricConfiguration(
            metric_name="table.column_types",
            metric_domain_kwargs=table_domain_kwargs,
            metric_value_kwargs={
                "include_nested": True,
            },
        )
        return dependencies
# This class defines the Expectation itself
# <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py ExpectColumnValuesToEqualThree class_def">
|
ColumnValuesEqualThree
|
python
|
lazyprogrammer__machine_learning_examples
|
nlp_class2/glove_theano.py
|
{
"start": 1124,
"end": 8591
}
|
class ____:
def __init__(self, D, V, context_sz):
self.D = D
self.V = V
self.context_sz = context_sz
def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, alpha=0.75, epochs=10, gd=False, use_theano=False, use_tensorflow=False):
# build co-occurrence matrix
# paper calls it X, so we will call it X, instead of calling
# the training data X
# TODO: would it be better to use a sparse matrix?
t0 = datetime.now()
V = self.V
D = self.D
if not os.path.exists(cc_matrix):
X = np.zeros((V, V))
N = len(sentences)
print("number of sentences to process:", N)
it = 0
for sentence in sentences:
it += 1
if it % 10000 == 0:
print("processed", it, "/", N)
n = len(sentence)
for i in range(n):
# i is not the word index!!!
# j is not the word index!!!
# i just points to which element of the sequence (sentence) we're looking at
wi = sentence[i]
start = max(0, i - self.context_sz)
end = min(n, i + self.context_sz)
# we can either choose only one side as context, or both
# here we are doing both
# make sure "start" and "end" tokens are part of some context
# otherwise their f(X) will be 0 (denominator in bias update)
if i - self.context_sz < 0:
points = 1.0 / (i + 1)
X[wi,0] += points
X[0,wi] += points
if i + self.context_sz > n:
points = 1.0 / (n - i)
X[wi,1] += points
X[1,wi] += points
# left side
for j in range(start, i):
wj = sentence[j]
points = 1.0 / (i - j) # this is +ve
X[wi,wj] += points
X[wj,wi] += points
# right side
for j in range(i + 1, end):
wj = sentence[j]
points = 1.0 / (j - i) # this is +ve
X[wi,wj] += points
X[wj,wi] += points
# save the cc matrix because it takes forever to create
np.save(cc_matrix, X)
else:
X = np.load(cc_matrix)
print("max in X:", X.max())
# weighting
fX = np.zeros((V, V))
fX[X < xmax] = (X[X < xmax] / float(xmax)) ** alpha
fX[X >= xmax] = 1
print("max in f(X):", fX.max())
# target
logX = np.log(X + 1)
# cast
fX = fX.astype(np.float32)
logX = logX.astype(np.float32)
print("max in log(X):", logX.max())
print("time to build co-occurrence matrix:", (datetime.now() - t0))
# initialize weights
W = np.random.randn(V, D) / np.sqrt(V + D)
b = np.zeros(V)
U = np.random.randn(V, D) / np.sqrt(V + D)
c = np.zeros(V)
mu = logX.mean()
# initialize weights, inputs, targets placeholders
thW = theano.shared(W.astype(np.float32))
thb = theano.shared(b.astype(np.float32))
thU = theano.shared(U.astype(np.float32))
thc = theano.shared(c.astype(np.float32))
thLogX = T.matrix('logX')
thfX = T.matrix('fX')
params = [thW, thb, thU, thc]
thDelta = thW.dot(thU.T) + T.reshape(thb, (V, 1)) + T.reshape(thc, (1, V)) + mu - thLogX
thCost = ( thfX * thDelta * thDelta ).sum()
# regularization
regularized_cost = thCost + reg*((thW * thW).sum() + (thU * thU).sum())
updates = momentum_updates(regularized_cost, params, learning_rate)
train_op = theano.function(
inputs=[thfX, thLogX],
updates=updates,
)
cost_op = theano.function(inputs=[thfX, thLogX], outputs=thCost)
costs = []
sentence_indexes = range(len(sentences))
for epoch in range(epochs):
train_op(fX, logX)
cost = cost_op(fX, logX)
costs.append(cost)
print("epoch:", epoch, "cost:", cost)
self.W = thW.get_value()
self.U = thU.get_value()
plt.plot(costs)
plt.show()
def save(self, fn):
# function word_analogies expects a (V,D) matrx and a (D,V) matrix
arrays = [self.W, self.U.T]
np.savez(fn, *arrays)
def main(we_file, w2i_file, use_brown=True, n_files=50):
if use_brown:
cc_matrix = "cc_matrix_brown.npy"
else:
cc_matrix = "cc_matrix_%s.npy" % n_files
# hacky way of checking if we need to re-load the raw data or not
# remember, only the co-occurrence matrix is needed for training
if os.path.exists(cc_matrix):
with open(w2i_file) as f:
word2idx = json.load(f)
sentences = [] # dummy - we won't actually use it
else:
if use_brown:
keep_words = set([
'king', 'man', 'woman',
'france', 'paris', 'london', 'rome', 'italy', 'britain', 'england',
'french', 'english', 'japan', 'japanese', 'chinese', 'italian',
'australia', 'australian', 'december', 'november', 'june',
'january', 'february', 'march', 'april', 'may', 'july', 'august',
'september', 'october',
])
sentences, word2idx = get_sentences_with_word2idx_limit_vocab(n_vocab=5000, keep_words=keep_words)
else:
sentences, word2idx = get_wikipedia_data(n_files=n_files, n_vocab=2000)
with open(w2i_file, 'w') as f:
json.dump(word2idx, f)
V = len(word2idx)
model = Glove(100, V, 10)
model.fit(
sentences,
cc_matrix=cc_matrix,
learning_rate=1e-4,
reg=0.1,
epochs=200,
)
model.save(we_file)
if __name__ == '__main__':
we = 'glove_model_50.npz'
w2i = 'glove_word2idx_50.json'
# we = 'glove_model_brown.npz'
# w2i = 'glove_word2idx_brown.json'
main(we, w2i, use_brown=False)
# load back embeddings
npz = np.load(we)
W1 = npz['arr_0']
W2 = npz['arr_1']
with open(w2i) as f:
word2idx = json.load(f)
idx2word = {i:w for w,i in word2idx.items()}
for concat in (True, False):
print("** concat:", concat)
if concat:
We = np.hstack([W1, W2.T])
else:
We = (W1 + W2.T) / 2
find_analogies('king', 'man', 'woman', We, word2idx, idx2word)
find_analogies('france', 'paris', 'london', We, word2idx, idx2word)
find_analogies('france', 'paris', 'rome', We, word2idx, idx2word)
find_analogies('paris', 'france', 'italy', We, word2idx, idx2word)
find_analogies('france', 'french', 'english', We, word2idx, idx2word)
find_analogies('japan', 'japanese', 'chinese', We, word2idx, idx2word)
find_analogies('japan', 'japanese', 'italian', We, word2idx, idx2word)
find_analogies('japan', 'japanese', 'australian', We, word2idx, idx2word)
find_analogies('december', 'november', 'june', We, word2idx, idx2word)
|
Glove
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/cover/test_health_checks.py
|
{
"start": 4624,
"end": 4742
}
|
class ____(RuleBasedStateMachine):
@rule()
def r(self):
return "any non-None value"
|
ReturningRuleMachine
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1150504,
"end": 1157657
}
|
class ____(sgqlc.types.Type, Node, AnnouncementBanner):
"""An account to manage multiple organizations with consolidated
policy and billing.
"""
__schema__ = github_schema
__field_names__ = (
"avatar_url",
"billing_info",
"created_at",
"database_id",
"description",
"description_html",
"location",
"members",
"name",
"organizations",
"owner_info",
"resource_path",
"slug",
"url",
"viewer_is_admin",
"website_url",
)
avatar_url = sgqlc.types.Field(
sgqlc.types.non_null(URI),
graphql_name="avatarUrl",
args=sgqlc.types.ArgDict((("size", sgqlc.types.Arg(Int, graphql_name="size", default=None)),)),
)
"""A URL pointing to the enterprise's public avatar.
Arguments:
* `size` (`Int`): The size of the resulting square image.
"""
billing_info = sgqlc.types.Field(EnterpriseBillingInfo, graphql_name="billingInfo")
"""Enterprise billing information visible to enterprise billing
managers.
"""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
"""Identifies the primary key from the database."""
description = sgqlc.types.Field(String, graphql_name="description")
"""The description of the enterprise."""
description_html = sgqlc.types.Field(sgqlc.types.non_null(HTML), graphql_name="descriptionHTML")
"""The description of the enterprise as HTML."""
location = sgqlc.types.Field(String, graphql_name="location")
"""The location of the enterprise."""
members = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseMemberConnection),
graphql_name="members",
args=sgqlc.types.ArgDict(
(
(
"organization_logins",
sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="organizationLogins", default=None),
),
("query", sgqlc.types.Arg(String, graphql_name="query", default=None)),
(
"order_by",
sgqlc.types.Arg(EnterpriseMemberOrder, graphql_name="orderBy", default={"field": "LOGIN", "direction": "ASC"}),
),
("role", sgqlc.types.Arg(EnterpriseUserAccountMembershipRole, graphql_name="role", default=None)),
("deployment", sgqlc.types.Arg(EnterpriseUserDeployment, graphql_name="deployment", default=None)),
("has_two_factor_enabled", sgqlc.types.Arg(Boolean, graphql_name="hasTwoFactorEnabled", default=None)),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of users who are members of this enterprise.
Arguments:
* `organization_logins` (`[String!]`): Only return members within
the organizations with these logins
* `query` (`String`): The search string to look for.
* `order_by` (`EnterpriseMemberOrder`): Ordering options for
members returned from the connection. (default: `{field: LOGIN,
direction: ASC}`)
* `role` (`EnterpriseUserAccountMembershipRole`): The role of the
user in the enterprise organization or server.
* `deployment` (`EnterpriseUserDeployment`): Only return members
within the selected GitHub Enterprise deployment
* `has_two_factor_enabled` (`Boolean`): Only return members with
this two-factor authentication status. Does not include members
who only have an account on a GitHub Enterprise Server instance.
(default: `null`)
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
"""The name of the enterprise."""
organizations = sgqlc.types.Field(
sgqlc.types.non_null(OrganizationConnection),
graphql_name="organizations",
args=sgqlc.types.ArgDict(
(
("query", sgqlc.types.Arg(String, graphql_name="query", default=None)),
("viewer_organization_role", sgqlc.types.Arg(RoleInOrganization, graphql_name="viewerOrganizationRole", default=None)),
("order_by", sgqlc.types.Arg(OrganizationOrder, graphql_name="orderBy", default={"field": "LOGIN", "direction": "ASC"})),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of organizations that belong to this enterprise.
Arguments:
* `query` (`String`): The search string to look for.
* `viewer_organization_role` (`RoleInOrganization`): The viewer's
role in an organization.
* `order_by` (`OrganizationOrder`): Ordering options for
organizations returned from the connection. (default: `{field:
LOGIN, direction: ASC}`)
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
owner_info = sgqlc.types.Field(EnterpriseOwnerInfo, graphql_name="ownerInfo")
"""Enterprise information visible to enterprise owners or enterprise
owners' personal access tokens (classic) with read:enterprise or
admin:enterprise scope.
"""
resource_path = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="resourcePath")
"""The HTTP path for this enterprise."""
slug = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="slug")
"""The URL-friendly identifier for the enterprise."""
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
"""The HTTP URL for this enterprise."""
viewer_is_admin = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerIsAdmin")
"""Is the current viewer an admin of this enterprise?"""
website_url = sgqlc.types.Field(URI, graphql_name="websiteUrl")
"""The URL of the enterprise website."""
|
Enterprise
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/metrics_test.py
|
{
"start": 167465,
"end": 169487
}
|
class ____(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.false_negatives_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0.15, 0.5, 0.85])
_assert_metric_variables(self, ('false_negatives/false_negatives:0',))
@test_util.run_deprecated_v1
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fn, fn_update_op = metrics.false_negatives_at_thresholds(
predictions=predictions, labels=labels, thresholds=[0.15, 0.5, 0.85])
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fn)
self.assertAllEqual((0, 2, 3), fn_update_op)
self.assertAllEqual((0, 2, 3), fn)
@test_util.run_deprecated_v1
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fn, fn_update_op = metrics.false_negatives_at_thresholds(
predictions=predictions,
labels=labels,
weights=((3.0,), (5.0,), (7.0,)),
thresholds=[0.15, 0.5, 0.85])
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fn)
self.assertAllEqual((0.0, 8.0, 11.0), fn_update_op)
self.assertAllEqual((0.0, 8.0, 11.0), fn)
|
FalseNegativesAtThresholdsTest
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/sharedstrings/test_initialisation.py
|
{
"start": 309,
"end": 872
}
|
class ____(unittest.TestCase):
"""
Test initialisation of the SharedStrings class and call a method.
"""
def setUp(self):
self.fh = StringIO()
self.sharedstrings = SharedStrings()
self.sharedstrings._set_filehandle(self.fh)
def test_xml_declaration(self):
"""Test Sharedstrings xml_declaration()"""
self.sharedstrings._xml_declaration()
exp = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
|
TestInitialisation
|
python
|
huggingface__transformers
|
src/transformers/generation/continuous_batching/cache.py
|
{
"start": 20311,
"end": 34222
}
|
class ____:
"""A helper class to determine the best number of pages and maximum number of tokens per batch for the paged
attention cache, providing automatic sizing based on available GPU memory.
The helper works using the number of pages, which is tied to the number of blocks by:
num_blocks = num_pages // block_size
The memory footprint consists of three main components:
- Cache memory: the space needed to store the cache tensors:
2 * layer_group_size * [num_pages, page_size] * cache_dtype
- Activation memory: the space temporarily taken by the largest activation during the model forward pass:
peak_activation_per_token * max_tokens_per_batch * activation_dtype_size
- Static tensors: the space taken by the input/output buffers and metadata tensors for batch processing, sum of:
- inputs_ids + outputs_ids + position_ids + logits_indices: 4 * max_tokens_per_batch * int32_size
- attention_mask: num_attention_masks * num_pages * max_tokens_per_batch * activation_dtype_size
- cumulative_seqlens_q + cumulative_seqlens_k: (1 + 2) * max_tokens_per_batch * int32_size
- write_index_tensor: num_groups * max_tokens_per_batch * int32_size
- read_index_tensor: num_groups * (num_pages + max_tokens_per_batch) * int32_size
The handler can operate in three modes:
1. Auto-sizing: Determines both number of pages and maximum number of tokens per batch using quadratic optimization
2. Fixed cache: Calculates max batch tokens given a fixed number of pages
3. Fixed batch: Calculates number of pages given a fixed maximum batch size
"""
_activation_dtype = torch.bfloat16
_input_dtype = torch.int32
_upper_bound_max_batch_tokens = 256
_upper_bound_num_blocks = 4096
def __init__(
self,
block_size: int,
page_size: int,
num_groups: int,
group_size: int,
peak_activation_per_token: int,
num_attention_masks: int,
) -> None:
"""Initialize the memory handler with the parameters that cannot be automatically inferred.
Args:
block_size: Size of the cache blocks
page_size: Size of the cache pages
num_groups: Number of layer groups
group_size: Number of layers per layer group
peak_activation_per_token: Maximum size of activation tensor per token, = hidden_size + vocab_size
num_attention_masks: Number of attention masks, 0 if no attention mask is used, 2 if hybrid model, else 1
"""
self.block_size = block_size
self.page_size = page_size
self.num_groups = num_groups
self.group_size = group_size
self.peak_activation_per_token = peak_activation_per_token
self.num_attention_masks = num_attention_masks
@staticmethod
def get_available_memory(max_memory_percent: float = 1.0) -> int:
"""Calculate available GPU memory for cache allocation, accounting for already allocated tensors.
This method queries the current memory state and applies the specified percentage limit to determine
how much memory can be safely used for the paged attention cache.
Args:
max_memory_percent: Fraction of available memory to use (0.0-1.0). 1.0 means use all available memory.
Returns:
int: Available memory in bytes for cache allocation
"""
_, total, reserved, allocated = get_device_and_memory_breakdown()
available_memory = total - max(allocated, reserved)
available_memory = int(available_memory * max_memory_percent)
return available_memory
def infer_num_blocks_and_max_batch_tokens(
self,
num_blocks: int | None = None,
max_batch_tokens: int | None = None,
max_memory_percent: float = 0.8, # FIXME: it seems we overcommit memory, was changed from 0.9 which caused OOMs in our benchmarking CI
cache_dtype: torch.dtype = torch.float16,
) -> tuple[int, int]:
"""Determine optimal number of blocks and maximum number of tokens per batch based on available memory and
constraints. Check the class docstring for more details. Naming the number of pages as N and the maximum number
of tokens per batch as M, the equation solved is:
available_memory = sum([
MN * num_attention_masks * activation_dtype_size,
2N * (layer_group_size * page_size * cache_dtype + 2 * num_group),
M * (peak_activation_per_token * activation_dtype + 28 + 4 * num_group),
])
where we already simplified int32_size = 4.
"""
# If neither num_blocks nor max_batch_tokens are provided, we use a second-order polynomial
if num_blocks is None and max_batch_tokens is None:
num_blocks, max_batch_tokens = self.compute_num_blocks_and_max_batch_tokens(
max_memory_percent, cache_dtype
)
# If only num_blocks is provided, we infer the max_batch_tokens
elif num_blocks is not None and max_batch_tokens is None:
max_batch_tokens = self.compute_max_batch_tokens(num_blocks, max_memory_percent, cache_dtype)
# If only max_batch_tokens is provided, we infer the num_blocks
elif max_batch_tokens is not None and num_blocks is None:
num_blocks = self.compute_num_blocks(max_batch_tokens, max_memory_percent, cache_dtype)
# We check if the memory footprint is too large in all cases
available_memory = self.get_available_memory(max_memory_percent)
memory_footprint = self.compute_memory_footprint(
max_batch_tokens=max_batch_tokens,
num_blocks=num_blocks,
cache_dtype=cache_dtype,
)
if memory_footprint > available_memory:
raise MemoryError(f"Memory footprint {memory_footprint} is more than available memory {available_memory}")
return num_blocks, max_batch_tokens
def compute_num_blocks_and_max_batch_tokens(
self,
max_memory_percent: float,
cache_dtype: torch.dtype = torch.float16,
m: float = 0.01,
) -> tuple[int, int]:
"""Calculate optimal number of blocks and maximum number of tokens per batch using quadratic optimization when
neither is fixed. This method assumes a relationship M = m * N where m is a small ratio below 1 and solves the
resulting quadratic equation to find the optimal N that maximizes utilization within memory constraints. m is
the amount of cache we can fill with one batch: m=0.01 means a batch fills at most 1% of the cache. The equation
to solve is:
available_memory = sum([
m * N^2 * num_attention_masks * activation_dtype_size,
2N * (layer_group_size * page_size * cache_dtype + 2 * num_group),
m * N * (peak_activation_per_token * activation_dtype + 28 + 4 * num_group),
])
If num_attention_masks is 0, the equation simplifies to a 1st degree polynomial.
"""
cache_memory = self.get_available_memory(max_memory_percent)
logger.info(f"Cache memory: {cache_memory}")
# Compute second-degree polynomial coefficients
a = m * self.num_attention_masks * self._activation_dtype.itemsize
b = 2 * (self.group_size * self.page_size * cache_dtype.itemsize + 2 * self.num_groups)
b += m * (self.peak_activation_per_token * self._activation_dtype.itemsize + 28 + 4 * self.num_groups)
c = -cache_memory
logger.debug(f"Coefficients of 2nd degree polynomial: {a = }, {b = }, {c = }")
# If num_attention_masks is 0, the equation simplifies to a 1st degree polynomial
if self.num_attention_masks == 0:
greatest_solution = -c / b
# Otherwise, we solve the quadratic equation
else:
discriminant = b**2 - 4 * a * c
if discriminant < 0:
raise ValueError(f"Discriminant is negative: {discriminant = }")
greatest_solution = (-b + sqrt(discriminant)) / (2 * a)
if greatest_solution < 0:
raise ValueError(f"Greatest solution is negative: {greatest_solution = }")
# Infer number of blocks and max batch tokens
num_pages = floor(greatest_solution)
num_blocks = num_pages // self.block_size
if num_blocks > self._upper_bound_num_blocks:
logger.info(f"{num_blocks = } is too large, setting to {self._upper_bound_num_blocks = }")
num_blocks = self._upper_bound_num_blocks
max_batch_tokens = int(greatest_solution * m)
if max_batch_tokens > self._upper_bound_max_batch_tokens:
logger.info(f"{max_batch_tokens = } is too large, setting to {self._upper_bound_max_batch_tokens = }")
max_batch_tokens = self._upper_bound_max_batch_tokens
return num_blocks, max_batch_tokens
def compute_max_batch_tokens(
self,
num_blocks: int,
max_memory_percent: float,
cache_dtype: torch.dtype = torch.float16,
) -> int:
"""Calculate maximum batch tokens M given a fixed number of cache blocks. The formula for M is given by:
M = (available_memory - 2N * (layer_group_size * page_size * cache_dtype + 2 * num_group))
/ (activation_dtype_size * (N * num_attention_masks + peak_activation_per_token) + 28 + 4 * num_group)
"""
cache_memory = self.get_available_memory(max_memory_percent)
num_pages = num_blocks * self.block_size
# Compute numerator
num = cache_memory
num -= 2 * num_pages * (self.group_size * self.page_size * cache_dtype.itemsize + 2 * self.num_groups)
# Compute denominator
denum = self._activation_dtype.itemsize * (
num_pages * self.num_attention_masks + self.peak_activation_per_token
)
denum += 28 + 4 * self.num_groups
# Compute max batch tokens and return
max_batch_tokens = floor(num / denum)
if max_batch_tokens > self._upper_bound_max_batch_tokens:
logger.info(f"{max_batch_tokens = } is too large, setting to {self._upper_bound_max_batch_tokens = }")
max_batch_tokens = self._upper_bound_max_batch_tokens
return max_batch_tokens
def compute_num_blocks(
self,
max_batch_tokens: int,
max_memory_percent: float,
cache_dtype: torch.dtype = torch.float16,
) -> int:
"""Calculate number of cache blocks N given a fixed maximum token per token M. The formula for N is given by:
N = (available_memory - M * (peak_activation_per_token * activation_dtype + 28 + 4 * num_group))
/ (2 * (layer_group_size * page_size * cache_dtype + 2 * num_group) + M * (num_attention_masks * activation_dtype_size))
"""
cache_memory = self.get_available_memory(max_memory_percent)
# Compute numerator
num = cache_memory
num -= max_batch_tokens * self.peak_activation_per_token * self._activation_dtype.itemsize
num -= max_batch_tokens * (28 + 4 * self.num_groups)
# Compute denominator
denum = 2 * (self.group_size * self.page_size * cache_dtype.itemsize + 2 * self.num_groups)
denum += max_batch_tokens * (self.num_attention_masks * self._activation_dtype.itemsize)
denum += max_batch_tokens * self._activation_dtype.itemsize
# Compute cache size and return number of blocks
num_pages = floor(num / denum)
num_blocks = num_pages // self.block_size
if num_blocks > self._upper_bound_num_blocks:
logger.info(f"{num_blocks = } is too large, setting to {self._upper_bound_num_blocks = }")
num_blocks = self._upper_bound_num_blocks
return num_blocks
def compute_memory_footprint(
self,
num_blocks: int | None = None,
max_batch_tokens: int | None = None,
cache_dtype: torch.dtype = torch.float16,
) -> tuple[int, int, int]:
"""Calculate the memory footprint breakdown for a given number of blocks and maximum batch tokens. The memory
footprint is given by:
available_memory = sum([
MN * num_attention_masks * activation_dtype_size,
2N * (layer_group_size * page_size * cache_dtype + 2 * num_group),
M * (peak_activation_per_token * activation_dtype + 28 + 4 * num_group),
])
but is broken down below.
"""
num_pages = num_blocks * self.block_size
cache_memory_footprint = 2 * self.group_size * num_pages * self.page_size * cache_dtype.itemsize
activation_memory_footprint = self.peak_activation_per_token * self._activation_dtype.itemsize
activation_memory_footprint *= max_batch_tokens
inputs_outputs_positions_and_logits_memory_footprint = 4 * max_batch_tokens * 4 # second 4 is for int32 size
attention_memory_footprint = self.num_attention_masks * self._activation_dtype.itemsize
attention_memory_footprint *= num_pages * max_batch_tokens
cumulative_seqlens_memory_footprint = 3 * max_batch_tokens * 4 # 4 is for int32 size
write_index_memory_footprint = self.num_groups * max_batch_tokens * 4 # 4 is for int32 size
read_index_memory_footprint = self.num_groups * (num_pages + max_batch_tokens) * 4 # 4 is for int32 size
total_memory_footprint = sum(
[
cache_memory_footprint,
activation_memory_footprint,
inputs_outputs_positions_and_logits_memory_footprint,
attention_memory_footprint,
cumulative_seqlens_memory_footprint,
write_index_memory_footprint,
read_index_memory_footprint,
]
)
return total_memory_footprint
|
PagedAttentionMemoryHandler
|
python
|
pypa__warehouse
|
warehouse/oidc/models/_core.py
|
{
"start": 2895,
"end": 3470
}
|
class ____(db.Model):
__tablename__ = "oidc_publisher_project_association"
__table_args__ = (UniqueConstraint("oidc_publisher_id", "project_id"),)
oidc_publisher_id: Mapped[UUID] = mapped_column(
PG_UUID(as_uuid=True),
ForeignKey("oidc_publishers.id"),
nullable=False,
primary_key=True,
)
project_id: Mapped[UUID] = mapped_column(
PG_UUID(as_uuid=True),
ForeignKey("projects.id", onupdate="CASCADE", ondelete="CASCADE"),
nullable=False,
primary_key=True,
)
|
OIDCPublisherProjectAssociation
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-good-numbers.py
|
{
"start": 32,
"end": 541
}
|
class ____(object):
def countGoodNumbers(self, n):
"""
:type n: int
:rtype: int
"""
def powmod(a, b, mod):
a %= mod
result = 1
while b:
if b&1:
result = (result*a)%mod
a = (a*a)%mod
b >>= 1
return result
MOD = 10**9 + 7
return powmod(5, (n+1)//2%(MOD-1), MOD)*powmod(4, n//2%(MOD-1), MOD) % MOD
# Time: O(logn)
# Space: O(1)
|
Solution
|
python
|
mlflow__mlflow
|
tests/crewai/test_crewai_autolog.py
|
{
"start": 4535,
"end": 29728
}
|
class ____(BaseTool):
name: str = "TestTool"
description: str = "test tool"
def _run(self, argument: str) -> str:
return "Tool Answer"
@pytest.fixture
def tool_agent_1():
return Agent(
role="City Selection Expert",
goal=_AGENT_1_GOAL,
backstory=_AGENT_1_BACKSTORY,
tools=[SampleTool()],
llm=llm(),
)
_TASK_1_DESCRIPTION = "Analyze and select the best city for the trip"
_TASK_1_OUTPUT = "Detailed report on the chosen city"
@pytest.fixture
def task_1(simple_agent_1):
return Task(
description=(_TASK_1_DESCRIPTION),
agent=simple_agent_1,
expected_output=_TASK_1_OUTPUT,
)
@pytest.fixture
def task_1_with_tool(tool_agent_1):
return Task(
description=(_TASK_1_DESCRIPTION),
agent=tool_agent_1,
expected_output=_TASK_1_OUTPUT,
)
_TASK_2_DESCRIPTION = "Compile an in-depth guide"
@pytest.fixture
def task_2(simple_agent_2):
return Task(
description=(_TASK_2_DESCRIPTION),
agent=simple_agent_2,
expected_output="Comprehensive city guide",
)
@pytest.fixture
def task_named(simple_agent_1):
return Task(
name="Custom Task Name",
agent=simple_agent_1,
description="noop",
expected_output="noop",
)
def global_autolog():
if IS_TRACING_SDK_ONLY:
pytest.skip("Global autolog is not supported in tracing SDK")
# Libraries used within tests or crewai library
mlflow.autolog(exclude_flavors=["openai", "litellm", "langchain"])
mlflow.utils.import_hooks.notify_module_loaded(crewai)
def clear_autolog_state():
from mlflow.utils.autologging_utils import AUTOLOGGING_INTEGRATIONS
for key in AUTOLOGGING_INTEGRATIONS.keys():
AUTOLOGGING_INTEGRATIONS[key].clear()
mlflow.utils.import_hooks._post_import_hooks = {}
@pytest.fixture(params=[mlflow.crewai.autolog, global_autolog])
def autolog(request):
clear_autolog_state()
yield request.param
clear_autolog_state()
def test_kickoff_enable_disable_autolog(simple_agent_1, task_1, autolog):
crew = Crew(
agents=[
simple_agent_1,
],
tasks=[task_1],
)
with patch("litellm.completion", return_value=_SIMPLE_CHAT_COMPLETION):
autolog()
crew.kickoff()
traces = get_traces()
assert len(traces) == 1
assert traces[0].info.status == "OK"
assert len(traces[0].data.spans) == 5
# Crew
span_0 = traces[0].data.spans[0]
assert span_0.name == "Crew.kickoff"
assert span_0.span_type == SpanType.CHAIN
assert span_0.parent_id is None
assert span_0.inputs == {}
assert span_0.outputs == _CREW_OUTPUT
# Task
span_1 = traces[0].data.spans[1]
assert span_1.name == "Task.execute_sync"
assert span_1.span_type == SpanType.CHAIN
assert span_1.parent_id is span_0.span_id
assert span_1.inputs == {
"context": "",
"tools": [],
}
assert span_1.outputs is not None
# Agent
span_2 = traces[0].data.spans[2]
assert span_2.name == "City Selection Expert"
assert span_2.span_type == SpanType.AGENT
assert span_2.parent_id is span_1.span_id
assert span_2.inputs == {
"context": "",
"tools": [],
}
assert span_2.outputs == _LLM_ANSWER
# LLM
span_3 = traces[0].data.spans[3]
assert span_3.name == "openai/gpt-4o-mini"
assert span_3.span_type == SpanType.LLM
assert span_3.parent_id is span_2.span_id
assert span_3.inputs["messages"] is not None
assert span_3.outputs == f"{_FINAL_ANSWER_KEYWORD} {_LLM_ANSWER}"
# Create Long Term Memory
span_4 = traces[0].data.spans[4]
assert span_4.name == "CrewAgentExecutor._create_long_term_memory"
assert span_4.span_type == SpanType.MEMORY
assert span_4.parent_id is span_2.span_id
assert span_4.inputs == {
"output": {
"output": _LLM_ANSWER,
"text": f"{_FINAL_ANSWER_KEYWORD} {_LLM_ANSWER}",
"thought": "",
}
}
assert span_4.outputs is None
assert traces[0].info.token_usage == {
TokenUsageKey.INPUT_TOKENS: 9,
TokenUsageKey.OUTPUT_TOKENS: 12,
TokenUsageKey.TOTAL_TOKENS: 21,
}
with patch("litellm.completion", return_value=_SIMPLE_CHAT_COMPLETION):
mlflow.crewai.autolog(disable=True)
crew.kickoff()
# No new trace should be created
traces = get_traces()
assert len(traces) == 1
def test_kickoff_failure(simple_agent_1, task_1, autolog):
crew = Crew(
agents=[
simple_agent_1,
],
tasks=[task_1],
)
with patch("litellm.completion", side_effect=Exception("error")):
autolog()
with pytest.raises(Exception, match="error"):
crew.kickoff()
traces = get_traces()
assert len(traces) == 1
assert traces[0].info.status == "ERROR"
assert len(traces[0].data.spans) == 4
# Crew
span_0 = traces[0].data.spans[0]
assert span_0.name == "Crew.kickoff"
assert span_0.span_type == SpanType.CHAIN
assert span_0.parent_id is None
assert span_0.inputs == {}
assert span_0.status.status_code == "ERROR"
# Task
span_1 = traces[0].data.spans[1]
assert span_1.name == "Task.execute_sync"
assert span_1.span_type == SpanType.CHAIN
assert span_1.parent_id is span_0.span_id
assert span_1.inputs == {
"context": "",
"tools": [],
}
assert span_1.status.status_code == "ERROR"
# Agent
span_2 = traces[0].data.spans[2]
assert span_2.name == "City Selection Expert"
assert span_2.span_type == SpanType.AGENT
assert span_2.parent_id is span_1.span_id
assert span_2.inputs == {
"context": "",
"tools": [],
}
assert span_2.status.status_code == "ERROR"
# LLM
span_3 = traces[0].data.spans[3]
assert span_3.name == "openai/gpt-4o-mini"
assert span_3.span_type == SpanType.LLM
assert span_3.parent_id is span_2.span_id
assert span_3.inputs["messages"] is not None
assert span_3.status.status_code == "ERROR"
def test_kickoff_tool_calling(tool_agent_1, task_1_with_tool, autolog):
    """Verify the span tree for a kickoff where the agent first invokes a tool.

    ``litellm.completion`` is patched to return a tool-call response first and
    a final answer second, so two LLM spans are expected under the agent span
    (6 spans total) and the token usage is the sum of both completions.
    """
    crew = Crew(
        agents=[
            tool_agent_1,
        ],
        tasks=[task_1_with_tool],
    )
    with patch("litellm.completion", side_effect=[_TOOL_CHAT_COMPLETION, _SIMPLE_CHAT_COMPLETION]):
        autolog()
        crew.kickoff()
    traces = get_traces()
    assert len(traces) == 1
    assert traces[0].info.status == "OK"
    assert len(traces[0].data.spans) == 6
    # Crew
    span_0 = traces[0].data.spans[0]
    assert span_0.name == "Crew.kickoff"
    assert span_0.span_type == SpanType.CHAIN
    assert span_0.parent_id is None
    assert span_0.inputs == {}
    assert span_0.outputs == _CREW_OUTPUT
    # Task
    span_1 = traces[0].data.spans[1]
    assert span_1.name == "Task.execute_sync"
    assert span_1.span_type == SpanType.CHAIN
    assert span_1.parent_id is span_0.span_id
    assert len(span_1.inputs["tools"]) == 1
    assert span_1.inputs["tools"][0]["name"] == "TestTool"
    assert span_1.outputs is not None
    # Agent
    span_2 = traces[0].data.spans[2]
    assert span_2.name == "City Selection Expert"
    assert span_2.span_type == SpanType.AGENT
    assert span_2.parent_id is span_1.span_id
    assert len(span_2.inputs["tools"]) == 1
    assert span_2.inputs["tools"][0]["name"] == "TestTool"
    assert span_2.outputs == _LLM_ANSWER
    # LLM - tool calling
    span_3 = traces[0].data.spans[3]
    assert span_3.name == "openai/gpt-4o-mini"
    assert span_3.span_type == SpanType.LLM
    assert span_3.parent_id is span_2.span_id
    assert span_3.inputs["messages"] is not None
    assert "Action: TestTool" in span_3.outputs
    # LLM - return answer
    span_4 = traces[0].data.spans[4]
    assert span_4.name == "openai/gpt-4o-mini"
    assert span_4.span_type == SpanType.LLM
    assert span_4.parent_id is span_2.span_id
    assert span_4.inputs["messages"] is not None
    assert span_4.outputs == f"{_FINAL_ANSWER_KEYWORD} {_LLM_ANSWER}"
    # Create Long Term Memory
    span_5 = traces[0].data.spans[5]
    assert span_5.name == "CrewAgentExecutor._create_long_term_memory"
    assert span_5.span_type == SpanType.MEMORY
    assert span_5.parent_id is span_2.span_id
    assert span_5.inputs == {
        "output": {
            "output": _LLM_ANSWER,
            "text": f"{_FINAL_ANSWER_KEYWORD} {_LLM_ANSWER}",
            "thought": "",
        }
    }
    assert span_5.outputs is None
    # Two completions were made, so usage is double the single-call totals.
    assert traces[0].info.token_usage == {
        TokenUsageKey.INPUT_TOKENS: 18,
        TokenUsageKey.OUTPUT_TOKENS: 24,
        TokenUsageKey.TOTAL_TOKENS: 42,
    }
def test_multi_tasks(simple_agent_1, simple_agent_2, task_2, task_1, autolog):
    """Verify the span tree for a crew with two sequential tasks/agents.

    The second task receives the first task's answer as its ``context``, and
    each task contributes a Task/Agent/LLM/Memory sub-tree (9 spans total
    including the root Crew span).
    """
    crew = Crew(
        agents=[
            simple_agent_1,
            simple_agent_2,
        ],
        tasks=[task_1, task_2],
    )
    with patch("litellm.completion", return_value=_SIMPLE_CHAT_COMPLETION):
        autolog()
        crew.kickoff()
    traces = get_traces()
    assert len(traces) == 1
    assert traces[0].info.status == "OK"
    assert len(traces[0].data.spans) == 9
    # Crew
    span_0 = traces[0].data.spans[0]
    assert span_0.name == "Crew.kickoff"
    assert span_0.span_type == SpanType.CHAIN
    assert span_0.parent_id is None
    assert span_0.inputs == {}
    assert span_0.outputs is not None
    # Task
    span_1 = traces[0].data.spans[1]
    assert span_1.name == "Task.execute_sync"
    assert span_1.span_type == SpanType.CHAIN
    assert span_1.parent_id is span_0.span_id
    assert span_1.inputs == {
        "context": "",
        "tools": [],
    }
    assert span_1.outputs is not None
    # Agent
    span_2 = traces[0].data.spans[2]
    assert span_2.name == "City Selection Expert"
    assert span_2.span_type == SpanType.AGENT
    assert span_2.parent_id is span_1.span_id
    assert span_2.inputs == {
        "context": "",
        "tools": [],
    }
    assert span_2.outputs == _LLM_ANSWER
    # LLM
    span_3 = traces[0].data.spans[3]
    assert span_3.name == "openai/gpt-4o-mini"
    assert span_3.span_type == SpanType.LLM
    assert span_3.parent_id is span_2.span_id
    assert span_3.inputs["messages"] is not None
    assert span_3.outputs == f"{_FINAL_ANSWER_KEYWORD} {_LLM_ANSWER}"
    # Create Long Term Memory
    span_4 = traces[0].data.spans[4]
    assert span_4.name == "CrewAgentExecutor._create_long_term_memory"
    assert span_4.span_type == SpanType.MEMORY
    assert span_4.parent_id is span_2.span_id
    assert span_4.inputs == {
        "output": {
            "output": _LLM_ANSWER,
            "text": f"{_FINAL_ANSWER_KEYWORD} {_LLM_ANSWER}",
            "thought": "",
        }
    }
    assert span_4.outputs is None
    # Task (second task; receives the first answer as context)
    span_5 = traces[0].data.spans[5]
    assert span_5.name == "Task.execute_sync"
    assert span_5.span_type == SpanType.CHAIN
    assert span_5.parent_id is span_0.span_id
    assert span_5.inputs == {
        "context": _LLM_ANSWER,
        "tools": [],
    }
    assert span_5.outputs is not None
    # Agent
    span_6 = traces[0].data.spans[6]
    assert span_6.name == "Local Expert at this city"
    assert span_6.span_type == SpanType.AGENT
    assert span_6.parent_id is span_5.span_id
    assert span_6.inputs == {
        "context": _LLM_ANSWER,
        "tools": [],
    }
    assert span_6.outputs == _LLM_ANSWER
    # LLM
    span_7 = traces[0].data.spans[7]
    assert span_7.name == "openai/gpt-4o-mini"
    assert span_7.span_type == SpanType.LLM
    assert span_7.parent_id is span_6.span_id
    assert span_7.inputs["messages"] is not None
    assert span_7.outputs == f"{_FINAL_ANSWER_KEYWORD} {_LLM_ANSWER}"
    # Create Long Term Memory
    span_8 = traces[0].data.spans[8]
    assert span_8.name == "CrewAgentExecutor._create_long_term_memory"
    assert span_8.span_type == SpanType.MEMORY
    assert span_8.parent_id is span_6.span_id
    assert span_8.inputs == {
        "output": {
            "output": _LLM_ANSWER,
            "text": f"{_FINAL_ANSWER_KEYWORD} {_LLM_ANSWER}",
            "thought": "",
        }
    }
    assert span_8.outputs is None
    # Two LLM calls were made (one per task), so usage is doubled.
    assert traces[0].info.token_usage == {
        TokenUsageKey.INPUT_TOKENS: 18,
        TokenUsageKey.OUTPUT_TOKENS: 24,
        TokenUsageKey.TOTAL_TOKENS: 42,
    }
@pytest.mark.skipif(
    Version(crewai.__version__) < Version("0.83.0"),
    reason=("Memory feature in the current style is not available before 0.83.0"),
)
def test_memory(simple_agent_1, task_1, monkeypatch, autolog):
    """Verify the span tree when crew memory is enabled.

    With ``memory=True`` the agent additionally performs LongTermMemory /
    ShortTermMemory / EntityMemory searches before the LLM call, and a
    ShortTermMemory save afterwards. The OpenAI embeddings client is mocked
    so no network calls are made.
    """
    crew = Crew(
        agents=[
            simple_agent_1,
        ],
        tasks=[task_1],
        memory=True,
    )
    with (
        patch("litellm.completion", return_value=_SIMPLE_CHAT_COMPLETION),
        patch("openai.OpenAI") as client,
    ):
        client().embeddings.create.return_value = _EMBEDDING
        autolog()
        crew.kickoff()
    traces = get_traces()
    assert len(traces) == 1
    assert traces[0].info.status == "OK"
    # BUG FIX: the original line read
    #   assert len(...) == 10 if _IS_CREWAI_V1 else 9
    # which parses as `assert (len(...) == 10) if _IS_CREWAI_V1 else 9`,
    # degenerating to `assert 9` (always true) on non-V1 versions — so the
    # span count was never actually checked there. Parenthesize the
    # conditional so the comparison applies on every version.
    assert len(traces[0].data.spans) == (10 if _IS_CREWAI_V1 else 9)
    # Crew
    span_0 = traces[0].data.spans[0]
    assert span_0.name == "Crew.kickoff"
    assert span_0.span_type == SpanType.CHAIN
    assert span_0.parent_id is None
    assert span_0.inputs == {}
    assert span_0.outputs == _CREW_OUTPUT
    # Task
    span_1 = traces[0].data.spans[1]
    assert span_1.name == "Task.execute_sync"
    assert span_1.span_type == SpanType.CHAIN
    assert span_1.parent_id is span_0.span_id
    assert span_1.inputs == {
        "context": "",
        "tools": [],
    }
    assert span_1.outputs is not None
    # Agent
    span_2 = traces[0].data.spans[2]
    assert span_2.name == "City Selection Expert"
    assert span_2.span_type == SpanType.AGENT
    assert span_2.parent_id is span_1.span_id
    assert span_2.inputs == {
        "context": "",
        "tools": [],
    }
    assert span_2.outputs == _LLM_ANSWER
    # LongTermMemory search, performed before the LLM call
    span_3 = traces[0].data.spans[3]
    assert span_3.name == "LongTermMemory.search"
    assert span_3.span_type == SpanType.MEMORY
    assert span_3.parent_id is span_2.span_id
    assert span_3.inputs == {
        "latest_n": 2,
        "task": "Analyze and select the best city for the trip",
    }
    assert span_3.outputs is None
    # ShortTermMemory search
    span_4 = traces[0].data.spans[4]
    assert span_4.name == "ShortTermMemory.search"
    assert span_4.span_type == SpanType.MEMORY
    assert span_4.parent_id is span_2.span_id
    assert span_4.inputs == {"query": "Analyze and select the best city for the trip"}
    assert span_4.outputs == []
    # EntityMemory search
    span_5 = traces[0].data.spans[5]
    assert span_5.name == "EntityMemory.search"
    assert span_5.span_type == SpanType.MEMORY
    assert span_5.parent_id is span_2.span_id
    assert span_5.inputs == {
        "query": "Analyze and select the best city for the trip",
    }
    assert span_5.outputs == []
    # LLM
    span_6 = traces[0].data.spans[6]
    assert span_6.name == "openai/gpt-4o-mini"
    assert span_6.span_type == SpanType.LLM
    assert span_6.parent_id is span_2.span_id
    assert span_6.inputs["messages"] is not None
    assert span_6.outputs == f"{_FINAL_ANSWER_KEYWORD} {_LLM_ANSWER}"
    # ShortTermMemory.save
    span_7 = traces[0].data.spans[7]
    assert span_7.name == "ShortTermMemory.save"
    assert span_7.span_type == SpanType.MEMORY
    assert span_7.parent_id is span_2.span_id
    # CrewAI changed the memory save input format - agent field was removed in newer versions
    expected_memory_inputs = {
        "metadata": {
            "observation": "Analyze and select the best city for the trip",
        },
        "value": f"{_FINAL_ANSWER_KEYWORD} {_LLM_ANSWER}",
    }
    # Add agent field for older CrewAI versions
    if _CREWAI_VERSION < Version("0.175.0"):
        expected_memory_inputs["agent"] = "City Selection Expert"
    assert span_7.inputs == expected_memory_inputs
    assert span_7.outputs is None
    # Create Long Term Memory
    span_8 = traces[0].data.spans[8]
    assert span_8.name == "CrewAgentExecutor._create_long_term_memory"
    assert span_8.span_type == SpanType.MEMORY
    assert span_8.parent_id is span_2.span_id
    assert span_8.inputs == {
        "output": {
            "output": _LLM_ANSWER,
            "text": f"{_FINAL_ANSWER_KEYWORD} {_LLM_ANSWER}",
            "thought": "",
        }
    }
    assert span_8.outputs is None
@pytest.mark.skipif(
    Version(crewai.__version__) < Version("0.83.0")
    or Version(crewai.__version__) >= Version("0.85.0"),
    reason=("Knowledge feature in the current style is available only with 0.83.0"),
)
def test_knowledge(simple_agent_1, task_1, monkeypatch, autolog):
    """Verify that a crew with a knowledge source produces a RETRIEVER span.

    A ``Knowledge.query`` span is expected between the Agent span and the LLM
    span. Only runs on crewai [0.83.0, 0.85.0) where this knowledge API shape
    exists.
    """
    from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

    content = "Users name is John"
    string_source = StringKnowledgeSource(content=content, metadata={"preference": "personal"})
    crew = Crew(
        agents=[
            simple_agent_1,
        ],
        tasks=[task_1],
        knowledge={"sources": [string_source], "metadata": {"preference": "personal"}},
    )
    with patch("litellm.completion", return_value=_SIMPLE_CHAT_COMPLETION):
        autolog()
        crew.kickoff()
    traces = get_traces()
    assert len(traces) == 1
    assert traces[0].info.status == "OK"
    assert len(traces[0].data.spans) == 6
    # Crew
    span_0 = traces[0].data.spans[0]
    assert span_0.name == "Crew.kickoff"
    assert span_0.span_type == SpanType.CHAIN
    assert span_0.parent_id is None
    assert span_0.inputs == {}
    assert span_0.outputs == _CREW_OUTPUT
    # Task
    span_1 = traces[0].data.spans[1]
    assert span_1.name == "Task.execute_sync"
    assert span_1.span_type == SpanType.CHAIN
    assert span_1.parent_id is span_0.span_id
    assert span_1.inputs == {
        "context": "",
        "tools": [],
    }
    assert span_1.outputs is not None
    # Agent
    span_2 = traces[0].data.spans[2]
    assert span_2.name == "City Selection Expert"
    assert span_2.span_type == SpanType.AGENT
    assert span_2.parent_id is span_1.span_id
    assert span_2.inputs == {
        "context": "",
        "tools": [],
    }
    assert span_2.outputs == _LLM_ANSWER
    # Knowledge retrieval happens before the LLM call
    span_3 = traces[0].data.spans[3]
    assert span_3.name == "Knowledge.query"
    assert span_3.span_type == SpanType.RETRIEVER
    assert span_3.parent_id is span_2.span_id
    assert span_3.inputs["query"] is not None
    assert span_3.outputs is not None
    # LLM
    span_4 = traces[0].data.spans[4]
    assert span_4.name == "openai/gpt-4o-mini"
    assert span_4.span_type == SpanType.LLM
    assert span_4.parent_id is span_2.span_id
    assert span_4.inputs["messages"] is not None
    assert span_4.outputs == f"{_FINAL_ANSWER_KEYWORD} {_LLM_ANSWER}"
    # Create Long Term Memory
    span_5 = traces[0].data.spans[5]
    assert span_5.name == "CrewAgentExecutor._create_long_term_memory"
    assert span_5.span_type == SpanType.MEMORY
    assert span_5.parent_id is span_2.span_id
    assert span_5.inputs == {
        "output": {
            "output": _LLM_ANSWER,
            "text": f"{_FINAL_ANSWER_KEYWORD} {_LLM_ANSWER}",
            "thought": "",
        }
    }
    assert span_5.outputs is None
    assert traces[0].info.token_usage == {
        TokenUsageKey.INPUT_TOKENS: 9,
        TokenUsageKey.OUTPUT_TOKENS: 12,
        TokenUsageKey.TOTAL_TOKENS: 21,
    }
def test_kickoff_for_each(simple_agent_1, task_1, autolog):
    """Verify that kickoff_for_each wraps each kickoff in a single root span."""
    crew = Crew(agents=[simple_agent_1], tasks=[task_1])

    with patch("litellm.completion", return_value=_SIMPLE_CHAT_COMPLETION):
        autolog()
        crew.kickoff_for_each([{}])

    traces = get_traces()
    assert len(traces) == 1
    assert traces[0].info.status == "OK"

    spans = traces[0].data.spans
    assert len(spans) == 6
    root_span, crew_span, task_span, agent_span, llm_span, memory_span = spans

    # kickoff_for_each is the root of the tree.
    assert root_span.name == "Crew.kickoff_for_each"
    assert root_span.span_type == SpanType.CHAIN
    assert root_span.parent_id is None
    assert root_span.inputs == {"inputs": [{}]}
    assert root_span.outputs == [_CREW_OUTPUT]

    # One Crew.kickoff per input dict, nested under the root.
    assert crew_span.name == "Crew.kickoff"
    assert crew_span.span_type == SpanType.CHAIN
    assert crew_span.parent_id == root_span.span_id
    assert crew_span.inputs == {
        "inputs": {},
    }
    assert crew_span.outputs == _CREW_OUTPUT

    # Task execution.
    assert task_span.name == "Task.execute_sync"
    assert task_span.span_type == SpanType.CHAIN
    assert task_span.parent_id is crew_span.span_id
    assert task_span.inputs == {
        "context": "",
        "tools": [],
    }
    assert task_span.outputs is not None

    # Agent.
    assert agent_span.name == "City Selection Expert"
    assert agent_span.span_type == SpanType.AGENT
    assert agent_span.parent_id is task_span.span_id
    assert agent_span.inputs == {
        "context": "",
        "tools": [],
    }
    assert agent_span.outputs == _LLM_ANSWER

    # LLM call.
    assert llm_span.name == "openai/gpt-4o-mini"
    assert llm_span.span_type == SpanType.LLM
    assert llm_span.parent_id is agent_span.span_id
    assert llm_span.inputs["messages"] is not None
    assert llm_span.outputs == f"{_FINAL_ANSWER_KEYWORD} {_LLM_ANSWER}"

    # Long-term-memory creation after the answer.
    assert memory_span.name == "CrewAgentExecutor._create_long_term_memory"
    assert memory_span.span_type == SpanType.MEMORY
    assert memory_span.parent_id is agent_span.span_id
    assert memory_span.inputs == {
        "output": {
            "output": _LLM_ANSWER,
            "text": f"{_FINAL_ANSWER_KEYWORD} {_LLM_ANSWER}",
            "thought": "",
        }
    }
    assert memory_span.outputs is None
def test_flow(simple_agent_1, task_1, autolog):
    """Verify that a Flow kickoff becomes the root span above the Crew tree."""
    crew = Crew(
        agents=[
            simple_agent_1,
        ],
        tasks=[task_1],
    )

    class TestFlow(Flow):
        @start()
        def start(self):
            return crew.kickoff()

    flow = TestFlow()
    with patch("litellm.completion", return_value=_SIMPLE_CHAT_COMPLETION):
        autolog()
        flow.kickoff()
    traces = get_traces()
    assert len(traces) == 1
    assert traces[0].info.status == "OK"
    assert len(traces[0].data.spans) == 6
    span_0 = traces[0].data.spans[0]
    # Flow kickoff is the root span
    assert span_0.name == "TestFlow.kickoff"
    assert span_0.span_type == SpanType.CHAIN
    assert span_0.parent_id is None
    assert span_0.inputs == {}
    assert span_0.outputs == _CREW_OUTPUT
    # Crew
    span_1 = traces[0].data.spans[1]
    assert span_1.name == "Crew.kickoff"
    assert span_1.span_type == SpanType.CHAIN
    assert span_1.parent_id == span_0.span_id
    assert span_1.inputs == {}
    assert span_1.outputs == _CREW_OUTPUT
    # Task
    span_2 = traces[0].data.spans[2]
    assert span_2.name == "Task.execute_sync"
    assert span_2.span_type == SpanType.CHAIN
    assert span_2.parent_id is span_1.span_id
    assert span_2.inputs == {
        "context": "",
        "tools": [],
    }
    assert span_2.outputs is not None
    # Agent
    span_3 = traces[0].data.spans[3]
    assert span_3.name == "City Selection Expert"
    assert span_3.span_type == SpanType.AGENT
    assert span_3.parent_id is span_2.span_id
    assert span_3.inputs == {
        "context": "",
        "tools": [],
    }
    assert span_3.outputs == _LLM_ANSWER
    # LLM
    span_4 = traces[0].data.spans[4]
    assert span_4.name == "openai/gpt-4o-mini"
    assert span_4.span_type == SpanType.LLM
    assert span_4.parent_id is span_3.span_id
    assert span_4.inputs["messages"] is not None
    assert span_4.outputs == f"{_FINAL_ANSWER_KEYWORD} {_LLM_ANSWER}"
    # Create Long Term Memory
    span_5 = traces[0].data.spans[5]
    assert span_5.name == "CrewAgentExecutor._create_long_term_memory"
    assert span_5.span_type == SpanType.MEMORY
    assert span_5.parent_id is span_3.span_id
    assert span_5.inputs == {
        "output": {
            "output": _LLM_ANSWER,
            "text": f"{_FINAL_ANSWER_KEYWORD} {_LLM_ANSWER}",
            "thought": "",
        }
    }
    assert span_5.outputs is None
def test_crew_task_named(simple_agent_1, task_named, autolog):
    """Verify that explicit ``name`` values override the default span names.

    When the Crew and Task are given custom names, the Crew and Task spans
    should be named accordingly instead of "Crew.kickoff" / "Task.execute_sync".
    """
    crew = Crew(
        name="Custom Crew Name",
        agents=[
            simple_agent_1,
        ],
        tasks=[task_named],
    )
    with patch("litellm.completion", return_value=_SIMPLE_CHAT_COMPLETION):
        autolog()
        crew.kickoff()
    traces = get_traces()
    # FIX: the original asserted `len(traces) == 1` twice in a row; the
    # duplicate added nothing and is removed.
    assert len(traces) == 1
    assert traces[0].info.status == "OK"
    assert len(traces[0].data.spans) >= 1
    # Crew span uses the custom crew name
    span_0 = traces[0].data.spans[0]
    assert span_0.name == "Custom Crew Name"
    assert span_0.span_type == SpanType.CHAIN
    assert span_0.parent_id is None
    # Task span uses the custom task name
    span_1 = traces[0].data.spans[1]
    assert span_1.name == "Custom Task Name"
    assert span_1.span_type == SpanType.CHAIN
    assert span_1.parent_id is span_0.span_id
|
SampleTool
|
python
|
pytorch__pytorch
|
test/dynamo/test_autograd_function.py
|
{
"start": 1138,
"end": 1240
}
|
class ____(torch.nn.Module):
def forward(self, foo):
return CustomFunc1().apply(foo)
|
Module1
|
python
|
PyCQA__pylint
|
tests/functional/u/unused/unused_private_member.py
|
{
"start": 8886,
"end": 9034
}
|
class ____:
def __bar(self, x):
print(x)
fizz = partialmethod(__bar, 'fizz')
test = FalsePositive4756a()
test.fizz()
|
FalsePositive4756a
|
python
|
encode__django-rest-framework
|
tests/test_api_client.py
|
{
"start": 5381,
"end": 6159
}
|
class ____(APIView):
def get(self, request):
headers = {
key[5:].replace('_', '-'): value
for key, value in request.META.items()
if key.startswith('HTTP_')
}
return Response({
'method': request.method,
'headers': headers
})
urlpatterns = [
path('', SchemaView.as_view()),
path('example/', ListView.as_view()),
re_path(r'^example/(?P<id>[0-9]+)/$', DetailView.as_view()),
path('upload/', UploadView.as_view()),
path('download/', DownloadView.as_view()),
path('text/', TextView.as_view()),
path('headers/', HeadersView.as_view()),
]
@unittest.skipUnless(coreapi, 'coreapi not installed')
@override_settings(ROOT_URLCONF='tests.test_api_client')
|
HeadersView
|
python
|
ray-project__ray
|
python/ray/serve/tests/test_custom_autoscaling_metrics.py
|
{
"start": 1591,
"end": 11438
}
|
class ____:
"""Check that redeploying a deployment doesn't reset its start time."""
def test_custom_serve_metrics(self, serve_instance):
@serve.deployment(
autoscaling_config={
"min_replicas": 1,
"max_replicas": 5,
"upscale_delay_s": 0.5,
"downscale_delay_s": 0.5,
"metrics_interval_s": 0.1,
"look_back_period_s": 1,
}
)
class DummyMetricIncrementer:
def __init__(self):
self.counter = 0
async def __call__(self) -> str:
self.counter += 1
return "Hello, world"
def record_autoscaling_stats(self) -> Dict[str, int]:
# Increments each time the deployment has been called
return {"counter": self.counter}
app_name = "test_custom_metrics_app"
handle = serve.run(
DummyMetricIncrementer.bind(), name=app_name, route_prefix="/"
)
dep_id = DeploymentID(name="DummyMetricIncrementer", app_name=app_name)
# Call deployment 3 times
[handle.remote() for _ in range(3)]
def check_counter_value():
metrics = get_autoscaling_metrics_from_controller(serve_instance, dep_id)
return "counter" in metrics and metrics["counter"][-1][0].value == 3
# The final counter value recorded by the controller should be 3
wait_for_condition(
check_counter_value,
timeout=15,
)
def test_custom_serve_timeout(self, serve_instance):
@serve.deployment(
autoscaling_config={
"min_replicas": 1,
"max_replicas": 5,
"upscale_delay_s": 2,
"downscale_delay_s": 10,
"metrics_interval_s": 1,
"look_back_period_s": 1,
}
)
class DummyMetricTimeout:
def __init__(self):
self.counter = 0
async def __call__(self) -> str:
self.counter += 1
return "Hello, world"
async def record_autoscaling_stats(self) -> Dict[str, int]:
# Block here until it is forced to cancel due to timeout beyond RAY_SERVE_RECORD_AUTOSCALING_STATS_TIMEOUT_S
await asyncio.sleep(1000)
app_name = "test_custom_metrics_app"
handle = serve.run(DummyMetricTimeout.bind(), name=app_name, route_prefix="/")
dep_id = DeploymentID(name="DummyMetricTimeout", app_name=app_name)
# Call deployment 3 times
[handle.remote() for _ in range(3)]
# There should be no counter metric because asyncio timeout would have stopped the method execution
metrics = get_autoscaling_metrics_from_controller(serve_instance, dep_id)
assert metrics.get("counter", None) is None
def test_custom_serve_invalid_metric_type(self, serve_instance):
@serve.deployment(
autoscaling_config={
"min_replicas": 1,
"max_replicas": 5,
"upscale_delay_s": 2,
"downscale_delay_s": 10,
"metrics_interval_s": 1,
"look_back_period_s": 1,
}
)
class DummyInvalidMetric:
def __init__(self):
self.counter = 0
async def __call__(self) -> str:
self.counter += 1
return "Hello, world"
def record_autoscaling_stats(self) -> Dict[str, str]:
# Return an invalid metric dict whose valuse are neither int nor float
return {"counter": "not_an_int"}
app_name = "test_custom_metrics_app"
handle = serve.run(DummyInvalidMetric.bind(), name=app_name, route_prefix="/")
dep_id = DeploymentID(name="DummyInvalidMetric", app_name=app_name)
# Call deployment 3 times
[handle.remote() for _ in range(3)]
# There should be no counter metric because it failed validation, must be int or float
metrics = get_autoscaling_metrics_from_controller(serve_instance, dep_id)
assert metrics.get("counter", None) is None
def test_policy_using_custom_metrics(self, serve_instance):
signal = SignalActor.remote()
@serve.deployment(
autoscaling_config={
"min_replicas": 1,
"max_replicas": 5,
"upscale_delay_s": 2,
"downscale_delay_s": 1,
"metrics_interval_s": 0.1,
"look_back_period_s": 1,
"target_ongoing_requests": 10,
"policy": AutoscalingPolicy(policy_function=custom_autoscaling_policy),
},
max_ongoing_requests=100,
)
class CustomMetricsDeployment:
def __init__(self):
self.counter = 0
async def __call__(self) -> str:
self.counter += 1
await signal.wait.remote()
return "Hello, world"
def record_autoscaling_stats(self) -> Dict[str, int]:
return {"counter": self.counter}
handle = serve.run(CustomMetricsDeployment.bind())
[handle.remote() for _ in range(10)]
wait_for_condition(lambda: ray.get(signal.cur_num_waiters.remote()) == 10)
wait_for_condition(
check_num_replicas_eq, name="CustomMetricsDeployment", target=3
)
signal.send.remote()
def test_max_cpu_usage_autoscaling_policy(self, serve_instance):
"""Test autoscaling policy based on max CPU usage from documentation example."""
signal = SignalActor.remote()
@serve.deployment(
autoscaling_config={
"min_replicas": 1,
"max_replicas": 5,
"upscale_delay_s": 0.5,
"downscale_delay_s": 0.5,
"metrics_interval_s": 0.1,
"look_back_period_s": 1,
"target_ongoing_requests": 10,
"policy": AutoscalingPolicy(
policy_function=max_cpu_usage_autoscaling_policy
),
},
max_ongoing_requests=100,
)
class MaxCpuUsageDeployment:
def __init__(self):
self.cpu_usage = 0
async def __call__(self) -> str:
self.cpu_usage += 1
await signal.wait.remote()
return "Hello, world"
def record_autoscaling_stats(self) -> Dict[str, int]:
return {"cpu_usage": self.cpu_usage}
handle = serve.run(MaxCpuUsageDeployment.bind())
# Test scale up when CPU usage > 80
# Set CPU usage to 90 to trigger scale up
dep_id = DeploymentID(name="MaxCpuUsageDeployment")
# Send requests to increase CPU usage
[handle.remote() for _ in range(90)]
wait_for_condition(lambda: ray.get(signal.cur_num_waiters.remote()) == 90)
# Wait for metrics to be recorded and policy to trigger scale up
def check_scale_up():
metrics = get_autoscaling_metrics_from_controller(serve_instance, dep_id)
return "cpu_usage" in metrics and metrics["cpu_usage"][-1][0].value >= 90
wait_for_condition(check_scale_up, timeout=10)
# Should scale up to 2 replicas due to high CPU usage
wait_for_condition(
check_num_replicas_eq, name="MaxCpuUsageDeployment", target=2, timeout=15
)
# Release signal and test scale down when CPU usage < 30
signal.send.remote()
wait_for_condition(lambda: ray.get(signal.cur_num_waiters.remote()) == 0)
signal = SignalActor.remote()
# Reset CPU usage to low value by creating new deployment instance
# This simulates low CPU usage scenario
@serve.deployment(
autoscaling_config={
"min_replicas": 1,
"max_replicas": 5,
"upscale_delay_s": 0.5,
"downscale_delay_s": 0.5,
"metrics_interval_s": 0.1,
"look_back_period_s": 1,
"target_ongoing_requests": 10,
"policy": AutoscalingPolicy(
policy_function=max_cpu_usage_autoscaling_policy
),
},
max_ongoing_requests=100,
)
class LowCpuUsageDeployment:
def __init__(self):
self.cpu_usage = 0
async def __call__(self) -> str:
self.cpu_usage += 1
await signal.wait.remote()
return "Hello, world"
def record_autoscaling_stats(self) -> Dict[str, int]:
# Return low CPU usage to trigger scale down
return {"cpu_usage": 20}
handle = serve.run(LowCpuUsageDeployment.bind())
# Send a few requests to establish low CPU usage
[handle.remote() for _ in range(5)]
wait_for_condition(lambda: ray.get(signal.cur_num_waiters.remote()) == 5)
# Wait for metrics to be recorded
dep_id_low = DeploymentID(name="LowCpuUsageDeployment")
def check_low_cpu():
metrics = get_autoscaling_metrics_from_controller(
serve_instance, dep_id_low
)
return "cpu_usage" in metrics and metrics["cpu_usage"][-1][0].value <= 30
wait_for_condition(check_low_cpu, timeout=10)
# Should downscale to 1 replica due to low CPU usage
wait_for_condition(
check_num_replicas_eq, name="LowCpuUsageDeployment", target=1, timeout=15
)
signal.send.remote()
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
|
TestCustomServeMetrics
|
python
|
sphinx-doc__sphinx
|
tests/roots/test-api-set-translator/conf.py
|
{
"start": 951,
"end": 1002
}
|
class ____(XMLTranslator):
pass
|
ConfXMLTranslator
|
python
|
spyder-ide__spyder
|
external-deps/spyder-kernels/spyder_kernels/console/kernelapp.py
|
{
"start": 675,
"end": 1413
}
|
class ____(Thread):
"""
Daemon thread that terminates the program immediately when the parent
process no longer exists.
Notes
-----
This is based on the ParentPollerUnix class from ipykernel.
"""
def __init__(self, parent_pid=0):
"""Initialize the poller."""
super().__init__()
self.parent_pid = parent_pid
self.daemon = True
def run(self):
"""Run the poller."""
while True:
if self.parent_pid != 0 and not psutil.pid_exists(self.parent_pid):
get_logger().warning(
"Parent appears to have exited, shutting down."
)
os._exit(1)
time.sleep(1.0)
|
SpyderParentPoller
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/linalg/linear_operator_lower_triangular.py
|
{
"start": 1324,
"end": 8976
}
|
class ____(linear_operator.LinearOperator):
"""`LinearOperator` acting like a [batch] square lower triangular matrix.
This operator acts like a [batch] lower triangular matrix `A` with shape
`[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `N x N` matrix.
`LinearOperatorLowerTriangular` is initialized with a `Tensor` having
dimensions `[B1,...,Bb, N, N]`. The upper triangle of the last two
dimensions is ignored.
```python
# Create a 2 x 2 lower-triangular linear operator.
tril = [[1., 2.], [3., 4.]]
operator = LinearOperatorLowerTriangular(tril)
# The upper triangle is ignored.
operator.to_dense()
==> [[1., 0.]
[3., 4.]]
operator.shape
==> [2, 2]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> Shape [2, 4] Tensor
# Create a [2, 3] batch of 4 x 4 linear operators.
tril = tf.random.normal(shape=[2, 3, 4, 4])
operator = LinearOperatorLowerTriangular(tril)
```
#### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [N, N], with b >= 0
x.shape = [B1,...,Bb] + [N, R], with R >= 0.
```
#### Performance
Suppose `operator` is a `LinearOperatorLowerTriangular` of shape `[N, N]`,
and `x.shape = [N, R]`. Then
* `operator.matmul(x)` involves `N^2 * R` multiplications.
* `operator.solve(x)` involves `N * R` size `N` back-substitutions.
* `operator.determinant()` involves a size `N` `reduce_prod`.
If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
tril,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorLowerTriangular"):
r"""Initialize a `LinearOperatorLowerTriangular`.
Args:
tril: Shape `[B1,...,Bb, N, N]` with `b >= 0`, `N >= 0`.
The lower triangular part of `tril` defines this operator. The strictly
upper triangle is ignored.
is_non_singular: Expect that this operator is non-singular.
This operator is non-singular if and only if its diagonal elements are
all non-zero.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. This operator is self-adjoint only if it is diagonal with
real-valued diagonal entries. In this case it is advised to use
`LinearOperatorDiag`.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
Raises:
ValueError: If `is_square` is `False`.
"""
parameters = dict(
tril=tril,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name
)
if is_square is False:
raise ValueError(
"Only square lower triangular operators supported at this time.")
is_square = True
with ops.name_scope(name, values=[tril]):
self._tril = linear_operator_util.convert_nonref_to_tensor(tril,
name="tril")
self._check_tril(self._tril)
super(LinearOperatorLowerTriangular, self).__init__(
dtype=self._tril.dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
parameters=parameters,
name=name)
@property
def tril(self):
"""The lower triangular matrix defining this operator."""
return self._tril
def _check_tril(self, tril):
"""Static check of the `tril` argument."""
if tril.shape.ndims is not None and tril.shape.ndims < 2:
raise ValueError(
"Argument tril must have at least 2 dimensions. Found: %s"
% tril)
def _get_tril(self):
"""Gets the `tril` kwarg, with upper part zero-d out."""
return array_ops.matrix_band_part(self._tril, -1, 0)
def _get_diag(self):
"""Gets the diagonal part of `tril` kwarg."""
return array_ops.matrix_diag_part(self._tril)
def _shape(self):
return self._tril.shape
def _shape_tensor(self):
return array_ops.shape(self._tril)
def _assert_non_singular(self):
return linear_operator_util.assert_no_entries_with_modulus_zero(
self._get_diag(),
message="Singular operator: Diagonal contained zero values.")
def _matmul(self, x, adjoint=False, adjoint_arg=False):
return math_ops.matmul(
self._get_tril(), x, adjoint_a=adjoint, adjoint_b=adjoint_arg)
def _linop_matmul(
self,
left_operator: "LinearOperatorLowerTriangular",
right_operator: linear_operator.LinearOperator,
) -> linear_operator.LinearOperator:
# instance check of linear_operator_diag.LinearOperatorDiag
if hasattr(right_operator, "_check_diag"):
return LinearOperatorLowerTriangular(
tril=left_operator.to_dense() * right_operator.diag,
is_non_singular=property_hint_util.combined_non_singular_hint(
right_operator, left_operator),
# This is safe to do since the Triangular matrix is only self-adjoint
# when it is a diagonal matrix, and hence commutes.
is_self_adjoint=property_hint_util.combined_commuting_self_adjoint_hint(
right_operator, left_operator),
is_positive_definite=None,
is_square=True)
return super()._linop_matmul(left_operator, right_operator)
def _determinant(self):
return math_ops.reduce_prod(self._get_diag(), axis=[-1])
def _log_abs_determinant(self):
return math_ops.reduce_sum(
math_ops.log(math_ops.abs(self._get_diag())), axis=[-1])
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
return linalg.triangular_solve(
self._get_tril(), rhs, lower=True, adjoint=adjoint)
def _to_dense(self):
return self._get_tril()
def _eigvals(self):
return self._get_diag()
@property
def _composite_tensor_fields(self):
return ("tril",)
@property
def _experimental_parameter_ndims_to_matrix_ndims(self):
return {"tril": 2}
|
LinearOperatorLowerTriangular
|
python
|
doocs__leetcode
|
solution/0400-0499/0421.Maximum XOR of Two Numbers in an Array/Solution.py
|
{
"start": 0,
"end": 683
}
|
class ____:
__slots__ = ("children",)
def __init__(self):
self.children: List[Trie | None] = [None, None]
def insert(self, x: int):
node = self
for i in range(30, -1, -1):
v = x >> i & 1
if node.children[v] is None:
node.children[v] = Trie()
node = node.children[v]
def search(self, x: int) -> int:
node = self
ans = 0
for i in range(30, -1, -1):
v = x >> i & 1
if node.children[v ^ 1]:
ans |= 1 << i
node = node.children[v ^ 1]
else:
node = node.children[v]
return ans
|
Trie
|
python
|
walkccc__LeetCode
|
solutions/3467. Transform Array by Parity/3467.py
|
{
"start": 0,
"end": 181
}
|
class ____:
def transformArray(self, nums: list[int]) -> list[int]:
return ([0] * sum(num % 2 == 0 for num in nums) +
[1] * sum(num % 2 == 1 for num in nums))
|
Solution
|
python
|
walkccc__LeetCode
|
solutions/2127. Maximum Employees to Be Invited to a Meeting/2127.py
|
{
"start": 85,
"end": 1732
}
|
class ____:
def maximumInvitations(self, favorite: list[int]) -> int:
n = len(favorite)
sumComponentsLength = 0 # the component: a -> b -> c <-> x <- y
graph = [[] for _ in range(n)]
inDegrees = [0] * n
maxChainLength = [1] * n
# Build the graph.
for i, f in enumerate(favorite):
graph[i].append(f)
inDegrees[f] += 1
# Perform topological sorting.
q = collections.deque([i for i, d in enumerate(inDegrees) if d == 0])
while q:
u = q.popleft()
for v in graph[u]:
inDegrees[v] -= 1
if inDegrees[v] == 0:
q.append(v)
maxChainLength[v] = max(maxChainLength[v], 1 + maxChainLength[u])
for i in range(n):
if favorite[favorite[i]] == i:
# i <-> favorite[i] (the cycle's length = 2)
sumComponentsLength += maxChainLength[i] + maxChainLength[favorite[i]]
maxCycleLength = 0 # Cycle: a -> b -> c -> a
parent = [-1] * n
seen = set()
states = [State.INIT] * n
def findCycle(u: int) -> None:
nonlocal maxCycleLength
seen.add(u)
states[u] = State.VISITING
for v in graph[u]:
if v not in seen:
parent[v] = u
findCycle(v)
elif states[v] == State.VISITING:
# Find the cycle's length.
curr = u
cycleLength = 1
while curr != v:
curr = parent[curr]
cycleLength += 1
maxCycleLength = max(maxCycleLength, cycleLength)
states[u] = State.VISITED
for i in range(n):
if i not in seen:
findCycle(i)
return max(sumComponentsLength // 2, maxCycleLength)
|
Solution
|
python
|
realpython__materials
|
python-serialize/http-payload/django-rest-api/rest_api/serializers.py
|
{
"start": 190,
"end": 313
}
|
class ____(serializers.ModelSerializer):
class Meta:
model = models.User
fields = ["name"]
|
UserSerializerIn
|
python
|
jina-ai__jina
|
tests/integration/runtimes/test_runtimes.py
|
{
"start": 21712,
"end": 30390
}
|
class ____(Executor):
@requests
def foo(self, docs, **kwargs):
for doc in docs:
if doc.text == 'slow':
time.sleep(1.0)
async def _create_worker(pod, port_generator, type='worker', executor=None):
worker_port = port_generator()
worker_process = multiprocessing.Process(
target=_create_worker_runtime, args=(worker_port, f'{pod}/{type}', executor)
)
worker_process.start()
return worker_port, worker_process
def _create_worker_runtime(port, name='', executor=None):
args = _generate_pod_args()
args.port = [port]
args.name = name
if executor:
args.uses = executor
with AsyncNewLoopRuntime(args, req_handler_cls=WorkerRequestHandler) as runtime:
runtime.run_forever()
def _create_head_runtime(
port,
connection_list_dict,
name='',
polling='ANY',
uses_before=None,
uses_after=None,
retries=-1,
):
args = _generate_pod_args()
args.port = [port]
args.name = name
args.retries = retries
args.polling = PollingType.ANY if polling == 'ANY' else PollingType.ALL
if uses_before:
args.uses_before_address = uses_before
if uses_after:
args.uses_after_address = uses_after
args.connection_list = json.dumps(connection_list_dict)
with AsyncNewLoopRuntime(args, req_handler_cls=HeaderRequestHandler) as runtime:
runtime.run_forever()
def _create_gateway_runtime(
graph_description,
pod_addresses,
port,
protocol='grpc',
retries=-1,
log_config='default',
):
with AsyncNewLoopRuntime(
set_gateway_parser().parse_args(
[
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
'--port',
str(port),
'--retries',
str(retries),
'--protocol',
protocol,
'--log-config',
log_config,
]
),
req_handler_cls=GatewayRequestHandler,
) as runtime:
runtime.run_forever()
async def async_inputs():
for _ in range(20):
yield Document(text='client0-Request')
@pytest.mark.asyncio
async def test_head_runtime_with_offline_shards(port_generator):
head_port = port_generator()
# create the shards
shard_processes = []
worker_ports = []
connection_list_dict = defaultdict(list)
for i in range(2):
# create worker
worker_port = port_generator()
# create a single worker runtime
worker_process = multiprocessing.Process(
target=_create_worker_runtime, args=(worker_port, f'pod0/shard/{i}')
)
shard_processes.append(worker_process)
worker_process.start()
await asyncio.sleep(0.1)
worker_ports.append(worker_port)
connection_list_dict[i].append(f'127.0.0.1:{worker_port}')
# create a failing connection/port
worker_port = port_generator()
worker_ports.append(worker_port)
connection_list_dict[i + 1].append(f'127.0.0.1:{worker_port}')
# create a single head runtime
head_process = multiprocessing.Process(
target=_create_head_runtime,
args=(head_port, connection_list_dict, 'head', 'ALL'),
)
head_process.start()
BaseServer.wait_for_ready_or_shutdown(
timeout=1.0,
ctrl_address=f'0.0.0.0:{head_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
with grpc.insecure_channel(
f'0.0.0.0:{head_port}',
options=get_default_grpc_options(),
) as channel:
stub = jina_pb2_grpc.JinaSingleDataRequestRPCStub(channel)
_, call = stub.process_single_data.with_call(
list(request_generator('/index', DocumentArray([Document(text='abc')])))[0]
)
call_metadata = dict(call.trailing_metadata())
assert len(call_metadata) == 2
assert call_metadata['total_shards'] == '3'
assert call_metadata['failed_shards'] == '1'
# clean up runtimes
head_process.terminate()
for shard_process in shard_processes:
shard_process.terminate()
head_process.join()
for shard_process in shard_processes:
shard_process.join()
def test_runtime_slow_processing_readiness(port_generator):
class SlowProcessingExecutor(Executor):
@requests
def foo(self, **kwargs):
time.sleep(10)
worker_port = port_generator()
# create a single worker runtime
worker_process = multiprocessing.Process(
target=_create_worker_runtime,
args=(worker_port, f'pod0', 'SlowProcessingExecutor'),
)
try:
worker_process.start()
BaseServer.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{worker_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
def _send_messages():
with grpc.insecure_channel(
f'0.0.0.0:{worker_port}',
options=get_default_grpc_options(),
) as channel:
stub = jina_pb2_grpc.JinaSingleDataRequestRPCStub(channel)
resp, _ = stub.process_single_data.with_call(
list(request_generator('/', DocumentArray([Document(text='abc')])))[
0
]
)
assert resp.docs[0].text == 'abc'
send_message_process = multiprocessing.Process(target=_send_messages)
send_message_process.start()
for _ in range(50):
is_ready = BaseServer.is_ready(f'0.0.0.0:{worker_port}')
assert is_ready
time.sleep(0.5)
except Exception:
raise
finally:
worker_process.terminate()
send_message_process.terminate()
worker_process.join()
send_message_process.join()
assert worker_process.exitcode == 0
assert send_message_process.exitcode == 0
@pytest.mark.parametrize('runtime', ['gateway', 'head', 'worker'])
def test_grpc_server_and_channel_args(monkeypatch, mocker, runtime):
call_recording_mock = mocker.Mock()
monkeypatch.setattr(
jina.serve.networking.utils,
'get_server_side_grpc_options',
partial(_custom_grpc_options, call_recording_mock),
)
monkeypatch.setattr(
jina.serve.runtimes.servers.grpc,
'get_server_side_grpc_options',
partial(_custom_grpc_options, call_recording_mock),
)
if runtime == 'gateway':
deployment0_port = random_port()
with AsyncNewLoopRuntime(
set_gateway_parser().parse_args(
[
'--graph-description',
'{"start-gateway": ["deployment0"], "deployment0": ["end-gateway"]}',
'--deployments-addresses',
'{"deployment0": ["0.0.0.0:' + f'{deployment0_port}' + '"]}',
'--grpc-server-options',
'{"grpc.max_send_message_length": 10000}',
'--grpc-channel-options',
'{"grpc.keepalive_time_ms": 9999}',
]
),
req_handler_cls=GatewayRequestHandler,
):
pass
# there should be at least two calls:
# 1 when creating the grpc server
# 2 when creating the connection to the deployment0
assert call_recording_mock.call_count >= 2
elif runtime == 'head':
args = _generate_pod_args()
args.polling = PollingType.ANY
connection_list_dict = {0: [f'fake_ip:8080']}
args.connection_list = json.dumps(connection_list_dict)
args.grpc_server_options = {"grpc.max_send_message_length": 10000}
args.grpc_channel_options = {"grpc.keepalive_time_ms": 9999}
with AsyncNewLoopRuntime(
args,
req_handler_cls=HeaderRequestHandler,
):
pass
# there should be at least two calls:
# 1 when creating the grpc server
# 2 when creating the connection to the fake deployment
assert call_recording_mock.call_count >= 2
else:
args = _generate_pod_args()
args.grpc_server_options = {"grpc.max_send_message_length": 10000}
args.grpc_channel_options = {"grpc.keepalive_time_ms": 9999}
with AsyncNewLoopRuntime(
args,
req_handler_cls=WorkerRequestHandler,
):
pass
# there should be one call:
# 1 when creating the grpc server
assert call_recording_mock.call_count == 1
|
FastSlowExecutor
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/tasks.py
|
{
"start": 166440,
"end": 167637
}
|
class ____(Response):
"""
Response of tasks.delete_artifacts endpoint.
:param deleted: Indicates if the task was updated successfully
:type deleted: int
"""
_service = "tasks"
_action = "delete_artifacts"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"deleted": {
"description": "Indicates if the task was updated successfully",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, deleted: Optional[int] = None, **kwargs: Any) -> None:
super(DeleteArtifactsResponse, self).__init__(**kwargs)
self.deleted = deleted
@schema_property("deleted")
def deleted(self) -> Optional[int]:
return self._property_deleted
@deleted.setter
def deleted(self, value: Optional[int]) -> None:
if value is None:
self._property_deleted = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "deleted", six.integer_types)
self._property_deleted = value
|
DeleteArtifactsResponse
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/pytest/test_statistics.py
|
{
"start": 2759,
"end": 3287
}
|
class ____(TestCase):
@given(integers())
def test_all_valid(self, x):
pass
"""
def test_prints_statistics_for_unittest_tests(testdir):
script = testdir.makepyfile(UNITTEST_TESTSUITE)
result = testdir.runpytest(script, PRINT_STATISTICS_OPTION)
out = "\n".join(result.stdout.lines)
assert "Hypothesis Statistics" in out
assert "TestStuff::test_all_valid" in out
assert "max_examples=100" in out
STATEFUL_TESTSUITE = """
from hypothesis.stateful import RuleBasedStateMachine, rule
|
TestStuff
|
python
|
ansible__ansible
|
lib/ansible/modules/hostname.py
|
{
"start": 24966,
"end": 25085
}
|
class ____(Hostname):
platform = 'Linux'
distribution = 'Devuan'
strategy_class = FileStrategy
|
DevuanHostname
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/classes/generative.py
|
{
"start": 44283,
"end": 45000
}
|
class ____(BaseModel):
prompt: str
non_blob_properties: Optional[List[str]]
image_properties: Optional[List[str]]
images: Optional[Iterable[str]]
metadata: bool = False
def _to_grpc(
self, provider: _GenerativeConfigRuntime
) -> generative_pb2.GenerativeSearch.Grouped:
return generative_pb2.GenerativeSearch.Grouped(
task=self.prompt,
properties=_to_text_array(self.non_blob_properties),
queries=[
provider._to_grpc(
_GenerativeConfigRuntimeOptions(
self.metadata, self.images, self.image_properties
)
)
],
)
|
_GroupedTask
|
python
|
astropy__astropy
|
astropy/convolution/tests/test_convolve.py
|
{
"start": 24946,
"end": 43480
}
|
class ____:
def test_list(self):
"""
Test that convolve works correctly when inputs are lists
"""
x = [
[[1, 1, 1], [1, 1, 1], [1, 1, 1]],
[[1, 1, 1], [1, 1, 1], [1, 1, 1]],
[[1, 1, 1], [1, 1, 1], [1, 1, 1]],
]
z = convolve(x, x, boundary="fill", fill_value=1, normalize_kernel=False)
assert_array_almost_equal_nulp(z / 27, x, 10)
@pytest.mark.parametrize("dtype_array", VALID_DTYPES)
@pytest.mark.parametrize("dtype_kernel", VALID_DTYPES)
def test_dtype(self, dtype_array, dtype_kernel):
"""
Test that 32- and 64-bit floats are correctly handled
"""
x = np.array(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype=dtype_array
)
y = np.array(
[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]], dtype=dtype_kernel
)
z = convolve(x, y)
assert x.dtype == z.dtype
@pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS)
def test_unity_1x1x1_none(self, boundary):
"""
Test that a 1x1x1 unit kernel returns the same array
"""
x = np.array(
[
[[1.0, 2.0, 1.0], [2.0, 3.0, 1.0], [3.0, 2.0, 5.0]],
[[4.0, 3.0, 1.0], [5.0, 0.0, 2.0], [6.0, 1.0, 1.0]],
[[7.0, 0.0, 2.0], [8.0, 2.0, 3.0], [9.0, 2.0, 2.0]],
],
dtype=">f8",
)
y = np.array([[[1.0]]], dtype=">f8")
z = convolve(x, y, boundary=boundary)
assert np.all(z == x)
@pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS)
def test_unity_3x3x3(self, boundary):
"""
Test that a 3x3x3 unit kernel returns the same array (except when
boundary is None).
"""
x = np.array(
[
[[1.0, 2.0, 1.0], [2.0, 3.0, 1.0], [3.0, 2.0, 5.0]],
[[4.0, 3.0, 1.0], [5.0, 3.0, 2.0], [6.0, 1.0, 1.0]],
[[7.0, 0.0, 2.0], [8.0, 2.0, 3.0], [9.0, 2.0, 2.0]],
],
dtype=">f8",
)
y = np.zeros((3, 3, 3), dtype=">f8")
y[1, 1, 1] = 1.0
z = convolve(x, y, boundary=boundary)
if boundary is None:
assert np.all(
z
== np.array(
[
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
],
dtype=">f8",
)
)
else:
assert np.all(z == x)
@pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS)
def test_uniform_3x3x3(self, boundary):
"""
Test that the different modes are producing the correct results using
a 3x3 uniform kernel.
"""
x = np.array(
[
[[1.0, 2.0, 1.0], [2.0, 3.0, 1.0], [3.0, 2.0, 5.0]],
[[4.0, 3.0, 1.0], [5.0, 3.0, 2.0], [6.0, 1.0, 1.0]],
[[7.0, 0.0, 2.0], [8.0, 2.0, 3.0], [9.0, 2.0, 2.0]],
],
dtype=">f8",
)
y = np.ones((3, 3, 3), dtype=">f8")
z = convolve(x, y, boundary=boundary, normalize_kernel=False)
if boundary is None:
assert_array_almost_equal_nulp(
z,
np.array(
[
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 81.0, 0.0], [0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
],
dtype=">f8",
),
10,
)
elif boundary == "fill":
assert_array_almost_equal_nulp(
z,
np.array(
[
[[23.0, 28.0, 16.0], [35.0, 46.0, 25.0], [25.0, 34.0, 18.0]],
[[40.0, 50.0, 23.0], [63.0, 81.0, 36.0], [46.0, 60.0, 27.0]],
[[32.0, 40.0, 16.0], [50.0, 61.0, 22.0], [36.0, 44.0, 16.0]],
],
dtype=">f8",
),
10,
)
elif boundary == "wrap":
assert_array_almost_equal_nulp(
z,
np.array(
[
[[81.0, 81.0, 81.0], [81.0, 81.0, 81.0], [81.0, 81.0, 81.0]],
[[81.0, 81.0, 81.0], [81.0, 81.0, 81.0], [81.0, 81.0, 81.0]],
[[81.0, 81.0, 81.0], [81.0, 81.0, 81.0], [81.0, 81.0, 81.0]],
],
dtype=">f8",
),
10,
)
else:
assert_array_almost_equal_nulp(
z,
np.array(
[
[[65.0, 54.0, 43.0], [75.0, 66.0, 57.0], [85.0, 78.0, 71.0]],
[[96.0, 71.0, 46.0], [108.0, 81.0, 54.0], [120.0, 91.0, 62.0]],
[
[127.0, 88.0, 49.0],
[141.0, 96.0, 51.0],
[155.0, 104.0, 53.0],
],
],
dtype=">f8",
),
10,
)
@pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS)
@pytest.mark.parametrize("nan_treatment", NANHANDLING_OPTIONS)
def test_unity_3x3x3_withnan(self, boundary, nan_treatment):
"""
Test that a 3x3x3 unit kernel returns the same array (except when
boundary is None). This version includes a NaN value in the original
array.
"""
x = np.array(
[
[[1.0, 2.0, 1.0], [2.0, 3.0, 1.0], [3.0, 2.0, 5.0]],
[[4.0, 3.0, 1.0], [5.0, np.nan, 2.0], [6.0, 1.0, 1.0]],
[[7.0, 0.0, 2.0], [8.0, 2.0, 3.0], [9.0, 2.0, 2.0]],
],
dtype=">f8",
)
y = np.zeros((3, 3, 3), dtype=">f8")
y[1, 1, 1] = 1.0
z = convolve(
x, y, boundary=boundary, nan_treatment=nan_treatment, preserve_nan=True
)
assert np.isnan(z[1, 1, 1])
x = np.nan_to_num(z)
z = np.nan_to_num(z)
if boundary is None:
assert np.all(
z
== np.array(
[
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
],
dtype=">f8",
)
)
else:
assert np.all(z == x)
@pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS)
def test_uniform_3x3x3_withnan_filled(self, boundary):
"""
Test that the different modes are producing the correct results using
a 3x3 uniform kernel. This version includes a NaN value in the
original array.
"""
x = np.array(
[
[[1.0, 2.0, 1.0], [2.0, 3.0, 1.0], [3.0, 2.0, 5.0]],
[[4.0, 3.0, 1.0], [5.0, np.nan, 2.0], [6.0, 1.0, 1.0]],
[[7.0, 0.0, 2.0], [8.0, 2.0, 3.0], [9.0, 2.0, 2.0]],
],
dtype=">f8",
)
y = np.ones((3, 3, 3), dtype=">f8")
z = convolve(
x, y, boundary=boundary, nan_treatment="fill", normalize_kernel=False
)
if boundary is None:
assert_array_almost_equal_nulp(
z,
np.array(
[
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 78.0, 0.0], [0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
],
dtype=">f8",
),
10,
)
elif boundary == "fill":
assert_array_almost_equal_nulp(
z,
np.array(
[
[[20.0, 25.0, 13.0], [32.0, 43.0, 22.0], [22.0, 31.0, 15.0]],
[[37.0, 47.0, 20.0], [60.0, 78.0, 33.0], [43.0, 57.0, 24.0]],
[[29.0, 37.0, 13.0], [47.0, 58.0, 19.0], [33.0, 41.0, 13.0]],
],
dtype=">f8",
),
10,
)
elif boundary == "wrap":
assert_array_almost_equal_nulp(
z,
np.array(
[
[[78.0, 78.0, 78.0], [78.0, 78.0, 78.0], [78.0, 78.0, 78.0]],
[[78.0, 78.0, 78.0], [78.0, 78.0, 78.0], [78.0, 78.0, 78.0]],
[[78.0, 78.0, 78.0], [78.0, 78.0, 78.0], [78.0, 78.0, 78.0]],
],
dtype=">f8",
),
10,
)
elif boundary == "extend":
assert_array_almost_equal_nulp(
z,
np.array(
[
[[62.0, 51.0, 40.0], [72.0, 63.0, 54.0], [82.0, 75.0, 68.0]],
[[93.0, 68.0, 43.0], [105.0, 78.0, 51.0], [117.0, 88.0, 59.0]],
[
[124.0, 85.0, 46.0],
[138.0, 93.0, 48.0],
[152.0, 101.0, 50.0],
],
],
dtype=">f8",
),
10,
)
else:
raise ValueError("Invalid Boundary Option")
@pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS)
def test_uniform_3x3x3_withnan_interped(self, boundary):
"""
Test that the different modes are producing the correct results using
a 3x3 uniform kernel. This version includes a NaN value in the
original array.
"""
x = np.array(
[
[[1.0, 2.0, 1.0], [2.0, 3.0, 1.0], [3.0, 2.0, 5.0]],
[[4.0, 3.0, 1.0], [5.0, np.nan, 2.0], [6.0, 1.0, 1.0]],
[[7.0, 0.0, 2.0], [8.0, 2.0, 3.0], [9.0, 2.0, 2.0]],
],
dtype=">f8",
)
y = np.ones((3, 3, 3), dtype=">f8")
z = convolve(
x, y, boundary=boundary, nan_treatment="interpolate", normalize_kernel=True
)
kernsum = y.sum() - 1 # one nan is missing
mid = x[np.isfinite(x)].sum() / kernsum
if boundary is None:
assert_array_almost_equal_nulp(
z,
np.array(
[
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 78.0, 0.0], [0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
],
dtype=">f8",
)
/ kernsum,
10,
)
elif boundary == "fill":
assert_array_almost_equal_nulp(
z,
np.array(
[
[[20.0, 25.0, 13.0], [32.0, 43.0, 22.0], [22.0, 31.0, 15.0]],
[[37.0, 47.0, 20.0], [60.0, 78.0, 33.0], [43.0, 57.0, 24.0]],
[[29.0, 37.0, 13.0], [47.0, 58.0, 19.0], [33.0, 41.0, 13.0]],
],
dtype=">f8",
)
/ kernsum,
10,
)
elif boundary == "wrap":
assert_array_almost_equal_nulp(z, np.tile(mid.astype(">f8"), [3, 3, 3]), 10)
elif boundary == "extend":
assert_array_almost_equal_nulp(
z,
np.array(
[
[[62.0, 51.0, 40.0], [72.0, 63.0, 54.0], [82.0, 75.0, 68.0]],
[[93.0, 68.0, 43.0], [105.0, 78.0, 51.0], [117.0, 88.0, 59.0]],
[
[124.0, 85.0, 46.0],
[138.0, 93.0, 48.0],
[152.0, 101.0, 50.0],
],
],
dtype=">f8",
)
/ kernsum,
10,
)
else:
raise ValueError("Invalid Boundary Option")
@pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS)
def test_asymmetric_kernel(boundary):
"""
Regression test for #6264: make sure that asymmetric convolution
functions go the right direction
"""
x = np.array([3.0, 0.0, 1.0], dtype=">f8")
y = np.array([1, 2, 3], dtype=">f8")
z = convolve(x, y, boundary=boundary, normalize_kernel=False)
if boundary == "fill":
assert_array_almost_equal_nulp(z, np.array([6.0, 10.0, 2.0], dtype="float"), 10)
elif boundary is None:
assert_array_almost_equal_nulp(z, np.array([0.0, 10.0, 0.0], dtype="float"), 10)
elif boundary == "extend":
assert_array_almost_equal_nulp(
z, np.array([15.0, 10.0, 3.0], dtype="float"), 10
)
elif boundary == "wrap":
assert_array_almost_equal_nulp(z, np.array([9.0, 10.0, 5.0], dtype="float"), 10)
@pytest.mark.parametrize("ndims", (1, 2, 3))
def test_convolution_consistency(ndims):
np.random.seed(0)
array = np.random.randn(*([3] * ndims))
np.random.seed(0)
kernel = np.random.rand(*([3] * ndims))
conv_f = convolve_fft(array, kernel, boundary="fill")
conv_d = convolve(array, kernel, boundary="fill")
assert_array_almost_equal_nulp(conv_f, conv_d, 30)
def test_astropy_convolution_against_numpy():
x = np.array([1, 2, 3])
y = np.array([5, 4, 3, 2, 1])
assert_array_almost_equal(
np.convolve(y, x, "same"), convolve(y, x, normalize_kernel=False)
)
assert_array_almost_equal(
np.convolve(y, x, "same"), convolve_fft(y, x, normalize_kernel=False)
)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_astropy_convolution_against_scipy():
from scipy.signal import fftconvolve
x = np.array([1, 2, 3])
y = np.array([5, 4, 3, 2, 1])
assert_array_almost_equal(
fftconvolve(y, x, "same"), convolve(y, x, normalize_kernel=False)
)
assert_array_almost_equal(
fftconvolve(y, x, "same"), convolve_fft(y, x, normalize_kernel=False)
)
@pytest.mark.skipif(not HAS_PANDAS, reason="Requires pandas")
def test_regression_6099():
import pandas as pd
wave = np.array(np.linspace(5000, 5100, 10))
boxcar = 3
nonseries_result = convolve(wave, np.ones((boxcar,)) / boxcar)
wave_series = pd.Series(wave)
series_result = convolve(wave_series, np.ones((boxcar,)) / boxcar)
assert_array_almost_equal(nonseries_result, series_result)
def test_invalid_array_convolve():
kernel = np.ones(3) / 3.0
with pytest.raises(TypeError):
convolve("glork", kernel)
@pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS)
def test_non_square_kernel_asymmetric(boundary):
# Regression test for a bug that occurred when using non-square kernels in
# 2D when using boundary=None
kernel = np.array([[1, 2, 3, 2, 1], [0, 1, 2, 1, 0], [0, 0, 0, 0, 0]])
image = np.zeros((13, 13))
image[6, 6] = 1
result = convolve(image, kernel, normalize_kernel=False, boundary=boundary)
assert_allclose(result[5:8, 4:9], kernel)
@pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS)
@pytest.mark.parametrize("normalize_kernel", NORMALIZE_OPTIONS)
def test_uninterpolated_nan_regions(boundary, normalize_kernel):
# Issue #8086
# Test NaN interpolation of contiguous NaN regions with kernels of size
# identical and greater than that of the region of NaN values.
# Test case: kernel.shape == NaN_region.shape
kernel = Gaussian2DKernel(1, 5, 5)
nan_centroid = np.full(kernel.shape, np.nan)
image = np.pad(
nan_centroid, pad_width=kernel.shape[0] * 2, mode="constant", constant_values=1
)
with pytest.warns(
AstropyUserWarning,
match=r"nan_treatment='interpolate', however, NaN values detected "
r"post convolution. A contiguous region of NaN values, larger "
r"than the kernel size, are present in the input array. "
r"Increase the kernel size to avoid this.",
):
result = convolve(
image,
kernel,
boundary=boundary,
nan_treatment="interpolate",
normalize_kernel=normalize_kernel,
)
assert np.any(np.isnan(result))
# Test case: kernel.shape > NaN_region.shape
nan_centroid = np.full(
(kernel.shape[0] - 1, kernel.shape[1] - 1), np.nan
) # 1 smaller than kerenel
image = np.pad(
nan_centroid, pad_width=kernel.shape[0] * 2, mode="constant", constant_values=1
)
result = convolve(
image,
kernel,
boundary=boundary,
nan_treatment="interpolate",
normalize_kernel=normalize_kernel,
)
assert ~np.any(np.isnan(result)) # Note: negation
def test_regressiontest_issue9168():
"""
Issue #9168 pointed out that kernels can be (unitless) quantities, which
leads to crashes when inplace modifications are made to arrays in
convolve/convolve_fft, so we now strip the quantity aspects off of kernels.
"""
x = np.array(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
)
kernel_fwhm = 1 * u.arcsec
pixel_size = 1 * u.arcsec
kernel = Gaussian2DKernel(x_stddev=kernel_fwhm / pixel_size)
convolve_fft(x, kernel, boundary="fill", fill_value=np.nan, preserve_nan=True)
convolve(x, kernel, boundary="fill", fill_value=np.nan, preserve_nan=True)
def test_convolve_nan_zero_sum_kernel():
with pytest.raises(
ValueError,
match=(
"Setting nan_treatment='interpolate' "
"requires the kernel to be normalized, but the "
"input kernel has a sum close to zero. For a "
"zero-sum kernel and data with NaNs, set "
"nan_treatment='fill'."
),
):
convolve([1, np.nan, 3], [-1, 2, -1], normalize_kernel=False)
|
TestConvolve3D
|
python
|
neetcode-gh__leetcode
|
python/0283-move-zeroes.py
|
{
"start": 0,
"end": 395
}
|
class ____:
def moveZeroes(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
slow = 0
for fast in range(len(nums)):
if nums[fast] != 0 and nums[slow] == 0:
nums[slow], nums[fast] = nums[fast], nums[slow]
if nums[slow] != 0:
slow += 1
|
Solution
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 15817,
"end": 16031
}
|
class ____(VegaLiteSchema):
"""AnyMarkConfig schema wrapper."""
_schema = {"$ref": "#/definitions/AnyMarkConfig"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
|
AnyMarkConfig
|
python
|
keras-team__keras
|
keras/src/distillation/distillation_loss.py
|
{
"start": 2983,
"end": 9661
}
|
class ____(DistillationLoss):
"""Feature distillation loss.
Feature distillation transfers knowledge from intermediate layers of the
teacher model to corresponding layers of the student model. This approach
helps the student learn better internal representations and often leads
to better performance compared to logits-only distillation.
Arguments:
loss: Loss function to use for feature distillation. Can be:
- String identifier (e.g., 'mse', 'cosine_similarity', 'mae')
- Keras loss instance
- Nested structure of losses matching the layer output structure
- `None` to skip distillation for that output (useful for
multi-output models where you only want to distill some outputs)
At least one loss must be non-`None`. Defaults to 'mse'.
teacher_layer_name: Name of the teacher layer to extract features from.
If `None`, uses the final output. Defaults to `None`.
student_layer_name: Name of the student layer to extract features from.
If `None`, uses the final output. Defaults to `None`.
Examlpe(s):
```python
# Basic feature distillation from final outputs
distillation_loss = FeatureDistillation(loss="mse")
# Distill from specific intermediate layers
distillation_loss = FeatureDistillation(
loss="mse",
teacher_layer_name="dense_1",
student_layer_name="dense_1"
)
# Use cosine similarity for different feature sizes
distillation_loss = FeatureDistillation(
loss="cosine_similarity",
teacher_layer_name="conv2d_2",
student_layer_name="conv2d_1"
)
# With custom loss instance
distillation_loss = FeatureDistillation(
loss=keras.losses.MeanAbsoluteError()
)
# For multi-output models
distillation_loss = FeatureDistillation(
loss=["mse", "cosine_similarity"]
)
# For multi-output models, only distill some outputs
distillation_loss = FeatureDistillation(
loss=["mse", None, "cosine_similarity"] # Skip middle output
)
```
"""
@tracking.no_automatic_dependency_tracking
def __init__(
self, loss="mse", teacher_layer_name=None, student_layer_name=None
):
self.teacher_layer_name = teacher_layer_name
self.student_layer_name = student_layer_name
self.loss = tree.map_structure(_convert_loss_to_function, loss)
flat_losses = tree.flatten(self.loss)
if all(l is None for l in flat_losses):
raise ValueError(
"The `loss` argument in `FeatureDistillation` must "
"contain at least one non-`None` value."
)
def validate_model_compatibility(self, teacher, student):
"""Validate that teacher and student models are compatible for feature
distillation."""
if (
self.teacher_layer_name is not None
or self.student_layer_name is not None
):
teacher_is_subclassed = (
not hasattr(teacher, "inputs") or teacher.inputs is None
)
student_is_subclassed = (
not hasattr(student, "inputs") or student.inputs is None
)
if teacher_is_subclassed or student_is_subclassed:
subclassed_models = []
if teacher_is_subclassed:
subclassed_models.append("teacher")
if student_is_subclassed:
subclassed_models.append("student")
models_str = " and ".join(subclassed_models)
raise ValueError(
f"FeatureDistillation with specific layer names requires "
f"Functional or Sequential models. The {models_str} "
f"model(s) appear to be subclassed (no symbolic "
f"inputs/outputs). Either use Functional/Sequential "
f"models, or use FeatureDistillation without layer names "
f"(to distill final outputs only), or use "
f"LogitsDistillation instead."
)
if self.teacher_layer_name is not None:
try:
teacher.get_layer(name=self.teacher_layer_name)
except ValueError as e:
raise ValueError(f"In teacher model: {e}")
if self.student_layer_name is not None:
try:
student.get_layer(name=self.student_layer_name)
except ValueError as e:
raise ValueError(f"In student model: {e}")
def validate_outputs(self, teacher_outputs, student_outputs):
"""Validate that outputs are compatible for feature distillation."""
super().validate_outputs(teacher_outputs, student_outputs)
try:
tree.assert_same_structure(self.loss, teacher_outputs)
except ValueError as e:
raise ValueError(
f"Loss structure mismatch. "
f"Loss structure: {tree.structure(self.loss)}, "
f"Output structure: {tree.structure(teacher_outputs)}. "
f"Error: {e}"
)
def compute_loss(self, teacher_outputs, student_outputs, **kwargs):
"""Compute feature distillation loss using extracted features.
Arguments:
teacher_outputs: Extracted features from teacher layer.
student_outputs: Extracted features from student layer.
**kwargs: Additional arguments (ignored).
Returns:
Scalar distillation loss tensor.
"""
def apply_loss(loss_fn, teacher_features, student_features):
if loss_fn is None:
return 0.0
loss = keras.ops.mean(loss_fn(teacher_features, student_features))
return loss
loss_values = tree.map_structure(
apply_loss, self.loss, teacher_outputs, student_outputs
)
flat_losses = tree.flatten(loss_values)
return keras.ops.sum(keras.ops.stack(flat_losses))
def get_config(self):
"""Get configuration for serialization."""
return {
"loss": keras.losses.serialize(self.loss),
"teacher_layer_name": self.teacher_layer_name,
"student_layer_name": self.student_layer_name,
}
@classmethod
def from_config(cls, config):
"""Create instance from configuration."""
config = config.copy()
config["loss"] = keras.losses.deserialize(config["loss"])
return cls(**config)
@keras_export("keras.distillation.LogitsDistillation")
|
FeatureDistillation
|
python
|
encode__httpx
|
httpx/_exceptions.py
|
{
"start": 4818,
"end": 4999
}
|
class ____(ProtocolError):
"""
The protocol was violated by the server.
For example, returning malformed HTTP.
"""
# Other request exceptions...
|
RemoteProtocolError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.