language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | scipy__scipy | benchmarks/benchmarks/optimize.py | {
"start": 20664,
"end": 22341
} | class ____(Benchmark):
"""
Benchmark the optimizers with the CUTEST DFO benchmark of Moré and Wild.
The original benchmark suite is available at
https://github.com/POptUS/BenDFO
"""
params = [
list(range(53)), # adjust which problems to solve
["COBYLA", "COBYQA", "SLSQP", "Powell", "nelder-mead", "L-BFGS-B",
"BFGS",
"trust-constr"], # note: methods must also be listed in bench_run
["mean_nfev", "min_obj"], # defined in average_results
]
param_names = ["DFO benchmark problem number", "solver", "result type"]
def setup(self, prob_number, method_name, ret_val):
probs = np.loadtxt(os.path.join(os.path.dirname(__file__),
"cutest", "dfo.txt"))
params = probs[prob_number]
nprob = int(params[0])
n = int(params[1])
m = int(params[2])
s = params[3]
factor = 10 ** s
def func(x):
return calfun(x, m, nprob)
x0 = dfoxs(n, nprob, factor)
b = getattr(self, "run_cutest")(
func, x0, prob_number=prob_number, methods=[method_name]
)
r = b.average_results().get(method_name)
if r is None:
raise NotImplementedError()
self.result = getattr(r, ret_val)
def track_all(self, prob_number, method_name, ret_val):
return self.result
def run_cutest(self, func, x0, prob_number, methods=None):
if methods is None:
methods = MINIMIZE_METHODS
b = _BenchOptimizers(f"DFO benchmark problem {prob_number}", fun=func)
b.bench_run(x0, methods=methods)
return b
| BenchDFO |
python | keras-team__keras | keras/src/optimizers/optimizer_test.py | {
"start": 285,
"end": 16340
} | class ____(testing.TestCase):
def test_iterations_counter(self):
v = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
grads = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
optimizer = optimizers.Adam(learning_rate=1.0)
self.assertAllClose(optimizer.iterations, 0)
optimizer.apply_gradients([(grads, v)])
self.assertAllClose(optimizer.iterations, 1)
optimizer.apply_gradients([(grads, v)])
self.assertAllClose(optimizer.iterations, 2)
def test_empty_gradients(self):
# Test no valid gradient
v = backend.Variable([[3.0, 4.0], [5.0, 6.0]])
grads = None
optimizer = optimizers.SGD(learning_rate=1.0)
with self.assertRaisesRegex(
ValueError, "No gradients provided for any variable."
):
optimizer.apply_gradients([(grads, v)])
# Test filtering of empty gradients
v2 = backend.Variable([[3.0, 4.0], [5.0, 6.0]])
grads2 = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
optimizer = optimizers.SGD(learning_rate=1.0)
with self.assertWarns(Warning):
optimizer.apply_gradients([(grads, v), (grads2, v2)])
self.assertAllClose(v, [[3.0, 4.0], [5.0, 6.0]])
self.assertAllClose(v2, [[2.0, 3.0], [4.0, 5.0]])
def test_clip_args(self):
optimizer = optimizers.SGD(learning_rate=1.0, clipnorm=0.1)
self.assertEqual(optimizer.clipnorm, 0.1)
optimizer = optimizers.SGD(learning_rate=1.0, clipvalue=0.1)
self.assertEqual(optimizer.clipvalue, 0.1)
optimizer = optimizers.SGD(learning_rate=1.0, global_clipnorm=0.1)
self.assertEqual(optimizer.global_clipnorm, 0.1)
# Test invalid arguments
with self.assertRaisesRegex(
ValueError,
"Only one of `clipnorm`, `clipvalue` and `global_clipnorm` can",
):
optimizers.SGD(
learning_rate=1.0,
clipnorm=0.1,
clipvalue=0.1,
)
with self.assertRaisesRegex(
ValueError,
"Only one of `clipnorm`, `clipvalue` and `global_clipnorm` can",
):
optimizers.SGD(
learning_rate=1.0,
clipnorm=0.1,
global_clipnorm=0.1,
)
def test_clip_norm(self):
optimizer = optimizers.SGD(clipnorm=1)
grad = backend.convert_to_tensor([100.0, 100.0])
clipped_grad = optimizer._clip_gradients([grad])
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = optimizers.SGD(clipvalue=1)
grad = backend.convert_to_tensor([100.0, 100.0])
clipped_grad = optimizer._clip_gradients([grad])
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
def test_global_clip_norm(self):
optimizer = optimizers.SGD(global_clipnorm=1)
grad = np.array([50.0, 100.0], dtype="float32")
global_norm = np.linalg.norm(grad)
clipped_grad = optimizer._clip_gradients(
[backend.convert_to_tensor(grad)]
)
self.assertAllClose(clipped_grad[0], grad / global_norm)
def test_ema(self):
v = backend.Variable([[3.0, 4.0], [5.0, 6.0]])
grads = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
optimizer = optimizers.SGD(
learning_rate=1.0,
use_ema=True,
ema_momentum=0.9,
ema_overwrite_frequency=3,
)
optimizer.apply_gradients([(grads, v)])
self.assertAllClose(v, [[2.0, 3.0], [4.0, 5.0]])
self.assertAllClose(
optimizer._model_variables_moving_average[0],
[[2.0, 3.0], [4.0, 5.0]], # initialized after first step
)
optimizer.apply_gradients([(grads, v)])
self.assertAllClose(v, [[1.0, 2.0], [3.0, 4.0]])
self.assertAllClose(
optimizer._model_variables_moving_average[0],
[[1.9, 2.9], [3.9, 4.9]],
)
optimizer.apply_gradients([(grads, v)])
# Variables were overwritten with EMA
self.assertAllClose(v, [[1.71, 2.71], [3.71, 4.71]])
self.assertAllClose(
optimizer._model_variables_moving_average[0],
[[1.71, 2.71], [3.71, 4.71]],
)
@pytest.mark.requires_trainable_backend
def test_ema_with_model_fit(self):
x_train = np.ones((1, 1)).astype("float32")
y_train = np.zeros((1, 1)).astype("float32")
optimizer = optimizers.SGD(
learning_rate=0.1, use_ema=True, ema_momentum=0.9
)
model = models.Sequential(
[layers.Dense(2, kernel_initializer="ones", use_bias=False)]
)
model.compile(loss="mse", optimizer=optimizer, run_eagerly=True)
model.fit(x_train, y_train, batch_size=1, epochs=2)
self.assertAllClose(
optimizer._model_variables_moving_average[0].numpy(),
[[0.891, 0.891]],
atol=1e-5,
)
self.assertAllClose(
model.trainable_variables[0].numpy(),
[[0.891, 0.891]],
atol=1e-5,
)
def test_constraints_are_applied(self):
v = backend.Variable(np.random.random((2, 2)) - 1.0)
v.constraint = constraints.NonNeg()
optimizer = optimizers.SGD(learning_rate=0.0001)
grad = backend.numpy.zeros((2, 2))
optimizer.apply_gradients([(grad, v)])
self.assertAlmostEqual(np.min(v), 0.0)
def test_get_method(self):
obj = optimizers.get("sgd")
self.assertIsInstance(obj, optimizers.SGD)
obj = optimizers.get("adamw")
self.assertIsInstance(obj, optimizers.AdamW)
obj = optimizers.get(None)
self.assertEqual(obj, None)
with self.assertRaises(ValueError):
optimizers.get("typo")
def test_static_loss_scaling(self):
v = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
grads = backend.convert_to_tensor([[1.0, 2.0], [3.0, 4.0]]) * 1024.0
optimizer = optimizers.SGD(learning_rate=1.0, loss_scale_factor=1024.0)
optimizer.apply_gradients([(grads, v)])
self.assertEqual(optimizer.scale_loss(1.0), 1024.0)
self.assertAllClose(v, [[0.0, 0.0], [0.0, 0.0]])
def test_set_weights(self):
x = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
optimizer_1 = optimizers.Adam()
grads = backend.convert_to_tensor([[1.0, 2.0], [3.0, 4.0]])
optimizer_1.apply_gradients(zip([grads], [x]))
optimizer_2 = optimizers.Adam()
with self.assertRaisesRegex(ValueError, "You are calling*"):
optimizer_2.set_weights(optimizer_1.variables)
optimizer_2.build([x])
optimizer_2.set_weights(optimizer_1.variables)
for i in range(len(optimizer_1.variables)):
self.assertAllClose(
optimizer_1.variables[i],
optimizer_2.variables[i],
)
def test_gradient_accumulation(self):
v = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
grads = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
optimizer = optimizers.SGD(
learning_rate=1.0, gradient_accumulation_steps=3
)
self.assertEqual(optimizer.gradient_accumulation_steps, 3)
# Iteration 1
optimizer.apply_gradients([(grads, v)])
self.assertAllClose(v, [[1.0, 2.0], [3.0, 4.0]])
self.assertAllClose(
optimizer._accumulated_gradients[0], [[1.0, 1.0], [1.0, 1.0]]
)
self.assertAllClose(optimizer._iterations, 1)
self.assertAllClose(optimizer.iterations, 0)
# Iteration 2
optimizer.apply_gradients([(grads, v)])
self.assertAllClose(v, [[1.0, 2.0], [3.0, 4.0]])
self.assertAllClose(
optimizer._accumulated_gradients[0], [[2.0, 2.0], [2.0, 2.0]]
)
self.assertAllClose(optimizer._iterations, 2)
self.assertAllClose(optimizer.iterations, 0)
# Iteration 3
optimizer.apply_gradients([(grads, v)])
self.assertAllClose(v, [[0.0, 1.0], [2.0, 3.0]])
self.assertAllClose(
optimizer._accumulated_gradients[0], [[0.0, 0.0], [0.0, 0.0]]
)
self.assertAllClose(optimizer._iterations, 3)
self.assertAllClose(optimizer.iterations, 1)
# Iteration 4
optimizer.apply_gradients([(grads, v)])
self.assertAllClose(v, [[0.0, 1.0], [2.0, 3.0]])
self.assertAllClose(
optimizer._accumulated_gradients[0], [[1.0, 1.0], [1.0, 1.0]]
)
self.assertAllClose(optimizer._iterations, 4)
self.assertAllClose(optimizer.iterations, 1)
@pytest.mark.skipif(backend.backend() != "tensorflow", reason="Requires TF")
def test_tf_checkpointing(self):
import tensorflow as tf
model = models.Sequential([layers.Dense(2)])
optimizer = optimizers.Adam()
x, y = np.random.random((1, 2)), np.random.random((1, 2))
model.compile(optimizer, "mse")
model.train_on_batch(x, y)
ref_pred = model.predict(x)
# Both model and optimizer are Trackables
checkpoint = tf.train.Checkpoint(model, optimizer=optimizer)
temp_filepath = os.path.join(self.get_temp_dir(), "tf_ckpt")
save_path = checkpoint.save(temp_filepath)
# Keep training the model (predictions now differ)
model.train_on_batch(x, y)
pred = model.predict(x)
self.assertNotAllClose(pred, ref_pred, atol=1e-3)
# Restore the model and check prediction correctness
checkpoint.restore(save_path)
pred = model.predict(x)
self.assertAllClose(pred, ref_pred, atol=1e-5)
def test_callable_learning_rate(self):
v = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
grads = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
optimizer = optimizers.SGD(learning_rate=lambda: 0.1)
self.assertAllClose(optimizer.iterations, 0)
optimizer.apply_gradients([(grads, v)])
self.assertAllClose(v, [[0.9, 1.9], [2.9, 3.9]])
self.assertAllClose(optimizer.iterations, 1)
def test_overwrite_with_gradient(self):
v = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
v.overwrite_with_gradient = True
v2 = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
grads = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
grads2 = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
optimizer = optimizers.SGD(learning_rate=1.0)
optimizer.apply_gradients([(grads, v), (grads2, v2)])
# `v` is overwritten by its gradient but `v2` is updated normally
self.assertAllClose(v, [[1.0, 1.0], [1.0, 1.0]])
self.assertAllClose(v2, [[0.0, 1.0], [2.0, 3.0]])
def test_overwrite_with_gradient_with_gradient_accumulation(self):
v = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
v.overwrite_with_gradient = True
v2 = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
grad_ones = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
grad_twos = backend.convert_to_tensor([[2.0, 2.0], [2.0, 2.0]])
optimizer = optimizers.SGD(
learning_rate=1.0, gradient_accumulation_steps=2
)
# Iteration 1
optimizer.apply_gradients([(grad_ones, v), (grad_ones, v2)])
self.assertAllClose(optimizer._iterations, 1)
self.assertAllClose(optimizer.iterations, 0)
self.assertAllClose(v, [[1.0, 2.0], [3.0, 4.0]])
self.assertAllClose(v2, [[1.0, 2.0], [3.0, 4.0]])
self.assertAllClose(
optimizer._accumulated_gradients[0], [[1.0, 1.0], [1.0, 1.0]]
)
self.assertAllClose(
optimizer._accumulated_gradients[1], [[1.0, 1.0], [1.0, 1.0]]
)
# Iteration 2
optimizer.apply_gradients([(grad_twos, v), (grad_twos, v2)])
self.assertAllClose(optimizer._iterations, 2)
self.assertAllClose(optimizer.iterations, 1)
self.assertAllClose(v, [[2.0, 2.0], [2.0, 2.0]])
self.assertAllClose(v2, [[-0.5, 0.5], [1.5, 2.5]])
self.assertAllClose(
optimizer._accumulated_gradients[0], [[0.0, 0.0], [0.0, 0.0]]
)
self.assertAllClose(
optimizer._accumulated_gradients[1], [[0.0, 0.0], [0.0, 0.0]]
)
# Iteration 3
optimizer.apply_gradients([(grad_ones, v), (grad_ones, v2)])
self.assertAllClose(optimizer._iterations, 3)
self.assertAllClose(optimizer.iterations, 1)
self.assertAllClose(v, [[2.0, 2.0], [2.0, 2.0]])
self.assertAllClose(v2, [[-0.5, 0.5], [1.5, 2.5]])
self.assertAllClose(
optimizer._accumulated_gradients[0], [[1.0, 1.0], [1.0, 1.0]]
)
self.assertAllClose(
optimizer._accumulated_gradients[1], [[1.0, 1.0], [1.0, 1.0]]
)
@parameterized.parameters(
[
("adam",),
("sgd",),
("adamw",),
("adagrad",),
("rmsprop",),
("adadelta",),
("adamax",),
("lion",),
("nadam",),
("ftrl",),
("adafactor",),
]
)
def test_gradient_accumulation_with_weigth_decay(self, optimizer):
optimizer1 = optimizers.get(
{"class_name": optimizer, "config": {"weight_decay": 0.05}}
)
optimizer3 = optimizers.get(
{
"class_name": optimizer,
"config": {
"weight_decay": 0.05,
"gradient_accumulation_steps": 3,
},
}
)
variable1 = backend.Variable([[0.9], [0.5]])
variable3 = backend.Variable([[0.9], [0.5]])
for epoch in range(8):
grads3 = np.random.random([3, 2, 1]).astype("float32")
grads1 = backend.convert_to_tensor(grads3.mean(axis=0))
optimizer1.apply_gradients([(grads1, variable1)])
for batch in range(3):
grads3_ = backend.convert_to_tensor(grads3[batch])
optimizer3.apply_gradients([(grads3_, variable3)])
self.assertAllClose(variable1, variable3)
def test_setting_lr_to_callable_untracks_lr_var(self):
adam = optimizers.Adam(learning_rate=0.001)
self.assertLen(adam.variables, 2)
adam.learning_rate = optimizers.schedules.PolynomialDecay(
adam.learning_rate, 4
)
self.assertLen(adam.variables, 1)
@parameterized.parameters(
[
("adam",),
("sgd",),
("adamw",),
("adagrad",),
("rmsprop",),
("adadelta",),
("adamax",),
("lion",),
("nadam",),
("ftrl",),
("adafactor",),
]
)
def test_pickleable_optimizers(self, optimizer):
optimizer = optimizers.get(optimizer)
reloaded = pickle.loads(pickle.dumps(optimizer))
self.assertEqual(optimizer.get_config(), reloaded.get_config())
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="The tf.Variable test can only run with TensorFlow backend.",
)
def test_mixed_with_tf_variables(self):
import tensorflow as tf
v = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
grads = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
tf_v = tf.Variable([[1.0, 2.0], [3.0, 4.0]])
tf_grads = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
optimizer = optimizers.Adam(learning_rate=1.0)
optimizer.apply_gradients([(grads, v), (tf_grads, tf_v)])
self.assertAllClose(optimizer.iterations, 1)
# Test with no grads
with self.assertWarnsRegex(
UserWarning, "Gradients do not exist for variables"
):
optimizer.apply_gradients([(grads, v), (None, tf_v)])
self.assertAllClose(optimizer.iterations, 2)
| OptimizerTest |
python | keras-team__keras | keras/src/layers/reshaping/reshape_test.py | {
"start": 266,
"end": 4869
} | class ____(testing.TestCase):
@parameterized.named_parameters(
[
{"testcase_name": "dense", "sparse": False},
{"testcase_name": "sparse", "sparse": True},
]
)
@pytest.mark.requires_trainable_backend
def test_reshape(self, sparse):
if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
pytest.skip("Backend does not support sparse tensors.")
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (8, 1)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 8, 1),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (8,)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 8),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (2, 4)},
input_shape=(3, 8),
input_sparse=sparse,
expected_output_shape=(3, 2, 4),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (-1, 1)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 8, 1),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (1, -1)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 1, 8),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (-1,)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 8),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (2, -1)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 2, 4),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
def test_reshape_with_dynamic_batch_size(self):
input_layer = layers.Input(shape=(2, 4))
reshaped = layers.Reshape((8,))(input_layer)
self.assertEqual(reshaped.shape, (None, 8))
def test_reshape_with_dynamic_batch_size_and_minus_one(self):
input = KerasTensor((None, 6, 4))
layer = layers.Reshape((-1, 8))
reshaped = backend.compute_output_spec(layer.__call__, input)
self.assertEqual(reshaped.shape, (None, 3, 8))
def test_reshape_layer_with_varying_input_size_and_minus_one(self):
layer = layers.Reshape((-1, 8))
res = layer(ops.ones((1, 6, 4), dtype="float32"))
self.assertEqual(res.shape, (1, 3, 8))
res = layer(ops.ones((1, 10, 4), dtype="float32"))
self.assertEqual(res.shape, (1, 5, 8))
def test_reshape_with_dynamic_dim_and_minus_one(self):
input = KerasTensor((4, 6, None, 3))
layer = layers.Reshape((-1, 3))
reshaped = backend.compute_output_spec(layer.__call__, input)
self.assertEqual(reshaped.shape, (4, None, 3))
def test_reshape_sets_static_shape(self):
input_layer = layers.Input(batch_shape=(2, None))
reshaped = layers.Reshape((3, 5))(input_layer)
# Also make sure the batch dim is not lost after reshape.
self.assertEqual(reshaped.shape, (2, 3, 5))
@pytest.mark.requires_trainable_backend
def test_reshape_model_fit_with_varying_input_size_and_minus_one(self):
def generator():
yield (
ops.ones((1, 12, 2), dtype="float32"),
ops.zeros((1, 3, 8), dtype="float32"),
)
yield (
ops.ones((1, 20, 2), dtype="float32"),
ops.zeros((1, 5, 8), dtype="float32"),
)
layer = layers.Reshape((-1, 8))
model = Sequential([layer])
model.compile(loss="mean_squared_error")
model.fit(generator(), steps_per_epoch=2, epochs=1)
| ReshapeTest |
python | mitmproxy__pdoc | test/testdata/ast_parsing.py | {
"start": 0,
"end": 145
} | class ____:
def __init__(self):
self.no_docstring = 42
self.with_docstring = 43
"""This is an attribute docstring."""
| Foo |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 23196,
"end": 24355
} | class ____:
"""
Base class for testing valid and invalid input values.
"""
def test_valid_inputs(self, *args):
"""
Ensure that valid values return the expected validated data.
"""
for input_value, expected_output in get_items(self.valid_inputs):
assert self.field.run_validation(input_value) == expected_output, \
f'input value: {repr(input_value)}'
def test_invalid_inputs(self, *args):
"""
Ensure that invalid values raise the expected validation error.
"""
for input_value, expected_failure in get_items(self.invalid_inputs):
with pytest.raises(serializers.ValidationError) as exc_info:
self.field.run_validation(input_value)
assert exc_info.value.detail == expected_failure, \
f'input value: {repr(input_value)}'
def test_outputs(self, *args):
for output_value, expected_output in get_items(self.outputs):
assert self.field.to_representation(output_value) == expected_output, \
f'output value: {repr(output_value)}'
# Boolean types...
| FieldValues |
python | pytorch__pytorch | torch/distributed/pipelining/_IR.py | {
"start": 15271,
"end": 15365
} | class ____:
def __init__(self, name):
self.name = name
name: str
| _NodeReference |
python | miyuchina__mistletoe | mistletoe/block_token.py | {
"start": 29321,
"end": 36523
} | class ____(BlockToken):
"""
Footnote token. One or more "link reference definitions" according to the CommonMark spec.
This is a leaf block token without children.
The constructor returns None, because available footnote definitions are parsed
and stored into the root node within `Footnote.read()`. We don't put instances of
this class into the resulting AST.
"""
def __new__(cls, _):
return None
@classmethod
def start(cls, line):
return line.lstrip().startswith('[')
@classmethod
def read(cls, lines):
line_buffer = []
next_line = lines.peek()
while next_line is not None and next_line.strip() != '':
line_buffer.append(next(lines))
next_line = lines.peek()
string = ''.join(line_buffer)
offset = 0
matches = []
while offset < len(string) - 1:
match_info = cls.match_reference(string, offset)
if match_info is None:
# backtrack the lines that have not been consumed
lines._index -= string[offset:].count('\n')
break
offset, match = match_info
matches.append(match)
cls.append_footnotes(matches, token._root_node)
return matches or None
@classmethod
def match_reference(cls, string, offset):
# up to three spaces, "[", label, "]"
match_info = cls.match_link_label(string, offset)
if not match_info:
return None
_, label_end, label = match_info
# ":"
if not follows(string, label_end - 1, ':'):
return None
# optional spaces or tabs (including up to one line ending)
dest_start = shift_whitespace(string, label_end + 1)
if dest_start == len(string):
return None
# link destination
match_info = cls.match_link_dest(string, dest_start)
if not match_info:
return None
_, dest_end, dest = match_info
dest_type = "angle_uri" if string[dest_start] == "<" else "uri"
# either of:
# 1) optional spaces or tabs and then a line break to finish the link reference definition.
# 2) optional spaces or tabs (including up to one line ending) followed by a title.
# in any case, if the destination is followed directly by non-whitespace, then it's not
# a valid link reference definition.
title_start = shift_whitespace(string, dest_end)
if title_start == dest_end and title_start < len(string):
return None
# link title
match_info = cls.match_link_title(string, title_start)
if not match_info:
# no valid title found. if there was a line break following the destination,
# we still have a valid link reference definition. otherwise not.
eol_pos = string[dest_end:title_start].find("\n")
if eol_pos >= 0:
return dest_end + eol_pos + 1, (label, dest, "", dest_type, None)
else:
return None
_, title_end, title = match_info
# optional spaces or tabs. final line ending.
line_end = title_end
while line_end < len(string):
if string[line_end] == '\n':
title_delimiter = string[title_start] if title_start < title_end else None
return line_end + 1, (label, dest, title, dest_type, title_delimiter)
elif string[line_end] in whitespace:
line_end += 1
else:
break
# non-whitespace found on the same line as the title, making it invalid.
# if there was a line break following the destination,
# we still have a valid link reference definition. otherwise not.
eol_pos = string[dest_end:title_start].find("\n")
if eol_pos >= 0:
return dest_end + eol_pos + 1, (label, dest, "", dest_type, None)
else:
return None
@classmethod
def match_link_label(cls, string, offset):
"""
Matches: up to three spaces, "[", label, "]".
"""
start = -1
escaped = False
for i, c in enumerate(string[offset:], start=offset):
if escaped:
escaped = False
elif c == '\\':
escaped = True
elif c == '[':
if start == -1:
start = i
else:
return None
elif c == ']':
label = string[start + 1:i]
if label.strip() != '':
return start, i + 1, label
return None
# only spaces allowed before the opening bracket
if start == -1 and not (c == " " and i - offset < 3):
return None
return None
@classmethod
def match_link_dest(cls, string, offset):
if string[offset] == '<':
escaped = False
for i, c in enumerate(string[offset + 1:], start=offset + 1):
if c == '\\' and not escaped:
escaped = True
elif c == '\n' or (c == '<' and not escaped):
return None
elif c == '>' and not escaped:
return offset, i + 1, string[offset + 1:i]
elif escaped:
escaped = False
return None
else:
escaped = False
count = 0
for i, c in enumerate(string[offset:], start=offset):
if c == '\\' and not escaped:
escaped = True
elif c in whitespace:
break
elif not escaped:
if c == '(':
count += 1
elif c == ')':
count -= 1
elif is_control_char(c):
return None
elif escaped:
escaped = False
if count != 0:
return None
return offset, i, string[offset:i]
@classmethod
def match_link_title(cls, string, offset):
if offset == len(string):
return None
if string[offset] == '"':
closing = '"'
elif string[offset] == "'":
closing = "'"
elif string[offset] == '(':
closing = ')'
else:
return None
escaped = False
for i, c in enumerate(string[offset + 1:], start=offset + 1):
if c == '\\' and not escaped:
escaped = True
elif c == closing and not escaped:
return offset, i + 1, string[offset + 1:i]
elif escaped:
escaped = False
return None
@staticmethod
def append_footnotes(matches, root):
for key, dest, title, *_ in matches:
key = normalize_label(key)
dest = span_token.EscapeSequence.strip(dest.strip())
title = span_token.EscapeSequence.strip(title)
if key not in root.footnotes:
root.footnotes[key] = dest, title
| Footnote |
python | pydata__xarray | xarray/core/extensions.py | {
"start": 181,
"end": 285
} | class ____(Warning):
"""Warning for conflicts in accessor registration."""
| AccessorRegistrationWarning |
python | coleifer__peewee | tests/regressions.py | {
"start": 29906,
"end": 30077
} | class ____(TestModel):
name = TextField()
created_by = ForeignKeyField(RU, backref='recipes')
changed_by = ForeignKeyField(RU, backref='recipes_modified')
| Recipe |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 35848,
"end": 36195
} | class ____(VOTableSpecWarning):
"""
The TIMESYS element was introduced in VOTable 1.4. It should
not be present in files marked as an earlier version.
"""
message_template = (
"The TIMESYS element was introduced in VOTable 1.4, but "
"this file is declared as version '{}'"
)
default_args = ("1.3",)
| W54 |
python | wandb__wandb | wandb/automations/_generated/fragments.py | {
"start": 2500,
"end": 2685
} | class ____(GQLResult):
typename__: Typename[
Literal["GenericWebhookIntegration", "GitHubOAuthIntegration", "Integration"]
]
| NotificationActionFieldsIntegrationIntegration |
python | google__pytype | pytype/pytd/pytd_visitors.py | {
"start": 3042,
"end": 3727
} | class ____(base_visitor.Visitor):
"""Visitor for extracting all superclasses (i.e., the class hierarchy).
When called on a TypeDeclUnit, this yields a dictionary mapping pytd.Class
to lists of pytd.Type.
"""
def __init__(self):
super().__init__()
self._superclasses = {}
def _Key(self, node):
# This method should be implemented by subclasses.
return node
def VisitTypeDeclUnit(self, module):
del module
return self._superclasses
def EnterClass(self, cls):
bases = []
for p in cls.bases:
base = self._Key(p)
if base is not None:
bases.append(base)
self._superclasses[self._Key(cls)] = bases
| ExtractSuperClasses |
python | getsentry__sentry | src/sentry/api/serializers/rest_framework/groupsearchview.py | {
"start": 279,
"end": 610
} | class ____(TypedDict):
id: NotRequired[str]
name: str
query: str
querySort: SORT_LITERALS
position: int
projects: list[int]
isAllProjects: NotRequired[bool]
environments: list[str]
timeFilters: dict[str, Any]
dateCreated: str | None
dateUpdated: str | None
| GroupSearchViewValidatorResponse |
python | getsentry__sentry | src/sentry/issues/endpoints/source_map_debug.py | {
"start": 841,
"end": 957
} | class ____(TypedDict):
type: str
message: str
data: dict[str, Any] | None
| SourceMapProcessingIssueResponse |
python | getsentry__sentry | tests/sentry/incidents/test_logic.py | {
"start": 98653,
"end": 98914
} | class ____(TestCase):
def test(self) -> None:
alert_rule = self.create_alert_rule()
trigger = create_alert_rule_trigger(alert_rule, "hi", 1000)
assert get_triggers_for_alert_rule(alert_rule).get() == trigger
| GetTriggersForAlertRuleTest |
python | streamlit__streamlit | lib/tests/streamlit/elements/balloons_test.py | {
"start": 751,
"end": 1451
} | class ____(DeltaGeneratorTestCase):
"""Test ability to marshall balloons protos."""
def test_st_balloons(self):
"""Test st.balloons."""
st.balloons()
el = self.get_delta_from_queue().new_element
assert el.balloons.show
@pytest.mark.usefixtures("benchmark")
def test_st_balloons_perf(self):
"""
Simple test to measure performance of st.balloons. Since the underlying
code doesn't do too much, this is a good baseline benchmark test.
Additionally, this is a simple use case for ensuring the infra for
`pytest-benchmark` is working in unittest-style tests.
"""
self.benchmark(st.balloons)
| BalloonsTest |
python | pandas-dev__pandas | pandas/tests/indexing/test_indexing.py | {
"start": 20753,
"end": 27718
} | class ____:
def test_float_index_to_mixed(self):
df = DataFrame(
{
0.0: np.random.default_rng(2).random(10),
1.0: np.random.default_rng(2).random(10),
}
)
df["a"] = 10
expected = DataFrame({0.0: df[0.0], 1.0: df[1.0], "a": [10] * 10})
tm.assert_frame_equal(expected, df)
def test_float_index_non_scalar_assignment(self):
df = DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]}, index=[1.0, 2.0, 3.0])
df.loc[df.index[:2]] = 1
expected = DataFrame({"a": [1, 1, 3], "b": [1, 1, 5]}, index=df.index)
tm.assert_frame_equal(expected, df)
def test_loc_setitem_fullindex_views(self):
df = DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]}, index=[1.0, 2.0, 3.0])
df2 = df.copy()
df.loc[df.index] = df.loc[df.index]
tm.assert_frame_equal(df, df2)
def test_rhs_alignment(self):
# GH8258, tests that both rows & columns are aligned to what is
# assigned to. covers both uniform data-type & multi-type cases
def run_tests(df, rhs, right_loc, right_iloc):
# label, index, slice
lbl_one, idx_one, slice_one = list("bcd"), [1, 2, 3], slice(1, 4)
lbl_two, idx_two, slice_two = ["joe", "jolie"], [1, 2], slice(1, 3)
left = df.copy()
left.loc[lbl_one, lbl_two] = rhs
tm.assert_frame_equal(left, right_loc)
left = df.copy()
left.iloc[idx_one, idx_two] = rhs
tm.assert_frame_equal(left, right_iloc)
left = df.copy()
left.iloc[slice_one, slice_two] = rhs
tm.assert_frame_equal(left, right_iloc)
xs = np.arange(20).reshape(5, 4)
cols = ["jim", "joe", "jolie", "joline"]
df = DataFrame(xs, columns=cols, index=list("abcde"), dtype="int64")
# right hand side; permute the indices and multiply by -2
rhs = -2 * df.iloc[3:0:-1, 2:0:-1]
# expected `right` result; just multiply by -2
right_iloc = df.copy()
right_iloc["joe"] = [1, 14, 10, 6, 17]
right_iloc["jolie"] = [2, 13, 9, 5, 18]
right_iloc.iloc[1:4, 1:3] *= -2
right_loc = df.copy()
right_loc.iloc[1:4, 1:3] *= -2
# run tests with uniform dtypes
run_tests(df, rhs, right_loc, right_iloc)
# make frames multi-type & re-run tests
for frame in [df, rhs, right_loc, right_iloc]:
frame["joe"] = frame["joe"].astype("float64")
frame["jolie"] = frame["jolie"].map(lambda x: f"@{x}")
right_iloc["joe"] = [1.0, "@-28", "@-20", "@-12", 17.0]
right_iloc["jolie"] = ["@2", -26.0, -18.0, -10.0, "@18"]
with pytest.raises(TypeError, match="Invalid value"):
run_tests(df, rhs, right_loc, right_iloc)
@pytest.mark.parametrize(
"idx", [_mklbl("A", 20), np.arange(20) + 100, np.linspace(100, 150, 20)]
)
def test_str_label_slicing_with_negative_step(self, idx):
SLC = pd.IndexSlice
idx = Index(idx)
ser = Series(np.arange(20), index=idx)
tm.assert_indexing_slices_equivalent(ser, SLC[idx[9] :: -1], SLC[9::-1])
tm.assert_indexing_slices_equivalent(ser, SLC[: idx[9] : -1], SLC[:8:-1])
tm.assert_indexing_slices_equivalent(
ser, SLC[idx[13] : idx[9] : -1], SLC[13:8:-1]
)
tm.assert_indexing_slices_equivalent(ser, SLC[idx[9] : idx[13] : -1], SLC[:0])
def test_slice_with_zero_step_raises(self, index, indexer_sl, frame_or_series):
obj = frame_or_series(np.arange(len(index)), index=index)
with pytest.raises(ValueError, match="slice step cannot be zero"):
indexer_sl(obj)[::0]
def test_loc_setitem_indexing_assignment_dict_already_exists(self):
index = Index([-5, 0, 5], name="z")
df = DataFrame({"x": [1, 2, 6], "y": [2, 2, 8]}, index=index)
expected = df.copy()
rhs = {"x": 9, "y": 99}
df.loc[5] = rhs
expected.loc[5] = [9, 99]
tm.assert_frame_equal(df, expected)
# GH#38335 same thing, mixed dtypes
df = DataFrame({"x": [1, 2, 6], "y": [2.0, 2.0, 8.0]}, index=index)
df.loc[5] = rhs
expected = DataFrame({"x": [1, 2, 9], "y": [2.0, 2.0, 99.0]}, index=index)
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_indexing_dtypes_on_empty(self):
# Check that .iloc returns correct dtypes GH9983
df = DataFrame({"a": [1, 2, 3], "b": ["b", "b2", "b3"]})
df2 = df.iloc[[], :]
assert df2.loc[:, "a"].dtype == np.int64
tm.assert_series_equal(df2.loc[:, "a"], df2.iloc[:, 0])
@pytest.mark.parametrize("size", [5, 999999, 1000000])
def test_loc_range_in_series_indexing(self, size):
# range can cause an indexing error
# GH 11652
s = Series(index=range(size), dtype=np.float64)
s.loc[range(1)] = 42
tm.assert_series_equal(s.loc[range(1)], Series(42.0, index=range(1)))
s.loc[range(2)] = 43
tm.assert_series_equal(s.loc[range(2)], Series(43.0, index=range(2)))
def test_partial_boolean_frame_indexing(self):
# GH 17170
df = DataFrame(
np.arange(9.0).reshape(3, 3), index=list("abc"), columns=list("ABC")
)
index_df = DataFrame(1, index=list("ab"), columns=list("AB"))
result = df[index_df.notnull()]
expected = DataFrame(
np.array([[0.0, 1.0, np.nan], [3.0, 4.0, np.nan], [np.nan] * 3]),
index=list("abc"),
columns=list("ABC"),
)
tm.assert_frame_equal(result, expected)
def test_period_column_slicing(self):
# GH#60273 The transpose operation creates a single 5x1 block of PeriodDtype
# Make sure it is reindexed correctly
df = DataFrame(
pd.period_range("2021-01-01", periods=5, freq="D"),
columns=["A"],
).T
result = df[[0, 1, 2]]
expected = DataFrame(
[
[
pd.Period("2021-01-01", freq="D"),
pd.Period("2021-01-02", freq="D"),
pd.Period("2021-01-03", freq="D"),
]
],
index=["A"],
columns=[0, 1, 2],
)
tm.assert_frame_equal(result, expected)
def test_no_reference_cycle(self):
df = DataFrame({"a": [0, 1], "b": [2, 3]})
for name in ("loc", "iloc", "at", "iat"):
getattr(df, name)
wr = weakref.ref(df)
del df
assert wr() is None
def test_label_indexing_on_nan(self, nulls_fixture):
# GH 32431
df = Series([1, "{1,2}", 1, nulls_fixture])
vc = df.value_counts(dropna=False)
result1 = vc.loc[nulls_fixture]
result2 = vc[nulls_fixture]
expected = 1
assert result1 == expected
assert result2 == expected
| TestMisc |
python | google__pytype | pytype/pytd/visitors.py | {
"start": 30737,
"end": 31941
} | class ____(Visitor):
"""Visitor for replacing types in a tree.
This replaces both NamedType and ClassType nodes that have a name in the
mapping. The two cases are not distinguished.
"""
def __init__(self, mapping, record=None):
"""Initialize this visitor.
Args:
mapping: A dictionary, mapping strings to node instances. Any NamedType or
ClassType with a name in this dictionary will be replaced with the
corresponding value.
record: Optional. A set. If given, this records which entries in the map
were used.
"""
super().__init__()
self.mapping = mapping
self.record = record
def VisitNamedType(self, node):
if node.name in self.mapping:
if self.record is not None:
self.record.add(node.name)
return self.mapping[node.name]
return node
def VisitClassType(self, node):
return self.VisitNamedType(node)
# We do *not* want to have 'def VisitClass' because that will replace a class
# definition with itself, which is almost certainly not what is wanted,
# because running pytd_utils.Print on it will result in output that's just a
# list of class names with no contents.
| ReplaceTypesByName |
python | sympy__sympy | sympy/polys/rootisolation.py | {
"start": 50310,
"end": 54707
} | class ____:
"""A fully qualified representation of a real isolation interval. """
def __init__(self, data, f, dom):
"""Initialize new real interval with complete information. """
if len(data) == 2:
s, t = data
self.neg = False
if s < 0:
if t <= 0:
f, s, t, self.neg = dup_mirror(f, dom), -t, -s, True
else:
raise ValueError("Cannot refine a real root in (%s, %s)" % (s, t))
a, b, c, d = _mobius_from_interval((s, t), dom.get_field())
f = dup_transform(f, dup_strip([a, b], dom),
dup_strip([c, d], dom), dom)
self.mobius = a, b, c, d
else:
self.mobius = data[:-1]
self.neg = data[-1]
self.f, self.dom = f, dom
@property
def func(self):
return RealInterval
@property
def args(self):
i = self
return (i.mobius + (i.neg,), i.f, i.dom)
def __eq__(self, other):
if type(other) is not type(self):
return False
return self.args == other.args
@property
def a(self):
"""Return the position of the left end. """
field = self.dom.get_field()
a, b, c, d = self.mobius
if not self.neg:
if a*d < b*c:
return field(a, c)
return field(b, d)
else:
if a*d > b*c:
return -field(a, c)
return -field(b, d)
@property
def b(self):
"""Return the position of the right end. """
was = self.neg
self.neg = not was
rv = -self.a
self.neg = was
return rv
@property
def dx(self):
"""Return width of the real isolating interval. """
return self.b - self.a
@property
def center(self):
"""Return the center of the real isolating interval. """
return (self.a + self.b)/2
@property
def max_denom(self):
"""Return the largest denominator occurring in either endpoint. """
return max(self.a.denominator, self.b.denominator)
def as_tuple(self):
"""Return tuple representation of real isolating interval. """
return (self.a, self.b)
def __repr__(self):
return "(%s, %s)" % (self.a, self.b)
def __contains__(self, item):
"""
Say whether a complex number belongs to this real interval.
Parameters
==========
item : pair (re, im) or number re
Either a pair giving the real and imaginary parts of the number,
or else a real number.
"""
if isinstance(item, tuple):
re, im = item
else:
re, im = item, 0
return im == 0 and self.a <= re <= self.b
def is_disjoint(self, other):
"""Return ``True`` if two isolation intervals are disjoint. """
if isinstance(other, RealInterval):
return (self.b < other.a or other.b < self.a)
assert isinstance(other, ComplexInterval)
return (self.b < other.ax or other.bx < self.a
or other.ay*other.by > 0)
def _inner_refine(self):
"""Internal one step real root refinement procedure. """
if self.mobius is None:
return self
f, mobius = dup_inner_refine_real_root(
self.f, self.mobius, self.dom, steps=1, mobius=True)
return RealInterval(mobius + (self.neg,), f, self.dom)
def refine_disjoint(self, other):
"""Refine an isolating interval until it is disjoint with another one. """
expr = self
while not expr.is_disjoint(other):
expr, other = expr._inner_refine(), other._inner_refine()
return expr, other
def refine_size(self, dx):
"""Refine an isolating interval until it is of sufficiently small size. """
expr = self
while not (expr.dx < dx):
expr = expr._inner_refine()
return expr
def refine_step(self, steps=1):
"""Perform several steps of real root refinement algorithm. """
expr = self
for _ in range(steps):
expr = expr._inner_refine()
return expr
def refine(self):
"""Perform one step of real root refinement algorithm. """
return self._inner_refine()
| RealInterval |
python | openai__openai-python | src/openai/resources/videos.py | {
"start": 14958,
"end": 28709
} | class ____(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncVideosWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncVideosWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncVideosWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncVideosWithStreamingResponse(self)
async def create(
self,
*,
prompt: str,
input_reference: FileTypes | Omit = omit,
model: VideoModel | Omit = omit,
seconds: VideoSeconds | Omit = omit,
size: VideoSize | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Video:
"""
Create a video
Args:
prompt: Text prompt that describes the video to generate.
input_reference: Optional image reference that guides generation.
model: The video generation model to use. Defaults to `sora-2`.
seconds: Clip duration in seconds. Defaults to 4 seconds.
size: Output resolution formatted as width x height. Defaults to 720x1280.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
body = deepcopy_minimal(
{
"prompt": prompt,
"input_reference": input_reference,
"model": model,
"seconds": seconds,
"size": size,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["input_reference"]])
if files:
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return await self._post(
"/videos",
body=await async_maybe_transform(body, video_create_params.VideoCreateParams),
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Video,
)
async def create_and_poll(
self,
*,
prompt: str,
input_reference: FileTypes | Omit = omit,
model: VideoModel | Omit = omit,
seconds: VideoSeconds | Omit = omit,
size: VideoSize | Omit = omit,
poll_interval_ms: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Video:
"""Create a video and wait for it to be processed."""
video = await self.create(
model=model,
prompt=prompt,
input_reference=input_reference,
seconds=seconds,
size=size,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
)
return await self.poll(
video.id,
poll_interval_ms=poll_interval_ms,
)
async def poll(
self,
video_id: str,
*,
poll_interval_ms: int | Omit = omit,
) -> Video:
"""Wait for the vector store file to finish processing.
Note: this will return even if the file failed to process, you need to check
file.last_error and file.status to handle these cases
"""
headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
if is_given(poll_interval_ms):
headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
while True:
response = await self.with_raw_response.retrieve(
video_id,
extra_headers=headers,
)
video = response.parse()
if video.status == "in_progress" or video.status == "queued":
if not is_given(poll_interval_ms):
from_header = response.headers.get("openai-poll-after-ms")
if from_header is not None:
poll_interval_ms = int(from_header)
else:
poll_interval_ms = 1000
await self._sleep(poll_interval_ms / 1000)
elif video.status == "completed" or video.status == "failed":
return video
else:
if TYPE_CHECKING: # type: ignore[unreachable]
assert_never(video.status)
else:
return video
async def retrieve(
self,
video_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Video:
"""
Retrieve a video
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not video_id:
raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
return await self._get(
f"/videos/{video_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Video,
)
def list(
self,
*,
after: str | Omit = omit,
limit: int | Omit = omit,
order: Literal["asc", "desc"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncPaginator[Video, AsyncConversationCursorPage[Video]]:
"""
List videos
Args:
after: Identifier for the last item from the previous pagination request
limit: Number of items to retrieve
order: Sort order of results by timestamp. Use `asc` for ascending order or `desc` for
descending order.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._get_api_list(
"/videos",
page=AsyncConversationCursorPage[Video],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
},
video_list_params.VideoListParams,
),
),
model=Video,
)
async def delete(
self,
video_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> VideoDeleteResponse:
"""
Delete a video
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not video_id:
raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
return await self._delete(
f"/videos/{video_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=VideoDeleteResponse,
)
async def download_content(
self,
video_id: str,
*,
variant: Literal["video", "thumbnail", "spritesheet"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> _legacy_response.HttpxBinaryResponseContent:
"""Download video content
Args:
variant: Which downloadable asset to return.
Defaults to the MP4 video.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not video_id:
raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
return await self._get(
f"/videos/{video_id}/content",
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=await async_maybe_transform(
{"variant": variant}, video_download_content_params.VideoDownloadContentParams
),
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
async def remix(
self,
video_id: str,
*,
prompt: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Video:
"""
Create a video remix
Args:
prompt: Updated text prompt that directs the remix generation.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not video_id:
raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
return await self._post(
f"/videos/{video_id}/remix",
body=await async_maybe_transform({"prompt": prompt}, video_remix_params.VideoRemixParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Video,
)
| AsyncVideos |
python | coleifer__peewee | tests/model_save.py | {
"start": 398,
"end": 562
} | class ____(TestModel):
pk1 = IntegerField()
pk2 = IntegerField()
value = IntegerField()
class Meta:
primary_key = CompositeKey('pk1', 'pk2')
| T4 |
python | spack__spack | lib/spack/spack/directives.py | {
"start": 33472,
"end": 33605
} | class ____(DirectiveError):
"""Raised when an invalid or unsupported package directive is specified."""
| UnsupportedPackageDirective |
python | walkccc__LeetCode | solutions/361. Bomb Enemy/361.py | {
"start": 0,
"end": 937
} | class ____:
def maxKilledEnemies(self, grid: list[list[str]]) -> int:
m = len(grid)
n = len(grid[0])
enemyCount = 0
# dp[i][j] := the maximum enemies grid[i][j] can kill
dp = [[0] * n for _ in range(m)]
def update(i: int, j: int) -> None:
nonlocal enemyCount
if grid[i][j] == '0':
dp[i][j] += enemyCount
elif grid[i][j] == 'E':
enemyCount += 1
else: # grid[i][j] == 'W'
enemyCount = 0
# Extend the four directions, if meet 'W', need to start over from 0.
for i in range(m):
enemyCount = 0
for j in range(n):
update(i, j)
enemyCount = 0
for j in reversed(range(n)):
update(i, j)
for j in range(n):
enemyCount = 0
for i in range(m):
update(i, j)
enemyCount = 0
for i in reversed(range(m)):
update(i, j)
# Returns sum(map(sum, dp))
return max(map(max, dp))
| Solution |
python | PyCQA__pylint | pylint/checkers/symilar.py | {
"start": 5898,
"end": 11691
} | class ____(NamedTuple):
"""Indices in both linesets that mark the beginning of successive lines."""
fst_lineset_index: Index
snd_lineset_index: Index
def __repr__(self) -> str:
return (
f"<LineSetStartCouple <{self.fst_lineset_index};{self.snd_lineset_index}>>"
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, LineSetStartCouple):
return NotImplemented
return (
self.fst_lineset_index == other.fst_lineset_index
and self.snd_lineset_index == other.snd_lineset_index
)
def __hash__(self) -> int:
return hash(self.fst_lineset_index) + hash(self.snd_lineset_index)
def increment(self, value: Index) -> LineSetStartCouple:
return LineSetStartCouple(
Index(self.fst_lineset_index + value),
Index(self.snd_lineset_index + value),
)
LinesChunkLimits_T = tuple["LineSet", LineNumber, LineNumber]
def hash_lineset(
lineset: LineSet, min_common_lines: int = DEFAULT_MIN_SIMILARITY_LINE
) -> tuple[HashToIndex_T, IndexToLines_T]:
"""Return two dicts.
The first associates the hash of successive stripped lines of a lineset
to the indices of the starting lines.
The second dict, associates the index of the starting line in the lineset's stripped lines to the
couple [start, end] lines number in the corresponding file.
:param lineset: lineset object (i.e the lines in a file)
:param min_common_lines: number of successive lines that are used to compute the hash
:return: a dict linking hashes to corresponding start index and a dict that links this
index to the start and end lines in the file
"""
hash2index = defaultdict(list)
index2lines = {}
# Comments, docstring and other specific patterns maybe excluded -> call to stripped_lines
# to get only what is desired
lines = tuple(x.text for x in lineset.stripped_lines)
# Need different iterators on same lines but each one is shifted 1 from the precedent
shifted_lines = [iter(lines[i:]) for i in range(min_common_lines)]
for i, *succ_lines in enumerate(zip(*shifted_lines)):
start_linenumber = LineNumber(lineset.stripped_lines[i].line_number)
try:
end_linenumber = lineset.stripped_lines[i + min_common_lines].line_number
except IndexError:
end_linenumber = LineNumber(lineset.stripped_lines[-1].line_number + 1)
index = Index(i)
index2lines[index] = SuccessiveLinesLimits(
start=start_linenumber, end=end_linenumber
)
l_c = LinesChunk(lineset.name, index, *succ_lines)
hash2index[l_c].append(index)
return hash2index, index2lines
def remove_successive(all_couples: CplIndexToCplLines_T) -> None:
"""Removes all successive entries in the dictionary in argument.
:param all_couples: collection that has to be cleaned up from successive entries.
The keys are couples of indices that mark the beginning of common entries
in both linesets. The values have two parts. The first one is the couple
of starting and ending line numbers of common successive lines in the first file.
The second part is the same for the second file.
For example consider the following dict:
>>> all_couples
{(11, 34): ([5, 9], [27, 31]),
(23, 79): ([15, 19], [45, 49]),
(12, 35): ([6, 10], [28, 32])}
There are two successive keys (11, 34) and (12, 35).
It means there are two consecutive similar chunks of lines in both files.
Thus remove last entry and update the last line numbers in the first entry
>>> remove_successive(all_couples)
>>> all_couples
{(11, 34): ([5, 10], [27, 32]),
(23, 79): ([15, 19], [45, 49])}
"""
couple: LineSetStartCouple
for couple in tuple(all_couples.keys()):
to_remove = []
test = couple.increment(Index(1))
while test in all_couples:
all_couples[couple].first_file.end = all_couples[test].first_file.end
all_couples[couple].second_file.end = all_couples[test].second_file.end
all_couples[couple].effective_cmn_lines_nb += 1
to_remove.append(test)
test = test.increment(Index(1))
for target in to_remove:
try:
all_couples.pop(target)
except KeyError:
pass
def filter_noncode_lines(
ls_1: LineSet,
stindex_1: Index,
ls_2: LineSet,
stindex_2: Index,
common_lines_nb: int,
) -> int:
"""Return the effective number of common lines between lineset1
and lineset2 filtered from non code lines.
That is to say the number of common successive stripped
lines except those that do not contain code (for example
a line with only an ending parenthesis)
:param ls_1: first lineset
:param stindex_1: first lineset starting index
:param ls_2: second lineset
:param stindex_2: second lineset starting index
:param common_lines_nb: number of common successive stripped lines before being filtered from non code lines
:return: the number of common successive stripped lines that contain code
"""
stripped_l1 = [
lspecif.text
for lspecif in ls_1.stripped_lines[stindex_1 : stindex_1 + common_lines_nb]
if REGEX_FOR_LINES_WITH_CONTENT.match(lspecif.text)
]
stripped_l2 = [
lspecif.text
for lspecif in ls_2.stripped_lines[stindex_2 : stindex_2 + common_lines_nb]
if REGEX_FOR_LINES_WITH_CONTENT.match(lspecif.text)
]
return sum(sline_1 == sline_2 for sline_1, sline_2 in zip(stripped_l1, stripped_l2))
| LineSetStartCouple |
python | huggingface__transformers | src/transformers/models/fuyu/image_processing_fuyu.py | {
"start": 6848,
"end": 34135
} | class ____(BaseImageProcessor):
"""
This class should handle the image processing part before the main FuyuForCausalLM. In particular, it should
handle:
- Processing Images:
Taking a batch of images as input. If the images are variable-sized, it resizes them based on the desired patch
dimensions. The image output is always img_h, img_w of (1080, 1920)
Then, it patches up these images using the patchify_image function.
- Creating Image Input IDs:
For each patch, a placeholder ID is given to identify where these patches belong in a token sequence. For
variable-sized images, each line of patches is terminated with a newline ID.
- Image Patch Indices:
For each image patch, the code maintains an index where these patches should be inserted in a token stream.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image to `size`.
size (`dict[str, int]`, *optional*, defaults to `{"height": 1080, "width": 1920}`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image to `size`.
padding_value (`float`, *optional*, defaults to 1.0):
The value to pad the image with.
padding_mode (`str`, *optional*, defaults to `"constant"`):
The padding mode to use when padding the image.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image.
image_mean (`float`, *optional*, defaults to 0.5):
The mean to use when normalizing the image.
image_std (`float`, *optional*, defaults to 0.5):
The standard deviation to use when normalizing the image.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `1 / 255`):
The factor to use when rescaling the image.
patch_size (`dict[str, int]`, *optional*, defaults to `{"height": 30, "width": 30}`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
"""
model_input_names = [
"images",
"image_input_ids",
"image_patches",
"image_patch_indices_per_batch",
"image_patch_indices_per_subsequence",
]
valid_kwargs = FuyuImagesKwargs
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BILINEAR,
do_pad: bool = True,
padding_value: float = 1.0,
padding_mode: str = "constant",
do_normalize: bool = True,
image_mean: Union[float, list[float]] = 0.5,
image_std: Union[float, list[float]] = 0.5,
do_rescale: bool = True,
rescale_factor: float = 1 / 255,
patch_size: Optional[dict[str, int]] = None,
**kwargs,
):
super().__init__(**kwargs)
self.do_resize = do_resize
self.size = size if size is not None else {"height": 1080, "width": 1920}
self.resample = resample
self.do_pad = do_pad
self.padding_value = padding_value
self.padding_mode = padding_mode
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.patch_size = patch_size if patch_size is not None else {"height": 30, "width": 30}
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BILINEAR,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
"""
image_height, image_width = get_image_size(image, input_data_format)
target_height, target_width = size["height"], size["width"]
if image_width <= target_width and image_height <= target_height:
return image
height_scale_factor = target_height / image_height
width_scale_factor = target_width / image_width
optimal_scale_factor = min(height_scale_factor, width_scale_factor)
new_height = int(image_height * optimal_scale_factor)
new_width = int(image_width * optimal_scale_factor)
scaled_image = resize(
image=image,
size=(new_height, new_width),
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
return scaled_image
def pad_image(
self,
image: np.ndarray,
size: dict[str, int],
mode: str = "constant",
constant_values: float = 1.0,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""
Pad an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to pad.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
data_format (`ChannelDimension` or `str`, *optional*):
The data format of the output image. If unset, the same format as the input image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
image_height, image_width = get_image_size(image, input_data_format)
target_height, target_width = size["height"], size["width"]
padding_top = 0
padding_left = 0
padding_bottom = target_height - image_height
padding_right = target_width - image_width
padded_image = pad(
image,
padding=((padding_top, padding_bottom), (padding_left, padding_right)),
mode=mode,
constant_values=constant_values,
data_format=data_format,
input_data_format=input_data_format,
)
return padded_image
@filter_out_non_signature_kwargs()
def preprocess(
self,
images,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_pad: Optional[bool] = None,
padding_value: Optional[float] = None,
padding_mode: Optional[str] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[float] = None,
image_std: Optional[float] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
patch_size: Optional[dict[str, int]] = None,
data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
return_tensors: Optional[TensorType] = None,
):
"""
Utility function to preprocess the images and extract necessary information about original formats.
Args:
images (`ImageInput`):
Images to preprocess. Expects a single image, a list or images or a list of lists of images. Pixel
values range from 0 to 255, or between 0 and 1 if `do_rescale` is `False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image to `size`.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether to pad the image to `size`.
padding_value (`float`, *optional*, defaults to `self.padding_value`):
The value to pad the image with.
padding_mode (`str`, *optional*, defaults to `self.padding_mode`):
The padding mode to use when padding the image.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float`, *optional*, defaults to `self.image_mean`):
The mean to use when normalizing the image.
image_std (`float`, *optional*, defaults to `self.image_std`):
The standard deviation to use when normalizing the image.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
The factor to use when rescaling the image.
patch_size (`dict[str, int]`, *optional*, defaults to `self.patch_size`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format of the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
resample = resample if resample is not None else self.resample
do_pad = do_pad if do_pad is not None else self.do_pad
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
padding_value = padding_value if padding_value is not None else self.padding_value
padding_mode = padding_mode if padding_mode is not None else self.padding_mode
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
patch_size = patch_size if patch_size is not None else self.patch_size
if isinstance(images, list) and any(isinstance(elem, list) and len(elem) >= 2 for elem in images):
raise ValueError("Multiple images for a single sample are not yet supported.")
batch_images = make_list_of_list_of_images(images)
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
resample=resample,
)
# All transformations expect numpy arrays.
batch_images = [[to_numpy_array(image) for image in images] for images in batch_images]
# Search for the first image in the image list.
# NOTE: we can't slice the first image with images_list[0][0] if the first batch contains no images. See #36682
first_image_in_list = [images for images in batch_images if images][0][0]
if do_rescale and is_scaled_image(first_image_in_list):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(first_image_in_list)
original_image_sizes = [
get_image_size(images[0], channel_dim=input_data_format) for images in batch_images if images
]
size = get_size_dict(size) # for BC
if do_resize:
batch_images = [
[self.resize(image, size=size, input_data_format=input_data_format) for image in images]
for images in batch_images
]
image_sizes = [get_image_size(images[0], channel_dim=input_data_format) for images in batch_images if images]
image_unpadded_heights = [[image_size[0]] for image_size in image_sizes]
image_unpadded_widths = [[image_size[1]] for image_size in image_sizes]
# scale_h is the same as scale_w
image_scale_factors = [
[resized_size[0] / original_size[0]]
for original_size, resized_size in zip(original_image_sizes, image_sizes)
]
if do_pad:
batch_images = [
[
self.pad_image(
image,
size=size,
mode=padding_mode,
constant_values=padding_value,
input_data_format=input_data_format,
)
for image in images
]
for images in batch_images
]
if do_rescale:
batch_images = [
[self.rescale(image, scale=rescale_factor, input_data_format=input_data_format) for image in images]
for images in batch_images
]
if do_normalize:
batch_images = [
[
self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format)
for image in images
]
for images in batch_images
]
if data_format is not None:
batch_images = [
[to_channel_dimension_format(image, data_format, input_data_format) for image in images]
for images in batch_images
]
data = {
"images": batch_images,
"image_unpadded_heights": image_unpadded_heights,
"image_unpadded_widths": image_unpadded_widths,
"image_scale_factors": image_scale_factors,
}
return FuyuBatchFeature(data=data, tensor_type=return_tensors)
def get_num_patches(self, image_height: int, image_width: int, patch_size: Optional[dict[str, int]] = None) -> int:
"""
Calculate number of patches required to encode an image.
Args:
image_height (`int`):
Height of the image.
image_width (`int`):
Width of the image.
patch_size (`dict[str, int]`, *optional*, defaults to `self.patch_size`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
"""
patch_size = patch_size if patch_size is not None else self.patch_size
patch_height, patch_width = self.patch_size["height"], self.patch_size["width"]
if image_height % patch_height != 0:
raise ValueError(f"{image_height=} must be divisible by {patch_height}")
if image_width % patch_width != 0:
raise ValueError(f"{image_width=} must be divisible by {patch_width}")
num_patches_per_dim_h = image_height // patch_height
num_patches_per_dim_w = image_width // patch_width
num_patches = num_patches_per_dim_h * num_patches_per_dim_w
return num_patches
def patchify_image(self, image: "torch.Tensor", patch_size: Optional[dict[str, int]] = None) -> "torch.Tensor":
"""
Convert an image into a tensor of patches.
Args:
image (`torch.Tensor`):
Image to convert. Shape: [batch, channels, height, width]
patch_size (`dict[str, int]`, *optional*, defaults to `self.patch_size`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
"""
requires_backends(self, ["torch"])
patch_size = patch_size if patch_size is not None else self.patch_size
patch_height, patch_width = patch_size["height"], patch_size["width"]
# TODO refer to https://github.com/ArthurZucker/transformers/blob/0f0a3fe5ca5697ee58faeb5b53f049af720b5e98/src/transformers/models/vit_mae/modeling_vit_mae.py#L871
# torch implementation is faster but does not handle non-squares
batch_size, channels, _, _ = image.shape
unfolded_along_height = image.unfold(2, patch_height, patch_height)
patches = unfolded_along_height.unfold(3, patch_width, patch_width)
patches = patches.contiguous()
patches = patches.view(batch_size, channels, -1, patch_height, patch_width)
patches = patches.permute(0, 2, 3, 4, 1)
patches = patches.reshape(batch_size, -1, channels * patch_height * patch_width)
return patches
    def preprocess_with_tokenizer_info(
        self,
        image_input: "torch.Tensor",
        image_present: "torch.Tensor",
        image_unpadded_h: "torch.Tensor",
        image_unpadded_w: "torch.Tensor",
        image_placeholder_id: int,
        image_newline_id: int,
        variable_sized: bool,
        patch_size: Optional[dict[str, int]] = None,
    ) -> FuyuBatchFeature:
        """Process images for model input. In particular, variable-sized images are handled here.

        Args:
            image_input (`torch.Tensor` of shape [batch_size, subsequence_size, num_channels, height, width]):
                Tensor of images padded to model input size.
            image_present (`torch.Tensor` of shape [batch_size, subsequence_size, num_images]):
                Tensor of 1s and 0s indicating whether an image is present.
            image_unpadded_h (`torch.Tensor` of shape [batch_size, subsequence_size]):
                Tensor of unpadded image heights.
            image_unpadded_w (`torch.Tensor` of shape [batch_size, subsequence_size]):
                Tensor of unpadded image widths.
            image_placeholder_id (int):
                The id of the image placeholder token. Comes from an associated tokenizer.
            image_newline_id (int):
                The id of the image newline token. Comes from an associated tokenizer.
            variable_sized (bool):
                Whether to process images as variable-sized.
            patch_size (`dict[str, int]`, *optional*, defaults to `self.patch_size`):
                Size of the patches.
        """
        requires_backends(self, ["torch"])
        patch_size = patch_size if patch_size is not None else self.patch_size
        patch_height, patch_width = patch_size["height"], patch_size["width"]
        # Only images that are present.
        images: list[list[torch.Tensor]] = []
        batch_image_patches: list[list[torch.Tensor]] = []
        # Image input ids for every subsequence, including ones with no image present.
        batch_image_input_ids: list[list[torch.Tensor]] = []
        for batch_index in range(image_input.shape[0]):
            image_input_ids = []
            image_patches = []
            for subseq_index in range(image_input.shape[1]):
                if image_present[batch_index, subseq_index]:
                    image = image_input[batch_index, subseq_index]
                    image_height, image_width = image.shape[1], image.shape[2]
                    if variable_sized:
                        # Crop the padded image back down to the unpadded size,
                        # rounded up to a whole number of patches.
                        # The min() is required here due to floating point issues:
                        # math.ceil(torch.tensor(300).cuda() / 30) == 11
                        new_h = min(
                            image_height,
                            math.ceil(image_unpadded_h[batch_index, subseq_index] / patch_height) * patch_height,
                        )
                        new_w = min(
                            image_width,
                            math.ceil(image_unpadded_w[batch_index, subseq_index] / patch_width) * patch_width,
                        )
                        image = image[:, :new_h, :new_w]
                        image_height, image_width = new_h, new_w
                    # One placeholder token id per patch.
                    num_patches = self.get_num_patches(image_height=image_height, image_width=image_width)
                    tensor_of_image_ids = torch.full(
                        [num_patches], image_placeholder_id, dtype=torch.int32, device=image_input.device
                    )
                    patches = self.patchify_image(image=image.unsqueeze(0)).squeeze(0)
                    # NOTE(review): plain assert is stripped when Python runs
                    # with -O; a ValueError would be more robust here.
                    assert num_patches == patches.shape[0]
                    if variable_sized:
                        # Now terminate each line with |NEWLINE|.
                        tensor_of_image_ids = tensor_of_image_ids.reshape(-1, image_width // patch_width)
                        newline_ids = torch.full(
                            [tensor_of_image_ids.shape[0], 1],
                            image_newline_id,
                            dtype=torch.int32,
                            device=image_input.device,
                        )
                        tensor_of_image_ids = torch.cat([tensor_of_image_ids, newline_ids], dim=1)
                        tensor_of_image_ids = tensor_of_image_ids.reshape(-1)
                    images.append([image])
                    image_input_ids.append(tensor_of_image_ids)
                    image_patches.append(patches)
                else:
                    # No image in this subsequence: keep an empty id tensor so
                    # the per-subsequence structure is preserved.
                    image_input_ids.append(torch.tensor([], dtype=torch.int32, device=image_input.device))
            batch_image_input_ids.append(image_input_ids)
            batch_image_patches.append(image_patches)
        # Create image_patch_input_indices, where non-negative values correspond to image patches to be inserted in
        # the stream.
        image_patch_indices_per_batch: list[list[torch.Tensor]] = []
        image_patch_indices_per_subsequence: list[list[torch.Tensor]] = []
        for sample_image_input_ids in batch_image_input_ids:
            # index_offset makes patch indices cumulative across subsequences
            # within the same batch sample.
            index_offset = 0
            per_batch_indices = []
            per_subsequence_indices = []
            for subseq_image_input_ids in sample_image_input_ids:
                # Indices of image patches.
                patches_mask = subseq_image_input_ids == image_placeholder_id
                num_patches = torch.count_nonzero(patches_mask)
                indices = torch.arange(num_patches, dtype=torch.int64, device=subseq_image_input_ids.device).type_as(
                    subseq_image_input_ids
                )
                # Place those indices in the image input ids token stream, with -1 representing non-index tokens.
                indices_in_stream_per_batch = torch.full_like(subseq_image_input_ids, -1)
                indices_in_stream_per_subsequence = torch.full_like(subseq_image_input_ids, -1)
                patches_inds = torch.nonzero(patches_mask, as_tuple=True)[0]
                indices_in_stream_per_batch[patches_inds] = indices + index_offset
                indices_in_stream_per_subsequence[patches_inds] = indices
                per_batch_indices.append(indices_in_stream_per_batch)
                per_subsequence_indices.append(indices_in_stream_per_subsequence)
                index_offset += num_patches
            image_patch_indices_per_batch.append(per_batch_indices)
            image_patch_indices_per_subsequence.append(per_subsequence_indices)
        return FuyuBatchFeature(
            data={
                "images": images,
                "image_input_ids": batch_image_input_ids,
                "image_patches": batch_image_patches,
                "image_patch_indices_per_batch": image_patch_indices_per_batch,
                "image_patch_indices_per_subsequence": image_patch_indices_per_subsequence,
            }
        )
# Public API of this module.
__all__ = ["FuyuImageProcessor"]
| FuyuImageProcessor |
python | numba__numba | numba/tests/test_array_reductions.py | {
"start": 3802,
"end": 36480
} | class ____(MemoryLeakMixin, TestCase):
"""
Test array reduction methods and functions such as .sum(), .max(), etc.
"""
def setUp(self):
super(TestArrayReductions, self).setUp()
np.random.seed(42)
def check_reduction_basic(self, pyfunc, **kwargs):
# Basic reduction checks on 1-d float64 arrays
cfunc = jit(nopython=True)(pyfunc)
def check(arr):
self.assertPreciseEqual(pyfunc(arr), cfunc(arr), **kwargs)
arr = np.float64([1.0, 2.0, 0.0, -0.0, 1.0, -1.5])
check(arr)
arr = np.float64([-0.0, -1.5])
check(arr)
arr = np.float64([-1.5, 2.5, 'inf'])
check(arr)
arr = np.float64([-1.5, 2.5, '-inf'])
check(arr)
arr = np.float64([-1.5, 2.5, 'inf', '-inf'])
check(arr)
arr = np.float64(['nan', -1.5, 2.5, 'nan', 3.0])
check(arr)
arr = np.float64(['nan', -1.5, 2.5, 'nan', 'inf', '-inf', 3.0])
check(arr)
arr = np.float64([5.0, 'nan', -1.5, 'nan'])
check(arr)
# Only NaNs
arr = np.float64(['nan', 'nan'])
check(arr)
    def test_all_basic(self, pyfunc=array_all):
        # Exercise ndarray.all() over floats including inf/nan/-0.0, on 1-d,
        # 2-d and reversed (non-contiguous) layouts.
        cfunc = jit(nopython=True)(pyfunc)
        def check(arr):
            self.assertPreciseEqual(pyfunc(arr), cfunc(arr))
        arr = np.float64([1.0, 0.0, float('inf'), float('nan')])
        check(arr)
        arr[1] = -0.0
        check(arr)
        arr[1] = 1.5
        check(arr)
        arr = arr.reshape((2, 2))
        check(arr)
        check(arr[::-1])
    def test_any_basic(self, pyfunc=array_any):
        # Exercise ndarray.any() analogously to test_all_basic.
        cfunc = jit(nopython=True)(pyfunc)
        def check(arr):
            self.assertPreciseEqual(pyfunc(arr), cfunc(arr))
        arr = np.float64([0.0, -0.0, 0.0, 0.0])
        check(arr)
        arr[2] = float('nan')
        check(arr)
        arr[2] = float('inf')
        check(arr)
        arr[2] = 1.5
        check(arr)
        arr = arr.reshape((2, 2))
        check(arr)
        check(arr[::-1])
    # Each test below delegates to check_reduction_basic, which runs the
    # reduction over 1-d float64 arrays containing -0.0, inf and NaN.
    def test_sum_basic(self):
        self.check_reduction_basic(array_sum)
    def test_mean_basic(self):
        self.check_reduction_basic(array_mean)
    def test_var_basic(self):
        self.check_reduction_basic(array_var, prec='double')
    def test_std_basic(self):
        self.check_reduction_basic(array_std)
    def test_min_basic(self):
        self.check_reduction_basic(array_min)
    def test_max_basic(self):
        self.check_reduction_basic(array_max)
    def test_argmin_basic(self):
        self.check_reduction_basic(array_argmin)
    def test_argmax_basic(self):
        self.check_reduction_basic(array_argmax)
    def test_nanmin_basic(self):
        self.check_reduction_basic(array_nanmin)
    def test_nanmax_basic(self):
        self.check_reduction_basic(array_nanmax)
    def test_nanmean_basic(self):
        self.check_reduction_basic(array_nanmean)
    def test_nansum_basic(self):
        self.check_reduction_basic(array_nansum)
    def test_nanprod_basic(self):
        self.check_reduction_basic(array_nanprod)
    def test_nanstd_basic(self):
        self.check_reduction_basic(array_nanstd)
    def test_nanvar_basic(self):
        self.check_reduction_basic(array_nanvar, prec='double')
def check_median_basic(self, pyfunc, array_variations):
cfunc = jit(nopython=True)(pyfunc)
def check(arr):
expected = pyfunc(arr)
got = cfunc(arr)
self.assertPreciseEqual(got, expected)
# Empty array case
check(np.array([]))
# Odd sizes
def check_odd(a):
check(a)
a = a.reshape((9, 7))
check(a)
check(a.T)
for a in array_variations(np.arange(63) + 10.5):
check_odd(a)
# Even sizes
def check_even(a):
check(a)
a = a.reshape((4, 16))
check(a)
check(a.T)
for a in array_variations(np.arange(64) + 10.5):
check_even(a)
    @staticmethod
    def _array_variations(a):
        # Generator of progressively "nastier" variants of *a*, mutated in
        # place between yields:
        # Sorted, reversed, random, many duplicates, many NaNs, all NaNs
        yield a
        a = a[::-1].copy()
        yield a
        np.random.shuffle(a)
        yield a
        a[a % 4 >= 1] = 3.5
        yield a
        a[a % 4 >= 2] = np.nan
        yield a
        a[:] = np.nan
        yield a
    def test_median_basic(self):
        # Like _array_variations but stops before introducing NaNs: median's
        # behavior with NaNs is not what this test targets.
        pyfunc = array_median_global
        def variations(a):
            # Sorted, reversed, random, many duplicates
            yield a
            a = a[::-1].copy()
            yield a
            np.random.shuffle(a)
            yield a
            a[a % 4 >= 1] = 3.5
            yield a
        self.check_median_basic(pyfunc, variations)
    def check_percentile_and_quantile(self, pyfunc, q_upper_bound):
        # Shared driver for np.percentile / np.quantile checks;
        # q_upper_bound is 100 for percentile and 1 for quantile.
        cfunc = jit(nopython=True)(pyfunc)
        def check(a, q, abs_tol=1e-12):
            expected = pyfunc(a, q)
            got = cfunc(a, q)
            # NOTE: inf/nan is not checked, seems to be susceptible to upstream
            # changes
            finite = np.isfinite(expected)
            if np.all(finite):
                self.assertPreciseEqual(got, expected, abs_tol=abs_tol)
            else:
                self.assertPreciseEqual(got[finite], expected[finite],
                                        abs_tol=abs_tol)
        # NOTE(review): self.random looks like a shared RandomState supplied
        # by the test base class -- confirm against the class definition.
        a = self.random.randn(27).reshape(3, 3, 3)
        q = np.linspace(0, q_upper_bound, 14)[::-1]
        check(a, q)
        check(a, 0)
        check(a, q_upper_bound / 2)
        check(a, q_upper_bound)
        # Inject non-finite values and shuffle both inputs.
        not_finite = [np.nan, -np.inf, np.inf]
        a.flat[:10] = self.random.choice(not_finite, 10)
        self.random.shuffle(a)
        self.random.shuffle(q)
        check(a, q)
        # Plain Python sequences must be accepted too, not just ndarrays.
        a = a.flatten().tolist()
        q = q.flatten().tolist()
        check(a, q)
        check(tuple(a), tuple(q))
        a = self.random.choice([1, 2, 3, 4], 10)
        q = np.linspace(0, q_upper_bound, 5)
        check(a, q)
        # tests inspired by
        # https://github.com/numpy/numpy/blob/345b2f6e/numpy/lib/tests/test_function_base.py
        x = np.arange(8) * 0.5
        np.testing.assert_equal(cfunc(x, 0), 0.)
        np.testing.assert_equal(cfunc(x, q_upper_bound), 3.5)
        np.testing.assert_equal(cfunc(x, q_upper_bound / 2), 1.75)
        x = np.arange(12).reshape(3, 4)
        q = np.array((0.25, 0.5, 1.0)) * q_upper_bound
        np.testing.assert_equal(cfunc(x, q), [2.75, 5.5, 11.0])
        x = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
        q = np.array((0.25, 0.50)) * q_upper_bound
        np.testing.assert_equal(cfunc(x, q).shape, (2,))
        q = np.array((0.25, 0.50, 0.75)) * q_upper_bound
        np.testing.assert_equal(cfunc(x, q).shape, (3,))
        x = np.arange(12).reshape(3, 4)
        np.testing.assert_equal(cfunc(x, q_upper_bound / 2), 5.5)
        self.assertTrue(np.isscalar(cfunc(x, q_upper_bound / 2)))
        np.testing.assert_equal(cfunc([1, 2, 3], 0), 1)
        # The input array must not be mutated by the computation.
        a = np.array([2, 3, 4, 1])
        cfunc(a, [q_upper_bound / 2])
        np.testing.assert_equal(a, np.array([2, 3, 4, 1]))
    def check_percentile_edge_cases(self, pyfunc, q_upper_bound=100):
        # Exhaustive small-array combinations including non-finite values,
        # plus 0-d array and bare scalar inputs.
        cfunc = jit(nopython=True)(pyfunc)
        def check(a, q, abs_tol=1e-14):
            expected = pyfunc(a, q)
            got = cfunc(a, q)
            # NOTE: inf/nan is not checked, seems to be susceptible to upstream
            # changes
            finite = np.isfinite(expected)
            if np.all(finite):
                self.assertPreciseEqual(got, expected, abs_tol=abs_tol)
            else:
                self.assertPreciseEqual(got[finite], expected[finite],
                                        abs_tol=abs_tol)
        def convert_to_float_and_check(a, q, abs_tol=1e-14):
            expected = pyfunc(a, q).astype(np.float64)
            got = cfunc(a, q)
            self.assertPreciseEqual(got, expected, abs_tol=abs_tol)
        def _array_combinations(elements):
            # All arrays of length 1..9 drawn (with replacement) from elements.
            for i in range(1, 10):
                for comb in combinations_with_replacement(elements, i):
                    yield np.array(comb)
        # high number of combinations, many including non-finite values
        q = (0, 0.1 * q_upper_bound, 0.2 * q_upper_bound, q_upper_bound)
        element_pool = (1, -1, np.nan, np.inf, -np.inf)
        for a in _array_combinations(element_pool):
            check(a, q)
        # edge cases - numpy exhibits behavioural differences across
        # platforms, see: https://github.com/numpy/numpy/issues/13272
        if q_upper_bound == 1:
            _check = convert_to_float_and_check
        else:
            _check = check
        a = np.array(5)
        q = np.array(1)
        _check(a, q)
        a = 5
        q = q_upper_bound / 2
        _check(a, q)
def check_percentile_exceptions(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
def check_err(a, q):
with self.assertRaises(ValueError) as raises:
cfunc(a, q)
self.assertEqual(
"Percentiles must be in the range [0, 100]",
str(raises.exception)
)
# Exceptions leak references
self.disable_leak_check()
a = np.arange(5)
check_err(a, -5) # q less than 0
check_err(a, (1, 10, 105)) # q contains value greater than 100
check_err(a, (1, 10, np.nan)) # q contains nan
with self.assertTypingError() as e:
a = np.arange(5) * 1j
q = 0.1
cfunc(a, q)
self.assertIn('Not supported for complex dtype', str(e.exception))
def check_quantile_exceptions(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
def check_err(a, q):
with self.assertRaises(ValueError) as raises:
cfunc(a, q)
self.assertEqual(
"Quantiles must be in the range [0, 1]",
str(raises.exception)
)
# Exceptions leak references
self.disable_leak_check()
a = np.arange(5)
check_err(a, -0.5) # q less than 0
check_err(a, (0.1, 0.10, 1.05)) # q contains value greater than 1
check_err(a, (0.1, 0.10, np.nan)) # q contains nan
with self.assertTypingError() as e:
a = np.arange(5) * 1j
q = 0.1
cfunc(a, q)
self.assertIn('Not supported for complex dtype', str(e.exception))
    def test_percentile_basic(self):
        # np.percentile: q ranges over [0, 100].
        pyfunc = array_percentile_global
        self.check_percentile_and_quantile(pyfunc, q_upper_bound=100)
        self.check_percentile_edge_cases(pyfunc, q_upper_bound=100)
        self.check_percentile_exceptions(pyfunc)
    def test_nanpercentile_basic(self):
        pyfunc = array_nanpercentile_global
        self.check_percentile_and_quantile(pyfunc, q_upper_bound=100)
        self.check_percentile_edge_cases(pyfunc, q_upper_bound=100)
        self.check_percentile_exceptions(pyfunc)
    def test_quantile_basic(self):
        # np.quantile: q ranges over [0, 1].
        pyfunc = array_quantile_global
        self.check_percentile_and_quantile(pyfunc, q_upper_bound=1)
        self.check_percentile_edge_cases(pyfunc, q_upper_bound=1)
        self.check_quantile_exceptions(pyfunc)
    def test_nanquantile_basic(self):
        pyfunc = array_nanquantile_global
        self.check_percentile_and_quantile(pyfunc, q_upper_bound=1)
        self.check_percentile_edge_cases(pyfunc, q_upper_bound=1)
        self.check_quantile_exceptions(pyfunc)
    def test_nanmedian_basic(self):
        pyfunc = array_nanmedian_global
        self.check_median_basic(pyfunc, self._array_variations)
    def test_array_sum_global(self):
        # np.sum called as a free function, compiled with an explicit signature.
        arr = np.arange(10, dtype=np.int32)
        arrty = typeof(arr)
        self.assertEqual(arrty.ndim, 1)
        self.assertEqual(arrty.layout, 'C')
        cfunc = njit((arrty,),)(array_sum_global)
        self.assertEqual(np.sum(arr), cfunc(arr))
    def test_array_prod_int_1d(self):
        arr = np.arange(10, dtype=np.int32) + 1
        arrty = typeof(arr)
        self.assertEqual(arrty.ndim, 1)
        self.assertEqual(arrty.layout, 'C')
        cfunc = njit((arrty,))(array_prod)
        self.assertEqual(arr.prod(), cfunc(arr))
    def test_array_prod_float_1d(self):
        arr = np.arange(10, dtype=np.float32) + 1 / 10
        arrty = typeof(arr)
        self.assertEqual(arrty.ndim, 1)
        self.assertEqual(arrty.layout, 'C')
        cfunc = njit((arrty,))(array_prod)
        np.testing.assert_allclose(arr.prod(), cfunc(arr))
    def test_array_prod_global(self):
        arr = np.arange(10, dtype=np.int32)
        arrty = typeof(arr)
        self.assertEqual(arrty.ndim, 1)
        self.assertEqual(arrty.layout, 'C')
        cfunc = njit((arrty,))(array_prod_global)
        np.testing.assert_allclose(np.prod(arr), cfunc(arr))
def check_cumulative(self, pyfunc):
arr = np.arange(2, 10, dtype=np.int16)
expected, got = run_comparative(pyfunc, arr)
self.assertPreciseEqual(got, expected)
arr = np.linspace(2, 8, 6)
expected, got = run_comparative(pyfunc, arr)
self.assertPreciseEqual(got, expected)
arr = arr.reshape((3, 2))
expected, got = run_comparative(pyfunc, arr)
self.assertPreciseEqual(got, expected)
    # Cumulative reductions, as both methods and free functions.
    def test_array_cumsum(self):
        self.check_cumulative(array_cumsum)
    def test_array_cumsum_global(self):
        self.check_cumulative(array_cumsum_global)
    def test_array_cumprod(self):
        self.check_cumulative(array_cumprod)
    def test_array_cumprod_global(self):
        self.check_cumulative(array_cumprod_global)
    def check_aggregation_magnitude(self, pyfunc, is_prod=False):
        """
        Check that integer overflows are avoided (issue #931).
        """
        # Overflows are avoided here (ints are cast either to intp
        # or float64).
        n_items = 2 if is_prod else 10 # avoid overflow on prod()
        arr = (np.arange(n_items) + 40000).astype('int16')
        npr, nbr = run_comparative(pyfunc, arr)
        self.assertPreciseEqual(npr, nbr)
        # Overflows are avoided for functions returning floats here.
        # Other functions may wrap around.
        arr = (np.arange(10) + 2**60).astype('int64')
        npr, nbr = run_comparative(pyfunc, arr)
        self.assertPreciseEqual(npr, nbr)
        arr = arr.astype('uint64')
        npr, nbr = run_comparative(pyfunc, arr)
        self.assertPreciseEqual(npr, nbr)
    # Overflow-avoidance checks (issue #931) for each aggregation.
    def test_sum_magnitude(self):
        self.check_aggregation_magnitude(array_sum)
        self.check_aggregation_magnitude(array_sum_global)
    def test_cumsum_magnitude(self):
        self.check_aggregation_magnitude(array_cumsum)
        self.check_aggregation_magnitude(array_cumsum_global)
    def test_nancumsum_magnitude(self):
        self.check_aggregation_magnitude(array_nancumsum, is_prod=True)
    def test_prod_magnitude(self):
        self.check_aggregation_magnitude(array_prod, is_prod=True)
        self.check_aggregation_magnitude(array_prod_global, is_prod=True)
    def test_cumprod_magnitude(self):
        self.check_aggregation_magnitude(array_cumprod, is_prod=True)
        self.check_aggregation_magnitude(array_cumprod_global, is_prod=True)
    def test_nancumprod_magnitude(self):
        self.check_aggregation_magnitude(array_nancumprod, is_prod=True)
    def test_mean_magnitude(self):
        self.check_aggregation_magnitude(array_mean)
        self.check_aggregation_magnitude(array_mean_global)
    def test_var_magnitude(self):
        self.check_aggregation_magnitude(array_var)
        self.check_aggregation_magnitude(array_var_global)
    def test_std_magnitude(self):
        self.check_aggregation_magnitude(array_std)
        self.check_aggregation_magnitude(array_std_global)
    def _do_check_nptimedelta(self, pyfunc, arr):
        # Shared driver for datetime64/timedelta64 reductions: even/odd sizes,
        # shuffled layouts, and NaT handling.
        arrty = typeof(arr)
        cfunc = jit(nopython=True)(pyfunc)
        self.assertPreciseEqual(cfunc(arr), pyfunc(arr))
        # Even vs. odd size, for np.median
        self.assertPreciseEqual(cfunc(arr[:-1]), pyfunc(arr[:-1]))
        # Test with different orders, for np.median
        arr = arr[::-1].copy() # Keep 'C' layout
        self.assertPreciseEqual(cfunc(arr), pyfunc(arr))
        np.random.shuffle(arr)
        self.assertPreciseEqual(cfunc(arr), pyfunc(arr))
        # Test with a NaT
        if 'median' not in pyfunc.__name__:
            # Test with (val, NaT)^N (and with the random NaT from above)
            # use a loop, there's some weird thing/bug with arr[1::2] = 'NaT'
            # Further Numba has bug(s) relating to NaN/NaT handling in anything
            # using a partition such as np.median
            for x in range(1, len(arr), 2):
                arr[x] = 'NaT'
            self.assertPreciseEqual(cfunc(arr), pyfunc(arr))
            # Test with all NaTs
            arr.fill(arrty.dtype('NaT'))
            self.assertPreciseEqual(cfunc(arr), pyfunc(arr))
    def check_npdatetime(self, pyfunc):
        # datetime64 with year units.
        arr = np.arange(10).astype(dtype='M8[Y]')
        self._do_check_nptimedelta(pyfunc, arr)
    def check_nptimedelta(self, pyfunc):
        # timedelta64 with second units.
        arr = np.arange(10).astype(dtype='m8[s]')
        self._do_check_nptimedelta(pyfunc, arr)
    def test_min_npdatetime(self):
        self.check_npdatetime(array_min)
        self.check_nptimedelta(array_min)
    def test_max_npdatetime(self):
        self.check_npdatetime(array_max)
        self.check_nptimedelta(array_max)
    def test_argmin_npdatetime(self):
        self.check_npdatetime(array_argmin)
        self.check_nptimedelta(array_argmin)
    def test_argmax_npdatetime(self):
        self.check_npdatetime(array_argmax)
        self.check_nptimedelta(array_argmax)
    def test_median_npdatetime(self):
        self.check_nptimedelta(array_median_global)
    def test_sum_npdatetime(self):
        self.check_nptimedelta(array_sum)
    def test_cumsum_npdatetime(self):
        self.check_nptimedelta(array_cumsum)
    def test_mean_npdatetime(self):
        self.check_nptimedelta(array_mean)
    def check_nan_cumulative(self, pyfunc):
        # Check nancumsum/nancumprod over arrays with ~50% NaNs, various
        # layouts/dtypes, plus empty, all-NaN and complex inputs.
        cfunc = jit(nopython=True)(pyfunc)
        def check(a):
            expected = pyfunc(a)
            got = cfunc(a)
            self.assertPreciseEqual(expected, got)
        def _set_some_values_to_nan(a):
            p = a.size // 2 # set approx half elements to NaN
            np.put(a, np.random.choice(range(a.size), p, replace=False), np.nan)
            return a
        def a_variations():
            yield np.linspace(-1, 3, 60).reshape(3, 4, 5)
            yield np.array([np.inf, 3, 4])
            yield np.array([True, True, True, False])
            yield np.arange(1, 10)
            yield np.asfortranarray(np.arange(1, 64) - 33.3)
            yield np.arange(1, 10, dtype=np.float32)[::-1]
        for a in a_variations():
            check(a) # no nans
            check(_set_some_values_to_nan(a.astype(np.float64))) # about 50% nans
        # edge cases
        check(np.array([]))
        check(np.full(10, np.nan))
        parts = np.array([np.nan, 2, np.nan, 4, 5, 6, 7, 8, 9])
        a = parts + 1j * parts[::-1]
        a = a.reshape(3, 3)
        check(a)
    def test_nancumprod_basic(self):
        self.check_cumulative(array_nancumprod)
        self.check_nan_cumulative(array_nancumprod)
    def test_nancumsum_basic(self):
        self.check_cumulative(array_nancumsum)
        self.check_nan_cumulative(array_nancumsum)
    def test_ptp_basic(self):
        # np.ptp over arrays, scalars, tuples, lists, complex values and
        # non-finite inputs.
        pyfunc = array_ptp_global
        cfunc = jit(nopython=True)(pyfunc)
        def check(a):
            expected = pyfunc(a)
            got = cfunc(a)
            self.assertPreciseEqual(expected, got)
        def a_variations():
            yield np.arange(10)
            yield np.array([-1.1, np.nan, 2.2])
            yield np.array([-np.inf, 5])
            yield (4, 2, 5)
            yield (1,)
            yield np.full(5, 5)
            yield [2.2, -2.3, 0.1]
            a = np.linspace(-10, 10, 16).reshape(4, 2, 2)
            yield a
            yield np.asfortranarray(a)
            yield a[::-1]
            # Fixed-seed shuffle keeps this case deterministic.
            np.random.RandomState(0).shuffle(a)
            yield a
            yield 6
            yield 6.5
            yield -np.inf
            yield 1 + 4j
            yield [2.2, np.nan]
            yield [2.2, np.inf]
            yield ((4.1, 2.0, -7.6), (4.3, 2.7, 5.2))
            yield np.full(5, np.nan)
            yield 1 + np.nan * 1j
            yield np.nan + np.nan * 1j
            yield np.nan
        for a in a_variations():
            check(a)
    @skip_if_numpy_2
    def test_ptp_method(self):
        # checks wiring of np.ndarray.ptp() only, `np.ptp` test above checks
        # the actual alg
        pyfunc = array_ptp
        cfunc = jit(nopython=True)(pyfunc)
        a = np.arange(10)
        expected = pyfunc(a)
        got = cfunc(a)
        self.assertPreciseEqual(expected, got)
    def test_ptp_complex(self):
        # Complex ptp, with NaNs injected into the real and/or imaginary
        # parts in every combination.
        pyfunc = array_ptp_global
        cfunc = jit(nopython=True)(pyfunc)
        def check(a):
            expected = pyfunc(a)
            got = cfunc(a)
            self.assertPreciseEqual(expected, got)
        def make_array(real_nan=False, imag_nan=False):
            real = np.linspace(-4, 4, 25)
            if real_nan:
                real[4:9] = np.nan
            imag = np.linspace(-5, 5, 25)
            if imag_nan:
                imag[7:12] = np.nan
            return (real + 1j * imag).reshape(5, 5)
        for real_nan, imag_nan in product([True, False], repeat=2):
            comp = make_array(real_nan, imag_nan)
            check(comp)
        real = np.ones(8)
        imag = np.arange(-4, 4)
        comp = real + 1j * imag
        check(comp)
        comp = real - 1j * imag
        check(comp)
        # Constant array: ptp is zero.
        comp = np.full((4, 4), fill_value=(1 - 1j))
        check(comp)
def test_ptp_exceptions(self):
pyfunc = array_ptp_global
cfunc = jit(nopython=True)(pyfunc)
# Exceptions leak references
self.disable_leak_check()
with self.assertTypingError() as e:
cfunc(np.array((True, True, False)))
msg = "Boolean dtype is unsupported (as per NumPy)"
self.assertIn(msg, str(e.exception))
with self.assertRaises(ValueError) as e:
cfunc(np.array([]))
msg = "zero-size array reduction not possible"
self.assertIn(msg, str(e.exception))
def test_min_max_complex_basic(self):
pyfuncs = array_min_global, array_max_global
for pyfunc in pyfuncs:
cfunc = jit(nopython=True)(pyfunc)
def check(a):
expected = pyfunc(a)
got = cfunc(a)
self.assertPreciseEqual(expected, got)
real = np.linspace(-10, 10, 40)
real[:4] = real[-1]
imag = real * 2
a = real - imag * 1j
check(a)
for _ in range(10):
self.random.shuffle(real)
self.random.shuffle(imag)
dtype = self.random.choice([np.complex64, np.complex128])
a = real - imag * 1j
a[:4] = a[-1]
check(a.astype(dtype))
def test_nanmin_nanmax_complex_basic(self):
pyfuncs = array_nanmin, array_nanmax
for pyfunc in pyfuncs:
cfunc = jit(nopython=True)(pyfunc)
def check(a):
expected = pyfunc(a)
got = cfunc(a)
self.assertPreciseEqual(expected, got)
real = np.linspace(-10, 10, 40)
real[:4] = real[-1]
real[5:9] = np.nan
imag = real * 2
imag[7:12] = np.nan
a = real - imag * 1j
check(a)
for _ in range(10):
self.random.shuffle(real)
self.random.shuffle(imag)
a = real - imag * 1j
a[:4] = a[-1]
check(a)
def test_nanmin_nanmax_non_array_inputs(self):
pyfuncs = array_nanmin, array_nanmax
def check(a):
expected = pyfunc(a)
got = cfunc(a)
self.assertPreciseEqual(expected, got)
def a_variations():
yield [1, 6, 4, 2]
yield ((-10, 4, -12), (5, 200, -30))
yield np.array(3)
yield (2,)
yield 3.142
yield False
yield (np.nan, 3.142, -5.2, 3.0)
yield [np.inf, np.nan, -np.inf]
yield [(np.nan, 1.1), (-4.4, 8.7)]
for pyfunc in pyfuncs:
cfunc = jit(nopython=True)(pyfunc)
for a in a_variations():
check(a)
def test_argmax_axis_1d_2d_4d(self):
arr1d = np.array([0, 20, 3, 4])
arr2d = np.arange(6).reshape(2, 3)
arr2d[0,1] += 100
arr4d = np.arange(120).reshape(2, 3, 4, 5) + 10
arr4d[0, 1, 1, 2] += 100
arr4d[1, 0, 0, 0] -= 51
for arr in [arr1d, arr2d, arr4d]:
axes = list(range(arr.ndim)) + [
-(i+1) for i in range(arr.ndim)
]
py_functions = [
lambda a, _axis=axis: np.argmax(a, axis=_axis)
for axis in axes
]
c_functions = [
jit(nopython=True)(pyfunc) for pyfunc in py_functions
]
for cfunc in c_functions:
self.assertPreciseEqual(cfunc.py_func(arr), cfunc(arr))
def test_argmax_axis_out_of_range(self):
arr1d = np.arange(6)
arr2d = np.arange(6).reshape(2, 3)
@jit(nopython=True)
def jitargmax(arr, axis):
return np.argmax(arr, axis)
def assert_raises(arr, axis):
with self.assertRaisesRegex(ValueError, "axis.*out of bounds"):
jitargmax.py_func(arr, axis)
with self.assertRaisesRegex(ValueError, "axis.*out of bounds"):
jitargmax(arr, axis)
assert_raises(arr1d, 1)
assert_raises(arr1d, -2)
assert_raises(arr2d, -3)
assert_raises(arr2d, 2)
# Exceptions leak references
self.disable_leak_check()
def test_argmax_axis_must_be_integer(self):
arr = np.arange(6)
@jit(nopython=True)
def jitargmax(arr, axis):
return np.argmax(arr, axis)
with self.assertTypingError() as e:
jitargmax(arr, "foo")
self.assertIn("axis must be an integer", str(e.exception))
def test_argmax_method_axis(self):
arr2d = np.arange(6).reshape(2, 3)
def argmax(arr):
return arr2d.argmax(axis=0)
self.assertPreciseEqual(argmax(arr2d),
jit(nopython=True)(argmax)(arr2d))
def test_argmax_return_type(self):
# See issue #7853, return type should be intp not based on input type
arr2d = np.arange(6, dtype=np.uint8).reshape(2, 3)
def argmax(arr):
return arr2d.argmax(axis=0)
self.assertPreciseEqual(argmax(arr2d),
jit(nopython=True)(argmax)(arr2d))
def test_argmin_axis_1d_2d_4d(self):
arr1d = np.array([0, 20, 3, 4])
arr2d = np.arange(6).reshape(2, 3)
arr2d[0,1] += 100
arr4d = np.arange(120).reshape(2, 3, 4, 5) + 10
arr4d[0, 1, 1, 2] += 100
arr4d[1, 0, 0, 0] -= 51
for arr in [arr1d, arr2d, arr4d]:
axes = list(range(arr.ndim)) + [
-(i+1) for i in range(arr.ndim)
]
py_functions = [
lambda a, _axis=axis: np.argmin(a, axis=_axis)
for axis in axes
]
c_functions = [
jit(nopython=True)(pyfunc) for pyfunc in py_functions
]
for cfunc in c_functions:
self.assertPreciseEqual(cfunc.py_func(arr), cfunc(arr))
def test_argmin_axis_out_of_range(self):
arr1d = np.arange(6)
arr2d = np.arange(6).reshape(2, 3)
@jit(nopython=True)
def jitargmin(arr, axis):
return np.argmin(arr, axis)
def assert_raises(arr, axis):
with self.assertRaisesRegex(ValueError, "axis.*out of bounds"):
jitargmin.py_func(arr, axis)
with self.assertRaisesRegex(ValueError, "axis.*out of bounds"):
jitargmin(arr, axis)
assert_raises(arr1d, 1)
assert_raises(arr1d, -2)
assert_raises(arr2d, -3)
assert_raises(arr2d, 2)
# Exceptions leak references
self.disable_leak_check()
def test_argmin_axis_must_be_integer(self):
arr = np.arange(6)
@jit(nopython=True)
def jitargmin(arr, axis):
return np.argmin(arr, axis)
with self.assertTypingError() as e:
jitargmin(arr, "foo")
self.assertIn("axis must be an integer", str(e.exception))
def test_argmin_method_axis(self):
arr2d = np.arange(6).reshape(2, 3)
def argmin(arr):
return arr2d.argmin(axis=0)
self.assertPreciseEqual(argmin(arr2d),
jit(nopython=True)(argmin)(arr2d))
def test_argmin_return_type(self):
# See issue #7853, return type should be intp not based on input type
arr2d = np.arange(6, dtype=np.uint8).reshape(2, 3)
def argmin(arr):
return arr2d.argmin(axis=0)
self.assertPreciseEqual(argmin(arr2d),
jit(nopython=True)(argmin)(arr2d))
@classmethod
def install_generated_tests(cls):
# These form a testing product where each of the combinations are tested
# these function are tested in real and complex space
reduction_funcs = [array_sum, array_sum_global,
array_prod, array_prod_global,
array_mean, array_mean_global,
array_var, array_var_global,
array_std, array_std_global,
array_all, array_all_global,
array_any, array_any_global,
array_min, array_min_global,
array_amax, array_amin,
array_max, array_max_global,
array_nanmax, array_nanmin,
array_nansum,
]
# these functions only work in real space as no complex comparison
# operator is implemented
reduction_funcs_rspace = [array_argmin, array_argmin_global,
array_argmax, array_argmax_global]
reduction_funcs += [array_nanmean, array_nanstd, array_nanvar]
reduction_funcs += [array_nanprod]
dtypes_to_test = [np.int32, np.float32, np.bool_, np.complex64]
def install_tests(dtypes, funcs):
# Install tests on class
for dt in dtypes:
test_arrays = full_test_arrays(dt)
for red_func, test_array in product(funcs, test_arrays):
# Create the name for the test function
test_name = "test_{0}_{1}_{2}d"
test_name = test_name.format(red_func.__name__,
test_array.dtype.name,
test_array.ndim)
def new_test_function(self, redFunc=red_func,
testArray=test_array,
testName=test_name):
ulps = 1
if 'prod' in red_func.__name__ and \
np.iscomplexobj(testArray):
# prod family accumulate slightly more error on
# some architectures (power, 32bit) for complex input
ulps = 3
npr, nbr = run_comparative(redFunc, testArray)
self.assertPreciseEqual(npr, nbr, msg=testName,
prec="single", ulps=ulps)
# Install it into the class
setattr(cls, test_name, new_test_function)
# install tests for reduction functions that only work in real space
install_tests(dtypes_to_test[:-1], reduction_funcs_rspace)
# install tests for reduction functions
install_tests(dtypes_to_test, reduction_funcs)
TestArrayReductions.install_generated_tests()
| TestArrayReductions |
python | pytorch__pytorch | torch/distributed/checkpoint/_pg_transport.py | {
"start": 1486,
"end": 1981
} | class ____:
"""
This is the metadata for a ShardedTensor that is used to transfer checkpoints.
It contains the metadata for all local shards and the global tensor metadata.
This must be pickleable so that it can be sent over the wire.
"""
local_shards_meta: list[_TensorMeta]
local_shards_shard_metadata: list[
ShardMetadata
] # Original shard metadata for each local shard
sharded_tensor_metadata: ShardedTensorMetadata
@dataclass
| _ShardedTensorMeta |
python | facebookresearch__faiss | faiss/gpu/test/test_binary_cagra.py | {
"start": 3431,
"end": 4527
} | class ____(unittest.TestCase):
"""Test IndexBinaryIDMap functionality with GpuIndexBinaryCagra"""
def test_add_with_ids(self):
d = 128 * 8
k = 10
n = 100000
res = faiss.StandardGpuResources()
# Create GpuIndexBinaryCagra with IDMap
index_gpu = faiss.GpuIndexBinaryCagra(res, d)
index_idmap = faiss.IndexBinaryIDMap(index_gpu)
xb = np.random.randint(
low=0, high=256, size=(n, d // 8), dtype=np.uint8)
ids = np.arange(1000000, 1000000 + n).astype(np.int64)
index_idmap.add_with_ids(xb, ids)
nq = 1000
xq = np.random.randint(
low=0, high=256, size=(nq, d // 8), dtype=np.uint8)
D, I = index_idmap.search(xq, k)
self.assertTrue(np.all(I >= 1000000))
self.assertTrue(np.all(I < 1000000 + n))
D_exact, I_exact = index_idmap.search(xb[:10], 1)
expected_ids = ids[:10].reshape(-1, 1)
np.testing.assert_array_equal(I_exact, expected_ids)
np.testing.assert_array_equal(D_exact, np.zeros((10, 1)))
| TestIndexBinaryIDMap |
python | skorch-dev__skorch | skorch/tests/test_utils.py | {
"start": 4525,
"end": 7014
} | class ____:
@pytest.fixture
def to_numpy(self):
from skorch.utils import to_numpy
return to_numpy
@pytest.fixture
def x_tensor(self):
return torch.zeros(3, 4)
@pytest.fixture
def x_tuple(self):
return torch.ones(3), torch.zeros(3, 4)
@pytest.fixture
def x_list(self):
return [torch.ones(3), torch.zeros(3, 4)]
@pytest.fixture
def x_dict(self):
return {'a': torch.ones(3), 'b': (torch.zeros(2), torch.zeros(3))}
def compare_array_to_tensor(self, x_numpy, x_tensor):
assert isinstance(x_tensor, torch.Tensor)
assert isinstance(x_numpy, np.ndarray)
assert x_numpy.shape == x_tensor.shape
for a, b in zip(x_numpy.flatten(), x_tensor.flatten()):
assert np.isclose(a, b.item())
def test_tensor(self, to_numpy, x_tensor):
x_numpy = to_numpy(x_tensor)
self.compare_array_to_tensor(x_numpy, x_tensor)
def test_list(self, to_numpy, x_list):
x_numpy = to_numpy(x_list)
for entry_numpy, entry_torch in zip(x_numpy, x_list):
self.compare_array_to_tensor(entry_numpy, entry_torch)
def test_tuple(self, to_numpy, x_tuple):
x_numpy = to_numpy(x_tuple)
for entry_numpy, entry_torch in zip(x_numpy, x_tuple):
self.compare_array_to_tensor(entry_numpy, entry_torch)
def test_dict(self, to_numpy, x_dict):
x_numpy = to_numpy(x_dict)
self.compare_array_to_tensor(x_numpy['a'], x_dict['a'])
self.compare_array_to_tensor(x_numpy['b'][0], x_dict['b'][0])
self.compare_array_to_tensor(x_numpy['b'][1], x_dict['b'][1])
@pytest.mark.parametrize('x_invalid', [
1,
[1, 2, 3],
(1, 2, 3),
{'a': 1},
])
def test_invalid_inputs(self, to_numpy, x_invalid):
# Inputs that are invalid for the scope of to_numpy.
with pytest.raises(TypeError) as e:
to_numpy(x_invalid)
expected = "Cannot convert this data type to a numpy array."
assert e.value.args[0] == expected
@pytest.mark.skipif(
not (hasattr(torch.backends, "mps") and torch.backends.mps.is_available()),
reason='Skipped because mps is not available as a torch backend'
)
def test_mps_support(self, to_numpy, x_tensor):
device = torch.device('mps')
x_tensor.to(device)
x_numpy = to_numpy(x_tensor)
self.compare_array_to_tensor(x_numpy, x_tensor)
| TestToNumpy |
python | ApeWorX__ape | src/ape/managers/accounts.py | {
"start": 7304,
"end": 17988
} | class ____(BaseManager):
"""
The ``AccountManager`` is a container of containers for
:class:`~ape.api.accounts.AccountAPI` objects.
All containers must subclass :class:`~ape.api.accounts.AccountContainerAPI`
and are treated as singletons.
Import the accounts manager singleton from the root ``ape`` namespace.
Usage example::
from ape import accounts # "accounts" is the AccountManager singleton
my_accounts = accounts.load("dev")
"""
_alias_to_account_cache: dict[str, AccountAPI] = {}
@property
def default_sender(self) -> Optional[AccountAPI]:
return _DEFAULT_SENDERS[-1] if _DEFAULT_SENDERS else None
@cached_property
def containers(self) -> dict[str, AccountContainerAPI]:
"""
A dict of all :class:`~ape.api.accounts.AccountContainerAPI` instances
across all installed plugins.
Returns:
dict[str, :class:`~ape.api.accounts.AccountContainerAPI`]
"""
containers = {}
data_folder = self.config_manager.DATA_FOLDER
data_folder.mkdir(exist_ok=True)
for plugin_name, (container_type, account_type) in self.plugin_manager.account_types:
# Ignore containers that contain test accounts.
if issubclass(account_type, TestAccountAPI):
continue
containers[plugin_name] = container_type(name=plugin_name, account_type=account_type)
return containers
@property
def aliases(self) -> Iterator[str]:
"""
All account aliases from every account-related plugin. The "alias"
is part of the :class:`~ape.api.accounts.AccountAPI`. Use the
account alias to load an account using method
:meth:`~ape.managers.accounts.AccountManager.load`.
Returns:
Iterator[str]
"""
for container in self.containers.values():
yield from container.aliases
def get_accounts_by_type(self, type_: type[AccountAPI]) -> list[AccountAPI]:
"""
Get a list of accounts by their type.
Args:
type_ (type[:class:`~ape.api.accounts.AccountAPI`]): The type of account
to get.
Returns:
list[:class:`~ape.api.accounts.AccountAPI`]
"""
return [acc for acc in self if isinstance(acc, type_)]
def __len__(self) -> int:
"""
The number of accounts managed by all account plugins.
Returns:
int
"""
return sum(len(container) for container in self.containers.values())
def __iter__(self) -> Iterator[AccountAPI]:
for container in self.containers.values():
yield from container.accounts
@log_instead_of_fail(default="<AccountManager>")
def __repr__(self) -> str:
return "<AccountManager>"
@cached_property
def test_accounts(self) -> TestAccountManager:
"""
Accounts generated from the configured test mnemonic. These accounts
are also the subject of a fixture available in the ``test`` plugin called
``accounts``. Configure these accounts, such as the mnemonic and / or
number-of-accounts using the ``test`` section of the `ape-config.yaml` file.
Usage example::
def test_my_contract(accounts):
# The "accounts" fixture uses the AccountsManager.test_accounts()
sender = accounts[0]
receiver = accounts[1]
...
Returns:
:class:`TestAccountContainer`
"""
return TestAccountManager()
def load(self, alias: str) -> AccountAPI:
"""
Get an account by its alias.
Raises:
KeyError: When there is no local account with the given alias.
Returns:
:class:`~ape.api.accounts.AccountAPI`
"""
if alias == "":
raise ValueError("Cannot use empty string as alias!")
elif alias in self._alias_to_account_cache:
return self._alias_to_account_cache[alias]
for account in self:
if account.alias and account.alias == alias:
self._alias_to_account_cache[alias] = account
return account
raise KeyError(f"No account with alias '{alias}'.")
@singledispatchmethod
def __getitem__(self, account_id) -> AccountAPI:
raise NotImplementedError(f"Cannot use {type(account_id)} as account ID.")
@__getitem__.register
def __getitem_int(self, account_id: int) -> AccountAPI:
"""
Get an account by index. For example, when you do the CLI command
``ape accounts list --all``, you will see a list of enumerated accounts
by their indices. Use this method as a quicker, ad-hoc way to get an
account from that index.
**NOTE**: It is generally preferred to use
:meth:`~ape.managers.accounts.AccountManager.load` or
:meth:`~ape.managers.accounts.AccountManager.__getitem_str`.
Returns:
:class:`~ape.api.accounts.AccountAPI`
"""
if account_id < 0:
account_id = len(self) + account_id
for idx, account in enumerate(self):
if account_id == idx:
return account
raise IndexError(f"No account at index '{account_id}'.")
@__getitem__.register
def __getitem_slice(self, account_id: slice):
"""
Get list of accounts by slice. For example, when you do the CLI command
``ape accounts list --all``, you will see a list of enumerated accounts
by their indices. Use this method as a quicker, ad-hoc way to get an
accounts from a slice.
**NOTE**: It is generally preferred to use
:meth:`~ape.managers.accounts.AccountManager.load` or
:meth:`~ape.managers.accounts.AccountManager.__getitem_str`.
Returns:
list[:class:`~ape.api.accounts.AccountAPI`]
"""
start_idx = account_id.start or 0
if start_idx < 0:
start_idx += len(self)
stop_idx = account_id.stop or len(self)
if stop_idx < 0:
stop_idx += len(self)
step_size = account_id.step or 1
return [self[i] for i in range(start_idx, stop_idx, step_size)]
@__getitem__.register
def __getitem_str(self, account_str: str) -> AccountAPI:
"""
Get an account by address. If we are using a provider that supports unlocking
accounts, this method will return an impersonated account at that address.
Raises:
KeyError: When there is no local account with the given address.
Returns:
:class:`~ape.api.accounts.AccountAPI`
"""
try:
account_id = self.conversion_manager.convert(account_str, AddressType)
except ConversionError as err:
prefix = f"No account with ID '{account_str}'"
if account_str.endswith(".eth"):
suffix = "Do you have `ape-ens` installed?"
else:
suffix = "Do you have the necessary conversion plugins installed?"
raise KeyError(f"{prefix}. {suffix}") from err
for container in self.containers.values():
if account_id in container.accounts:
return container[account_id]
# NOTE: Fallback to `TestAccountContainer`'s method for loading items
return self.test_accounts[account_id]
def __contains__(self, address: AddressType) -> bool:
"""
Determine if the given address matches an account in ``ape``.
Args:
address (:class:`~ape.types.address.AddressType`): The address to check.
Returns:
bool: ``True`` when the given address is found.
"""
return (
any(address in container for container in self.containers.values())
or address in self.test_accounts
)
def use_sender(
self,
account_id: Union[AccountAPI, AddressType, str, int],
) -> "ContextManager":
if not isinstance(account_id, AccountAPI):
if isinstance(account_id, int) or is_hex(account_id):
account = self[account_id]
elif isinstance(account_id, str): # alias
account = self.load(account_id)
else:
raise TypeError(account_id)
else:
account = account_id
return _use_sender(account)
def init_test_account(
self, index: int, address: AddressType, private_key: str
) -> "TestAccountAPI":
return self.test_accounts.init_test_account(index, address, private_key)
def resolve_address(
self, account_id: Union["BaseAddress", AddressType, str, int, bytes]
) -> Optional[AddressType]:
"""
Resolve the given input to an address.
Args:
account_id (:class:~ape.api.address.BaseAddress, str, int, bytes): The input to resolve.
It handles anything that converts to an AddressType like an ENS or a BaseAddress.
It also handles account aliases Ape is aware of, or int or bytes address values.
Returns:
:class:`~ape.types.AddressType` | None
"""
if isinstance(account_id, str) and account_id.startswith("0x"):
# Was given a hex-address string.
if provider := self.network_manager.active_provider:
return provider.network.ecosystem.decode_address(account_id)
else:
# Assume Ethereum-like.
return self.network_manager.ether.decode_address(account_id)
elif not isinstance(account_id, str):
# Was given either an integer, bytes, or a BaseAddress (account or contract).
return self.conversion_manager.convert(account_id, AddressType)
elif isinstance(account_id, str) and account_id in self.aliases:
# Was given an account alias.
account = self.load(account_id)
return account.address
elif (
isinstance(account_id, str)
and account_id.startswith("TEST::")
and account_id[-1].isdigit()
):
# Test account "alias".
account_idx = int(account_id[-1])
return self.test_accounts[account_idx]
elif isinstance(account_id, str) and not is_hex(account_id):
# Was maybe given an ENS name.
try:
return self.conversion_manager.convert(account_id, AddressType)
except ConversionError:
return None
return None
| AccountManager |
python | huggingface__transformers | src/transformers/models/aya_vision/processing_aya_vision.py | {
"start": 953,
"end": 1265
} | class ____(ProcessingKwargs, total=False):
_defaults = {
"text_kwargs": {
"padding_side": "left",
"padding": True,
"return_mm_token_type_ids": False,
},
"images_kwargs": {
"crop_to_patches": True,
},
}
| AyaVisionProcessorKwargs |
python | oauthlib__oauthlib | tests/oauth2/rfc6749/grant_types/test_implicit.py | {
"start": 243,
"end": 2662
} | class ____(TestCase):
def setUp(self):
mock_client = mock.MagicMock()
mock_client.user.return_value = 'mocked user'
self.request = Request('http://a.b/path')
self.request.scopes = ('hello', 'world')
self.request.client = mock_client
self.request.client_id = 'abcdef'
self.request.response_type = 'token'
self.request.state = 'xyz'
self.request.redirect_uri = 'https://b.c/p'
self.mock_validator = mock.MagicMock()
self.auth = ImplicitGrant(request_validator=self.mock_validator)
@mock.patch('oauthlib.common.generate_token')
def test_create_token_response(self, generate_token):
generate_token.return_value = '1234'
bearer = BearerToken(self.mock_validator, expires_in=1800)
h, b, s = self.auth.create_token_response(self.request, bearer)
correct_uri = 'https://b.c/p#access_token=1234&token_type=Bearer&expires_in=1800&state=xyz&scope=hello+world'
self.assertEqual(s, 302)
self.assertURLEqual(h['Location'], correct_uri, parse_fragment=True)
self.assertEqual(self.mock_validator.save_token.call_count, 1)
correct_uri = 'https://b.c/p?access_token=1234&token_type=Bearer&expires_in=1800&state=xyz&scope=hello+world'
self.request.response_mode = 'query'
h, b, s = self.auth.create_token_response(self.request, bearer)
self.assertURLEqual(h['Location'], correct_uri)
def test_custom_validators(self):
self.authval1, self.authval2 = mock.Mock(), mock.Mock()
self.tknval1, self.tknval2 = mock.Mock(), mock.Mock()
for val in (self.authval1, self.authval2):
val.return_value = {}
for val in (self.tknval1, self.tknval2):
val.return_value = None
self.auth.custom_validators.pre_token.append(self.tknval1)
self.auth.custom_validators.post_token.append(self.tknval2)
self.auth.custom_validators.pre_auth.append(self.authval1)
self.auth.custom_validators.post_auth.append(self.authval2)
bearer = BearerToken(self.mock_validator)
self.auth.create_token_response(self.request, bearer)
self.assertTrue(self.tknval1.called)
self.assertTrue(self.tknval2.called)
self.assertTrue(self.authval1.called)
self.assertTrue(self.authval2.called)
def test_error_response(self):
pass
| ImplicitGrantTest |
python | python-poetry__poetry | tests/types.py | {
"start": 1879,
"end": 2363
} | class ____(Protocol):
def __call__(
self,
name: str | None = None,
dependencies: dict[str, str] | None = None,
dev_dependencies: dict[str, str] | None = None,
pyproject_content: str | None = None,
poetry_lock_content: str | None = None,
install_deps: bool = True,
source: Path | None = None,
locker_config: dict[str, Any] | None = None,
use_test_locker: bool = True,
) -> Poetry: ...
| ProjectFactory |
python | pytorch__pytorch | test/inductor/test_group_batch_fusion.py | {
"start": 11496,
"end": 23815
} | class ____(TestCase):
def compare_dict_tensors(self, ref_dict, res_dict, rtol=1e-3, atol=1e-3):
if len(set(ref_dict.keys())) != len(set(res_dict.keys())):
return False
for key1 in ref_dict:
key2 = "_orig_mod." + key1
assert key2 in res_dict, f"{key1} does not exist in traced module"
if not torch.allclose(ref_dict[key1], res_dict[key2], rtol=rtol, atol=atol):
return False
return True
def compare_pred(self, module, traced, input, rtol=1e-3, atol=1e-3):
ref = module(*input)
res = traced(*input)
self.assertEqual(ref, res, rtol=rtol, atol=atol)
def compare_parameters(self, module, traced, rtol=1e-3, atol=1e-3):
ref_params = dict(module.named_parameters())
res_params = dict(traced.named_parameters())
self.assertTrue(self.compare_dict_tensors(ref_params, res_params, rtol, atol))
def compare_gradients(self, module, traced, rtol=1e-3, atol=1e-3):
ref_grad = {key: param.grad for key, param in module.named_parameters()}
res_grad = {key: param.grad for key, param in traced.named_parameters()}
self.assertTrue(
self.compare_dict_tensors(ref_grad, res_grad, rtol=rtol, atol=atol)
)
@requires_gpu()
@unittest.skipIf(not has_fbgemm, "requires fbgemm")
@torch._inductor.config.patch(
pre_grad_fusion_options={},
post_grad_fusion_options={
"group_linear": {"require_fbgemm": True},
},
)
def test_group_linear_fusion(self):
z = 10
for has_bias in [True, False]:
counters.clear()
module = MyModule(z, has_bias).to(GPU_TYPE)
input = [torch.randn(z, z, device=GPU_TYPE)]
traced = torch.compile(module)
ref = module(*input)
res = traced(*input)
self.compare_pred(module, traced, input)
self.assertEqual(
counters["inductor"]["group_linear"],
2,
)
ref.sum().backward()
res.sum().backward()
self.compare_parameters(module, traced)
self.compare_gradients(module, traced)
self.assertEqual(
counters["inductor"]["group_linear"],
4,
)
counters.clear()
@requires_gpu()
@unittest.skipIf(not has_fbgemm, "requires fbgemm")
@torch._inductor.config.patch(
pre_grad_fusion_options={},
post_grad_fusion_options={
"group_linear": {"require_fbgemm": True},
},
)
def test_group_linear_fusion_different_shapes(self):
counters.clear()
module = MyModule2().eval().to(GPU_TYPE)
input = [torch.rand(4, 24, device=GPU_TYPE)]
traced = torch.compile(module)
ref = module(*input)
res = traced(*input)
self.compare_pred(module, traced, input)
self.assertEqual(
counters["inductor"]["group_linear"],
1,
)
self.assertEqual(
counters["inductor"]["batch_fusion"],
0,
)
ref.sum().backward()
res.sum().backward()
self.compare_parameters(module, traced)
self.compare_gradients(module, traced)
self.assertEqual(
counters["inductor"]["group_linear"],
2,
)
counters.clear()
@requires_gpu()
@unittest.skipIf(GPU_TYPE == "mps", "welford_reduce is yet not implemented for MPS")
@torch._inductor.config.patch(
pre_grad_fusion_options={"batch_layernorm": {}},
post_grad_fusion_options={},
)
def test_batch_layer_norm_fusion(self):
for has_weight in [True, False]:
for has_bias in [True, False]:
counters.clear()
module = MyModule3(GPU_TYPE, has_weight, has_bias).to(GPU_TYPE)
input = [torch.randn(2, 5, 50, device=GPU_TYPE)]
traced = torch.compile(module)
ref = module(*input)
res = traced(*input)
self.compare_pred(module, traced, input)
self.assertEqual(counters["inductor"]["batch_layernorm"], 2)
ref.sum().backward()
res.sum().backward()
self.compare_parameters(module, traced, rtol=1e-8, atol=1e-8)
self.compare_gradients(module, traced, rtol=1e-8, atol=1e-8)
counters.clear()
@requires_gpu()
@torch._inductor.config.patch(
pre_grad_fusion_options={"batch_linear_lhs": {}},
post_grad_fusion_options={},
)
def test_batch_linear_lhs_fusion(self):
z = 10
for has_bias in [True, False]:
counters.clear()
module = MyModule4(z, GPU_TYPE, has_bias)
input = [torch.randn(20, z, device=GPU_TYPE)]
traced = torch.compile(module)
ref = module(*input)
res = traced(*input)
self.compare_pred(module, traced, input)
self.assertEqual(counters["inductor"]["batch_linear_lhs"], 2)
ref.sum().backward()
res.sum().backward()
self.compare_parameters(module, traced, rtol=1e-8, atol=1e-8)
self.compare_gradients(module, traced, rtol=1e-8, atol=1e-8)
counters.clear()
@requires_gpu()
@torch._inductor.config.patch(
pre_grad_fusion_options={"batch_linear": {}},
post_grad_fusion_options={},
)
def test_batch_linear_pre_grad_fusion(self):
for has_bias in [True, False]:
counters.clear()
module = MyModule5(GPU_TYPE, has_bias)
input = [torch.randn(50, 500, device=GPU_TYPE)]
traced = torch.compile(module)
ref = module(*input)
res = traced(*input)
self.compare_pred(module, traced, input)
self.assertEqual(counters["inductor"]["batch_linear"], 1)
ref.sum().backward()
res.sum().backward()
self.compare_parameters(module, traced, rtol=1e-8, atol=1e-8)
self.compare_gradients(module, traced, rtol=1e-8, atol=1e-8)
counters.clear()
@requires_gpu()
@torch._inductor.config.patch(
pre_grad_fusion_options={
"batch_relu": {},
"batch_sigmoid": {},
},
post_grad_fusion_options={
"batch_aten_add": {},
"batch_aten_mul": {},
"batch_aten_sub": {},
"batch_aten_div": {},
},
)
def test_pointwise_op_fusion(self):
counters.clear()
module = TestPoitwiseOps(GPU_TYPE)
input = [torch.randn(50, 1000, requires_grad=True, device=GPU_TYPE)]
traced = torch.compile(module)
ref = module(*input)
res = traced(*input)
self.compare_pred(module, traced, input)
self.assertEqual(counters["inductor"]["batch_relu"], 1)
self.assertEqual(counters["inductor"]["batch_sigmoid"], 1)
self.assertEqual(counters["inductor"]["batch_aten_add"], 1)
self.assertEqual(counters["inductor"]["batch_aten_mul"], 1)
self.assertEqual(counters["inductor"]["batch_aten_sub"], 1)
self.assertEqual(counters["inductor"]["batch_aten_div"], 1)
ref.sum().backward()
res.sum().backward()
self.compare_parameters(module, traced, rtol=1e-8, atol=1e-8)
self.compare_gradients(module, traced, rtol=1e-8, atol=1e-8)
counters.clear()
@requires_gpu()
@torch._inductor.config.patch(
pre_grad_fusion_options={},
post_grad_fusion_options={
"batch_aten_relu": {},
"batch_aten_sigmoid": {},
"batch_aten_tanh": {},
"unbind_stack_aten_pass": {},
},
)
def test_pointwise_op_fusion_post_grad(self):
counters.clear()
module = TestPoitwiseOpsPostGrad(GPU_TYPE)
input = [torch.randn(50, 1000, requires_grad=True, device=GPU_TYPE)]
traced = torch.compile(module)
ref = module(*input)
res = traced(*input)
self.compare_pred(module, traced, input)
self.assertEqual(counters["inductor"]["batch_aten_tanh"], 1)
self.assertEqual(counters["inductor"]["batch_aten_relu"], 1)
self.assertEqual(counters["inductor"]["batch_aten_sigmoid"], 1)
self.assertEqual(counters["inductor"]["unbind_stack_aten_pass"], 2)
ref.sum().backward()
res.sum().backward()
self.compare_parameters(module, traced, rtol=1e-8, atol=1e-8)
self.compare_gradients(module, traced, rtol=1e-8, atol=1e-8)
counters.clear()
@requires_gpu()
@torch._inductor.config.patch(
pre_grad_fusion_options={},
post_grad_fusion_options={
"batch_linear_post_grad": {
"shape_broadcast_batch_linear": True,
"fuse_nodes_with_same_users": True,
},
"batch_aten_mul": {"fuse_nodes_with_same_parent": False},
"batch_aten_sigmoid": {"fuse_nodes_with_same_parent": True},
"batch_aten_add": {"fuse_nodes_with_same_parent": True},
"normalization_aten_pass": {},
"unbind_stack_aten_pass": {},
},
)
def test_gate_fusion_post_grad(self):
counters.clear()
size = 20
module = TestHighwaySelfGating(d_model=10, size=size, device=GPU_TYPE)
input = [
[
torch.randn(10, 10, requires_grad=True, device=GPU_TYPE)
for i in range(size)
]
]
traced = torch.compile(module)
ref = module(*input)
res = traced(*input)
self.compare_pred(module, traced, input)
self.assertEqual(counters["inductor"]["batch_linear_post_grad"], 2)
self.assertEqual(counters["inductor"]["batch_aten_sigmoid"], 1)
self.assertEqual(counters["inductor"]["batch_aten_mul"], 1)
self.assertEqual(counters["inductor"]["batch_aten_add"], 2)
self.assertEqual(counters["inductor"]["normalization_aten_pass"], 1)
self.assertEqual(counters["inductor"]["unbind_stack_aten_pass"], 5)
ref.sum().backward()
res.sum().backward()
self.compare_parameters(module, traced, rtol=1e-8, atol=1e-8)
self.compare_gradients(module, traced, rtol=1e-8, atol=1e-8)
counters.clear()
@requires_gpu()
@torch._inductor.config.patch(
pre_grad_fusion_options={
"normalization_pass": {},
"batch_detach": {},
"batch_nan_to_num": {},
"batch_clamp": {},
"unbind_stack_pass": {},
"unbind_stack_to_slices_pass": {},
},
post_grad_fusion_options={},
)
def test_math_op_fusion(self):
counters.clear()
module = TestMathOps(GPU_TYPE)
input = [
torch.tensor(
[float("nan"), float("inf"), -float("inf"), 3.14], device=GPU_TYPE
)
]
traced = torch.compile(module)
ref = module(*input)
res = traced(*input)
self.compare_pred(module, traced, input)
self.assertEqual(counters["inductor"]["normalization_pass"], 3)
self.assertEqual(counters["inductor"]["batch_clamp"], 1)
self.assertEqual(counters["inductor"]["batch_detach"], 1)
self.assertEqual(counters["inductor"]["batch_nan_to_num"], 1)
self.assertEqual(counters["inductor"]["unbind_stack_to_slices_pass"], 2)
self.assertEqual(counters["inductor"]["unbind_stack_pass"], 2)
self.assertTrue(torch.allclose(ref, res))
counters.clear()
@requires_gpu()
@torch._inductor.config.patch(
pre_grad_fusion_options={
"normalization_pass": {},
"batch_dropout": {},
}
)
def test_batch_dropout_pre_grad_fusion(self):
counters.clear()
module = TestDropout(GPU_TYPE)
input = [torch.randn(10, 100, requires_grad=True, device=GPU_TYPE)]
traced = torch.compile(module)
module(*input)
traced(*input)
self.assertEqual(counters["inductor"]["normalization_pass"], 1)
self.assertEqual(counters["inductor"]["batch_dropout"], 1)
counters.clear()
| TestGroupBatchFusion |
python | euske__pdfminer | pdfminer/cmapdb.py | {
"start": 4588,
"end": 4920
} | class ____(UnicodeMap):
def __init__(self, name, module, vertical):
UnicodeMap.__init__(self, CMapName=name)
if vertical:
self.cid2unichr = module.CID2UNICHR_V
self.attrs['WMode'] = 1
else:
self.cid2unichr = module.CID2UNICHR_H
return
## CMapDB
##
| PyUnicodeMap |
python | weaviate__weaviate-python-client | weaviate/collections/queries/near_vector/query/sync.py | {
"start": 310,
"end": 453
} | class ____(
Generic[Properties, References],
_NearVectorQueryExecutor[ConnectionSync, Properties, References],
):
pass
| _NearVectorQuery |
python | apache__airflow | providers/google/tests/unit/google/cloud/triggers/test_bigquery.py | {
"start": 16482,
"end": 22121
} | class ____:
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook._get_job")
async def test_bigquery_check_trigger_running(self, mock_get_job, caplog, check_trigger):
"""Test that BigQuery Triggers do not fire while a query is still running."""
mock_get_job.return_value = mock.MagicMock(state="running")
task = asyncio.create_task(check_trigger.run().__anext__())
await asyncio.sleep(0.5)
# TriggerEvent was not returned
assert task.done() is False
assert "Bigquery job status is running. Sleeping for 4.0 seconds." in caplog.text
# Prevents error when task is destroyed while in "pending" state
asyncio.get_event_loop().stop()
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.get_job_status")
async def test_bigquery_check_trigger_terminated(self, mock_job_status, caplog, check_trigger):
"""Test that BigQuery Triggers fire the correct event in case of an error."""
# Set the status to a value other than success or pending
mock_job_status.return_value = {
"status": "error",
"message": "The conn_id `bq_default` isn't defined",
}
generator = check_trigger.run()
actual = await generator.asend(None)
assert (
TriggerEvent({"status": "error", "message": "The conn_id `bq_default` isn't defined"}) == actual
)
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.get_job_status")
async def test_check_trigger_exception(self, mock_job_status, caplog, check_trigger):
"""Test that BigQuery Triggers fire the correct event in case of an error."""
mock_job_status.side_effect = Exception("Test exception")
generator = check_trigger.run()
actual = await generator.asend(None)
assert TriggerEvent({"status": "error", "message": "Test exception"}) == actual
def test_check_trigger_serialization(self, check_trigger):
"""Asserts that the BigQueryCheckTrigger correctly serializes its arguments and classpath."""
classpath, kwargs = check_trigger.serialize()
assert classpath == "airflow.providers.google.cloud.triggers.bigquery.BigQueryCheckTrigger"
assert kwargs == {
"conn_id": TEST_CONN_ID,
"impersonation_chain": TEST_IMPERSONATION_CHAIN,
"job_id": TEST_JOB_ID,
"dataset_id": TEST_DATASET_ID,
"project_id": TEST_GCP_PROJECT_ID,
"table_id": TEST_TABLE_ID,
"location": None,
"poll_interval": POLLING_PERIOD_SECONDS,
"cancel_on_kill": True,
}
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.get_job_status")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.get_job_output")
async def test_check_trigger_success_with_data(self, mock_job_output, mock_job_status, check_trigger):
"""
Test the BigQueryCheckTrigger only fires once the query execution reaches a successful state.
"""
mock_job_status.return_value = {"status": "success", "message": "Job completed"}
mock_job_output.return_value = {
"kind": "bigquery#getQueryResultsResponse",
"etag": "test_etag",
"schema": {"fields": [{"name": "f0_", "type": "INTEGER", "mode": "NULLABLE"}]},
"jobReference": {
"projectId": "test_airflow-providers",
"jobId": "test_jobid",
"location": "US",
},
"totalRows": "1",
"rows": [{"f": [{"v": "22"}]}],
"totalBytesProcessed": "0",
"jobComplete": True,
"cacheHit": False,
}
generator = check_trigger.run()
actual = await generator.asend(None)
assert TriggerEvent({"status": "success", "message": "Job completed", "records": [22]}) == actual
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.get_job_status")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.get_job_output")
async def test_check_trigger_success_without_data(self, mock_job_output, mock_job_status, check_trigger):
"""
Tests that BigQueryCheckTrigger sends TriggerEvent when no rows are available in the query result.
"""
mock_job_status.return_value = {"status": "success", "message": "Job completed"}
mock_job_output.return_value = {
"kind": "bigquery#getQueryResultsResponse",
"etag": "test_etag",
"schema": {
"fields": [
{"name": "value", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "name", "type": "STRING", "mode": "NULLABLE"},
{"name": "ds", "type": "DATE", "mode": "NULLABLE"},
]
},
"jobReference": {
"projectId": "test_airflow-airflow-providers",
"jobId": "test_jobid",
"location": "US",
},
"totalRows": "0",
"totalBytesProcessed": "0",
"jobComplete": True,
"cacheHit": False,
}
generator = check_trigger.run()
actual = await generator.asend(None)
assert TriggerEvent({"status": "success", "message": "Job completed", "records": None}) == actual
| TestBigQueryCheckTrigger |
python | pikepdf__pikepdf | src/pikepdf/objects.py | {
"start": 2933,
"end": 5140
} | class ____(Object, metaclass=_NameObjectMeta):
"""Construct a PDF Name object.
Names can be constructed with two notations:
1. ``Name.Resources``
2. ``Name('/Resources')``
The two are semantically equivalent. The former is preferred for names
that are normally expected to be in a PDF. The latter is preferred for
dynamic names and attributes.
"""
object_type = ObjectType.name_
def __new__(cls, name: str | Name) -> Name:
"""Construct a PDF Name."""
# QPDF_Name::unparse ensures that names are always saved in a UTF-8
# compatible way, so we only need to guard the input.
if isinstance(name, bytes):
raise TypeError("Name should be str")
if isinstance(name, Name):
return name # Names are immutable so we can return a reference
return _core._new_name(name)
@classmethod
def random(cls, len_: int = 16, prefix: str = '') -> Name:
"""Generate a cryptographically strong, random, valid PDF Name.
If you are inserting a new name into a PDF (for example,
name for a new image), you can use this function to generate a
cryptographically strong random name that is almost certainly already
not already in the PDF, and not colliding with other existing names.
This function uses Python's secrets.token_urlsafe, which returns a
URL-safe encoded random number of the desired length. An optional
*prefix* may be prepended. (The encoding is ultimately done with
:func:`base64.urlsafe_b64encode`.) Serendipitously, URL-safe is also
PDF-safe.
When the length parameter is 16 (16 random bytes or 128 bits), the result
is probably globally unique and can be treated as never colliding with
other names.
The length of the returned string may vary because it is encoded,
but will always have ``8 * len_`` random bits.
Args:
len_: The length of the random string.
prefix: A prefix to prepend to the random string.
"""
random_string = token_urlsafe(len_)
return _core._new_name(f"/{prefix}{random_string}")
| Name |
python | PyCQA__pylint | tests/functional/u/unexpected_special_method_signature.py | {
"start": 1365,
"end": 1563
} | class ____:
def __enter__(self):
return self
# +1: [unexpected-special-method-signature]
def __exit__(self, exc_type, value, tb, stack, *args):
pass
| ThirdBadContextManager |
python | RaRe-Technologies__gensim | gensim/similarities/termsim.py | {
"start": 15423,
"end": 25806
} | class ____(SaveLoad):
"""
Builds a sparse term similarity matrix using a term similarity index.
Examples
--------
>>> from gensim.test.utils import common_texts as corpus, datapath
>>> from gensim.corpora import Dictionary
>>> from gensim.models import Word2Vec
>>> from gensim.similarities import SoftCosineSimilarity, SparseTermSimilarityMatrix, WordEmbeddingSimilarityIndex
>>> from gensim.similarities.index import AnnoyIndexer
>>>
>>> model_corpus_file = datapath('lee_background.cor')
>>> model = Word2Vec(corpus_file=model_corpus_file, vector_size=20, min_count=1) # train word-vectors
>>>
>>> dictionary = Dictionary(corpus)
>>> tfidf = TfidfModel(dictionary=dictionary)
>>> words = [word for word, count in dictionary.most_common()]
>>> word_vectors = model.wv.vectors_for_all(words, allow_inference=False) # produce vectors for words in corpus
>>>
>>> indexer = AnnoyIndexer(word_vectors, num_trees=2) # use Annoy for faster word similarity lookups
>>> termsim_index = WordEmbeddingSimilarityIndex(word_vectors, kwargs={'indexer': indexer})
>>> similarity_matrix = SparseTermSimilarityMatrix(termsim_index, dictionary, tfidf) # compute word similarities
>>>
>>> tfidf_corpus = tfidf[[dictionary.doc2bow(document) for document in common_texts]]
>>> docsim_index = SoftCosineSimilarity(tfidf_corpus, similarity_matrix, num_best=10) # index tfidf_corpus
>>>
>>> query = 'graph trees computer'.split() # make a query
>>> sims = docsim_index[dictionary.doc2bow(query)] # find the ten closest documents from tfidf_corpus
Check out `the Gallery <https://radimrehurek.com/gensim/auto_examples/tutorials/run_scm.html>`_
for more examples.
Parameters
----------
source : :class:`~gensim.similarities.termsim.TermSimilarityIndex` or :class:`scipy.sparse.spmatrix`
The source of the term similarity. Either a term similarity index that will be used for
building the term similarity matrix, or an existing sparse term similarity matrix that will
be encapsulated and stored in the matrix attribute. When a matrix is specified as the
source, any other parameters will be ignored.
dictionary : :class:`~gensim.corpora.dictionary.Dictionary` or None, optional
A dictionary that specifies a mapping between terms and the indices of rows and columns
of the resulting term similarity matrix. The dictionary may only be None when source is
a :class:`scipy.sparse.spmatrix`.
tfidf : :class:`gensim.models.tfidfmodel.TfidfModel` or None, optional
A model that specifies the relative importance of the terms in the dictionary. The columns
of the term similarity matrix will be build in a decreasing order of importance of
terms, or in the order of term identifiers if None.
symmetric : bool, optional
Whether the symmetry of the term similarity matrix will be enforced. Symmetry is a necessary
precondition for positive definiteness, which is necessary if you later wish to derive a
unique change-of-basis matrix from the term similarity matrix using Cholesky factorization.
Setting symmetric to False will significantly reduce memory usage during matrix construction.
dominant: bool, optional
Whether the strict column diagonal dominance of the term similarity matrix will be enforced.
Strict diagonal dominance and symmetry are sufficient preconditions for positive
definiteness, which is necessary if you later wish to derive a change-of-basis matrix from
the term similarity matrix using Cholesky factorization.
nonzero_limit : int or None, optional
The maximum number of non-zero elements outside the diagonal in a single column of the
sparse term similarity matrix. If None, then no limit will be imposed.
dtype : numpy.dtype, optional
The data type of the sparse term similarity matrix.
Attributes
----------
matrix : :class:`scipy.sparse.csc_matrix`
The encapsulated sparse term similarity matrix.
Raises
------
ValueError
If `dictionary` is empty.
See Also
--------
:class:`~gensim.similarities.docsim.SoftCosineSimilarity`
A document similarity index using the soft cosine similarity over the term similarity matrix.
:class:`~gensim.similarities.termsim.LevenshteinSimilarityIndex`
A term similarity index that computes Levenshtein similarities between terms.
:class:`~gensim.similarities.termsim.WordEmbeddingSimilarityIndex`
A term similarity index that computes cosine similarities between word embeddings.
"""
def __init__(self, source, dictionary=None, tfidf=None, symmetric=True, dominant=False,
nonzero_limit=100, dtype=np.float32):
if not sparse.issparse(source):
index = source
args = (index, dictionary, tfidf, symmetric, dominant, nonzero_limit, dtype)
source = _create_source(*args)
assert sparse.issparse(source)
self.matrix = source.tocsc()
def inner_product(self, X, Y, normalized=(False, False)):
"""Get the inner product(s) between real vectors / corpora X and Y.
Return the inner product(s) between real vectors / corpora vec1 and vec2 expressed in a
non-orthogonal normalized basis, where the dot product between the basis vectors is given by
the sparse term similarity matrix.
Parameters
----------
vec1 : list of (int, float) or iterable of list of (int, float)
A query vector / corpus in the sparse bag-of-words format.
vec2 : list of (int, float) or iterable of list of (int, float)
A document vector / corpus in the sparse bag-of-words format.
normalized : tuple of {True, False, 'maintain'}, optional
First/second value specifies whether the query/document vectors in the inner product
will be L2-normalized (True; corresponds to the soft cosine measure), maintain their
L2-norm during change of basis ('maintain'; corresponds to query expansion with partial
membership), or kept as-is (False; corresponds to query expansion; default).
Returns
-------
`self.matrix.dtype`, `scipy.sparse.csr_matrix`, or :class:`numpy.matrix`
The inner product(s) between `X` and `Y`.
References
----------
The soft cosine measure was perhaps first described by [sidorovetal14]_.
Further notes on the efficient implementation of the soft cosine measure are described by
[novotny18]_.
.. [sidorovetal14] Grigori Sidorov et al., "Soft Similarity and Soft Cosine Measure: Similarity
of Features in Vector Space Model", 2014, http://www.cys.cic.ipn.mx/ojs/index.php/CyS/article/view/2043/1921.
.. [novotny18] Vít Novotný, "Implementation Notes for the Soft Cosine Measure", 2018,
http://dx.doi.org/10.1145/3269206.3269317.
"""
if not X or not Y:
return self.matrix.dtype.type(0.0)
normalized_X, normalized_Y = normalized
valid_normalized_values = (True, False, 'maintain')
if normalized_X not in valid_normalized_values:
raise ValueError('{} is not a valid value of normalize'.format(normalized_X))
if normalized_Y not in valid_normalized_values:
raise ValueError('{} is not a valid value of normalize'.format(normalized_Y))
is_corpus_X, X = is_corpus(X)
is_corpus_Y, Y = is_corpus(Y)
if not is_corpus_X and not is_corpus_Y:
X = dict(X)
Y = dict(Y)
word_indices = np.array(sorted(set(chain(X, Y))))
dtype = self.matrix.dtype
X = np.array([X[i] if i in X else 0 for i in word_indices], dtype=dtype)
Y = np.array([Y[i] if i in Y else 0 for i in word_indices], dtype=dtype)
matrix = self.matrix[word_indices[:, None], word_indices].todense()
X = _normalize_dense_vector(X, matrix, normalized_X)
Y = _normalize_dense_vector(Y, matrix, normalized_Y)
result = X.T.dot(matrix).dot(Y)
if normalized_X is True and normalized_Y is True:
result = np.clip(result, -1.0, 1.0)
return result[0, 0]
elif not is_corpus_X or not is_corpus_Y:
if is_corpus_X and not is_corpus_Y:
X, Y = Y, X # make Y the corpus
is_corpus_X, is_corpus_Y = is_corpus_Y, is_corpus_X
normalized_X, normalized_Y = normalized_Y, normalized_X
transposed = True
else:
transposed = False
dtype = self.matrix.dtype
expanded_X = corpus2csc([X], num_terms=self.matrix.shape[0], dtype=dtype).T.dot(self.matrix)
word_indices = np.array(sorted(expanded_X.nonzero()[1]))
del expanded_X
X = dict(X)
X = np.array([X[i] if i in X else 0 for i in word_indices], dtype=dtype)
Y = corpus2csc(Y, num_terms=self.matrix.shape[0], dtype=dtype)[word_indices, :].todense()
matrix = self.matrix[word_indices[:, None], word_indices].todense()
X = _normalize_dense_vector(X, matrix, normalized_X)
Y = _normalize_dense_corpus(Y, matrix, normalized_Y)
result = X.dot(matrix).dot(Y)
if normalized_X is True and normalized_Y is True:
result = np.clip(result, -1.0, 1.0)
if transposed:
result = result.T
return result
else: # if is_corpus_X and is_corpus_Y:
dtype = self.matrix.dtype
X = corpus2csc(X if is_corpus_X else [X], num_terms=self.matrix.shape[0], dtype=dtype)
Y = corpus2csc(Y if is_corpus_Y else [Y], num_terms=self.matrix.shape[0], dtype=dtype)
matrix = self.matrix
X = _normalize_sparse_corpus(X, matrix, normalized_X)
Y = _normalize_sparse_corpus(Y, matrix, normalized_Y)
result = X.T.dot(matrix).dot(Y)
if normalized_X is True and normalized_Y is True:
result.data = np.clip(result.data, -1.0, 1.0)
return result
| SparseTermSimilarityMatrix |
python | protocolbuffers__protobuf | python/google/protobuf/internal/python_message.py | {
"start": 57289,
"end": 58000
} | class ____(_Listener):
"""Special listener implementation for setting composite oneof fields."""
def __init__(self, parent_message, field):
"""Args:
parent_message: The message whose _Modified() method we should call when
we receive Modified() messages.
field: The descriptor of the field being set in the parent message.
"""
super(_OneofListener, self).__init__(parent_message)
self._field = field
def Modified(self):
"""Also updates the state of the containing oneof in the parent message."""
try:
self._parent_message_weakref._UpdateOneofState(self._field)
super(_OneofListener, self).Modified()
except ReferenceError:
pass
| _OneofListener |
python | walkccc__LeetCode | solutions/1999. Smallest Greater Multiple Made of Two Digits/1999.py | {
"start": 0,
"end": 572
} | class ____:
def findInteger(self, k: int, digit1: int, digit2: int) -> int:
minDigit = min(digit1, digit2)
maxDigit = max(digit1, digit2)
digits = [minDigit] if minDigit == maxDigit else [minDigit, maxDigit]
q = collections.deque()
for digit in digits:
q.append(digit)
while q:
u = q.popleft()
if u > k and u % k == 0:
return u
if u == 0:
continue
for digit in digits:
nextNum = u * 10 + digit
if nextNum > 2**31 - 1:
continue
q.append(nextNum)
return -1
| Solution |
python | google__pytype | pytype/pytd/visitors.py | {
"start": 42132,
"end": 42480
} | class ____(Visitor):
"""Strips off the prefix the parser uses to mark external types.
The prefix needs to be present for AddNamePrefix, and stripped off afterwards.
"""
def VisitNamedType(self, node):
new_name = node.name.removeprefix(parser_constants.EXTERNAL_NAME_PREFIX)
return node.Replace(name=new_name)
| StripExternalNamePrefix |
python | getsentry__sentry | src/sentry/ingest/inbound_filters.py | {
"start": 469,
"end": 2020
} | class ____:
"""
NOTE: This enum also exists in Relay, check if alignment is needed when
editing this.
"""
IP_ADDRESS = "ip-address"
RELEASE_VERSION = "release-version"
ERROR_MESSAGE = "error-message"
BROWSER_EXTENSION = "browser-extensions"
LEGACY_BROWSER = "legacy-browsers"
LOCALHOST = "localhost"
WEB_CRAWLER = "web-crawlers"
INVALID_CSP = "invalid-csp"
CORS = "cors"
DISCARDED_HASH = "discarded-hash" # Not replicated in Relay
CRASH_REPORT_LIMIT = "crash-report-limit" # Not replicated in Relay
HEALTH_CHECK = "filtered-transaction" # Ignore health-check transactions
FILTER_STAT_KEYS_TO_VALUES = {
FilterStatKeys.IP_ADDRESS: TSDBModel.project_total_received_ip_address,
FilterStatKeys.RELEASE_VERSION: TSDBModel.project_total_received_release_version,
FilterStatKeys.ERROR_MESSAGE: TSDBModel.project_total_received_error_message,
FilterStatKeys.BROWSER_EXTENSION: TSDBModel.project_total_received_browser_extensions,
FilterStatKeys.LEGACY_BROWSER: TSDBModel.project_total_received_legacy_browsers,
FilterStatKeys.LOCALHOST: TSDBModel.project_total_received_localhost,
FilterStatKeys.WEB_CRAWLER: TSDBModel.project_total_received_web_crawlers,
FilterStatKeys.INVALID_CSP: TSDBModel.project_total_received_invalid_csp,
FilterStatKeys.CORS: TSDBModel.project_total_received_cors,
FilterStatKeys.DISCARDED_HASH: TSDBModel.project_total_received_discarded,
FilterStatKeys.HEALTH_CHECK: TSDBModel.project_total_healthcheck,
}
| FilterStatKeys |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeAlias13.py | {
"start": 635,
"end": 861
} | class ____: ...
DT = TypeVar("DT", bound=D)
Error = CoMaybeMethod[DT, [F, E], Any]
reveal_type(
Error,
expected_text="type[(DT@Error, F, E) -> Coroutine[Any, Any, Any]] | type[(F, E) -> Coroutine[Any, Any, Any]]",
)
| F |
python | numpy__numpy | numpy/_core/tests/test_deprecations.py | {
"start": 12051,
"end": 12774
} | class ____(_DeprecationTestCase):
message = "__array_wrap__.*"
def test_deprecated(self):
class Test1:
def __array__(self, dtype=None, copy=None):
return np.arange(4)
def __array_wrap__(self, arr, context=None):
self.called = True
return 'pass context'
class Test2(Test1):
def __array_wrap__(self, arr):
self.called = True
return 'pass'
test1 = Test1()
test2 = Test2()
self.assert_deprecated(lambda: np.negative(test1))
assert test1.called
self.assert_deprecated(lambda: np.negative(test2))
assert test2.called
| TestDeprecatedArrayWrap |
python | matplotlib__matplotlib | lib/matplotlib/quiver.py | {
"start": 18744,
"end": 35729
} | class ____(mcollections.PolyCollection):
"""
Specialized PolyCollection for arrows.
Use set_UVC to change the size, orientation, and color of the
arrows; their locations can be set using set_offsets().
Much of the work in this class is done in the draw()
method so that as much information as possible is available
about the plot. In subsequent draw() calls, recalculation
is limited to things that might have changed, so there
should be no performance penalty from putting the calculations
in the draw() method.
"""
_PIVOT_VALS = ('tail', 'middle', 'tip')
@_docstring.Substitution(_quiver_doc)
def __init__(self, ax, *args,
scale=None, headwidth=3, headlength=5, headaxislength=4.5,
minshaft=1, minlength=1, units='width', scale_units=None,
angles='uv', width=None, color='k', pivot='tail', **kwargs):
"""
The constructor takes one required argument, an Axes
instance, followed by the args and kwargs described
by the following pyplot interface documentation:
%s
"""
self._axes = ax # The attr actually set by the Artist.axes property.
X, Y, U, V, C = _parse_args(*args, caller_name='quiver')
self.X = X
self.Y = Y
self.XY = np.column_stack((X, Y))
self.N = len(X)
self.scale = scale
self.headwidth = headwidth
self.headlength = float(headlength)
self.headaxislength = headaxislength
self.minshaft = minshaft
self.minlength = minlength
self.units = units
self.scale_units = scale_units
self.angles = angles
self.width = width
if pivot.lower() == 'mid':
pivot = 'middle'
self.pivot = pivot.lower()
_api.check_in_list(self._PIVOT_VALS, pivot=self.pivot)
self.transform = kwargs.pop('transform', ax.transData)
kwargs.setdefault('facecolors', color)
kwargs.setdefault('linewidths', (0,))
super().__init__([], offsets=self.XY, offset_transform=self.transform,
closed=False, **kwargs)
self.polykw = kwargs
self.set_UVC(U, V, C)
self._dpi_at_last_init = None
def _init(self):
"""
Initialization delayed until first draw;
allow time for axes setup.
"""
# It seems that there are not enough event notifications
# available to have this work on an as-needed basis at present.
if True: # self._dpi_at_last_init != self.axes.figure.dpi
trans = self._set_transform()
self.span = trans.inverted().transform_bbox(self.axes.bbox).width
if self.width is None:
sn = np.clip(math.sqrt(self.N), 8, 25)
self.width = 0.06 * self.span / sn
# _make_verts sets self.scale if not already specified
if (self._dpi_at_last_init != self.axes.get_figure(root=True).dpi
and self.scale is None):
self._make_verts(self.XY, self.U, self.V, self.angles)
self._dpi_at_last_init = self.axes.get_figure(root=True).dpi
def get_datalim(self, transData):
trans = self.get_transform()
offset_trf = self.get_offset_transform()
full_transform = (trans - transData) + (offset_trf - transData)
XY = full_transform.transform(self.XY)
bbox = transforms.Bbox.null()
bbox.update_from_data_xy(XY, ignore=True)
return bbox
@martist.allow_rasterization
def draw(self, renderer):
self._init()
verts = self._make_verts(self.XY, self.U, self.V, self.angles)
self.set_verts(verts, closed=False)
super().draw(renderer)
self.stale = False
def set_UVC(self, U, V, C=None):
# We need to ensure we have a copy, not a reference
# to an array that might change before draw().
U = ma.masked_invalid(U, copy=True).ravel()
V = ma.masked_invalid(V, copy=True).ravel()
if C is not None:
C = ma.masked_invalid(C, copy=True).ravel()
for name, var in zip(('U', 'V', 'C'), (U, V, C)):
if not (var is None or var.size == self.N or var.size == 1):
raise ValueError(f'Argument {name} has a size {var.size}'
f' which does not match {self.N},'
' the number of arrow positions')
mask = ma.mask_or(U.mask, V.mask, copy=False, shrink=True)
if C is not None:
mask = ma.mask_or(mask, C.mask, copy=False, shrink=True)
if mask is ma.nomask:
C = C.filled()
else:
C = ma.array(C, mask=mask, copy=False)
self.U = U.filled(1)
self.V = V.filled(1)
self.Umask = mask
if C is not None:
self.set_array(C)
self.stale = True
def _dots_per_unit(self, units):
"""Return a scale factor for converting from units to pixels."""
bb = self.axes.bbox
vl = self.axes.viewLim
return _api.check_getitem({
'x': bb.width / vl.width,
'y': bb.height / vl.height,
'xy': np.hypot(*bb.size) / np.hypot(*vl.size),
'width': bb.width,
'height': bb.height,
'dots': 1.,
'inches': self.axes.get_figure(root=True).dpi,
}, units=units)
def _set_transform(self):
"""
Set the PolyCollection transform to go
from arrow width units to pixels.
"""
dx = self._dots_per_unit(self.units)
self._trans_scale = dx # pixels per arrow width unit
trans = transforms.Affine2D().scale(dx)
self.set_transform(trans)
return trans
# Calculate angles and lengths for segment between (x, y), (x+u, y+v)
def _angles_lengths(self, XY, U, V, eps=1):
xy = self.axes.transData.transform(XY)
uv = np.column_stack((U, V))
xyp = self.axes.transData.transform(XY + eps * uv)
dxy = xyp - xy
angles = np.arctan2(dxy[:, 1], dxy[:, 0])
lengths = np.hypot(*dxy.T) / eps
return angles, lengths
# XY is stacked [X, Y].
# See quiver() doc for meaning of X, Y, U, V, angles.
def _make_verts(self, XY, U, V, angles):
uv = (U + V * 1j)
str_angles = angles if isinstance(angles, str) else ''
if str_angles == 'xy' and self.scale_units == 'xy':
# Here eps is 1 so that if we get U, V by diffing
# the X, Y arrays, the vectors will connect the
# points, regardless of the axis scaling (including log).
angles, lengths = self._angles_lengths(XY, U, V, eps=1)
elif str_angles == 'xy' or self.scale_units == 'xy':
# Calculate eps based on the extents of the plot
# so that we don't end up with roundoff error from
# adding a small number to a large.
eps = np.abs(self.axes.dataLim.extents).max() * 0.001
angles, lengths = self._angles_lengths(XY, U, V, eps=eps)
if str_angles and self.scale_units == 'xy':
a = lengths
else:
a = np.abs(uv)
if self.scale is None:
sn = max(10, math.sqrt(self.N))
if self.Umask is not ma.nomask:
amean = a[~self.Umask].mean()
else:
amean = a.mean()
# crude auto-scaling
# scale is typical arrow length as a multiple of the arrow width
scale = 1.8 * amean * sn / self.span
if self.scale_units is None:
if self.scale is None:
self.scale = scale
widthu_per_lenu = 1.0
else:
if self.scale_units == 'xy':
dx = 1
else:
dx = self._dots_per_unit(self.scale_units)
widthu_per_lenu = dx / self._trans_scale
if self.scale is None:
self.scale = scale * widthu_per_lenu
length = a * (widthu_per_lenu / (self.scale * self.width))
X, Y = self._h_arrows(length)
if str_angles == 'xy':
theta = angles
elif str_angles == 'uv':
theta = np.angle(uv)
else:
theta = ma.masked_invalid(np.deg2rad(angles)).filled(0)
theta = theta.reshape((-1, 1)) # for broadcasting
xy = (X + Y * 1j) * np.exp(1j * theta) * self.width
XY = np.stack((xy.real, xy.imag), axis=2)
if self.Umask is not ma.nomask:
XY = ma.array(XY)
XY[self.Umask] = ma.masked
# This might be handled more efficiently with nans, given
# that nans will end up in the paths anyway.
return XY
def _h_arrows(self, length):
"""Length is in arrow width units."""
# It might be possible to streamline the code
# and speed it up a bit by using complex (x, y)
# instead of separate arrays; but any gain would be slight.
minsh = self.minshaft * self.headlength
N = len(length)
length = length.reshape(N, 1)
# This number is chosen based on when pixel values overflow in Agg
# causing rendering errors
# length = np.minimum(length, 2 ** 16)
np.clip(length, 0, 2 ** 16, out=length)
# x, y: normal horizontal arrow
x = np.array([0, -self.headaxislength,
-self.headlength, 0],
np.float64)
x = x + np.array([0, 1, 1, 1]) * length
y = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
y = np.repeat(y[np.newaxis, :], N, axis=0)
# x0, y0: arrow without shaft, for short vectors
x0 = np.array([0, minsh - self.headaxislength,
minsh - self.headlength, minsh], np.float64)
y0 = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
ii = [0, 1, 2, 3, 2, 1, 0, 0]
X = x[:, ii]
Y = y[:, ii]
Y[:, 3:-1] *= -1
X0 = x0[ii]
Y0 = y0[ii]
Y0[3:-1] *= -1
shrink = length / minsh if minsh != 0. else 0.
X0 = shrink * X0[np.newaxis, :]
Y0 = shrink * Y0[np.newaxis, :]
short = np.repeat(length < minsh, 8, axis=1)
# Now select X0, Y0 if short, otherwise X, Y
np.copyto(X, X0, where=short)
np.copyto(Y, Y0, where=short)
if self.pivot == 'middle':
X -= 0.5 * X[:, 3, np.newaxis]
elif self.pivot == 'tip':
# numpy bug? using -= does not work here unless we multiply by a
# float first, as with 'mid'.
X = X - X[:, 3, np.newaxis]
elif self.pivot != 'tail':
_api.check_in_list(["middle", "tip", "tail"], pivot=self.pivot)
tooshort = length < self.minlength
if tooshort.any():
# Use a heptagonal dot:
th = np.arange(0, 8, 1, np.float64) * (np.pi / 3.0)
x1 = np.cos(th) * self.minlength * 0.5
y1 = np.sin(th) * self.minlength * 0.5
X1 = np.repeat(x1[np.newaxis, :], N, axis=0)
Y1 = np.repeat(y1[np.newaxis, :], N, axis=0)
tooshort = np.repeat(tooshort, 8, 1)
np.copyto(X, X1, where=tooshort)
np.copyto(Y, Y1, where=tooshort)
# Mask handling is deferred to the caller, _make_verts.
return X, Y
_barbs_doc = r"""
Plot a 2D field of wind barbs.
Call signature::
barbs([X, Y], U, V, [C], /, **kwargs)
Where *X*, *Y* define the barb locations, *U*, *V* define the barb
directions, and *C* optionally sets the color.
The arguments *X*, *Y*, *U*, *V*, *C* are positional-only and may be
1D or 2D. *U*, *V*, *C* may be masked arrays, but masked *X*, *Y*
are not supported at present.
Barbs are traditionally used in meteorology as a way to plot the speed
and direction of wind observations, but can technically be used to
plot any two dimensional vector quantity. As opposed to arrows, which
give vector magnitude by the length of the arrow, the barbs give more
quantitative information about the vector magnitude by putting slanted
lines or a triangle for various increments in magnitude, as show
schematically below::
: /\ \
: / \ \
: / \ \ \
: / \ \ \
: ------------------------------
The largest increment is given by a triangle (or "flag"). After those
come full lines (barbs). The smallest increment is a half line. There
is only, of course, ever at most 1 half line. If the magnitude is
small and only needs a single half-line and no full lines or
triangles, the half-line is offset from the end of the barb so that it
can be easily distinguished from barbs with a single full line. The
magnitude for the barb shown above would nominally be 65, using the
standard increments of 50, 10, and 5.
See also https://en.wikipedia.org/wiki/Wind_barb.
Parameters
----------
X, Y : 1D or 2D array-like, optional
The x and y coordinates of the barb locations. See *pivot* for how the
barbs are drawn to the x, y positions.
If not given, they will be generated as a uniform integer meshgrid based
on the dimensions of *U* and *V*.
If *X* and *Y* are 1D but *U*, *V* are 2D, *X*, *Y* are expanded to 2D
using ``X, Y = np.meshgrid(X, Y)``. In this case ``len(X)`` and ``len(Y)``
must match the column and row dimensions of *U* and *V*.
U, V : 1D or 2D array-like
The x and y components of the barb shaft.
C : 1D or 2D array-like, optional
Numeric data that defines the barb colors by colormapping via *norm* and
*cmap*.
This does not support explicit colors. If you want to set colors directly,
use *barbcolor* instead.
length : float, default: 7
Length of the barb in points; the other parts of the barb
are scaled against this.
pivot : {'tip', 'middle'} or float, default: 'tip'
The part of the arrow that is anchored to the *X*, *Y* grid. The barb
rotates about this point. This can also be a number, which shifts the
start of the barb that many points away from grid point.
barbcolor : :mpltype:`color` or color sequence
The color of all parts of the barb except for the flags. This parameter
is analogous to the *edgecolor* parameter for polygons, which can be used
instead. However this parameter will override facecolor.
flagcolor : :mpltype:`color` or color sequence
The color of any flags on the barb. This parameter is analogous to the
*facecolor* parameter for polygons, which can be used instead. However,
this parameter will override facecolor. If this is not set (and *C* has
not either) then *flagcolor* will be set to match *barbcolor* so that the
barb has a uniform color. If *C* has been set, *flagcolor* has no effect.
sizes : dict, optional
A dictionary of coefficients specifying the ratio of a given
feature to the length of the barb. Only those values one wishes to
override need to be included. These features include:
- 'spacing' - space between features (flags, full/half barbs)
- 'height' - height (distance from shaft to top) of a flag or full barb
- 'width' - width of a flag, twice the width of a full barb
- 'emptybarb' - radius of the circle used for low magnitudes
fill_empty : bool, default: False
Whether the empty barbs (circles) that are drawn should be filled with
the flag color. If they are not filled, the center is transparent.
rounding : bool, default: True
Whether the vector magnitude should be rounded when allocating barb
components. If True, the magnitude is rounded to the nearest multiple
of the half-barb increment. If False, the magnitude is simply truncated
to the next lowest multiple.
barb_increments : dict, optional
A dictionary of increments specifying values to associate with
different parts of the barb. Only those values one wishes to
override need to be included.
- 'half' - half barbs (Default is 5)
- 'full' - full barbs (Default is 10)
- 'flag' - flags (default is 50)
flip_barb : bool or array-like of bool, default: False
Whether the lines and flags should point opposite to normal.
Normal behavior is for the barbs and lines to point right (comes from wind
barbs having these features point towards low pressure in the Northern
Hemisphere).
A single value is applied to all barbs. Individual barbs can be flipped by
passing a bool array of the same size as *U* and *V*.
Returns
-------
barbs : `~matplotlib.quiver.Barbs`
Other Parameters
----------------
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs
The barbs can further be customized using `.PolyCollection` keyword
arguments:
%(PolyCollection:kwdoc)s
""" % _docstring.interpd.params
_docstring.interpd.register(barbs_doc=_barbs_doc)
| Quiver |
python | weaviate__weaviate-python-client | weaviate/collections/grpc/shared.py | {
"start": 29548,
"end": 31028
} | class ____:
@staticmethod
def single(byte_vector: bytes) -> List[float]:
return _ByteOps.decode_float32s(byte_vector)
@staticmethod
def multi(byte_vector: bytes) -> List[List[float]]:
dim_bytes = byte_vector[:2]
dim = int(struct.unpack("<H", dim_bytes)[0])
byte_vector = byte_vector[2:]
how_many = len(byte_vector) // (dim * UINT32_LEN)
return [
_ByteOps.decode_float32s(byte_vector[i * dim * UINT32_LEN : (i + 1) * dim * UINT32_LEN])
for i in range(how_many)
]
def _is_1d_vector(inputs: Any) -> TypeGuard[OneDimensionalVectorType]:
try:
if len(inputs) == 0:
return False
except TypeError:
return False
if __is_list_type(inputs):
return not __is_list_type(inputs[0])
return False
def _is_2d_vector(inputs: Any) -> TypeGuard[TwoDimensionalVectorType]:
try:
if len(inputs) == 0:
return False
except TypeError:
return False
if __is_list_type(inputs):
return _is_1d_vector(inputs[0])
return False
def __is_list_type(inputs: Any) -> bool:
try:
if len(inputs) == 0:
return False
except TypeError:
return False
return any(
_is_valid(types, inputs)
for types in [
List,
_ExtraTypes.TF,
_ExtraTypes.PANDAS,
_ExtraTypes.NUMPY,
_ExtraTypes.POLARS,
]
)
| _Unpack |
python | getsentry__sentry | tests/sentry/integrations/github/tasks/test_pr_comment.py | {
"start": 8994,
"end": 12986
} | class ____(GithubCommentTestCase, SnubaTestCase):
def test_simple(self) -> None:
group1 = [
self.store_event(
{"fingerprint": ["group-1"], "timestamp": before_now(days=1).isoformat()},
project_id=self.project.id,
)
for _ in range(3)
][0].group.id
group2 = [
self.store_event(
{"fingerprint": ["group-2"], "timestamp": before_now(days=1).isoformat()},
project_id=self.project.id,
)
for _ in range(6)
][0].group.id
group3 = [
self.store_event(
{"fingerprint": ["group-3"], "timestamp": before_now(days=1).isoformat()},
project_id=self.project.id,
)
for _ in range(4)
][0].group.id
res = self.pr_comment_workflow.get_top_5_issues_by_count(
[group1, group2, group3], self.project
)
assert [issue["group_id"] for issue in res] == [group2, group3, group1]
def test_over_5_issues(self) -> None:
issue_ids = [
self.store_event(
{"fingerprint": [f"group-{idx}"], "timestamp": before_now(days=1).isoformat()},
project_id=self.project.id,
).group.id
for idx in range(6)
]
res = self.pr_comment_workflow.get_top_5_issues_by_count(issue_ids, self.project)
assert len(res) == 5
def test_ignore_info_level_issues(self) -> None:
group1 = [
self.store_event(
{
"fingerprint": ["group-1"],
"timestamp": before_now(days=1).isoformat(),
"level": logging.INFO,
},
project_id=self.project.id,
)
for _ in range(3)
][0].group.id
group2 = [
self.store_event(
{"fingerprint": ["group-2"], "timestamp": before_now(days=1).isoformat()},
project_id=self.project.id,
)
for _ in range(6)
][0].group.id
group3 = [
self.store_event(
{
"fingerprint": ["group-3"],
"timestamp": before_now(days=1).isoformat(),
"level": logging.INFO,
},
project_id=self.project.id,
)
for _ in range(4)
][0].group.id
res = self.pr_comment_workflow.get_top_5_issues_by_count(
[group1, group2, group3], self.project
)
assert [issue["group_id"] for issue in res] == [group2]
def test_do_not_ignore_other_issues(self) -> None:
group1 = [
self.store_event(
{
"fingerprint": ["group-1"],
"timestamp": before_now(days=1).isoformat(),
"level": logging.ERROR,
},
project_id=self.project.id,
)
for _ in range(3)
][0].group.id
group2 = [
self.store_event(
{
"fingerprint": ["group-2"],
"timestamp": before_now(days=1).isoformat(),
"level": logging.INFO,
},
project_id=self.project.id,
)
for _ in range(6)
][0].group.id
group3 = [
self.store_event(
{
"fingerprint": ["group-3"],
"timestamp": before_now(days=1).isoformat(),
"level": logging.DEBUG,
},
project_id=self.project.id,
)
for _ in range(4)
][0].group.id
res = self.pr_comment_workflow.get_top_5_issues_by_count(
[group1, group2, group3], self.project
)
assert [issue["group_id"] for issue in res] == [group3, group1]
| TestTop5IssuesByCount |
python | apache__airflow | providers/google/tests/unit/google/marketing_platform/operators/test_analytics_admin.py | {
"start": 6381,
"end": 7884
} | class ____:
@mock.patch(f"{ANALYTICS_PATH}.GoogleAnalyticsAdminHook")
@mock.patch(f"{ANALYTICS_PATH}.DataStream.to_dict")
def test_execute(self, data_stream_to_dict_mock, hook_mock):
data_stream_returned = mock.MagicMock()
hook_mock.return_value.create_data_stream.return_value = data_stream_returned
data_stream_serialized = mock.MagicMock()
data_stream_to_dict_mock.return_value = data_stream_serialized
mock_parent, mock_data_stream, mock_retry, mock_timeout, mock_metadata = (
mock.MagicMock() for _ in range(5)
)
data_stream_created = GoogleAnalyticsAdminCreateDataStreamOperator(
task_id="test_task",
property_id=TEST_PROPERTY_ID,
data_stream=mock_data_stream,
retry=mock_retry,
timeout=mock_timeout,
metadata=mock_metadata,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
).execute(context=None)
hook_mock.assert_called_once()
hook_mock.return_value.create_data_stream.assert_called_once_with(
property_id=TEST_PROPERTY_ID,
data_stream=mock_data_stream,
retry=mock_retry,
timeout=mock_timeout,
metadata=mock_metadata,
)
data_stream_to_dict_mock.assert_called_once_with(data_stream_returned)
assert data_stream_created == data_stream_serialized
| TestGoogleAnalyticsAdminCreateDataStreamOperator |
python | getsentry__sentry | tests/sentry/incidents/test_logic.py | {
"start": 96645,
"end": 98315
} | class ____(TestCase):
@cached_property
def alert_rule(self):
return self.create_alert_rule()
def test(self) -> None:
trigger = create_alert_rule_trigger(self.alert_rule, "hello", 1000)
label = "uh oh"
alert_threshold = 2000
trigger = update_alert_rule_trigger(trigger, label=label, alert_threshold=alert_threshold)
assert trigger.label == label
assert trigger.alert_threshold == alert_threshold
def test_name_used(self) -> None:
label = "uh oh"
create_alert_rule_trigger(self.alert_rule, label, 1000)
trigger = create_alert_rule_trigger(self.alert_rule, "something else", 1000)
with pytest.raises(AlertRuleTriggerLabelAlreadyUsedError):
update_alert_rule_trigger(trigger, label=label)
@with_feature("organizations:anomaly-detection-alerts")
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
def test_invalid_threshold_dynamic_alert(self, mock_seer_request: MagicMock) -> None:
seer_return_value: StoreDataResponse = {"success": True}
mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
rule = self.create_alert_rule(
time_window=15,
sensitivity=AlertRuleSensitivity.HIGH,
seasonality=AlertRuleSeasonality.AUTO,
detection_type=AlertRuleDetectionType.DYNAMIC,
)
trigger = create_alert_rule_trigger(rule, "yay", 0)
with pytest.raises(ValidationError):
update_alert_rule_trigger(trigger, alert_threshold=10)
| UpdateAlertRuleTriggerTest |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_betterem.py | {
"start": 53,
"end": 6463
} | class ____(util.MdCase):
"""Test escaping cases for BetterEm without smart enabled."""
extension = [
'pymdownx.betterem'
]
extension_configs = {
"pymdownx.betterem": {
"smart_enable": "none"
}
}
def test_complex_multple_emphasis_type(self):
"""Test complex case where `**text*text***` may be detected on accident."""
self.check_markdown(
'traced ***along*** bla **blocked** if other ***or***',
'<p>traced <strong><em>along</em></strong> bla <strong>blocked</strong> if other <strong><em>or</em></strong></p>' # noqa: E501
)
def test_complex_multple_emphasis_type_variant2(self):
"""Test another complex case where `**text*text***` may be detected on accident."""
self.check_markdown(
'on the **1-4 row** of the AP Combat Table ***and*** receive',
'<p>on the <strong>1-4 row</strong> of the AP Combat Table <strong><em>and</em></strong> receive</p>'
)
def test_complex_multple_underscore_type(self):
"""Test complex case where `__text_text___` may be detected on accident."""
self.check_markdown(
'traced ___along___ bla __blocked__ if other ___or___',
'<p>traced <strong><em>along</em></strong> bla <strong>blocked</strong> if other <strong><em>or</em></strong></p>' # noqa: E501
)
def test_complex_multple_underscore_type_variant2(self):
"""Test another complex case where `__text_text___` may be detected on accident."""
self.check_markdown(
'on the __1-4 row__ of the AP Combat Table ___and___ receive',
'<p>on the <strong>1-4 row</strong> of the AP Combat Table <strong><em>and</em></strong> receive</p>'
)
def test_complex_cases_star(self):
"""Test some complex cases for asterisks."""
self.check_markdown(
'''
***I'm italic and bold* I am just bold.**
***I'm bold and italic!** I am just italic.*
*italic and **italic bold*** and *italic*
**bold and *italic bold*** and *italic*
***I'm italic and bold* I am just bold.** *italic*
***I'm bold and italic!** I am just italic.* *italic*
*italic and **italic bold*** and italic*
**bold and *italic bold*** and bold*
*italic and **italic bold***
**bold and *italic bold***
*italic **italic bold** italic*
***italic and bold* bold**: foo bar **italic**
***italic and bold** italic* foo bar **italic**
*italic and **italic bold*** **italic**
**bold and *italic bold*** **italic**
''',
'''
<p><strong><em>I'm italic and bold</em> I am just bold.</strong></p>
<p><em><strong>I'm bold and italic!</strong> I am just italic.</em></p>
<p><em>italic and <strong>italic bold</strong></em> and <em>italic</em></p>
<p><strong>bold and <em>italic bold</em></strong> and <em>italic</em></p>
<p><strong><em>I'm italic and bold</em> I am just bold.</strong> <em>italic</em></p>
<p><em><strong>I'm bold and italic!</strong> I am just italic.</em> <em>italic</em></p>
<p><em>italic and <strong>italic bold</strong></em> and italic*</p>
<p><strong>bold and <em>italic bold</em></strong> and bold*</p>
<p><em>italic and <strong>italic bold</strong></em></p>
<p><strong>bold and <em>italic bold</em></strong></p>
<p><em>italic <strong>italic bold</strong> italic</em></p>
<p><strong><em>italic and bold</em> bold</strong>: foo bar <strong>italic</strong></p>
<p><em><strong>italic and bold</strong> italic</em> foo bar <strong>italic</strong></p>
<p><em>italic and <strong>italic bold</strong></em> <strong>italic</strong></p>
<p><strong>bold and <em>italic bold</em></strong> <strong>italic</strong></p>
''',
True
)
def test_complex_cases_underscore(self):
"""Test some complex cases for underscore."""
self.check_markdown(
'''
___I'm italic and bold_ I am just bold.__
___I'm bold and italic!__ I am just italic._
_italic and __italic bold___ and _italic_
__bold and _italic bold___ and _italic_
___I'm italic and bold_ I am just bold.__ _italic_
___I'm bold and italic!__ I am just italic._ _italic_
_italic and __italic bold___ and italic_
__bold and _italic bold___ and bold_
_italic and __italic bold___
__bold and _italic bold___
_italic __italic bold__ italic_
___italic and bold_ bold__: foo bar __italic__
___italic and bold__ italic_ foo bar __italic__
_italic and __italic bold___ __italic__
__bold and _italic bold___ __italic__
''',
'''
<p><strong><em>I'm italic and bold</em> I am just bold.</strong></p>
<p><em><strong>I'm bold and italic!</strong> I am just italic.</em></p>
<p><em>italic and <strong>italic bold</strong></em> and <em>italic</em></p>
<p><strong>bold and <em>italic bold</em></strong> and <em>italic</em></p>
<p><strong><em>I'm italic and bold</em> I am just bold.</strong> <em>italic</em></p>
<p><em><strong>I'm bold and italic!</strong> I am just italic.</em> <em>italic</em></p>
<p><em>italic and <strong>italic bold</strong></em> and italic_</p>
<p><strong>bold and <em>italic bold</em></strong> and bold_</p>
<p><em>italic and <strong>italic bold</strong></em></p>
<p><strong>bold and <em>italic bold</em></strong></p>
<p><em>italic <strong>italic bold</strong> italic</em></p>
<p><strong><em>italic and bold</em> bold</strong>: foo bar <strong>italic</strong></p>
<p><em><strong>italic and bold</strong> italic</em> foo bar <strong>italic</strong></p>
<p><em>italic and <strong>italic bold</strong></em> <strong>italic</strong></p>
<p><strong>bold and <em>italic bold</em></strong> <strong>italic</strong></p>
''',
True
)
| TestBetterNoSmart |
python | bokeh__bokeh | src/bokeh/sphinxext/_internal/bokeh_palette_group.py | {
"start": 3052,
"end": 3853
} | class ____(Directive):
has_content = False
required_arguments = 1
def run(self):
node = bokeh_palette_group()
node["group"] = self.arguments[0]
return [node]
def setup(app):
""" Required Sphinx extension setup function. """
app.add_node(bokeh_palette_group, html=bokeh_palette_group.html)
app.add_directive("bokeh-palette-group", BokehPaletteGroupDirective)
return PARALLEL_SAFE
# -----------------------------------------------------------------------------
# Private API
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Code
# -----------------------------------------------------------------------------
| BokehPaletteGroupDirective |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess3.py | {
"start": 945,
"end": 1002
} | class ____(Generic[_TParent]):
member1: _TParent
| Parent |
python | gevent__gevent | src/greentest/3.10/test_httplib.py | {
"start": 51821,
"end": 52325
} | class ____(ExtendedReadTest):
"""
Test peek(), read1(), readline() in chunked mode
"""
lines = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd!\n\r\n'
'9\r\n'
'and now \n\r\n'
'23\r\n'
'for something completely different\n\r\n'
'3\r\n'
'foo\r\n'
'0\r\n' # terminating chunk
'\r\n' # end of trailers
)
| ExtendedReadTestChunked |
python | redis__redis-py | redis/commands/search/querystring.py | {
"start": 5998,
"end": 6171
} | class ____(Node):
"""
Create a union node. Any of the children need to be satisfied in order for
this node to evaluate as true
"""
JOINSTR = "|"
| UnionNode |
python | getsentry__sentry | tests/sentry/integrations/slack/notifications/test_note.py | {
"start": 466,
"end": 5304
} | class ____(SlackActivityNotificationTest, PerformanceIssueTestCase):
def create_notification(self, group):
return NoteActivityNotification(
Activity(
project=self.project,
group=group,
user_id=self.user.id,
type=ActivityType.NOTE,
data={"text": "text", "mentions": []},
)
)
def test_note_block(self) -> None:
"""
Tests that a Slack message is sent with the expected payload when a comment is made on an issue
with block kit enabled.
"""
notification = self.create_notification(self.group)
with self.tasks():
notification.send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
assert fallback_text == f"New comment by {self.name}"
assert blocks[0]["text"]["text"] == fallback_text
notification_uuid = self.get_notification_uuid(blocks[1]["text"]["text"])
comment = notification.activity.data["text"]
assert (
blocks[1]["text"]["text"]
== f"<http://testserver/organizations/{self.organization.slug}/issues/{self.group.id}/?referrer=note_activity-slack¬ification_uuid={notification_uuid}|*{self.group.title}*> \n{comment}"
)
assert (
blocks[2]["elements"][0]["text"]
== f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=note_activity-slack-user¬ification_uuid={notification_uuid}&organizationId={self.organization.id}|Notification Settings>"
)
def test_note_performance_issue_block(self) -> None:
"""
Tests that a Slack message is sent with the expected payload when a comment is made on a performance issue
with block kit enabled.
"""
event = self.create_performance_issue()
assert event.group is not None
notification = self.create_notification(event.group)
with self.tasks():
notification.send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
assert fallback_text == f"New comment by {self.name}"
assert blocks[0]["text"]["text"] == fallback_text
notification_uuid = self.get_notification_uuid(blocks[1]["text"]["text"])
comment = notification.activity.data["text"]
assert (
blocks[1]["text"]["text"]
== f"<http://testserver/organizations/{self.organization.slug}/issues/{event.group.id}/?referrer=note_activity-slack¬ification_uuid={notification_uuid}|*N+1 Query*> \n{comment}"
)
assert (
blocks[2]["elements"][0]["text"]
== f"{self.project.slug} | production | <http://testserver/settings/account/notifications/workflow/?referrer=note_activity-slack-user¬ification_uuid={notification_uuid}&organizationId={self.organization.id}|Notification Settings>"
)
@mock.patch(
"sentry.services.eventstore.models.GroupEvent.occurrence",
return_value=TEST_ISSUE_OCCURRENCE,
new_callable=mock.PropertyMock,
)
def test_note_generic_issue_block(self, occurrence: mock.MagicMock) -> None:
"""
Test that a Slack message is sent with the expected payload when a comment is made on a generic issue type
"""
event = self.store_event(
data={"message": "Hellboy's world", "level": "error"}, project_id=self.project.id
)
group_event = event.for_group(event.groups[0])
notification = self.create_notification(group_event.group)
with self.tasks():
notification.send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
assert fallback_text == f"New comment by {self.name}"
assert blocks[0]["text"]["text"] == fallback_text
notification_uuid = self.get_notification_uuid(blocks[1]["text"]["text"])
comment = notification.activity.data["text"]
assert event.group
assert (
blocks[1]["text"]["text"]
== f"<http://testserver/organizations/{self.organization.slug}/issues/{event.group.id}/?referrer=note_activity-slack¬ification_uuid={notification_uuid}|*{TEST_ISSUE_OCCURRENCE.issue_title}*> \n{comment}"
)
assert (
blocks[2]["elements"][0]["text"]
== f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=note_activity-slack-user¬ification_uuid={notification_uuid}&organizationId={self.organization.id}|Notification Settings>"
)
| SlackNoteNotificationTest |
python | donnemartin__system-design-primer | solutions/object_oriented_design/hash_table/hash_map.py | {
"start": 109,
"end": 1079
} | class ____(object):
def __init__(self, size):
self.size = size
self.table = [[] for _ in range(self.size)]
def _hash_function(self, key):
return key % self.size
def set(self, key, value):
hash_index = self._hash_function(key)
for item in self.table[hash_index]:
if item.key == key:
item.value = value
return
self.table[hash_index].append(Item(key, value))
def get(self, key):
hash_index = self._hash_function(key)
for item in self.table[hash_index]:
if item.key == key:
return item.value
raise KeyError('Key not found')
def remove(self, key):
hash_index = self._hash_function(key)
for index, item in enumerate(self.table[hash_index]):
if item.key == key:
del self.table[hash_index][index]
return
raise KeyError('Key not found')
| HashTable |
python | pytorch__pytorch | test/onnx/test_pytorch_onnx_onnxruntime_cuda.py | {
"start": 727,
"end": 4921
} | class ____(onnx_test_common._TestONNXRuntime):
@skipIfUnsupportedMinOpsetVersion(9)
@skipIfNoCuda
def test_gelu_fp16(self):
class GeluModel(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.gelu(x)
x = torch.randn(
2,
4,
5,
6,
requires_grad=True,
dtype=torch.float16,
device=torch.device("cuda"),
)
self.run_test(GeluModel(), x, rtol=1e-3, atol=1e-5)
@skipIfUnsupportedMinOpsetVersion(9)
@skipIfNoCuda
@skipScriptTest()
def test_layer_norm_fp16(self):
class LayerNormModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.layer_norm = torch.nn.LayerNorm([10, 10])
@autocast()
def forward(self, x):
return self.layer_norm(x)
x = torch.randn(
20,
5,
10,
10,
requires_grad=True,
dtype=torch.float16,
device=torch.device("cuda"),
)
self.run_test(LayerNormModel().cuda(), x, rtol=1e-3, atol=1e-5)
@skipIfUnsupportedMinOpsetVersion(12)
@skipIfNoCuda
@skipScriptTest()
def test_softmaxCrossEntropy_fusion_fp16(self):
class FusionModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.loss = torch.nn.NLLLoss(reduction="none")
self.m = torch.nn.LogSoftmax(dim=1)
@autocast()
def forward(self, input, target):
output = self.loss(self.m(2 * input), target)
return output
N, C = 5, 4
input = torch.randn(N, 16, dtype=torch.float16, device=torch.device("cuda"))
target = torch.empty(N, dtype=torch.long, device=torch.device("cuda")).random_(
0, C
)
# using test data containing default ignore_index=-100
target[target == 1] = -100
self.run_test(FusionModel(), (input, target))
@skipIfNoCuda
@skipScriptTest()
def test_apex_o2(self):
class LinearModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(3, 5)
def forward(self, x):
return self.linear(x)
try:
from apex import amp
except Exception as e:
raise unittest.SkipTest("Apex is not available") from e
input = torch.randn(3, 3, device=torch.device("cuda"))
model = amp.initialize(LinearModel(), opt_level="O2")
self.run_test(model, input)
# ONNX supports bfloat16 for opsets >= 13
# Add, Sub and Mul ops don't support bfloat16 cpu in onnxruntime.
@skipIfUnsupportedMinOpsetVersion(13)
@skipIfNoBFloat16Cuda
def test_arithmetic_bfp16(self):
class MyModule(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4, dtype=torch.bfloat16, device=torch.device("cuda"))
x = x.type_as(y)
return torch.mul(torch.add(x, y), torch.sub(x, y)).to(
dtype=torch.float16
)
x = torch.ones(
3, 4, requires_grad=True, dtype=torch.float16, device=torch.device("cuda")
)
self.run_test(MyModule(), x, rtol=1e-3, atol=1e-5)
@skipIfNoCuda
def test_deduplicate_initializers_diff_devices(self):
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.w = torch.nn.Parameter(
torch.ones(2, 3, device=torch.device("cpu"))
)
self.b = torch.nn.Parameter(torch.ones(3, device=torch.device("cuda")))
def forward(self, x, y):
return torch.matmul(self.w, x), y + self.b
x = torch.randn(3, 3, device=torch.device("cpu"))
y = torch.randn(3, 3, device=torch.device("cuda"))
self.run_test(Model(), (x, y))
if __name__ == "__main__":
common_utils.run_tests()
| TestONNXRuntime_cuda |
python | openai__openai-python | src/openai/types/responses/response_function_shell_tool_call.py | {
"start": 522,
"end": 1242
} | class ____(BaseModel):
id: str
"""The unique ID of the function shell tool call.
Populated when this item is returned via API.
"""
action: Action
"""The shell commands and limits that describe how to run the tool call."""
call_id: str
"""The unique ID of the function shell tool call generated by the model."""
status: Literal["in_progress", "completed", "incomplete"]
"""The status of the shell call.
One of `in_progress`, `completed`, or `incomplete`.
"""
type: Literal["shell_call"]
"""The type of the item. Always `shell_call`."""
created_by: Optional[str] = None
"""The ID of the entity that created this tool call."""
| ResponseFunctionShellToolCall |
python | sympy__sympy | sympy/polys/agca/homomorphisms.py | {
"start": 16521,
"end": 17749
} | class ____(MatrixHomomorphism):
"""
Concrete class for homomorphisms with domain a free module or a quotient
thereof.
Do not instantiate; the constructor does not check that your data is well
defined. Use the ``homomorphism`` function instead:
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> F = QQ.old_poly_ring(x).free_module(2)
>>> homomorphism(F, F, [[1, 0], [0, 1]])
Matrix([
[1, 0], : QQ[x]**2 -> QQ[x]**2
[0, 1]])
"""
def _apply(self, elem):
if isinstance(self.domain, QuotientModule):
elem = elem.data
return sum(x * e for x, e in zip(elem, self.matrix))
def _image(self):
return self.codomain.submodule(*self.matrix)
def _kernel(self):
# The domain is either a free module or a quotient thereof.
# It does not matter if it is a quotient, because that won't increase
# the kernel.
# Our generators {e_i} are sent to the matrix entries {b_i}.
# The kernel is essentially the syzygy module of these {b_i}.
syz = self.image().syzygy_module()
return self.domain.submodule(*syz.gens)
| FreeModuleHomomorphism |
python | keras-team__keras | keras/src/layers/core/masking.py | {
"start": 255,
"end": 2856
} | class ____(Layer):
"""Masks a sequence by using a mask value to skip timesteps.
For each timestep in the input tensor (dimension #1 in the tensor),
if all values in the input tensor at that timestep
are equal to `mask_value`, then the timestep will be masked (skipped)
in all downstream layers (as long as they support masking).
If any downstream layer does not support masking yet receives such
an input mask, an exception will be raised.
Example:
Consider a NumPy data array `x` of shape `(samples, timesteps, features)`,
to be fed to an LSTM layer. You want to mask timestep #3 and #5 because you
lack data for these timesteps. You can:
- Set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
- Insert a `Masking` layer with `mask_value=0.` before the LSTM layer:
```python
samples, timesteps, features = 32, 10, 8
inputs = np.random.random([samples, timesteps, features]).astype(np.float32)
inputs[:, 3, :] = 0.
inputs[:, 5, :] = 0.
model = keras.models.Sequential()
model.add(keras.layers.Masking(mask_value=0.0))
model.add(keras.layers.LSTM(32))
output = model(inputs)
# The time step 3 and 5 will be skipped from LSTM calculation.
```
Note: in the Keras masking convention, a masked timestep is denoted by
a mask value of `False`, while a non-masked (i.e. usable) timestep
is denoted by a mask value of `True`.
"""
def __init__(self, mask_value=0.0, **kwargs):
super().__init__(**kwargs)
# `mask_value` can be a serialized tensor, hence verify it
if isinstance(mask_value, dict) and mask_value.get("config", None):
mask_value = deserialize_keras_object(mask_value)
self.mask_value = mask_value
self.supports_masking = True
self._build_at_init()
def compute_mask(self, inputs, mask=None):
return ops.any(ops.not_equal(inputs, self.mask_value), axis=-1)
def call(self, inputs):
boolean_mask = ops.any(
ops.not_equal(inputs, self.mask_value), axis=-1, keepdims=True
)
# Set masked outputs to 0
outputs = inputs * backend.cast(boolean_mask, dtype=inputs.dtype)
# Compute the mask and outputs simultaneously.
backend.set_keras_mask(outputs, mask=ops.squeeze(boolean_mask, axis=-1))
return outputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {"mask_value": self.mask_value}
return {**base_config, **config}
| Masking |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/test_system_message.py | {
"start": 37638,
"end": 39986
} | class ____:
"""Test edge cases and error handling for system messages."""
@pytest.mark.parametrize(
"content,expected_blocks,expected_prompt",
[
("", 0, ""),
(
[
{"type": "text", "text": "Block 1"},
{"type": "text", "text": "Block 2"},
{"type": "text", "text": "Block 3"},
],
3,
None,
),
],
ids=["empty_content", "multiple_blocks"],
)
def test_system_message_content_variations(
self, content, expected_blocks, expected_prompt
) -> None:
"""Test SystemMessage with various content variations."""
system_message = SystemMessage(content=content)
model = GenericFakeChatModel(messages=iter([AIMessage(content="response")]))
request = ModelRequest(
model=model,
system_message=system_message,
messages=[],
tool_choice=None,
tools=[],
response_format=None,
state=cast("AgentState", {"messages": []}), # type: ignore[name-defined]
runtime=_fake_runtime(),
)
if isinstance(content, list):
assert isinstance(request.system_message.content_blocks, list)
assert len(request.system_message.content_blocks) == expected_blocks
else:
assert len(request.system_message.content_blocks) == expected_blocks
assert request.system_prompt == expected_prompt
def test_reset_system_prompt_to_none(self) -> None:
"""Test resetting system prompt to None."""
base_message = SystemMessage(content="Original prompt")
model = GenericFakeChatModel(messages=iter([AIMessage(content="response")]))
request = ModelRequest(
model=model,
system_message=base_message,
messages=[],
tool_choice=None,
tools=[],
response_format=None,
state=cast("AgentState", {"messages": []}), # type: ignore[name-defined]
runtime=_fake_runtime(),
)
new_request = request.override(system_message=None)
assert new_request.system_message is None
assert new_request.system_prompt is None
| TestEdgeCasesAndErrorHandling |
python | keras-team__keras | keras/src/losses/losses.py | {
"start": 54512,
"end": 58024
} | class ____(LossFunctionWrapper):
"""Computes Circle Loss between integer labels and L2-normalized embeddings.
This is a metric learning loss designed to minimize within-class distance
and maximize between-class distance in a flexible manner by dynamically
adjusting the penalty strength based on optimization status of each
similarity score.
To use Circle Loss effectively, the model should output embeddings without
an activation function (such as a `Dense` layer with `activation=None`)
followed by UnitNormalization layer to ensure unit-norm embeddings.
Args:
gamma: Scaling factor that determines the largest scale of each
similarity score. Defaults to `80`.
margin: The relaxation factor, below this distance, negatives are
up weighted and positives are down weighted. Similarly, above this
distance negatives are down weighted and positive are up weighted.
Defaults to `0.4`.
remove_diagonal: Boolean, whether to remove self-similarities from the
positive mask. Defaults to `True`.
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
`"float32"` unless set to different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
Examples:
Usage with the `compile()` API:
```python
model = models.Sequential([
keras.layers.Input(shape=(224, 224, 3)),
keras.layers.Conv2D(16, (3, 3), activation='relu'),
keras.layers.Flatten(),
keras.layers.Dense(64, activation=None), # No activation
keras.layers.UnitNormalization() # L2 normalization
])
model.compile(optimizer="adam", loss=keras.losses.Circle())
```
Reference:
- [Yifan Sun et al., 2020](https://arxiv.org/abs/2002.10857)
"""
def __init__(
self,
gamma=80.0,
margin=0.4,
remove_diagonal=True,
reduction="sum_over_batch_size",
name="circle",
dtype=None,
):
super().__init__(
circle,
name=name,
reduction=reduction,
dtype=dtype,
gamma=gamma,
margin=margin,
remove_diagonal=remove_diagonal,
)
self.gamma = gamma
self.margin = margin
self.remove_diagonal = remove_diagonal
def get_config(self):
config = Loss.get_config(self)
config.update(
{
"gamma": self.gamma,
"margin": self.margin,
"remove_diagonal": self.remove_diagonal,
}
)
return config
@keras_export("keras.losses.CategoricalGeneralizedCrossEntropy")
| Circle |
python | streamlit__streamlit | lib/streamlit/elements/code.py | {
"start": 1076,
"end": 6028
} | class ____:
@gather_metrics("code")
def code(
self,
body: SupportsStr,
language: str | None = "python",
*,
line_numbers: bool = False,
wrap_lines: bool = False,
height: Height | None = "content",
width: Width = "stretch",
) -> DeltaGenerator:
"""Display a code block with optional syntax highlighting.
Parameters
----------
body : str
The string to display as code or monospace text.
language : str or None
The language that the code is written in, for syntax highlighting.
This defaults to ``"python"``. If this is ``None``, the code will
be plain, monospace text.
For a list of available ``language`` values, see
`react-syntax-highlighter
<https://github.com/react-syntax-highlighter/react-syntax-highlighter/blob/master/AVAILABLE_LANGUAGES_PRISM.MD>`_
on GitHub.
line_numbers : bool
An optional boolean indicating whether to show line numbers to the
left of the code block. This defaults to ``False``.
wrap_lines : bool
An optional boolean indicating whether to wrap lines. This defaults
to ``False``.
height : "content", "stretch", or int
The height of the code block element. This can be one of the following:
- ``"content"`` (default): The height of the element matches the
height of its content.
- ``"stretch"``: The height of the element matches the height of
its content or the height of the parent container, whichever is
larger. If the element is not in a parent container, the height
of the element matches the height of its content.
- An integer specifying the height in pixels: The element has a
fixed height. If the content is larger than the specified
height, scrolling is enabled.
.. note::
Use scrolling containers sparingly. If you use scrolling
containers, avoid heights that exceed 500 pixels. Otherwise,
the scroll surface of the container might cover the majority of
the screen on mobile devices, which makes it hard to scroll the
rest of the app.
width : "stretch", "content", or int
The width of the code block element. This can be one of the following:
- ``"stretch"`` (default): The width of the element matches the
width of the parent container.
- ``"content"``: The width of the element matches the width of its
content, but doesn't exceed the width of the parent container.
- An integer specifying the width in pixels: The element has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the element matches the width
of the parent container.
Examples
--------
>>> import streamlit as st
>>>
>>> code = '''def hello():
... print("Hello, Streamlit!")'''
>>> st.code(code, language="python")
.. output ::
https://doc-code.streamlit.app/
height: 220px
>>> import streamlit as st
>>> code = '''Is it a crown or boat?
... ii
... iiiiii
... WWw .iiiiiiii. ...:
... WWWWWWw .iiiiiiiiiiii. ........
... WWWWWWWWWWw iiiiiiiiiiiiiiii ...........
... WWWWWWWWWWWWWWwiiiiiiiiiiiiiiiii............
... WWWWWWWWWWWWWWWWWWwiiiiiiiiiiiiii.........
... WWWWWWWWWWWWWWWWWWWWWWwiiiiiiiiii.......
... WWWWWWWWWWWWWWWWWWWWWWWWWWwiiiiiii....
... WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWwiiii.
... -MMMWWWWWWWWWWWWWWWWWWWWWWMMM-
... '''
>>> st.code(code, language=None)
.. output ::
https://doc-code-ascii.streamlit.app/
height: 380px
"""
code_proto = CodeProto()
code_proto.code_text = re.sub(r"\n\Z", "", re.sub(r"\A\n", "", str(body)))
code_proto.language = language or "plaintext"
code_proto.show_line_numbers = line_numbers
code_proto.wrap_lines = wrap_lines
if height is None:
height = "content"
else:
validate_height(height, allow_content=True)
validate_width(width, allow_content=True)
layout_config = LayoutConfig(height=height, width=width)
return self.dg._enqueue("code", code_proto, layout_config=layout_config)
@property
def dg(self) -> DeltaGenerator:
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self)
| CodeMixin |
python | huggingface__transformers | src/transformers/pipelines/token_classification.py | {
"start": 1704,
"end": 4778
} | class ____(ExplicitEnum):
"""All the valid aggregation strategies for TokenClassificationPipeline"""
NONE = "none"
SIMPLE = "simple"
FIRST = "first"
AVERAGE = "average"
MAX = "max"
@add_end_docstrings(
build_pipeline_init_args(has_tokenizer=True),
r"""
ignore_labels (`list[str]`, defaults to `["O"]`):
A list of labels to ignore.
grouped_entities (`bool`, *optional*, defaults to `False`):
DEPRECATED, use `aggregation_strategy` instead. Whether or not to group the tokens corresponding to the
same entity together in the predictions or not.
stride (`int`, *optional*):
If stride is provided, the pipeline is applied on all the text. The text is split into chunks of size
model_max_length. Works only with fast tokenizers and `aggregation_strategy` different from `NONE`. The
value of this argument defines the number of overlapping tokens between chunks. In other words, the model
will shift forward by `tokenizer.model_max_length - stride` tokens each step.
aggregation_strategy (`str`, *optional*, defaults to `"none"`):
The strategy to fuse (or not) tokens based on the model prediction.
- "none" : Will simply not do any aggregation and simply return raw results from the model
- "simple" : Will attempt to group entities following the default schema. (A, B-TAG), (B, I-TAG), (C,
I-TAG), (D, B-TAG2) (E, B-TAG2) will end up being [{"word": ABC, "entity": "TAG"}, {"word": "D",
"entity": "TAG2"}, {"word": "E", "entity": "TAG2"}] Notice that two consecutive B tags will end up as
different entities. On word based languages, we might end up splitting words undesirably : Imagine
Microsoft being tagged as [{"word": "Micro", "entity": "ENTERPRISE"}, {"word": "soft", "entity":
"NAME"}]. Look for FIRST, MAX, AVERAGE for ways to mitigate that and disambiguate words (on languages
that support that meaning, which is basically tokens separated by a space). These mitigations will
only work on real words, "New york" might still be tagged with two different entities.
- "first" : (works only on word based models) Will use the `SIMPLE` strategy except that words, cannot
end up with different tags. Words will simply use the tag of the first token of the word when there
is ambiguity.
- "average" : (works only on word based models) Will use the `SIMPLE` strategy except that words,
cannot end up with different tags. scores will be averaged first across tokens, and then the maximum
label is applied.
- "max" : (works only on word based models) Will use the `SIMPLE` strategy except that words, cannot
end up with different tags. Word entity will simply be the token with the maximum score.""",
)
| AggregationStrategy |
python | crytic__slither | slither/tools/upgradeability/checks/variable_initialization.py | {
"start": 195,
"end": 1484
} | class ____(AbstractCheck):
ARGUMENT = "variables-initialized"
IMPACT = CheckClassification.HIGH
HELP = "State variables with an initial value"
WIKI = "https://github.com/crytic/slither/wiki/Upgradeability-Checks#state-variable-initialized"
WIKI_TITLE = "State variable initialized"
# region wiki_description
WIKI_DESCRIPTION = """
Detect state variables that are initialized.
"""
# endregion wiki_description
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract Contract{
uint variable = 10;
}
```
Using `Contract` will the delegatecall proxy pattern will lead `variable` to be 0 when called through the proxy.
"""
# endregion wiki_exploit_scenario
# region wiki_recommendation
WIKI_RECOMMENDATION = """
Using initialize functions to write initial values in state variables.
"""
# endregion wiki_recommendation
REQUIRE_CONTRACT = True
def _check(self) -> List[Output]:
results = []
for s in self.contract.storage_variables_ordered:
if s.initialized:
info: CHECK_INFO = [s, " is a state variable with an initial value.\n"]
json = self.generate_result(info)
results.append(json)
return results
| VariableWithInit |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP008.py | {
"start": 4478,
"end": 4653
} | class ____(ParentD):
def f(self):
def x():
super = 1
super # Python injects __class__ into scope
builtins.super(ChildD8, self).f()
| ChildD8 |
python | huggingface__transformers | tests/models/pop2piano/test_feature_extraction_pop2piano.py | {
"start": 2528,
"end": 10879
} | class ____(SequenceFeatureExtractionTestMixin, unittest.TestCase):
feature_extraction_class = Pop2PianoFeatureExtractor if requirements_available else None
def setUp(self):
self.feat_extract_tester = Pop2PianoFeatureExtractionTester(self)
def test_feat_extract_from_and_save_pretrained(self):
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
check_json_file_has_correct_format(saved_file)
feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
dict_first = feat_extract_first.to_dict()
dict_second = feat_extract_second.to_dict()
mel_1 = feat_extract_first.use_mel
mel_2 = feat_extract_second.use_mel
self.assertTrue(np.allclose(mel_1, mel_2))
self.assertEqual(dict_first, dict_second)
def test_feat_extract_to_json_file(self):
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
json_file_path = os.path.join(tmpdirname, "feat_extract.json")
feat_extract_first.to_json_file(json_file_path)
feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
dict_first = feat_extract_first.to_dict()
dict_second = feat_extract_second.to_dict()
mel_1 = feat_extract_first.use_mel
mel_2 = feat_extract_second.use_mel
self.assertTrue(np.allclose(mel_1, mel_2))
self.assertEqual(dict_first, dict_second)
def test_call(self):
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
speech_input = np.zeros([1000000], dtype=np.float32)
input_features = feature_extractor(speech_input, sampling_rate=16_000, return_tensors="np")
self.assertTrue(input_features.input_features.ndim == 3)
self.assertEqual(input_features.input_features.shape[-1], 512)
self.assertTrue(input_features.beatsteps.ndim == 2)
self.assertTrue(input_features.extrapolated_beatstep.ndim == 2)
def test_integration(self):
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
speech_samples = ds.sort("id").select([0])["audio"]
input_speech = [x["array"] for x in speech_samples][0]
sampling_rate = [x["sampling_rate"] for x in speech_samples][0]
feature_extractor = Pop2PianoFeatureExtractor.from_pretrained("sweetcocoa/pop2piano")
input_features = feature_extractor(
input_speech, sampling_rate=sampling_rate, return_tensors="pt"
).input_features
EXPECTED_INPUT_FEATURES = torch.tensor(
[[-7.1493, -6.8701, -4.3214], [-5.9473, -5.7548, -3.8438], [-6.1324, -5.9018, -4.3778]]
)
torch.testing.assert_close(input_features[0, :3, :3], EXPECTED_INPUT_FEATURES, rtol=1e-4, atol=1e-4)
def test_attention_mask(self):
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
speech_input1 = np.zeros([1_000_000], dtype=np.float32)
speech_input2 = np.random.randint(low=0, high=10, size=500_000).astype(np.float32)
input_features = feature_extractor(
[speech_input1, speech_input2],
sampling_rate=[44_100, 16_000],
return_tensors="np",
return_attention_mask=True,
)
self.assertTrue(hasattr(input_features, "attention_mask"))
# check shapes
self.assertTrue(input_features["attention_mask"].ndim == 2)
self.assertEqual(input_features["attention_mask_beatsteps"].shape[0], 2)
self.assertEqual(input_features["attention_mask_extrapolated_beatstep"].shape[0], 2)
# check if they are any values except 0 and 1
self.assertTrue(np.max(input_features["attention_mask"]) == 1)
self.assertTrue(np.max(input_features["attention_mask_beatsteps"]) == 1)
self.assertTrue(np.max(input_features["attention_mask_extrapolated_beatstep"]) == 1)
self.assertTrue(np.min(input_features["attention_mask"]) == 0)
self.assertTrue(np.min(input_features["attention_mask_beatsteps"]) == 0)
self.assertTrue(np.min(input_features["attention_mask_extrapolated_beatstep"]) == 0)
def test_batch_feature(self):
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
speech_input1 = np.zeros([1_000_000], dtype=np.float32)
speech_input2 = np.ones([2_000_000], dtype=np.float32)
speech_input3 = np.random.randint(low=0, high=10, size=500_000).astype(np.float32)
input_features = feature_extractor(
[speech_input1, speech_input2, speech_input3],
sampling_rate=[44_100, 16_000, 48_000],
return_attention_mask=True,
)
self.assertEqual(len(input_features["input_features"].shape), 3)
# check shape
self.assertEqual(input_features["beatsteps"].shape[0], 3)
self.assertEqual(input_features["extrapolated_beatstep"].shape[0], 3)
def test_batch_feature_np(self):
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
speech_input1 = np.zeros([1_000_000], dtype=np.float32)
speech_input2 = np.ones([2_000_000], dtype=np.float32)
speech_input3 = np.random.randint(low=0, high=10, size=500_000).astype(np.float32)
input_features = feature_extractor(
[speech_input1, speech_input2, speech_input3],
sampling_rate=[44_100, 16_000, 48_000],
return_tensors="np",
return_attention_mask=True,
)
# check np array or not
self.assertEqual(type(input_features["input_features"]), np.ndarray)
# check shape
self.assertEqual(len(input_features["input_features"].shape), 3)
def test_batch_feature_pt(self):
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
speech_input1 = np.zeros([1_000_000], dtype=np.float32)
speech_input2 = np.ones([2_000_000], dtype=np.float32)
speech_input3 = np.random.randint(low=0, high=10, size=500_000).astype(np.float32)
input_features = feature_extractor(
[speech_input1, speech_input2, speech_input3],
sampling_rate=[44_100, 16_000, 48_000],
return_tensors="pt",
return_attention_mask=True,
)
# check pt tensor or not
self.assertEqual(type(input_features["input_features"]), torch.Tensor)
# check shape
self.assertEqual(len(input_features["input_features"].shape), 3)
@unittest.skip(
"Pop2PianoFeatureExtractor does not supports padding externally (while processing audios in batches padding is automatically applied to max_length)"
)
def test_padding_accepts_tensors_pt(self):
pass
@unittest.skip(
"Pop2PianoFeatureExtractor does not supports padding externally (while processing audios in batches padding is automatically applied to max_length)"
)
def test_padding_accepts_tensors_tf(self):
pass
@unittest.skip(
"Pop2PianoFeatureExtractor does not supports padding externally (while processing audios in batches padding is automatically applied to max_length)"
)
def test_padding_from_list(self):
pass
@unittest.skip(
"Pop2PianoFeatureExtractor does not supports padding externally (while processing audios in batches padding is automatically applied to max_length)"
)
def test_padding_from_array(self):
pass
@unittest.skip(reason="Pop2PianoFeatureExtractor does not support truncation")
def test_attention_mask_with_truncation(self):
pass
@unittest.skip(reason="Pop2PianoFeatureExtractor does not supports truncation")
def test_truncation_from_array(self):
pass
@unittest.skip(reason="Pop2PianoFeatureExtractor does not supports truncation")
def test_truncation_from_list(self):
pass
| Pop2PianoFeatureExtractionTest |
python | pennersr__django-allauth | allauth/socialaccount/providers/flickr/provider.py | {
"start": 910,
"end": 2077
} | class ____(OAuthProvider):
id = "flickr"
name = "Flickr"
account_class = FlickrAccount
oauth_adapter_class = FlickrOAuthAdapter
def get_default_scope(self):
scope = []
return scope
def get_auth_params_from_request(self, request, action):
ret = super().get_auth_params_from_request(request, action)
if "perms" not in ret:
ret["perms"] = "read"
return ret
def get_profile_fields(self):
default_fields = [
"id",
"first-name",
"last-name",
"email-address",
"picture-url",
"public-profile-url",
]
fields = self.get_settings().get("PROFILE_FIELDS", default_fields)
return fields
def extract_uid(self, data):
return data["person"]["nsid"]
def extract_common_fields(self, data):
person = data.get("person", {})
name = person.get("realname", {}).get("_content")
username = person.get("username", {}).get("_content")
return dict(email=data.get("email-address"), name=name, username=username)
provider_classes = [FlickrProvider]
| FlickrProvider |
python | google__jax | tests/sparsify_test.py | {
"start": 23283,
"end": 23617
} | class ____(SparsifyTest):
@classmethod
def sparsify(cls, f):
return sparsify(f, use_tracer=True)
def testTracerIsInstanceCheck(self):
@self.sparsify
def f(x):
self.assertIsInstance(x, SparseTracer)
f(jnp.arange(5))
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| SparsifyTracerTest |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_barcode.py | {
"start": 905,
"end": 1968
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_barcode"
condition_value_keys = ("barcode_type",)
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, barcode_type, **kwargs):
return column.apply(lambda x: is_valid_barcode(x, barcode_type))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidBarcode |
python | pypa__pipenv | pipenv/vendor/plette/models/base.py | {
"start": 2972,
"end": 3506
} | class ____(DataModelCollection):
"""A mapping of data views.
The keys are primitive values, while values are instances of `item_class`.
"""
@classmethod
def validate(cls, data):
for d in data.values():
cls.item_class.validate(d)
def __iter__(self):
return iter(self._data)
def keys(self):
return self._data.keys()
def values(self):
return [self[k] for k in self._data]
def items(self):
return [(k, self[k]) for k in self._data]
| DataModelMapping |
python | weaviate__weaviate-python-client | weaviate/exceptions.py | {
"start": 4601,
"end": 4753
} | class ____(WeaviateBaseError):
"""Scope was not provided with client credential flow."""
MissingScopeException = MissingScopeError
| MissingScopeError |
python | django__django | tests/gis_tests/layermap/models.py | {
"start": 1951,
"end": 3060
} | class ____(models.Model):
uuid = models.UUIDField(primary_key=True, editable=False)
geom = models.PolygonField(srid=4326)
datetime = models.DateTimeField()
integer = models.IntegerField()
num = models.FloatField()
boolean = models.BooleanField()
name = models.CharField(max_length=20)
# Mapping dictionaries for the models above.
co_mapping = {
"name": "Name",
# ForeignKey's use another mapping dictionary for the _related_ Model
# (State in this case).
"state": {"name": "State"},
"mpoly": "MULTIPOLYGON", # Will convert POLYGON features into MULTIPOLYGONS.
}
cofeat_mapping = {
"name": "Name",
"poly": "POLYGON",
}
city_mapping = {
"name": "Name",
"population": "Population",
"density": "Density",
"dt": "Created",
"point": "POINT",
}
inter_mapping = {
"name": "Name",
"length": "Length",
"path": "LINESTRING",
}
has_nulls_mapping = {
"geom": "POLYGON",
"uuid": "uuid",
"datetime": "datetime",
"name": "name",
"integer": "integer",
"num": "num",
"boolean": "boolean",
}
| DoesNotAllowNulls |
python | django__django | django/test/testcases.py | {
"start": 62478,
"end": 62796
} | class ____(FSFilesHandler):
"""
Handler for serving static files. A private class that is meant to be used
solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.STATIC_ROOT
def get_base_url(self):
return settings.STATIC_URL
| _StaticFilesHandler |
python | openai__openai-python | src/openai/types/create_embedding_response.py | {
"start": 265,
"end": 446
} | class ____(BaseModel):
prompt_tokens: int
"""The number of tokens used by the prompt."""
total_tokens: int
"""The total number of tokens used by the request."""
| Usage |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/fixtures/sql.py | {
"start": 6458,
"end": 6866
} | class ____:
@util.memoized_property
def _event_fns(self):
return set()
def event_listen(self, target, name, fn, **kw):
self._event_fns.add((target, name, fn))
event.listen(target, name, fn, **kw)
@config.fixture(autouse=True, scope="function")
def _remove_events(self):
yield
for key in self._event_fns:
event.remove(*key)
| RemovesEvents |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/cli/list.py | {
"start": 6400,
"end": 14613
} | class ____(str, Enum):
ASSET = "asset"
ASSET_CHECK = "asset_check"
JOB = "job"
RESOURCE = "resource"
SCHEDULE = "schedule"
SENSOR = "sensor"
DEFAULT_COLUMNS = [
DefsColumn.KEY,
DefsColumn.GROUP,
DefsColumn.DEPS,
DefsColumn.KINDS,
DefsColumn.DESCRIPTION,
DefsColumn.CRON,
]
def _supports_column(column: DefsColumn, defs_type: DefsType) -> bool:
if column == DefsColumn.KEY:
return True
elif column == DefsColumn.GROUP:
return defs_type in (DefsType.ASSET,)
elif column == DefsColumn.DEPS:
return defs_type in (DefsType.ASSET, DefsType.ASSET_CHECK)
elif column == DefsColumn.KINDS:
return defs_type in (DefsType.ASSET,)
elif column == DefsColumn.DESCRIPTION:
return defs_type in (DefsType.ASSET, DefsType.ASSET_CHECK, DefsType.JOB)
elif column == DefsColumn.TAGS:
return defs_type in (DefsType.ASSET,)
elif column == DefsColumn.CRON:
return defs_type in (DefsType.SCHEDULE,)
elif column == DefsColumn.IS_EXECUTABLE:
return defs_type in (DefsType.ASSET,)
else:
raise ValueError(f"Invalid column: {column}")
def _get_asset_value(column: DefsColumn, asset: DgAssetMetadata) -> Optional[str]:
if column == DefsColumn.KEY:
return asset.key
elif column == DefsColumn.GROUP:
return asset.group
elif column == DefsColumn.DEPS:
return "\n".join(asset.deps)
elif column == DefsColumn.KINDS:
return "\n".join(asset.kinds)
elif column == DefsColumn.DESCRIPTION:
return asset.description
elif column == DefsColumn.TAGS:
return "\n".join(asset.tags)
elif column == DefsColumn.IS_EXECUTABLE:
return str(asset.is_executable)
else:
raise ValueError(f"Invalid column: {column}")
def _get_asset_check_value(column: DefsColumn, asset_check: DgAssetCheckMetadata) -> Optional[str]:
if column == DefsColumn.KEY:
return asset_check.key
elif column == DefsColumn.DEPS:
return "\n".join(asset_check.additional_deps)
elif column == DefsColumn.DESCRIPTION:
return asset_check.description
else:
raise ValueError(f"Invalid column: {column}")
def _get_job_value(column: DefsColumn, job: DgJobMetadata) -> Optional[str]:
if column == DefsColumn.KEY:
return job.name
elif column == DefsColumn.DESCRIPTION:
return job.description
else:
raise ValueError(f"Invalid column: {column}")
def _get_resource_value(column: DefsColumn, resource: DgResourceMetadata) -> Optional[str]:
if column == DefsColumn.KEY:
return resource.name
else:
raise ValueError(f"Invalid column: {column}")
def _get_schedule_value(column: DefsColumn, schedule: DgScheduleMetadata) -> Optional[str]:
if column == DefsColumn.KEY:
return schedule.name
elif column == DefsColumn.CRON:
return schedule.cron_schedule
else:
raise ValueError(f"Invalid column: {column}")
def _get_sensor_value(column: DefsColumn, sensor: DgSensorMetadata) -> Optional[str]:
if column == DefsColumn.KEY:
return sensor.name
else:
raise ValueError(f"Invalid column: {column}")
GET_VALUE_BY_DEFS_TYPE = {
DefsType.ASSET: _get_asset_value,
DefsType.ASSET_CHECK: _get_asset_check_value,
DefsType.JOB: _get_job_value,
DefsType.RESOURCE: _get_resource_value,
DefsType.SCHEDULE: _get_schedule_value,
DefsType.SENSOR: _get_sensor_value,
}
def _get_value(column: DefsColumn, defs_type: DefsType, defn: Any) -> Optional[Text]:
raw_value = GET_VALUE_BY_DEFS_TYPE[defs_type](column, defn)
value = Text(raw_value) if raw_value else None
if value and column in _TRUNCATED_COLUMN_WIDTHS:
value.truncate(max_width=_TRUNCATED_COLUMN_WIDTHS[column], overflow="ellipsis")
return value
def _get_table(columns: Sequence[DefsColumn], defs_type: DefsType, defs: Sequence[Any]) -> "Table":
columns_to_display = [column for column in columns if _supports_column(column, defs_type)]
table = DagsterInnerTable(
[column.value.replace("_", " ").capitalize() for column in columns_to_display]
)
table.columns[-1].max_width = 100
for column_type, table_column in zip(columns_to_display, table.columns):
if column_type in _TRUNCATED_COLUMN_WIDTHS:
table_column.max_width = _TRUNCATED_COLUMN_WIDTHS[column_type]
for defn in sorted(defs, key=lambda x: str(_get_value(DefsColumn.KEY, defs_type, x))):
table.add_row(
*(_get_value(column, defs_type, defn) for column in columns_to_display),
)
return table
@list_group.command(name="defs", aliases=["def"], cls=DgClickCommand)
@click.option(
"--json",
"output_json",
is_flag=True,
default=False,
help="Output as JSON instead of a table.",
)
@click.option(
"--path",
"-p",
type=click.Path(
resolve_path=True,
path_type=Path,
),
help="Path to the definitions to list.",
)
@click.option(
"--assets",
"-a",
help="Asset selection to list.",
)
@click.option(
"--columns",
"-c",
multiple=True,
help="Columns to display. Either a comma-separated list of column names, or multiple "
"invocations of the flag. Available columns: "
+ ", ".join(column.value for column in DefsColumn),
)
@dg_global_options
@dg_path_options
@cli_telemetry_wrapper
def list_defs_command(
output_json: bool,
target_path: Path,
path: Optional[Path],
assets: Optional[str],
columns: Optional[Sequence[str]],
**global_options: object,
) -> None:
"""List registered Dagster definitions in the current project environment."""
from rich.console import Console
from rich.table import Table
cli_config = normalize_cli_config(global_options, click.get_current_context())
dg_context = DgContext.for_project_environment(target_path, cli_config)
from dagster.components.list import list_definitions
if columns:
if len(columns) == 1 and "," in columns[0]:
columns = columns[0].split(",")
defs_columns = [_defs_column_from_str(column.lower()) for column in columns]
if DefsColumn.KEY not in defs_columns:
defs_columns = [DefsColumn.KEY] + defs_columns
else:
defs_columns = DEFAULT_COLUMNS
# capture stdout during the definitions load so it doesn't pollute the structured output
with capture_stdout(), disable_dagster_warnings():
definitions = list_definitions(
dg_context=dg_context,
path=path,
asset_selection=assets,
)
# JSON
if output_json: # pass it straight through
if columns:
raise click.UsageError("Cannot use --columns with --json")
click.echo(json.dumps(definitions.to_dict(), indent=4))
# TABLE
else:
if definitions.is_empty:
click.echo("No definitions are defined for this project.")
return
console = Console()
table = Table(border_style="dim")
table.add_column("Section", style="bold")
table.add_column("Definitions")
if definitions.assets:
table.add_row("Assets", _get_table(defs_columns, DefsType.ASSET, definitions.assets))
if definitions.asset_checks:
table.add_row(
"Asset Checks",
_get_table(defs_columns, DefsType.ASSET_CHECK, definitions.asset_checks),
)
if definitions.jobs:
table.add_row("Jobs", _get_table(defs_columns, DefsType.JOB, definitions.jobs))
if definitions.schedules:
table.add_row(
"Schedules", _get_table(defs_columns, DefsType.SCHEDULE, definitions.schedules)
)
if definitions.sensors:
table.add_row("Sensors", _get_table(defs_columns, DefsType.SENSOR, definitions.sensors))
if definitions.resources:
table.add_row(
"Resources", _get_table(defs_columns, DefsType.RESOURCE, definitions.resources)
)
console.print(table)
# ########################
# ##### ENVIRONMENT
# ########################
@dataclass
| DefsType |
python | pytorch__pytorch | test/distributed/_composable/test_replicate_training.py | {
"start": 43566,
"end": 47183
} | class ____(FSDPTest):
@property
def world_size(self) -> int:
return min(4, torch.get_device_module(device_type).device_count())
def init_global_mesh(self) -> DeviceMesh:
return init_device_mesh(
device_type.type,
(2, 2),
mesh_dim_names=("dp_replicate", "tp"),
)
@skip_if_lt_x_gpu(8)
def test_replicate_tp(self):
global_mesh = self.init_global_mesh()
self.run_subtests(
{
"use_activation_checkpointing": [False, True],
"mlp_dim": [3, 5, 16, 17],
"foreach": [False],
},
functools.partial(self._test_replicate_tp, global_mesh),
)
def _test_replicate_tp(
self,
global_mesh: DeviceMesh,
use_activation_checkpointing: bool,
mlp_dim: int,
foreach: bool,
):
dp_mesh, tp_mesh = global_mesh["dp_replicate"], global_mesh["tp"]
dp_pg = dp_mesh._flatten().get_group() # used for `replicate()`
torch.manual_seed(42)
model = MLPStack(mlp_dim)
ref_model = copy.deepcopy(model).to(device_type)
ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2, foreach=foreach)
parallelize_plan = {
# Pass `use_local_output=False` to keep as DTensor to preserve
# uneven activation dims
"0.in_proj": ColwiseParallel(use_local_output=False),
"0.out_proj": RowwiseParallel(use_local_output=False),
"1.in_proj": ColwiseParallel(use_local_output=False),
"1.out_proj": RowwiseParallel(use_local_output=False),
"2.in_proj": ColwiseParallel(use_local_output=False),
"2.out_proj": (RowwiseParallel()),
}
model = parallelize_module(model, tp_mesh, parallelize_plan)
for module in model:
if isinstance(module, nn.LayerNorm):
continue
if use_activation_checkpointing:
checkpoint(module)
replicate(module, mesh=dp_mesh)
replicate(model, mesh=dp_mesh)
# Checking parameters match orig model is critical to validate .full_tensor correctly replicates the
# strided-sharded layers.
for ref_p, p in zip(ref_model.parameters(), model.parameters()):
self.assertIsInstance(p, DTensor)
self.assertEqual(ref_p, p.full_tensor())
optim = torch.optim.Adam(model.parameters(), lr=1e-2, foreach=foreach)
torch.manual_seed(42 + dp_pg.rank() + 1)
device = device_type
for iter_idx in range(10):
inp = torch.randn((8, mlp_dim), device=device)
losses: list[torch.Tensor] = []
for _model in (ref_model, model):
losses.append(_model(inp).sum())
losses[-1].backward()
for param in ref_model.parameters():
if param.grad is not None:
dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
for _optim in (ref_optim, optim):
_optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
_optim.step()
self.assertEqual(losses[0], losses[1])
check_sharded_parity(self, ref_model, model)
for _, p in model.named_parameters():
self.assertIsInstance(p, DTensor)
self.assertEqual(p.device_mesh.ndim, 2)
self.assertEqual(len(p.placements), 2)
self.assertEqual(p.device_mesh.mesh_dim_names, ("dp_replicate", "tp"))
if __name__ == "__main__":
run_tests()
| TestReplicateTPTraining |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/interfaces.py | {
"start": 13053,
"end": 13825
} | class ____(ReflectedConstraint):
"""Dictionary representing the reflected elements corresponding to
:class:`.ForeignKeyConstraint`.
The :class:`.ReflectedForeignKeyConstraint` structure is returned by
the :meth:`.Inspector.get_foreign_keys` method.
"""
constrained_columns: List[str]
"""local column names which comprise the foreign key"""
referred_schema: Optional[str]
"""schema name of the table being referred"""
referred_table: str
"""name of the table being referred"""
referred_columns: List[str]
"""referred column names that correspond to ``constrained_columns``"""
options: NotRequired[Dict[str, Any]]
"""Additional options detected for this foreign key constraint"""
| ReflectedForeignKeyConstraint |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 565930,
"end": 566635
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cost", "limit", "node_count", "remaining", "reset_at", "used")
cost = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="cost")
limit = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="limit")
node_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="nodeCount")
remaining = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="remaining")
reset_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="resetAt")
used = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="used")
| RateLimit |
python | getsentry__sentry | tests/sentry/rules/processing/test_delayed_processing.py | {
"start": 61585,
"end": 62073
} | class ____(TestCase):
"""
Tests for the DataAndGroups class. Currently, this is just to pass codecov.
"""
def test_repr(self) -> None:
condition = DataAndGroups(data=TEST_RULE_SLOW_CONDITION, group_ids={1, 2}, rule_id=1)
assert (
repr(condition)
== "<DataAndGroups data: {'id': 'sentry.rules.conditions.event_frequency.EventFrequencyCondition', 'value': 1, 'interval': '1h'} group_ids: {1, 2} rule_id: 1>"
)
| DataAndGroupsTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_button03.py | {
"start": 315,
"end": 847
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("button03.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_button("C2", {})
worksheet.insert_button("E5", {})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | django__django | django/db/backends/mysql/client.py | {
"start": 79,
"end": 2988
} | class ____(BaseDatabaseClient):
executable_name = "mysql"
@classmethod
def settings_to_cmd_args_env(cls, settings_dict, parameters):
args = [cls.executable_name]
env = None
database = settings_dict["OPTIONS"].get(
"database",
settings_dict["OPTIONS"].get("db", settings_dict["NAME"]),
)
user = settings_dict["OPTIONS"].get("user", settings_dict["USER"])
password = settings_dict["OPTIONS"].get(
"password",
settings_dict["OPTIONS"].get("passwd", settings_dict["PASSWORD"]),
)
host = settings_dict["OPTIONS"].get("host", settings_dict["HOST"])
port = settings_dict["OPTIONS"].get("port", settings_dict["PORT"])
server_ca = settings_dict["OPTIONS"].get("ssl", {}).get("ca")
client_cert = settings_dict["OPTIONS"].get("ssl", {}).get("cert")
client_key = settings_dict["OPTIONS"].get("ssl", {}).get("key")
defaults_file = settings_dict["OPTIONS"].get("read_default_file")
charset = settings_dict["OPTIONS"].get("charset")
# Seems to be no good way to set sql_mode with CLI.
if defaults_file:
args += ["--defaults-file=%s" % defaults_file]
if user:
args += ["--user=%s" % user]
if password:
# The MYSQL_PWD environment variable usage is discouraged per
# MySQL's documentation due to the possibility of exposure through
# `ps` on old Unix flavors but --password suffers from the same
# flaw on even more systems. Usage of an environment variable also
# prevents password exposure if the subprocess.run(check=True) call
# raises a CalledProcessError since the string representation of
# the latter includes all of the provided `args`.
env = {"MYSQL_PWD": password}
if host:
if "/" in host:
args += ["--socket=%s" % host]
else:
args += ["--host=%s" % host]
if port:
args += ["--port=%s" % port]
if server_ca:
args += ["--ssl-ca=%s" % server_ca]
if client_cert:
args += ["--ssl-cert=%s" % client_cert]
if client_key:
args += ["--ssl-key=%s" % client_key]
if charset:
args += ["--default-character-set=%s" % charset]
if database:
args += [database]
args.extend(parameters)
return args, env
def runshell(self, parameters):
sigint_handler = signal.getsignal(signal.SIGINT)
try:
# Allow SIGINT to pass to mysql to abort queries.
signal.signal(signal.SIGINT, signal.SIG_IGN)
super().runshell(parameters)
finally:
# Restore the original SIGINT handler.
signal.signal(signal.SIGINT, sigint_handler)
| DatabaseClient |
python | PrefectHQ__prefect | tests/server/services/test_scheduler.py | {
"start": 22110,
"end": 23781
} | class ____:
@pytest.mark.parametrize(
"interval,n",
[
# schedule until we at least exceed an hour
(datetime.timedelta(minutes=1), 61),
# schedule at least 3 runs
(datetime.timedelta(hours=1), 3),
# schedule until we at least exceed an hour
(datetime.timedelta(minutes=5), 13),
# schedule until at most 100 days
(datetime.timedelta(days=60), 2),
],
)
async def test_create_schedule_respects_max_future_time(
self,
flow: schemas.core.Flow,
session: AsyncSession,
interval: datetime.timedelta,
n: int,
):
await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
name="test",
flow_id=flow.id,
schedules=[
schemas.core.DeploymentSchedule(
schedule=schemas.schedules.IntervalSchedule(
interval=interval,
anchor_date=datetime.datetime.now(timezone.utc)
+ datetime.timedelta(seconds=1),
),
active=True,
)
],
),
)
await session.commit()
# assert clean slate
assert (await models.flow_runs.count_flow_runs(session)) == 0
# run scheduler
service = Scheduler()
await service.start(loops=1)
runs = await models.flow_runs.read_flow_runs(session)
assert len(runs) == n
| TestScheduleRulesWaterfall |
python | rushter__MLAlgorithms | mla/knn.py | {
"start": 2079,
"end": 2281
} | class ____(KNNBase):
"""Nearest neighbors regressor."""
def aggregate(self, neighbors_targets):
"""Return the mean of all targets."""
return np.mean(neighbors_targets)
| KNNRegressor |
python | pennersr__django-allauth | allauth/headless/base/response.py | {
"start": 5114,
"end": 5297
} | class ____(BaseAuthenticationResponse):
def __init__(self, request, status=HTTPStatus.UNAUTHORIZED):
super().__init__(request, user=None, status=status)
| UnauthorizedResponse |
python | EpistasisLab__tpot | tpot/builtin_modules/estimatortransformer.py | {
"start": 1802,
"end": 7528
} | class ____(TransformerMixin, BaseEstimator ):
def __init__(self, estimator, method='auto', passthrough=False, cross_val_predict_cv=None):
"""
A class for using a sklearn estimator as a transformer. When calling fit_transform, this class returns the out put of cross_val_predict
and trains the estimator on the full dataset. When calling transform, this class uses the estimator fit on the full dataset to transform the data.
Parameters
----------
estimator : sklear.base. BaseEstimator
The estimator to use as a transformer.
method : str, default='auto'
The method to use for the transformation. If 'auto', will try to use predict_proba, decision_function, or predict in that order.
- predict_proba: use the predict_proba method of the estimator.
- decision_function: use the decision_function method of the estimator.
- predict: use the predict method of the estimator.
passthrough : bool, default=False
Whether to pass the original input through.
cross_val_predict_cv : int, default=0
Number of folds to use for the cross_val_predict function for inner classifiers and regressors. Estimators will still be fit on the full dataset, but the following node will get the outputs from cross_val_predict.
- 0-1 : When set to 0 or 1, the cross_val_predict function will not be used. The next layer will get the outputs from fitting and transforming the full dataset.
- >=2 : When fitting pipelines with inner classifiers or regressors, they will still be fit on the full dataset.
However, the output to the next node will come from cross_val_predict with the specified number of folds.
"""
self.estimator = estimator
self.method = method
self.passthrough = passthrough
self.cross_val_predict_cv = cross_val_predict_cv
def fit(self, X, y=None):
self.estimator.fit(X, y)
return self
def transform(self, X, y=None):
#Does not do cross val predict, just uses the estimator to transform the data. This is used for the actual transformation in practice, so the real transformation without fitting is needed
if self.method == 'auto':
if hasattr(self.estimator, 'predict_proba'):
method = 'predict_proba'
elif hasattr(self.estimator, 'decision_function'):
method = 'decision_function'
elif hasattr(self.estimator, 'predict'):
method = 'predict'
else:
raise ValueError('Estimator has no valid method')
else:
method = self.method
output = getattr(self.estimator, method)(X)
output=np.array(output)
if len(output.shape) == 1:
output = output.reshape(-1,1)
if self.passthrough:
return np.hstack((output, X))
else:
return output
def fit_transform(self, X, y=None):
#Does use cross_val_predict if cross_val_predict_cv is greater than 0. this function is only used in training the model.
self.estimator.fit(X,y)
if self.method == 'auto':
if hasattr(self.estimator, 'predict_proba'):
method = 'predict_proba'
elif hasattr(self.estimator, 'decision_function'):
method = 'decision_function'
elif hasattr(self.estimator, 'predict'):
method = 'predict'
else:
raise ValueError('Estimator has no valid method')
else:
method = self.method
if self.cross_val_predict_cv is not None:
output = cross_val_predict(self.estimator, X, y=y, cv=self.cross_val_predict_cv)
else:
output = getattr(self.estimator, method)(X)
#reshape if needed
if len(output.shape) == 1:
output = output.reshape(-1,1)
output=np.array(output)
if self.passthrough:
return np.hstack((output, X))
else:
return output
def _estimator_has(attr):
'''Check if we can delegate a method to the underlying estimator.
First, we check the first fitted final estimator if available, otherwise we
check the unfitted final estimator.
'''
return lambda self: (self.estimator is not None and
hasattr(self.estimator, attr)
)
@available_if(_estimator_has('predict'))
def predict(self, X, **predict_params):
check_is_fitted(self.estimator)
#X = check_array(X)
preds = self.estimator.predict(X,**predict_params)
return preds
@available_if(_estimator_has('predict_proba'))
def predict_proba(self, X, **predict_params):
check_is_fitted(self.estimator)
#X = check_array(X)
return self.estimator.predict_proba(X,**predict_params)
@available_if(_estimator_has('decision_function'))
def decision_function(self, X, **predict_params):
check_is_fitted(self.estimator)
#X = check_array(X)
return self.estimator.decision_function(X,**predict_params)
def __sklearn_is_fitted__(self):
"""
Check fitted status and return a Boolean value.
"""
return check_is_fitted(self.estimator)
# @property
# def _estimator_type(self):
# return self.estimator._estimator_type
@property
def classes_(self):
"""The classes labels. Only exist if the last step is a classifier."""
return self.estimator._classes | EstimatorTransformer |
python | google__pytype | pytype/overlays/special_builtins.py | {
"start": 9317,
"end": 10203
} | class ____(BinaryPredicate):
"""The isinstance() function."""
_NAME = "isinstance"
def _call_predicate(self, node, left, right):
return node, self._is_instance(left.data, right.data)
def _is_instance(self, obj, class_spec):
"""Check if the object matches a class specification.
Args:
obj: A BaseValue, generally the left hand side of an isinstance() call.
class_spec: A BaseValue, generally the right hand side of an isinstance()
call.
Returns:
True if the object is derived from a class in the class_spec, False if
it is not, and None if it is ambiguous whether obj matches class_spec.
"""
cls = obj.cls
if isinstance(obj, abstract.AMBIGUOUS_OR_EMPTY) or isinstance(
cls, abstract.AMBIGUOUS_OR_EMPTY
):
return None
return abstract_utils.check_against_mro(self.ctx, cls, class_spec)
| IsInstance |
python | numba__numba | numba/core/untyped_passes.py | {
"start": 20095,
"end": 20470
} | class ____(FunctionPass):
_name = "print_ir_cfg"
def __init__(self):
FunctionPass.__init__(self)
self._ver = 0
def run_pass(self, state):
fir = state.func_ir
self._ver += 1
fir.render_dot(filename_prefix='v{}'.format(self._ver)).render()
return False
@register_pass(mutates_CFG=True, analysis_only=False)
| PrintIRCFG |
python | kamyu104__LeetCode-Solutions | Python/max-number-of-k-sum-pairs.py | {
"start": 50,
"end": 461
} | class ____(object):
def maxOperations(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
count = collections.Counter()
result = 0
for x in nums:
if k-x in count and count[k-x]:
count[k-x] -= 1
result += 1
else:
count[x] += 1
return result
| Solution |
python | SmileyChris__easy-thumbnails | easy_thumbnails/tests/test_source_generators.py | {
"start": 6301,
"end": 8423
} | class ____(test.BaseTest):
def test_not_image(self):
"""
Non-images raise an exception.
"""
self.assertRaises(
IOError,
source_generators.pil_image, BytesIO(b'not an image'))
def test_nearly_image(self):
"""
Truncated images *don't* raise an exception if they can still be read.
"""
data = self.create_image(None, None)
reference = source_generators.pil_image(data)
data.seek(0)
trunc_data = BytesIO()
trunc_data.write(data.read()[:-10])
trunc_data.seek(0)
im = source_generators.pil_image(trunc_data)
# im will be truncated, but it should be the same dimensions.
self.assertEqual(im.size, reference.size)
# self.assertRaises(IOError, source_generators.pil_image, trunc_data)
def test_exif_orientation(self):
"""
Images with EXIF orientation data are reoriented.
"""
reference = image_from_b64(EXIF_REFERENCE)
for exif_orientation, data in EXIF_ORIENTATION.items():
im = image_from_b64(data)
self.assertEqual(exif_orientation, im._getexif().get(0x0112))
self.assertFalse(near_identical(reference, im))
im = source_generators.pil_image(BytesIO(base64.b64decode(data)))
self.assertTrue(
near_identical(reference, im),
'EXIF orientation %s did not match reference image' %
exif_orientation)
def test_switch_off_exif_orientation(self):
"""
Images with EXIF orientation data are not reoriented if the
``exif_orientation`` parameter is ``False``.
"""
reference = image_from_b64(EXIF_REFERENCE)
data = EXIF_ORIENTATION[2]
im = image_from_b64(data)
self.assertFalse(near_identical(reference, im))
im = source_generators.pil_image(
BytesIO(base64.b64decode(data)), exif_orientation=False)
self.assertFalse(
near_identical(reference, im),
'Image should not have been modified')
| PilImageTest |
python | ray-project__ray | python/ray/dashboard/modules/job/common.py | {
"start": 22324,
"end": 22378
} | class ____:
stopped: bool
@dataclass
| JobStopResponse |
python | kamyu104__LeetCode-Solutions | Python/bitwise-or-of-even-numbers-in-an-array.py | {
"start": 37,
"end": 255
} | class ____(object):
def evenNumberBitwiseORs(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return reduce(lambda total, x: total|(x if x%2 == 0 else 0), nums, 0)
| Solution |
python | pennersr__django-allauth | allauth/socialaccount/providers/dummy/views.py | {
"start": 846,
"end": 945
} | class ____(BaseLoginView):
provider_id = DummyProvider.id
login = LoginView.as_view()
| LoginView |
python | aio-libs__aiohttp | aiohttp/http_exceptions.py | {
"start": 1452,
"end": 1538
} | class ____(PayloadEncodingError):
"""Content encoding error."""
| ContentEncodingError |
python | keras-team__keras | keras/src/export/tfsm_layer_test.py | {
"start": 519,
"end": 5584
} | class ____(testing.TestCase):
def test_reloading_export_archive(self):
temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
model = get_model()
ref_input = tf.random.normal((3, 10))
ref_output = model(ref_input)
saved_model.export_saved_model(model, temp_filepath)
reloaded_layer = tfsm_layer.TFSMLayer(temp_filepath)
self.assertAllClose(reloaded_layer(ref_input), ref_output, atol=1e-7)
self.assertLen(reloaded_layer.weights, len(model.weights))
self.assertLen(
reloaded_layer.trainable_weights, len(model.trainable_weights)
)
self.assertLen(
reloaded_layer.non_trainable_weights,
len(model.non_trainable_weights),
)
def test_reloading_default_saved_model(self):
temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
model = get_model()
ref_input = tf.random.normal((3, 10))
ref_output = model(ref_input)
tf.saved_model.save(model, temp_filepath)
reloaded_layer = tfsm_layer.TFSMLayer(
temp_filepath, call_endpoint="serving_default"
)
# The output is a dict, due to the nature of SavedModel saving.
new_output = reloaded_layer(ref_input)
self.assertAllClose(
new_output[list(new_output.keys())[0]],
ref_output,
atol=1e-7,
)
self.assertLen(reloaded_layer.weights, len(model.weights))
self.assertLen(
reloaded_layer.trainable_weights, len(model.trainable_weights)
)
self.assertLen(
reloaded_layer.non_trainable_weights,
len(model.non_trainable_weights),
)
for keras_var in reloaded_layer.weights:
self.assertIsInstance(keras_var, backend.Variable)
def test_call_training(self):
temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
utils.set_random_seed(1337)
model = models.Sequential(
[
layers.Input((10,)),
layers.Dense(10),
layers.Dropout(0.99999),
]
)
export_archive = saved_model.ExportArchive()
export_archive.track(model)
export_archive.add_endpoint(
name="call_inference",
fn=lambda x: model(x, training=False),
input_signature=[tf.TensorSpec(shape=(None, 10), dtype=tf.float32)],
)
export_archive.add_endpoint(
name="call_training",
fn=lambda x: model(x, training=True),
input_signature=[tf.TensorSpec(shape=(None, 10), dtype=tf.float32)],
)
export_archive.write_out(temp_filepath)
reloaded_layer = tfsm_layer.TFSMLayer(
temp_filepath,
call_endpoint="call_inference",
call_training_endpoint="call_training",
)
inference_output = reloaded_layer(
tf.random.normal((1, 10)), training=False
)
training_output = reloaded_layer(
tf.random.normal((1, 10)), training=True
)
self.assertAllClose(np.mean(training_output), 0.0, atol=1e-7)
self.assertNotAllClose(np.mean(inference_output), 0.0, atol=1e-7)
def test_serialization(self):
temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
model = get_model()
ref_input = tf.random.normal((3, 10))
ref_output = model(ref_input)
saved_model.export_saved_model(model, temp_filepath)
reloaded_layer = tfsm_layer.TFSMLayer(temp_filepath)
# Test reinstantiation from config
config = reloaded_layer.get_config()
rereloaded_layer = tfsm_layer.TFSMLayer.from_config(config)
self.assertAllClose(rereloaded_layer(ref_input), ref_output, atol=1e-7)
# Test whole model saving with reloaded layer inside
model = models.Sequential([reloaded_layer])
temp_model_filepath = os.path.join(self.get_temp_dir(), "m.keras")
model.save(temp_model_filepath, save_format="keras_v3")
reloaded_model = saving_lib.load_model(
temp_model_filepath,
custom_objects={"TFSMLayer": tfsm_layer.TFSMLayer},
)
self.assertAllClose(reloaded_model(ref_input), ref_output, atol=1e-7)
def test_errors(self):
# Test missing call endpoint
temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
model = models.Sequential([layers.Input((2,)), layers.Dense(3)])
saved_model.export_saved_model(model, temp_filepath)
with self.assertRaisesRegex(ValueError, "The endpoint 'wrong'"):
tfsm_layer.TFSMLayer(temp_filepath, call_endpoint="wrong")
# Test missing call training endpoint
with self.assertRaisesRegex(ValueError, "The endpoint 'wrong'"):
tfsm_layer.TFSMLayer(
temp_filepath,
call_endpoint="serve",
call_training_endpoint="wrong",
)
| TestTFSMLayer |
python | PrefectHQ__prefect | src/prefect/server/events/actions.py | {
"start": 11688,
"end": 12009
} | class ____(Action):
"""Do nothing when an Automation is triggered"""
type: Literal["do-nothing"] = "do-nothing"
async def act(self, triggered_action: "TriggeredAction") -> None:
logger.info(
"Doing nothing",
extra={**self.logging_context(triggered_action)},
)
| DoNothing |
python | chroma-core__chroma | chromadb/test/test_config.py | {
"start": 646,
"end": 984
} | class ____(Component):
def __init__(self, system: System):
data.inits += "B"
super().__init__(system)
self.require(ComponentC)
self.require(ComponentD)
@overrides
def start(self) -> None:
data.starts += "B"
@overrides
def stop(self) -> None:
data.stops += "B"
| ComponentB |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.