language | repo | path | class_span | source | target |
|---|---|---|---|---|---|
python | huggingface__transformers | tests/models/clvp/test_modeling_clvp.py | {"start": 1443, "end": 6009} | class ____:
def __init__(
self,
parent,
batch_size=2,
seq_length=7,
is_training=False,
use_input_mask=True,
use_labels=True,
vocab_size=50,
hidden_size=128,
projection_dim=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=32,
dropout=0.1,
attention_dropout=0.1,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
self.bos_token_id = vocab_size - 1
self.eos_token_id = vocab_size - 1
def get_config(self):
encoder_config = ClvpEncoderConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
)
return encoder_config
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
if input_mask is not None:
batch_size, seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
encoder_config = self.get_config()
return encoder_config, input_ids, input_mask
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
speech_config, input_ids, input_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids.to(torch_device), "attention_mask": input_mask.to(torch_device)}
return speech_config, inputs_dict
def create_and_check_model(self, speech_config, input_ids, input_mask):
text_config = ClvpEncoderConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
)
text_encoder_model = ClvpEncoder(config=text_config)
text_encoder_model.to(torch_device)
text_encoder_model.eval()
with torch.no_grad():
result = text_encoder_model(input_ids, attention_mask=input_mask)
result = text_encoder_model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result[0].shape, (self.batch_size, self.projection_dim))
# now check with speech config
speech_encoder_model = ClvpEncoder(config=speech_config)
speech_encoder_model.to(torch_device)
speech_encoder_model.eval()
with torch.no_grad():
result = speech_encoder_model(input_ids, attention_mask=input_mask)
result = speech_encoder_model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result[0].shape, (self.batch_size, self.projection_dim))
@require_torch
| ClvpEncoderTester |
python | astropy__astropy | astropy/modeling/projections.py | {"start": 7649, "end": 9012} | class ____(Pix2SkyProjection, Zenithal):
r"""
Zenithal perspective projection - pixel to sky.
Corresponds to the ``AZP`` projection in FITS WCS.
.. math::
\phi &= \arg(-y \cos \gamma, x) \\
\theta &= \left\{\genfrac{}{}{0pt}{}{\psi - \omega}{\psi + \omega + 180^{\circ}}\right.
where:
.. math::
\psi &= \arg(\rho, 1) \\
\omega &= \sin^{-1}\left(\frac{\rho \mu}{\sqrt{\rho^2 + 1}}\right) \\
\rho &= \frac{R}{\frac{180^{\circ}}{\pi}(\mu + 1) + y \sin \gamma} \\
R &= \sqrt{x^2 + y^2 \cos^2 \gamma}
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
gamma : float
Look angle γ in degrees. Default is 0°.
"""
mu = _ParameterDS(
default=0.0, description="Distance from point of projection to center of sphere"
)
gamma = _ParameterDS(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="Look angle γ in degrees (Default = 0°)",
)
def _mu_validator(self, value):
if np.any(np.equal(value, -1.0)):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1"
)
mu._validator = _mu_validator
| Pix2Sky_ZenithalPerspective |
python | sympy__sympy | sympy/physics/quantum/qasm.py | {"start": 2819, "end": 6288} | class ____:
"""Class to form objects from Qasm lines
>>> from sympy.physics.quantum.qasm import Qasm
>>> q = Qasm('qubit q0', 'qubit q1', 'h q0', 'cnot q0,q1')
>>> q.get_circuit()
CNOT(1,0)*H(1)
>>> q = Qasm('qubit q0', 'qubit q1', 'cnot q0,q1', 'cnot q1,q0', 'cnot q0,q1')
>>> q.get_circuit()
CNOT(1,0)*CNOT(0,1)*CNOT(1,0)
"""
def __init__(self, *args, **kwargs):
self.defs = {}
self.circuit = []
self.labels = []
self.inits = {}
self.add(*args)
self.kwargs = kwargs
def add(self, *lines):
for line in nonblank(lines):
command, rest = fullsplit(line)
if self.defs.get(command): #defs come first, since you can override built-in
function = self.defs.get(command)
indices = self.indices(rest)
if len(indices) == 1:
self.circuit.append(function(indices[0]))
else:
self.circuit.append(function(indices[:-1], indices[-1]))
elif hasattr(self, command):
function = getattr(self, command)
function(*rest)
else:
print("Function %s not defined. Skipping" % command)
def get_circuit(self):
return prod(reversed(self.circuit))
def get_labels(self):
return list(reversed(self.labels))
def plot(self):
from sympy.physics.quantum.circuitplot import CircuitPlot
circuit, labels = self.get_circuit(), self.get_labels()
CircuitPlot(circuit, len(labels), labels=labels, inits=self.inits)
def qubit(self, arg, init=None):
self.labels.append(arg)
if init: self.inits[arg] = init
def indices(self, args):
return get_indices(args, self.labels)
def index(self, arg):
return get_index(arg, self.labels)
def nop(self, *args):
pass
def x(self, arg):
self.circuit.append(X(self.index(arg)))
def z(self, arg):
self.circuit.append(Z(self.index(arg)))
def h(self, arg):
self.circuit.append(H(self.index(arg)))
def s(self, arg):
self.circuit.append(S(self.index(arg)))
def t(self, arg):
self.circuit.append(T(self.index(arg)))
def measure(self, arg):
self.circuit.append(Mz(self.index(arg)))
def cnot(self, a1, a2):
self.circuit.append(CNOT(*self.indices([a1, a2])))
def swap(self, a1, a2):
self.circuit.append(SWAP(*self.indices([a1, a2])))
def cphase(self, a1, a2):
self.circuit.append(CPHASE(*self.indices([a1, a2])))
def toffoli(self, a1, a2, a3):
i1, i2, i3 = self.indices([a1, a2, a3])
self.circuit.append(CGateS((i1, i2), X(i3)))
def cx(self, a1, a2):
fi, fj = self.indices([a1, a2])
self.circuit.append(CGate(fi, X(fj)))
def cz(self, a1, a2):
fi, fj = self.indices([a1, a2])
self.circuit.append(CGate(fi, Z(fj)))
def defbox(self, *args):
print("defbox not supported yet. Skipping: ", args)
def qdef(self, name, ncontrols, symbol):
from sympy.physics.quantum.circuitplot import CreateOneQubitGate, CreateCGate
ncontrols = int(ncontrols)
command = fixcommand(name)
symbol = stripquotes(symbol)
if ncontrols > 0:
self.defs[command] = CreateCGate(symbol)
else:
self.defs[command] = CreateOneQubitGate(symbol)
| Qasm |
python | google__python-fire | fire/custom_descriptions_test.py | {"start": 704, "end": 2654} | class ____(testutils.BaseTestCase):
def test_string_type_summary_enough_space(self):
component = 'Test'
summary = custom_descriptions.GetSummary(
obj=component, available_space=80, line_length=LINE_LENGTH)
self.assertEqual(summary, '"Test"')
def test_string_type_summary_not_enough_space_truncated(self):
component = 'Test'
summary = custom_descriptions.GetSummary(
obj=component, available_space=5, line_length=LINE_LENGTH)
self.assertEqual(summary, '"..."')
def test_string_type_summary_not_enough_space_new_line(self):
component = 'Test'
summary = custom_descriptions.GetSummary(
obj=component, available_space=4, line_length=LINE_LENGTH)
self.assertEqual(summary, '"Test"')
def test_string_type_summary_not_enough_space_long_truncated(self):
component = 'Lorem ipsum dolor sit amet'
summary = custom_descriptions.GetSummary(
obj=component, available_space=10, line_length=LINE_LENGTH)
self.assertEqual(summary, '"Lorem..."')
def test_string_type_description_enough_space(self):
component = 'Test'
description = custom_descriptions.GetDescription(
obj=component, available_space=80, line_length=LINE_LENGTH)
self.assertEqual(description, 'The string "Test"')
def test_string_type_description_not_enough_space_truncated(self):
component = 'Lorem ipsum dolor sit amet'
description = custom_descriptions.GetDescription(
obj=component, available_space=20, line_length=LINE_LENGTH)
self.assertEqual(description, 'The string "Lore..."')
def test_string_type_description_not_enough_space_new_line(self):
component = 'Lorem ipsum dolor sit amet'
description = custom_descriptions.GetDescription(
obj=component, available_space=10, line_length=LINE_LENGTH)
self.assertEqual(description, 'The string "Lorem ipsum dolor sit amet"')
if __name__ == '__main__':
testutils.main()
| CustomDescriptionTest |
python | ray-project__ray | python/ray/data/llm.py | {"start": 1933, "end": 3714} | class ____(_HttpRequestProcessorConfig):
"""The configuration for the HTTP request processor.
Args:
batch_size: The batch size to send to the HTTP request.
url: The URL to send the HTTP request to.
headers: The headers to send with the HTTP request.
concurrency: The number of concurrent requests to send. Defaults to 1.
If ``concurrency`` is a ``tuple`` ``(m, n)``,
autoscaling strategy is used (``1 <= m <= n``).
Examples:
.. testcode::
:skipif: True
import ray
from ray.data.llm import HttpRequestProcessorConfig, build_llm_processor
config = HttpRequestProcessorConfig(
url="https://api.openai.com/v1/chat/completions",
headers={"Authorization": "Bearer sk-..."},
concurrency=1,
)
processor = build_llm_processor(
config,
preprocess=lambda row: dict(
payload=dict(
model="gpt-4o-mini",
messages=[
{"role": "system", "content": "You are a calculator"},
{"role": "user", "content": f"{row['id']} ** 3 = ?"},
],
temperature=0.3,
max_tokens=20,
),
),
postprocess=lambda row: dict(
resp=row["http_response"]["choices"][0]["message"]["content"],
),
)
ds = ray.data.range(10)
ds = processor(ds)
for row in ds.take_all():
print(row)
"""
pass
@PublicAPI(stability="alpha")
| HttpRequestProcessorConfig |
python | doocs__leetcode | solution/1900-1999/1979.Find Greatest Common Divisor of Array/Solution.py | {"start": 0, "end": 104} | class ____:
def findGCD(self, nums: List[int]) -> int:
return gcd(max(nums), min(nums))
| Solution |
python | numba__numba | numba/tests/doc_examples/test_typed_dict_usage.py | {"start": 214, "end": 4051} | class ____(unittest.TestCase):
def test_ex_typed_dict_from_cpython(self):
with captured_stdout():
# magictoken.ex_typed_dict_from_cpython.begin
import numpy as np
from numba import njit
from numba.core import types
from numba.typed import Dict
# The Dict.empty() constructs a typed dictionary.
# The key and value types must be explicitly declared.
d = Dict.empty(
key_type=types.unicode_type,
value_type=types.float64[:],
)
# The typed-dict can be used from the interpreter.
d['posx'] = np.asarray([1, 0.5, 2], dtype='f8')
d['posy'] = np.asarray([1.5, 3.5, 2], dtype='f8')
d['velx'] = np.asarray([0.5, 0, 0.7], dtype='f8')
d['vely'] = np.asarray([0.2, -0.2, 0.1], dtype='f8')
# Here's a function that expects a typed-dict as the argument
@njit
def move(d):
# inplace operations on the arrays
d['posx'] += d['velx']
d['posy'] += d['vely']
print('posx: ', d['posx']) # Out: posx: [1. 0.5 2. ]
print('posy: ', d['posy']) # Out: posy: [1.5 3.5 2. ]
# Call move(d) to inplace update the arrays in the typed-dict.
move(d)
print('posx: ', d['posx']) # Out: posx: [1.5 0.5 2.7]
print('posy: ', d['posy']) # Out: posy: [1.7 3.3 2.1]
# magictoken.ex_typed_dict_from_cpython.end
# Test
np.testing.assert_array_equal(d['posx'], [1.5, 0.5, 2.7])
np.testing.assert_array_equal(d['posy'], [1.7, 3.3, 2.1])
def test_ex_typed_dict_njit(self):
with captured_stdout():
# magictoken.ex_typed_dict_njit.begin
import numpy as np
from numba import njit
from numba.core import types
from numba.typed import Dict
# Make array type. Type-expression is not supported in jit
# functions.
float_array = types.float64[:]
@njit
def foo():
# Make dictionary
d = Dict.empty(
key_type=types.unicode_type,
value_type=float_array,
)
# Fill the dictionary
d["posx"] = np.arange(3).astype(np.float64)
d["posy"] = np.arange(3, 6).astype(np.float64)
return d
d = foo()
# Print the dictionary
print(d) # Out: {posx: [0. 1. 2.], posy: [3. 4. 5.]}
# magictoken.ex_typed_dict_njit.end
np.testing.assert_array_equal(d['posx'], [0, 1, 2])
np.testing.assert_array_equal(d['posy'], [3, 4, 5])
def test_ex_inferred_dict_njit(self):
with captured_stdout():
# magictoken.ex_inferred_dict_njit.begin
from numba import njit
import numpy as np
@njit
def foo():
d = dict()
k = {1: np.arange(1), 2: np.arange(2)}
# The following tells the compiler what the key type and the
# value
# type are for `d`.
d[3] = np.arange(3)
d[5] = np.arange(5)
return d, k
d, k = foo()
print(d) # {3: [0 1 2], 5: [0 1 2 3 4]}
print(k) # {1: [0], 2: [0 1]}
# magictoken.ex_inferred_dict_njit.end
np.testing.assert_array_equal(d[3], [0, 1, 2])
np.testing.assert_array_equal(d[5], [0, 1, 2, 3, 4])
np.testing.assert_array_equal(k[1], [0])
np.testing.assert_array_equal(k[2], [0, 1])
if __name__ == '__main__':
unittest.main()
| DocsTypedDictUsageTest |
python | tensorflow__tensorflow | tensorflow/python/autograph/operators/dispatch_context.py | {"start": 841, "end": 1224} | class ____(collections.namedtuple(
'DispatchContext',
('options',))):
"""Allows passing additional parameters to the specific implementations.
Attributes:
options: Optional dict of extra arguments that may be required by specific
implementations.
"""
def option(self, name):
return self.options[name]
NO_CTX = DispatchContext(options={})
| DispatchContext |
python | tensorflow__tensorflow | tensorflow/python/framework/weak_tensor_test.py | {"start": 1686, "end": 9707} | class ____(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def test_weak_tensor_basic(self):
a = WeakTensor.from_tensor(constant_op.constant(1, dtypes.int32))
self.assertEqual(a.dtype, dtypes.int32)
self.assertEqual(a.shape, [])
b = [1.0, 2.0], [3.0, 4.0]
b_wt = WeakTensor.from_tensor(constant_op.constant(b, dtypes.float32))
self.assertEqual(b_wt.dtype, dtypes.float32)
self.assertEqual(b_wt.shape, [2, 2])
@test_util.run_in_graph_and_eager_modes
def test_weak_tensor_init(self):
# Make sure an exception is thrown for unallowed dtypes.
t = constant_op.constant(1, dtypes.int16)
with self.assertRaises(TypeError):
_ = WeakTensor.from_tensor(t)
@test_util.run_in_graph_and_eager_modes
def test_weak_tensor_inheritance(self):
a = WeakTensor.from_tensor(constant_op.constant([1, 2, 3], dtypes.int32))
self.assertIsInstance(a, WeakTensor)
self.assertIsInstance(a, core.Tensor)
self.assertIsInstance(a, extension_type.ExtensionType)
if context.executing_eagerly():
self.assertIsInstance(a, core.Value)
self.assertIsInstance(a, EagerWeakTensor)
else:
self.assertIsInstance(a, core.Symbol)
self.assertIsInstance(a, GraphWeakTensor)
def test_weak_tensor_eager_methods(self):
wt = WeakTensor.from_tensor(constant_op.constant(2, dtypes.int32))
b = [1.0, 2.0], [3.0, 4.0]
b_wt = WeakTensor.from_tensor(constant_op.constant(b, dtypes.float32))
self.assertEqual(complex(wt), complex(2))
self.assertEqual(int(wt), int(2))
self.assertEqual(float(wt), float(2))
self.assertEqual(wt.__index__(), int(2))
self.assertEqual(wt.numpy(), 2)
self.assertEqual(format(wt, 'b'), '10 weakly typed')
self.assertEqual(np.array(wt), 2)
self.assertAllEqual(np.array(b_wt), np.array(b, dtype=np.float32))
@test_util.run_in_graph_and_eager_modes
def test_weak_tensor_bool(self):
# Test to make sure WeakTensor(bool) isn't used as a bool.
with self.assertRaises(TypeError):
if WeakTensor.from_tensor(constant_op.constant(True)):
raise TypeError('Type error is raised because WeakTensor != bool')
@test_util.run_in_graph_and_eager_modes
def test_weak_tensor_getattr(self):
wt = WeakTensor.from_tensor(constant_op.constant(1, dtypes.int32))
wt_name = getattr(wt, '__name__', None)
if context.executing_eagerly():
self.assertEqual(wt_name, 'tf.EagerWeakTensor')
else:
self.assertEqual(wt_name, 'tf.GraphWeakTensor')
@test_util.run_in_graph_and_eager_modes
def test_weak_tensor_in_tf_func(self):
@def_function.function()
def f(x):
return x
t = constant_op.constant(1, dtypes.int32)
wt = WeakTensor.from_tensor(t)
res = f(wt)
self.assertIsInstance(res, WeakTensor)
_ = f(t)
self.assertEqual(f.experimental_get_tracing_count(), 2)
@test_util.run_in_graph_and_eager_modes
def test_weak_tensor_in_tf_func_with_branch_error(self):
a = constant_op.constant(1, dtypes.int32)
b = WeakTensor.from_tensor(a)
@def_function.function()
def f(c, a, b):
if c > 1:
return a
else:
return b
with self.assertRaises(TypeError):
# if and else branch cannot return two different types in a tf.function.
_ = f(constant_op.constant(2, dtypes.int32), a, b)
@test_util.run_in_graph_and_eager_modes
def test_weak_tensor_in_tf_func_with_spec(self):
# Test weak tensor spec with matching input.
weak_tensor_spec = WeakTensor.Spec(tensor.TensorSpec([2]))
wt = WeakTensor.from_tensor(constant_op.constant([1.0, 2.0]))
@def_function.function(input_signature=[weak_tensor_spec])
def f(x):
return x
_ = f(wt)
# Test weak tensor spec with mismatching input.
wt_mismatch = WeakTensor.from_tensor(constant_op.constant([1.0, 2.0, 3.0]))
with self.assertRaises(TypeError):
_ = f(wt_mismatch)
@test_util.run_in_graph_and_eager_modes
def test_weak_tensor_gradient(self):
x = WeakTensor.from_tensor(constant_op.constant([3.0, 4.0, 5.0]))
with backprop.GradientTape() as g:
g.watch(x)
y = x
dy_dx = g.gradient(y, x)
self.assertAllEqual(dy_dx, [1.0, 1.0, 1.0])
self.assertIsInstance(dy_dx, WeakTensor)
@test_util.run_in_graph_and_eager_modes
def test_weak_tensor_in_restored_function(self):
class CustomModule(module.Module):
@def_function.function
def __call__(self, x):
if isinstance(x, tensor.Tensor):
raise TypeError('Weak tensor should not be tensor.Tensor type.')
return x
m = CustomModule()
a = WeakTensor.from_tensor(constant_op.constant(1, dtypes.int32))
_ = m(a)
save(m, '/tmp/f')
m_loaded = load('/tmp/f')
res = m_loaded(a)
self.assertIsInstance(res, WeakTensor)
b = constant_op.constant(1, dtypes.int32)
with self.assertRaisesRegex(
ValueError, 'Could not find matching concrete function'
):
m_loaded(b)
def test_weak_tensor_format_to_string(self):
# __str__ test in eager mode
t = constant_op.constant([1.0, 2.0], dtypes.float32)
wt = WeakTensor(t)
wt_str = 'tf.Tensor([1. 2.], shape=(2,), dtype=float32, weak=True)'
self.assertEqual(str(wt), wt_str)
# __repr__ test in eager mode
wt_repr = (
'<tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.],'
' dtype=float32), weak=True>'
)
self.assertEqual(repr(wt), wt_repr)
@def_function.function()
def f():
# __str__ test in graph mode
t = constant_op.constant([1.0, 2.0], dtypes.float32)
wt = WeakTensor(t)
wt_str = 'Tensor("Const:0", shape=(2,), dtype=float32, weak=True)'
self.assertEqual(str(wt), wt_str)
# __repr__ test in graph mode
wt_repr = "<tf.Tensor 'Const:0' shape=(2,) dtype=float32, weak=True>"
self.assertEqual(repr(wt), wt_repr)
return wt
_ = f()
def test_weak_tensor_iter(self):
# Test normal weakTensor iteration.
t = constant_op.constant([0, 1, 2], dtypes.int32)
wt = WeakTensor.from_tensor(t)
it_weak_tensor = iter(wt)
for i in range(len(wt)):
self.assertAllEqual(
next(it_weak_tensor),
WeakTensor.from_tensor(constant_op.constant(i)),
)
# Test multi-dimensional weakTensor iteration.
t_multi = constant_op.constant([[1, 2], [3, 4]], dtypes.int32)
wt_multi = WeakTensor(t_multi)
it_wt_multi_tensor = iter(wt_multi)
self.assertAllEqual(
next(it_wt_multi_tensor), WeakTensor.from_tensor(t_multi[0])
)
self.assertAllEqual(
next(it_wt_multi_tensor), WeakTensor.from_tensor(t_multi[1])
)
# Test scalar weakTensor iteration.
t_scalar = constant_op.constant(1, dtypes.int32)
wt_scalar = WeakTensor.from_tensor(t_scalar)
with self.assertRaises(TypeError):
# Cannot iterate over a scalar tensor.
_ = iter(wt_scalar)
@test_util.deprecated_graph_mode_only
def test_weak_tensor_iter_graph_mode(self):
# Make sure iteration is not allowed in Graph mode.
wt = WeakTensor.from_tensor(constant_op.constant([0, 1, 2], dtypes.int32))
with self.assertRaisesRegex(
errors.OperatorNotAllowedInGraphError,
'Iterating over a symbolic `tf.WeakTensor` is not allowed. You can'
' attempt the following resolutions to the problem: If you are running'
' in Graph mode, use Eager execution mode or decorate this function'
' with @tf.function. If you are using AutoGraph, you can try decorating'
' this function with @tf.function. If that does not work, then you may'
' be using an unsupported feature or your source code may not be'
' visible to AutoGraph. See'
' https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/limitations.md#access-to-source-code'
' for more information.',
):
_ = iter(wt)
if __name__ == '__main__':
ops.enable_eager_execution()
googletest.main()
| WeakTensorTest |
python | PyCQA__pylint | tests/functional/r/return_in_init.py | {"start": 316, "end": 424} | class ____:
"""dummy class"""
def __init__(self):
self.callable = lambda: (yield None)
| MyClass5 |
python | apache__airflow | providers/common/sql/tests/unit/common/sql/hooks/test_handlers.py | {"start": 1001, "end": 2530} | class ____:
def test_return_single_query_results(self):
assert return_single_query_results("SELECT 1", return_last=True, split_statements=False)
assert return_single_query_results("SELECT 1", return_last=False, split_statements=False)
assert return_single_query_results("SELECT 1", return_last=False, split_statements=None) is False
assert return_single_query_results(["SELECT 1"], return_last=True, split_statements=False) is False
assert return_single_query_results(["SELECT 1"], return_last=False, split_statements=False) is False
assert return_single_query_results("SELECT 1", return_last=False, split_statements=True) is False
assert return_single_query_results(["SELECT 1"], return_last=False, split_statements=True) is False
assert return_single_query_results(["SELECT 1"], return_last=True, split_statements=True) is False
def test_fetch_all_handler(self):
cursor = MagicMock()
cursor.description = [("col1", "int"), ("col2", "string")]
cursor.fetchall.return_value = [(1, "hello")]
assert fetch_all_handler(cursor) == [(1, "hello")]
cursor.description = None
assert fetch_all_handler(cursor) is None
def test_fetch_one_handler(self):
cursor = MagicMock()
cursor.description = [("col1", "int")]
cursor.fetchone.return_value = 1
assert fetch_one_handler(cursor) == (1)
cursor.description = None
assert fetch_one_handler(cursor) is None
| TestHandlers |
python | numba__numba | numba/core/errors.py | {"start": 1698, "end": 1858} | class ____(NumbaWarning):
"""
Warning category for when an operation in a prange
might not have parallel semantics.
"""
| NumbaParallelSafetyWarning |
python | etianen__django-reversion | tests/test_app/tests/test_commands.py | {"start": 3352, "end": 3773} | class ____(TestModelMixin, TestBase):
def testCreateInitialRevisionsComment(self):
obj = TestModel.objects.create()
meta_name = "meta name"
meta = json.dumps({"test_app.TestMeta": {"name": meta_name}})
self.callCommand("createinitialrevisions", "--meta", meta)
self.assertSingleRevision((obj,), meta_names=(meta_name, ), comment="Initial version.")
| CreateInitialRevisionsMetaTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-monday/unit_tests/integrations/monday_requests/boards_request_builder.py | {"start": 209, "end": 1235} | class ____(MondayBaseRequestBuilder):
@classmethod
def boards_endpoint(cls, authenticator: Authenticator, board_ids: List[int] = None) -> "BoardsRequestBuilder":
return cls().with_authenticator(authenticator).with_board_ids(board_ids)
@property
def request_body(self):
params = super().query_params or {}
if self._board_ids:
board_ids = ", ".join(list(map(str, self._board_ids)))
board_ids_str = f",ids:[{board_ids}]"
else:
board_ids_str = ""
params["query"] = (
"{boards(limit:10%s){id,name,board_kind,type,columns{archived,description,id,settings_str,title,type,width},communication,description,groups{archived,color,deleted,id,position,title},owners{id},creator{id},permissions,state,subscribers{id},tags{id},top_group{id},updated_at,updates{id},views{id,name,settings_str,type,view_specific_data_str},workspace{id,name,kind,description}}}"
% board_ids_str
)
return params
| BoardsRequestBuilder |
python | matplotlib__matplotlib | galleries/examples/misc/multiprocess_sgskip.py | {"start": 437, "end": 1710} | class ____:
def __init__(self):
self.x = []
self.y = []
def terminate(self):
plt.close('all')
def call_back(self):
while self.pipe.poll():
command = self.pipe.recv()
if command is None:
self.terminate()
return False
else:
self.x.append(command[0])
self.y.append(command[1])
self.ax.plot(self.x, self.y, 'ro')
self.fig.canvas.draw()
return True
def __call__(self, pipe):
print('starting plotter...')
self.pipe = pipe
self.fig, self.ax = plt.subplots()
timer = self.fig.canvas.new_timer(interval=1000)
timer.add_callback(self.call_back)
timer.start()
print('...done')
plt.show()
# %%
#
# Plotting class
# ==============
#
# This class uses multiprocessing to spawn a process to run code from the
# class above. When initialized, it creates a pipe and an instance of
# ``ProcessPlotter`` which will be run in a separate process.
#
# When run from the command line, the parent process sends data to the spawned
# process which is then plotted via the callback function specified in
# ``ProcessPlotter:__call__``.
#
| ProcessPlotter |
python | redis__redis-py | redis/commands/vectorset/commands.py | {"start": 725, "end": 868} | class ____(Enum):
"""Quantization options for the VADD command."""
NOQUANT = "NOQUANT"
BIN = "BIN"
Q8 = "Q8"
| QuantizationOptions |
python | scrapy__scrapy | tests/test_utils_request.py | {"start": 11507, "end": 13806} | class ____:
def _test_request(self, request_object, expected_curl_command):
curl_command = request_to_curl(request_object)
assert curl_command == expected_curl_command
def test_get(self):
request_object = Request("https://www.example.com")
expected_curl_command = "curl -X GET https://www.example.com"
self._test_request(request_object, expected_curl_command)
def test_post(self):
request_object = Request(
"https://www.httpbin.org/post",
method="POST",
body=json.dumps({"foo": "bar"}),
)
expected_curl_command = (
'curl -X POST https://www.httpbin.org/post --data-raw \'{"foo": "bar"}\''
)
self._test_request(request_object, expected_curl_command)
def test_headers(self):
request_object = Request(
"https://www.httpbin.org/post",
method="POST",
headers={"Content-Type": "application/json", "Accept": "application/json"},
body=json.dumps({"foo": "bar"}),
)
expected_curl_command = (
"curl -X POST https://www.httpbin.org/post"
' --data-raw \'{"foo": "bar"}\''
" -H 'Content-Type: application/json' -H 'Accept: application/json'"
)
self._test_request(request_object, expected_curl_command)
def test_cookies_dict(self):
request_object = Request(
"https://www.httpbin.org/post",
method="POST",
cookies={"foo": "bar"},
body=json.dumps({"foo": "bar"}),
)
expected_curl_command = (
"curl -X POST https://www.httpbin.org/post"
" --data-raw '{\"foo\": \"bar\"}' --cookie 'foo=bar'"
)
self._test_request(request_object, expected_curl_command)
def test_cookies_list(self):
request_object = Request(
"https://www.httpbin.org/post",
method="POST",
cookies=[{"foo": "bar"}],
body=json.dumps({"foo": "bar"}),
)
expected_curl_command = (
"curl -X POST https://www.httpbin.org/post"
" --data-raw '{\"foo\": \"bar\"}' --cookie 'foo=bar'"
)
self._test_request(request_object, expected_curl_command)
| TestRequestToCurl |
python | urllib3__urllib3 | src/urllib3/response.py | {"start": 1418, "end": 2356} | class ____(ContentDecoder):
def __init__(self) -> None:
self._first_try = True
self._data = b""
self._obj = zlib.decompressobj()
def decompress(self, data: bytes) -> bytes:
if not data:
return data
if not self._first_try:
return self._obj.decompress(data)
self._data += data
try:
decompressed = self._obj.decompress(data)
if decompressed:
self._first_try = False
self._data = None # type: ignore[assignment]
return decompressed
except zlib.error:
self._first_try = False
self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
try:
return self.decompress(self._data)
finally:
self._data = None # type: ignore[assignment]
def flush(self) -> bytes:
return self._obj.flush()
| DeflateDecoder |
python | google__pytype | pytype/tools/merge_pyi/test_data/heuristics.py | {"start": 142, "end": 223} | class ____:
def __init__(self):
pass
def f(self, x):
pass
| B |
python | PyCQA__pylint | tests/functional/a/alternative/alternative_union_syntax_error.py | {"start": 2614, "end": 2717} | class ____:
my_var: int | str # [unsupported-binary-operation]
@dataclasses.dataclass
| CustomDataClass |
python | numba__numba | numba/core/ir.py | {"start": 9487, "end": 9650} | class ____(object):
"""Abstract base class for anything that can be the RHS of an assignment.
This class **does not** define any methods.
"""
| AbstractRHS |
python | charliermarsh__ruff | crates/ruff_python_parser/resources/valid/statement/type.py | {"start": 798, "end": 1828} | class ____: type X = int
type Point = tuple[float, float]
type Point[T] = tuple[T, T]
type IntFunc[**P] = Callable[P, int] # ParamSpec
type LabeledTuple[*Ts] = tuple[str, *Ts] # TypeVarTuple
type HashableSequence[T: Hashable] = Sequence[T] # TypeVar with bound
type IntOrStrSequence[T: (int, str)] = Sequence[T] # TypeVar with constraints
# Type as an identifier
type *a + b, c # ((type * a) + b), c
type *(a + b), c # (type * (a + b)), c
type (*a + b, c) # type ((*(a + b)), c)
type -a * b + c # (type - (a * b)) + c
type -(a * b) + c # (type - (a * b)) + c
type (-a) * b + c # (type (-(a * b))) + c
type ().a # (type()).a
type (()).a # (type(())).a
type ((),).a # (type(())).a
type [a].b # (type[a]).b
type [a,].b # (type[(a,)]).b (not (type[a]).b)
type [(a,)].b # (type[(a,)]).b
type()[a:
b] # (type())[a: b]
if type := 1: pass
type = lambda query: query == event
print(type(12))
type(type)
a = (
type in C
)
a = (
type(b)
)
type (
X = int
)
type = 1
type = x = 1
x = type = 1
lambda x: type | X |
python | wandb__wandb | wandb/sdk/artifacts/_generated/fetch_linked_artifacts.py | {"start": 690, "end": 840} | class ____(GQLResult):
node: Optional[FetchLinkedArtifactsArtifactArtifactMembershipsEdgesNode]
| FetchLinkedArtifactsArtifactArtifactMembershipsEdges |
python | numpy__numpy | numpy/fft/tests/test_pocketfft.py | {"start": 22253, "end": 24446} | class ____:
threads = 16
input_shape = (800, 200)
def _test_mtsame(self, func, *args):
def worker(args, q):
q.put(func(*args))
q = queue.Queue()
expected = func(*args)
# Spin off a bunch of threads to call the same function simultaneously
t = [threading.Thread(target=worker, args=(args, q))
for i in range(self.threads)]
[x.start() for x in t]
[x.join() for x in t]
# Make sure all threads returned the correct value
for i in range(self.threads):
assert_array_equal(q.get(timeout=5), expected,
'Function returned wrong value in multithreaded context')
def test_fft(self):
a = np.ones(self.input_shape) * 1 + 0j
self._test_mtsame(np.fft.fft, a)
def test_ifft(self):
a = np.ones(self.input_shape) * 1 + 0j
self._test_mtsame(np.fft.ifft, a)
def test_rfft(self):
a = np.ones(self.input_shape)
self._test_mtsame(np.fft.rfft, a)
def test_irfft(self):
a = np.ones(self.input_shape) * 1 + 0j
self._test_mtsame(np.fft.irfft, a)
def test_irfft_with_n_1_regression():
# Regression test for gh-25661
x = np.arange(10)
np.fft.irfft(x, n=1)
np.fft.hfft(x, n=1)
np.fft.irfft(np.array([0], complex), n=10)
def test_irfft_with_n_large_regression():
# Regression test for gh-25679
x = np.arange(5) * (1 + 1j)
result = np.fft.hfft(x, n=10)
expected = np.array([20., 9.91628173, -11.8819096, 7.1048486,
-6.62459848, 4., -3.37540152, -0.16057669,
1.8819096, -20.86055364])
assert_allclose(result, expected)
@pytest.mark.parametrize("fft", [
np.fft.fft, np.fft.ifft, np.fft.rfft, np.fft.irfft
])
@pytest.mark.parametrize("data", [
np.array([False, True, False]),
np.arange(10, dtype=np.uint8),
np.arange(5, dtype=np.int16),
])
def test_fft_with_integer_or_bool_input(data, fft):
# Regression test for gh-25819
result = fft(data)
float_data = data.astype(np.result_type(data, 1.))
expected = fft(float_data)
assert_array_equal(result, expected)
| TestFFTThreadSafe |
python | getsentry__sentry | src/sentry/types/token.py | {"start": 14, "end": 554} | class ____(enum.StrEnum):
"""
Represents the various API/auth token types in the Sentry code base.
The values equate to the expected prefix of each of the token types.
"""
USER = "sntryu_"
ORG = "sntrys_"
USER_APP = "sntrya_"
INTEGRATION = "sntryi_"
# tokens created prior to our prefixing
__empty__: None = None
@classmethod
def choices(cls) -> list[tuple[None, None] | tuple[str, str]]:
return [(None, None), *((e.value, e.name.replace("_", " ").title()) for e in cls)]
| AuthTokenType |
python | spack__spack | lib/spack/spack/cmd/external.py | {"start": 9640, "end": 9700} | class ____(spack.error.SpackError):
pass
| NoManifestFileError |
python | apache__thrift | lib/py/src/transport/THttpClient.py | {"start": 975, "end": 7434} | class ____(TTransportBase):
"""Http implementation of TTransport base."""
def __init__(self, uri_or_host, port=None, path=None, cafile=None, cert_file=None, key_file=None, ssl_context=None):
"""THttpClient supports two different types of construction:
THttpClient(host, port, path) - deprecated
THttpClient(uri, [port=<n>, path=<s>, cafile=<filename>, cert_file=<filename>, key_file=<filename>, ssl_context=<context>])
Only the second supports https. To properly authenticate against the server,
provide the client's identity by specifying cert_file and key_file. To properly
authenticate the server, specify either cafile or ssl_context with a CA defined.
NOTE: if ssl_context is defined, it will override any provided cert_file, key_file, and cafile.
"""
if port is not None:
warnings.warn(
"Please use the THttpClient('http{s}://host:port/path') constructor",
DeprecationWarning,
stacklevel=2)
self.host = uri_or_host
self.port = port
assert path
self.path = path
self.scheme = 'http'
else:
parsed = urllib.parse.urlparse(uri_or_host)
self.scheme = parsed.scheme
assert self.scheme in ('http', 'https')
if self.scheme == 'http':
self.port = parsed.port or http.client.HTTP_PORT
elif self.scheme == 'https':
self.port = parsed.port or http.client.HTTPS_PORT
if (cafile or cert_file or key_file) and not ssl_context:
self.context = ssl.create_default_context(cafile=cafile)
self.context.load_cert_chain(certfile=cert_file, keyfile=key_file)
else:
self.context = ssl_context
self.host = parsed.hostname
self.path = parsed.path
if parsed.query:
self.path += '?%s' % parsed.query
try:
proxy = urllib.request.getproxies()[self.scheme]
except KeyError:
proxy = None
else:
if urllib.request.proxy_bypass(self.host):
proxy = None
if proxy:
parsed = urllib.parse.urlparse(proxy)
self.realhost = self.host
self.realport = self.port
self.host = parsed.hostname
self.port = parsed.port
self.proxy_auth = self.basic_proxy_auth_header(parsed)
else:
self.realhost = self.realport = self.proxy_auth = None
self.__wbuf = BytesIO()
self.__http = None
self.__http_response = None
self.__timeout = None
self.__custom_headers = None
self.headers = None
@staticmethod
def basic_proxy_auth_header(proxy):
if proxy is None or not proxy.username:
return None
ap = "%s:%s" % (urllib.parse.unquote(proxy.username),
urllib.parse.unquote(proxy.password))
cr = base64.b64encode(ap.encode()).strip()
return "Basic " + six.ensure_str(cr)
def using_proxy(self):
return self.realhost is not None
def open(self):
if self.scheme == 'http':
self.__http = http.client.HTTPConnection(self.host, self.port,
timeout=self.__timeout)
elif self.scheme == 'https':
self.__http = http.client.HTTPSConnection(self.host, self.port,
timeout=self.__timeout,
context=self.context)
if self.using_proxy():
self.__http.set_tunnel(self.realhost, self.realport,
{"Proxy-Authorization": self.proxy_auth})
def close(self):
self.__http.close()
self.__http = None
self.__http_response = None
def isOpen(self):
return self.__http is not None
def setTimeout(self, ms):
if ms is None:
self.__timeout = None
else:
self.__timeout = ms / 1000.0
def setCustomHeaders(self, headers):
self.__custom_headers = headers
def read(self, sz):
return self.__http_response.read(sz)
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
if self.isOpen():
self.close()
self.open()
# Pull data out of buffer
data = self.__wbuf.getvalue()
self.__wbuf = BytesIO()
# HTTP request
if self.using_proxy() and self.scheme == "http":
# need full URL of real host for HTTP proxy here (HTTPS uses CONNECT tunnel)
self.__http.putrequest('POST', "http://%s:%s%s" %
(self.realhost, self.realport, self.path))
else:
self.__http.putrequest('POST', self.path)
# Write headers
self.__http.putheader('Content-Type', 'application/x-thrift')
self.__http.putheader('Content-Length', str(len(data)))
if self.using_proxy() and self.scheme == "http" and self.proxy_auth is not None:
self.__http.putheader("Proxy-Authorization", self.proxy_auth)
if not self.__custom_headers or 'User-Agent' not in self.__custom_headers:
user_agent = 'Python/THttpClient'
script = os.path.basename(sys.argv[0])
if script:
user_agent = '%s (%s)' % (user_agent, urllib.parse.quote(script))
self.__http.putheader('User-Agent', user_agent)
if self.__custom_headers:
for key, val in self.__custom_headers.items():
self.__http.putheader(key, val)
# Saves the cookie sent by the server in the previous response.
# HTTPConnection.putheader can only be called after a request has been
# started, and before it's been sent.
if self.headers and 'Set-Cookie' in self.headers:
self.__http.putheader('Cookie', self.headers['Set-Cookie'])
self.__http.endheaders()
# Write payload
self.__http.send(data)
# Get reply to flush the request
self.__http_response = self.__http.getresponse()
self.code = self.__http_response.status
self.message = self.__http_response.reason
self.headers = self.__http_response.msg
| THttpClient |
python | openai__gym | gym/envs/mujoco/ant_v3.py | {"start": 161, "end": 5705} | class ____(MuJocoPyEnv, utils.EzPickle):
metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
],
"render_fps": 20,
}
def __init__(
self,
xml_file="ant.xml",
ctrl_cost_weight=0.5,
contact_cost_weight=5e-4,
healthy_reward=1.0,
terminate_when_unhealthy=True,
healthy_z_range=(0.2, 1.0),
contact_force_range=(-1.0, 1.0),
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
**kwargs
):
utils.EzPickle.__init__(
self,
xml_file,
ctrl_cost_weight,
contact_cost_weight,
healthy_reward,
terminate_when_unhealthy,
healthy_z_range,
contact_force_range,
reset_noise_scale,
exclude_current_positions_from_observation,
**kwargs
)
self._ctrl_cost_weight = ctrl_cost_weight
self._contact_cost_weight = contact_cost_weight
self._healthy_reward = healthy_reward
self._terminate_when_unhealthy = terminate_when_unhealthy
self._healthy_z_range = healthy_z_range
self._contact_force_range = contact_force_range
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation
)
if exclude_current_positions_from_observation:
observation_space = Box(
low=-np.inf, high=np.inf, shape=(111,), dtype=np.float64
)
else:
observation_space = Box(
low=-np.inf, high=np.inf, shape=(113,), dtype=np.float64
)
MuJocoPyEnv.__init__(
self, xml_file, 5, observation_space=observation_space, **kwargs
)
@property
def healthy_reward(self):
return (
float(self.is_healthy or self._terminate_when_unhealthy)
* self._healthy_reward
)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
@property
def contact_forces(self):
raw_contact_forces = self.sim.data.cfrc_ext
min_value, max_value = self._contact_force_range
contact_forces = np.clip(raw_contact_forces, min_value, max_value)
return contact_forces
@property
def contact_cost(self):
contact_cost = self._contact_cost_weight * np.sum(
np.square(self.contact_forces)
)
return contact_cost
@property
def is_healthy(self):
state = self.state_vector()
min_z, max_z = self._healthy_z_range
is_healthy = np.isfinite(state).all() and min_z <= state[2] <= max_z
return is_healthy
@property
def terminated(self):
terminated = not self.is_healthy if self._terminate_when_unhealthy else False
return terminated
def step(self, action):
xy_position_before = self.get_body_com("torso")[:2].copy()
self.do_simulation(action, self.frame_skip)
xy_position_after = self.get_body_com("torso")[:2].copy()
xy_velocity = (xy_position_after - xy_position_before) / self.dt
x_velocity, y_velocity = xy_velocity
ctrl_cost = self.control_cost(action)
contact_cost = self.contact_cost
forward_reward = x_velocity
healthy_reward = self.healthy_reward
rewards = forward_reward + healthy_reward
costs = ctrl_cost + contact_cost
reward = rewards - costs
terminated = self.terminated
observation = self._get_obs()
info = {
"reward_forward": forward_reward,
"reward_ctrl": -ctrl_cost,
"reward_contact": -contact_cost,
"reward_survive": healthy_reward,
"x_position": xy_position_after[0],
"y_position": xy_position_after[1],
"distance_from_origin": np.linalg.norm(xy_position_after, ord=2),
"x_velocity": x_velocity,
"y_velocity": y_velocity,
"forward_reward": forward_reward,
}
if self.render_mode == "human":
self.render()
return observation, reward, terminated, False, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
contact_force = self.contact_forces.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[2:]
observations = np.concatenate((position, velocity, contact_force))
return observations
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq
)
qvel = (
self.init_qvel
+ self._reset_noise_scale * self.np_random.standard_normal(self.model.nv)
)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
assert self.viewer is not None
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
| AntEnv |
python | coleifer__peewee | tests/models.py | {"start": 71009, "end": 77338} | class ____(ModelTestCase):
data = (
('huey', 'meow'),
('huey', 'purr'),
('zaizee', 'hiss'),
('mickey', 'woof'))
requires = [User, Tweet]
def setUp(self):
super(TestJoinModelAlias, self).setUp()
users = {}
for pk, (username, tweet) in enumerate(self.data, 1):
if username not in users:
user = User.create(id=len(users) + 1, username=username)
users[username] = user
else:
user = users[username]
Tweet.create(id=pk, user=user, content=tweet)
def _test_query(self, alias_expr):
UA = alias_expr()
return (Tweet
.select(Tweet, UA)
.order_by(UA.username, Tweet.content))
def assertTweets(self, query, user_attr='user'):
with self.assertQueryCount(1):
data = [(getattr(tweet, user_attr).username, tweet.content)
for tweet in query]
self.assertEqual(sorted(self.data), data)
def test_control(self):
self.assertTweets(self._test_query(lambda: User).join(User))
def test_join_aliased_columns(self):
query = (Tweet
.select(Tweet.id.alias('tweet_id'), Tweet.content)
.order_by(Tweet.id))
self.assertEqual([(t.tweet_id, t.content) for t in query], [
(1, 'meow'),
(2, 'purr'),
(3, 'hiss'),
(4, 'woof')])
query = (Tweet
.select(Tweet.id.alias('tweet_id'), Tweet.content)
.join(User)
.where(User.username == 'huey')
.order_by(Tweet.id))
self.assertEqual([(t.tweet_id, t.content) for t in query], [
(1, 'meow'),
(2, 'purr')])
def test_join(self):
UA = User.alias('ua')
query = self._test_query(lambda: UA).join(UA)
self.assertTweets(query)
def test_join_on(self):
UA = User.alias('ua')
query = self._test_query(lambda: UA).join(UA, on=(Tweet.user == UA.id))
self.assertTweets(query)
def test_join_on_field(self):
UA = User.alias('ua')
query = self._test_query(lambda: UA)
query = query.join(UA, on=Tweet.user)
self.assertTweets(query)
def test_join_on_alias(self):
UA = User.alias('ua')
query = self._test_query(lambda: UA)
query = query.join(UA, on=(Tweet.user == UA.id).alias('foo'))
self.assertTweets(query, 'foo')
def test_join_attr(self):
UA = User.alias('ua')
query = self._test_query(lambda: UA).join(UA, attr='baz')
self.assertTweets(query, 'baz')
def test_join_on_alias_attr(self):
UA = User.alias('ua')
q = self._test_query(lambda: UA)
q = q.join(UA, on=(Tweet.user == UA.id).alias('foo'), attr='bar')
self.assertTweets(q, 'bar')
def _test_query_backref(self, alias_expr):
TA = alias_expr()
return (User
.select(User, TA)
.order_by(User.username, TA.content))
def assertUsers(self, query, tweet_attr='tweet'):
with self.assertQueryCount(1):
data = [(user.username, getattr(user, tweet_attr).content)
for user in query]
self.assertEqual(sorted(self.data), data)
def test_control_backref(self):
self.assertUsers(self._test_query_backref(lambda: Tweet).join(Tweet))
def test_join_backref(self):
TA = Tweet.alias('ta')
query = self._test_query_backref(lambda: TA).join(TA)
self.assertUsers(query)
def test_join_on_backref(self):
TA = Tweet.alias('ta')
query = self._test_query_backref(lambda: TA)
query = query.join(TA, on=(User.id == TA.user_id))
self.assertUsers(query)
def test_join_on_field_backref(self):
TA = Tweet.alias('ta')
query = self._test_query_backref(lambda: TA)
query = query.join(TA, on=TA.user)
self.assertUsers(query)
def test_join_on_alias_backref(self):
TA = Tweet.alias('ta')
query = self._test_query_backref(lambda: TA)
query = query.join(TA, on=(User.id == TA.user_id).alias('foo'))
self.assertUsers(query, 'foo')
def test_join_attr_backref(self):
TA = Tweet.alias('ta')
query = self._test_query_backref(lambda: TA).join(TA, attr='baz')
self.assertUsers(query, 'baz')
def test_join_alias_twice(self):
# Test that a model-alias can be both the source and the dest by
# joining from User -> Tweet -> User (as "foo").
TA = Tweet.alias('ta')
UA = User.alias('ua')
with self.assertQueryCount(1):
query = (User
.select(User, TA, UA)
.join(TA)
.join(UA, on=(TA.user_id == UA.id).alias('foo'))
.order_by(User.username, TA.content))
data = [(row.username, row.tweet.content, row.tweet.foo.username)
for row in query]
self.assertEqual(data, [
('huey', 'meow', 'huey'),
('huey', 'purr', 'huey'),
('mickey', 'woof', 'mickey'),
('zaizee', 'hiss', 'zaizee')])
def test_alias_filter(self):
UA = User.alias('ua')
lookups = ({'ua__username': 'huey'}, {'user__username': 'huey'})
for lookup in lookups:
with self.assertQueryCount(1):
query = (Tweet
.select(Tweet.content, UA.username)
.join(UA)
.filter(**lookup)
.order_by(Tweet.content))
self.assertSQL(query, (
'SELECT "t1"."content", "ua"."username" '
'FROM "tweet" AS "t1" '
'INNER JOIN "users" AS "ua" '
'ON ("t1"."user_id" = "ua"."id") '
'WHERE ("ua"."username" = ?) '
'ORDER BY "t1"."content"'), ['huey'])
data = [(t.content, t.user.username) for t in query]
self.assertEqual(data, [('meow', 'huey'), ('purr', 'huey')])
@skip_unless(
IS_POSTGRESQL or IS_MYSQL_ADVANCED_FEATURES or IS_SQLITE_25 or IS_CRDB,
'window function')
| TestJoinModelAlias |
python | eth-brownie__brownie | brownie/network/middlewares/caching.py | {"start": 3346, "end": 11065} | class ____(BrownieMiddlewareABC):
"""
Web3 middleware for request caching.
"""
def __init__(self, w3: Web3) -> None:
super().__init__(w3)
self.table_key: Final = f"chain{CONFIG.active_network['chainid']}"
self.cur: Final = Cursor(_get_data_folder().joinpath("cache.db"))
self.cur.execute(f"CREATE TABLE IF NOT EXISTS {self.table_key} (method, params, result)")
self.lock: Final = threading.Lock()
self.event: Final = threading.Event()
self.start_block_filter_loop()
def start_block_filter_loop(self):
self.event.clear()
self.loop_thread = threading.Thread(target=self.loop_exception_handler, daemon=True)
self.loop_thread.start()
self.event.wait()
@classmethod
def get_layer(cls, w3: Web3, network_type: str) -> Optional[int]:
if CONFIG.settings["eager_caching"] is False:
# do not cache when user doesn't want it
return None
if network_type != "live":
# do not cache on development chains
return None
try:
latest = w3.eth.get_block("latest")
except Exception:
return None
if latest.timestamp - w3.eth.get_block(latest.number - 50).timestamp < 250:
# do not cache on chains with an average block time of less than 5 seconds
return None
if _new_filter(w3) is None:
# do not cache if we cannot create a filter for new blocks
return None
return 0
@property
def time_since(self) -> float:
return time.time() - self.last_request
def loop_exception_handler(self) -> None:
try:
self.block_filter_loop()
except Exception:
# catch unhandled exceptions to avoid random error messages in the console
self.block_cache.clear()
self.is_killed = True
def block_filter_loop(self) -> None:
# initialize required state variables within the loop to avoid recursion death
latest = self.w3.eth.get_block("latest")
self.last_block = latest.hash
self.last_block_seen = latest.timestamp
self.last_request = time.time()
self.block_cache: OrderedDict = OrderedDict()
self.block_filter = self.w3.eth.filter("latest")
self.is_killed = False
self.event.set()
new_blocks: List[LogReceipt]
while not self.is_killed:
# if the last RPC request was > 60 seconds ago, reduce the rate of updates.
# we eventually settle at one query per minute after 10 minutes of no requests.
with self.lock:
if self.time_since > 60:
self.block_cache.clear()
self.event.clear()
if self.time_since > 60:
self.event.wait(min(self.time_since / 10, 60))
# query the filter for new blocks
with self.lock:
try:
new_blocks = self.block_filter.get_new_entries()
except (AttributeError, ValueError):
# web3 has disconnected, or the filter has expired from inactivity
# some public nodes allow a filter initially, but block it several seconds later
block_filter = _new_filter(self.w3)
if block_filter is None:
return
self.block_filter = block_filter
# continue in try: except: block is not supported by mypyc
# as of jul 23 2025 so we use this workaround instead.
should_skip = True
else:
should_skip = False
if new_blocks:
self.block_cache[new_blocks[-1]] = {}
self.last_block = new_blocks[-1]
self.last_block_seen = time.time()
if len(self.block_cache) > 5:
old_key = list(self.block_cache)[0]
del self.block_cache[old_key]
# continue in try: except: block is not supported by mypyc
# as of jul 23 2025 so we use this workaround instead.
if should_skip:
pass
elif new_blocks and self.time_since < 15:
# if this update found a new block and we've been querying
# frequently, we can wait a few seconds before the next update
time.sleep(5)
elif time.time() - self.last_block_seen < 15:
# if it's been less than 15 seconds since the last block, wait 2 seconds
time.sleep(2)
else:
# if it's been more than 15 seconds, only wait 1 second
time.sleep(1)
def process_request(
self,
make_request: Callable,
method: RPCEndpoint,
params: Sequence[Any],
) -> Dict[str, Any]:
if method in (
# caching any of these means we die of recursion death so let's not do that
"eth_getFilterChanges",
"eth_newBlockFilter",
"eth_uninstallFilter",
# used to check connectivity
"web3_clientVersion",
# caching these causes weirdness with transaction broadcasting and replacement
"eth_sendTransaction",
"eth_sendRawTransaction",
"eth_sign",
"eth_signTransaction",
"eth_getTransactionByHash",
"eth_getTransactionReceipt",
"eth_chainId",
):
return make_request(method, params)
# try to return a cached value
param_str = ujson_dumps(params, separators=(",", ""), default=str)
# check if the value is available within the long-term cache
if method in LONGTERM_CACHE:
row = self.cur.fetchone(
f"SELECT result FROM {self.table_key} WHERE method=? AND params=?",
(method, param_str),
)
if row:
data = row[0]
if isinstance(data, bytes):
data = HexBytes(data)
return {"id": "cache", "jsonrpc": "2.0", "result": data}
if not self.loop_thread.is_alive():
# restart the block filter loop if it has crashed (usually from a ConnectionError)
self.start_block_filter_loop()
with self.lock:
self.last_request = time.time()
self.event.set()
try:
return self.block_cache[self.last_block][method][param_str]
except KeyError:
pass
# cached value is unavailable, make a request and cache the result
with self.lock:
response = make_request(method, params)
self.block_cache.setdefault(self.last_block, {}).setdefault(method, {})
self.block_cache[self.last_block][method][param_str] = response
# check if the value can be added to long-term cache
if "result" in response and method in LONGTERM_CACHE:
result = response["result"]
if LONGTERM_CACHE[method](self.w3, result):
if isinstance(result, (dict, list, tuple)):
result = ujson_dumps(response, separators=(",", ""), default=str)
self.cur.insert(self.table_key, method, param_str, result)
return response
def uninstall(self) -> None:
self.is_killed = True
self.block_cache.clear()
if self.w3.isConnected():
self.w3.eth.uninstall_filter(self.block_filter.filter_id)
| RequestCachingMiddleware |
python | encode__django-rest-framework | tests/test_permissions.py | {"start": 19072, "end": 19308} | class ____(permissions.BasePermission):
message = 'Custom: You cannot access this resource'
code = 'permission_denied_custom'
def has_object_permission(self, request, view, obj):
return False
| BasicObjectPermWithDetail |
python | pytorch__pytorch | torch/distributed/tensor/_dtensor_spec.py | {"start": 2131, "end": 29972} | class ____:
mesh: DeviceMesh
placements: tuple[Placement, ...]
# tensor meta will only be set during sharding propagation
tensor_meta: TensorMeta | None = None
# When a tensor dimension is sharded across multiple mesh axes,
# `shard_order` specifies the sequence in which these shardings are applied.
# This order determines how tensor shards are mapped and distributed across
# devices.
#
# Example:
# For a tensor of shape [8, 16] and a 3D device mesh, if dim 0 is sharded over
# mesh dim 1, and dim 1 is sharded over mesh dim 0 and then mesh dim 2,
# the shard_order would be:
# shard_order = (
# ShardOrderEntry(tensor_dim=0, mesh_dims=(1,)),
# ShardOrderEntry(tensor_dim=1, mesh_dims=(0, 2)),
# )
shard_order: ShardOrder = None # type: ignore[assignment]
def __post_init__(self) -> None:
if not isinstance(self.placements, tuple):
self.placements = tuple(self.placements)
if self.shard_order is None:
# pyrefly: ignore [bad-assignment]
self.shard_order = DTensorSpec.compute_default_shard_order(self.placements)
self._hash: int | None = None
@staticmethod
def compute_default_shard_order(
placements: tuple[Placement, ...],
) -> ShardOrder:
"""
Compute the default shard order from placements.
Returns a ShardOrder where each ShardOrderEntry maps a tensor dimension
to the mesh dimensions it's sharded on, in left-to-right order.
"""
# follow default left-to-right device order if shard_order is not specified
tensor_dim_to_mesh_dims: defaultdict[int, list[int]] = defaultdict(list)
mesh_ndim = len(placements)
for mesh_dim in range(mesh_ndim):
# shard_order doesn't work with _StridedShard
if isinstance(placements[mesh_dim], _StridedShard):
return ()
if isinstance(placements[mesh_dim], Shard):
placement = cast(Shard, placements[mesh_dim])
shard_dim = placement.dim
assert shard_dim >= 0, (
f"Shard dim {shard_dim} in placements {placements} must be normalized"
)
tensor_dim_to_mesh_dims[shard_dim].append(mesh_dim)
# Convert dict into ShardOrderEntry tuples
default_shard_order = tuple(
ShardOrderEntry(tensor_dim=key, mesh_dims=tuple(value))
for key, value in sorted(tensor_dim_to_mesh_dims.items())
if value
)
return default_shard_order
@staticmethod
def _convert_shard_order_to_StridedShard(
shard_order: ShardOrder, placements: tuple[Placement, ...], mesh: DeviceMesh
) -> tuple[Placement, ...]:
"""
Convert ShardOrder to placements with _StridedShard.
This function converts a ShardOrder specification into a tuple of Placement objects,
using _StridedShard when a tensor dimension is sharded across multiple mesh dimensions
in a non-default order. The split_factor of each _StridedShard is determined by the
product of mesh dimension sizes that appear earlier in the shard order but later in
the placement tuple.
Args:
shard_order: ShardOrder specification indicating which tensor dimensions are
sharded on which mesh dimensions and in what execution order.
placements: Tuple of Placement objects that does not contain _StridedShard.
mesh: DeviceMesh containing the size information for each mesh dimension.
Returns:
Updated tuple of Placement objects with Shard or _StridedShard placements.
Algorithm:
For each ShardOrderEntry in shard_order:
- For each mesh dimension in the entry's mesh_dims (in order):
- Calculate split_factor as the product of mesh sizes for all mesh dimensions
that appear:
1. Earlier in the shard order (lower index in mesh_dims), and
2. Later in the placement tuple (higher mesh dimension index)
- If split_factor == 1: use normal Shard
- Otherwise: use _StridedShard with the calculated split_factor
Example:
>>> # xdoctest: +SKIP("Requires DeviceMesh")
>>> # Tensor dimension 0 sharded on mesh dims [2, 0, 1] in that order
>>> # mesh = DeviceMesh([4, 3, 2]) # sizes: mesh[0]=4, mesh[1]=3, mesh[2]=2
>>> shard_order = (ShardOrderEntry(tensor_dim=0, mesh_dims=(2, 0, 1)),)
>>> placements = (Shard(0), Shard(0), Shard(0))
>>> # For mesh_dim=2 (index 0 in mesh_dims): no earlier dims, split_factor=1
>>> # -> placements[2] = Shard(0)
>>> # For mesh_dim=0 (index 1 in mesh_dims): mesh_dim=2 is earlier and has index 2>0
>>> # -> split_factor = mesh.size(2) = 2
>>> # -> placements[0] = _StridedShard(0, split_factor=2)
>>> # For mesh_dim=1 (index 2 in mesh_dims): mesh_dim=2 is earlier and has index 2>1
>>> # -> split_factor = mesh.size(2) = 2
>>> # -> placements[1] = _StridedShard(0, split_factor=2)
>>> # Result: (_StridedShard(0, sf=2), _StridedShard(0, sf=2), Shard(0))
"""
placements_list = list(placements)
for entry in shard_order:
tensor_dim = entry.tensor_dim
mesh_dims = entry.mesh_dims
for idx in range(len(mesh_dims)):
# TODO(zpcore): split_factor from `view` and `shard order`
# should be able to be multiplied into one. Need to loosen the
# condition here.
mesh_dim = mesh_dims[idx]
if type(placements[mesh_dim]) is not Shard:
raise ValueError(
f"Only Shard placement can be converted to _StridedShard, "
f"found {placements[mesh_dim]} in {placements=}."
)
split_factor = math.prod(
mesh.size(i) for i in mesh_dims[:idx] if i > mesh_dim
)
if split_factor == 1:
# use normal Shard
placements_list[mesh_dim] = Shard(tensor_dim)
else:
placements_list[mesh_dim] = _StridedShard(
tensor_dim, split_factor=split_factor
)
return tuple(placements_list)
@staticmethod
def _maybe_convert_StridedShard_to_shard_order(
placements: tuple[Placement, ...], mesh: DeviceMesh
) -> ShardOrder | None:
"""
Try to convert _StridedShard placements to ShardOrder.
This is the inverse of `_convert_shard_order_to_StridedShard`. It reconstructs the shard
order by examining the split_factor of each _StridedShard and determining its position
in the execution order. If the _StridedShard configuration cannot be represented as a
valid ShardOrder (i.e., there's no shard order that produces the observed split_factors),
this function returns None.
Args:
placements: Tuple of Placement objects that may contain _StridedShard.
mesh: DeviceMesh containing the size information for each mesh dimension.
Returns:
ShardOrder if conversion is possible, None otherwise. For placements without
_StridedShard, returns the default shard order.
Algorithm:
1. If no _StridedShard in placements, return default shard order
2. Create an empty list for each tensor dimension to represent mesh dim ordering
3. Iterate through placements in reverse order (right to left):
- For each Shard/_StridedShard on a tensor dimension:
- Extract its split_factor (1 for Shard, split_factor for _StridedShard)
- Find the position in mesh_dims_order where accumulated_sf equals split_factor
- accumulated_sf is the product of mesh sizes of mesh dimensions that appear
earlier in mesh_dims_order (lower indices)
- Insert mesh_dim at the found position
4. If no valid position found for any split_factor, return None (unable to convert)
5. Construct ShardOrderEntry for each tensor dimension from mesh_dims_order
Example:
>>> # xdoctest: +SKIP("Requires DeviceMesh")
>>> # mesh = DeviceMesh([4, 3, 2]) # sizes: mesh[0]=4, mesh[1]=3, mesh[2]=2
>>> # placements = (_StridedShard(0, sf=2), _StridedShard(0, sf=2), Shard(0))
>>> # Process tensor_dim=0 from right to left:
>>> # - mesh_dim=2: Shard(0) with sf=1
>>> # Try position 0: accumulated_sf=1, matches! Insert at position 0
>>> # Current mesh_dims_order order: [2]
>>> # - mesh_dim=1: _StridedShard(0, sf=2) with sf=2
>>> # Try position 0: accumulated_sf=1, no match
>>> # Try position 1: accumulated_sf=1*mesh.size(2)=2, matches! Insert at position 1
>>> # Current mesh_dims_order order: [2, 1]
>>> # - mesh_dim=0: _StridedShard(0, sf=2) with sf=2
>>> # Try position 0: accumulated_sf=1, no match
>>> # Try position 1: accumulated_sf=1*mesh.size(2)=2, matches! Insert at position 1
>>> # Final mesh_dims_order order: [2, 0, 1]
>>> # Result: ShardOrder((ShardOrderEntry(tensor_dim=0, mesh_dims=(2, 0, 1)),))
>>> # This means: first shard on mesh_dim=2, then mesh_dim=0, then mesh_dim=1
Note:
This function validates that _StridedShard can be represented as a ShardOrder.
Not all _StridedShard configurations are valid - the split_factor must match
the product of mesh sizes in some execution order.
"""
if not any(isinstance(p, _StridedShard) for p in placements):
return DTensorSpec.compute_default_shard_order(placements)
max_tensor_dim = (
max([i.dim for i in placements if isinstance(i, Shard | _StridedShard)]) + 1
)
shard_order = []
tensor_dim_to_mesh_dims_order: list[list[int]] = [
[] for i in range(max_tensor_dim)
]
for mesh_dim in reversed(range(len(placements))):
cur_placement = placements[mesh_dim]
# _StridedShard may not be a subclass of Shard in the future, so write in this way:
if isinstance(cur_placement, Shard | _StridedShard):
tensor_dim = cur_placement.dim
mesh_dims_order = tensor_dim_to_mesh_dims_order[tensor_dim]
cur_sf = 1
if isinstance(cur_placement, _StridedShard):
cur_sf = cur_placement.split_factor
accumulated_sf = 1
find_order = False
for i in range(len(mesh_dims_order) + 1):
if accumulated_sf == cur_sf:
mesh_dims_order.insert(i, mesh_dim)
find_order = True
break
if i < len(mesh_dims_order):
accumulated_sf *= mesh.size(mesh_dims_order[i])
if not find_order:
# _StridedShard is not convertible to ShardOrder
return None
else:
if not isinstance(cur_placement, Replicate | Partial | MaskPartial):
raise ValueError(
f"Unsupported placement type {type(cur_placement)} encountered in "
f"{placements}; expected Replicate, Partial, or MaskPartial."
)
for tensor_dim in range(max_tensor_dim):
if len(tensor_dim_to_mesh_dims_order[tensor_dim]) > 0:
shard_order.append(
ShardOrderEntry(
tensor_dim=tensor_dim,
mesh_dims=tuple(tensor_dim_to_mesh_dims_order[tensor_dim]),
)
)
return tuple(shard_order)
def _verify_shard_order(self, shard_order: ShardOrder) -> None:
"""Verify that the shard_order is valid and matches the placements."""
total_shard = 0
if any(isinstance(p, _StridedShard) for p in self.placements):
return
prev_tensor_dim = -1
for entry in shard_order:
tensor_dim = entry.tensor_dim
mesh_dims = entry.mesh_dims
assert len(mesh_dims) > 0, f"shard_order {shard_order} has empty mesh dim"
assert tensor_dim >= 0, (
f"shard_order {shard_order} has invalid tensor dim {tensor_dim}"
)
assert tensor_dim > prev_tensor_dim, (
"tensor dim should be sorted in shard_order"
)
prev_tensor_dim = tensor_dim
total_shard += len(mesh_dims)
for mesh_dim in mesh_dims:
assert 0 <= mesh_dim < len(self.placements), (
f"shard_order {shard_order} has invalid mesh dim {mesh_dims}"
)
assert self.placements[mesh_dim] == Shard(tensor_dim), (
f"placement[{mesh_dim}] doesn't have a matching shard in shard_order"
)
assert total_shard == sum(1 for p in self.placements if isinstance(p, Shard))
def __setattr__(self, attr: str, value: Any) -> None:
if attr == "shard_order" and value is not None:
self._verify_shard_order(value)
super().__setattr__(attr, value)
# Make sure to recompute the hash in case any of the hashed attributes
# change (though we do not expect `mesh`, `placements` or `shard_order`
# to change)
if hasattr(self, "_hash") and attr in (
"mesh",
"placements",
"tensor_meta",
"shard_order",
):
self._hash = None
# This assert was triggered by buggy handling for dict outputs in some
# FX passes, where you accidentally iterate over a dict and try to put
# keys into TensorMeta. See https://github.com/pytorch/pytorch/issues/157919
if attr == "tensor_meta" and value is not None:
from torch.fx.passes.shape_prop import TensorMetadata
# TODO: the TensorMetadata arises from
# test/distributed/tensor/experimental/test_tp_transform.py::TensorParallelTest::test_tp_transform_e2e
# but I actually can't reproduce it, maybe it is also a bug!
assert isinstance(value, TensorMeta | TensorMetadata), value
def _hash_impl(self) -> int:
# hashing and equality check for DTensorSpec are used to cache the sharding
# propagation results. We only need to consider the mesh, placements, shape
# dtype and stride.
# Caveat: we need to keep this in mind and sync hash and eq if we add more
# fields to them.
if self.tensor_meta is not None:
return hash(
(
self.mesh,
self.placements,
self.shard_order,
self.tensor_meta.shape,
self.tensor_meta.stride,
self.tensor_meta.dtype,
)
)
return hash((self.mesh, self.placements, self.shard_order))
def __hash__(self) -> int:
# We lazily cache the spec to avoid recomputing the hash upon each
# use, where we make sure to update the hash when the `tensor_meta`
# changes by overriding `__setattr__`. This must be lazy so that Dynamo
# does not try to hash non-singleton `SymInt`s for the stride.
if self._hash is None:
self._hash = self._hash_impl()
return self._hash
def _check_equals(self, other: object, skip_shapes: bool = False) -> bool:
if not (
isinstance(other, DTensorSpec)
and self.mesh == other.mesh
and self.placements == other.placements
and self.shard_order == other.shard_order
):
return False
if self.tensor_meta is None or other.tensor_meta is None:
return self.tensor_meta == other.tensor_meta
if skip_shapes:
return self.tensor_meta.dtype == other.tensor_meta.dtype
return (
self.tensor_meta.shape == other.tensor_meta.shape # type: ignore[union-attr]
and self.tensor_meta.stride == other.tensor_meta.stride # type: ignore[union-attr]
and self.tensor_meta.dtype == other.tensor_meta.dtype # type: ignore[union-attr]
)
def __eq__(self, other: object, /) -> bool:
return self._check_equals(other)
def __str__(self) -> str:
"""
human readable representation of the DTensorSpec
"""
placement_str = self.format_shard_order_str(self.placements, self.shard_order)
if self.tensor_meta is not None:
tensor_shape = _stringify_shape(self.tensor_meta.shape)
tensor_dtype = dtype_abbrs[self.tensor_meta.dtype]
else:
tensor_shape = "unknown shape"
tensor_dtype = "unknown dtype"
return f"Spec({tensor_dtype}{tensor_shape}({placement_str}))"
@staticmethod
def is_default_device_order(shard_order: ShardOrder) -> bool:
"""
Check if the device order is the default left-to-right order.
"""
for entry in shard_order:
mesh_dims = entry.mesh_dims
is_increasing = all(
prev < nxt for prev, nxt in itertools.pairwise(mesh_dims)
)
if not is_increasing:
return False
return True
@staticmethod
def format_shard_order_str(
placements: tuple[Placement, ...],
shard_order: ShardOrder | None = None,
) -> str:
"""
Format DTensor sharding information as a human-readable string.
This method formats the sharding pattern in mesh-centric order, showing the placement
for each mesh dimension sequentially. When a tensor dimension is sharded across multiple
mesh dimensions, the order index indicates the execution sequence of the sharding operations.
Args:
placements: Tuple of placement objects for each mesh dimension.
shard_order: Optional ShardOrder specifying the sharding order.
Returns:
String representation of the sharding pattern in mesh-centric format.
Example:
For a 3D tensor on a 2x2x2x2 mesh (16 devices) with::
placements = [Partial(), Shard(1), Shard(1), Replicate()]
shard_order = (ShardOrderEntry(tensor_dim=1, mesh_dims=(2, 1)),)
Mesh configuration:
- mesh_dim_0: Partial reduction (sum)
- mesh_dim_1: Shard tensor dimension 1 (executed second, order index 1)
- mesh_dim_2: Shard tensor dimension 1 (executed first, order index 0)
- mesh_dim_3: Replicate
Output: ``"PS(1)[1]S(1)[0]R"``
Explanation:
- ``P``: mesh dimension 0 has partial reduction
- ``S(1)[1]``: mesh dimension 1 shards tensor dimension 1 (order index 1 means second)
- ``S(1)[0]``: mesh dimension 2 shards tensor dimension 1 (order index 0 means first)
- ``R``: mesh dimension 3 replicates
The format follows mesh dimension order (0, 1, 2, 3), and when a tensor dimension
is sharded across multiple mesh dimensions, the bracketed index shows the execution
order: ``[0]`` is executed first, ``[1]`` is executed second, etc.
"""
out_str = ""
# native dtensor-style sharding representation: map from mesh
# dim to tensor dim
for mesh_dim, placement in enumerate(placements):
if isinstance(placement, Shard):
if shard_order is not None:
for entry in shard_order:
tensor_dim = entry.tensor_dim
mesh_dims = entry.mesh_dims
if placement.dim == tensor_dim:
assert mesh_dim in mesh_dims
if len(mesh_dims) > 1:
out_str += f"{placement}[{mesh_dims.index(mesh_dim)}]"
else:
# no need to show device order if the tensor dim is
# only sharded in one mesh dim
out_str += str(placement)
break
else:
out_str += str(placement)
else:
out_str += str(placement)
return out_str
@property
def shape(self) -> torch.Size:
if self.tensor_meta is None:
raise ValueError("tensor_meta is not set")
return self.tensor_meta.shape
@property
def stride(self) -> tuple[int, ...]:
if self.tensor_meta is None:
raise ValueError("tensor_meta is not set")
return self.tensor_meta.stride
@property
def ndim(self) -> int:
if self.tensor_meta is None:
raise ValueError("tensor_meta is not set")
return len(self.tensor_meta.shape)
@property
def num_shards(self) -> int:
num_shards = 1
for i, placement in enumerate(self.placements):
if placement.is_shard():
num_shards *= self.mesh.size(i)
return num_shards
@property
def device_mesh(self) -> DeviceMesh:
        # simple aliasing for the mesh field, makes some
        # checks that mix DTensor/DTensorSpec easier
return self.mesh
@property
def dim_map(self) -> list[int]:
"""
dim_map is a property we derive from `placements` of
        the distributed tensor. It simply returns a list of ints
        where dim_map[i] denotes the sharding mapping to the mesh
        dimension, and len(dim_map) == dist_tensor.ndim
        dim_map[i] = -1: means tensor dim i is replicated on the mesh
        dim_map[i] = j: means tensor dim i is sharded on mesh dim j
        For example, if we have a dist tensor that has the shape of
        [18, 20, 30], and device_mesh([0, 1, 2, 3]), placements:
        [Shard(1)], the dim_map of this placement would be:
        [-1, 0, -1]. This representation is pretty helpful during
        sharding propagation, where we can know exactly whether each
        tensor dimension is sharded or not.
Note that if placements contains `_Partial`, we have to
explicitly deal with it, so that when we create a DTensorSpec
with dim_map, we could properly record the pending sums.
"""
# dims mapping of dist tensor sharding
# return size of tensor ndim, -1 represent replicate
# and int >=0 represent shard on that device mesh dim
r = [-1] * self.ndim
for i, placement in enumerate(self.placements):
if placement.is_shard():
shard_dim = cast(Shard, placement).dim
if r[shard_dim] > -1:
raise ValueError(
f"Tensor dim {shard_dim} is already sharded on mesh dim {r[shard_dim]},"
" DTensor operator implementation does not support things like hybrid"
" sharding strategies yet (i.e. [Shard(0), Shard(0)])"
)
r[shard_dim] = i
return r
@property
def num_shards_map(self) -> list[int]:
"""
        num_shards_map is a property we derive from `placements` of
        the distributed tensor. Unlike `dim_map`, `num_shards_map`
denotes how many shards each tensor dim has. Like `dim_map`:
len(num_shards_map) == dist_tensor.ndim
num_shards_map[i] = 1: means tensor dim i is not sharded
num_shards_map[i] = j: means tensor dim i has j shards in total
For example, we have a dist tensor of shape [18, 20, 30],
a device_mesh ([[0, 1, 2, 3], [4, 5, 6, 7]]), and placements
([Shard(1), Shard(0)]), the num_shards_map of this distributed tensor
would be: [4, 2, 1].
"""
r = [1] * self.ndim
for i, placement in enumerate(self.placements):
if placement.is_shard():
shard_dim = cast(Shard, placement).dim
r[shard_dim] *= self.mesh.size(i)
return r
@property
def sums(self) -> list[int]:
"""
sums is a property we derive from `placements` of the
        distributed tensor. It simply returns a list of ints where
sums[i] denotes the pending sum (partial) on mesh dim i
"""
return [
idx
for idx, placement in enumerate(self.placements)
if placement.is_partial()
]
@classmethod
def from_dim_map(
cls,
mesh: DeviceMesh,
dim_map: list[int],
sums: list[int],
tensor_meta: TensorMeta | None = None,
) -> "DTensorSpec":
"""
Construct a DTensorSpec from dim_map list and pending sum.
Args:
mesh (class:`DeviceMesh`): device mesh to be used in the DTensorSpec
            dim_map (List[int]): a list of integers that represents sharding on each
                tensor dimension, see `dim_map` property doc for details
            sums (List[int]): a list of integers giving the device mesh dimensions
                on which the dist tensor has pending sums.
            tensor_meta (TensorMeta): DTensor metadata
Return:
a class:`DTensorSpec` object
"""
# by default replicate on device mesh dims
placements: list[Placement] = [Replicate() for _ in range(mesh.ndim)]
# find all mesh dims that need pending reductions
for s in sums:
placements[s] = Partial()
for i, m in enumerate(dim_map):
if m >= 0:
placement = placements[m]
if placement.is_shard():
placement = cast(Shard, placement)
raise RuntimeError(
f"DeviceMesh dimension can't be mapped to two dimension of the same tensor: {i} and {placement.dim}"
)
elif placement.is_partial():
raise RuntimeError(
f"DeviceMesh dimension {m} cannot be both shard and partial!"
)
placements[m] = Shard(i)
return cls(mesh, tuple(placements), tensor_meta=tensor_meta)
def is_replicated(self) -> bool:
"""
return True if the current DTensorSpec replicates on all mesh dims (devices)
"""
return all(placement.is_replicate() for placement in self.placements)
def is_sharded(self) -> bool:
"""
return True if the current DTensorSpec is sharded on any mesh dims (devices)
"""
return any(placement.is_shard() for placement in self.placements)
def shallow_copy_with_tensor_meta(
self, tensor_meta: TensorMeta | None
) -> "DTensorSpec":
"""
Shallow copy the DTensorSpec with a new tensor_meta.
"""
assert tensor_meta is not None, "shallow copy with no tensor_meta!"
return DTensorSpec(
self.mesh,
self.placements,
tensor_meta=tensor_meta,
)
| DTensorSpec |
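A standalone worked example of the split_factor arithmetic documented in _convert_shard_order_to_StridedShard above, using the docstring's own numbers (mesh sizes 4, 3, 2; one tensor dim sharded over mesh dims in the order 2, 0, 1). It reproduces the documented algorithm with plain lists and does not depend on DeviceMesh.
import math

mesh_sizes = [4, 3, 2]
mesh_dims_in_shard_order = (2, 0, 1)

split_factors = {}
for idx, mesh_dim in enumerate(mesh_dims_in_shard_order):
    # product of mesh sizes for dims that come earlier in the shard order
    # but later in the placement tuple
    earlier = mesh_dims_in_shard_order[:idx]
    split_factors[mesh_dim] = math.prod(
        mesh_sizes[d] for d in earlier if d > mesh_dim
    )

# mesh dim 2 is applied first -> plain Shard (split_factor 1);
# mesh dims 0 and 1 both have mesh dim 2 ahead of them in the shard order,
# and 2 sits later in the placement tuple -> split_factor = mesh_sizes[2] = 2.
assert split_factors == {2: 1, 0: 2, 1: 2}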
python | ipython__ipython | tests/test_run.py | {
"start": 3813,
"end": 6108
} | class ____(tt.TempFileMixin):
def setUp(self):
content = "a = [1,2,3]\nb = 1"
self.mktmp(content)
def run_tmpfile(self):
_ip = get_ipython()
# This fails on Windows if self.tmpfile.name has spaces or "~" in it.
# See below and ticket https://bugs.launchpad.net/bugs/366353
_ip.run_line_magic("run", self.fname)
def run_tmpfile_p(self):
_ip = get_ipython()
# This fails on Windows if self.tmpfile.name has spaces or "~" in it.
# See below and ticket https://bugs.launchpad.net/bugs/366353
_ip.run_line_magic("run", "-p %s" % self.fname)
def test_builtins_id(self):
"""Check that %run doesn't damage __builtins__"""
_ip = get_ipython()
# Test that the id of __builtins__ is not modified by %run
bid1 = id(_ip.user_ns["__builtins__"])
self.run_tmpfile()
bid2 = id(_ip.user_ns["__builtins__"])
assert bid1 == bid2
def test_builtins_type(self):
"""Check that the type of __builtins__ doesn't change with %run.
However, the above could pass if __builtins__ was already modified to
be a dict (it should be a module) by a previous use of %run. So we
also check explicitly that it really is a module:
"""
_ip = get_ipython()
self.run_tmpfile()
assert type(_ip.user_ns["__builtins__"]) == type(sys)
def test_run_profile(self):
"""Test that the option -p, which invokes the profiler, do not
crash by invoking execfile"""
self.run_tmpfile_p()
def test_run_debug_twice(self):
# https://github.com/ipython/ipython/issues/10028
_ip = get_ipython()
with tt.fake_input(["c"]):
_ip.run_line_magic("run", "-d %s" % self.fname)
with tt.fake_input(["c"]):
_ip.run_line_magic("run", "-d %s" % self.fname)
def test_run_debug_twice_with_breakpoint(self):
"""Make a valid python temp file."""
_ip = get_ipython()
with tt.fake_input(["b 2", "c", "c"]):
_ip.run_line_magic("run", "-d %s" % self.fname)
with tt.fake_input(["c"]):
with tt.AssertNotPrints("KeyError"):
_ip.run_line_magic("run", "-d %s" % self.fname)
| TestMagicRunPass |
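A framework-free sketch of the invariant these %run tests assert, namely that executing user code must not rebind the namespace's __builtins__ entry; plain exec() stands in for IPython's %run machinery here.
import types

user_ns = {}
exec("a = [1,2,3]\nb = 1", user_ns)           # first "run"
builtins_id_before = id(user_ns["__builtins__"])
exec("c = a[0] + b", user_ns)                  # second "run"
builtins_id_after = id(user_ns["__builtins__"])
assert builtins_id_before == builtins_id_after
# IPython additionally checks the entry is still a module, not a plain dict;
# with bare exec() it may be either, so both are accepted in this sketch.
assert isinstance(user_ns["__builtins__"], (dict, types.ModuleType))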
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/lambda9.py | {
"start": 514,
"end": 880
} | class ____: ...
x1 = Flow[Data]().map(lambda aa: _get_date(reveal_type(aa, expected_text="Data")))
reveal_type(x1, expected_text="Flow[str]")
x2 = x1.map(lambda bb: reveal_type(bb, expected_text="str"))
reveal_type(x2, expected_text="Flow[str]")
x3 = x2.map(lambda cc: "any value")
reveal_type(x3, expected_text="Flow[str]")
def _get_date(d: Data) -> str: ...
| Data |
python | PrefectHQ__prefect | src/integrations/prefect-gcp/tests/test_cloud_run_worker_v2.py | {
"start": 1883,
"end": 14935
} | class ____:
def test_project(self, cloud_run_worker_v2_job_config):
assert cloud_run_worker_v2_job_config.project == "my_project"
def test_job_name(self, cloud_run_worker_v2_job_config):
assert cloud_run_worker_v2_job_config.job_name[:-33] == "my-job-name"
def test_job_name_is_slug(self, cloud_run_worker_v2_job_config_noncompliant_name):
assert cloud_run_worker_v2_job_config_noncompliant_name.job_name[
:-33
] == slugify_name("MY_JOB_NAME")
def test_job_name_different_after_retry(self, cloud_run_worker_v2_job_config):
job_name_1 = cloud_run_worker_v2_job_config.job_name
cloud_run_worker_v2_job_config._job_name = None
job_name_2 = cloud_run_worker_v2_job_config.job_name
assert job_name_1[:-33] == job_name_2[:-33]
assert job_name_1 != job_name_2
def test_populate_timeout(self, cloud_run_worker_v2_job_config):
cloud_run_worker_v2_job_config._populate_timeout()
assert (
cloud_run_worker_v2_job_config.job_body["template"]["template"]["timeout"]
== "86400s"
)
def test_populate_env(self, cloud_run_worker_v2_job_config):
cloud_run_worker_v2_job_config._populate_env()
assert cloud_run_worker_v2_job_config.job_body["template"]["template"][
"containers"
][0]["env"] == [
{"name": "ENV1", "value": "VALUE1"},
{"name": "ENV2", "value": "VALUE2"},
]
def test_populate_env_with_secrets(self, cloud_run_worker_v2_job_config):
cloud_run_worker_v2_job_config.env_from_secrets = {
"SECRET_ENV1": SecretKeySelector(secret="SECRET1", version="latest")
}
cloud_run_worker_v2_job_config._populate_env()
assert cloud_run_worker_v2_job_config.job_body["template"]["template"][
"containers"
][0]["env"] == [
{"name": "ENV1", "value": "VALUE1"},
{"name": "ENV2", "value": "VALUE2"},
{
"name": "SECRET_ENV1",
"valueSource": {
"secretKeyRef": {"secret": "SECRET1", "version": "latest"}
},
},
]
def test_populate_env_with_existing_envs(self, cloud_run_worker_v2_job_config):
cloud_run_worker_v2_job_config.job_body["template"]["template"]["containers"][
0
]["env"] = [{"name": "ENV0", "value": "VALUE0"}]
cloud_run_worker_v2_job_config.env_from_secrets = {
"SECRET_ENV1": SecretKeySelector(secret="SECRET1", version="latest")
}
cloud_run_worker_v2_job_config._populate_env()
assert cloud_run_worker_v2_job_config.job_body["template"]["template"][
"containers"
][0]["env"] == [
{"name": "ENV0", "value": "VALUE0"},
{"name": "ENV1", "value": "VALUE1"},
{"name": "ENV2", "value": "VALUE2"},
{
"name": "SECRET_ENV1",
"valueSource": {
"secretKeyRef": {"secret": "SECRET1", "version": "latest"}
},
},
]
def test_populate_image_if_not_present(self, cloud_run_worker_v2_job_config):
cloud_run_worker_v2_job_config._populate_image_if_not_present()
assert (
cloud_run_worker_v2_job_config.job_body["template"]["template"][
"containers"
][0]["image"]
== f"docker.io/{get_prefect_image_name()}"
)
def test_populate_or_format_command(self, cloud_run_worker_v2_job_config):
cloud_run_worker_v2_job_config._populate_or_format_command()
assert cloud_run_worker_v2_job_config.job_body["template"]["template"][
"containers"
][0]["command"] == ["prefect", "flow-run", "execute"]
def test_format_args_if_present(self, cloud_run_worker_v2_job_config):
cloud_run_worker_v2_job_config._format_args_if_present()
assert cloud_run_worker_v2_job_config.job_body["template"]["template"][
"containers"
][0]["args"] == ["-m", "prefect.engine"]
@pytest.mark.parametrize("vpc_access", [{"connector": None}, {}, None])
def test_remove_vpc_access_if_connector_unset(
self, cloud_run_worker_v2_job_config, vpc_access
):
cloud_run_worker_v2_job_config.job_body["template"]["template"]["vpcAccess"] = (
vpc_access
)
cloud_run_worker_v2_job_config._remove_vpc_access_if_unset()
assert (
"vpcAccess"
not in cloud_run_worker_v2_job_config.job_body["template"]["template"]
)
def test_remove_vpc_access_originally_not_present(
self, cloud_run_worker_v2_job_config
):
cloud_run_worker_v2_job_config.job_body["template"]["template"].pop("vpcAccess")
cloud_run_worker_v2_job_config._remove_vpc_access_if_unset()
assert (
"vpcAccess"
not in cloud_run_worker_v2_job_config.job_body["template"]["template"]
)
def test_vpc_access_left_alone_if_connector_set(
self, cloud_run_worker_v2_job_config
):
cloud_run_worker_v2_job_config.job_body["template"]["template"]["vpcAccess"][
"connector"
] = "projects/my_project/locations/us-central1/connectors/my-connector"
cloud_run_worker_v2_job_config._remove_vpc_access_if_unset()
assert cloud_run_worker_v2_job_config.job_body["template"]["template"][
"vpcAccess"
] == {
"connector": "projects/my_project/locations/us-central1/connectors/my-connector" # noqa E501
}
def test_vpc_access_left_alone_if_network_config_set(
self, cloud_run_worker_v2_job_config
):
cloud_run_worker_v2_job_config.job_body["template"]["template"]["vpcAccess"][
"networkInterfaces"
] = [{"network": "projects/my_project/global/networks/my-network"}]
cloud_run_worker_v2_job_config._remove_vpc_access_if_unset()
assert cloud_run_worker_v2_job_config.job_body["template"]["template"][
"vpcAccess"
] == {
"connector": None,
"networkInterfaces": [
{"network": "projects/my_project/global/networks/my-network"}
],
}
def test_configure_cloudsql_volumes_no_instances(
self, cloud_run_worker_v2_job_config
):
cloud_run_worker_v2_job_config.cloudsql_instances = []
cloud_run_worker_v2_job_config._configure_cloudsql_volumes()
template = cloud_run_worker_v2_job_config.job_body["template"]["template"]
assert "volumes" not in template
assert "volumeMounts" not in template["containers"][0]
def test_configure_cloudsql_volumes_preserves_existing_volumes(
self, cloud_run_worker_v2_job_config
):
template = cloud_run_worker_v2_job_config.job_body["template"]["template"]
template["volumes"] = [{"name": "existing-volume", "emptyDir": {}}]
template["containers"][0]["volumeMounts"] = [
{"name": "existing-volume", "mountPath": "/existing"}
]
cloud_run_worker_v2_job_config.cloudsql_instances = ["project:region:instance1"]
cloud_run_worker_v2_job_config._configure_cloudsql_volumes()
assert len(template["volumes"]) == 2
assert template["volumes"][0] == {"name": "existing-volume", "emptyDir": {}}
assert template["volumes"][1] == {
"name": "cloudsql",
"cloudSqlInstance": {"instances": ["project:region:instance1"]},
}
assert len(template["containers"][0]["volumeMounts"]) == 2
assert template["containers"][0]["volumeMounts"][0] == {
"name": "existing-volume",
"mountPath": "/existing",
}
assert template["containers"][0]["volumeMounts"][1] == {
"name": "cloudsql",
"mountPath": "/cloudsql",
}
def test_prepare_for_flow_run_configures_cloudsql(
self, cloud_run_worker_v2_job_config
):
cloud_run_worker_v2_job_config.cloudsql_instances = ["project:region:instance1"]
class MockFlowRun:
id = "test-id"
name = "test-run"
cloud_run_worker_v2_job_config.prepare_for_flow_run(
flow_run=MockFlowRun(), deployment=None, flow=None
)
template = cloud_run_worker_v2_job_config.job_body["template"]["template"]
assert any(
vol["name"] == "cloudsql"
and vol["cloudSqlInstance"]["instances"] == ["project:region:instance1"]
for vol in template["volumes"]
)
assert any(
mount["name"] == "cloudsql" and mount["mountPath"] == "/cloudsql"
for mount in template["containers"][0]["volumeMounts"]
)
def test_populate_env_with_prefect_api_key_secret(
self, cloud_run_worker_v2_job_config
):
cloud_run_worker_v2_job_config.prefect_api_key_secret = SecretKeySelector(
secret="prefect-api-key", version="latest"
)
cloud_run_worker_v2_job_config._populate_env()
env_vars = cloud_run_worker_v2_job_config.job_body["template"]["template"][
"containers"
][0]["env"]
assert {"name": "ENV1", "value": "VALUE1"} in env_vars
assert {"name": "ENV2", "value": "VALUE2"} in env_vars
assert {
"name": "PREFECT_API_KEY",
"valueSource": {
"secretKeyRef": {"secret": "prefect-api-key", "version": "latest"}
},
} in env_vars
def test_populate_env_with_prefect_api_auth_string_secret(
self, cloud_run_worker_v2_job_config
):
cloud_run_worker_v2_job_config.prefect_api_auth_string_secret = (
SecretKeySelector(secret="prefect-auth-string", version="latest")
)
cloud_run_worker_v2_job_config._populate_env()
env_vars = cloud_run_worker_v2_job_config.job_body["template"]["template"][
"containers"
][0]["env"]
assert {"name": "ENV1", "value": "VALUE1"} in env_vars
assert {"name": "ENV2", "value": "VALUE2"} in env_vars
assert {
"name": "PREFECT_API_AUTH_STRING",
"valueSource": {
"secretKeyRef": {"secret": "prefect-auth-string", "version": "latest"}
},
} in env_vars
def test_populate_env_with_both_prefect_secrets(
self, cloud_run_worker_v2_job_config
):
cloud_run_worker_v2_job_config.prefect_api_key_secret = SecretKeySelector(
secret="prefect-api-key", version="latest"
)
cloud_run_worker_v2_job_config.prefect_api_auth_string_secret = (
SecretKeySelector(secret="prefect-auth-string", version="latest")
)
cloud_run_worker_v2_job_config._populate_env()
env_vars = cloud_run_worker_v2_job_config.job_body["template"]["template"][
"containers"
][0]["env"]
assert {"name": "ENV1", "value": "VALUE1"} in env_vars
assert {"name": "ENV2", "value": "VALUE2"} in env_vars
assert {
"name": "PREFECT_API_KEY",
"valueSource": {
"secretKeyRef": {"secret": "prefect-api-key", "version": "latest"}
},
} in env_vars
assert {
"name": "PREFECT_API_AUTH_STRING",
"valueSource": {
"secretKeyRef": {"secret": "prefect-auth-string", "version": "latest"}
},
} in env_vars
def test_populate_env_with_all_secret_types(self, cloud_run_worker_v2_job_config):
cloud_run_worker_v2_job_config.env_from_secrets = {
"SECRET_ENV1": SecretKeySelector(secret="SECRET1", version="latest")
}
cloud_run_worker_v2_job_config.prefect_api_key_secret = SecretKeySelector(
secret="prefect-api-key", version="latest"
)
cloud_run_worker_v2_job_config.prefect_api_auth_string_secret = (
SecretKeySelector(secret="prefect-auth-string", version="latest")
)
cloud_run_worker_v2_job_config._populate_env()
env_vars = cloud_run_worker_v2_job_config.job_body["template"]["template"][
"containers"
][0]["env"]
assert {"name": "ENV1", "value": "VALUE1"} in env_vars
assert {"name": "ENV2", "value": "VALUE2"} in env_vars
assert {
"name": "SECRET_ENV1",
"valueSource": {"secretKeyRef": {"secret": "SECRET1", "version": "latest"}},
} in env_vars
assert {
"name": "PREFECT_API_KEY",
"valueSource": {
"secretKeyRef": {"secret": "prefect-api-key", "version": "latest"}
},
} in env_vars
assert {
"name": "PREFECT_API_AUTH_STRING",
"valueSource": {
"secretKeyRef": {"secret": "prefect-auth-string", "version": "latest"}
},
} in env_vars
| TestCloudRunWorkerJobV2Configuration |
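The tests above repeatedly assert the shape of the env list inside job_body["template"]["template"]["containers"][0]. Below is a small sketch of that merging shape built from plain dicts; the helper name and data are illustrative, not the worker's real implementation.
def build_env(plain_env, env_from_secrets):
    # plain name/value pairs first, then secret-backed entries
    env = [{"name": k, "value": v} for k, v in plain_env.items()]
    for name, selector in env_from_secrets.items():
        env.append(
            {
                "name": name,
                "valueSource": {
                    "secretKeyRef": {
                        "secret": selector["secret"],
                        "version": selector["version"],
                    }
                },
            }
        )
    return env

env = build_env(
    {"ENV1": "VALUE1", "ENV2": "VALUE2"},
    {"SECRET_ENV1": {"secret": "SECRET1", "version": "latest"}},
)
assert env == [
    {"name": "ENV1", "value": "VALUE1"},
    {"name": "ENV2", "value": "VALUE2"},
    {
        "name": "SECRET_ENV1",
        "valueSource": {"secretKeyRef": {"secret": "SECRET1", "version": "latest"}},
    },
]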
python | PyCQA__pyflakes | pyflakes/messages.py | {
"start": 6343,
"end": 6543
} | class ____(Message):
"""
Too many expressions in an assignment with star-unpacking
"""
message = 'too many expressions in star-unpacking assignment'
| TooManyExpressionsInStarredAssignment |
python | pymupdf__PyMuPDF | wdev.py | {
"start": 9458,
"end": 14888
} | class ____:
'''
Windows only. Information about installed Python with specific word size
and version. Defaults to the currently-running Python.
Members:
.path:
Path of python binary.
.version:
`{major}.{minor}`, e.g. `3.9` or `3.11`. Same as `version` passed
to `__init__()` if not None, otherwise the inferred version.
.include:
Python include path.
.cpu:
A `WindowsCpu` instance, same as `cpu` passed to `__init__()` if
not None, otherwise the inferred cpu.
We parse the output from `py -0p` to find all available python
installations.
'''
def __init__( self, cpu=None, version=None, verbose=True):
'''
Args:
cpu:
A WindowsCpu instance. If None, we use whatever we are running
on.
version:
Two-digit Python version as a string such as `3.8`. If None we
use current Python's version.
verbose:
If true we show diagnostics.
'''
if cpu is None:
cpu = WindowsCpu(_cpu_name())
if version is None:
version = '.'.join(platform.python_version().split('.')[:2])
_log(f'Looking for Python {version=} {cpu.bits=}.')
if '.'.join(platform.python_version().split('.')[:2]) == version:
# Current python matches, so use it directly. This avoids problems
# on Github where experimental python-3.13 was not available via
            # `py`, and is kept here in case a similar problem happens with
# future Python versions.
_log(f'{cpu=} {version=}: using {sys.executable=}.')
self.path = sys.executable
self.version = version
self.cpu = cpu
self.include = sysconfig.get_path('include')
else:
command = 'py -0p'
if verbose:
_log(f'{cpu=} {version=}: Running: {command}')
text = subprocess.check_output( command, shell=True, text=True)
for line in text.split('\n'):
#_log( f' {line}')
if m := re.match( '^ *-V:([0-9.]+)(-32)? ([*])? +(.+)$', line):
version2 = m.group(1)
bits = 32 if m.group(2) else 64
current = m.group(3)
path = m.group(4).strip()
elif m := re.match( '^ *-([0-9.]+)-((32)|(64)) +(.+)$', line):
version2 = m.group(1)
bits = int(m.group(2))
path = m.group(5).strip()
else:
if verbose:
_log( f'No match for {line=}')
continue
if verbose:
_log( f'{version2=} {bits=} {path=} from {line=}.')
if bits != cpu.bits or version2 != version:
continue
root = os.path.dirname(path)
if not os.path.exists(path):
# Sometimes it seems that the specified .../python.exe does not exist,
# and we have to change it to .../python<version>.exe.
#
assert path.endswith('.exe'), f'path={path!r}'
path2 = f'{path[:-4]}{version}.exe'
_log( f'Python {path!r} does not exist; changed to: {path2!r}')
assert os.path.exists( path2)
path = path2
self.path = path
self.version = version
self.cpu = cpu
command = f'{self.path} -c "import sysconfig; print(sysconfig.get_path(\'include\'))"'
_log(f'Finding Python include path by running {command=}.')
self.include = subprocess.check_output(command, shell=True, text=True).strip()
_log(f'Python include path is {self.include=}.')
#_log( f'pipcl.py:WindowsPython():\n{self.description_ml(" ")}')
break
else:
_log(f'Failed to find python matching cpu={cpu}.')
_log(f'Output from {command!r} was:\n{text}')
raise Exception( f'Failed to find python matching cpu={cpu} {version=}.')
# Oddly there doesn't seem to be a
# `sysconfig.get_path('libs')`, but it seems to be next
# to `includes`:
self.libs = os.path.abspath(f'{self.include}/../libs')
_log( f'WindowsPython:\n{self.description_ml(" ")}')
def description_ml(self, indent=''):
ret = textwrap.dedent(f'''
path: {self.path}
version: {self.version}
cpu: {self.cpu}
include: {self.include}
libs: {self.libs}
''')
return textwrap.indent( ret, indent)
def __repr__(self):
return f'path={self.path!r} version={self.version!r} cpu={self.cpu!r} include={self.include!r} libs={self.libs!r}'
# Internal helpers.
#
def _cpu_name():
'''
Returns `x32` or `x64` depending on Python build.
'''
#log(f'sys.maxsize={hex(sys.maxsize)}')
return f'x{32 if sys.maxsize == 2**31 - 1 else 64}'
def _log(text='', caller=1):
'''
Logs lines with prefix.
'''
pipcl.log1(text, caller+1)
| WindowsPython |
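A standalone sketch of the `py -0p` parsing used above, applied to two assumed sample output lines (real launcher output varies by version, so the exact line formats here are an assumption).
import re

sample = [
    " -V:3.11 *        C:\\Python311\\python.exe",
    " -3.9-64          C:\\Python39\\python.exe",
]

found = []
for line in sample:
    if m := re.match(r"^ *-V:([0-9.]+)(-32)? ([*])? +(.+)$", line):
        # newer launcher format: optional -32 suffix, optional * for the default
        found.append((m.group(1), 32 if m.group(2) else 64, m.group(4).strip()))
    elif m := re.match(r"^ *-([0-9.]+)-((32)|(64)) +(.+)$", line):
        # older launcher format: version-bits pairs
        found.append((m.group(1), int(m.group(2)), m.group(5).strip()))

assert found == [
    ("3.11", 64, "C:\\Python311\\python.exe"),
    ("3.9", 64, "C:\\Python39\\python.exe"),
]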
python | huggingface__transformers | src/transformers/models/speecht5/modeling_speecht5.py | {
"start": 21441,
"end": 27904
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.feature_encoder = SpeechT5FeatureEncoder(config)
self.feature_projection = SpeechT5FeatureProjection(config)
# model only needs masking vector if mask prob is > 0.0
if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_())
self.pos_conv_embed = SpeechT5PositionalConvEmbedding(config)
self.pos_sinusoidal_embed = SpeechT5SinusoidalPositionalEmbedding(
config.max_speech_positions + config.pad_token_id + 1,
config.hidden_size,
config.pad_token_id,
)
def freeze_feature_encoder(self):
self.feature_encoder._freeze_parameters()
def forward(
self,
input_values: torch.Tensor,
attention_mask: Optional[torch.LongTensor] = None,
mask_time_indices: Optional[torch.FloatTensor] = None,
):
extract_features = self.feature_encoder(input_values)
extract_features = extract_features.transpose(1, 2)
if attention_mask is not None:
# compute reduced attention_mask corresponding to feature vectors
attention_mask = self._get_feature_vector_attention_mask(
extract_features.shape[1],
attention_mask,
)
hidden_states, extract_features = self.feature_projection(extract_features)
hidden_states = self._mask_hidden_states(
hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
)
positional_conv_embedding = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + positional_conv_embedding
if attention_mask is not None:
padding_mask = attention_mask.ne(1).long()
else:
padding_mask = torch.zeros(hidden_states.shape[:2], dtype=torch.long, device=hidden_states.device)
positional_sinusoidal_embeddings = self.pos_sinusoidal_embed(padding_mask)
hidden_states = hidden_states + positional_sinusoidal_embeddings
return hidden_states, attention_mask
# Copied from transformers.models.unispeech.modeling_unispeech.UniSpeechPreTrainedModel._get_feature_vector_attention_mask
def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
# Effectively attention_mask.sum(-1), but not inplace to be able to run
# on inference mode.
non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long)
batch_size = attention_mask.shape[0]
attention_mask = torch.zeros(
(batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
)
        # these two operations make sure that all values before the output length indices are attended to
attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
return attention_mask
# Copied from transformers.models.unispeech.modeling_unispeech.UniSpeechPreTrainedModel._get_feat_extract_output_lengths
def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
"""
Computes the output length of the convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride):
# 1D convolutional layer output length formula taken
# from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1
for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
return input_lengths
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states
def _mask_hidden_states(
self,
hidden_states: torch.FloatTensor,
mask_time_indices: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
):
"""
Masks extracted features along time axis and/or along feature axis according to
[SpecAugment](https://huggingface.co/papers/1904.08779).
"""
# `config.apply_spec_augment` can set masking to False
if not getattr(self.config, "apply_spec_augment", True):
return hidden_states
# generate indices & apply SpecAugment along time axis
batch_size, sequence_length, hidden_size = hidden_states.size()
if mask_time_indices is not None:
# apply SpecAugment along time axis with given mask_time_indices
hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
elif self.config.mask_time_prob > 0 and self.training:
mask_time_indices = _compute_mask_indices(
(batch_size, sequence_length),
mask_prob=self.config.mask_time_prob,
mask_length=self.config.mask_time_length,
attention_mask=attention_mask,
min_masks=self.config.mask_time_min_masks,
)
mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
if self.config.mask_feature_prob > 0 and self.training:
# generate indices & apply SpecAugment along feature axis
mask_feature_indices = _compute_mask_indices(
(batch_size, hidden_size),
mask_prob=self.config.mask_feature_prob,
mask_length=self.config.mask_feature_length,
min_masks=self.config.mask_feature_min_masks,
)
mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
hidden_states[mask_feature_indices] = 0
return hidden_states
| SpeechT5SpeechEncoderPrenet |
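A worked example of the _conv_out_length formula above, applied layer by layer. The kernel and stride values are common wav2vec2-style defaults and are assumed here rather than read from any particular checkpoint.
def conv_out_length(input_length, kernel_size, stride):
    # same formula as in the model code, on plain ints
    return (input_length - kernel_size) // stride + 1

conv_kernel = (10, 3, 3, 3, 3, 2, 2)
conv_stride = (5, 2, 2, 2, 2, 2, 2)

length = 16000  # one second of 16 kHz audio samples
for k, s in zip(conv_kernel, conv_stride):
    length = conv_out_length(length, k, s)
assert length == 49  # 49 feature vectors for this assumed configuration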
python | huggingface__transformers | src/transformers/models/ernie/modular_ernie.py | {
"start": 5136,
"end": 5804
} | class ____(PreTrainedModel):
config_class = ErnieConfig
base_model_prefix = "ernie"
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": ErnieLayer,
"attentions": ErnieSelfAttention,
"cross_attentions": ErnieCrossAttention,
}
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
if isinstance(module, ErnieLMPredictionHead):
init.zeros_(module.bias)
| ErniePreTrainedModel |
python | pydata__xarray | xarray/core/coordinates.py | {
"start": 37204,
"end": 45724
} | class ____(Coordinates, Generic[T_DataArray]):
"""Dictionary like container for DataArray coordinates (variables + indexes).
This collection can be passed directly to the :py:class:`~xarray.Dataset`
and :py:class:`~xarray.DataArray` constructors via their `coords` argument.
This will add both the coordinates variables and their index.
"""
_data: T_DataArray
__slots__ = ("_data",)
def __init__(self, dataarray: T_DataArray) -> None:
self._data = dataarray
@property
def dims(self) -> tuple[Hashable, ...]:
return self._data.dims
@property
def dtypes(self) -> Frozen[Hashable, np.dtype]:
"""Mapping from coordinate names to dtypes.
Cannot be modified directly, but is updated when adding new variables.
See Also
--------
DataArray.dtype
"""
return Frozen({n: v.dtype for n, v in self._data._coords.items()})
@property
def _names(self) -> set[Hashable]:
return set(self._data._coords)
def __getitem__(self, key: Hashable) -> T_DataArray:
return self._data._getitem_coord(key)
def _update_coords(
self, coords: dict[Hashable, Variable], indexes: dict[Hashable, Index]
) -> None:
validate_dataarray_coords(
self._data.shape, Coordinates._construct_direct(coords, indexes), self.dims
)
self._data._coords = coords
self._data._indexes = indexes
def _drop_coords(self, coord_names):
# should drop indexed coordinates only
for name in coord_names:
del self._data._coords[name]
del self._data._indexes[name]
@property
def variables(self):
return Frozen(self._data._coords)
def to_dataset(self) -> Dataset:
from xarray.core.dataset import Dataset
coords = {k: v.copy(deep=False) for k, v in self._data._coords.items()}
indexes = dict(self._data.xindexes)
return Dataset._construct_direct(coords, set(coords), indexes=indexes)
def __delitem__(self, key: Hashable) -> None:
if key not in self:
raise KeyError(
f"{key!r} is not in coordinate variables {tuple(self.keys())}"
)
assert_no_index_corrupted(self._data.xindexes, {key})
del self._data._coords[key]
if key in self._data._indexes:
del self._data._indexes[key]
def _ipython_key_completions_(self):
"""Provide method for the key-autocompletions in IPython."""
return self._data._ipython_key_completions_()
def drop_indexed_coords(
coords_to_drop: set[Hashable], coords: Coordinates
) -> Coordinates:
"""Drop indexed coordinates associated with coordinates in coords_to_drop.
This will raise an error in case it corrupts any passed index and its
coordinate variables.
"""
new_variables = dict(coords.variables)
new_indexes = dict(coords.xindexes)
for idx, idx_coords in coords.xindexes.group_by_index():
idx_drop_coords = set(idx_coords) & coords_to_drop
# special case for pandas multi-index: still allow but deprecate
# dropping only its dimension coordinate.
# TODO: remove when removing PandasMultiIndex's dimension coordinate.
if isinstance(idx, PandasMultiIndex) and idx_drop_coords == {idx.dim}:
idx_drop_coords.update(idx.index.names)
emit_user_level_warning(
f"updating coordinate {idx.dim!r}, which is a PandasMultiIndex, would leave "
f"the multi-index level coordinates {list(idx.index.names)!r} in an inconsistent state. "
f"This will raise an error in the future. Use `.drop_vars({list(idx_coords)!r})` "
"to drop the coordinates' values before assigning new coordinate values.",
FutureWarning,
)
elif idx_drop_coords and len(idx_drop_coords) != len(idx_coords):
idx_drop_coords_str = ", ".join(f"{k!r}" for k in idx_drop_coords)
idx_coords_str = ", ".join(f"{k!r}" for k in idx_coords)
raise ValueError(
f"cannot drop or update coordinate(s) {idx_drop_coords_str}, which would corrupt "
f"the following index built from coordinates {idx_coords_str}:\n"
f"{idx}"
)
for k in idx_drop_coords:
del new_variables[k]
del new_indexes[k]
return Coordinates._construct_direct(coords=new_variables, indexes=new_indexes)
def assert_coordinate_consistent(obj: T_Xarray, coords: Mapping[Any, Variable]) -> None:
"""Make sure the dimension coordinate of obj is consistent with coords.
obj: DataArray or Dataset
coords: Dict-like of variables
"""
for k in obj.dims:
# make sure there are no conflict in dimension coordinates
if k in coords and k in obj.coords and not coords[k].equals(obj[k].variable):
raise IndexError(
f"dimension coordinate {k!r} conflicts between "
f"indexed and indexing objects:\n{obj[k]}\nvs.\n{coords[k]}"
)
def create_coords_with_default_indexes(
coords: Mapping[Any, Any], data_vars: DataVars | None = None
) -> Coordinates:
"""Returns a Coordinates object from a mapping of coordinates (arbitrary objects).
Create default (pandas) indexes for each of the input dimension coordinates.
Extract coordinates from each input DataArray.
"""
# Note: data_vars is needed here only because a pd.MultiIndex object
# can be promoted as coordinates.
# TODO: It won't be relevant anymore when this behavior will be dropped
# in favor of the more explicit ``Coordinates.from_pandas_multiindex()``.
from xarray.core.dataarray import DataArray
all_variables = dict(coords)
if data_vars is not None:
all_variables.update(data_vars)
indexes: dict[Hashable, Index] = {}
variables: dict[Hashable, Variable] = {}
# promote any pandas multi-index in data_vars as coordinates
coords_promoted: dict[Hashable, Any] = {}
pd_mindex_keys: list[Hashable] = []
for k, v in all_variables.items():
if isinstance(v, pd.MultiIndex):
coords_promoted[k] = v
pd_mindex_keys.append(k)
elif k in coords:
coords_promoted[k] = v
if pd_mindex_keys:
pd_mindex_keys_fmt = ",".join([f"'{k}'" for k in pd_mindex_keys])
emit_user_level_warning(
f"the `pandas.MultiIndex` object(s) passed as {pd_mindex_keys_fmt} coordinate(s) or "
"data variable(s) will no longer be implicitly promoted and wrapped into "
"multiple indexed coordinates in the future "
"(i.e., one coordinate for each multi-index level + one dimension coordinate). "
"If you want to keep this behavior, you need to first wrap it explicitly using "
"`mindex_coords = xarray.Coordinates.from_pandas_multiindex(mindex_obj, 'dim')` "
"and pass it as coordinates, e.g., `xarray.Dataset(coords=mindex_coords)`, "
"`dataset.assign_coords(mindex_coords)` or `dataarray.assign_coords(mindex_coords)`.",
FutureWarning,
)
dataarray_coords: list[DataArrayCoordinates] = []
for name, obj in coords_promoted.items():
if isinstance(obj, DataArray):
dataarray_coords.append(obj.coords)
variable = as_variable(obj, name=name, auto_convert=False)
if variable.dims == (name,):
# still needed to convert to IndexVariable first due to some
# pandas multi-index edge cases.
variable = variable.to_index_variable()
idx, idx_vars = create_default_index_implicit(variable, all_variables)
indexes.update(dict.fromkeys(idx_vars, idx))
variables.update(idx_vars)
all_variables.update(idx_vars)
else:
variables[name] = variable
new_coords = Coordinates._construct_direct(coords=variables, indexes=indexes)
# extract and merge coordinates and indexes from input DataArrays
if dataarray_coords:
prioritized = {k: (v, indexes.get(k)) for k, v in variables.items()}
variables, indexes = merge_coordinates_without_align(
dataarray_coords + [new_coords],
prioritized=prioritized,
)
new_coords = Coordinates._construct_direct(coords=variables, indexes=indexes)
return new_coords
| DataArrayCoordinates |
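A short usage sketch of the dict-like coordinate behaviour described above (item access and deletion), assuming xarray and numpy are installed.
import numpy as np
import xarray as xr

da = xr.DataArray(
    np.zeros((2, 3)),
    dims=("x", "y"),
    coords={"x": [10, 20], "y": ["a", "b", "c"], "unit": "m"},
)
assert list(da.coords) == ["x", "y", "unit"]
assert list(da.coords["x"].values) == [10, 20]
del da.coords["unit"]  # drop the scalar coordinate again
assert "unit" not in da.coords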
python | realpython__materials | python-property/rectangle.py | {
"start": 0,
"end": 187
} | class ____:
def __init__(self, width, height):
self.width = width
self.height = height
@property
def area(self):
return self.width * self.height
| Rectangle |
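A short usage sketch for the read-only computed property above, assuming the masked class is named Rectangle as its target column indicates.
rect = Rectangle(3, 4)
assert rect.area == 12
rect.width = 5
assert rect.area == 20          # recomputed from the current width and height
try:
    rect.area = 100             # no setter was defined for the property
except AttributeError:
    pass
else:
    raise AssertionError("assigning to a getter-only property should fail")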
python | arrow-py__arrow | tests/test_arrow.py | {
"start": 7101,
"end": 8080
} | class ____:
def test_repr(self):
result = self.arrow.__repr__()
assert result == f"<Arrow [{self.arrow._datetime.isoformat()}]>"
def test_str(self):
result = self.arrow.__str__()
assert result == self.arrow._datetime.isoformat()
def test_hash(self):
result = self.arrow.__hash__()
assert result == self.arrow._datetime.__hash__()
def test_format(self):
result = f"{self.arrow:YYYY-MM-DD}"
assert result == "2013-02-03"
def test_bare_format(self):
result = self.arrow.format()
assert result == "2013-02-03 12:30:45+00:00"
def test_format_no_format_string(self):
result = f"{self.arrow}"
assert result == str(self.arrow)
def test_clone(self):
result = self.arrow.clone()
assert result is not self.arrow
assert result._datetime == self.arrow._datetime
@pytest.mark.usefixtures("time_2013_01_01")
| TestTestArrowRepresentation |
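A usage sketch of the formatting behaviour these tests cover, assuming the arrow package is installed and the instance is the same 2013-02-03 12:30:45 UTC moment the fixture uses.
import arrow

a = arrow.Arrow(2013, 2, 3, 12, 30, 45)       # defaults to UTC
assert f"{a:YYYY-MM-DD}" == "2013-02-03"
assert a.format() == "2013-02-03 12:30:45+00:00"
assert repr(a) == f"<Arrow [{a.isoformat()}]>"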
python | huggingface__transformers | src/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py | {
"start": 826,
"end": 17435
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`FastSpeech2ConformerModel`]. It is used to
instantiate a FastSpeech2Conformer model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the
FastSpeech2Conformer [espnet/fastspeech2_conformer](https://huggingface.co/espnet/fastspeech2_conformer)
architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 384):
The dimensionality of the hidden layers.
vocab_size (`int`, *optional*, defaults to 78):
The size of the vocabulary.
num_mel_bins (`int`, *optional*, defaults to 80):
The number of mel filters used in the filter bank.
encoder_num_attention_heads (`int`, *optional*, defaults to 2):
The number of attention heads in the encoder.
encoder_layers (`int`, *optional*, defaults to 4):
The number of layers in the encoder.
encoder_linear_units (`int`, *optional*, defaults to 1536):
The number of units in the linear layer of the encoder.
decoder_layers (`int`, *optional*, defaults to 4):
The number of layers in the decoder.
decoder_num_attention_heads (`int`, *optional*, defaults to 2):
The number of attention heads in the decoder.
decoder_linear_units (`int`, *optional*, defaults to 1536):
The number of units in the linear layer of the decoder.
speech_decoder_postnet_layers (`int`, *optional*, defaults to 5):
The number of layers in the post-net of the speech decoder.
speech_decoder_postnet_units (`int`, *optional*, defaults to 256):
The number of units in the post-net layers of the speech decoder.
speech_decoder_postnet_kernel (`int`, *optional*, defaults to 5):
The kernel size in the post-net of the speech decoder.
positionwise_conv_kernel_size (`int`, *optional*, defaults to 3):
The size of the convolution kernel used in the position-wise layer.
encoder_normalize_before (`bool`, *optional*, defaults to `False`):
Specifies whether to normalize before encoder layers.
decoder_normalize_before (`bool`, *optional*, defaults to `False`):
Specifies whether to normalize before decoder layers.
encoder_concat_after (`bool`, *optional*, defaults to `False`):
Specifies whether to concatenate after encoder layers.
decoder_concat_after (`bool`, *optional*, defaults to `False`):
Specifies whether to concatenate after decoder layers.
reduction_factor (`int`, *optional*, defaults to 1):
The factor by which the speech frame rate is reduced.
speaking_speed (`float`, *optional*, defaults to 1.0):
The speed of the speech produced.
use_macaron_style_in_conformer (`bool`, *optional*, defaults to `True`):
Specifies whether to use macaron style in the conformer.
use_cnn_in_conformer (`bool`, *optional*, defaults to `True`):
Specifies whether to use convolutional neural networks in the conformer.
encoder_kernel_size (`int`, *optional*, defaults to 7):
The kernel size used in the encoder.
decoder_kernel_size (`int`, *optional*, defaults to 31):
The kernel size used in the decoder.
duration_predictor_layers (`int`, *optional*, defaults to 2):
The number of layers in the duration predictor.
duration_predictor_channels (`int`, *optional*, defaults to 256):
The number of channels in the duration predictor.
duration_predictor_kernel_size (`int`, *optional*, defaults to 3):
The kernel size used in the duration predictor.
energy_predictor_layers (`int`, *optional*, defaults to 2):
The number of layers in the energy predictor.
energy_predictor_channels (`int`, *optional*, defaults to 256):
The number of channels in the energy predictor.
energy_predictor_kernel_size (`int`, *optional*, defaults to 3):
The kernel size used in the energy predictor.
energy_predictor_dropout (`float`, *optional*, defaults to 0.5):
The dropout rate in the energy predictor.
energy_embed_kernel_size (`int`, *optional*, defaults to 1):
The kernel size used in the energy embed layer.
energy_embed_dropout (`float`, *optional*, defaults to 0.0):
The dropout rate in the energy embed layer.
stop_gradient_from_energy_predictor (`bool`, *optional*, defaults to `False`):
Specifies whether to stop gradients from the energy predictor.
pitch_predictor_layers (`int`, *optional*, defaults to 5):
The number of layers in the pitch predictor.
pitch_predictor_channels (`int`, *optional*, defaults to 256):
The number of channels in the pitch predictor.
pitch_predictor_kernel_size (`int`, *optional*, defaults to 5):
The kernel size used in the pitch predictor.
pitch_predictor_dropout (`float`, *optional*, defaults to 0.5):
The dropout rate in the pitch predictor.
pitch_embed_kernel_size (`int`, *optional*, defaults to 1):
The kernel size used in the pitch embed layer.
pitch_embed_dropout (`float`, *optional*, defaults to 0.0):
The dropout rate in the pitch embed layer.
stop_gradient_from_pitch_predictor (`bool`, *optional*, defaults to `True`):
Specifies whether to stop gradients from the pitch predictor.
encoder_dropout_rate (`float`, *optional*, defaults to 0.2):
The dropout rate in the encoder.
encoder_positional_dropout_rate (`float`, *optional*, defaults to 0.2):
The positional dropout rate in the encoder.
encoder_attention_dropout_rate (`float`, *optional*, defaults to 0.2):
The attention dropout rate in the encoder.
decoder_dropout_rate (`float`, *optional*, defaults to 0.2):
The dropout rate in the decoder.
decoder_positional_dropout_rate (`float`, *optional*, defaults to 0.2):
The positional dropout rate in the decoder.
decoder_attention_dropout_rate (`float`, *optional*, defaults to 0.2):
The attention dropout rate in the decoder.
duration_predictor_dropout_rate (`float`, *optional*, defaults to 0.2):
The dropout rate in the duration predictor.
speech_decoder_postnet_dropout (`float`, *optional*, defaults to 0.5):
The dropout rate in the speech decoder postnet.
max_source_positions (`int`, *optional*, defaults to 5000):
If `"relative"` position embeddings are used, this defines the maximum number of source input positions.
use_masking (`bool`, *optional*, defaults to `True`):
Specifies whether to use masking in the model.
use_weighted_masking (`bool`, *optional*, defaults to `False`):
Specifies whether to use weighted masking in the model.
num_speakers (`int`, *optional*):
Number of speakers. If set to > 1, speaker ids are expected as an input and a speaker id embedding layer is used.
num_languages (`int`, *optional*):
Number of languages. If set to > 1, language ids are expected as an input and a language id embedding layer is used.
speaker_embed_dim (`int`, *optional*):
Speaker embedding dimension. If set to > 0, a speaker embedding is expected as an input.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Specifies whether the model is an encoder-decoder.
convolution_bias (`bool`, *optional*, defaults to `True`):
Specifies whether to use bias in convolutions of the conformer's convolution module.
Example:
```python
>>> from transformers import FastSpeech2ConformerModel, FastSpeech2ConformerConfig
>>> # Initializing a FastSpeech2Conformer style configuration
>>> configuration = FastSpeech2ConformerConfig()
>>> # Initializing a model from the FastSpeech2Conformer style configuration
>>> model = FastSpeech2ConformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "fastspeech2_conformer"
base_config_key = "model_config"
attribute_map = {"num_hidden_layers": "encoder_layers", "num_attention_heads": "encoder_num_attention_heads"}
def __init__(
self,
hidden_size=384,
vocab_size=78,
num_mel_bins=80,
encoder_num_attention_heads=2,
encoder_layers=4,
encoder_linear_units=1536,
decoder_layers=4,
decoder_num_attention_heads=2,
decoder_linear_units=1536,
speech_decoder_postnet_layers=5,
speech_decoder_postnet_units=256,
speech_decoder_postnet_kernel=5,
positionwise_conv_kernel_size=3,
encoder_normalize_before=False,
decoder_normalize_before=False,
encoder_concat_after=False,
decoder_concat_after=False,
reduction_factor=1,
speaking_speed=1.0,
use_macaron_style_in_conformer=True,
use_cnn_in_conformer=True,
encoder_kernel_size=7,
decoder_kernel_size=31,
duration_predictor_layers=2,
duration_predictor_channels=256,
duration_predictor_kernel_size=3,
energy_predictor_layers=2,
energy_predictor_channels=256,
energy_predictor_kernel_size=3,
energy_predictor_dropout=0.5,
energy_embed_kernel_size=1,
energy_embed_dropout=0.0,
stop_gradient_from_energy_predictor=False,
pitch_predictor_layers=5,
pitch_predictor_channels=256,
pitch_predictor_kernel_size=5,
pitch_predictor_dropout=0.5,
pitch_embed_kernel_size=1,
pitch_embed_dropout=0.0,
stop_gradient_from_pitch_predictor=True,
encoder_dropout_rate=0.2,
encoder_positional_dropout_rate=0.2,
encoder_attention_dropout_rate=0.2,
decoder_dropout_rate=0.2,
decoder_positional_dropout_rate=0.2,
decoder_attention_dropout_rate=0.2,
duration_predictor_dropout_rate=0.2,
speech_decoder_postnet_dropout=0.5,
max_source_positions=5000,
use_masking=True,
use_weighted_masking=False,
num_speakers=None,
num_languages=None,
speaker_embed_dim=None,
is_encoder_decoder=True,
convolution_bias=True,
**kwargs,
):
if positionwise_conv_kernel_size % 2 == 0:
raise ValueError(
f"positionwise_conv_kernel_size must be odd, but got {positionwise_conv_kernel_size} instead."
)
if encoder_kernel_size % 2 == 0:
raise ValueError(f"encoder_kernel_size must be odd, but got {encoder_kernel_size} instead.")
if decoder_kernel_size % 2 == 0:
raise ValueError(f"decoder_kernel_size must be odd, but got {decoder_kernel_size} instead.")
if duration_predictor_kernel_size % 2 == 0:
raise ValueError(
f"duration_predictor_kernel_size must be odd, but got {duration_predictor_kernel_size} instead."
)
if energy_predictor_kernel_size % 2 == 0:
raise ValueError(
f"energy_predictor_kernel_size must be odd, but got {energy_predictor_kernel_size} instead."
)
if energy_embed_kernel_size % 2 == 0:
raise ValueError(f"energy_embed_kernel_size must be odd, but got {energy_embed_kernel_size} instead.")
if pitch_predictor_kernel_size % 2 == 0:
raise ValueError(
f"pitch_predictor_kernel_size must be odd, but got {pitch_predictor_kernel_size} instead."
)
if pitch_embed_kernel_size % 2 == 0:
raise ValueError(f"pitch_embed_kernel_size must be odd, but got {pitch_embed_kernel_size} instead.")
if hidden_size % encoder_num_attention_heads != 0:
raise ValueError("The hidden_size must be evenly divisible by encoder_num_attention_heads.")
if hidden_size % decoder_num_attention_heads != 0:
raise ValueError("The hidden_size must be evenly divisible by decoder_num_attention_heads.")
if use_masking and use_weighted_masking:
raise ValueError("Either use_masking or use_weighted_masking can be True, but not both.")
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.num_mel_bins = num_mel_bins
self.encoder_config = {
"num_attention_heads": encoder_num_attention_heads,
"layers": encoder_layers,
"kernel_size": encoder_kernel_size,
"attention_dropout_rate": encoder_attention_dropout_rate,
"dropout_rate": encoder_dropout_rate,
"positional_dropout_rate": encoder_positional_dropout_rate,
"linear_units": encoder_linear_units,
"normalize_before": encoder_normalize_before,
"concat_after": encoder_concat_after,
}
self.decoder_config = {
"num_attention_heads": decoder_num_attention_heads,
"layers": decoder_layers,
"kernel_size": decoder_kernel_size,
"attention_dropout_rate": decoder_attention_dropout_rate,
"dropout_rate": decoder_dropout_rate,
"positional_dropout_rate": decoder_positional_dropout_rate,
"linear_units": decoder_linear_units,
"normalize_before": decoder_normalize_before,
"concat_after": decoder_concat_after,
}
self.encoder_num_attention_heads = encoder_num_attention_heads
self.encoder_layers = encoder_layers
self.duration_predictor_channels = duration_predictor_channels
self.duration_predictor_kernel_size = duration_predictor_kernel_size
self.duration_predictor_layers = duration_predictor_layers
self.energy_embed_dropout = energy_embed_dropout
self.energy_embed_kernel_size = energy_embed_kernel_size
self.energy_predictor_channels = energy_predictor_channels
self.energy_predictor_dropout = energy_predictor_dropout
self.energy_predictor_kernel_size = energy_predictor_kernel_size
self.energy_predictor_layers = energy_predictor_layers
self.pitch_embed_dropout = pitch_embed_dropout
self.pitch_embed_kernel_size = pitch_embed_kernel_size
self.pitch_predictor_channels = pitch_predictor_channels
self.pitch_predictor_dropout = pitch_predictor_dropout
self.pitch_predictor_kernel_size = pitch_predictor_kernel_size
self.pitch_predictor_layers = pitch_predictor_layers
self.positionwise_conv_kernel_size = positionwise_conv_kernel_size
self.speech_decoder_postnet_units = speech_decoder_postnet_units
self.speech_decoder_postnet_dropout = speech_decoder_postnet_dropout
self.speech_decoder_postnet_kernel = speech_decoder_postnet_kernel
self.speech_decoder_postnet_layers = speech_decoder_postnet_layers
self.reduction_factor = reduction_factor
self.speaking_speed = speaking_speed
self.stop_gradient_from_energy_predictor = stop_gradient_from_energy_predictor
self.stop_gradient_from_pitch_predictor = stop_gradient_from_pitch_predictor
self.max_source_positions = max_source_positions
self.use_cnn_in_conformer = use_cnn_in_conformer
self.use_macaron_style_in_conformer = use_macaron_style_in_conformer
self.use_masking = use_masking
self.use_weighted_masking = use_weighted_masking
self.num_speakers = num_speakers
self.num_languages = num_languages
self.speaker_embed_dim = speaker_embed_dim
self.duration_predictor_dropout_rate = duration_predictor_dropout_rate
self.is_encoder_decoder = is_encoder_decoder
self.convolution_bias = convolution_bias
super().__init__(
is_encoder_decoder=is_encoder_decoder,
**kwargs,
)
| FastSpeech2ConformerConfig |
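The `__init__` above validates its arguments before assigning anything: kernel sizes must be odd, `hidden_size` must divide evenly by the attention heads, and the two masking options are mutually exclusive. A minimal sketch of how the kernel-size check surfaces to callers, reusing the import from the docstring example:

```python
from transformers import FastSpeech2ConformerConfig

# Odd kernel sizes (the defaults) pass validation.
config = FastSpeech2ConformerConfig(encoder_kernel_size=7)

# Even kernel sizes are rejected by the ValueError raised in __init__.
try:
    FastSpeech2ConformerConfig(encoder_kernel_size=8)
except ValueError as exc:
    print(exc)  # encoder_kernel_size must be odd, but got 8 instead.
```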
python | kamyu104__LeetCode-Solutions | Python/maximum-sum-of-almost-unique-subarray.py | {
"start": 93,
"end": 792
} | class ____(object):
def maxSum(self, nums, m, k):
"""
:type nums: List[int]
:type m: int
:type k: int
:rtype: int
"""
lookup = collections.Counter()
result = curr = left = 0
for right in xrange(len(nums)):
curr += nums[right]
lookup[nums[right]] += 1
if right-left+1 == k+1:
lookup[nums[left]] -= 1
if lookup[nums[left]] == 0:
del lookup[nums[left]]
curr -= nums[left]
left += 1
if right-left+1 == k and len(lookup) >= m:
result = max(result, curr)
return result
| Solution |
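The method above keeps a sliding window of length `k` together with a `Counter` of the values inside it, and only records windows containing at least `m` distinct elements. A quick check of the expected result, assuming `collections` is imported at module level as in the original file (the code targets Python 2 via `xrange`; under Python 3 swap in `range`):

```python
# nums = [2, 6, 7, 3, 1, 7], m = 3, k = 4 -> windows of length 4:
#   [2, 6, 7, 3]  4 distinct values, sum 18
#   [6, 7, 3, 1]  4 distinct values, sum 17
#   [7, 3, 1, 7]  3 distinct values, sum 18
print(Solution().maxSum([2, 6, 7, 3, 1, 7], 3, 4))  # 18
```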
python | python-poetry__poetry | tests/repositories/test_http_repository.py | {
"start": 707,
"end": 8545
} | class ____(HTTPRepository):
DIST_FIXTURES = Path(__file__).parent / "fixtures" / "pypi.org" / "dists"
def __init__(self, lazy_wheel: bool = True) -> None:
super().__init__("foo", "https://foo.com")
self._lazy_wheel = lazy_wheel
def _get_release_info(
self, name: NormalizedName, version: Version
) -> dict[str, Any]:
raise NotImplementedError
@pytest.mark.parametrize("lazy_wheel", [False, True])
@pytest.mark.parametrize("supports_range_requests", [None, False, True])
def test_get_info_from_wheel(
mocker: MockerFixture, lazy_wheel: bool, supports_range_requests: bool | None
) -> None:
filename = "poetry_core-1.5.0-py3-none-any.whl"
filepath = MockRepository.DIST_FIXTURES / filename
with ZipFile(filepath) as zf:
metadata, _ = parse_email(zf.read("poetry_core-1.5.0.dist-info/METADATA"))
mock_metadata_from_wheel_url = mocker.patch(
"poetry.repositories.http_repository.metadata_from_wheel_url",
return_value=metadata,
)
mock_download = mocker.patch(
"poetry.repositories.http_repository.download_file",
side_effect=lambda _, dest, *args, **kwargs: shutil.copy(filepath, dest),
)
domain = "foo.com"
url = f"https://{domain}/{filename}"
repo = MockRepository(lazy_wheel)
assert not repo._supports_range_requests
if lazy_wheel and supports_range_requests is not None:
repo._supports_range_requests[domain] = supports_range_requests
info = repo._get_info_from_wheel(Link(url))
assert info.name == "poetry-core"
assert info.version == "1.5.0"
assert info.requires_dist == [
'importlib-metadata (>=1.7.0) ; python_version < "3.8"'
]
if lazy_wheel and supports_range_requests is not False:
mock_metadata_from_wheel_url.assert_called_once_with(
filename, url, repo.session
)
mock_download.assert_not_called()
assert repo._supports_range_requests[domain] is True
else:
mock_metadata_from_wheel_url.assert_not_called()
mock_download.assert_called_once_with(
url,
mocker.ANY,
session=repo.session,
raise_accepts_ranges=lazy_wheel,
max_retries=0,
)
if lazy_wheel:
assert repo._supports_range_requests[domain] is False
else:
assert domain not in repo._supports_range_requests
def test_get_info_from_wheel_state_sequence(mocker: MockerFixture) -> None:
"""
1. We know nothing:
Try range requests, which are not supported and fall back to complete download.
2. Range requests were not supported so far:
We do not try range requests again.
3. Range requests were still not supported so far:
We do not try range requests again, but we notice that the response header
contains "Accept-Ranges: bytes", so range requests are at least supported
for some files, which means we want to try again.
4. Range requests are supported for some files:
We try range requests (success).
5. Range requests are supported for some files:
We try range requests (failure), but do not forget that range requests are
supported for some files.
6. Range requests are supported for some files:
We try range requests (success).
"""
mock_metadata_from_wheel_url = mocker.patch(
"poetry.repositories.http_repository.metadata_from_wheel_url"
)
mock_download = mocker.patch("poetry.repositories.http_repository.download_file")
filename = "poetry_core-1.5.0-py3-none-any.whl"
domain = "foo.com"
link = Link(f"https://{domain}/{filename}")
repo = MockRepository()
# 1. range request and download
mock_metadata_from_wheel_url.side_effect = HTTPRangeRequestUnsupportedError
with contextlib.suppress(PackageInfoError):
repo._get_info_from_wheel(link)
assert mock_metadata_from_wheel_url.call_count == 1
assert mock_download.call_count == 1
assert mock_download.call_args[1]["raise_accepts_ranges"] is False
# 2. only download
with contextlib.suppress(PackageInfoError):
repo._get_info_from_wheel(link)
assert mock_metadata_from_wheel_url.call_count == 1
assert mock_download.call_count == 2
assert mock_download.call_args[1]["raise_accepts_ranges"] is True
# 3. download and range request
mock_metadata_from_wheel_url.side_effect = None
mock_download.side_effect = HTTPRangeRequestSupportedError
with contextlib.suppress(PackageInfoError):
repo._get_info_from_wheel(link)
assert mock_metadata_from_wheel_url.call_count == 2
assert mock_download.call_count == 3
assert mock_download.call_args[1]["raise_accepts_ranges"] is True
# 4. only range request
with contextlib.suppress(PackageInfoError):
repo._get_info_from_wheel(link)
assert mock_metadata_from_wheel_url.call_count == 3
assert mock_download.call_count == 3
# 5. range request and download
mock_metadata_from_wheel_url.side_effect = HTTPRangeRequestUnsupportedError
mock_download.side_effect = None
with contextlib.suppress(PackageInfoError):
repo._get_info_from_wheel(link)
assert mock_metadata_from_wheel_url.call_count == 4
assert mock_download.call_count == 4
assert mock_download.call_args[1]["raise_accepts_ranges"] is False
# 6. only range request
mock_metadata_from_wheel_url.side_effect = None
with contextlib.suppress(PackageInfoError):
repo._get_info_from_wheel(link)
assert mock_metadata_from_wheel_url.call_count == 5
assert mock_download.call_count == 4
@pytest.mark.parametrize(
"mock_hashes",
[
None,
{"sha256": "e216b70f013c47b82a72540d34347632c5bfe59fd54f5fe5d51f6a68b19aaf84"},
{"md5": "be7589b4902793e66d7d979bd8581591"},
],
)
def test_calculate_sha256(
mocker: MockerFixture, mock_hashes: dict[str, Any] | None
) -> None:
filename = "poetry_core-1.5.0-py3-none-any.whl"
filepath = MockRepository.DIST_FIXTURES / filename
mock_download = mocker.patch(
"poetry.repositories.http_repository.download_file",
side_effect=lambda _, dest, *args, **kwargs: shutil.copy(filepath, dest),
)
domain = "foo.com"
link = Link(f"https://{domain}/{filename}", hashes=mock_hashes)
repo = MockRepository()
calculated_hash = repo.calculate_sha256(link)
assert mock_download.call_count == 1
assert (
calculated_hash
== "sha256:e216b70f013c47b82a72540d34347632c5bfe59fd54f5fe5d51f6a68b19aaf84"
)
def test_calculate_sha256_defaults_to_sha256_on_md5_errors(
mocker: MockerFixture,
) -> None:
raised_value_error = False
def mock_hashlib_md5_error() -> None:
nonlocal raised_value_error
raised_value_error = True
raise ValueError(
"[digital envelope routines: EVP_DigestInit_ex] disabled for FIPS"
)
filename = "poetry_core-1.5.0-py3-none-any.whl"
filepath = MockRepository.DIST_FIXTURES / filename
mock_download = mocker.patch(
"poetry.repositories.http_repository.download_file",
side_effect=lambda _, dest, *args, **kwargs: shutil.copy(filepath, dest),
)
mock_hashlib_md5 = mocker.patch("hashlib.md5", side_effect=mock_hashlib_md5_error)
domain = "foo.com"
link = Link(
f"https://{domain}/{filename}",
hashes={"md5": "be7589b4902793e66d7d979bd8581591"},
)
repo = MockRepository()
calculated_hash = repo.calculate_sha256(link)
assert raised_value_error
assert mock_download.call_count == 1
assert mock_hashlib_md5.call_count == 1
assert (
calculated_hash
== "sha256:e216b70f013c47b82a72540d34347632c5bfe59fd54f5fe5d51f6a68b19aaf84"
)
| MockRepository |
python | PyCQA__pylint | pylint/testutils/_primer/primer_command.py | {
"start": 460,
"end": 585
} | class ____(TypedDict):
commit: str
messages: list[OldJsonExport]
PackageMessages = dict[str, PackageData]
| PackageData |
python | kubernetes-client__python | kubernetes/base/dynamic/test_client.py | {
"start": 18916,
"end": 20213
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
config = base.get_e2e_configuration()
cls.client = DynamicClient(api_client.ApiClient(configuration=config))
cls.pod_manifest = {
'apiVersion': 'v1',
'kind': 'Pod',
'metadata': {'name': 'foo-pod'},
'spec': {'containers': [{'name': "main", 'image': "busybox"}]},
}
def test_dict_type(self):
self.assertEqual(self.client.serialize_body(self.pod_manifest), self.pod_manifest)
def test_resource_instance_type(self):
inst = ResourceInstance(self.client, self.pod_manifest)
self.assertEqual(self.client.serialize_body(inst), self.pod_manifest)
def test_resource_field(self):
"""`ResourceField` is a special type which overwrites `__getattr__` method to return `None`
when a non-existent attribute is accessed, which means it can pass any `hasattr(...)` test.
"""
params = {
"foo": "bar",
"self": True
}
res = ResourceField(params=params)
self.assertEqual(res["foo"], params["foo"])
self.assertEqual(res["self"], params["self"])
self.assertEqual(self.client.serialize_body(res), params)
| TestDynamicClientSerialization |
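A short sketch of the `ResourceField` behaviour that the last test relies on, assuming the class is importable from `kubernetes.dynamic.resource` as in the released client (the key names are made up):

```python
from kubernetes.dynamic.resource import ResourceField

field = ResourceField(params={"foo": "bar"})
assert field["foo"] == "bar"              # item access, as exercised in the test above
assert field.does_not_exist is None       # missing attributes fall back to None
assert hasattr(field, "anything_at_all")  # ...so hasattr() never fails
```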
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/summary_ops/summary_v1_image_op_test.py | {
"start": 1144,
"end": 4272
} | class ____(test.TestCase):
def _AsSummary(self, s):
summ = summary_pb2.Summary()
summ.ParseFromString(s)
return summ
def _CheckProto(self, image_summ, shape):
"""Verify that the non-image parts of the image_summ proto match shape."""
# Only the first 3 images are returned.
for v in image_summ.value:
v.image.ClearField("encoded_image_string")
expected = "\n".join("""
value {
tag: "img/image/%d"
image { height: %d width: %d colorspace: %d }
}""" % ((i,) + shape[1:]) for i in range(3))
self.assertProtoEquals(expected, image_summ)
@test_util.run_deprecated_v1
def testImageSummary(self):
for depth in (1, 3, 4):
for positive in False, True:
with self.session(graph=ops.Graph()) as sess:
shape = (4, 5, 7) + (depth,)
bad_color = [255, 0, 0, 255][:depth]
# Build a mostly random image with one nan
const = np.random.randn(*shape).astype(np.float32)
const[0, 1, 2] = 0 # Make the nan entry not the max
if positive:
const = 1 + np.maximum(const, 0)
scale = 255 / const.reshape(4, -1).max(axis=1)
offset = 0
else:
scale = 127 / np.abs(const.reshape(4, -1)).max(axis=1)
offset = 128
adjusted = np.floor(scale[:, None, None, None] * const + offset)
const[0, 1, 2, depth // 2] = np.nan
# Summarize
summ = summary.image("img", const)
value = self.evaluate(summ)
self.assertEqual([], summ.get_shape())
image_summ = self._AsSummary(value)
# Decode the first image and check consistency
image = image_ops.decode_png(image_summ.value[0]
.image.encoded_image_string).eval()
self.assertAllEqual(image[1, 2], bad_color)
image[1, 2] = adjusted[0, 1, 2]
self.assertAllClose(image, adjusted[0], rtol=2e-5, atol=2e-5)
# Check the rest of the proto
self._CheckProto(image_summ, shape)
@test_util.run_deprecated_v1
def testImageSummaryUint8(self):
np.random.seed(7)
for depth in (1, 3, 4):
with self.session(graph=ops.Graph()) as sess:
shape = (4, 5, 7) + (depth,)
# Build a random uint8 image
images = np.random.randint(256, size=shape).astype(np.uint8)
tf_images = ops.convert_to_tensor(images)
self.assertEqual(tf_images.dtype, dtypes.uint8)
# Summarize
summ = summary.image("img", tf_images)
value = self.evaluate(summ)
self.assertEqual([], summ.get_shape())
image_summ = self._AsSummary(value)
# Decode the first image and check consistency.
# Since we're uint8, everything should be exact.
image = image_ops.decode_png(image_summ.value[0]
.image.encoded_image_string).eval()
self.assertAllEqual(image, images[0])
# Check the rest of the proto
self._CheckProto(image_summ, shape)
if __name__ == "__main__":
test.main()
| SummaryV1ImageOpTest |
python | tensorflow__tensorflow | tensorflow/python/distribute/combinations_test.py | {
"start": 4375,
"end": 5107
} | class ____(test.TestCase, parameterized.TestCase):
def setUp(self):
# Note that test case fixtures are executed in both the main process and
# worker processes.
super().setUp()
if combinations.in_main_process():
combinations.env().tf_data_service_dispatcher = "localhost"
def testTfDataServiceDispatcher(self):
self.assertEqual(combinations.env().tf_data_service_dispatcher, "localhost")
def testUpdateEnvInWorker(self):
with self.assertRaises(ValueError):
combinations.env().tf_data_service_dispatcher = "localhost"
# unittest.expectedFailure doesn't work with parameterized test methods, so we
# have to decorate the class instead.
@unittest.expectedFailure
| ClusterCombinationTestEnvTest |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/formatted_text/pygments.py | {
"start": 274,
"end": 780
} | class ____:
"""
Turn a pygments token list into a list of prompt_toolkit text fragments
(``(style_str, text)`` tuples).
"""
def __init__(self, token_list: list[tuple[Token, str]]) -> None:
self.token_list = token_list
def __pt_formatted_text__(self) -> StyleAndTextTuples:
result: StyleAndTextTuples = []
for token, text in self.token_list:
result.append(("class:" + pygments_token_to_classname(token), text))
return result
| PygmentsTokens |
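A usage sketch for the wrapper above, following the pattern documented for prompt_toolkit's `print_formatted_text`; the sample code string is arbitrary:

```python
import pygments
from pygments.lexers.python import PythonLexer
from prompt_toolkit import print_formatted_text
from prompt_toolkit.formatted_text import PygmentsTokens

code = 'print("hello")'
tokens = list(pygments.lex(code, lexer=PythonLexer()))  # list of (Token, text) pairs
print_formatted_text(PygmentsTokens(tokens))            # rendered via prompt_toolkit styling
```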
python | encode__django-rest-framework | tests/test_permissions.py | {
"start": 19308,
"end": 19458
} | class ____(generics.RetrieveUpdateDestroyAPIView):
queryset = BasicModel.objects.all()
serializer_class = BasicSerializer
| PermissionInstanceView |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF012.py | {
"start": 2532,
"end": 2690
} | class ____(SQLModel):
id: int
i_j: list[L] = list()
# Lint should account for deferred annotations
# See https://github.com/astral-sh/ruff/issues/15857
| N |
python | pytorch__pytorch | test/distributed/_composable/fsdp/test_fully_shard_init.py | {
"start": 26773,
"end": 36644
} | class ____(FSDPTestMultiThread):
@property
def world_size(self) -> int:
return 4
@skip_if_lt_x_gpu(1)
def test_meta_device_1d_init(self):
default_pg = torch.distributed.distributed_c10d._get_default_group()
mesh = init_device_mesh(device_type.type, mesh_shape=(default_pg.size(),))
# Test both even sharding (8), uneven sharding (3), and empty local tensor (1)
for mlp_dim in (8, 3, 1):
# cover foreach_copy code path for bf16
for mp_policy in (
MixedPrecisionPolicy(),
MixedPrecisionPolicy(
param_dtype=torch.bfloat16, reduce_dtype=torch.float32
),
):
with torch.device("meta"):
model = nn.Sequential(
MLP(mlp_dim, dim_multiplier=1, with_buffer=True, bias=False),
MLP(mlp_dim, dim_multiplier=1, bias=False),
)
for param in model.parameters():
self.assertEqual(param.device, torch.device("meta"))
fully_shard(model[0], mesh=mesh, mp_policy=mp_policy)
fully_shard(model[1], mesh=mesh, mp_policy=mp_policy)
fully_shard(model, mesh=mesh, mp_policy=mp_policy)
for param in model.parameters():
self.assertEqual(param.device, torch.device("meta"))
self._test_to_empty_and_reset_parameters(model, mesh, mlp_dim)
# Test that we can call `fully_shard` under meta-device context and
# that `init_device_mesh` call still works
mlp_dim = 8
with torch.device("meta"):
model = nn.Sequential(MLP(mlp_dim, with_buffer=True), MLP(mlp_dim))
for param in model.parameters():
self.assertEqual(param.device, torch.device("meta"))
for module in (model[0], model[1], model):
fully_shard(module)
for param in model.parameters():
self.assertEqual(param.device, torch.device("meta"))
self._test_to_empty_and_reset_parameters(model, mesh, mlp_dim)
@skip_if_lt_x_gpu(1)
def test_meta_device_2d_init(self):
assert self.world_size >= 4, f"{self.world_size}"
dp_size = 2
global_mesh = init_device_mesh(
device_type.type,
(dp_size, self.world_size // dp_size),
mesh_dim_names=("dp", "tp"),
)
dp_mesh, tp_mesh = global_mesh["dp"], global_mesh["tp"]
# Test both even sharding (8) and uneven sharding (3)
for mlp_dim in (8, 3):
with torch.device("meta"):
model = MLP(mlp_dim, with_buffer=True)
for param in model.parameters():
self.assertEqual(param.device, torch.device("meta"))
parallelize_module(
model,
tp_mesh,
{"in_proj": ColwiseParallel(), "out_proj": RowwiseParallel()},
)
for param in model.parameters():
self.assertEqual(param.device, torch.device("meta"))
fully_shard(model.in_proj, mesh=dp_mesh)
fully_shard(model.out_proj, mesh=dp_mesh)
fully_shard(model, mesh=dp_mesh)
for param in model.parameters():
self.assertEqual(param.device, torch.device("meta"))
self._test_to_empty_and_reset_parameters(model, global_mesh, mlp_dim)
def _test_to_empty_and_reset_parameters(
self, model: nn.Module, mesh: DeviceMesh, mlp_dim: int
):
# Check that we can materialize it on GPU with empty values
device = torch.device(
device_type.type, torch.get_device_module(device_type).current_device()
)
model.to_empty(device=device)
for param in model.parameters():
self.assertEqual(param.device, device)
optim = torch.optim.Adam(model.parameters(), lr=1e-2)
# Check that `reset_parameters()` on each module initializes values
const = 1337
for tensor in itertools.chain(model.parameters(), model.buffers()):
tensor.detach().fill_(const)
for module in model.modules():
if hasattr(module, "reset_parameters"):
module.reset_parameters()
for param in model.parameters():
local_tensor = param.to_local()
if local_tensor.numel() > 0:
self.assertNotEqual(local_tensor, torch.ones_like(local_tensor) * const)
for buffer in model.buffers():
self.assertNotEqual(buffer, torch.ones_like(buffer) * const)
# Check that we can run an iteration without erroring
inp = torch.randn((4, mlp_dim), device=device_type.type)
model(inp).sum().backward()
optim.step()
@skip_if_lt_x_gpu(1)
def test_invalid_meta_device_init(self):
default_pg = torch.distributed.distributed_c10d._get_default_group()
mesh = init_device_mesh(device_type.type, mesh_shape=(default_pg.size(),))
mlp_dim = 8
with torch.device("meta"):
model = nn.Sequential(MLP(mlp_dim, with_buffer=True), MLP(mlp_dim))
for param in model.parameters():
self.assertEqual(param.device, torch.device("meta"))
fully_shard(model[0], mesh=mesh)
fully_shard(model[1], mesh=mesh)
fully_shard(model, mesh=mesh)
inp = torch.randn((4, mlp_dim), device=device_type.type)
error_regex = (
"FSDP parameters should be materialized from meta device before training, "
"but the following were still on meta device: "
r"\['0.in_proj.weight', '0.in_proj.bias', '0.out_proj.weight', '0.out_proj.bias'\]"
)
with self.assertRaisesRegex(RuntimeError, error_regex):
model(inp)
@skip_if_lt_x_gpu(1)
def test_rank0_broadcast_meta_device_init(self):
model_args = ModelArgs(dropout_p=0.0)
# Assume we have a CPU full state dict on rank 0
if self.rank == 0:
torch.manual_seed(42)
ref_model = Transformer(model_args)
full_sd = ref_model.state_dict()
for param in full_sd.values():
self.assertEqual(param.device, torch.device("cpu"))
# Initialize the sharded model on meta device
fsdp_mesh = init_device_mesh(device_type.type, (self.world_size,))
with torch.device("meta"):
model = Transformer(model_args)
for module in model.modules():
if isinstance(module, TransformerBlock):
fully_shard(module, mesh=fsdp_mesh)
fully_shard(model, mesh=fsdp_mesh)
for param in model.parameters():
self.assertEqual(param.device, torch.device("meta"))
# Construct a sharded state dict from the rank 0 full state dict by
# broadcasting and sharding
meta_sharded_sd = model.state_dict()
sharded_sd = {}
if self.rank == 0:
self.assertEqual(len(meta_sharded_sd), len(full_sd))
self.assertEqual(list(meta_sharded_sd.keys()), list(full_sd.keys()))
for (param_name, full_param), sharded_meta_param in zip(
full_sd.items(), meta_sharded_sd.values()
):
full_param = full_param.detach().to(device_type)
mesh = sharded_meta_param.device_mesh
dist.broadcast(full_param, src=0, group=mesh.get_group(0))
sharded_tensor = distribute_tensor(
full_param, mesh, sharded_meta_param.placements
)
sharded_sd[param_name] = nn.Parameter(sharded_tensor)
else:
for param_name, sharded_meta_param in meta_sharded_sd.items():
full_tensor = torch.empty(
sharded_meta_param.size(),
device=device_type.type,
dtype=sharded_meta_param.dtype,
)
mesh = sharded_meta_param.device_mesh
dist.broadcast(full_tensor, src=0, group=mesh.get_group(0))
sharded_tensor = distribute_tensor(
full_tensor, mesh, sharded_meta_param.placements
)
sharded_sd[param_name] = nn.Parameter(sharded_tensor)
model.load_state_dict(sharded_sd, assign=True)
for param in model.parameters():
self.assertIsInstance(param, DTensor)
self.assertEqual(param.device.type, device_type.type)
# Construct the reference model on nonzero ranks by broadcasting the
# unsharded model from rank 0 and sharding on all ranks
if self.rank != 0:
ref_model = Transformer(model_args)
for param in ref_model.parameters():
torch.distributed.broadcast(param.detach(), src=0)
for module in ref_model.modules():
if isinstance(module, TransformerBlock):
fully_shard(module, mesh=fsdp_mesh)
fully_shard(ref_model, mesh=fsdp_mesh)
for (param_name, param), (ref_param_name, ref_param) in zip(
model.named_parameters(), ref_model.named_parameters()
):
self.assertEqual(param_name, ref_param_name)
self.assertEqual(param, ref_param)
# Check one forward/backward for parity
inp = torch.randint(0, model_args.vocab_size, (2, 16), device=device_type.type)
loss = model(inp).sum()
loss.backward()
ref_loss = ref_model(inp).sum()
ref_loss.backward()
self.assertEqual(loss, ref_loss)
for param, ref_param in zip(model.parameters(), ref_model.parameters()):
self.assertEqual(param.grad, ref_param.grad)
| TestFullyShardMetaDeviceInit |
python | django-haystack__django-haystack | test_haystack/test_indexes.py | {
"start": 3349,
"end": 3957
} | class ____(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr="author")
author_foo = indexes.FacetCharField(facet_for="author")
pub_date = indexes.DateTimeField(model_attr="pub_date")
pub_date_exact = indexes.FacetDateTimeField(facet_for="pub_date")
def get_model(self):
return MockModel
def prepare_author(self, obj):
return "Hi, I'm %s" % self.prepared_data["author"]
def prepare_pub_date_exact(self, obj):
return "2010-10-26T01:54:32"
| GoodFacetedMockSearchIndex |
python | kubernetes-client__python | kubernetes/client/models/v1_stateful_set_spec.py | {
"start": 383,
"end": 17014
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'min_ready_seconds': 'int',
'ordinals': 'V1StatefulSetOrdinals',
'persistent_volume_claim_retention_policy': 'V1StatefulSetPersistentVolumeClaimRetentionPolicy',
'pod_management_policy': 'str',
'replicas': 'int',
'revision_history_limit': 'int',
'selector': 'V1LabelSelector',
'service_name': 'str',
'template': 'V1PodTemplateSpec',
'update_strategy': 'V1StatefulSetUpdateStrategy',
'volume_claim_templates': 'list[V1PersistentVolumeClaim]'
}
attribute_map = {
'min_ready_seconds': 'minReadySeconds',
'ordinals': 'ordinals',
'persistent_volume_claim_retention_policy': 'persistentVolumeClaimRetentionPolicy',
'pod_management_policy': 'podManagementPolicy',
'replicas': 'replicas',
'revision_history_limit': 'revisionHistoryLimit',
'selector': 'selector',
'service_name': 'serviceName',
'template': 'template',
'update_strategy': 'updateStrategy',
'volume_claim_templates': 'volumeClaimTemplates'
}
def __init__(self, min_ready_seconds=None, ordinals=None, persistent_volume_claim_retention_policy=None, pod_management_policy=None, replicas=None, revision_history_limit=None, selector=None, service_name=None, template=None, update_strategy=None, volume_claim_templates=None, local_vars_configuration=None): # noqa: E501
"""V1StatefulSetSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._min_ready_seconds = None
self._ordinals = None
self._persistent_volume_claim_retention_policy = None
self._pod_management_policy = None
self._replicas = None
self._revision_history_limit = None
self._selector = None
self._service_name = None
self._template = None
self._update_strategy = None
self._volume_claim_templates = None
self.discriminator = None
if min_ready_seconds is not None:
self.min_ready_seconds = min_ready_seconds
if ordinals is not None:
self.ordinals = ordinals
if persistent_volume_claim_retention_policy is not None:
self.persistent_volume_claim_retention_policy = persistent_volume_claim_retention_policy
if pod_management_policy is not None:
self.pod_management_policy = pod_management_policy
if replicas is not None:
self.replicas = replicas
if revision_history_limit is not None:
self.revision_history_limit = revision_history_limit
self.selector = selector
if service_name is not None:
self.service_name = service_name
self.template = template
if update_strategy is not None:
self.update_strategy = update_strategy
if volume_claim_templates is not None:
self.volume_claim_templates = volume_claim_templates
@property
def min_ready_seconds(self):
"""Gets the min_ready_seconds of this V1StatefulSetSpec. # noqa: E501
Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) # noqa: E501
:return: The min_ready_seconds of this V1StatefulSetSpec. # noqa: E501
:rtype: int
"""
return self._min_ready_seconds
@min_ready_seconds.setter
def min_ready_seconds(self, min_ready_seconds):
"""Sets the min_ready_seconds of this V1StatefulSetSpec.
Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) # noqa: E501
:param min_ready_seconds: The min_ready_seconds of this V1StatefulSetSpec. # noqa: E501
:type: int
"""
self._min_ready_seconds = min_ready_seconds
@property
def ordinals(self):
"""Gets the ordinals of this V1StatefulSetSpec. # noqa: E501
:return: The ordinals of this V1StatefulSetSpec. # noqa: E501
:rtype: V1StatefulSetOrdinals
"""
return self._ordinals
@ordinals.setter
def ordinals(self, ordinals):
"""Sets the ordinals of this V1StatefulSetSpec.
:param ordinals: The ordinals of this V1StatefulSetSpec. # noqa: E501
:type: V1StatefulSetOrdinals
"""
self._ordinals = ordinals
@property
def persistent_volume_claim_retention_policy(self):
"""Gets the persistent_volume_claim_retention_policy of this V1StatefulSetSpec. # noqa: E501
:return: The persistent_volume_claim_retention_policy of this V1StatefulSetSpec. # noqa: E501
:rtype: V1StatefulSetPersistentVolumeClaimRetentionPolicy
"""
return self._persistent_volume_claim_retention_policy
@persistent_volume_claim_retention_policy.setter
def persistent_volume_claim_retention_policy(self, persistent_volume_claim_retention_policy):
"""Sets the persistent_volume_claim_retention_policy of this V1StatefulSetSpec.
:param persistent_volume_claim_retention_policy: The persistent_volume_claim_retention_policy of this V1StatefulSetSpec. # noqa: E501
:type: V1StatefulSetPersistentVolumeClaimRetentionPolicy
"""
self._persistent_volume_claim_retention_policy = persistent_volume_claim_retention_policy
@property
def pod_management_policy(self):
"""Gets the pod_management_policy of this V1StatefulSetSpec. # noqa: E501
podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once. # noqa: E501
:return: The pod_management_policy of this V1StatefulSetSpec. # noqa: E501
:rtype: str
"""
return self._pod_management_policy
@pod_management_policy.setter
def pod_management_policy(self, pod_management_policy):
"""Sets the pod_management_policy of this V1StatefulSetSpec.
podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once. # noqa: E501
:param pod_management_policy: The pod_management_policy of this V1StatefulSetSpec. # noqa: E501
:type: str
"""
self._pod_management_policy = pod_management_policy
@property
def replicas(self):
"""Gets the replicas of this V1StatefulSetSpec. # noqa: E501
replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1. # noqa: E501
:return: The replicas of this V1StatefulSetSpec. # noqa: E501
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""Sets the replicas of this V1StatefulSetSpec.
replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1. # noqa: E501
:param replicas: The replicas of this V1StatefulSetSpec. # noqa: E501
:type: int
"""
self._replicas = replicas
@property
def revision_history_limit(self):
"""Gets the revision_history_limit of this V1StatefulSetSpec. # noqa: E501
revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10. # noqa: E501
:return: The revision_history_limit of this V1StatefulSetSpec. # noqa: E501
:rtype: int
"""
return self._revision_history_limit
@revision_history_limit.setter
def revision_history_limit(self, revision_history_limit):
"""Sets the revision_history_limit of this V1StatefulSetSpec.
revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10. # noqa: E501
:param revision_history_limit: The revision_history_limit of this V1StatefulSetSpec. # noqa: E501
:type: int
"""
self._revision_history_limit = revision_history_limit
@property
def selector(self):
"""Gets the selector of this V1StatefulSetSpec. # noqa: E501
:return: The selector of this V1StatefulSetSpec. # noqa: E501
:rtype: V1LabelSelector
"""
return self._selector
@selector.setter
def selector(self, selector):
"""Sets the selector of this V1StatefulSetSpec.
:param selector: The selector of this V1StatefulSetSpec. # noqa: E501
:type: V1LabelSelector
"""
if self.local_vars_configuration.client_side_validation and selector is None: # noqa: E501
raise ValueError("Invalid value for `selector`, must not be `None`") # noqa: E501
self._selector = selector
@property
def service_name(self):
"""Gets the service_name of this V1StatefulSetSpec. # noqa: E501
serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller. # noqa: E501
:return: The service_name of this V1StatefulSetSpec. # noqa: E501
:rtype: str
"""
return self._service_name
@service_name.setter
def service_name(self, service_name):
"""Sets the service_name of this V1StatefulSetSpec.
serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller. # noqa: E501
:param service_name: The service_name of this V1StatefulSetSpec. # noqa: E501
:type: str
"""
self._service_name = service_name
@property
def template(self):
"""Gets the template of this V1StatefulSetSpec. # noqa: E501
:return: The template of this V1StatefulSetSpec. # noqa: E501
:rtype: V1PodTemplateSpec
"""
return self._template
@template.setter
def template(self, template):
"""Sets the template of this V1StatefulSetSpec.
:param template: The template of this V1StatefulSetSpec. # noqa: E501
:type: V1PodTemplateSpec
"""
if self.local_vars_configuration.client_side_validation and template is None: # noqa: E501
raise ValueError("Invalid value for `template`, must not be `None`") # noqa: E501
self._template = template
@property
def update_strategy(self):
"""Gets the update_strategy of this V1StatefulSetSpec. # noqa: E501
:return: The update_strategy of this V1StatefulSetSpec. # noqa: E501
:rtype: V1StatefulSetUpdateStrategy
"""
return self._update_strategy
@update_strategy.setter
def update_strategy(self, update_strategy):
"""Sets the update_strategy of this V1StatefulSetSpec.
:param update_strategy: The update_strategy of this V1StatefulSetSpec. # noqa: E501
:type: V1StatefulSetUpdateStrategy
"""
self._update_strategy = update_strategy
@property
def volume_claim_templates(self):
"""Gets the volume_claim_templates of this V1StatefulSetSpec. # noqa: E501
volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name. # noqa: E501
:return: The volume_claim_templates of this V1StatefulSetSpec. # noqa: E501
:rtype: list[V1PersistentVolumeClaim]
"""
return self._volume_claim_templates
@volume_claim_templates.setter
def volume_claim_templates(self, volume_claim_templates):
"""Sets the volume_claim_templates of this V1StatefulSetSpec.
volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name. # noqa: E501
:param volume_claim_templates: The volume_claim_templates of this V1StatefulSetSpec. # noqa: E501
:type: list[V1PersistentVolumeClaim]
"""
self._volume_claim_templates = volume_claim_templates
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1StatefulSetSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1StatefulSetSpec):
return True
return self.to_dict() != other.to_dict()
| V1StatefulSetSpec |
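A construction sketch for the model above using the sibling generated models from `kubernetes.client`; `selector` and `template` are the two arguments the setters refuse to leave as `None`, everything else is optional (labels and image are arbitrary):

```python
from kubernetes import client

spec = client.V1StatefulSetSpec(
    replicas=3,
    service_name="web",
    selector=client.V1LabelSelector(match_labels={"app": "web"}),
    template=client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": "web"}),
        spec=client.V1PodSpec(
            containers=[client.V1Container(name="web", image="nginx")]
        ),
    ),
)
assert spec.to_dict()["replicas"] == 3  # to_dict() keys use Python attribute names
```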
python | ethereum__web3.py | tests/integration/go_ethereum/common.py | {
"start": 3282,
"end": 4099
} | class ____(AsyncEthModuleTest):
@pytest.mark.xfail(reason="eth_signTypedData has not been released in geth")
@pytest.mark.asyncio
async def test_eth_sign_typed_data(
self, async_w3, keyfile_account_address_dual_type, async_skip_if_testrpc
):
await super().test_eth_sign_typed_data(
async_w3, keyfile_account_address_dual_type, async_skip_if_testrpc
)
@pytest.mark.xfail(reason="eth_signTypedData has not been released in geth")
@pytest.mark.asyncio
async def test_invalid_eth_sign_typed_data(
self, async_w3, keyfile_account_address_dual_type, async_skip_if_testrpc
):
await super().test_invalid_eth_sign_typed_data(
async_w3, keyfile_account_address_dual_type, async_skip_if_testrpc
)
| GoEthereumAsyncEthModuleTest |
python | getsentry__sentry | src/sentry/integrations/bitbucket/integration.py | {
"start": 3639,
"end": 6684
} | class ____(RepositoryIntegration, BitbucketIssuesSpec):
codeowners_locations = [".bitbucket/CODEOWNERS"]
@property
def integration_name(self) -> str:
return IntegrationProviderSlug.BITBUCKET.value
def get_client(self):
return BitbucketApiClient(integration=self.model)
# IntegrationInstallation methods
def error_message_from_json(self, data):
return data.get("error", {}).get("message", "unknown error")
# RepositoryIntegration methods
def get_repositories(
self, query: str | None = None, page_number_limit: int | None = None
) -> list[dict[str, Any]]:
username = self.model.metadata.get("uuid", self.username)
if not query:
resp = self.get_client().get_repos(username)
return [
{"identifier": repo["full_name"], "name": repo["full_name"]}
for repo in resp.get("values", [])
]
exact_query = f'name="{query}"'
fuzzy_query = f'name~"{query}"'
exact_search_resp = self.get_client().search_repositories(username, exact_query)
fuzzy_search_resp = self.get_client().search_repositories(username, fuzzy_query)
result: OrderedSet[str] = OrderedSet()
for j in exact_search_resp.get("values", []):
result.add(j["full_name"])
for i in fuzzy_search_resp.get("values", []):
result.add(i["full_name"])
return [{"identifier": full_name, "name": full_name} for full_name in result]
def has_repo_access(self, repo: RpcRepository) -> bool:
client = self.get_client()
try:
client.get_hooks(repo.config["name"])
except ApiError:
return False
return True
def get_unmigratable_repositories(self) -> list[RpcRepository]:
repos = repository_service.get_repositories(
organization_id=self.organization_id,
providers=[IntegrationProviderSlug.BITBUCKET.value],
)
accessible_repos = [r["identifier"] for r in self.get_repositories()]
return [repo for repo in repos if repo.name not in accessible_repos]
def source_url_matches(self, url: str) -> bool:
return url.startswith(f'https://{self.model.metadata["domain_name"]}') or url.startswith(
"https://bitbucket.org",
)
def format_source_url(self, repo: Repository, filepath: str, branch: str | None) -> str:
return f"https://bitbucket.org/{repo.name}/src/{branch}/{filepath}"
def extract_branch_from_source_url(self, repo: Repository, url: str) -> str:
url = url.replace(f"{repo.url}/src/", "")
branch, _, _ = url.partition("/")
return branch
def extract_source_path_from_source_url(self, repo: Repository, url: str) -> str:
url = url.replace(f"{repo.url}/src/", "")
_, _, source_path = url.partition("/")
return source_path
# Bitbucket only methods
@property
def username(self):
return self.model.name
| BitbucketIntegration |
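The two `extract_*_from_source_url` methods above strip the repository prefix and then split on the first slash with `str.partition`. A standalone sketch of that parsing with made-up repository values:

```python
repo_url = "https://bitbucket.org/acme/backend"
source_url = "https://bitbucket.org/acme/backend/src/main/src/app/models.py"

rest = source_url.replace(f"{repo_url}/src/", "")
branch, _, source_path = rest.partition("/")
assert branch == "main"
assert source_path == "src/app/models.py"
```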
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_api.py | {
"start": 68370,
"end": 70922
} | class ____(TestCase):
"""Import API endpoint tests."""
fixtures = ["eric.json", "test_data.json"]
def test_permissions(self):
"""Ensure user repositories aren't leaked to other users."""
client = APIClient()
account_a = get(SocialAccount, provider="github")
account_b = get(SocialAccount, provider="github")
account_c = get(SocialAccount, provider="github")
user_a = get(User, password="test")
user_b = get(User, password="test")
user_c = get(User, password="test")
org_a = get(RemoteOrganization)
get(
RemoteOrganizationRelation,
remote_organization=org_a,
user=user_a,
account=account_a,
)
repo_a = get(
RemoteRepository,
organization=org_a,
)
get(
RemoteRepositoryRelation,
remote_repository=repo_a,
user=user_a,
account=account_a,
)
repo_b = get(
RemoteRepository,
organization=None,
)
get(
RemoteRepositoryRelation,
remote_repository=repo_b,
user=user_b,
account=account_b,
)
client.force_authenticate(user=user_a)
resp = client.get("/api/v2/remote/repo/", format="json")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
repos = resp.data["results"]
self.assertEqual(repos[0]["id"], repo_a.id)
self.assertEqual(repos[0]["organization"]["id"], org_a.id)
self.assertEqual(len(repos), 1)
resp = client.get("/api/v2/remote/org/", format="json")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
orgs = resp.data["results"]
self.assertEqual(orgs[0]["id"], org_a.id)
self.assertEqual(len(orgs), 1)
client.force_authenticate(user=user_b)
resp = client.get("/api/v2/remote/repo/", format="json")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
repos = resp.data["results"]
self.assertEqual(repos[0]["id"], repo_b.id)
self.assertEqual(repos[0]["organization"], None)
self.assertEqual(len(repos), 1)
client.force_authenticate(user=user_c)
resp = client.get("/api/v2/remote/repo/", format="json")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
repos = resp.data["results"]
self.assertEqual(len(repos), 0)
@mock.patch("readthedocs.core.views.hooks.trigger_build")
| APIImportTests |
python | getsentry__sentry | tests/sentry/api/bases/test_organizationmember.py | {
"start": 3441,
"end": 4499
} | class ____(PermissionBaseTestCase):
def setUp(self) -> None:
super().setUp()
self.permission_cls = MemberAndStaffPermission
def test_superuser(self) -> None:
superuser = self.create_user(is_superuser=True)
assert self.has_object_perm("GET", self.org, user=superuser, is_superuser=True)
assert self.has_object_perm("PUT", self.org, user=superuser, is_superuser=True)
assert self.has_object_perm("POST", self.org, user=superuser, is_superuser=True)
assert self.has_object_perm("DELETE", self.org, user=superuser, is_superuser=True)
def test_staff(self) -> None:
staff_user = self.create_user(is_staff=True)
assert self.has_object_perm("GET", self.org, user=staff_user, is_staff=True)
assert self.has_object_perm("PUT", self.org, user=staff_user, is_staff=True)
assert self.has_object_perm("POST", self.org, user=staff_user, is_staff=True)
assert self.has_object_perm("DELETE", self.org, user=staff_user, is_staff=True)
| OrganizationAndStaffPermissionTest |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/security.py | {
"start": 6799,
"end": 7039
} | class ____(PermittedDagFilter):
"""A parameter that filters the permitted dag runs for the user."""
def to_orm(self, select: Select) -> Select:
return select.where(DagRun.dag_id.in_(self.value or set()))
| PermittedDagRunFilter |
python | tensorflow__tensorflow | tensorflow/python/keras/metrics.py | {
"start": 93791,
"end": 94655
} | class ____(MeanMetricWrapper):
"""Computes the mean squared error between `y_true` and `y_pred`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.MeanSquaredError()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.25
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.5
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.MeanSquaredError()])
```
"""
def __init__(self, name='mean_squared_error', dtype=None):
super(MeanSquaredError, self).__init__(
mean_squared_error, name, dtype=dtype)
| MeanSquaredError |
python | scikit-image__scikit-image | src/skimage/_shared/utils.py | {
"start": 3743,
"end": 6169
} | class ____:
"""Decorator for changing the default value of an argument.
Parameters
----------
arg_name : str
The name of the argument to be updated.
new_value : any
The new default value of the argument.
changed_version : str
The package version in which the change will be introduced.
warning_msg : str
Optional warning message. If None, a generic warning message
is used.
stacklevel : {None, int}, optional
If None, the decorator attempts to detect the appropriate stacklevel for the
deprecation warning automatically. This can fail, e.g., due to
decorating a closure, in which case you can set the stacklevel manually
here. The outermost decorator should have stacklevel 2, the next inner
one stacklevel 3, etc.
"""
def __init__(
self, arg_name, *, new_value, changed_version, warning_msg=None, stacklevel=None
):
self.arg_name = arg_name
self.new_value = new_value
self.warning_msg = warning_msg
self.changed_version = changed_version
self.stacklevel = stacklevel
def __call__(self, func):
parameters = inspect.signature(func).parameters
arg_idx = list(parameters.keys()).index(self.arg_name)
old_value = parameters[self.arg_name].default
if self.warning_msg is None:
self.warning_msg = (
f'The new recommended value for {self.arg_name} is '
f'{self.new_value}. Until version {self.changed_version}, '
f'the default {self.arg_name} value is {old_value}. '
f'From version {self.changed_version}, the {self.arg_name} '
f'default value will be {self.new_value}. To avoid '
f'this warning, please explicitly set {self.arg_name} value.'
)
@functools.wraps(func)
def fixed_func(*args, **kwargs):
if len(args) < arg_idx + 1 and self.arg_name not in kwargs.keys():
stacklevel = (
self.stacklevel
if self.stacklevel is not None
else _warning_stacklevel(func)
)
# warn that arg_name default value changed:
warnings.warn(self.warning_msg, FutureWarning, stacklevel=stacklevel)
return func(*args, **kwargs)
return fixed_func
| change_default_value |
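A minimal, hypothetical use of the decorator defined above (the function name and parameter values are made up; scikit-image applies it to real API parameters):

```python
import warnings

@change_default_value("sigma", new_value=2.0, changed_version="0.26", stacklevel=2)
def smooth(image, sigma=1.0):
    return sigma

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    assert smooth("img") == 1.0             # old default still applies, but a FutureWarning is emitted
    assert smooth("img", sigma=2.0) == 2.0  # explicit value: no warning for this call

assert any(issubclass(w.category, FutureWarning) for w in caught)
```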
python | getsentry__sentry | tests/sentry/spans/test_buffer.py | {
"start": 2973,
"end": 23941
} | class ____:
pass
def process_spans(spans: Sequence[Span | _SplitBatch], buffer: SpansBuffer, now):
"""
Call buffer.process_spans on the list of spans.
We get a sequence of spans like this:
A
B
C
SPLIT
D
A, B, C will land in a batch, D will land in its own batch.
"""
span_chunks: list[list[Span]] = [[]]
for span in spans:
if isinstance(span, _SplitBatch):
if span_chunks[-1]:
span_chunks.append([])
else:
span_chunks[-1].append(span)
for chunk in span_chunks:
buffer.process_spans(chunk, now)
@pytest.mark.parametrize(
"spans",
list(
itertools.permutations(
[
Span(
payload=_payload("a" * 16),
trace_id="a" * 32,
span_id="a" * 16,
parent_span_id="b" * 16,
segment_id=None,
project_id=1,
end_timestamp=1700000000.0,
),
Span(
payload=_payload("d" * 16),
trace_id="a" * 32,
span_id="d" * 16,
parent_span_id="b" * 16,
segment_id=None,
project_id=1,
end_timestamp=1700000000.0,
),
Span(
payload=_payload("c" * 16),
trace_id="a" * 32,
span_id="c" * 16,
parent_span_id="b" * 16,
segment_id=None,
project_id=1,
end_timestamp=1700000000.0,
),
Span(
payload=_payload("b" * 16),
trace_id="a" * 32,
span_id="b" * 16,
parent_span_id=None,
segment_id=None,
is_segment_span=True,
project_id=1,
end_timestamp=1700000000.0,
),
]
)
),
)
def test_basic(buffer: SpansBuffer, spans) -> None:
process_spans(spans, buffer, now=0)
assert_ttls(buffer.client)
assert buffer.flush_segments(now=5) == {}
rv = buffer.flush_segments(now=11)
_normalize_output(rv)
assert rv == {
_segment_id(1, "a" * 32, "b" * 16): FlushedSegment(
queue_key=mock.ANY,
spans=[
_output_segment(b"a" * 16, b"b" * 16, False),
_output_segment(b"b" * 16, b"b" * 16, True),
_output_segment(b"c" * 16, b"b" * 16, False),
_output_segment(b"d" * 16, b"b" * 16, False),
],
)
}
buffer.done_flush_segments(rv)
assert buffer.flush_segments(now=30) == {}
assert list(buffer.get_memory_info())
assert_clean(buffer.client)
@pytest.mark.parametrize(
"spans",
list(
itertools.permutations(
[
Span(
payload=_payload("d" * 16),
trace_id="a" * 32,
span_id="d" * 16,
parent_span_id="b" * 16,
segment_id=None,
project_id=1,
end_timestamp=1700000000.0,
),
_SplitBatch(),
Span(
payload=_payload("b" * 16),
trace_id="a" * 32,
span_id="b" * 16,
parent_span_id="a" * 16,
segment_id=None,
project_id=1,
end_timestamp=1700000000.0,
),
Span(
payload=_payload("a" * 16),
trace_id="a" * 32,
span_id="a" * 16,
parent_span_id=None,
is_segment_span=True,
segment_id=None,
project_id=1,
end_timestamp=1700000000.0,
),
Span(
payload=_payload("c" * 16),
trace_id="a" * 32,
span_id="c" * 16,
parent_span_id="a" * 16,
segment_id=None,
project_id=1,
end_timestamp=1700000000.0,
),
]
)
),
)
def test_deep(buffer: SpansBuffer, spans) -> None:
process_spans(spans, buffer, now=0)
assert_ttls(buffer.client)
rv = buffer.flush_segments(now=10)
_normalize_output(rv)
assert rv == {
_segment_id(1, "a" * 32, "a" * 16): FlushedSegment(
queue_key=mock.ANY,
spans=[
_output_segment(b"a" * 16, b"a" * 16, True),
_output_segment(b"b" * 16, b"a" * 16, False),
_output_segment(b"c" * 16, b"a" * 16, False),
_output_segment(b"d" * 16, b"a" * 16, False),
],
)
}
buffer.done_flush_segments(rv)
rv = buffer.flush_segments(now=60)
assert rv == {}
assert_clean(buffer.client)
@pytest.mark.parametrize(
"spans",
list(
itertools.permutations(
[
Span(
payload=_payload("e" * 16),
trace_id="a" * 32,
span_id="e" * 16,
parent_span_id="d" * 16,
segment_id=None,
project_id=1,
end_timestamp=1700000000.0,
),
Span(
payload=_payload("d" * 16),
trace_id="a" * 32,
span_id="d" * 16,
parent_span_id="b" * 16,
segment_id=None,
project_id=1,
end_timestamp=1700000000.0,
),
Span(
payload=_payload("b" * 16),
trace_id="a" * 32,
span_id="b" * 16,
parent_span_id="c" * 16,
segment_id=None,
project_id=1,
end_timestamp=1700000000.0,
),
Span(
payload=_payload("c" * 16),
trace_id="a" * 32,
span_id="c" * 16,
parent_span_id="a" * 16,
segment_id=None,
project_id=1,
end_timestamp=1700000000.0,
),
Span(
payload=_payload("a" * 16),
trace_id="a" * 32,
span_id="a" * 16,
parent_span_id=None,
is_segment_span=True,
segment_id=None,
project_id=1,
end_timestamp=1700000000.0,
),
]
)
),
)
def test_deep2(buffer: SpansBuffer, spans) -> None:
process_spans(spans, buffer, now=0)
assert_ttls(buffer.client)
rv = buffer.flush_segments(now=10)
_normalize_output(rv)
assert rv == {
_segment_id(1, "a" * 32, "a" * 16): FlushedSegment(
queue_key=mock.ANY,
spans=[
_output_segment(b"a" * 16, b"a" * 16, True),
_output_segment(b"b" * 16, b"a" * 16, False),
_output_segment(b"c" * 16, b"a" * 16, False),
_output_segment(b"d" * 16, b"a" * 16, False),
_output_segment(b"e" * 16, b"a" * 16, False),
],
)
}
buffer.done_flush_segments(rv)
rv = buffer.flush_segments(now=60)
assert rv == {}
assert_clean(buffer.client)
@pytest.mark.parametrize(
"spans",
list(
itertools.permutations(
[
Span(
payload=_payload("c" * 16),
trace_id="a" * 32,
span_id="c" * 16,
parent_span_id="b" * 16,
segment_id=None,
project_id=1,
end_timestamp=1700000000.0,
),
Span(
payload=_payload("d" * 16),
trace_id="a" * 32,
span_id="d" * 16,
parent_span_id="b" * 16,
segment_id=None,
project_id=1,
end_timestamp=1700000000.0,
),
Span(
payload=_payload("e" * 16),
trace_id="a" * 32,
span_id="e" * 16,
parent_span_id="b" * 16,
segment_id=None,
project_id=1,
end_timestamp=1700000000.0,
),
Span(
payload=_payload("b" * 16),
trace_id="a" * 32,
span_id="b" * 16,
parent_span_id=None,
is_segment_span=True,
segment_id=None,
project_id=2,
end_timestamp=1700000000.0,
),
]
)
),
)
def test_parent_in_other_project(buffer: SpansBuffer, spans) -> None:
process_spans(spans, buffer, now=0)
assert_ttls(buffer.client)
assert buffer.flush_segments(now=5) == {}
rv = buffer.flush_segments(now=11)
assert rv == {
_segment_id(2, "a" * 32, "b" * 16): FlushedSegment(
queue_key=mock.ANY, spans=[_output_segment(b"b" * 16, b"b" * 16, True)]
)
}
buffer.done_flush_segments(rv)
# TODO: flush faster, since we already saw parent in other project
assert buffer.flush_segments(now=30) == {}
rv = buffer.flush_segments(now=60)
_normalize_output(rv)
assert rv == {
_segment_id(1, "a" * 32, "b" * 16): FlushedSegment(
queue_key=mock.ANY,
spans=[
_output_segment(b"c" * 16, b"b" * 16, False),
_output_segment(b"d" * 16, b"b" * 16, False),
_output_segment(b"e" * 16, b"b" * 16, False),
],
)
}
buffer.done_flush_segments(rv)
assert buffer.flush_segments(now=90) == {}
assert_clean(buffer.client)
@pytest.mark.parametrize(
"spans",
shallow_permutations(
[
Span(
payload=_payload("c" * 16),
trace_id="a" * 32,
span_id="c" * 16,
parent_span_id="d" * 16,
project_id=1,
segment_id=None,
is_segment_span=True,
end_timestamp=1700000000.0,
),
Span(
payload=_payload("d" * 16),
trace_id="a" * 32,
span_id="d" * 16,
parent_span_id="b" * 16,
segment_id=None,
project_id=1,
end_timestamp=1700000000.0,
),
Span(
payload=_payload("e" * 16),
trace_id="a" * 32,
span_id="e" * 16,
parent_span_id="b" * 16,
segment_id=None,
project_id=1,
end_timestamp=1700000000.0,
),
Span(
payload=_payload("b" * 16),
trace_id="a" * 32,
span_id="b" * 16,
parent_span_id=None,
is_segment_span=True,
segment_id=None,
project_id=2,
end_timestamp=1700000000.0,
),
]
),
)
def test_parent_in_other_project_and_nested_is_segment_span(buffer: SpansBuffer, spans) -> None:
process_spans(spans, buffer, now=0)
assert_ttls(buffer.client)
assert buffer.flush_segments(now=5) == {}
rv = buffer.flush_segments(now=11)
assert rv == {
_segment_id(2, "a" * 32, "b" * 16): FlushedSegment(
queue_key=mock.ANY, spans=[_output_segment(b"b" * 16, b"b" * 16, True)]
),
_segment_id(1, "a" * 32, "c" * 16): FlushedSegment(
queue_key=mock.ANY,
spans=[
_output_segment(b"c" * 16, b"c" * 16, True),
],
),
}
buffer.done_flush_segments(rv)
# TODO: flush faster, since we already saw parent in other project
assert buffer.flush_segments(now=30) == {}
rv = buffer.flush_segments(now=60)
_normalize_output(rv)
assert rv == {
_segment_id(1, "a" * 32, "b" * 16): FlushedSegment(
queue_key=mock.ANY,
spans=[
_output_segment(b"d" * 16, b"b" * 16, False),
_output_segment(b"e" * 16, b"b" * 16, False),
],
),
}
buffer.done_flush_segments(rv)
assert buffer.flush_segments(now=90) == {}
assert_clean(buffer.client)
def test_flush_rebalance(buffer: SpansBuffer) -> None:
spans = [
Span(
payload=_payload("a" * 16),
trace_id="a" * 32,
span_id="a" * 16,
parent_span_id=None,
segment_id=None,
project_id=1,
is_segment_span=True,
end_timestamp=1700000000.0,
)
]
process_spans(spans, buffer, now=0)
assert_ttls(buffer.client)
assert buffer.flush_segments(now=5) == {}
rv = buffer.flush_segments(now=11)
assert rv == {
_segment_id(1, "a" * 32, "a" * 16): FlushedSegment(
queue_key=mock.ANY, spans=[_output_segment(b"a" * 16, b"a" * 16, True)]
),
}
# Clear out assigned shards, simulating a rebalance operation.
buffer.assigned_shards.clear()
buffer.done_flush_segments(rv)
rv = buffer.flush_segments(now=20)
assert not rv
assert_clean(buffer.client)
@pytest.mark.parametrize("compression_level", [-1, 0])
def test_compression_functionality(compression_level) -> None:
"""Test that compression is working correctly at various compression levels."""
with override_options({**DEFAULT_OPTIONS, "spans.buffer.compression.level": compression_level}):
buffer = SpansBuffer(assigned_shards=list(range(32)))
def make_payload(span_id: str):
return orjson.dumps(
{
"span_id": span_id,
"trace_id": "a" * 32,
"data": {"message": "x" * 1000},
"extra_data": {"field": "y" * 500},
}
)
spans = [
Span(
payload=make_payload("b" * 16),
trace_id="a" * 32,
span_id="b" * 16,
parent_span_id=None,
project_id=1,
segment_id=None,
is_segment_span=True,
end_timestamp=1700000000.0,
),
Span(
payload=make_payload("a" * 16),
trace_id="a" * 32,
span_id="a" * 16,
parent_span_id="b" * 16,
segment_id=None,
project_id=1,
end_timestamp=1700000000.0,
),
Span(
payload=make_payload("c" * 16),
trace_id="a" * 32,
span_id="c" * 16,
parent_span_id="b" * 16,
segment_id=None,
project_id=1,
end_timestamp=1700000000.0,
),
]
buffer.process_spans(spans, now=0)
segment_key = _segment_id(1, "a" * 32, "b" * 16)
stored_data = buffer.client.zrange(segment_key, 0, -1, withscores=False)
assert len(stored_data) > 0
segments = buffer.flush_segments(now=11)
assert len(segments) == 1
segment = list(segments.values())[0]
assert len(segment.spans) == 3
span_ids = set()
for span in segment.spans:
assert "data" in span.payload
assert "extra_data" in span.payload
assert span.payload["data"]["message"] == "x" * 1000
assert span.payload["extra_data"]["field"] == "y" * 500
span_ids.add(span.payload["span_id"])
expected_span_ids = {"a" * 16, "b" * 16, "c" * 16}
assert span_ids == expected_span_ids
buffer.done_flush_segments(segments)
assert_clean(buffer.client)
def test_max_segment_spans_limit(buffer: SpansBuffer) -> None:
batch1 = [
Span(
payload=_payload("c" * 16),
trace_id="a" * 32,
span_id="c" * 16,
parent_span_id="b" * 16,
segment_id=None,
project_id=1,
end_timestamp=1700000001.0,
),
Span(
payload=_payload("b" * 16),
trace_id="a" * 32,
span_id="b" * 16,
parent_span_id="a" * 16,
segment_id=None,
project_id=1,
end_timestamp=1700000002.0,
),
]
batch2 = [
Span(
payload=_payload("d" * 16),
trace_id="a" * 32,
span_id="d" * 16,
parent_span_id="a" * 16,
segment_id=None,
project_id=1,
end_timestamp=1700000003.0,
),
Span(
payload=_payload("e" * 16),
trace_id="a" * 32,
span_id="e" * 16,
parent_span_id="a" * 16,
segment_id=None,
project_id=1,
end_timestamp=1700000004.0,
),
Span(
payload=_payload("a" * 16),
trace_id="a" * 32,
span_id="a" * 16,
parent_span_id=None,
project_id=1,
segment_id=None,
is_segment_span=True,
end_timestamp=1700000005.0,
),
]
with override_options({"spans.buffer.max-segment-bytes": 200}):
buffer.process_spans(batch1, now=0)
buffer.process_spans(batch2, now=0)
rv = buffer.flush_segments(now=11)
segment = rv[_segment_id(1, "a" * 32, "a" * 16)]
retained_span_ids = {span.payload["span_id"] for span in segment.spans}
# NB: The buffer can only remove entire batches, using the minimum timestamp within the batch.
# The first batch with "b" and "c" should be removed.
assert retained_span_ids == {"a" * 16, "d" * 16, "e" * 16}
# NB: We currently accept that we leak redirect keys when we limit segments.
# buffer.done_flush_segments(rv)
# assert_clean(buffer.client)
def test_kafka_slice_id(buffer: SpansBuffer) -> None:
with override_options(DEFAULT_OPTIONS):
buffer = SpansBuffer(assigned_shards=list(range(1)), slice_id=2)
queue_key = buffer._get_queue_key(0)
assert queue_key == b"span-buf:q:2-0"
spans = [
Span(
payload=_payload("a" * 16),
trace_id="a" * 32,
span_id="a" * 16,
parent_span_id=None,
project_id=1,
segment_id=None,
is_segment_span=True,
end_timestamp=1700000000.0,
)
]
process_spans(spans, buffer, now=0)
assert buffer.client.keys("span-buf:q:*") == [queue_key]
segments = buffer.flush_segments(now=11)
buffer.done_flush_segments(segments)
assert_clean(buffer.client)
def test_preassigned_disconnected_segment(buffer: SpansBuffer) -> None:
# Test that a segment with two spans that are not directly connected, but
# where the `segment_id` is available ahead of time, is correctly joined and
# returned.
spans = [
Span(
payload=_payload("b" * 16),
trace_id="a" * 32,
span_id="b" * 16,
parent_span_id="c" * 16, # does not exist in this segment
project_id=1,
segment_id="a" * 16, # refers to the correct span below
end_timestamp=1700000000.0,
),
Span(
payload=_payload("a" * 16),
trace_id="a" * 32,
span_id="a" * 16,
parent_span_id=None,
project_id=1,
segment_id="a" * 16,
is_segment_span=True,
end_timestamp=1700000001.0,
),
]
process_spans(spans, buffer, now=0)
assert_ttls(buffer.client)
assert buffer.flush_segments(now=5) == {}
rv = buffer.flush_segments(now=11)
_normalize_output(rv)
assert rv == {
_segment_id(1, "a" * 32, "a" * 16): FlushedSegment(
queue_key=mock.ANY,
spans=[
_output_segment(b"a" * 16, b"a" * 16, True),
_output_segment(b"b" * 16, b"a" * 16, False),
],
)
}
buffer.done_flush_segments(rv)
assert buffer.flush_segments(now=30) == {}
assert list(buffer.get_memory_info())
assert_clean(buffer.client)
| _SplitBatch |
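The process_spans helper in the row above splits its input into batches at _SplitBatch sentinels before handing each batch to the buffer. A self-contained sketch of that chunking step, using a hypothetical SPLIT marker instead of Sentry's dataclass:

SPLIT = object()

def chunk_on_sentinel(items, sentinel=SPLIT):
    # Start a new chunk whenever the sentinel is seen, but never emit an
    # empty chunk (mirrors the `if span_chunks[-1]` guard above).
    chunks = [[]]
    for item in items:
        if item is sentinel:
            if chunks[-1]:
                chunks.append([])
        else:
            chunks[-1].append(item)
    return chunks

assert chunk_on_sentinel(["a", "b", "c", SPLIT, "d"]) == [["a", "b", "c"], ["d"]]
assert chunk_on_sentinel([SPLIT, "a"]) == [["a"]]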
python | gevent__gevent | src/gevent/util.py | {
"start": 18717,
"end": 22571
} | class ____(object):
"""
A context manager for ensuring a block of code switches greenlets.
This performs a similar function as the :doc:`monitoring thread
</monitoring>`, but the scope is limited to the body of the with
statement. If the code within the body doesn't yield to the hub
(and doesn't raise an exception), then upon exiting the
context manager an :exc:`AssertionError` will be raised.
This is useful in unit tests and for debugging purposes.
:keyword float max_blocking_time: If given, the body is allowed
to block for up to this many fractional seconds before
an error is raised.
:keyword bool hub_only: If True, then *max_blocking_time* only
refers to the amount of time spent between switches into the
hub. If False, then it refers to the maximum time between
*any* switches. If *max_blocking_time* is not given, has no
effect.
Example::
# This will always raise an exception: nothing switched
with assert_switches():
pass
# This will never raise an exception; nothing switched,
# but it happened very fast
with assert_switches(max_blocking_time=1.0):
pass
.. versionadded:: 1.3
.. versionchanged:: 1.4
If an exception is raised, it now includes information about
the duration of blocking and the parameters of this object.
"""
hub = None
tracer = None
_entered = None
def __init__(self, max_blocking_time=None, hub_only=False):
self.max_blocking_time = max_blocking_time
self.hub_only = hub_only
def __enter__(self):
from gevent import get_hub
from gevent import _tracer
self.hub = hub = get_hub()
# TODO: We could optimize this to use the GreenletTracer
# installed by the monitoring thread, if there is one.
# As it is, we will chain trace calls back to it.
if not self.max_blocking_time:
self.tracer = _tracer.GreenletTracer()
elif self.hub_only:
self.tracer = _tracer.HubSwitchTracer(hub, self.max_blocking_time)
else:
self.tracer = _tracer.MaxSwitchTracer(hub, self.max_blocking_time)
self._entered = perf_counter()
self.tracer.monitor_current_greenlet_blocking()
return self
def __exit__(self, t, v, tb):
self.tracer.kill()
hub = self.hub; self.hub = None
tracer = self.tracer; self.tracer = None
# Only check if there was no exception raised, we
# don't want to hide anything
if t is not None:
return
did_block = tracer.did_block_hub(hub)
if did_block:
execution_time_s = perf_counter() - self._entered
active_greenlet = did_block[1]
report_lines = tracer.did_block_hub_report(hub, active_greenlet, {})
message = 'To the hub' if self.hub_only else 'To any greenlet'
message += ' in %.4f seconds' % (execution_time_s,)
max_block = self.max_blocking_time
message += ' (max allowed %.4f seconds)' % (max_block,) if max_block else ''
message += '\n'
message += '\n'.join(report_lines)
raise _FailedToSwitch(message)
def clear_stack_frames(frame):
"""Do our best to clear local variables in all frames in a stack."""
# On Python 3, frames have a .clear() method that can raise a RuntimeError.
while frame is not None:
try:
frame.clear()
except (RuntimeError, AttributeError):
pass
try:
frame.f_locals.clear()
except AttributeError:
# Python 3.13 removed clear();
# f_locals is now a FrameLocalsProxy.
pass
frame = frame.f_back
| assert_switches |
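assert_switches above verifies that the body of a with statement yielded to the hub. The sketch below keeps only the shape of that pattern — record state on enter, check it on exit, and never mask an exception raised by the body — with the greenlet tracer replaced by an explicit counter; it is an illustration, not gevent's implementation.

class AssertEventHappened:
    # Toy analog of assert_switches: the "switch" is an explicit method call
    # here, whereas the real class observes greenlet switches via a tracer.
    def __init__(self):
        self._count = 0

    def switch(self):
        self._count += 1

    def __enter__(self):
        self._start = self._count
        return self

    def __exit__(self, exc_type, exc_value, tb):
        if exc_type is not None:
            return False  # don't hide an exception raised by the body
        if self._count == self._start:
            raise AssertionError("the body never switched")
        return False

ctx = AssertEventHappened()
with ctx:
    ctx.switch()          # passes

try:
    with AssertEventHappened():
        pass              # raises AssertionError on exit
except AssertionError:
    pass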
python | numba__numba | numba/tests/test_tuples.py | {
"start": 3069,
"end": 4256
} | class ____(TestCase):
def test_array_tuple(self):
aryty = types.Array(types.float64, 1, 'C')
cfunc = njit((aryty, aryty))(tuple_return_usecase)
a = b = np.arange(5, dtype='float64')
ra, rb = cfunc(a, b)
self.assertPreciseEqual(ra, a)
self.assertPreciseEqual(rb, b)
del a, b
self.assertPreciseEqual(ra, rb)
def test_scalar_tuple(self):
scalarty = types.float32
cfunc = njit((scalarty, scalarty))(tuple_return_usecase)
a = b = 1
ra, rb = cfunc(a, b)
self.assertEqual(ra, a)
self.assertEqual(rb, b)
def test_hetero_tuple(self):
alltypes = []
allvalues = []
alltypes.append((types.int32, types.int64))
allvalues.append((1, 2))
alltypes.append((types.float32, types.float64))
allvalues.append((1.125, .25))
alltypes.append((types.int32, types.float64))
allvalues.append((1231, .5))
for (ta, tb), (a, b) in zip(alltypes, allvalues):
cfunc = njit((ta, tb))(tuple_return_usecase)
ra, rb = cfunc(a, b)
self.assertPreciseEqual((ra, rb), (a, b))
| TestTupleReturn |
python | scipy__scipy | scipy/optimize/_trustregion.py | {
"start": 875,
"end": 11866
} | class ____:
"""
Base/abstract class defining the quadratic model for trust-region
minimization. Child classes must implement the ``solve`` method.
Values of the objective function, Jacobian and Hessian (if provided) at
the current iterate ``x`` are evaluated on demand and then stored as
attributes ``fun``, ``jac``, ``hess``.
"""
def __init__(self, x, fun, jac, hess=None, hessp=None):
self._x = x
self._f = None
self._g = None
self._h = None
self._g_mag = None
self._cauchy_point = None
self._newton_point = None
self._fun = fun
self._jac = jac
self._hess = hess
self._hessp = hessp
def __call__(self, p):
return self.fun + np.dot(self.jac, p) + 0.5 * np.dot(p, self.hessp(p))
@property
def fun(self):
"""Value of objective function at current iteration."""
if self._f is None:
self._f = self._fun(self._x)
return self._f
@property
def jac(self):
"""Value of Jacobian of objective function at current iteration."""
if self._g is None:
self._g = self._jac(self._x)
return self._g
@property
def hess(self):
"""Value of Hessian of objective function at current iteration."""
if self._h is None:
self._h = self._hess(self._x)
return self._h
def hessp(self, p):
if self._hessp is not None:
return self._hessp(self._x, p)
else:
return np.dot(self.hess, p)
@property
def jac_mag(self):
"""Magnitude of jacobian of objective function at current iteration."""
if self._g_mag is None:
self._g_mag = scipy.linalg.norm(self.jac)
return self._g_mag
def get_boundaries_intersections(self, z, d, trust_radius):
"""
Solve the scalar quadratic equation ``||z + t d|| == trust_radius``.
This is like a line-sphere intersection.
Return the two values of t, sorted from low to high.
"""
a = np.dot(d, d)
b = 2 * np.dot(z, d)
c = np.dot(z, z) - trust_radius**2
sqrt_discriminant = math.sqrt(b*b - 4*a*c)
# The following calculation is mathematically
# equivalent to:
# ta = (-b - sqrt_discriminant) / (2*a)
# tb = (-b + sqrt_discriminant) / (2*a)
        # but produces smaller round-off errors.
        # Look at Matrix Computations p.97
# for a better justification.
aux = b + math.copysign(sqrt_discriminant, b)
ta = -aux / (2*a)
tb = -2*c / aux
return sorted([ta, tb])
def solve(self, trust_radius):
raise NotImplementedError('The solve method should be implemented by '
'the child class')
def _minimize_trust_region(fun, x0, args=(), jac=None, hess=None, hessp=None,
subproblem=None, initial_trust_radius=1.0,
max_trust_radius=1000.0, eta=0.15, gtol=1e-4,
maxiter=None, disp=False, return_all=False,
callback=None, inexact=True, workers=None,
subproblem_maxiter=None, **unknown_options):
"""
Minimization of scalar function of one or more variables using a
trust-region algorithm.
Options for the trust-region algorithm are:
initial_trust_radius : float
Initial trust radius.
max_trust_radius : float
Never propose steps that are longer than this value.
eta : float
Trust region related acceptance stringency for proposed steps.
gtol : float
Gradient norm must be less than `gtol`
before successful termination.
maxiter : int
Maximum number of iterations to perform.
disp : bool
If True, print convergence message.
inexact : bool
Accuracy to solve subproblems. If True requires less nonlinear
iterations, but more vector products. Only effective for method
trust-krylov.
workers : int, map-like callable, optional
A map-like callable, such as `multiprocessing.Pool.map` for evaluating
any numerical differentiation in parallel.
This evaluation is carried out as ``workers(fun, iterable)``.
Only for 'trust-krylov', 'trust-ncg'.
.. versionadded:: 1.16.0
subproblem_maxiter : int, optional
Maximum number of iterations to perform per subproblem. Only affects
trust-exact. Default is 25.
.. versionadded:: 1.17.0
This function is called by the `minimize` function.
It is not supposed to be called directly.
"""
_check_unknown_options(unknown_options)
if jac is None:
raise ValueError('Jacobian is currently required for trust-region '
'methods')
if hess is None and hessp is None:
raise ValueError('Either the Hessian or the Hessian-vector product '
'is currently required for trust-region methods')
if subproblem is None:
raise ValueError('A subproblem solving strategy is required for '
'trust-region methods')
if not (0 <= eta < 0.25):
raise Exception('invalid acceptance stringency')
if max_trust_radius <= 0:
raise Exception('the max trust radius must be positive')
if initial_trust_radius <= 0:
raise ValueError('the initial trust radius must be positive')
if initial_trust_radius >= max_trust_radius:
raise ValueError('the initial trust radius must be less than the '
'max trust radius')
# force the initial guess into a nice format
x0 = np.asarray(x0).flatten()
# A ScalarFunction representing the problem. This caches calls to fun, jac,
# hess.
# the workers kwd only has an effect for trust-ncg, trust-krylov when
# estimating the Hessian with finite-differences. It's never used
# during calculation of jacobian, because callables are required for all
# methods.
sf = _prepare_scalar_function(
fun, x0, jac=jac, hess=hess, args=args, workers=workers
)
fun = sf.fun
jac = sf.grad
if callable(hess):
hess = sf.hess
elif callable(hessp):
# this elif statement must come before examining whether hess
# is estimated by FD methods or a HessianUpdateStrategy
pass
elif (hess in FD_METHODS or isinstance(hess, HessianUpdateStrategy)):
# If the Hessian is being estimated by finite differences or a
# Hessian update strategy then ScalarFunction.hess returns a
# LinearOperator or a HessianUpdateStrategy. This enables the
# calculation/creation of a hessp. BUT you only want to do this
# if the user *hasn't* provided a callable(hessp) function.
hess = None
def hessp(x, p, *args):
return sf.hess(x).dot(p)
else:
raise ValueError('Either the Hessian or the Hessian-vector product '
'is currently required for trust-region methods')
# ScalarFunction doesn't represent hessp
nhessp, hessp = _wrap_function(hessp, args)
# limit the number of iterations
if maxiter is None:
maxiter = len(x0)*200
# init the search status
warnflag = 0
# initialize the search
trust_radius = initial_trust_radius
x = x0
if return_all:
allvecs = [x]
subproblem_init_kw = {}
if hasattr(subproblem, 'MAXITER_DEFAULT'):
subproblem_init_kw['maxiter'] = subproblem_maxiter
m = subproblem(x, fun, jac, hess, hessp, **subproblem_init_kw)
k = 0
# search for the function min
# do not even start if the gradient is small enough
while m.jac_mag >= gtol:
# Solve the sub-problem.
# This gives us the proposed step relative to the current position
# and it tells us whether the proposed step
# has reached the trust region boundary or not.
try:
p, hits_boundary = m.solve(trust_radius)
except np.linalg.LinAlgError:
warnflag = 3
break
# calculate the predicted value at the proposed point
predicted_value = m(p)
# define the local approximation at the proposed point
x_proposed = x + p
m_proposed = subproblem(x_proposed, fun, jac, hess, hessp, **subproblem_init_kw)
# evaluate the ratio defined in equation (4.4)
actual_reduction = m.fun - m_proposed.fun
predicted_reduction = m.fun - predicted_value
if predicted_reduction <= 0:
warnflag = 2
break
rho = actual_reduction / predicted_reduction
# update the trust radius according to the actual/predicted ratio
if rho < 0.25:
trust_radius *= 0.25
elif rho > 0.75 and hits_boundary:
trust_radius = min(2*trust_radius, max_trust_radius)
# if the ratio is high enough then accept the proposed step
if rho > eta:
x = x_proposed
m = m_proposed
# append the best guess, call back, increment the iteration count
if return_all:
allvecs.append(np.copy(x))
k += 1
intermediate_result = OptimizeResult(x=x, fun=m.fun)
if _call_callback_maybe_halt(callback, intermediate_result):
break
# check if the gradient is small enough to stop
if m.jac_mag < gtol:
warnflag = 0
break
# check if we have looked at enough iterations
if k >= maxiter:
warnflag = 1
break
# print some stuff if requested
status_messages = (
_status_message['success'],
_status_message['maxiter'],
'A bad approximation caused failure to predict improvement.',
'A linalg error occurred, such as a non-psd Hessian.',
)
if disp:
if warnflag == 0:
print(status_messages[warnflag])
else:
warnings.warn(status_messages[warnflag], RuntimeWarning, stacklevel=3)
print(f" Current function value: {m.fun:f}")
print(f" Iterations: {k:d}")
print(f" Function evaluations: {sf.nfev:d}")
print(f" Gradient evaluations: {sf.ngev:d}")
print(f" Hessian evaluations: {sf.nhev + nhessp[0]:d}")
result = OptimizeResult(x=x, success=(warnflag == 0), status=warnflag,
fun=m.fun, jac=m.jac, nfev=sf.nfev, njev=sf.ngev,
nhev=sf.nhev + nhessp[0], nit=k,
message=status_messages[warnflag])
if hess is not None:
result['hess'] = m.hess
if return_all:
result['allvecs'] = allvecs
return result
| BaseQuadraticSubproblem |
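get_boundaries_intersections above solves ||z + t*d|| == trust_radius as a quadratic in t, computing one root from -(b + sign(b)*sqrt(disc)) / (2a) and the other from the product of roots to avoid cancellation. A small stand-alone check of that formula, assuming numpy is available:

import math
import numpy as np

def boundary_intersections(z, d, trust_radius):
    # a*t^2 + b*t + c = 0 with a = d.d, b = 2 z.d, c = z.z - radius^2;
    # the aux term avoids subtracting two nearly equal numbers.
    a = np.dot(d, d)
    b = 2 * np.dot(z, d)
    c = np.dot(z, z) - trust_radius ** 2
    sqrt_discriminant = math.sqrt(b * b - 4 * a * c)
    aux = b + math.copysign(sqrt_discriminant, b)
    return sorted([-aux / (2 * a), -2 * c / aux])

z = np.array([1.0, 0.0])
d = np.array([1.0, 0.0])
ta, tb = boundary_intersections(z, d, trust_radius=3.0)
# ||z + t*d|| = |1 + t|, so a sphere of radius 3 is crossed at t = -4 and t = 2.
assert (ta, tb) == (-4.0, 2.0)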
python | sympy__sympy | sympy/physics/vector/vector.py | {
"start": 364,
"end": 24485
} | class ____(Printable, EvalfMixin):
"""The class used to define vectors.
It along with ReferenceFrame are the building blocks of describing a
classical mechanics system in PyDy and sympy.physics.vector.
Attributes
==========
simp : Boolean
Let certain methods use trigsimp on their outputs
"""
simp = False
is_number = False
def __init__(self, inlist):
"""This is the constructor for the Vector class. You should not be
calling this, it should only be used by other functions. You should be
        treating Vectors as you would if you were doing the math by hand,
hand, and getting the first 3 from the standard basis vectors from a
ReferenceFrame.
The only exception is to create a zero vector:
zv = Vector(0)
"""
self.args = []
if inlist == 0:
inlist = []
if isinstance(inlist, dict):
d = inlist
else:
d = {}
for inp in inlist:
if inp[1] in d:
d[inp[1]] += inp[0]
else:
d[inp[1]] = inp[0]
for k, v in d.items():
if v != Matrix([0, 0, 0]):
self.args.append((v, k))
@property
def func(self):
"""Returns the class Vector. """
return Vector
def __hash__(self):
return hash(tuple(self.args))
def __add__(self, other):
"""The add operator for Vector. """
if other == 0:
return self
other = _check_vector(other)
return Vector(self.args + other.args)
def dot(self, other):
"""Dot product of two vectors.
Returns a scalar, the dot product of the two Vectors
Parameters
==========
other : Vector
The Vector which we are dotting with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, dot
>>> from sympy import symbols
>>> q1 = symbols('q1')
>>> N = ReferenceFrame('N')
>>> dot(N.x, N.x)
1
>>> dot(N.x, N.y)
0
>>> A = N.orientnew('A', 'Axis', [q1, N.x])
>>> dot(N.y, A.y)
cos(q1)
"""
from sympy.physics.vector.dyadic import Dyadic, _check_dyadic
if isinstance(other, Dyadic):
other = _check_dyadic(other)
ol = Vector(0)
for v in other.args:
ol += v[0] * v[2] * (v[1].dot(self))
return ol
other = _check_vector(other)
out = S.Zero
for v1 in self.args:
for v2 in other.args:
out += ((v2[0].T) * (v2[1].dcm(v1[1])) * (v1[0]))[0]
if Vector.simp:
return trigsimp(out, recursive=True)
else:
return out
def __truediv__(self, other):
"""This uses mul and inputs self and 1 divided by other. """
return self.__mul__(S.One / other)
def __eq__(self, other):
"""Tests for equality.
        It is very important to note that this is only as good as the SymPy
equality test; False does not always mean they are not equivalent
Vectors.
If other is 0, and self is empty, returns True.
If other is 0 and self is not empty, returns False.
If none of the above, only accepts other as a Vector.
"""
if other == 0:
other = Vector(0)
try:
other = _check_vector(other)
except TypeError:
return False
if (self.args == []) and (other.args == []):
return True
elif (self.args == []) or (other.args == []):
return False
frame = self.args[0][1]
for v in frame:
if expand((self - other).dot(v)) != 0:
return False
return True
def __mul__(self, other):
"""Multiplies the Vector by a sympifyable expression.
Parameters
==========
other : Sympifyable
The scalar to multiply this Vector with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> from sympy import Symbol
>>> N = ReferenceFrame('N')
>>> b = Symbol('b')
>>> V = 10 * b * N.x
>>> print(V)
10*b*N.x
"""
newlist = list(self.args)
other = sympify(other)
for i in range(len(newlist)):
newlist[i] = (other * newlist[i][0], newlist[i][1])
return Vector(newlist)
def __neg__(self):
return self * -1
def outer(self, other):
"""Outer product between two Vectors.
A rank increasing operation, which returns a Dyadic from two Vectors
Parameters
==========
other : Vector
The Vector to take the outer product with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, outer
>>> N = ReferenceFrame('N')
>>> outer(N.x, N.x)
(N.x|N.x)
"""
from sympy.physics.vector.dyadic import Dyadic
other = _check_vector(other)
ol = Dyadic(0)
for v in self.args:
for v2 in other.args:
# it looks this way because if we are in the same frame and
# use the enumerate function on the same frame in a nested
# fashion, then bad things happen
ol += Dyadic([(v[0][0] * v2[0][0], v[1].x, v2[1].x)])
ol += Dyadic([(v[0][0] * v2[0][1], v[1].x, v2[1].y)])
ol += Dyadic([(v[0][0] * v2[0][2], v[1].x, v2[1].z)])
ol += Dyadic([(v[0][1] * v2[0][0], v[1].y, v2[1].x)])
ol += Dyadic([(v[0][1] * v2[0][1], v[1].y, v2[1].y)])
ol += Dyadic([(v[0][1] * v2[0][2], v[1].y, v2[1].z)])
ol += Dyadic([(v[0][2] * v2[0][0], v[1].z, v2[1].x)])
ol += Dyadic([(v[0][2] * v2[0][1], v[1].z, v2[1].y)])
ol += Dyadic([(v[0][2] * v2[0][2], v[1].z, v2[1].z)])
return ol
def _latex(self, printer):
"""Latex Printing method. """
ar = self.args # just to shorten things
if len(ar) == 0:
return str(0)
ol = [] # output list, to be concatenated to a string
for v in ar:
for j in 0, 1, 2:
# if the coef of the basis vector is 1, we skip the 1
if v[0][j] == 1:
ol.append(' + ' + v[1].latex_vecs[j])
# if the coef of the basis vector is -1, we skip the 1
elif v[0][j] == -1:
ol.append(' - ' + v[1].latex_vecs[j])
elif v[0][j] != 0:
# If the coefficient of the basis vector is not 1 or -1;
# also, we might wrap it in parentheses, for readability.
arg_str = printer._print(v[0][j])
if isinstance(v[0][j], Add):
arg_str = "(%s)" % arg_str
if arg_str[0] == '-':
arg_str = arg_str[1:]
str_start = ' - '
else:
str_start = ' + '
ol.append(str_start + arg_str + v[1].latex_vecs[j])
outstr = ''.join(ol)
if outstr.startswith(' + '):
outstr = outstr[3:]
elif outstr.startswith(' '):
outstr = outstr[1:]
return outstr
def _pretty(self, printer):
"""Pretty Printing method. """
from sympy.printing.pretty.stringpict import prettyForm
terms = []
def juxtapose(a, b):
pa = printer._print(a)
pb = printer._print(b)
if a.is_Add:
pa = prettyForm(*pa.parens())
return printer._print_seq([pa, pb], delimiter=' ')
for M, N in self.args:
for i in range(3):
if M[i] == 0:
continue
elif M[i] == 1:
terms.append(prettyForm(N.pretty_vecs[i]))
elif M[i] == -1:
terms.append(prettyForm("-1") * prettyForm(N.pretty_vecs[i]))
else:
terms.append(juxtapose(M[i], N.pretty_vecs[i]))
if terms:
pretty_result = prettyForm.__add__(*terms)
else:
pretty_result = prettyForm("0")
return pretty_result
def __rsub__(self, other):
return (-1 * self) + other
def _sympystr(self, printer, order=True):
"""Printing method. """
if not order or len(self.args) == 1:
ar = list(self.args)
elif len(self.args) == 0:
return printer._print(0)
else:
d = {v[1]: v[0] for v in self.args}
keys = sorted(d.keys(), key=lambda x: x.index)
ar = []
for key in keys:
ar.append((d[key], key))
ol = [] # output list, to be concatenated to a string
for v in ar:
for j in 0, 1, 2:
# if the coef of the basis vector is 1, we skip the 1
if v[0][j] == 1:
ol.append(' + ' + v[1].str_vecs[j])
# if the coef of the basis vector is -1, we skip the 1
elif v[0][j] == -1:
ol.append(' - ' + v[1].str_vecs[j])
elif v[0][j] != 0:
# If the coefficient of the basis vector is not 1 or -1;
# also, we might wrap it in parentheses, for readability.
arg_str = printer._print(v[0][j])
if isinstance(v[0][j], Add):
arg_str = "(%s)" % arg_str
if arg_str[0] == '-':
arg_str = arg_str[1:]
str_start = ' - '
else:
str_start = ' + '
ol.append(str_start + arg_str + '*' + v[1].str_vecs[j])
outstr = ''.join(ol)
if outstr.startswith(' + '):
outstr = outstr[3:]
elif outstr.startswith(' '):
outstr = outstr[1:]
return outstr
def __sub__(self, other):
"""The subtraction operator. """
return self.__add__(other * -1)
def cross(self, other):
"""The cross product operator for two Vectors.
Returns a Vector, expressed in the same ReferenceFrames as self.
Parameters
==========
other : Vector
The Vector which we are crossing with
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.vector import ReferenceFrame, cross
>>> q1 = symbols('q1')
>>> N = ReferenceFrame('N')
>>> cross(N.x, N.y)
N.z
>>> A = ReferenceFrame('A')
>>> A.orient_axis(N, q1, N.x)
>>> cross(A.x, N.y)
N.z
>>> cross(N.y, A.x)
- sin(q1)*A.y - cos(q1)*A.z
"""
from sympy.physics.vector.dyadic import Dyadic, _check_dyadic
if isinstance(other, Dyadic):
other = _check_dyadic(other)
ol = Dyadic(0)
for i, v in enumerate(other.args):
ol += v[0] * ((self.cross(v[1])).outer(v[2]))
return ol
other = _check_vector(other)
if other.args == []:
return Vector(0)
def _det(mat):
"""This is needed as a little method for to find the determinant
of a list in python; needs to work for a 3x3 list.
SymPy's Matrix will not take in Vector, so need a custom function.
You should not be calling this.
"""
return (mat[0][0] * (mat[1][1] * mat[2][2] - mat[1][2] * mat[2][1])
+ mat[0][1] * (mat[1][2] * mat[2][0] - mat[1][0] *
mat[2][2]) + mat[0][2] * (mat[1][0] * mat[2][1] -
mat[1][1] * mat[2][0]))
outlist = []
ar = other.args # For brevity
for v in ar:
tempx = v[1].x
tempy = v[1].y
tempz = v[1].z
tempm = ([[tempx, tempy, tempz],
[self.dot(tempx), self.dot(tempy), self.dot(tempz)],
[Vector([v]).dot(tempx), Vector([v]).dot(tempy),
Vector([v]).dot(tempz)]])
outlist += _det(tempm).args
return Vector(outlist)
__radd__ = __add__
__rmul__ = __mul__
def separate(self):
"""
The constituents of this vector in different reference frames,
as per its definition.
Returns a dict mapping each ReferenceFrame to the corresponding
constituent Vector.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> R1 = ReferenceFrame('R1')
>>> R2 = ReferenceFrame('R2')
>>> v = R1.x + R2.x
>>> v.separate() == {R1: R1.x, R2: R2.x}
True
"""
components = {}
for x in self.args:
components[x[1]] = Vector([x])
return components
def __and__(self, other):
return self.dot(other)
__and__.__doc__ = dot.__doc__
__rand__ = __and__
def __xor__(self, other):
return self.cross(other)
__xor__.__doc__ = cross.__doc__
def __or__(self, other):
return self.outer(other)
__or__.__doc__ = outer.__doc__
def diff(self, var, frame, var_in_dcm=True):
"""Returns the partial derivative of the vector with respect to a
variable in the provided reference frame.
Parameters
==========
var : Symbol
What the partial derivative is taken with respect to.
frame : ReferenceFrame
The reference frame that the partial derivative is taken in.
var_in_dcm : boolean
If true, the differentiation algorithm assumes that the variable
may be present in any of the direction cosine matrices that relate
the frame to the frames of any component of the vector. But if it
is known that the variable is not present in the direction cosine
matrices, false can be set to skip full reexpression in the desired
frame.
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.vector import dynamicsymbols, ReferenceFrame
>>> from sympy.physics.vector import init_vprinting
>>> init_vprinting(pretty_print=False)
>>> t = Symbol('t')
>>> q1 = dynamicsymbols('q1')
>>> N = ReferenceFrame('N')
>>> A = N.orientnew('A', 'Axis', [q1, N.y])
>>> A.x.diff(t, N)
- sin(q1)*q1'*N.x - cos(q1)*q1'*N.z
>>> A.x.diff(t, N).express(A).simplify()
- q1'*A.z
>>> B = ReferenceFrame('B')
>>> u1, u2 = dynamicsymbols('u1, u2')
>>> v = u1 * A.x + u2 * B.y
>>> v.diff(u2, N, var_in_dcm=False)
B.y
"""
from sympy.physics.vector.frame import _check_frame
_check_frame(frame)
var = sympify(var)
inlist = []
for vector_component in self.args:
measure_number = vector_component[0]
component_frame = vector_component[1]
if component_frame == frame:
inlist += [(measure_number.diff(var), frame)]
else:
# If the direction cosine matrix relating the component frame
# with the derivative frame does not contain the variable.
if not var_in_dcm or (frame.dcm(component_frame).diff(var) ==
zeros(3, 3)):
inlist += [(measure_number.diff(var), component_frame)]
else: # else express in the frame
reexp_vec_comp = Vector([vector_component]).express(frame)
deriv = reexp_vec_comp.args[0][0].diff(var)
inlist += Vector([(deriv, frame)]).args
return Vector(inlist)
def express(self, otherframe, variables=False):
"""
Returns a Vector equivalent to this one, expressed in otherframe.
Uses the global express method.
Parameters
==========
otherframe : ReferenceFrame
The frame for this Vector to be described in
variables : boolean
If True, the coordinate symbols(if present) in this Vector
are re-expressed in terms otherframe
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, dynamicsymbols
>>> from sympy.physics.vector import init_vprinting
>>> init_vprinting(pretty_print=False)
>>> q1 = dynamicsymbols('q1')
>>> N = ReferenceFrame('N')
>>> A = N.orientnew('A', 'Axis', [q1, N.y])
>>> A.x.express(N)
cos(q1)*N.x - sin(q1)*N.z
"""
from sympy.physics.vector import express
return express(self, otherframe, variables=variables)
def to_matrix(self, reference_frame):
"""Returns the matrix form of the vector with respect to the given
frame.
Parameters
----------
reference_frame : ReferenceFrame
The reference frame that the rows of the matrix correspond to.
Returns
-------
matrix : ImmutableMatrix, shape(3,1)
The matrix that gives the 1D vector.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.vector import ReferenceFrame
>>> a, b, c = symbols('a, b, c')
>>> N = ReferenceFrame('N')
>>> vector = a * N.x + b * N.y + c * N.z
>>> vector.to_matrix(N)
Matrix([
[a],
[b],
[c]])
>>> beta = symbols('beta')
>>> A = N.orientnew('A', 'Axis', (beta, N.x))
>>> vector.to_matrix(A)
Matrix([
[ a],
[ b*cos(beta) + c*sin(beta)],
[-b*sin(beta) + c*cos(beta)]])
"""
return Matrix([self.dot(unit_vec) for unit_vec in
reference_frame]).reshape(3, 1)
def doit(self, **hints):
"""Calls .doit() on each term in the Vector"""
d = {}
for v in self.args:
d[v[1]] = v[0].applyfunc(lambda x: x.doit(**hints))
return Vector(d)
def dt(self, otherframe):
"""
Returns a Vector which is the time derivative of
the self Vector, taken in frame otherframe.
Calls the global time_derivative method
Parameters
==========
otherframe : ReferenceFrame
The frame to calculate the time derivative in
"""
from sympy.physics.vector import time_derivative
return time_derivative(self, otherframe)
def simplify(self):
"""Returns a simplified Vector."""
d = {}
for v in self.args:
d[v[1]] = simplify(v[0])
return Vector(d)
def subs(self, *args, **kwargs):
"""Substitution on the Vector.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> from sympy import Symbol
>>> N = ReferenceFrame('N')
>>> s = Symbol('s')
>>> a = N.x * s
>>> a.subs({s: 2})
2*N.x
"""
d = {}
for v in self.args:
d[v[1]] = v[0].subs(*args, **kwargs)
return Vector(d)
def magnitude(self):
"""Returns the magnitude (Euclidean norm) of self.
Warnings
========
Python ignores the leading negative sign so that might
give wrong results.
``-A.x.magnitude()`` would be treated as ``-(A.x.magnitude())``,
instead of ``(-A.x).magnitude()``.
"""
return sqrt(self.dot(self))
def normalize(self):
"""Returns a Vector of magnitude 1, codirectional with self."""
return Vector(self.args + []) / self.magnitude()
def applyfunc(self, f):
"""Apply a function to each component of a vector."""
if not callable(f):
raise TypeError("`f` must be callable.")
d = {}
for v in self.args:
d[v[1]] = v[0].applyfunc(f)
return Vector(d)
def angle_between(self, vec):
"""
Returns the smallest angle between Vector 'vec' and self.
        Parameters
        ==========
        vec : Vector
            The Vector with which the angle is needed.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> A = ReferenceFrame("A")
>>> v1 = A.x
>>> v2 = A.y
>>> v1.angle_between(v2)
pi/2
>>> v3 = A.x + A.y + A.z
>>> v1.angle_between(v3)
acos(sqrt(3)/3)
Warnings
========
Python ignores the leading negative sign so that might give wrong
results. ``-A.x.angle_between()`` would be treated as
``-(A.x.angle_between())``, instead of ``(-A.x).angle_between()``.
"""
vec1 = self.normalize()
vec2 = vec.normalize()
angle = acos(vec1.dot(vec2))
return angle
def free_symbols(self, reference_frame):
"""Returns the free symbols in the measure numbers of the vector
expressed in the given reference frame.
Parameters
==========
reference_frame : ReferenceFrame
The frame with respect to which the free symbols of the given
vector is to be determined.
Returns
=======
set of Symbol
set of symbols present in the measure numbers of
``reference_frame``.
"""
return self.to_matrix(reference_frame).free_symbols
def free_dynamicsymbols(self, reference_frame):
"""Returns the free dynamic symbols (functions of time ``t``) in the
measure numbers of the vector expressed in the given reference frame.
Parameters
==========
reference_frame : ReferenceFrame
The frame with respect to which the free dynamic symbols of the
given vector is to be determined.
Returns
=======
set
Set of functions of time ``t``, e.g.
``Function('f')(me.dynamicsymbols._t)``.
"""
# TODO : Circular dependency if imported at top. Should move
# find_dynamicsymbols into physics.vector.functions.
from sympy.physics.mechanics.functions import find_dynamicsymbols
return find_dynamicsymbols(self, reference_frame=reference_frame)
def _eval_evalf(self, prec):
if not self.args:
return self
new_args = []
dps = prec_to_dps(prec)
for mat, frame in self.args:
new_args.append([mat.evalf(n=dps), frame])
return Vector(new_args)
def xreplace(self, rule):
"""Replace occurrences of objects within the measure numbers of the
vector.
Parameters
==========
rule : dict-like
Expresses a replacement rule.
Returns
=======
Vector
Result of the replacement.
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.physics.vector import ReferenceFrame
>>> A = ReferenceFrame('A')
>>> x, y, z = symbols('x y z')
>>> ((1 + x*y) * A.x).xreplace({x: pi})
(pi*y + 1)*A.x
>>> ((1 + x*y) * A.x).xreplace({x: pi, y: 2})
(1 + 2*pi)*A.x
Replacements occur only if an entire node in the expression tree is
matched:
>>> ((x*y + z) * A.x).xreplace({x*y: pi})
(z + pi)*A.x
>>> ((x*y*z) * A.x).xreplace({x*y: pi})
x*y*z*A.x
"""
new_args = []
for mat, frame in self.args:
mat = mat.xreplace(rule)
new_args.append([mat, frame])
return Vector(new_args)
| Vector |
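Vector.angle_between above reduces to acos of the dot product of the two unit vectors. The same computation on plain numpy arrays, as a quick cross-check of the docstring examples (the clip guard is an addition for numerical safety, not part of the sympy code):

import numpy as np

def angle_between(u, v):
    u_hat = u / np.linalg.norm(u)
    v_hat = v / np.linalg.norm(v)
    # Clamp to [-1, 1] so round-off cannot push arccos out of its domain.
    return float(np.arccos(np.clip(np.dot(u_hat, v_hat), -1.0, 1.0)))

x = np.array([1.0, 0.0, 0.0])
y = np.array([0.0, 1.0, 0.0])
diag = np.array([1.0, 1.0, 1.0])
assert np.isclose(angle_between(x, y), np.pi / 2)
assert np.isclose(angle_between(x, diag), np.arccos(1 / np.sqrt(3)))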
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_twodim_base.py | {
"start": 10446,
"end": 15168
} | class ____(TestCase):
def test_dtype(self):
out = array([[1, 0, 0], [1, 1, 0], [1, 1, 1]])
assert_array_equal(tri(3), out)
assert_array_equal(tri(3, dtype=bool), out.astype(bool))
def test_tril_triu_ndim2(self):
for dtype in np.typecodes["AllFloat"] + np.typecodes["AllInteger"]:
a = np.ones((2, 2), dtype=dtype)
b = np.tril(a)
c = np.triu(a)
assert_array_equal(b, [[1, 0], [1, 1]])
assert_array_equal(c, b.T)
# should return the same dtype as the original array
assert_equal(b.dtype, a.dtype)
assert_equal(c.dtype, a.dtype)
def test_tril_triu_ndim3(self):
for dtype in np.typecodes["AllFloat"] + np.typecodes["AllInteger"]:
a = np.array(
[
[[1, 1], [1, 1]],
[[1, 1], [1, 0]],
[[1, 1], [0, 0]],
],
dtype=dtype,
)
a_tril_desired = np.array(
[
[[1, 0], [1, 1]],
[[1, 0], [1, 0]],
[[1, 0], [0, 0]],
],
dtype=dtype,
)
a_triu_desired = np.array(
[
[[1, 1], [0, 1]],
[[1, 1], [0, 0]],
[[1, 1], [0, 0]],
],
dtype=dtype,
)
a_triu_observed = np.triu(a)
a_tril_observed = np.tril(a)
assert_array_equal(a_triu_observed, a_triu_desired)
assert_array_equal(a_tril_observed, a_tril_desired)
assert_equal(a_triu_observed.dtype, a.dtype)
assert_equal(a_tril_observed.dtype, a.dtype)
def test_tril_triu_with_inf(self):
# Issue 4859
arr = np.array([[1, 1, np.inf], [1, 1, 1], [np.inf, 1, 1]])
out_tril = np.array([[1, 0, 0], [1, 1, 0], [np.inf, 1, 1]])
out_triu = out_tril.T
assert_array_equal(np.triu(arr), out_triu)
assert_array_equal(np.tril(arr), out_tril)
def test_tril_triu_dtype(self):
# Issue 4916
# tril and triu should return the same dtype as input
for c in "efdFDBbhil?": # np.typecodes["All"]:
arr = np.zeros((3, 3), dtype=c)
assert_equal(np.triu(arr).dtype, arr.dtype)
assert_equal(np.tril(arr).dtype, arr.dtype)
@xfail # (reason="TODO: implement mask_indices")
def test_mask_indices(self):
# simple test without offset
iu = mask_indices(3, np.triu)
a = np.arange(9).reshape(3, 3)
assert_array_equal(a[iu], array([0, 1, 2, 4, 5, 8]))
# Now with an offset
iu1 = mask_indices(3, np.triu, 1)
assert_array_equal(a[iu1], array([1, 2, 5]))
@xfail # (reason="np.tril_indices == our tuple(tril_indices)")
def test_tril_indices(self):
# indices without and with offset
il1 = tril_indices(4)
il2 = tril_indices(4, k=2)
il3 = tril_indices(4, m=5)
il4 = tril_indices(4, k=2, m=5)
a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
b = np.arange(1, 21).reshape(4, 5)
# indexing:
assert_array_equal(a[il1], array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16]))
assert_array_equal(b[il3], array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19]))
# And for assigning values:
a[il1] = -1
assert_array_equal(
a,
array([[-1, 2, 3, 4], [-1, -1, 7, 8], [-1, -1, -1, 12], [-1, -1, -1, -1]]),
)
b[il3] = -1
assert_array_equal(
b,
array(
[
[-1, 2, 3, 4, 5],
[-1, -1, 8, 9, 10],
[-1, -1, -1, 14, 15],
[-1, -1, -1, -1, 20],
]
),
)
# These cover almost the whole array (two diagonals right of the main one):
a[il2] = -10
assert_array_equal(
a,
array(
[
[-10, -10, -10, 4],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
]
),
)
b[il4] = -10
assert_array_equal(
b,
array(
[
[-10, -10, -10, 4, 5],
[-10, -10, -10, -10, 10],
[-10, -10, -10, -10, -10],
[-10, -10, -10, -10, -10],
]
),
)
@xfail # (reason="np.triu_indices == our tuple(triu_indices)")
| TestTri |
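For reference, the behaviour the tests above assert, run on a small concrete array: np.tri builds a lower-triangular matrix of ones, and tril/triu zero the strict upper or lower triangle while keeping the input dtype.

import numpy as np

a = np.arange(1, 10, dtype=np.int64).reshape(3, 3)
print(np.tri(3))
# [[1. 0. 0.]
#  [1. 1. 0.]
#  [1. 1. 1.]]
print(np.tril(a))
# [[1 0 0]
#  [4 5 0]
#  [7 8 9]]
print(np.triu(a))
# [[1 2 3]
#  [0 5 6]
#  [0 0 9]]
assert np.tril(a).dtype == a.dtype and np.triu(a).dtype == a.dtype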
python | getsentry__sentry | src/sentry/api/serializers/models/project.py | {
"start": 3709,
"end": 10319
} | class ____(CurrentAndPreviousCrashFreeRate):
hasHealthData: bool
def _get_team_memberships(
team_list: Sequence[int], user: User | RpcUser | AnonymousUser
) -> Iterable[OrganizationMemberTeam]:
"""Get memberships the user has in the provided team list"""
if not user.is_authenticated:
return []
return list(
OrganizationMemberTeam.objects.filter(
organizationmember__user_id=user.id, team__in=team_list
)
)
def get_access_by_project(
projects: Sequence[Project], user: User | RpcUser | AnonymousUser
) -> dict[Project, dict[str, Any]]:
request = env.request
project_teams = ProjectTeam.objects.filter(project__in=projects).values_list(
"project_id", "team_id"
)
project_to_teams = defaultdict(list)
teams_list = []
for project_id, team_id in project_teams:
project_to_teams[project_id].append(team_id)
teams_list.append(team_id)
team_memberships = _get_team_memberships(teams_list, user)
org_ids = {i.organization_id for i in projects}
org_roles = get_org_roles(org_ids, user)
is_superuser = request and is_active_superuser(request) and request.user == user
prefetch_related_objects(projects, "organization")
result: dict[Project, dict[str, Any]] = {}
has_team_roles_cache: dict[int, bool] = {}
with sentry_sdk.start_span(op="project.check-access"):
for project in projects:
parent_teams = [t for t in project_to_teams.get(project.id, [])]
member_teams = [m for m in team_memberships if m.team_id in parent_teams]
is_member = any(member_teams)
org_role = org_roles.get(project.organization_id)
has_access = bool(
is_member
or is_superuser
or project.organization.flags.allow_joinleave
or (org_role and roles.get(org_role).is_global)
)
team_scopes: set[str] = set()
if has_access:
# Project can be the child of several Teams, and the User can join
# several Teams and receive roles at each of them,
for member in member_teams:
team_scopes |= member.get_scopes(has_team_roles_cache)
if is_superuser:
org_role = organization_roles.get_top_dog().id
if org_role:
minimum_team_role = roles.get_minimum_team_role(org_role)
team_scopes |= minimum_team_role.scopes
result[project] = {
"is_member": is_member,
"has_access": has_access,
"access": team_scopes,
}
return result
def get_environments_by_projects(projects: Sequence[Project]) -> MutableMapping[int, list[str]]:
project_envs = (
EnvironmentProject.objects.filter(
project_id__in=[i.id for i in projects],
# Including the organization_id is necessary for postgres to use indexes
# efficiently.
environment__organization_id=projects[0].organization_id,
)
.exclude(
is_hidden=True,
# HACK(lb): avoiding the no environment value
)
.exclude(environment__name="")
.values("project_id", "environment__name")
)
environments_by_project = defaultdict(list)
for project_env in project_envs:
environments_by_project[project_env["project_id"]].append(project_env["environment__name"])
return environments_by_project
def get_features_for_projects(
all_projects: Sequence[Project],
user: User | RpcUser | AnonymousUser,
filter_unused_on_frontend_features: bool = False,
) -> MutableMapping[Project, list[str]]:
# Arrange to call features.has_for_batch rather than features.has
# for performance's sake
projects_by_org = defaultdict(list)
for project in all_projects:
projects_by_org[project.organization].append(project)
features_by_project = defaultdict(list)
project_features = [
feature
for feature in features.all(feature_type=ProjectFeature).keys()
if feature.startswith(_PROJECT_SCOPE_PREFIX)
]
if filter_unused_on_frontend_features:
project_features = [
feature
for feature in project_features
if feature[len(_PROJECT_SCOPE_PREFIX) :] not in PROJECT_FEATURES_NOT_USED_ON_FRONTEND
]
batch_checked = set()
for organization, projects in projects_by_org.items():
batch_features = features.batch_has(
project_features, actor=user, projects=projects, organization=organization
)
# batch_has has found some features
if batch_features:
for project in projects:
for feature_name, active in batch_features.get(f"project:{project.id}", {}).items():
if active:
features_by_project[project].append(
feature_name[len(_PROJECT_SCOPE_PREFIX) :]
)
batch_checked.add(feature_name)
for feature_name in project_features:
if feature_name in batch_checked:
continue
abbreviated_feature = feature_name[len(_PROJECT_SCOPE_PREFIX) :]
for organization, projects in projects_by_org.items():
result = features.has_for_batch(feature_name, organization, projects, user)
for project, flag in result.items():
if flag:
features_by_project[project].append(abbreviated_feature)
for project in all_projects:
if project.flags.has_releases:
features_by_project[project].append("releases")
return features_by_project
# Determines hasLogs based on SENTRY_MODE for SAAS use flags, otherwise (single tenant and self hosted) skip onboarding
# This is because has_logs is currently set via the outcomes consumer, which doesn't run in all envs.
def get_has_logs(project: Project) -> bool:
if settings.SENTRY_MODE == SentryMode.SAAS:
return bool(project.flags.has_logs)
return True
# Determines hasTraceMetrics based on SENTRY_MODE for SAAS use flags, otherwise (single tenant and self hosted) skip onboarding
# This is because has_trace_metrics is currently set via the outcomes consumer, which doesn't run in all envs.
def get_has_trace_metrics(project: Project) -> bool:
if settings.SENTRY_MODE == SentryMode.SAAS:
return bool(project.flags.has_trace_metrics)
return True
| CrashFreeRatesWithHealthData |
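get_environments_by_projects above is, at its core, a group-by: it collects environment names per project id into a dict of lists. The same pattern over plain tuples, without the Django queryset:

from collections import defaultdict

rows = [(1, "production"), (1, "staging"), (2, "production")]

environments_by_project = defaultdict(list)
for project_id, environment_name in rows:
    environments_by_project[project_id].append(environment_name)

assert dict(environments_by_project) == {
    1: ["production", "staging"],
    2: ["production"],
}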
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 105038,
"end": 105615
} | class ____(Structure):
_fields_ = [("max", c_uint),
("high", c_uint),
("partial", c_uint),
("low", c_uint),
("none", c_uint)
]
NVML_GPU_CERT_CHAIN_SIZE = 0x1000
NVML_GPU_ATTESTATION_CERT_CHAIN_SIZE = 0x1400
NVML_CC_GPU_CEC_NONCE_SIZE = 0x20
NVML_CC_GPU_ATTESTATION_REPORT_SIZE = 0x2000
NVML_CC_GPU_CEC_ATTESTATION_REPORT_SIZE = 0x1000
NVML_CC_CEC_ATTESTATION_REPORT_NOT_PRESENT = 0
NVML_CC_CEC_ATTESTATION_REPORT_PRESENT = 1
| c_nvmlRowRemapperHistogramValues |
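The row above defines a ctypes Structure mirroring an NVML C struct. A hypothetical structure in the same style, showing how _fields_ maps named C members onto Python attributes:

import ctypes

class HistogramValues(ctypes.Structure):
    # Five unsigned ints laid out contiguously, as in the NVML struct above.
    _fields_ = [("max", ctypes.c_uint),
                ("high", ctypes.c_uint),
                ("partial", ctypes.c_uint),
                ("low", ctypes.c_uint),
                ("none", ctypes.c_uint)]

h = HistogramValues(max=1, high=2, partial=3, low=4, none=5)
assert (h.max, h.none) == (1, 5)
assert ctypes.sizeof(HistogramValues) == 5 * ctypes.sizeof(ctypes.c_uint)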
python | dagster-io__dagster | python_modules/libraries/dagster-dbt/dagster_dbt/core/dbt_cli_event.py | {
"start": 24615,
"end": 25203
} | class ____(DbtCliEventMessage):
"""Represents a dbt CLI event that was produced using the dbt Fusion engine."""
@property
def is_result_event(self) -> bool:
return self.raw_event["info"]["name"] == "NodeFinished"
def _get_check_passed(self) -> bool:
return self._get_node_status() == NodeStatus.Success
def _get_check_severity(self) -> AssetCheckSeverity:
node_status = self._get_node_status()
return (
AssetCheckSeverity.WARN if node_status == NodeStatus.Warn else AssetCheckSeverity.ERROR
)
| DbtFusionCliEventMessage |
python | django__django | tests/delete_regress/models.py | {
"start": 2133,
"end": 2219
} | class ____(models.Model):
policy = models.ForeignKey(Policy, models.CASCADE)
| Version |
python | walkccc__LeetCode | solutions/2869. Minimum Operations to Collect Elements/2869.py | {
"start": 0,
"end": 239
} | class ____:
def minOperations(self, nums: list[int], k: int) -> int:
seen = set()
for i, num in enumerate(reversed(nums)):
if num > k:
continue
seen.add(num)
if len(seen) == k:
return i + 1
| Solution |
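Tracing the greedy scan above on the standard examples for this problem (assuming the masked class is instantiated under its target name, Solution): elements larger than k are skipped while scanning from the right, and the answer is the number of steps taken until k distinct values from 1..k have been seen.

assert Solution().minOperations([3, 1, 5, 4, 2], k=2) == 4
assert Solution().minOperations([3, 1, 5, 4, 2], k=5) == 5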
python | pypa__hatch | backend/src/hatchling/builders/custom.py | {
"start": 455,
"end": 2127
} | class ____(Generic[PluginManagerBound]):
PLUGIN_NAME = "custom"
def __new__( # type: ignore[misc]
cls,
root: str,
plugin_manager: PluginManagerBound | None = None,
config: dict[str, Any] | None = None,
metadata: ProjectMetadata | None = None,
app: Application | None = None,
) -> BuilderInterface:
project_metadata = ProjectMetadata(root, plugin_manager, config)
target_config = project_metadata.hatch.build_targets.get(cls.PLUGIN_NAME, {})
if not isinstance(target_config, dict):
message = f"Field `tool.hatch.build.targets.{cls.PLUGIN_NAME}` must be a table"
raise TypeError(message)
build_script = target_config.get("path", DEFAULT_BUILD_SCRIPT)
if not isinstance(build_script, str):
message = f"Option `path` for builder `{cls.PLUGIN_NAME}` must be a string"
raise TypeError(message)
if not build_script:
message = f"Option `path` for builder `{cls.PLUGIN_NAME}` must not be empty if defined"
raise ValueError(message)
path = os.path.normpath(os.path.join(root, build_script))
if not os.path.isfile(path):
message = f"Build script does not exist: {build_script}"
raise OSError(message)
hook_class = load_plugin_from_script(path, build_script, BuilderInterface, "builder") # type: ignore[type-abstract]
hook = hook_class(root, plugin_manager=plugin_manager, config=config, metadata=metadata, app=app)
# Always keep the name to avoid confusion
hook.PLUGIN_NAME = cls.PLUGIN_NAME
return hook
| CustomBuilder |
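CustomBuilder above delegates to load_plugin_from_script to import a builder class from a user-supplied file. A generic, simplified sketch of that mechanism using importlib (it skips hatchling's interface validation; the names are illustrative):

import importlib.util
import os
import tempfile

def load_class_from_script(path, class_name):
    # Import the script as an ad-hoc module and pull the named class out of it.
    spec = importlib.util.spec_from_file_location("user_build_hook", path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return getattr(module, class_name)

with tempfile.TemporaryDirectory() as tmp:
    script = os.path.join(tmp, "hatch_build.py")
    with open(script, "w") as f:
        f.write("class CustomHook:\n    marker = 'loaded'\n")
    hook_cls = load_class_from_script(script, "CustomHook")
    assert hook_cls.marker == "loaded"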
python | run-llama__llama_index | llama-index-core/llama_index/core/tools/ondemand_loader_tool.py | {
"start": 660,
"end": 6003
} | class ____(AsyncBaseTool):
"""
On-demand data loader tool.
    Loads data by calling the provided loader function,
stores in index, and queries for relevant data with a
natural language query string.
"""
def __init__(
self,
loader: Callable[..., List[Document]],
index_cls: Type[BaseIndex],
index_kwargs: Dict,
metadata: ToolMetadata,
use_query_str_in_loader: bool = False,
query_str_kwargs_key: str = "query_str",
) -> None:
"""Init params."""
self._loader = loader
self._index_cls = index_cls
self._index_kwargs = index_kwargs
self._use_query_str_in_loader = use_query_str_in_loader
self._metadata = metadata
self._query_str_kwargs_key = query_str_kwargs_key
@property
def metadata(self) -> ToolMetadata:
return self._metadata
@classmethod
def from_defaults(
cls,
reader: BaseReader,
index_cls: Optional[Type[BaseIndex]] = None,
index_kwargs: Optional[Dict] = None,
use_query_str_in_loader: bool = False,
query_str_kwargs_key: str = "query_str",
name: Optional[str] = None,
description: Optional[str] = None,
fn_schema: Optional[Type[BaseModel]] = None,
) -> "OnDemandLoaderTool":
"""From defaults."""
# NOTE: fn_schema should be specified if you want to use as langchain Tool
index_cls = index_cls or VectorStoreIndex
index_kwargs = index_kwargs or {}
if description is None:
description = f"Tool to load data from {reader.__class__.__name__}"
if fn_schema is None:
fn_schema = create_schema_from_function(
name or "LoadData",
reader.load_data,
[(query_str_kwargs_key, str, None)],
)
metadata = ToolMetadata(name=name, description=description, fn_schema=fn_schema)
return cls(
loader=reader.load_data,
index_cls=index_cls,
index_kwargs=index_kwargs,
use_query_str_in_loader=use_query_str_in_loader,
query_str_kwargs_key=query_str_kwargs_key,
metadata=metadata,
)
@classmethod
def from_tool(
cls,
tool: FunctionTool,
index_cls: Optional[Type[BaseIndex]] = None,
index_kwargs: Optional[Dict] = None,
use_query_str_in_loader: bool = False,
query_str_kwargs_key: str = "query_str",
name: Optional[str] = None,
description: Optional[str] = None,
return_direct: bool = False,
fn_schema: Optional[Type[BaseModel]] = None,
) -> "OnDemandLoaderTool":
"""From defaults."""
# NOTE: fn_schema should be specified if you want to use as langchain Tool
index_cls = index_cls or VectorStoreIndex
index_kwargs = index_kwargs or {}
if description is None:
description = f"Tool to load data from {tool.__class__.__name__}"
if fn_schema is None:
fn_schema = create_schema_from_function(
name or "LoadData", tool._fn, [(query_str_kwargs_key, str, None)]
)
metadata = ToolMetadata(
name=name,
description=description,
fn_schema=fn_schema,
return_direct=return_direct,
)
return cls(
loader=tool._fn,
index_cls=index_cls,
index_kwargs=index_kwargs,
use_query_str_in_loader=use_query_str_in_loader,
query_str_kwargs_key=query_str_kwargs_key,
metadata=metadata,
)
def _parse_args(self, *args: Any, **kwargs: Any) -> Tuple[str, List[Document]]:
if self._query_str_kwargs_key not in kwargs:
raise ValueError(
"Missing query_str in kwargs with parameter name: "
f"{self._query_str_kwargs_key}"
)
if self._use_query_str_in_loader:
query_str = kwargs[self._query_str_kwargs_key]
else:
query_str = kwargs.pop(self._query_str_kwargs_key)
docs = self._loader(*args, **kwargs)
return query_str, docs
def call(self, *args: Any, **kwargs: Any) -> ToolOutput:
"""Call."""
query_str, docs = self._parse_args(*args, **kwargs)
index = self._index_cls.from_documents(docs, **self._index_kwargs)
# TODO: add query kwargs
query_engine = index.as_query_engine()
response = query_engine.query(query_str)
return ToolOutput(
content=str(response),
tool_name=self.metadata.get_name(),
raw_input={"query": query_str},
raw_output=response,
)
async def acall(self, *args: Any, **kwargs: Any) -> ToolOutput:
"""Async Call."""
query_str, docs = self._parse_args(*args, **kwargs)
index = self._index_cls.from_documents(docs, **self._index_kwargs)
# TODO: add query kwargs
query_engine = index.as_query_engine()
response = await query_engine.aquery(query_str)
return ToolOutput(
content=str(response),
tool_name=self.metadata.get_name(),
raw_input={"query": query_str},
raw_output=response,
)
| OnDemandLoaderTool |
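The record above defines OnDemandLoaderTool, which builds an index from freshly loaded documents on every call and immediately queries it. Below is a minimal usage sketch; the reader class, its load_data keyword arguments, and the tool name/description are illustrative assumptions, not part of the record.

# Illustrative sketch only: assumes a BaseReader subclass whose load_data()
# accepts a `pages` keyword; the reader and its arguments are hypothetical.
from llama_index.core.tools.ondemand_loader_tool import OnDemandLoaderTool

def build_tool(reader):
    # fn_schema is derived automatically from reader.load_data plus the extra
    # query_str parameter appended by create_schema_from_function.
    return OnDemandLoaderTool.from_defaults(
        reader=reader,
        name="load_and_search",
        description="Load documents on demand and answer a natural language query",
    )

# tool = build_tool(SomeReader())                                  # hypothetical reader
# result = tool.call(pages=["Tokyo"], query_str="What is the population?")
# print(result.content)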
python | PyCQA__pylint | tests/functional/a/assigning/assigning_non_slot.py | {
"start": 3735,
"end": 3885
} | class ____(Generic[TypeT]):
""" Simple class with slots """
__slots__ = ['value']
def __init__(self, value):
self.value = value
| Cls |
python | readthedocs__readthedocs.org | readthedocs/builds/views.py | {
"start": 1079,
"end": 1537
} | class ____:
model = Build
def get_queryset(self):
self.project_slug = self.kwargs.get("project_slug", None)
self.project = get_object_or_404(
Project.objects.public(self.request.user),
slug=self.project_slug,
)
queryset = Build.objects.public(
user=self.request.user,
project=self.project,
).select_related("project", "version")
return queryset
| BuildBase |
python | getsentry__sentry | tests/sentry/api/endpoints/test_organization_auth_provider_details.py | {
"start": 116,
"end": 512
} | class ____(PermissionTestCase):
def setUp(self) -> None:
super().setUp()
self.path = reverse(
"sentry-api-0-organization-auth-provider", args=[self.organization.slug]
)
def test_member_can_get(self) -> None:
with self.feature("organizations:sso-basic"):
self.assert_member_can_access(self.path)
| OrganizationAuthProviderPermissionTest |
python | getsentry__sentry | src/sentry/runner/commands/cleanup.py | {
"start": 946,
"end": 35100
} | class ____(Exception):
"""
Exception raised when the cleanup process should be aborted.
"""
def get_project(value: str) -> int | None:
from sentry.models.project import Project
try:
if value.isdigit():
return int(value)
if "/" not in value:
return None
org, proj = value.split("/", 1)
return Project.objects.get(organization__slug=org, slug=proj).id
except Project.DoesNotExist:
return None
def get_organization(value: str) -> int | None:
from sentry.models.organization import Organization
try:
if value.isdigit():
return int(value)
return Organization.objects.get(slug=value).id
except Organization.DoesNotExist:
return None
# We need a unique value to indicate when to stop multiprocessing queue
# an identity on an object() isn't guaranteed to work between parent
# and child proc
_STOP_WORKER: Final = "91650ec271ae4b3e8a67cdc909d80f8c"
_WorkQueue: TypeAlias = (
"Queue[Literal['91650ec271ae4b3e8a67cdc909d80f8c'] | tuple[str, tuple[int, ...]]]"
)
API_TOKEN_TTL_IN_DAYS = 30
def debug_output(msg: str) -> None:
if os.environ.get("SENTRY_CLEANUP_SILENT", None):
return
click.echo(msg)
def multiprocess_worker(task_queue: _WorkQueue) -> None:
# Configure within each Process
import logging
logger = logging.getLogger("sentry.cleanup")
from sentry.runner import configure
configure()
from sentry import options
from sentry.utils import metrics
while True:
j = task_queue.get()
if j == _STOP_WORKER:
task_queue.task_done()
return
model_name, chunk = j
if options.get("cleanup.abort_execution"):
logger.warning("Cleanup worker aborting due to cleanup.abort_execution flag")
task_queue.task_done()
return
try:
with sentry_sdk.start_transaction(
op="cleanup", name=f"{TRANSACTION_PREFIX}.multiprocess_worker"
):
task_execution(model_name, chunk)
except Exception:
metrics.incr(
"cleanup.error",
instance=model_name,
tags={"model": model_name, "type": "multiprocess_worker"},
sample_rate=1.0,
)
if os.environ.get("SENTRY_CLEANUP_SILENT", None):
capture_exception(tags={"model": model_name})
else:
logger.exception("Error processing chunk of %s objects", model_name)
finally:
task_queue.task_done()
def task_execution(model_name: str, chunk: tuple[int, ...]) -> None:
from sentry import deletions, models, similarity
from sentry.utils import metrics
from sentry.utils.imports import import_string
skip_child_relations_models = [
# Handled by other parts of cleanup
models.EventAttachment,
models.UserReport,
models.Group,
models.GroupEmailThread,
models.GroupRuleStatus,
# Handled by TTL
similarity,
]
model = import_string(model_name)
task = deletions.get(
model=model,
query={"id__in": chunk},
skip_models=skip_child_relations_models,
transaction_id=uuid4().hex,
)
while True:
debug_output(f"Processing chunk of {len(chunk)} {model_name} objects")
metrics.incr("cleanup.chunk_processed", tags={"model": model_name}, amount=len(chunk))
if not task.chunk(apply_filter=True):
break
@click.command()
@click.option("--days", default=30, show_default=True, help="Numbers of days to truncate on.")
@click.option("--project", help="Limit truncation to only entries from project.")
@click.option("--organization", help="Limit truncation to only entries from organization.")
@click.option(
"--concurrency",
type=int,
default=1,
show_default=True,
help="The total number of concurrent worker processes to run.",
)
@click.option(
"--silent", "-q", default=False, is_flag=True, help="Run quietly. No output on success."
)
@click.option("--model", "-m", multiple=True)
@click.option("--router", "-r", default=None, help="Database router")
@log_options()
def cleanup(
days: int,
project: str | None,
organization: str | None,
concurrency: int,
silent: bool,
model: tuple[str, ...],
router: str | None,
) -> None:
"""Delete a portion of trailing data based on creation date.
All data that is older than `--days` will be deleted. The default for
this is 30 days. In the default setting all projects will be truncated
but if you have a specific project or organization you want to limit this to,
this can be done with the `--project` or `--organization` flags respectively,
which accepts a project/organization ID or a string with the form `org/project` where both are slugs.
"""
_cleanup(
model=model,
days=days,
concurrency=concurrency,
silent=silent,
router=router,
project=project,
organization=organization,
)
def _cleanup(
model: tuple[str, ...],
days: int,
concurrency: int,
silent: bool,
router: str | None,
project: str | None = None,
organization: str | None = None,
start_from_project_id: int | None = None,
) -> None:
start_time = time.time()
_validate_and_setup_environment(concurrency, silent)
# Make sure we fork off multiprocessing pool
# before we import or configure the app
pool, task_queue = _start_pool(concurrency)
from sentry.runner import configure
if not settings.configured:
configure()
from sentry import options
from sentry.utils import metrics
# Start transaction AFTER creating the multiprocessing pool to avoid
# transaction context issues in child processes. This ensures only the
# main process tracks the overall cleanup operation performance.
with sentry_sdk.start_transaction(
op="cleanup", name=f"{TRANSACTION_PREFIX}.main"
) as transaction:
try:
# Check if cleanup should be aborted before starting
if options.get("cleanup.abort_execution"):
raise CleanupExecutionAborted()
# list of models which this query is restricted to
model_list = {m.lower() for m in model}
# Track which models were attempted for deletion
models_attempted: set[str] = set()
# Track which models were filtered out for legitimate reasons (silo/router)
models_legitimately_filtered: set[str] = set()
def is_filtered(model: type[BaseModel]) -> bool:
model_name = model.__name__.lower()
silo_limit = getattr(model._meta, "silo_limit", None)
if isinstance(silo_limit, SiloLimit) and not silo_limit.is_available():
models_legitimately_filtered.add(model_name)
return True
if router is not None and db_router.db_for_write(model) != router:
models_legitimately_filtered.add(model_name)
return True
if not model_list:
return False
return model.__name__.lower() not in model_list
deletes = models_which_use_deletions_code_path()
_run_specialized_cleanups(is_filtered, days, models_attempted)
# Handle project/organization specific logic
project_id, organization_id = _handle_project_organization_cleanup(
project, organization, days, deletes
)
if organization_id is not None:
transaction.set_tag("organization_id", organization_id)
if project_id is not None:
transaction.set_tag("project_id", project_id)
run_bulk_query_deletes(
is_filtered,
days,
project,
project_id,
models_attempted,
)
run_bulk_deletes_in_deletes(
task_queue,
deletes,
is_filtered,
days,
project,
project_id,
models_attempted,
)
run_bulk_deletes_by_project(
task_queue, project_id, start_from_project_id, is_filtered, days, models_attempted
)
run_bulk_deletes_by_organization(
task_queue, organization_id, is_filtered, days, models_attempted
)
remove_file_blobs(is_filtered, models_attempted)
except CleanupExecutionAborted:
click.echo("Cleanup was aborted via cleanup.abort_execution option.")
metrics.incr(
"cleanup.aborted", instance=router, tags={"db_router": router}, sample_rate=1.0
)
capture_exception(tags={"db_router": router})
# Don't re-raise - this is expected behavior, not an error
except Exception:
capture_exception(tags={"db_router": router})
metrics.incr(
"cleanup.error", tags={"db_router": router, "type": "FATAL"}, sample_rate=1.0
)
raise
finally:
# Shut down our pool
_stop_pool(pool, task_queue)
duration = int(time.time() - start_time)
metrics.timing(
"cleanup.duration",
duration,
instance=router,
tags={"db_router": router},
sample_rate=1.0,
)
click.echo("Clean up took %s second(s)." % duration)
# Check for models that were specified but never attempted
if model_list:
_report_models_never_attempted(
model_list, models_attempted, models_legitimately_filtered
)
def continue_on_error(metric_type: str) -> Callable[..., Any]:
"""
Decorator that catches exceptions, tracks metrics, and continues execution.
Does NOT catch CleanupExecutionAborted - that exception is allowed to propagate
so the cleanup can be properly aborted.
Args:
metric_type: The type tag for the cleanup.error metric
Example:
@continue_on_error("specialized_cleanup_lost_passwords")
def remove_expired_values_for_lost_passwords(is_filtered, models_attempted):
...
"""
def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
@functools.wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
try:
return func(*args, **kwargs)
except CleanupExecutionAborted:
# Don't catch abort exceptions - let them propagate
raise
except Exception:
from sentry.utils import metrics
capture_exception()
metrics.incr("cleanup.error", tags={"type": metric_type}, sample_rate=1.0)
return wrapper
return decorator
def _validate_and_setup_environment(concurrency: int, silent: bool) -> None:
"""Validate input parameters and set up environment variables."""
if concurrency < 1:
click.echo("Error: Minimum concurrency is 1", err=True)
raise click.Abort()
os.environ["_SENTRY_CLEANUP"] = "1"
if silent:
os.environ["SENTRY_CLEANUP_SILENT"] = "1"
def _run_specialized_cleanups(
is_filtered: Callable[[type[BaseModel]], bool],
days: int,
models_attempted: set[str],
) -> None:
"""Run specialized cleanup operations for specific models."""
from sentry import options
if options.get("cleanup.abort_execution"):
raise CleanupExecutionAborted()
remove_expired_values_for_lost_passwords(is_filtered, models_attempted)
remove_expired_values_for_org_members(is_filtered, days, models_attempted)
delete_api_models(is_filtered, models_attempted)
exported_data(is_filtered, models_attempted)
def _handle_project_organization_cleanup(
project: str | None,
organization: str | None,
days: int,
deletes: list[tuple[type[BaseModel], str, str]],
) -> tuple[int | None, int | None]:
"""Handle project/organization specific cleanup logic."""
project_id = None
organization_id = None
if SiloMode.get_current_mode() != SiloMode.CONTROL:
if project:
remove_cross_project_models(deletes)
project_id = get_project_id_or_fail(project)
elif organization:
organization_id = get_organization_id_or_fail(organization)
else:
remove_old_nodestore_values(days)
return project_id, organization_id
def _report_models_never_attempted(
model_list: set[str], models_attempted: set[str], models_legitimately_filtered: set[str]
) -> None:
# Exclude models that were legitimately filtered out (silo/router restrictions)
models_never_attempted = model_list - models_attempted - models_legitimately_filtered
if models_never_attempted:
logger.warning(
"Models specified with --model were never attempted for deletion, must configure cleanup for this model",
extra={
"models_never_attempted": sorted(models_never_attempted),
"legitimately_filtered_models": (
sorted(models_legitimately_filtered) if models_legitimately_filtered else None
),
},
)
def _start_pool(concurrency: int) -> tuple[list[Process], _WorkQueue]:
pool: list[Process] = []
task_queue: _WorkQueue = Queue(1000)
for _ in range(concurrency):
p = Process(target=multiprocess_worker, args=(task_queue,))
p.daemon = True
p.start()
pool.append(p)
return pool, task_queue
def _stop_pool(pool: Sequence[Process], task_queue: _WorkQueue) -> None:
# First, ensure all queued tasks are completed
task_queue.join()
# Stop the pool
for _ in pool:
task_queue.put(_STOP_WORKER)
# And wait for it to drain
for p in pool:
p.join()
@continue_on_error("specialized_cleanup_lost_passwords")
def remove_expired_values_for_lost_passwords(
is_filtered: Callable[[type[BaseModel]], bool], models_attempted: set[str]
) -> None:
from sentry.users.models.lostpasswordhash import LostPasswordHash
if is_filtered(LostPasswordHash):
debug_output(">> Skipping LostPasswordHash")
else:
debug_output("Removing expired values for LostPasswordHash")
models_attempted.add(LostPasswordHash.__name__.lower())
LostPasswordHash.objects.filter(
date_added__lte=timezone.now() - timedelta(hours=48)
).delete()
@continue_on_error("specialized_cleanup_org_members")
def remove_expired_values_for_org_members(
is_filtered: Callable[[type[BaseModel]], bool], days: int, models_attempted: set[str]
) -> None:
from sentry.models.organizationmember import OrganizationMember
if is_filtered(OrganizationMember):
debug_output(">> Skipping OrganizationMember")
else:
debug_output("Removing expired values for OrganizationMember")
models_attempted.add(OrganizationMember.__name__.lower())
expired_threshold = timezone.now() - timedelta(days=days)
OrganizationMember.objects.delete_expired(expired_threshold)
@continue_on_error("specialized_cleanup_api_models")
def delete_api_models(
is_filtered: Callable[[type[BaseModel]], bool], models_attempted: set[str]
) -> None:
from sentry.models.apigrant import ApiGrant
from sentry.models.apitoken import ApiToken
for model_tp in (ApiGrant, ApiToken):
if is_filtered(model_tp):
debug_output(f">> Skipping {model_tp.__name__}")
else:
debug_output(f"Removing expired values for {model_tp.__name__}")
models_attempted.add(model_tp.__name__.lower())
queryset = model_tp.objects.filter(
expires_at__lt=(timezone.now() - timedelta(days=API_TOKEN_TTL_IN_DAYS))
)
# SentryAppInstallations are associated to ApiTokens. We're okay
# with these tokens sticking around so that the Integration can
# refresh them, but all other non-associated tokens should be
# deleted.
if model_tp is ApiToken:
queryset = queryset.filter(sentry_app_installation__isnull=True)
queryset.delete()
@continue_on_error("specialized_cleanup_exported_data")
def exported_data(
is_filtered: Callable[[type[BaseModel]], bool], models_attempted: set[str]
) -> None:
from sentry.data_export.models import ExportedData
if is_filtered(ExportedData):
debug_output(">> Skipping ExportedData files")
else:
debug_output("Removing expired files associated with ExportedData")
models_attempted.add(ExportedData.__name__.lower())
export_data_queryset = ExportedData.objects.filter(date_expired__lt=(timezone.now()))
for item in export_data_queryset:
item.delete_file()
def models_which_use_deletions_code_path() -> list[tuple[type[BaseModel], str, str]]:
from sentry.models.artifactbundle import ArtifactBundle
from sentry.models.commit import Commit
from sentry.models.eventattachment import EventAttachment
from sentry.models.files.file import File
from sentry.models.grouprulestatus import GroupRuleStatus
from sentry.models.pullrequest import PullRequest
from sentry.models.release import Release
from sentry.models.rulefirehistory import RuleFireHistory
from sentry.monitors.models import MonitorCheckIn
from sentry.replays.models import ReplayRecordingSegment
# Deletions that use the `deletions` code path (which handles their child relations)
# (model, datetime_field, order_by)
return [
(EventAttachment, "date_added", "date_added"),
(ReplayRecordingSegment, "date_added", "date_added"),
(ArtifactBundle, "date_added", "date_added"),
(MonitorCheckIn, "date_added", "date_added"),
(GroupRuleStatus, "date_added", "date_added"),
(PullRequest, "date_added", "date_added"),
(RuleFireHistory, "date_added", "date_added"),
(Release, "date_added", "date_added"),
(File, "timestamp", "id"),
(Commit, "date_added", "id"),
]
def remove_cross_project_models(
deletes: list[tuple[type[BaseModel], str, str]],
) -> list[tuple[type[BaseModel], str, str]]:
from sentry.models.artifactbundle import ArtifactBundle
from sentry.models.files.file import File
# These models span across projects, so let's skip them
deletes.remove((ArtifactBundle, "date_added", "date_added"))
deletes.remove((File, "timestamp", "id"))
return deletes
def get_project_id_or_fail(project: str) -> int:
click.echo("Bulk NodeStore deletion not available for project selection", err=True)
project_id = get_project(project)
if project_id is None:
click.echo("Error: Project not found", err=True)
raise click.Abort()
return project_id
def get_organization_id_or_fail(organization: str) -> int:
click.echo("Bulk NodeStore deletion not available for organization selection", err=True)
organization_id = get_organization(organization)
if organization_id is None:
click.echo("Error: Organization not found", err=True)
raise click.Abort()
return organization_id
@continue_on_error("nodestore_cleanup")
def remove_old_nodestore_values(days: int) -> None:
from sentry import nodestore, options
debug_output("Removing old NodeStore values")
if options.get("cleanup.abort_execution"):
raise CleanupExecutionAborted()
cutoff = timezone.now() - timedelta(days=days)
try:
nodestore.backend.cleanup(cutoff)
except NotImplementedError:
click.echo("NodeStore backend does not support cleanup operation", err=True)
def generate_bulk_query_deletes() -> list[tuple[type[BaseModel], str, str | None]]:
from django.apps import apps
from sentry.models.groupemailthread import GroupEmailThread
from sentry.models.userreport import UserReport
# Deletions that use `BulkDeleteQuery` (and don't need to worry about child relations)
# (model, datetime_field, order_by)
additional_bulk_query_deletes = []
for entry in settings.ADDITIONAL_BULK_QUERY_DELETES:
app_name, model_name = entry[0].split(".")
model_tp = apps.get_model(app_name, model_name)
additional_bulk_query_deletes.append((model_tp, entry[1], entry[2]))
BULK_QUERY_DELETES = [
(UserReport, "date_added", None),
(GroupEmailThread, "date", None),
] + additional_bulk_query_deletes
return BULK_QUERY_DELETES
def run_bulk_query_deletes(
is_filtered: Callable[[type[BaseModel]], bool],
days: int,
project: str | None,
project_id: int | None,
models_attempted: set[str],
) -> None:
from sentry import options
from sentry.db.deletion import BulkDeleteQuery
from sentry.utils import metrics
if options.get("cleanup.abort_execution"):
raise CleanupExecutionAborted()
debug_output("Running bulk query deletes in bulk_query_deletes")
bulk_query_deletes = generate_bulk_query_deletes()
for model_tp, dtfield, order_by in bulk_query_deletes:
chunk_size = 10000
if is_filtered(model_tp):
debug_output(">> Skipping %s" % model_tp.__name__)
else:
debug_output(f"Removing {model_tp.__name__} for days={days} project={project or '*'}")
models_attempted.add(model_tp.__name__.lower())
try:
BulkDeleteQuery(
model=model_tp,
dtfield=dtfield,
days=days,
project_id=project_id,
order_by=order_by,
).execute(chunk_size=chunk_size)
except Exception:
capture_exception(tags={"model": model_tp.__name__})
metrics.incr(
"cleanup.error",
instance=model_tp.__name__,
tags={"model": model_tp.__name__, "type": "bulk_delete_query"},
sample_rate=1.0,
)
def run_bulk_deletes_in_deletes(
task_queue: _WorkQueue,
deletes: list[tuple[type[BaseModel], str, str]],
is_filtered: Callable[[type[BaseModel]], bool],
days: int,
project: str | None,
project_id: int | None,
models_attempted: set[str],
) -> None:
from sentry import options
from sentry.db.deletion import BulkDeleteQuery
from sentry.utils import metrics
if options.get("cleanup.abort_execution"):
raise CleanupExecutionAborted()
debug_output("Running bulk deletes in DELETES")
for model_tp, dtfield, order_by in deletes:
if is_filtered(model_tp):
debug_output(">> Skipping %s" % model_tp.__name__)
else:
debug_output(f"Removing {model_tp.__name__} for days={days} project={project or '*'}")
models_attempted.add(model_tp.__name__.lower())
try:
imp = ".".join((model_tp.__module__, model_tp.__name__))
q = BulkDeleteQuery(
model=model_tp,
dtfield=dtfield,
days=days,
project_id=project_id,
order_by=order_by,
)
for chunk in q.iterator(chunk_size=100):
task_queue.put((imp, chunk))
except Exception:
capture_exception(tags={"model": model_tp.__name__})
metrics.incr(
"cleanup.error",
instance=model_tp.__name__,
tags={"model": model_tp.__name__, "type": "bulk_delete_in_deletes"},
sample_rate=1.0,
)
# Ensure all tasks are completed before exiting
task_queue.join()
def run_bulk_deletes_by_project(
task_queue: _WorkQueue,
project_id: int | None,
start_from_project_id: int | None,
is_filtered: Callable[[type[BaseModel]], bool],
days: int,
models_attempted: set[str],
) -> None:
from sentry import options
from sentry.db.deletion import BulkDeleteQuery
from sentry.utils import metrics
from sentry.utils.query import RangeQuerySetWrapper
if options.get("cleanup.abort_execution"):
raise CleanupExecutionAborted()
project_deletion_query, to_delete_by_project = prepare_deletes_by_project(
is_filtered, project_id, start_from_project_id
)
if project_deletion_query is not None and len(to_delete_by_project):
debug_output("Running bulk deletes in DELETES_BY_PROJECT")
for project_id_for_deletion in RangeQuerySetWrapper(
project_deletion_query.values_list("id", flat=True),
result_value_getter=lambda item: item,
):
for model_tp, dtfield, order_by in to_delete_by_project:
models_attempted.add(model_tp.__name__.lower())
debug_output(
f"Removing {model_tp.__name__} for days={days} project={project_id_for_deletion}"
)
try:
imp = ".".join((model_tp.__module__, model_tp.__name__))
q = BulkDeleteQuery(
model=model_tp,
dtfield=dtfield,
days=days,
project_id=project_id_for_deletion,
order_by=order_by,
)
for chunk in q.iterator(chunk_size=DELETES_BY_PROJECT_CHUNK_SIZE):
task_queue.put((imp, chunk))
except Exception:
capture_exception(
tags={"model": model_tp.__name__, "project_id": project_id_for_deletion}
)
metrics.incr(
"cleanup.error",
instance=model_tp.__name__,
tags={"model": model_tp.__name__, "type": "bulk_delete_by_project"},
sample_rate=1.0,
)
# Ensure all tasks are completed before exiting
task_queue.join()
def run_bulk_deletes_by_organization(
task_queue: _WorkQueue,
organization_id: int | None,
is_filtered: Callable[[type[BaseModel]], bool],
days: int,
models_attempted: set[str],
) -> None:
from sentry import options
from sentry.db.deletion import BulkDeleteQuery
from sentry.utils import metrics
from sentry.utils.query import RangeQuerySetWrapper
if options.get("cleanup.abort_execution"):
raise CleanupExecutionAborted()
organization_deletion_query, to_delete_by_organization = prepare_deletes_by_organization(
organization_id, is_filtered
)
if organization_deletion_query is not None and len(to_delete_by_organization):
debug_output("Running bulk deletes in DELETES_BY_ORGANIZATION")
for organization_id_for_deletion in RangeQuerySetWrapper(
organization_deletion_query.values_list("id", flat=True),
result_value_getter=lambda item: item,
):
for model_tp, dtfield, order_by in to_delete_by_organization:
models_attempted.add(model_tp.__name__.lower())
debug_output(
f"Removing {model_tp.__name__} for days={days} organization={organization_id_for_deletion}"
)
try:
imp = ".".join((model_tp.__module__, model_tp.__name__))
q = BulkDeleteQuery(
model=model_tp,
dtfield=dtfield,
days=days,
organization_id=organization_id_for_deletion,
order_by=order_by,
)
for chunk in q.iterator(chunk_size=100):
task_queue.put((imp, chunk))
except Exception:
capture_exception(
tags={
"model": model_tp.__name__,
"organization_id": organization_id_for_deletion,
}
)
metrics.incr(
"cleanup.error",
instance=model_tp.__name__,
tags={"model": model_tp.__name__, "type": "bulk_delete_by_organization"},
sample_rate=1.0,
)
# Ensure all tasks are completed before exiting
task_queue.join()
def prepare_deletes_by_project(
is_filtered: Callable[[type[BaseModel]], bool],
project_id: int | None = None,
start_from_project_id: int | None = None,
) -> tuple[QuerySet[Any] | None, list[tuple[Any, str, str]]]:
from sentry.constants import ObjectStatus
from sentry.models.debugfile import ProjectDebugFile
from sentry.models.group import Group
from sentry.models.project import Project
# Deletions that we run per project. In some cases we can't use an index on just the date
# column, so as an alternative we use `(project_id, <date_col>)` instead
DELETES_BY_PROJECT: list[tuple[type[BaseModel], str, str]] = [
# Having an index on `last_seen` sometimes caused the planner to make a bad plan that
# used this index instead of a more appropriate one. This was causing a lot of postgres
# load, so we had to remove it.
(Group, "last_seen", "last_seen"),
(ProjectDebugFile, "date_accessed", "date_accessed"),
]
project_deletion_query = None
to_delete_by_project = []
if SiloMode.get_current_mode() != SiloMode.CONTROL:
debug_output("Preparing DELETES_BY_PROJECT context")
project_deletion_query = Project.objects.filter(status=ObjectStatus.ACTIVE)
if project_id is not None:
project_deletion_query = Project.objects.filter(id=project_id)
elif start_from_project_id is not None:
# When no specific project is provided, but a starting project ID is given,
# filter to start from that project ID (inclusive)
project_deletion_query = project_deletion_query.filter(id__gte=start_from_project_id)
debug_output(f"Starting project iteration from project ID {start_from_project_id}")
for model_tp_tup in DELETES_BY_PROJECT:
if is_filtered(model_tp_tup[0]):
debug_output(f">> Skipping {model_tp_tup[0].__name__}")
else:
to_delete_by_project.append(model_tp_tup)
return project_deletion_query, to_delete_by_project
def prepare_deletes_by_organization(
organization_id: int | None, is_filtered: Callable[[type[BaseModel]], bool]
) -> tuple[QuerySet[Any] | None, list[tuple[Any, str, str]]]:
from sentry.constants import ObjectStatus
from sentry.models.organization import Organization
from sentry.models.releasefile import ReleaseFile
# Deletions that we run per organization. In some cases we can't use an index on just the date
# column, so as an alternative we use `(organization_id, <date_col>)` instead
DELETES_BY_ORGANIZATION: list[tuple[type[BaseModel], str, str]] = [
(ReleaseFile, "date_accessed", "date_accessed"),
]
organization_deletion_query = None
to_delete_by_organization = []
if SiloMode.get_current_mode() != SiloMode.CONTROL:
debug_output("Preparing DELETES_BY_ORGANIZATION context")
organization_deletion_query = Organization.objects.filter(status=ObjectStatus.ACTIVE)
if organization_id is not None:
organization_deletion_query = Organization.objects.filter(id=organization_id)
for model_tp_tup in DELETES_BY_ORGANIZATION:
if is_filtered(model_tp_tup[0]):
debug_output(f">> Skipping {model_tp_tup[0].__name__}")
else:
to_delete_by_organization.append(model_tp_tup)
return organization_deletion_query, to_delete_by_organization
@continue_on_error("fileblob_cleanup")
def remove_file_blobs(
is_filtered: Callable[[type[BaseModel]], bool], models_attempted: set[str]
) -> None:
from sentry import options
from sentry.models.file import FileBlob
if options.get("cleanup.abort_execution"):
raise CleanupExecutionAborted()
# Clean up FileBlob instances which are no longer used and aren't super
# recent (as there could be a race between blob creation and reference)
if is_filtered(FileBlob):
debug_output(">> Skipping FileBlob")
else:
debug_output("Cleaning up unused FileBlob references")
models_attempted.add(FileBlob.__name__.lower())
cleanup_unused_files()
def cleanup_unused_files() -> None:
"""
Remove FileBlob's (and thus the actual files) if they are no longer
referenced by any File.
We set a minimum-age on the query to ensure that we don't try to remove
any blobs which are brand new and potentially in the process of being
referenced.
"""
from sentry.models.files.file import File
from sentry.models.files.fileblob import FileBlob
from sentry.models.files.fileblobindex import FileBlobIndex
if os.environ.get("SENTRY_CLEANUP_SILENT", None):
from sentry.utils.query import RangeQuerySetWrapper
else:
from sentry.utils.query import RangeQuerySetWrapperWithProgressBar as RangeQuerySetWrapper
cutoff = timezone.now() - timedelta(days=1)
queryset = FileBlob.objects.filter(timestamp__lte=cutoff)
for blob in RangeQuerySetWrapper(queryset):
if FileBlobIndex.objects.filter(blob=blob).exists():
continue
if File.objects.filter(blob=blob).exists():
continue
blob.delete()
| CleanupExecutionAborted |
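The continue_on_error decorator in the record above swallows unexpected exceptions so that one failing cleanup step does not stop the rest, while always letting the abort exception propagate. A stripped-down, self-contained sketch of the same pattern follows; the Sentry metric and capture helpers are replaced with a plain logger, and all names here are illustrative.

import functools
import logging

logger = logging.getLogger("cleanup.sketch")

class AbortRequested(Exception):
    """Stand-in for CleanupExecutionAborted: must always propagate."""

def continue_on_error(step_name):
    """Run the wrapped step, log failures, and keep going (sketch)."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except AbortRequested:
                raise  # abort signals are never swallowed
            except Exception:
                logger.exception("cleanup step %s failed, continuing", step_name)
        return wrapper
    return decorator

@continue_on_error("remove_stale_rows")
def remove_stale_rows():
    raise RuntimeError("boom")  # logged, not fatal

remove_stale_rows()  # execution continues after the logged failure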
python | kubernetes-client__python | kubernetes/client/models/v1alpha1_storage_version_status.py | {
"start": 383,
"end": 6566
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'common_encoding_version': 'str',
'conditions': 'list[V1alpha1StorageVersionCondition]',
'storage_versions': 'list[V1alpha1ServerStorageVersion]'
}
attribute_map = {
'common_encoding_version': 'commonEncodingVersion',
'conditions': 'conditions',
'storage_versions': 'storageVersions'
}
def __init__(self, common_encoding_version=None, conditions=None, storage_versions=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1StorageVersionStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._common_encoding_version = None
self._conditions = None
self._storage_versions = None
self.discriminator = None
if common_encoding_version is not None:
self.common_encoding_version = common_encoding_version
if conditions is not None:
self.conditions = conditions
if storage_versions is not None:
self.storage_versions = storage_versions
@property
def common_encoding_version(self):
"""Gets the common_encoding_version of this V1alpha1StorageVersionStatus. # noqa: E501
If all API server instances agree on the same encoding storage version, then this field is set to that version. Otherwise this field is left empty. API servers should finish updating its storageVersionStatus entry before serving write operations, so that this field will be in sync with the reality. # noqa: E501
:return: The common_encoding_version of this V1alpha1StorageVersionStatus. # noqa: E501
:rtype: str
"""
return self._common_encoding_version
@common_encoding_version.setter
def common_encoding_version(self, common_encoding_version):
"""Sets the common_encoding_version of this V1alpha1StorageVersionStatus.
If all API server instances agree on the same encoding storage version, then this field is set to that version. Otherwise this field is left empty. API servers should finish updating its storageVersionStatus entry before serving write operations, so that this field will be in sync with the reality. # noqa: E501
:param common_encoding_version: The common_encoding_version of this V1alpha1StorageVersionStatus. # noqa: E501
:type: str
"""
self._common_encoding_version = common_encoding_version
@property
def conditions(self):
"""Gets the conditions of this V1alpha1StorageVersionStatus. # noqa: E501
The latest available observations of the storageVersion's state. # noqa: E501
:return: The conditions of this V1alpha1StorageVersionStatus. # noqa: E501
:rtype: list[V1alpha1StorageVersionCondition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this V1alpha1StorageVersionStatus.
The latest available observations of the storageVersion's state. # noqa: E501
:param conditions: The conditions of this V1alpha1StorageVersionStatus. # noqa: E501
:type: list[V1alpha1StorageVersionCondition]
"""
self._conditions = conditions
@property
def storage_versions(self):
"""Gets the storage_versions of this V1alpha1StorageVersionStatus. # noqa: E501
The reported versions per API server instance. # noqa: E501
:return: The storage_versions of this V1alpha1StorageVersionStatus. # noqa: E501
:rtype: list[V1alpha1ServerStorageVersion]
"""
return self._storage_versions
@storage_versions.setter
def storage_versions(self, storage_versions):
"""Sets the storage_versions of this V1alpha1StorageVersionStatus.
The reported versions per API server instance. # noqa: E501
:param storage_versions: The storage_versions of this V1alpha1StorageVersionStatus. # noqa: E501
:type: list[V1alpha1ServerStorageVersion]
"""
self._storage_versions = storage_versions
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1StorageVersionStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1StorageVersionStatus):
return True
return self.to_dict() != other.to_dict()
| V1alpha1StorageVersionStatus |
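The generated model above follows the usual OpenAPI client pattern: attributes are stored under private names, exposed through properties, and serialised via to_dict/to_str, with equality defined over to_dict. A small usage sketch follows; note that this v1alpha1 model may not be shipped by every kubernetes client release, so the import is an assumption about the installed version.

# Sketch only: assumes a kubernetes client version that still includes this alpha model.
from kubernetes.client import V1alpha1StorageVersionStatus

status = V1alpha1StorageVersionStatus(common_encoding_version="v1")
print(status.common_encoding_version)  # "v1"
print(status.to_dict())  # {'common_encoding_version': 'v1', 'conditions': None, 'storage_versions': None}
print(status == V1alpha1StorageVersionStatus(common_encoding_version="v1"))  # True: compared via to_dict()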
python | lazyprogrammer__machine_learning_examples | supervised_class/knn_vectorized.py | {
"start": 658,
"end": 3058
} | class ____(object):
def __init__(self, k):
self.k = k
def fit(self, X, y):
self.X = X
self.y = y
def predict(self, X):
N = len(X)
y = np.zeros(N)
# returns distances in a matrix
# of shape (N_test, N_train)
distances = pairwise_distances(X, self.X)
# now get the minimum k elements' indexes
# https://stackoverflow.com/questions/16817948/i-have-need-the-n-minimum-index-values-in-a-numpy-array
idx = distances.argsort(axis=1)[:, :self.k]
# now determine the winning votes
# each row of idx contains indexes from 0..Ntrain
# corresponding to the indexes of the closest samples
# from the training set
# NOTE: if you don't "believe" this works, test it
# in your console with simpler arrays
votes = self.y[idx]
# now y contains the classes in each row
# e.g.
# sample 0 --> [class0, class1, class1, class0, ...]
# unfortunately there's no good way to vectorize this
# https://stackoverflow.com/questions/19201972/can-numpy-bincount-work-with-2d-arrays
for i in range(N):
y[i] = np.bincount(votes[i]).argmax()
return y
def score(self, X, Y):
P = self.predict(X)
return np.mean(P == Y)
if __name__ == '__main__':
X, Y = get_data(2000)
Ntrain = 1000
Xtrain, Ytrain = X[:Ntrain], Y[:Ntrain]
Xtest, Ytest = X[Ntrain:], Y[Ntrain:]
train_scores = []
test_scores = []
ks = (1,2,3,4,5)
for k in ks:
print("\nk =", k)
knn = KNN(k)
t0 = datetime.now()
knn.fit(Xtrain, Ytrain)
print("Training time:", (datetime.now() - t0))
t0 = datetime.now()
train_score = knn.score(Xtrain, Ytrain)
train_scores.append(train_score)
print("Train accuracy:", train_score)
print("Time to compute train accuracy:", (datetime.now() - t0), "Train size:", len(Ytrain))
t0 = datetime.now()
test_score = knn.score(Xtest, Ytest)
print("Test accuracy:", test_score)
test_scores.append(test_score)
print("Time to compute test accuracy:", (datetime.now() - t0), "Test size:", len(Ytest))
plt.plot(ks, train_scores, label='train scores')
plt.plot(ks, test_scores, label='test scores')
plt.legend()
plt.show()
| KNN |
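The KNN record above relies on two numpy tricks: argsort over a pairwise distance matrix to get the k nearest training indices per test row, and bincount to tally class votes. The following self-contained demonstration isolates just those two steps; the toy arrays are made up for illustration (ties in the vote resolve to the smaller label because argmax returns the first maximum).

import numpy as np

# Toy setup: 3 test rows, 4 training rows, k = 2 (values are illustrative).
distances = np.array([
    [0.1, 0.9, 0.5, 0.7],
    [0.8, 0.2, 0.3, 0.9],
    [0.6, 0.4, 0.1, 0.2],
])
y_train = np.array([0, 1, 1, 0])
k = 2

idx = distances.argsort(axis=1)[:, :k]  # indices of the k closest training rows per test row
votes = y_train[idx]                    # shape (N_test, k): labels of those neighbours
pred = np.array([np.bincount(row).argmax() for row in votes])
print(idx)   # [[0 2] [1 2] [2 3]]
print(pred)  # [0 1 0]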
python | PrefectHQ__prefect | src/prefect/server/concurrency/lease_storage/__init__.py | {
"start": 793,
"end": 3162
} | class ____(LeaseStorage[ConcurrencyLimitLeaseMetadata]):
async def create_lease(
self,
resource_ids: list[UUID],
ttl: timedelta,
metadata: ConcurrencyLimitLeaseMetadata | None = None,
) -> ResourceLease[ConcurrencyLimitLeaseMetadata]: ...
async def read_lease(
self, lease_id: UUID
) -> ResourceLease[ConcurrencyLimitLeaseMetadata] | None: ...
async def renew_lease(self, lease_id: UUID, ttl: timedelta) -> bool | None:
"""
Renew a resource lease.
Args:
lease_id: The ID of the lease to renew.
ttl: The new amount of time the lease should be held for.
Returns:
True if the lease was successfully renewed, False if the lease
does not exist or has already expired. None may be returned by
legacy implementations for backwards compatibility (treated as success).
"""
...
async def revoke_lease(self, lease_id: UUID) -> None: ...
async def read_active_lease_ids(
self, limit: int = 100, offset: int = 0
) -> list[UUID]: ...
async def read_expired_lease_ids(self, limit: int = 100) -> list[UUID]: ...
async def list_holders_for_limit(
self, limit_id: UUID
) -> list[tuple[UUID, ConcurrencyLeaseHolder]]:
"""
List all holders for a given concurrency limit.
Args:
limit_id: The ID of the concurrency limit to list holders for.
Returns:
A list of tuples containing the lease ID and ConcurrencyLeaseHolder objects representing active holders.
"""
...
def get_concurrency_lease_storage() -> ConcurrencyLeaseStorage:
"""
Returns a ConcurrencyLeaseStorage instance based on the configured lease storage module.
Will raise a ValueError if the configured module does not pass a type check.
"""
concurrency_lease_storage_module = importlib.import_module(
get_current_settings().server.concurrency.lease_storage
)
if not isinstance(concurrency_lease_storage_module, ConcurrencyLeaseStorageModule):
raise ValueError(
f"The module {get_current_settings().server.concurrency.lease_storage} does not contain a ConcurrencyLeaseStorage class"
)
return concurrency_lease_storage_module.ConcurrencyLeaseStorage()
| ConcurrencyLeaseStorage |
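The record above is essentially an interface: create, read, renew, revoke, and list leases, plus a loader that resolves the configured storage module. The sketch below shows the general lease-storage pattern that interface describes with a dict-backed store; it is not Prefect's implementation, it drops async and the Prefect-specific ResourceLease/metadata types for brevity, and the Lease dataclass is a made-up stand-in.

from __future__ import annotations

from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from uuid import UUID, uuid4

@dataclass
class Lease:
    """Simplified stand-in for ResourceLease (illustrative only)."""
    id: UUID
    resource_ids: list[UUID]
    expiration: datetime

class InMemoryLeaseStore:
    """Generic sketch of the lease-storage pattern; not Prefect's implementation."""

    def __init__(self) -> None:
        self._leases: dict[UUID, Lease] = {}

    def create_lease(self, resource_ids: list[UUID], ttl: timedelta) -> Lease:
        lease = Lease(uuid4(), resource_ids, datetime.now(timezone.utc) + ttl)
        self._leases[lease.id] = lease
        return lease

    def renew_lease(self, lease_id: UUID, ttl: timedelta) -> bool:
        lease = self._leases.get(lease_id)
        if lease is None or lease.expiration < datetime.now(timezone.utc):
            return False  # unknown or already expired
        lease.expiration = datetime.now(timezone.utc) + ttl
        return True

    def revoke_lease(self, lease_id: UUID) -> None:
        self._leases.pop(lease_id, None)

    def read_expired_lease_ids(self, limit: int = 100) -> list[UUID]:
        now = datetime.now(timezone.utc)
        return [lease_id for lease_id, lease in self._leases.items() if lease.expiration < now][:limit]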
python | streamlit__streamlit | lib/tests/streamlit/runtime/forward_msg_queue_test.py | {
"start": 2216,
"end": 12359
} | class ____(unittest.TestCase):
def test_simple_enqueue(self):
"""Enqueue a single ForwardMsg."""
fmq = ForwardMsgQueue()
assert fmq.is_empty()
fmq.enqueue(NEW_SESSION_MSG)
assert not fmq.is_empty()
queue = fmq.flush()
assert fmq.is_empty()
assert len(queue) == 1
assert queue[0].new_session.config.allow_run_on_save
def test_enqueue_two(self):
"""Enqueue two ForwardMsgs."""
fmq = ForwardMsgQueue()
assert fmq.is_empty()
fmq.enqueue(NEW_SESSION_MSG)
TEXT_DELTA_MSG1.metadata.delta_path[:] = make_delta_path(
RootContainer.MAIN, (), 0
)
fmq.enqueue(TEXT_DELTA_MSG1)
queue = fmq.flush()
assert len(queue) == 2
assert (
make_delta_path(RootContainer.MAIN, (), 0) == queue[1].metadata.delta_path
)
assert queue[1].delta.new_element.text.body == "text1"
def test_enqueue_three(self):
"""Enqueue 3 ForwardMsgs."""
fmq = ForwardMsgQueue()
assert fmq.is_empty()
fmq.enqueue(NEW_SESSION_MSG)
TEXT_DELTA_MSG1.metadata.delta_path[:] = make_delta_path(
RootContainer.MAIN, (), 0
)
fmq.enqueue(TEXT_DELTA_MSG1)
TEXT_DELTA_MSG2.metadata.delta_path[:] = make_delta_path(
RootContainer.MAIN, (), 1
)
fmq.enqueue(TEXT_DELTA_MSG2)
queue = fmq.flush()
assert len(queue) == 3
assert (
make_delta_path(RootContainer.MAIN, (), 0) == queue[1].metadata.delta_path
)
assert queue[1].delta.new_element.text.body == "text1"
assert (
make_delta_path(RootContainer.MAIN, (), 1) == queue[2].metadata.delta_path
)
assert queue[2].delta.new_element.text.body == "text2"
def test_replace_element(self):
"""Enqueuing an element with the same delta_path as another element
already in the queue should replace the original element.
"""
fmq = ForwardMsgQueue()
assert fmq.is_empty()
fmq.enqueue(NEW_SESSION_MSG)
TEXT_DELTA_MSG1.metadata.delta_path[:] = make_delta_path(
RootContainer.MAIN, (), 0
)
fmq.enqueue(TEXT_DELTA_MSG1)
TEXT_DELTA_MSG2.metadata.delta_path[:] = make_delta_path(
RootContainer.MAIN, (), 0
)
fmq.enqueue(TEXT_DELTA_MSG2)
queue = fmq.flush()
assert len(queue) == 2
assert (
make_delta_path(RootContainer.MAIN, (), 0) == queue[1].metadata.delta_path
)
assert queue[1].delta.new_element.text.body == "text2"
@parameterized.expand([(TEXT_DELTA_MSG1,), (ADD_BLOCK_MSG,)])
def test_dont_replace_block(self, other_msg: ForwardMsg):
"""add_block deltas should never be replaced because they can
have dependent deltas later in the queue."""
fmq = ForwardMsgQueue()
assert fmq.is_empty()
ADD_BLOCK_MSG.metadata.delta_path[:] = make_delta_path(
RootContainer.MAIN, (), 0
)
other_msg.metadata.delta_path[:] = make_delta_path(RootContainer.MAIN, (), 0)
# Delta messages should not replace `add_block` deltas with the
# same delta_path.
fmq.enqueue(ADD_BLOCK_MSG)
fmq.enqueue(other_msg)
queue = fmq.flush()
assert len(queue) == 2
assert queue[0] == ADD_BLOCK_MSG
assert other_msg == queue[1]
def test_multiple_containers(self):
"""Deltas should only be coalesced if they're in the same container"""
fmq = ForwardMsgQueue()
assert fmq.is_empty()
fmq.enqueue(NEW_SESSION_MSG)
def enqueue_deltas(container: int, path: tuple[int, ...]):
# We deep-copy the protos because we mutate each one
# multiple times.
msg = copy.deepcopy(TEXT_DELTA_MSG1)
msg.metadata.delta_path[:] = make_delta_path(container, path, 0)
fmq.enqueue(msg)
msg = copy.deepcopy(DF_DELTA_MSG)
msg.metadata.delta_path[:] = make_delta_path(container, path, 1)
fmq.enqueue(msg)
msg = copy.deepcopy(ADD_ROWS_MSG)
msg.metadata.delta_path[:] = make_delta_path(container, path, 1)
fmq.enqueue(msg)
enqueue_deltas(RootContainer.MAIN, ())
enqueue_deltas(RootContainer.SIDEBAR, (0, 0, 1))
def assert_deltas(container: int, path: tuple[int, ...], idx: int):
# Text delta
assert make_delta_path(container, path, 0) == queue[idx].metadata.delta_path
assert queue[idx].delta.new_element.text.body == "text1"
queue = fmq.flush()
assert len(queue) == 7
assert_deltas(RootContainer.MAIN, (), 1)
assert_deltas(RootContainer.SIDEBAR, (0, 0, 1), 4)
def test_clear_retain_lifecycle_msgs(self):
fmq = ForwardMsgQueue()
script_finished_msg = ForwardMsg()
script_finished_msg.script_finished = (
ForwardMsg.ScriptFinishedStatus.FINISHED_SUCCESSFULLY
)
session_status_changed_msg = ForwardMsg()
session_status_changed_msg.session_status_changed.script_is_running = True
parent_msg = ForwardMsg()
parent_msg.parent_message.message = "hello"
fmq.enqueue(NEW_SESSION_MSG)
fmq.enqueue(TEXT_DELTA_MSG1)
fmq.enqueue(script_finished_msg)
fmq.enqueue(session_status_changed_msg)
fmq.enqueue(parent_msg)
fmq.enqueue(PAGE_INFO_CHANGED_MSG)
expected_new_finished_message = ForwardMsg()
expected_new_finished_message.script_finished = (
ForwardMsg.ScriptFinishedStatus.FINISHED_EARLY_FOR_RERUN
)
fmq.clear(retain_lifecycle_msgs=True)
expected_retained_messages = [
NEW_SESSION_MSG,
expected_new_finished_message,
session_status_changed_msg,
parent_msg,
PAGE_INFO_CHANGED_MSG,
]
assert fmq._queue == expected_retained_messages
fmq.clear()
assert fmq._queue == []
def test_clear_with_fragmentid_preserve_unrelated_delta_messages(self):
"""When we pass fragment_ids_this_run to the clear function, only delta
messages belonging to those fragment_ids should be cleared; in other words,
all delta messages not belonging to one of the passed fragment ids should
be preserved.
"""
fmq = ForwardMsgQueue()
script_finished_msg = ForwardMsg()
script_finished_msg.script_finished = (
ForwardMsg.ScriptFinishedStatus.FINISHED_SUCCESSFULLY
)
session_status_changed_msg = ForwardMsg()
session_status_changed_msg.session_status_changed.script_is_running = True
parent_msg = ForwardMsg()
parent_msg.parent_message.message = "hello"
current_fragment_delta1 = ForwardMsg()
current_fragment_delta1.delta.new_element.text.body = "text1"
current_fragment_delta1.metadata.delta_path[:] = make_delta_path(
RootContainer.MAIN, (), 1
)
current_fragment_delta1.delta.fragment_id = "current_fragment_id1"
current_fragment_delta2 = ForwardMsg()
current_fragment_delta2.delta.new_element.text.body = "text1"
current_fragment_delta2.metadata.delta_path[:] = make_delta_path(
RootContainer.MAIN, (), 2
)
current_fragment_delta2.delta.fragment_id = "current_fragment_delta2"
unrelated_fragment_delta = ForwardMsg()
unrelated_fragment_delta.delta.new_element.text.body = "text1"
unrelated_fragment_delta.metadata.delta_path[:] = make_delta_path(
RootContainer.MAIN, (), 3
)
unrelated_fragment_delta.delta.fragment_id = "unrelated_fragment_id"
fmq.enqueue(NEW_SESSION_MSG)
fmq.enqueue(current_fragment_delta1)
fmq.enqueue(current_fragment_delta2)
fmq.enqueue(unrelated_fragment_delta)
fmq.enqueue(TEXT_DELTA_MSG1) # no fragment id
fmq.enqueue(script_finished_msg)
fmq.enqueue(session_status_changed_msg)
fmq.enqueue(parent_msg)
expected_new_finished_message = ForwardMsg()
expected_new_finished_message.script_finished = (
ForwardMsg.ScriptFinishedStatus.FINISHED_SUCCESSFULLY
)
fmq.clear(
retain_lifecycle_msgs=True,
fragment_ids_this_run=[
current_fragment_delta1.delta.fragment_id,
current_fragment_delta2.delta.fragment_id,
],
)
expected_retained_messages = [
NEW_SESSION_MSG,
unrelated_fragment_delta,
TEXT_DELTA_MSG1,
expected_new_finished_message,
session_status_changed_msg,
parent_msg,
]
assert fmq._queue == expected_retained_messages
fmq.clear()
assert fmq._queue == []
def test_on_before_enqueue_msg(self):
count = 0
def increase_counter(_msg):
nonlocal count
count += 1
ForwardMsgQueue.on_before_enqueue_msg(increase_counter)
fmq = ForwardMsgQueue()
assert count == 0
fmq.enqueue(NEW_SESSION_MSG)
TEXT_DELTA_MSG1.metadata.delta_path[:] = make_delta_path(
RootContainer.MAIN, (), 0
)
fmq.enqueue(TEXT_DELTA_MSG1)
TEXT_DELTA_MSG2.metadata.delta_path[:] = make_delta_path(
RootContainer.MAIN, (), 1
)
fmq.enqueue(TEXT_DELTA_MSG2)
assert count == 3
count = 0
ForwardMsgQueue.on_before_enqueue_msg(None)
fmq.clear()
fmq.enqueue(NEW_SESSION_MSG)
TEXT_DELTA_MSG1.metadata.delta_path[:] = make_delta_path(
RootContainer.MAIN, (), 0
)
fmq.enqueue(TEXT_DELTA_MSG1)
TEXT_DELTA_MSG2.metadata.delta_path[:] = make_delta_path(
RootContainer.MAIN, (), 1
)
fmq.enqueue(TEXT_DELTA_MSG2)
assert count == 0
| ForwardMsgQueueTest |
python | streamlit__streamlit | lib/streamlit/web/server/media_file_handler.py | {
"start": 1098,
"end": 5562
} | class ____(tornado.web.StaticFileHandler):
_storage: MemoryMediaFileStorage
@classmethod
def initialize_storage(cls, storage: MemoryMediaFileStorage) -> None:
"""Set the MemoryMediaFileStorage object used by instances of this
handler. Must be called on server startup.
"""
# This is a class method, rather than an instance method, because
# `get_content()` is a class method and needs to access the storage
# instance.
cls._storage = storage
def set_default_headers(self) -> None:
if allow_all_cross_origin_requests():
self.set_header("Access-Control-Allow-Origin", "*")
elif is_allowed_origin(origin := self.request.headers.get("Origin")):
self.set_header("Access-Control-Allow-Origin", cast("str", origin))
def set_extra_headers(self, path: str) -> None:
"""Add Content-Disposition header for downloadable files.
Set header value to "attachment" indicating that file should be saved
locally instead of displaying inline in browser.
We also set filename to specify the filename for downloaded files.
Used for serving downloadable files, like files stored via the
`st.download_button` widget.
"""
media_file = self._storage.get_file(path)
if media_file and media_file.kind == MediaFileKind.DOWNLOADABLE:
filename = media_file.filename
if not filename:
filename = f"streamlit_download{get_extension_for_mimetype(media_file.mimetype)}"
try:
# Check that the value can be encoded in latin1. Latin1 is
# the default encoding for headers.
filename.encode("latin1")
file_expr = f'filename="{filename}"'
except UnicodeEncodeError:
# RFC5987 syntax.
# See: https://datatracker.ietf.org/doc/html/rfc5987
file_expr = f"filename*=utf-8''{quote(filename)}"
self.set_header("Content-Disposition", f"attachment; {file_expr}")
# Overriding StaticFileHandler to use the MediaFileManager
#
# From the Tornado docs:
# To replace all interaction with the filesystem (e.g. to serve
# static content from a database), override `get_content`,
# `get_content_size`, `get_modified_time`, `get_absolute_path`, and
# `validate_absolute_path`.
def validate_absolute_path(
self,
root: str, # noqa: ARG002
absolute_path: str,
) -> str:
try:
self._storage.get_file(absolute_path)
except MediaFileStorageError:
_LOGGER.exception("MediaFileHandler: Missing file %s", absolute_path)
raise tornado.web.HTTPError(404, "not found")
return absolute_path
def get_content_size(self) -> int:
abspath = self.absolute_path
if abspath is None:
return 0
media_file = self._storage.get_file(abspath)
return media_file.content_size
def get_modified_time(self) -> None:
# We do not track last modified time, but this can be improved to
# allow caching among files in the MediaFileManager
return None
@classmethod
def get_absolute_path(cls, root: str, path: str) -> str: # noqa: ARG003
# All files are stored in memory, so the absolute path is just the
# path itself. In the MediaFileHandler, it's just the filename
return path
@classmethod
def get_content(
cls, abspath: str, start: int | None = None, end: int | None = None
) -> Any:
_LOGGER.debug("MediaFileHandler: GET %s", abspath)
try:
# abspath is the hash as used `get_absolute_path`
media_file = cls._storage.get_file(abspath)
except Exception:
_LOGGER.exception("MediaFileHandler: Missing file %s", abspath)
return None
_LOGGER.debug(
"MediaFileHandler: Sending %s file %s", media_file.mimetype, abspath
)
# If there is no start and end, just return the full content
if start is None and end is None:
return media_file.content
if start is None:
start = 0
if end is None:
end = len(media_file.content)
# content is bytes that work just by slicing supplied by start and end
return media_file.content[start:end]
| MediaFileHandler |
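The set_extra_headers method in the record above chooses between a plain filename= parameter and the RFC 5987 filename*= form, depending on whether the filename survives latin-1 encoding. That header-building step can be sketched in isolation as follows; the helper function name is made up for illustration.

from urllib.parse import quote

def content_disposition(filename: str) -> str:
    """Build an attachment Content-Disposition value (illustrative helper)."""
    try:
        # Header values default to latin-1; the plain syntax is fine if it encodes.
        filename.encode("latin1")
        file_expr = f'filename="{filename}"'
    except UnicodeEncodeError:
        # RFC 5987 extended syntax for names outside latin-1.
        file_expr = f"filename*=utf-8''{quote(filename)}"
    return f"attachment; {file_expr}"

print(content_disposition("report.csv"))
print(content_disposition("レポート.csv"))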
python | allegroai__clearml | clearml/hyperdatasets/data_entry_image.py | {
"start": 577,
"end": 19498
} | class ____(DataSubEntry):
def __init__(
self,
name: str = "image_entry_0",
source: Optional[str] = None,
preview_source: Optional[str] = None,
width: Optional[int] = None,
height: Optional[int] = None,
timestamp: Optional[int] = None,
context_id: Optional[str] = None,
masks_source: Optional[Union[Sequence[str], Dict[str, str]]] = None,
metadata: Optional[dict] = None,
) -> None:
"""
Initialise an image sub-entry with optional dimension, context and mask metadata.
:param name: Identifier of the sub-entry (defaults to image_entry_0)
:param source: Primary image URI
:param preview_source: Optional preview image URI
:param width: Image width in pixels
:param height: Image height in pixels
:param timestamp: Optional timestamp associated with the frame
:param context_id: Optional context identifier to correlate sources
:param masks_source: Sequence or mapping of mask URIs
:param metadata: Optional metadata dictionary stored alongside the sub-entry
"""
super(DataSubEntryImage, self).__init__(
name=name,
source=source,
preview_source=preview_source,
metadata=metadata,
)
self._width = width
self._height = height
self._timestamp = timestamp
self._context_id = context_id
self._masks_source: Dict[str, str] = {}
if masks_source:
if isinstance(masks_source, dict):
self._masks_source = {str(k): str(v) for k, v in masks_source.items()}
else:
self._masks_source = {f"{i:02d}": str(u) for i, u in enumerate(masks_source)}
self._annotations: List[Dict[str, Any]] = []
@property
def width(self) -> Optional[int]:
"""
Return cached image width if known.
:return: Width in pixels or None when unknown
"""
return self._width
@width.setter
def width(self, value: Optional[int]) -> None:
"""
Update the cached image width.
:param value: Width in pixels or None to clear the stored value
"""
self._width = value
@property
def height(self) -> Optional[int]:
"""
Return cached image height if known.
:return: Height in pixels or None when unknown
"""
return self._height
@height.setter
def height(self, value: Optional[int]) -> None:
"""
Update the cached image height.
:param value: Height in pixels or None to clear the stored value
"""
self._height = value
@property
def timestamp(self) -> Optional[int]:
"""
Return the timestamp associated with this frame, if any.
:return: Timestamp value or None
"""
return self._timestamp
@timestamp.setter
def timestamp(self, value: Optional[int]) -> None:
"""
Update the timestamp associated with the sub-entry.
:param value: Timestamp value or None to clear the stored timestamp
"""
self._timestamp = value
@property
def context_id(self) -> Optional[str]:
"""
Return the context identifier used to correlate sub-entries.
:return: Context identifier string or None
"""
return self._context_id
@context_id.setter
def context_id(self, value: Optional[str]) -> None:
"""
Update the context identifier associated with the sub-entry.
:param value: Context identifier string or None to clear the stored value
"""
self._context_id = value
def set_mask_source(self, uri: Optional[str]) -> Optional[str]:
"""
Add a single mask URI and auto-number it (00, 01, 02, ...).
Returns the assigned mask id, or None if uri is falsy.
"""
if not uri:
return None
if self._masks_source is None:
self._masks_source = {}
# find next available mask id (00, 01, ...)
i = 0
while f"{i:02d}" in self._masks_source:
i += 1
mask_id = f"{i:02d}"
self._masks_source[mask_id] = str(uri)
return mask_id
def set_masks_source(self, masks_source: Optional[Union[Sequence[str], Dict[str, str]]] = None) -> None:
"""
Set multiple mask URIs and auto-number them (00, 01, 02, ...).
For dict input, the values' iteration order is used and keys are ignored.
For list/sequence input, order is preserved.
"""
if masks_source is None:
self._masks_source = {}
return
uris: List[str] = []
if isinstance(masks_source, dict):
# use insertion order of values
uris = [str(v) for v in masks_source.values()]
else:
uris = [str(u) for u in masks_source]
self._masks_source = {f"{i:02d}": u for i, u in enumerate(uris)}
def get_masks_source_dict(self) -> Dict[str, str]:
"""
Return a copy of the mask-id to URI mapping.
:return: Dictionary mapping mask ids to URIs
"""
return dict(self._masks_source)
def get_mask_source(self, mask_id: Optional[str] = None) -> Optional[str]:
"""
Return the URI for the requested mask id (or the first mask if omitted).
:param mask_id: Mask identifier (e.g. "00")
:return: Mask URI string or None when unavailable
"""
if not self._masks_source:
return None
if mask_id is None:
# return the first mask if exists
return self._masks_source.get(sorted(self._masks_source.keys())[0])
return self._masks_source.get(mask_id)
def get_local_mask_source(
self,
raise_on_error: bool = False,
mask_id: Optional[str] = None,
force_download: bool = False,
) -> Optional[str]:
"""
Retrieve a cached local copy of a specific mask source.
:param raise_on_error: Raise ValueError when the download fails
:param mask_id: Mask identifier to fetch; defaults to the first mask
:param force_download: Refresh an existing cached entry when True
:return: Absolute path to the local copy or None when unavailable
"""
uri = self.get_mask_source(mask_id)
if not uri:
return None
try:
local_file = StorageManagerDiskSpaceFileSizeStrategy.get_local_copy(
uri,
extract_archive=False,
force_download=force_download,
)
except Exception as ex:
logging.getLogger("HyperDataset").warning("Could not fetch local mask copy for %s: %s", uri, ex)
local_file = None
if not local_file and raise_on_error:
raise ValueError("Failed downloading file: {}".format(uri))
return local_file
def get_local_masks_source(
self,
raise_on_error: bool = False,
force_download: bool = False,
) -> Dict[str, Optional[str]]:
"""
Retrieve cached local copies for all mask sources on this sub-entry.
:param raise_on_error: Raise ValueError when any download fails
:param force_download: Refresh existing cached entries when True
:return: Mapping of mask id to the local copy path (or None on failure if raise_on_error is False)
"""
masks: Dict[str, Optional[str]] = {}
for mid, uri in sorted((self._masks_source or {}).items()):
if not uri:
masks[mid] = None
continue
try:
local_file = StorageManagerDiskSpaceFileSizeStrategy.get_local_copy(
uri,
extract_archive=False,
force_download=force_download,
)
except Exception as ex:
logging.getLogger("HyperDataset").warning("Could not fetch local mask copy for %s: %s", uri, ex)
local_file = None
if not local_file and raise_on_error:
raise ValueError("Failed downloading file: {}".format(uri))
masks[mid] = local_file
return masks
def __repr__(self) -> str:
meta_keys = sorted((self._metadata or {}).keys()) if isinstance(self._metadata, dict) else []
return (
f"{self.__class__.__name__}(name={self._name!r}, source={self._source!r}, "
f"preview={self._preview_source!r}, size=({self._width},{self._height}), "
f"masks={len(self._masks_source)}, metadata_keys={meta_keys})"
)
def add_annotation(
self,
poly2d_xy: Optional[Values] = None,
poly3d_xyz: Optional[Values] = None,
points2d_xy: Optional[Values] = None,
points3d_xyz: Optional[Values] = None,
box2d_xywh: Optional[Values] = None,
box3d_xyzwhxyzwh: Optional[Values] = None,
ellipse2d_xyrrt: Optional[Values] = None,
mask_rgb: Optional[Values] = None,
frame_class: Optional[Sequence[str]] = None,
id: Optional[str] = None,
labels: Optional[Sequence[str]] = None,
confidence: Optional[float] = None,
metadata: Optional[dict] = None
) -> List[int]:
"""
Create ROI records for this sub-entry and return their indices.
:param poly2d_xy: 2D polygon coordinates
:param poly3d_xyz: 3D polygon coordinates
:param points2d_xy: 2D keypoint coordinates
:param points3d_xyz: 3D keypoint coordinates
:param box2d_xywh: 2D bounding box definition
:param box3d_xyzwhxyzwh: 3D bounding box definition
:param ellipse2d_xyrrt: 2D ellipse definition
:param mask_rgb: RGB mask values
:param frame_class: Optional frame-level class labels
:param id: Annotation identifier
:param labels: Sequence of label names
:param confidence: Optional confidence value
:param metadata: Extra metadata mapping to attach to the annotation
:return: List of annotation indices that were appended
"""
# Minimal in-memory ROI creation compatible with SaveFramesRequest schema
anns: List[Dict[str, Any]] = []
def _flatten_xy(seq: Any) -> Optional[List[float]]:
if not seq:
return None
# Accept [(x,y), ...] or [x0,y0,...]
if isinstance(seq, (list, tuple)) and seq and isinstance(seq[0], (list, tuple)):
flat: List[float] = []
for x, y in seq: # type: ignore[misc]
flat.extend([float(x), float(y)])
return flat
try:
return [float(v) for v in seq] # type: ignore[return-value]
except Exception:
return None
# helper to append an ROI dict
def _add_roi(meta_type: Optional[str], poly: Optional[List[float]] = None, mask: Optional[List[int]] = None):
roi: Dict[str, Any] = {}
if labels is not None:
roi["label"] = list(labels)
if confidence is not None:
roi["confidence"] = float(confidence)
roi_meta: Dict[str, Any] = {}
if metadata:
roi_meta.update(metadata)
if meta_type:
roi_meta["_type"] = meta_type
if id is not None:
roi_meta["_id"] = id
if roi_meta:
roi["meta"] = roi_meta
if poly is not None:
roi["poly"] = poly
if mask is not None:
roi["mask"] = {"id": "00", "value": list(mask)}
# Associate with this subentry's source id (name)
roi["sources"] = [self.name]
anns.append(roi)
# Create ROIs for provided shapes (each becomes its own ROI, sharing id if given)
if poly2d_xy is not None:
poly = _flatten_xy(poly2d_xy)
if poly:
_add_roi("p2d", poly=poly)
if box2d_xywh is not None:
# Represent box as polygon (x,y,w,h[,a]) approximated by rectangle corners without rotation
try:
vals = list(box2d_xywh) # type: ignore
x, y, w, h = [float(vals[i]) for i in range(4)]
rect_poly = [x, y, x + w, y, x + w, y + h, x, y + h]
_add_roi("b2d", poly=rect_poly)
except Exception:
pass
if points2d_xy is not None:
flat = _flatten_xy(points2d_xy)
if flat:
_add_roi("k2d", poly=flat)
if ellipse2d_xyrrt is not None:
try:
vals = [float(v) for v in ellipse2d_xyrrt] # cx,cy,rx,ry,theta
_add_roi("elp", poly=vals)
except Exception:
pass
if mask_rgb is not None:
try:
mrgb = [int(v) for v in mask_rgb] # r,g,b
_add_roi("mask", mask=mrgb)
except Exception:
pass
if poly3d_xyz is not None:
# Store as flat list in poly, typed
try:
flat = [float(v) for v in (sum(poly3d_xyz, []) if isinstance(poly3d_xyz[0], (list, tuple)) else poly3d_xyz)] # type: ignore[index]
_add_roi("p3d", poly=flat)
except Exception:
pass
if points3d_xyz is not None:
try:
flat = [float(v) for v in (sum(points3d_xyz, []) if isinstance(points3d_xyz[0], (list, tuple)) else points3d_xyz)] # type: ignore[index]
_add_roi("k3d", poly=flat)
except Exception:
pass
if box3d_xyzwhxyzwh is not None:
try:
flat = [float(v) for v in box3d_xyzwhxyzwh] # type: ignore
_add_roi("b3d", poly=flat)
except Exception:
pass
if frame_class is not None and (not labels):
# Frame-level class as ROI without geometry
_add_roi(None)
# Store annotations list
if not hasattr(self, "_annotations"):
self._annotations = [] # type: ignore[attr-defined]
start = len(self._annotations) # type: ignore[attr-defined]
# type: ignore[attr-defined]
self._annotations.extend(anns)
return list(range(start, start + len(anns)))
def remove_annotation(self, index: Optional[int] = None, **kwargs: Any) -> Any:
"""
Remove a single annotation by numeric index or identifier.
:param index: Annotation index to remove
:param kwargs: Alternative filters such as id=...
:return: Removed annotation payload or None when nothing matched
"""
if not hasattr(self, "_annotations") or not self._annotations:
return None
if index is None:
ann_id = kwargs.pop("id", None)
if ann_id is None:
raise ValueError("index is required (or provide id=...) to remove_annotation")
# delegate to remove_annotations by id and return first removed
removed = self.remove_annotations(id=ann_id)
return removed[0] if removed else None
try:
return self._annotations.pop(index)
except Exception:
return None
def remove_annotations(
self, id: Optional[str] = None, label: Optional[str] = None, labels: Optional[Sequence[str]] = None
) -> Sequence[Any]:
"""
Remove annotations that match the provided id or label filters.
:param id: Annotation identifier to match
:param label: Single label to match
:param labels: Sequence of labels to match
:return: Sequence of removed annotation payloads
"""
if not hasattr(self, "_annotations") or not self._annotations:
return []
removed: List[Dict[str, Any]] = []
keep: List[Dict[str, Any]] = []
label_set = set(labels or ([] if label is None else [label])) if (labels or label) else None
for ann in self._annotations:
meta = ann.get("meta") or {}
if id is not None and meta.get("_id") != id:
keep.append(ann)
continue
if label_set is not None:
ann_labels = set(ann.get("label") or [])
if not (ann_labels & label_set):
keep.append(ann)
continue
removed.append(ann)
self._annotations = keep
return removed
def get_all_annotations(self) -> Sequence[Any]:
"""
Return all annotations attached to this sub-entry.
:return: Sequence of annotation payloads
"""
return list(getattr(self, "_annotations", []) or [])
def get_annotations(self, id: Optional[str] = None, index: Optional[int] = None) -> Sequence[Any]:
"""
Return annotations matching the supplied identifier/index filters.
:param id: Annotation identifier to filter by
:param index: Annotation index to fetch
:return: Sequence of matching annotation payloads
"""
anns = list(getattr(self, "_annotations", []) or [])
if index is not None:
try:
return [anns[index]]
except Exception:
return []
if id is not None:
return [a for a in anns if (a.get("meta") or {}).get("_id") == id]
return anns
@classmethod
def from_api_object(
cls,
source_obj: Any,
frame_meta: Optional[dict] = None,
context_id: Optional[str] = None,
name_fallback: str = "image_0",
) -> "DataSubEntryImage":
def _get(obj, key, default=None):
if isinstance(obj, dict):
return obj.get(key, default)
return getattr(obj, key, default)
name = _get(source_obj, "id") or name_fallback
uri = _get(source_obj, "uri")
width = _get(source_obj, "width")
height = _get(source_obj, "height")
ts = _get(source_obj, "timestamp")
preview = None
p = _get(source_obj, "preview")
if p:
preview = _get(p, "uri")
masks_list = _get(source_obj, "masks") or []
masks = {str(_get(m, "id")): _get(m, "uri") for m in masks_list if _get(m, "uri")}
sub_meta = (frame_meta or {}).get(name) if isinstance(frame_meta, dict) else None
return cls(
name=name,
source=uri,
preview_source=preview,
width=width,
height=height,
timestamp=ts,
context_id=context_id,
masks_source=masks,
metadata=sub_meta,
)
| DataSubEntryImage |
python | redis__redis-py | redis/commands/search/profile_information.py | {
"start": 25,
"end": 249
} | class ____:
"""
Wrapper around FT.PROFILE response
"""
def __init__(self, info: Any) -> None:
self._info: Any = info
@property
def info(self) -> Any:
return self._info
| ProfileInformation |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/streams/streams.py | {
"start": 5339,
"end": 5487
} | class ____(IncrementalShopifyGraphQlBulkStream):
bulk_query: CustomerJourney = CustomerJourney
primary_key = "order_id"
| CustomerJourneySummary |
python | arrow-py__arrow | arrow/locales.py | {
"start": 72499,
"end": 76033
} | class ____(Locale):
names = ["sk", "sk-sk"]
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = {
"now": "Teraz",
"second": {"past": "sekundou", "future": "sekundu"},
"seconds": {
"zero": "{0} sekúnd",
"past": "{0} sekundami",
"future-singular": "{0} sekundy",
"future-paucal": "{0} sekúnd",
},
"minute": {"past": "minútou", "future": "minútu"},
"minutes": {
"zero": "{0} minút",
"past": "{0} minútami",
"future-singular": "{0} minúty",
"future-paucal": "{0} minút",
},
"hour": {"past": "hodinou", "future": "hodinu"},
"hours": {
"zero": "{0} hodín",
"past": "{0} hodinami",
"future-singular": "{0} hodiny",
"future-paucal": "{0} hodín",
},
"day": {"past": "dňom", "future": "deň"},
"days": {
"zero": "{0} dní",
"past": "{0} dňami",
"future-singular": "{0} dni",
"future-paucal": "{0} dní",
},
"week": {"past": "týždňom", "future": "týždeň"},
"weeks": {
"zero": "{0} týždňov",
"past": "{0} týždňami",
"future-singular": "{0} týždne",
"future-paucal": "{0} týždňov",
},
"month": {"past": "mesiacom", "future": "mesiac"},
"months": {
"zero": "{0} mesiacov",
"past": "{0} mesiacmi",
"future-singular": "{0} mesiace",
"future-paucal": "{0} mesiacov",
},
"year": {"past": "rokom", "future": "rok"},
"years": {
"zero": "{0} rokov",
"past": "{0} rokmi",
"future-singular": "{0} roky",
"future-paucal": "{0} rokov",
},
}
past = "Pred {0}"
future = "O {0}"
and_word = "a"
month_names = [
"",
"január",
"február",
"marec",
"apríl",
"máj",
"jún",
"júl",
"august",
"september",
"október",
"november",
"december",
]
month_abbreviations = [
"",
"jan",
"feb",
"mar",
"apr",
"máj",
"jún",
"júl",
"aug",
"sep",
"okt",
"nov",
"dec",
]
day_names = [
"",
"pondelok",
"utorok",
"streda",
"štvrtok",
"piatok",
"sobota",
"nedeľa",
]
day_abbreviations = ["", "po", "ut", "st", "št", "pi", "so", "ne"]
def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: int) -> str:
"""Slovak aware time frame format function, takes into account
the differences between past and future forms."""
abs_delta = abs(delta)
form = self.timeframes[timeframe]
if isinstance(form, str):
return form.format(abs_delta)
if delta == 0:
key = "zero" # And *never* use 0 in the singular!
elif delta < 0:
key = "past"
else:
if "future-singular" not in form:
key = "future"
elif 2 <= abs_delta % 10 <= 4 and (
abs_delta % 100 < 10 or abs_delta % 100 >= 20
):
key = "future-singular"
else:
key = "future-paucal"
form: str = form[key]
return form.format(abs_delta)
| SlovakLocale |
python | astropy__astropy | astropy/coordinates/tests/test_masked.py | {
"start": 1349,
"end": 8752
} | class ____(MaskedSphericalSetup):
"""Tests for mask propagation for Spherical with separate masks."""
def test_initialization(self):
assert_array_equal(self.msph.lon.mask, self.mask_lon)
assert_array_equal(self.msph.lat.mask, self.mask_lat)
assert_array_equal(self.msph.distance.mask, self.mask_dis)
assert_array_equal(self.msph.unmasked.lon, self.lon)
assert_array_equal(self.msph.unmasked.lat, self.lat)
assert_array_equal(self.msph.unmasked.distance, self.dis)
assert_array_equal(self.msph.mask, self.mask)
assert_array_equal(self.msph.get_mask(), self.mask)
assert_array_equal(self.msph.get_mask("lon", "lat"), self.mask_ang)
assert repr(self.msph) == (
"<SphericalRepresentation (lon, lat, distance) in (hourangle, deg, pc)\n"
" [( 0., -15., 10.), (———, 30., 20.), ( 6., ———, 30.),\n"
" (12., -60., ———), (———, ———, 50.), (———, ———, ———)]>"
)
assert str(self.msph) == (
"[( 0., -15., 10.), (———, 30., 20.), ( 6., ———, 30.), (12., -60., ———),\n"
" (———, ———, 50.), (———, ———, ———)] (hourangle, deg, pc)"
)
def test_convert_to_cartesian(self):
mcart = self.msph.represent_as(r.CartesianRepresentation)
assert_array_equal(mcart.mask, self.mask)
def test_convert_to_unit_spherical(self):
musph = self.msph.represent_as(r.UnitSphericalRepresentation)
assert_array_equal(musph.lon.mask, self.mask_lon)
assert_array_equal(musph.lat.mask, self.mask_lat)
assert_array_equal(musph.mask, self.mask_ang)
def test_convert_to_radial(self):
mrad = r.RadialRepresentation.from_representation(self.msph)
assert_array_equal(mrad.mask, self.mask_dis)
def test_convert_to_physics_spherical(self):
mpsph = self.msph.represent_as(r.PhysicsSphericalRepresentation)
assert_array_equal(mpsph.phi.mask, self.mask_lon)
assert_array_equal(mpsph.theta.mask, self.mask_lat)
assert_array_equal(mpsph.r.mask, self.mask_dis)
assert_array_equal(mpsph.mask, self.mask)
assert_array_equal(mpsph.get_mask("phi", "theta"), self.mask_ang)
def test_set_mask(self):
msph = self.msph.copy()
msph[0] = np.ma.masked
assert_array_equal(msph.mask, np.concatenate(([True], self.mask[1:])))
msph[0] = np.ma.nomask
assert_array_equal(msph.mask, self.mask)
def test_setitem(self):
msph = self.msph.copy()
msph[0] = self.msph[1]
assert_array_equal(msph.mask, np.concatenate(([True], self.mask[1:])))
assert_array_equal(
msph.unmasked.lon, np.concatenate((msph.lon[1:2], self.lon[1:]))
)
msph[0] = self.msph[0]
assert_array_equal(msph.mask, self.mask)
assert_array_equal(msph.unmasked.lon, self.lon)
def test_set_masked_item_on_unmasked_instance(self):
# Currently, the mask on items is *ignored*, just as it is for ndarray,
# Quantity, and Time. In principle, this could be changed for containers
# like representations and Time. See
# https://github.com/astropy/astropy/pull/17016#issuecomment-2439607869
sph = self.msph.unmasked.copy()
sph[0] = self.msph[1]
assert not sph.masked
assert_array_equal(sph.mask, np.zeros_like(self.mask))
assert_array_equal(sph.lon, np.concatenate((sph.lon[1:2], self.lon[1:])))
sph[0] = self.msph[0].unmasked
assert not sph.masked
assert_array_equal(sph.mask, np.zeros_like(self.mask))
assert_array_equal(sph.unmasked.lon, self.lon)
def test_set_np_ma_masked_on_unmasked_instance(self):
sph = self.msph.copy().unmasked
sph[0] = np.ma.masked
assert sph.masked
assert_array_equal(
sph.mask, np.concatenate(([True], np.zeros_like(self.mask[1:])))
)
assert_array_equal(sph.unmasked.lon, self.lon)
sph[0] = np.ma.nomask
assert sph.masked # Does not get reset
assert_array_equal(sph.mask, np.zeros_like(self.mask))
assert_array_equal(sph.unmasked.lon, self.lon)
def test_set_np_ma_nomasked_on_unmasked_instance(self):
sph = self.msph.copy().unmasked
sph[0] = np.ma.nomask
assert not sph.masked
assert_array_equal(sph.unmasked.lon, self.lon)
def test_filled(self):
unmasked = self.msph.unmasked
sph = self.msph.filled(unmasked[1])
expected = unmasked.copy()
expected[self.mask] = unmasked[1]
assert np.all(representation_equal(sph, expected))
def test_filled_with_masked_value(self):
# Filled ignores the mask (this will be true as long as __setitem__
# ignores it; it may be a logical choice to actually use it).
sph = self.msph.filled(self.msph[1])
assert not sph.masked
expected = self.msph.unmasked.copy()
expected[self.mask] = self.msph.unmasked[1]
assert np.all(representation_equal(sph, expected))
def test_transform_keeps_distance_angular_masks(self):
m = rotation_matrix(30.0, "x") * 2.0 # rotation and scale
sph = self.msph.transform(m)
assert sph.masked
# Distance now also masked if angular coordinates were masked.
assert_array_equal(sph.distance.mask, self.mask)
# But angular coordinates just depend on the angular mask.
assert_array_equal(sph.lon.mask, self.mask_ang)
assert_array_equal(sph.lat.mask, self.mask_ang)
assert_array_equal(self.msph.get_mask(), self.mask)
assert_array_equal(self.msph.get_mask("lon", "lat"), self.mask_ang)
def test_unmasked_representation_masked_differential(self):
rv = np.arange(6.0) << u.km / u.s
mask_rv = [True, False] * 3
mrv = Masked(rv, mask_rv)
mdiff = r.RadialDifferential(mrv)
msph = r.SphericalRepresentation(
self.lon,
self.lat,
self.dis,
differentials={"s": mdiff},
)
assert msph.masked
assert_array_equal(msph.lon.mask, False)
assert_array_equal(msph.lat.mask, False)
assert_array_equal(msph.distance.mask, False)
assert_array_equal(msph.differentials["s"].d_distance.mask, mask_rv)
sph = msph.unmasked
assert not sph.masked
assert not sph.differentials["s"].masked
# Sanity checks on "with[out]_differentials"
assert msph.without_differentials().masked
sph2 = r.SphericalRepresentation(self.lon, self.lat, self.dis)
assert not sph2.masked
sph3 = sph2.with_differentials({"s": mdiff})
assert sph3.masked
def test_masked_representation_unmasked_differential(self):
diff = r.RadialDifferential(np.arange(6.0) << u.km / u.s)
msph = r.SphericalRepresentation(
self.mlon,
self.mlat,
self.mdis,
differentials={"s": diff},
)
assert msph.masked
assert msph.differentials["s"].masked
assert_array_equal(msph.differentials["s"].d_distance.mask, False)
# Sanity check on using with_differentials.
msph2 = self.msph.with_differentials({"s": diff})
assert msph2.masked
assert msph2.differentials["s"].masked
| TestSphericalRepresentationSeparateMasks |
python | celery__celery | t/unit/backends/test_azureblockblob.py | {
"start": 386,
"end": 7934
} | class ____:
def setup_method(self):
self.url = (
"azureblockblob://"
"DefaultEndpointsProtocol=protocol;"
"AccountName=name;"
"AccountKey=key;"
"EndpointSuffix=suffix")
self.backend = AzureBlockBlobBackend(
app=self.app,
url=self.url)
@pytest.fixture(params=['', 'my_folder/'])
def base_path(self, request):
return request.param
def test_missing_third_party_sdk(self):
azurestorage = azureblockblob.azurestorage
try:
azureblockblob.azurestorage = None
with pytest.raises(ImproperlyConfigured):
AzureBlockBlobBackend(app=self.app, url=self.url)
finally:
azureblockblob.azurestorage = azurestorage
def test_bad_connection_url(self):
with pytest.raises(ImproperlyConfigured):
AzureBlockBlobBackend._parse_url("azureblockblob://")
with pytest.raises(ImproperlyConfigured):
AzureBlockBlobBackend._parse_url("")
@patch(MODULE_TO_MOCK + ".BlobServiceClient")
def test_create_client(self, mock_blob_service_factory):
mock_blob_service_client_instance = Mock()
mock_blob_service_factory.from_connection_string.return_value = mock_blob_service_client_instance
backend = AzureBlockBlobBackend(app=self.app, url=self.url)
# ensure container gets created on client access...
assert mock_blob_service_client_instance.create_container.call_count == 0
assert backend._blob_service_client is not None
assert mock_blob_service_client_instance.create_container.call_count == 1
# ...but only once per backend instance
assert backend._blob_service_client is not None
assert mock_blob_service_client_instance.create_container.call_count == 1
@patch(MODULE_TO_MOCK + ".AzureStorageQueuesTransport")
@patch(MODULE_TO_MOCK + ".BlobServiceClient")
def test_create_client__default_azure_credentials(self, mock_blob_service_client, mock_kombu_transport):
credential_mock = Mock()
mock_blob_service_client.return_value = Mock()
mock_kombu_transport.parse_uri.return_value = (credential_mock, "dummy_account_url")
url = "azureblockblob://DefaultAzureCredential@dummy_account_url"
backend = AzureBlockBlobBackend(app=self.app, url=url)
assert backend._blob_service_client is not None
mock_kombu_transport.parse_uri.assert_called_once_with(url.replace("azureblockblob://", ""))
mock_blob_service_client.assert_called_once_with(
account_url="dummy_account_url",
credential=credential_mock,
connection_timeout=backend._connection_timeout,
read_timeout=backend._read_timeout,
)
@patch(MODULE_TO_MOCK + ".AzureStorageQueuesTransport")
@patch(MODULE_TO_MOCK + ".BlobServiceClient")
def test_create_client__managed_identity_azure_credentials(self, mock_blob_service_client, mock_kombu_transport):
credential_mock = Mock()
mock_blob_service_client.return_value = Mock()
mock_kombu_transport.parse_uri.return_value = (credential_mock, "dummy_account_url")
url = "azureblockblob://ManagedIdentityCredential@dummy_account_url"
backend = AzureBlockBlobBackend(app=self.app, url=url)
assert backend._blob_service_client is not None
mock_kombu_transport.parse_uri.assert_called_once_with(url.replace("azureblockblob://", ""))
mock_blob_service_client.assert_called_once_with(
account_url="dummy_account_url",
credential=credential_mock,
connection_timeout=backend._connection_timeout,
read_timeout=backend._read_timeout,
)
@patch(MODULE_TO_MOCK + ".BlobServiceClient")
def test_configure_client(self, mock_blob_service_factory):
connection_timeout = 3
read_timeout = 11
self.app.conf.update(
{
'azureblockblob_connection_timeout': connection_timeout,
'azureblockblob_read_timeout': read_timeout,
}
)
mock_blob_service_client_instance = Mock()
mock_blob_service_factory.from_connection_string.return_value = (
mock_blob_service_client_instance
)
base_url = "azureblockblob://"
connection_string = "connection_string"
backend = AzureBlockBlobBackend(
app=self.app, url=f'{base_url}{connection_string}'
)
client = backend._blob_service_client
assert client is mock_blob_service_client_instance
(
mock_blob_service_factory
.from_connection_string
.assert_called_once_with(
connection_string,
connection_timeout=connection_timeout,
read_timeout=read_timeout
)
)
@patch(MODULE_TO_MOCK + ".AzureBlockBlobBackend._blob_service_client")
def test_get(self, mock_client, base_path):
self.backend.base_path = base_path
self.backend.get(b"mykey")
mock_client.get_blob_client \
.assert_called_once_with(blob=base_path + "mykey", container="celery")
mock_client.get_blob_client.return_value \
.download_blob.return_value \
.readall.return_value \
.decode.assert_called_once()
@patch(MODULE_TO_MOCK + ".AzureBlockBlobBackend._blob_service_client")
def test_get_missing(self, mock_client):
mock_client.get_blob_client.return_value \
.download_blob.return_value \
.readall.side_effect = azureblockblob.ResourceNotFoundError
assert self.backend.get(b"mykey") is None
@patch(MODULE_TO_MOCK + ".AzureBlockBlobBackend._blob_service_client")
def test_set(self, mock_client, base_path):
self.backend.base_path = base_path
self.backend._set_with_state(b"mykey", "myvalue", states.SUCCESS)
mock_client.get_blob_client.assert_called_once_with(
container="celery", blob=base_path + "mykey")
mock_client.get_blob_client.return_value \
.upload_blob.assert_called_once_with("myvalue", overwrite=True)
@patch(MODULE_TO_MOCK + ".AzureBlockBlobBackend._blob_service_client")
def test_mget(self, mock_client, base_path):
keys = [b"mykey1", b"mykey2"]
self.backend.base_path = base_path
self.backend.mget(keys)
mock_client.get_blob_client.assert_has_calls(
[call(blob=base_path + key.decode(), container='celery') for key in keys],
any_order=True,)
@patch(MODULE_TO_MOCK + ".AzureBlockBlobBackend._blob_service_client")
def test_delete(self, mock_client, base_path):
self.backend.base_path = base_path
self.backend.delete(b"mykey")
mock_client.get_blob_client.assert_called_once_with(
container="celery", blob=base_path + "mykey")
mock_client.get_blob_client.return_value \
.delete_blob.assert_called_once()
def test_base_path_conf(self, base_path):
self.app.conf.azureblockblob_base_path = base_path
backend = AzureBlockBlobBackend(
app=self.app,
url=self.url
)
assert backend.base_path == base_path
def test_base_path_conf_default(self):
backend = AzureBlockBlobBackend(
app=self.app,
url=self.url
)
assert backend.base_path == ''
| test_AzureBlockBlobBackend |
python | getsentry__sentry | src/sentry/core/endpoints/organization_request_project_creation.py | {
"start": 712,
"end": 2127
} | class ____(OrganizationRequestChangeEndpoint):
publish_status = {
"POST": ApiPublishStatus.PRIVATE,
}
def post(self, request: Request, organization) -> Response:
"""
Send an email requesting a project be created
"""
if not request.user.is_authenticated:
return Response(status=status.HTTP_400_BAD_REQUEST)
serializer = OrganizationRequestProjectCreationSerializer(data=request.data)
if not serializer.is_valid():
return self.respond(serializer.errors, status=400)
requester_name = request.user.get_display_name()
requester_link = absolute_uri(
f"/organizations/{organization.slug}/projects/new/?referrer=request_project&category=mobile"
)
subject = _("%s thinks Sentry can help monitor your mobile app")
msg = MessageBuilder(
subject=subject % (requester_name),
template="sentry/emails/requests/organization-project.txt",
html_template="sentry/emails/requests/organization-project.html",
type="organization.project.request",
context={
"requester_name": requester_name,
"requester_link": requester_link,
},
)
msg.send_async([serializer.validated_data["target_user_email"]])
return self.respond(status=201)
| OrganizationRequestProjectCreation |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/output.py | {
"start": 16586,
"end": 18773
} | class ____(Out):
"""Variant of :py:class:`Out <dagster.Out>` for an output that will dynamically alter the graph at
runtime.
When using in a composition function such as :py:func:`@graph <dagster.graph>`,
dynamic outputs must be used with either
* ``map`` - clone downstream ops for each separate :py:class:`DynamicOut`
* ``collect`` - gather across all :py:class:`DynamicOut` in to a list
Uses the same constructor as :py:class:`Out <dagster.Out>`
.. code-block:: python
@op(
config_schema={
"path": Field(str, default_value=file_relative_path(__file__, "sample"))
},
out=DynamicOut(str),
)
def files_in_directory(context):
path = context.op_config["path"]
dirname, _, filenames = next(os.walk(path))
for file in filenames:
yield DynamicOutput(os.path.join(dirname, file), mapping_key=_clean(file))
@job
def process_directory():
files = files_in_directory()
# use map to invoke an op on each dynamic output
file_results = files.map(process_file)
# use collect to gather the results in to a list
summarize_directory(file_results.collect())
"""
def to_definition(
self,
annotation_type: type,
name: Optional[str],
description: Optional[str],
code_version: Optional[str],
) -> "OutputDefinition":
dagster_type = (
self.dagster_type
if self.dagster_type is not NoValueSentinel
else _checked_inferred_type(annotation_type)
)
return DynamicOutputDefinition(
dagster_type=dagster_type,
name=name,
description=self.description or description,
is_required=self.is_required,
io_manager_key=self.io_manager_key,
metadata=self.metadata,
code_version=self.code_version or code_version,
)
@property
def is_dynamic(self) -> bool:
return True
@public
| DynamicOut |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/concepts/resources/resources.py | {
"start": 2442,
"end": 3230
} | class ____:
def __init__(self, _user, _password):
pass
# start_resource_dep_example
from dagster import resource
@resource
def credentials():
return ("bad_username", "easy_password")
@resource(required_resource_keys={"credentials"})
def client(init_context):
username, password = init_context.resources.credentials
return Client(username, password)
# end_resource_dep_example
# start_resource_dep_op
from dagster import graph, op
@op(required_resource_keys={"client"})
def get_client(context: OpExecutionContext):
return context.resources.client
# end_resource_dep_op
# start_resource_dep_job
@job(resource_defs={"credentials": credentials, "client": client})
def connect():
get_client()
# end_resource_dep_job
# start_resource_config
| Client |
python | python-poetry__poetry | src/poetry/utils/env/python/providers.py | {
"start": 457,
"end": 1030
} | class ____(findpython.BaseProvider): # type: ignore[misc]
@classmethod
def create(cls) -> Self | None:
return cls()
def find_pythons(self) -> Iterable[findpython.PythonVersion]:
if python := self.find_python_by_name("python"):
return [python]
return []
@classmethod
def find_python_by_name(cls, name: str) -> findpython.PythonVersion | None:
if path := shutil.which(name):
return findpython.PythonVersion(executable=Path(path))
return None
@dataclasses.dataclass
| ShutilWhichPythonProvider |
python | django__django | tests/sessions_tests/no_clear_expired.py | {
"start": 64,
"end": 178
} | class ____(SessionBase):
"""Session store without support for clearing expired sessions."""
pass
| SessionStore |
python | getsentry__sentry | src/sentry/testutils/notifications/platform.py | {
"start": 533,
"end": 667
} | class ____(NotificationData):
source = "test"
message: str
@template_registry.register(MockNotification.source)
| MockNotification |
python | scikit-image__scikit-image | tests/skimage/morphology/test_gray.py | {
"start": 3455,
"end": 15855
} | class ____:
def setup_class(self):
self.black_pixel = 255 * np.ones((6, 6), dtype=np.uint8)
self.black_pixel[2, 2] = 0
self.white_pixel = 255 - self.black_pixel
self.footprints = [
footprint_rectangle((2, 2)),
footprint_rectangle((2, 1)),
footprint_rectangle((2, 1)),
]
def test_dilate_erode_symmetry(self):
for s in self.footprints:
c = gray.erosion(self.black_pixel, s)
d = gray.dilation(self.white_pixel, s)
assert np.all(c == (255 - d))
def test_open_black_pixel(self):
for s in self.footprints:
gray_open = gray.opening(self.black_pixel, s)
assert np.all(gray_open == self.black_pixel)
def test_close_white_pixel(self):
for s in self.footprints:
gray_close = gray.closing(self.white_pixel, s)
assert np.all(gray_close == self.white_pixel)
def test_open_white_pixel(self):
for s in self.footprints:
assert np.all(gray.opening(self.white_pixel, s) == 0)
def test_close_black_pixel(self):
for s in self.footprints:
assert np.all(gray.closing(self.black_pixel, s) == 255)
def test_white_tophat_white_pixel(self):
for s in self.footprints:
tophat = gray.white_tophat(self.white_pixel, s)
assert np.all(tophat == self.white_pixel)
def test_black_tophat_black_pixel(self):
for s in self.footprints:
tophat = gray.black_tophat(self.black_pixel, s)
assert np.all(tophat == self.white_pixel)
def test_white_tophat_black_pixel(self):
for s in self.footprints:
tophat = gray.white_tophat(self.black_pixel, s)
assert np.all(tophat == 0)
def test_black_tophat_white_pixel(self):
for s in self.footprints:
tophat = gray.black_tophat(self.white_pixel, s)
assert np.all(tophat == 0)
gray_functions = [
gray.erosion,
gray.dilation,
gray.opening,
gray.closing,
gray.white_tophat,
gray.black_tophat,
]
@pytest.mark.parametrize("function", gray_functions)
def test_default_footprint(function):
strel = footprints.diamond(radius=1)
image = np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
],
np.uint8,
)
im_expected = function(image, strel)
im_test = function(image)
assert_array_equal(im_expected, im_test)
def test_3d_fallback_default_footprint():
# 3x3x3 cube inside a 7x7x7 image:
image = np.zeros((7, 7, 7), bool)
image[2:-2, 2:-2, 2:-2] = 1
opened = gray.opening(image)
# expect a "hyper-cross" centered in the 5x5x5:
image_expected = np.zeros((7, 7, 7), dtype=bool)
image_expected[2:5, 2:5, 2:5] = ndi.generate_binary_structure(3, 1)
assert_array_equal(opened, image_expected)
gray_3d_fallback_functions = [gray.closing, gray.opening]
@pytest.mark.parametrize("function", gray_3d_fallback_functions)
def test_3d_fallback_cube_footprint(function):
# 3x3x3 cube inside a 7x7x7 image:
image = np.zeros((7, 7, 7), bool)
image[2:-2, 2:-2, 2:-2] = 1
cube = np.ones((3, 3, 3), dtype=np.uint8)
new_image = function(image, cube)
assert_array_equal(new_image, image)
def test_3d_fallback_white_tophat():
image = np.zeros((7, 7, 7), dtype=bool)
image[2, 2:4, 2:4] = 1
image[3, 2:5, 2:5] = 1
image[4, 3:5, 3:5] = 1
with expected_warnings([r'operator.*deprecated|\A\Z']):
new_image = gray.white_tophat(image)
footprint = ndi.generate_binary_structure(3, 1)
with expected_warnings([r'operator.*deprecated|\A\Z']):
image_expected = ndi.white_tophat(
image.view(dtype=np.uint8), footprint=footprint
)
assert_array_equal(new_image, image_expected)
def test_3d_fallback_black_tophat():
image = np.ones((7, 7, 7), dtype=bool)
image[2, 2:4, 2:4] = 0
image[3, 2:5, 2:5] = 0
image[4, 3:5, 3:5] = 0
with expected_warnings([r'operator.*deprecated|\A\Z']):
new_image = gray.black_tophat(image)
footprint = ndi.generate_binary_structure(3, 1)
with expected_warnings([r'operator.*deprecated|\A\Z']):
image_expected = ndi.black_tophat(
image.view(dtype=np.uint8), footprint=footprint
)
assert_array_equal(new_image, image_expected)
def test_2d_ndimage_equivalence():
image = np.zeros((9, 9), np.uint8)
image[2:-2, 2:-2] = 128
image[3:-3, 3:-3] = 196
image[4, 4] = 255
opened = gray.opening(image)
closed = gray.closing(image)
footprint = ndi.generate_binary_structure(2, 1)
ndimage_opened = ndi.grey_opening(image, footprint=footprint)
ndimage_closed = ndi.grey_closing(image, footprint=footprint)
assert_array_equal(opened, ndimage_opened)
assert_array_equal(closed, ndimage_closed)
# float test images
im = np.array(
[
[0.55, 0.72, 0.6, 0.54, 0.42],
[0.65, 0.44, 0.89, 0.96, 0.38],
[0.79, 0.53, 0.57, 0.93, 0.07],
[0.09, 0.02, 0.83, 0.78, 0.87],
[0.98, 0.8, 0.46, 0.78, 0.12],
]
)
eroded = np.array(
[
[0.55, 0.44, 0.54, 0.42, 0.38],
[0.44, 0.44, 0.44, 0.38, 0.07],
[0.09, 0.02, 0.53, 0.07, 0.07],
[0.02, 0.02, 0.02, 0.78, 0.07],
[0.09, 0.02, 0.46, 0.12, 0.12],
]
)
dilated = np.array(
[
[0.72, 0.72, 0.89, 0.96, 0.54],
[0.79, 0.89, 0.96, 0.96, 0.96],
[0.79, 0.79, 0.93, 0.96, 0.93],
[0.98, 0.83, 0.83, 0.93, 0.87],
[0.98, 0.98, 0.83, 0.78, 0.87],
]
)
opened = np.array(
[
[0.55, 0.55, 0.54, 0.54, 0.42],
[0.55, 0.44, 0.54, 0.44, 0.38],
[0.44, 0.53, 0.53, 0.78, 0.07],
[0.09, 0.02, 0.78, 0.78, 0.78],
[0.09, 0.46, 0.46, 0.78, 0.12],
]
)
closed = np.array(
[
[0.72, 0.72, 0.72, 0.54, 0.54],
[0.72, 0.72, 0.89, 0.96, 0.54],
[0.79, 0.79, 0.79, 0.93, 0.87],
[0.79, 0.79, 0.83, 0.78, 0.87],
[0.98, 0.83, 0.78, 0.78, 0.78],
]
)
def test_float():
assert_allclose(gray.erosion(im), eroded)
assert_allclose(gray.dilation(im), dilated)
assert_allclose(gray.opening(im), opened)
assert_allclose(gray.closing(im), closed)
def test_uint16():
im16, eroded16, dilated16, opened16, closed16 = map(
img_as_uint, [im, eroded, dilated, opened, closed]
)
assert_allclose(gray.erosion(im16), eroded16)
assert_allclose(gray.dilation(im16), dilated16)
assert_allclose(gray.opening(im16), opened16)
assert_allclose(gray.closing(im16), closed16)
def test_discontiguous_out_array():
image = np.array([[5, 6, 2], [7, 2, 2], [3, 5, 1]], np.uint8)
out_array_big = np.zeros((5, 5), np.uint8)
out_array = out_array_big[::2, ::2]
expected_dilation = np.array(
[
[7, 0, 6, 0, 6],
[0, 0, 0, 0, 0],
[7, 0, 7, 0, 2],
[0, 0, 0, 0, 0],
[7, 0, 5, 0, 5],
],
np.uint8,
)
expected_erosion = np.array(
[
[5, 0, 2, 0, 2],
[0, 0, 0, 0, 0],
[2, 0, 2, 0, 1],
[0, 0, 0, 0, 0],
[3, 0, 1, 0, 1],
],
np.uint8,
)
gray.dilation(image, out=out_array)
assert_array_equal(out_array_big, expected_dilation)
gray.erosion(image, out=out_array)
assert_array_equal(out_array_big, expected_erosion)
def test_1d_erosion():
image = np.array([1, 2, 3, 2, 1])
expected = np.array([1, 1, 2, 1, 1])
eroded = gray.erosion(image)
assert_array_equal(eroded, expected)
@pytest.mark.parametrize(
"function",
["erosion", "dilation", "closing", "opening", "white_tophat", "black_tophat"],
)
@pytest.mark.parametrize("nrows", [3, 7, 11])
@pytest.mark.parametrize("ncols", [3, 7, 11])
@pytest.mark.parametrize("decomposition", ['separable', 'sequence'])
def test_rectangle_decomposition(cam_image, function, nrows, ncols, decomposition):
"""Validate footprint decomposition for various shapes.
comparison is made to the case without decomposition.
"""
footprint_ndarray = footprint_rectangle((nrows, ncols), decomposition=None)
footprint = footprint_rectangle((nrows, ncols), decomposition=decomposition)
func = getattr(gray, function)
expected = func(cam_image, footprint=footprint_ndarray)
out = func(cam_image, footprint=footprint)
assert_array_equal(expected, out)
@pytest.mark.parametrize(
"function",
["erosion", "dilation", "closing", "opening", "white_tophat", "black_tophat"],
)
@pytest.mark.parametrize("radius", (2, 3))
@pytest.mark.parametrize("decomposition", ['sequence'])
def test_diamond_decomposition(cam_image, function, radius, decomposition):
"""Validate footprint decomposition for various shapes.
comparison is made to the case without decomposition.
"""
footprint_ndarray = footprints.diamond(radius, decomposition=None)
footprint = footprints.diamond(radius, decomposition=decomposition)
func = getattr(gray, function)
expected = func(cam_image, footprint=footprint_ndarray)
out = func(cam_image, footprint=footprint)
assert_array_equal(expected, out)
@pytest.mark.parametrize(
"function",
["erosion", "dilation", "closing", "opening", "white_tophat", "black_tophat"],
)
@pytest.mark.parametrize("m", (0, 1, 3, 5))
@pytest.mark.parametrize("n", (0, 1, 2, 3))
@pytest.mark.parametrize("decomposition", ['sequence'])
@pytest.mark.filterwarnings(
"ignore:.*falling back to decomposition='separable':UserWarning"
)
def test_octagon_decomposition(cam_image, function, m, n, decomposition):
"""Validate footprint decomposition for various shapes.
comparison is made to the case without decomposition.
"""
if m == 0 and n == 0:
with pytest.raises(ValueError):
footprints.octagon(m, n, decomposition=decomposition)
else:
footprint_ndarray = footprints.octagon(m, n, decomposition=None)
footprint = footprints.octagon(m, n, decomposition=decomposition)
func = getattr(gray, function)
expected = func(cam_image, footprint=footprint_ndarray)
out = func(cam_image, footprint=footprint)
assert_array_equal(expected, out)
@pytest.mark.parametrize(
"function",
["erosion", "dilation", "closing", "opening", "white_tophat", "black_tophat"],
)
@pytest.mark.parametrize("shape", [(5, 5, 5), (5, 5, 7)])
@pytest.mark.parametrize("decomposition", ['separable', 'sequence'])
def test_cube_decomposition(cell3d_image, function, shape, decomposition):
"""Validate footprint decomposition for various shapes.
comparison is made to the case without decomposition.
"""
footprint_ndarray = footprint_rectangle(shape, decomposition=None)
footprint = footprint_rectangle(shape, decomposition=decomposition)
func = getattr(gray, function)
expected = func(cell3d_image, footprint=footprint_ndarray)
out = func(cell3d_image, footprint=footprint)
assert_array_equal(expected, out)
@pytest.mark.parametrize(
"function",
["erosion", "dilation", "closing", "opening", "white_tophat", "black_tophat"],
)
@pytest.mark.parametrize("radius", (3,))
@pytest.mark.parametrize("decomposition", ['sequence'])
def test_octahedron_decomposition(cell3d_image, function, radius, decomposition):
"""Validate footprint decomposition for various shapes.
comparison is made to the case without decomposition.
"""
footprint_ndarray = footprints.octahedron(radius, decomposition=None)
footprint = footprints.octahedron(radius, decomposition=decomposition)
func = getattr(gray, function)
expected = func(cell3d_image, footprint=footprint_ndarray)
out = func(cell3d_image, footprint=footprint)
assert_array_equal(expected, out)
| TestEccentricStructuringElements |
python | getsentry__sentry | src/sentry/workflow_engine/migrations/0068_migrate_anomaly_detection_alerts.py | {
"start": 31794,
"end": 33137
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = True
dependencies = [
("workflow_engine", "0067_workflow_action_group_status_group_db_constraint"),
]
operations = [
migrations.RunPython(
migrate_anomaly_detection_alerts,
migrations.RunPython.noop,
hints={"tables": ["sentry_alertrule"]},
)
]
| Migration |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 951944,
"end": 952413
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of ResolveReviewThread"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "thread")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
thread = sgqlc.types.Field("PullRequestReviewThread", graphql_name="thread")
"""The thread to resolve."""
| ResolveReviewThreadPayload |
python | gevent__gevent | src/greentest/3.10/test_threading.py | {
"start": 50128,
"end": 54275
} | class ____(BaseTestCase):
def setUp(self):
restore_default_excepthook(self)
super().setUp()
def test_excepthook(self):
with support.captured_output("stderr") as stderr:
thread = ThreadRunFail(name="excepthook thread")
thread.start()
thread.join()
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {thread.name}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("run failed")', stderr)
self.assertIn('ValueError: run failed', stderr)
@support.cpython_only
def test_excepthook_thread_None(self):
# threading.excepthook called with thread=None: log the thread
# identifier in this case.
with support.captured_output("stderr") as stderr:
try:
raise ValueError("bug")
except Exception as exc:
args = threading.ExceptHookArgs([*sys.exc_info(), None])
try:
threading.excepthook(args)
finally:
# Explicitly break a reference cycle
args = None
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {threading.get_ident()}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("bug")', stderr)
self.assertIn('ValueError: bug', stderr)
def test_system_exit(self):
class ThreadExit(threading.Thread):
def run(self):
sys.exit(1)
# threading.excepthook() silently ignores SystemExit
with support.captured_output("stderr") as stderr:
thread = ThreadExit()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(), '')
def test_custom_excepthook(self):
args = None
def hook(hook_args):
nonlocal args
args = hook_args
try:
with support.swap_attr(threading, 'excepthook', hook):
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(args.exc_type, ValueError)
self.assertEqual(str(args.exc_value), 'run failed')
self.assertEqual(args.exc_traceback, args.exc_value.__traceback__)
self.assertIs(args.thread, thread)
finally:
# Break reference cycle
args = None
def test_custom_excepthook_fail(self):
def threading_hook(args):
raise ValueError("threading_hook failed")
err_str = None
def sys_hook(exc_type, exc_value, exc_traceback):
nonlocal err_str
err_str = str(exc_value)
with support.swap_attr(threading, 'excepthook', threading_hook), \
support.swap_attr(sys, 'excepthook', sys_hook), \
support.captured_output('stderr') as stderr:
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(),
'Exception in threading.excepthook:\n')
self.assertEqual(err_str, 'threading_hook failed')
def test_original_excepthook(self):
def run_thread():
with support.captured_output("stderr") as output:
thread = ThreadRunFail(name="excepthook thread")
thread.start()
thread.join()
return output.getvalue()
def threading_hook(args):
print("Running a thread failed", file=sys.stderr)
default_output = run_thread()
with support.swap_attr(threading, 'excepthook', threading_hook):
custom_hook_output = run_thread()
threading.excepthook = threading.__excepthook__
recovered_output = run_thread()
self.assertEqual(default_output, recovered_output)
self.assertNotEqual(default_output, custom_hook_output)
self.assertEqual(custom_hook_output, "Running a thread failed\n")
| ExceptHookTests |