language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vectorizers.py | {
"start": 6730,
"end": 6867
} | class ____(_ConfigCreateModel):
vectorizer: Union[Vectorizers, _EnumLikeStr] = Field(default=..., exclude=True)
| _VectorizerConfigCreate |
python | tensorflow__tensorflow | tensorflow/python/ops/numpy_ops/np_random_test.py | {
"start": 5339,
"end": 7496
} | class ____(test.TestCase):
def assertNotAllClose(self, a, b, **kwargs):
try:
self.assertAllClose(a, b, **kwargs)
except AssertionError:
return
raise AssertionError(
'The two values are close at all %d elements' % np_array_ops.size(a)
)
def testDistribution(self):
def run_test(*args):
num_samples = 1000
tol = 0.1 # High tolerance to keep the # of samples low else the test
# takes a long time to run.
np_random.seed(10)
outputs = [np_random.randn(*args) for _ in range(num_samples)]
# Test output shape.
for output in outputs:
self.assertEqual(output.shape, tuple(args))
default_dtype = (
np_dtypes.float64
if np_dtypes.is_allow_float64()
else np_dtypes.float32
)
self.assertEqual(output.dtype.as_numpy_dtype, default_dtype)
if np_array_ops.prod(args): # Don't bother with empty arrays.
outputs = [output.tolist() for output in outputs]
# Test that the properties of normal distribution are satisfied.
mean = np_array_ops.mean(outputs, axis=0)
stddev = np_array_ops.std(outputs, axis=0)
self.assertAllClose(mean, np_array_ops.zeros(args), atol=tol)
self.assertAllClose(stddev, np_array_ops.ones(args), atol=tol)
# Test that outputs are different with different seeds.
np_random.seed(20)
diff_seed_outputs = [
np_random.randn(*args).tolist() for _ in range(num_samples)
]
self.assertNotAllClose(outputs, diff_seed_outputs)
# Test that outputs are the same with the same seed.
np_random.seed(10)
same_seed_outputs = [
np_random.randn(*args).tolist() for _ in range(num_samples)
]
self.assertAllClose(outputs, same_seed_outputs)
run_test()
run_test(0)
run_test(1)
run_test(5)
run_test(2, 3)
run_test(0, 2, 3)
run_test(2, 0, 3)
run_test(2, 3, 0)
run_test(2, 3, 5)
if __name__ == '__main__':
ops.enable_eager_execution()
np_math_ops.enable_numpy_methods_on_tensor()
test.main()
| RandNDistriutionTest |
python | huggingface__transformers | src/transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py | {
"start": 10577,
"end": 11197
} | class ____(nn.Module):
def __init__(self, input_dim, output_dim, num_codebooks, use_flexible_linear=False):
super().__init__()
self.use_flexible_linear = use_flexible_linear
if not use_flexible_linear:
self.linear = nn.Linear(input_dim, output_dim, bias=False)
else:
self.linear = KyutaiSpeechToTextFlexibleLinear(input_dim, output_dim, num_layers=num_codebooks)
def forward(self, x, layer_idx=None):
if self.use_flexible_linear:
return self.linear(x, layer_idx)
else:
return self.linear(x)
| KyutaiSpeechToTextLinear |
python | scipy__scipy | scipy/optimize/tests/test_chandrupatla.py | {
"start": 20348,
"end": 38639
} | class ____:
def f(self, q, p):
return special.ndtr(q) - p
@pytest.mark.parametrize('p', [0.6, np.linspace(-0.05, 1.05, 10)])
def test_basic(self, p, xp):
# Invert distribution CDF and compare against distribution `ppf`
a, b = xp.asarray(-5.), xp.asarray(5.)
res = find_root(self.f, (a, b), args=(xp.asarray(p),))
ref = xp.asarray(stats.norm().ppf(p), dtype=xp.asarray(p).dtype)
xp_assert_close(res.x, ref)
@pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)])
def test_vectorization(self, shape, xp):
# Test for correct functionality, output shapes, and dtypes for various
# input shapes.
p = (np.linspace(-0.05, 1.05, 12).reshape(shape) if shape
else np.float64(0.6))
p_xp = xp.asarray(p)
args_xp = (p_xp,)
dtype = p_xp.dtype
@np.vectorize
def find_root_single(p):
return find_root(self.f, (-5, 5), args=(p,))
def f(*args, **kwargs):
f.f_evals += 1
return self.f(*args, **kwargs)
f.f_evals = 0
bracket = xp.asarray(-5., dtype=xp.float64), xp.asarray(5., dtype=xp.float64)
res = find_root(f, bracket, args=args_xp)
refs = find_root_single(p).ravel()
ref_x = [ref.x for ref in refs]
ref_x = xp.reshape(xp.asarray(ref_x, dtype=dtype), shape)
xp_assert_close(res.x, ref_x)
ref_f = [ref.f_x for ref in refs]
ref_f = xp.reshape(xp.asarray(ref_f, dtype=dtype), shape)
xp_assert_close(res.f_x, ref_f, atol=1e-15)
xp_assert_equal(res.f_x, self.f(res.x, *args_xp))
ref_success = [bool(ref.success) for ref in refs]
ref_success = xp.reshape(xp.asarray(ref_success, dtype=xp.bool), shape)
xp_assert_equal(res.success, ref_success)
ref_status = [ref.status for ref in refs]
ref_status = xp.reshape(xp.asarray(ref_status, dtype=xp.int32), shape)
xp_assert_equal(res.status, ref_status)
ref_nfev = [ref.nfev for ref in refs]
ref_nfev = xp.reshape(xp.asarray(ref_nfev, dtype=xp.int32), shape)
if is_numpy(xp):
xp_assert_equal(res.nfev, ref_nfev)
assert xp.max(res.nfev) == f.f_evals
else: # different backend may lead to different nfev
assert res.nfev.shape == shape
assert res.nfev.dtype == xp.int32
ref_nit = [ref.nit for ref in refs]
ref_nit = xp.reshape(xp.asarray(ref_nit, dtype=xp.int32), shape)
if is_numpy(xp):
xp_assert_equal(res.nit, ref_nit)
assert xp.max(res.nit) == f.f_evals-2
else:
assert res.nit.shape == shape
assert res.nit.dtype == xp.int32
ref_xl = [ref.bracket[0] for ref in refs]
ref_xl = xp.reshape(xp.asarray(ref_xl, dtype=dtype), shape)
xp_assert_close(res.bracket[0], ref_xl)
ref_xr = [ref.bracket[1] for ref in refs]
ref_xr = xp.reshape(xp.asarray(ref_xr, dtype=dtype), shape)
xp_assert_close(res.bracket[1], ref_xr)
xp_assert_less(res.bracket[0], res.bracket[1])
finite = xp.isfinite(res.x)
assert xp.all((res.x[finite] == res.bracket[0][finite])
| (res.x[finite] == res.bracket[1][finite]))
# PyTorch and CuPy don't solve to the same accuracy as NumPy - that's OK.
atol = 1e-15 if is_numpy(xp) else 1e-9
ref_fl = [ref.f_bracket[0] for ref in refs]
ref_fl = xp.reshape(xp.asarray(ref_fl, dtype=dtype), shape)
xp_assert_close(res.f_bracket[0], ref_fl, atol=atol)
xp_assert_equal(res.f_bracket[0], self.f(res.bracket[0], *args_xp))
ref_fr = [ref.f_bracket[1] for ref in refs]
ref_fr = xp.reshape(xp.asarray(ref_fr, dtype=dtype), shape)
xp_assert_close(res.f_bracket[1], ref_fr, atol=atol)
xp_assert_equal(res.f_bracket[1], self.f(res.bracket[1], *args_xp))
assert xp.all(xp.abs(res.f_x[finite]) ==
xp.minimum(xp.abs(res.f_bracket[0][finite]),
xp.abs(res.f_bracket[1][finite])))
def test_flags(self, xp):
# Test cases that should produce different status flags; show that all
# can be produced simultaneously.
def f(xs, js):
# Note that full_like and int(j) shouldn't really be required. CuPy
# is just really picky here, so I'm making it a special case to
# make sure the other backends work when the user is less careful.
assert js.dtype == xp.int64
if is_cupy(xp):
funcs = [lambda x: x - 2.5,
lambda x: x - 10,
lambda x: (x - 0.1)**3,
lambda x: xp.full_like(x, xp.asarray(xp.nan))]
return [funcs[int(j)](x) for x, j in zip(xs, js)]
funcs = [lambda x: x - 2.5,
lambda x: x - 10,
lambda x: (x - 0.1) ** 3,
lambda x: xp.nan]
return [funcs[j](x) for x, j in zip(xs, js)]
args = (xp.arange(4, dtype=xp.int64),)
a, b = xp.asarray([0.]*4), xp.asarray([xp.pi]*4)
res = find_root(f, (a, b), args=args, maxiter=2)
ref_flags = xp.asarray([eim._ECONVERGED,
eim._ESIGNERR,
eim._ECONVERR,
eim._EVALUEERR], dtype=xp.int32)
xp_assert_equal(res.status, ref_flags)
def test_convergence(self, xp):
# Test that the convergence tolerances behave as expected
rng = np.random.default_rng(2585255913088665241)
p = xp.asarray(rng.random(size=3))
bracket = (-xp.asarray(5.), xp.asarray(5.))
args = (p,)
kwargs0 = dict(args=args, tolerances=dict(xatol=0, xrtol=0, fatol=0, frtol=0))
kwargs = deepcopy(kwargs0)
kwargs['tolerances']['xatol'] = 1e-3
res1 = find_root(self.f, bracket, **kwargs)
xp_assert_less(res1.bracket[1] - res1.bracket[0],
xp.full_like(p, xp.asarray(1e-3)))
kwargs['tolerances']['xatol'] = 1e-6
res2 = find_root(self.f, bracket, **kwargs)
xp_assert_less(res2.bracket[1] - res2.bracket[0],
xp.full_like(p, xp.asarray(1e-6)))
xp_assert_less(res2.bracket[1] - res2.bracket[0],
res1.bracket[1] - res1.bracket[0])
kwargs = deepcopy(kwargs0)
kwargs['tolerances']['xrtol'] = 1e-3
res1 = find_root(self.f, bracket, **kwargs)
xp_assert_less(res1.bracket[1] - res1.bracket[0], 1e-3 * xp.abs(res1.x))
kwargs['tolerances']['xrtol'] = 1e-6
res2 = find_root(self.f, bracket, **kwargs)
xp_assert_less(res2.bracket[1] - res2.bracket[0],
1e-6 * xp.abs(res2.x))
xp_assert_less(res2.bracket[1] - res2.bracket[0],
res1.bracket[1] - res1.bracket[0])
kwargs = deepcopy(kwargs0)
kwargs['tolerances']['fatol'] = 1e-3
res1 = find_root(self.f, bracket, **kwargs)
xp_assert_less(xp.abs(res1.f_x), xp.full_like(p, xp.asarray(1e-3)))
kwargs['tolerances']['fatol'] = 1e-6
res2 = find_root(self.f, bracket, **kwargs)
xp_assert_less(xp.abs(res2.f_x), xp.full_like(p, xp.asarray(1e-6)))
xp_assert_less(xp.abs(res2.f_x), xp.abs(res1.f_x))
kwargs = deepcopy(kwargs0)
kwargs['tolerances']['frtol'] = 1e-3
x1, x2 = bracket
f0 = xp.minimum(xp.abs(self.f(x1, *args)), xp.abs(self.f(x2, *args)))
res1 = find_root(self.f, bracket, **kwargs)
xp_assert_less(xp.abs(res1.f_x), 1e-3*f0)
kwargs['tolerances']['frtol'] = 1e-6
res2 = find_root(self.f, bracket, **kwargs)
xp_assert_less(xp.abs(res2.f_x), 1e-6*f0)
xp_assert_less(xp.abs(res2.f_x), xp.abs(res1.f_x))
def test_maxiter_callback(self, xp):
# Test behavior of `maxiter` parameter and `callback` interface
p = xp.asarray(0.612814)
bracket = (xp.asarray(-5.), xp.asarray(5.))
maxiter = 5
def f(q, p):
res = special.ndtr(q) - p
f.x = q
f.f_x = res
return res
f.x = None
f.f_x = None
res = find_root(f, bracket, args=(p,), maxiter=maxiter)
assert not xp.any(res.success)
assert xp.all(res.nfev == maxiter+2)
assert xp.all(res.nit == maxiter)
def callback(res):
callback.iter += 1
callback.res = res
assert hasattr(res, 'x')
if callback.iter == 0:
# callback is called once with initial bracket
assert (res.bracket[0], res.bracket[1]) == bracket
else:
changed = (((res.bracket[0] == callback.bracket[0])
& (res.bracket[1] != callback.bracket[1]))
| ((res.bracket[0] != callback.bracket[0])
& (res.bracket[1] == callback.bracket[1])))
assert xp.all(changed)
callback.bracket[0] = res.bracket[0]
callback.bracket[1] = res.bracket[1]
assert res.status == eim._EINPROGRESS
xp_assert_equal(self.f(res.bracket[0], p), res.f_bracket[0])
xp_assert_equal(self.f(res.bracket[1], p), res.f_bracket[1])
xp_assert_equal(self.f(res.x, p), res.f_x)
if callback.iter == maxiter:
raise StopIteration
callback.iter = -1 # callback called once before first iteration
callback.res = None
callback.bracket = [None, None]
res2 = find_root(f, bracket, args=(p,), callback=callback)
# terminating with callback is identical to terminating due to maxiter
# (except for `status`)
for key in res.keys():
if key == 'status':
xp_assert_equal(res[key], xp.asarray(eim._ECONVERR, dtype=xp.int32))
xp_assert_equal(res2[key], xp.asarray(eim._ECALLBACK, dtype=xp.int32))
elif key in {'bracket', 'f_bracket'}:
xp_assert_equal(res2[key][0], res[key][0])
xp_assert_equal(res2[key][1], res[key][1])
elif key.startswith('_'):
continue
else:
xp_assert_equal(res2[key], res[key])
@pytest.mark.parametrize('case', _CHANDRUPATLA_TESTS)
def test_nit_expected(self, case, xp):
# Test that `_chandrupatla` implements Chandrupatla's algorithm:
# in all 40 test cases, the number of iterations performed
# matches the number reported in the original paper.
f, bracket, root, nfeval, id = case
# Chandrupatla's criterion is equivalent to
# abs(x2-x1) < 4*abs(xmin)*xrtol + xatol, but we use the more standard
# abs(x2-x1) < abs(xmin)*xrtol + xatol. Therefore, set xrtol to 4x
# that used by Chandrupatla in tests.
bracket = (xp.asarray(bracket[0], dtype=xp.float64),
xp.asarray(bracket[1], dtype=xp.float64))
root = xp.asarray(root, dtype=xp.float64)
res = find_root(f, bracket, tolerances=dict(xrtol=4e-10, xatol=1e-5))
xp_assert_close(res.f_x, xp.asarray(f(root), dtype=xp.float64),
rtol=1e-8, atol=2e-3)
xp_assert_equal(res.nfev, xp.asarray(nfeval, dtype=xp.int32))
@pytest.mark.parametrize("root", (0.622, [0.622, 0.623]))
@pytest.mark.parametrize("dtype", ('float16', 'float32', 'float64'))
def test_dtype(self, root, dtype, xp):
# Test that dtypes are preserved
not_numpy = not is_numpy(xp)
if not_numpy and dtype == 'float16':
pytest.skip("`float16` dtype only supported for NumPy arrays.")
dtype = getattr(xp, dtype, None)
if dtype is None:
pytest.skip(f"{xp} does not support {dtype}")
def f(x, root):
res = (x - root) ** 3.
if is_numpy(xp): # NumPy does not preserve dtype
return xp.asarray(res, dtype=dtype)
return res
a, b = xp.asarray(-3, dtype=dtype), xp.asarray(3, dtype=dtype)
root = xp.asarray(root, dtype=dtype)
res = find_root(f, (a, b), args=(root,), tolerances={'xatol': 1e-3})
try:
xp_assert_close(res.x, root, atol=1e-3)
except AssertionError:
assert res.x.dtype == dtype
xp.all(res.f_x == 0)
def test_input_validation(self, xp):
# Test input validation for appropriate error messages
def func(x):
return x
message = '`func` must be callable.'
with pytest.raises(ValueError, match=message):
bracket = xp.asarray(-4), xp.asarray(4)
find_root(None, bracket)
message = 'Abscissae and function output must be real numbers.'
with pytest.raises(ValueError, match=message):
bracket = xp.asarray(-4+1j), xp.asarray(4)
find_root(func, bracket)
# raised by `np.broadcast, but the traceback is readable IMO
# all messages include this part
message = "(not be broadcast|Attempting to broadcast a dimension of length)"
with pytest.raises((ValueError, RuntimeError), match=message):
bracket = xp.asarray([-2, -3]), xp.asarray([3, 4, 5])
find_root(func, bracket)
message = "The shape of the array returned by `func`..."
with pytest.raises(ValueError, match=message):
bracket = xp.asarray([-3, -3]), xp.asarray([5, 5])
find_root(lambda x: [x[0], x[1], x[1]], bracket)
message = 'Tolerances must be non-negative scalars.'
bracket = xp.asarray(-4), xp.asarray(4)
with pytest.raises(ValueError, match=message):
find_root(func, bracket, tolerances=dict(xatol=-1))
with pytest.raises(ValueError, match=message):
find_root(func, bracket, tolerances=dict(xrtol=xp.nan))
with pytest.raises(ValueError, match=message):
find_root(func, bracket, tolerances=dict(fatol='ekki'))
with pytest.raises(ValueError, match=message):
find_root(func, bracket, tolerances=dict(frtol=xp.nan))
message = '`maxiter` must be a non-negative integer.'
with pytest.raises(ValueError, match=message):
find_root(func, bracket, maxiter=1.5)
with pytest.raises(ValueError, match=message):
find_root(func, bracket, maxiter=-1)
message = '`callback` must be callable.'
with pytest.raises(ValueError, match=message):
find_root(func, bracket, callback='shrubbery')
def test_special_cases(self, xp):
# Test edge cases and other special cases
# Test infinite function values
def f(x):
return 1 / x + 1 - 1 / (-x + 1)
a, b = xp.asarray([0.1, 0., 0., 0.1]), xp.asarray([0.9, 1.0, 0.9, 1.0])
with np.errstate(divide='ignore', invalid='ignore'):
res = find_root(f, (a, b))
assert xp.all(res.success)
xp_assert_close(res.x[1:], xp.full((3,), res.x[0]))
# Test that integers are not passed to `f`
# (otherwise this would overflow)
def f(x):
assert xp.isdtype(x.dtype, "real floating")
# this would overflow if x were an xp integer dtype
return x ** 31 - 1
# note that all inputs are integer type; result is automatically default float
res = find_root(f, (xp.asarray(-7), xp.asarray(5)))
assert res.success
xp_assert_close(res.x, xp.asarray(1.))
# Test that if both ends of bracket equal root, algorithm reports
# convergence.
def f(x, root):
return x**2 - root
root = xp.asarray([0, 1])
res = find_root(f, (xp.asarray(1), xp.asarray(1)), args=(root,))
xp_assert_equal(res.success, xp.asarray([False, True]))
xp_assert_equal(res.x, xp.asarray([xp.nan, 1.]))
def f(x):
return 1/x
with np.errstate(invalid='ignore'):
inf = xp.asarray(xp.inf)
res = find_root(f, (inf, inf))
assert res.success
xp_assert_equal(res.x, xp.asarray(xp.inf))
# Test maxiter = 0. Should do nothing to bracket.
def f(x):
return x**3 - 1
a, b = xp.asarray(-3.), xp.asarray(5.)
res = find_root(f, (a, b), maxiter=0)
xp_assert_equal(res.success, xp.asarray(False))
xp_assert_equal(res.status, xp.asarray(-2, dtype=xp.int32))
xp_assert_equal(res.nit, xp.asarray(0, dtype=xp.int32))
xp_assert_equal(res.nfev, xp.asarray(2, dtype=xp.int32))
xp_assert_equal(res.bracket[0], a)
xp_assert_equal(res.bracket[1], b)
# The `x` attribute is the one with the smaller function value
xp_assert_equal(res.x, a)
# Reverse bracket; check that this is still true
res = find_root(f, (-b, -a), maxiter=0)
xp_assert_equal(res.x, -a)
# Test maxiter = 1
res = find_root(f, (a, b), maxiter=1)
xp_assert_equal(res.success, xp.asarray(True))
xp_assert_equal(res.status, xp.asarray(0, dtype=xp.int32))
xp_assert_equal(res.nit, xp.asarray(1, dtype=xp.int32))
xp_assert_equal(res.nfev, xp.asarray(3, dtype=xp.int32))
xp_assert_close(res.x, xp.asarray(1.))
# Test scalar `args` (not in tuple)
def f(x, c):
return c*x - 1
res = find_root(f, (xp.asarray(-1), xp.asarray(1)), args=xp.asarray(3))
xp_assert_close(res.x, xp.asarray(1/3))
# # TODO: Test zero tolerance
# # ~~What's going on here - why are iterations repeated?~~
# # tl goes to zero when xatol=xrtol=0. When function is nearly linear,
# # this causes convergence issues.
# def f(x):
# return np.cos(x)
#
# res = _chandrupatla_root(f, 0, np.pi, xatol=0, xrtol=0)
# assert res.nit < 100
# xp = np.nextafter(res.x, np.inf)
# xm = np.nextafter(res.x, -np.inf)
# assert np.abs(res.fun) < np.abs(f(xp))
# assert np.abs(res.fun) < np.abs(f(xm))
| TestFindRoot |
python | apache__airflow | airflow-core/src/airflow/exceptions.py | {
"start": 5894,
"end": 6120
} | class ____(NamedTuple):
"""Information about a single error in a file."""
line_no: int | None
message: str
def __str__(self):
return f"{self.message}. Line number: s{str(self.line_no)},"
| FileSyntaxError |
python | sqlalchemy__sqlalchemy | test/orm/test_deferred.py | {
"start": 90065,
"end": 94054
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"thing",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(20)),
)
Table(
"human",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("thing_id", Integer, ForeignKey("thing.id")),
Column("name", String(20)),
)
@classmethod
def setup_mappers(cls):
thing, human = cls.tables.thing, cls.tables.human
class Human(cls.Basic):
pass
class Thing(cls.Basic):
pass
cls.mapper_registry.map_imperatively(
Human, human, properties={"thing": relationship(Thing)}
)
cls.mapper_registry.map_imperatively(
Thing, thing, properties={"name": deferred(thing.c.name)}
)
@classmethod
def insert_data(cls, connection):
thing, human = cls.tables.thing, cls.tables.human
connection.execute(thing.insert(), [{"id": 1, "name": "Chair"}])
connection.execute(
human.insert(), [{"id": 1, "thing_id": 1, "name": "Clark Kent"}]
)
def _test(self, thing):
assert "name" in attributes.instance_state(thing).dict
def test_no_previous_query(self):
Thing = self.classes.Thing
session = fixture_session()
thing = (
session.query(Thing).options(sa.orm.undefer(Thing.name)).first()
)
self._test(thing)
def test_query_twice_with_clear(self):
Thing = self.classes.Thing
session = fixture_session()
result = session.query(Thing).first() # noqa
session.expunge_all()
thing = (
session.query(Thing).options(sa.orm.undefer(Thing.name)).first()
)
self._test(thing)
def test_query_twice_no_clear(self):
Thing = self.classes.Thing
session = fixture_session()
result = session.query(Thing).first() # noqa
thing = (
session.query(Thing).options(sa.orm.undefer(Thing.name)).first()
)
self._test(thing)
def test_joinedload_with_clear(self):
Thing, Human = self.classes.Thing, self.classes.Human
session = fixture_session()
human = ( # noqa
session.query(Human)
.options(sa.orm.joinedload(Human.thing))
.first()
)
session.expunge_all()
thing = (
session.query(Thing).options(sa.orm.undefer(Thing.name)).first()
)
self._test(thing)
def test_joinedload_no_clear(self):
Thing, Human = self.classes.Thing, self.classes.Human
session = fixture_session()
human = ( # noqa
session.query(Human)
.options(sa.orm.joinedload(Human.thing))
.first()
)
thing = (
session.query(Thing).options(sa.orm.undefer(Thing.name)).first()
)
self._test(thing)
def test_join_with_clear(self):
Thing, Human = self.classes.Thing, self.classes.Human
session = fixture_session()
result = ( # noqa
session.query(Human).add_entity(Thing).join(Human.thing).first()
)
session.expunge_all()
thing = (
session.query(Thing).options(sa.orm.undefer(Thing.name)).first()
)
self._test(thing)
def test_join_no_clear(self):
Thing, Human = self.classes.Thing, self.classes.Human
session = fixture_session()
result = ( # noqa
session.query(Human).add_entity(Thing).join(Human.thing).first()
)
thing = (
session.query(Thing).options(sa.orm.undefer(Thing.name)).first()
)
self._test(thing)
| DeferredPopulationTest |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/splice_t/package.py | {
"start": 217,
"end": 806
} | class ____(Package):
"""Simple package with one optional dependency"""
homepage = "http://www.example.com"
url = "http://www.example.com/splice-t-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
depends_on("splice-h")
depends_on("splice-z")
def install(self, spec, prefix):
with open(prefix.join("splice-t"), "w", encoding="utf-8") as f:
f.write("splice-t: {0}".format(prefix))
f.write("splice-h: {0}".format(spec["splice-h"].prefix))
f.write("splice-z: {0}".format(spec["splice-z"].prefix))
| SpliceT |
python | getsentry__sentry | src/sentry/sentry_apps/utils/webhooks.py | {
"start": 99,
"end": 275
} | class ____(SentryAppActionType):
ASSIGNED = "assigned"
CREATED = "created"
IGNORED = "ignored"
RESOLVED = "resolved"
UNRESOLVED = "unresolved"
| IssueActionType |
python | getsentry__sentry | tests/sentry/models/test_dynamicsampling.py | {
"start": 1020,
"end": 13204
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.second_project = self.create_project()
self.second_organization = self.create_organization(owner=self.user)
self.third_project = self.create_project(organization=self.second_organization)
def test_update_or_create(self) -> None:
condition = {"op": "equals", "name": "environment", "value": "prod"}
end1 = timezone.now() + timedelta(hours=1)
rule = CustomDynamicSamplingRule.update_or_create(
condition=condition,
start=timezone.now(),
end=end1,
project_ids=[self.project.id],
organization_id=self.organization.id,
num_samples=100,
sample_rate=0.5,
query="environment:prod",
)
end2 = timezone.now() + timedelta(hours=1)
updated_rule = CustomDynamicSamplingRule.update_or_create(
condition=condition,
start=timezone.now() + timedelta(minutes=1),
end=end2,
project_ids=[self.project.id],
organization_id=self.organization.id,
num_samples=100,
sample_rate=0.5,
query="environment:prod",
)
assert rule.id == updated_rule.id
projects = updated_rule.projects.all()
assert len(projects) == 1
assert self.project in projects
assert updated_rule.end_date >= end1
assert updated_rule.end_date >= end2
def test_assign_rule_id(self) -> None:
rule_ids = set()
rules = []
for idx in range(3):
rule = _create_rule_for_env(idx, [self.project], self.organization)
rule_ids.add(rule.rule_id)
rules.append(rule)
# all 3 rules have different rule ids
assert len(rule_ids) == 3
# make a rule obsolete and check that the rule id is reused
rules[1].is_active = False
rules[1].save()
new_rule = _create_rule_for_env(4, [self.project], self.organization)
assert new_rule.rule_id == rules[1].rule_id
# a new rule will take another slot (now that there is no free slot)
new_rule_2 = _create_rule_for_env(5, [self.project], self.organization)
assert new_rule_2.rule_id not in rule_ids
# make again an empty slot ( this time by having the rule expire)
rules[2].start_date = timezone.now() - timedelta(hours=2)
rules[2].end_date = timezone.now() - timedelta(hours=1)
rules[2].save()
# the new rule should take the empty slot
new_rule_3 = _create_rule_for_env(6, [self.project], self.organization)
assert new_rule_3.rule_id == rules[2].rule_id
def test_deactivate_old_rules(self) -> None:
idx = 1
old_rules = []
new_rules = []
def create_rule(is_old: bool, idx: int) -> CustomDynamicSamplingRule:
condition = {"op": "equals", "name": "environment", "value": f"prod{idx}"}
if is_old:
end_delta = -timedelta(hours=1)
else:
end_delta = timedelta(hours=1)
return CustomDynamicSamplingRule.update_or_create(
condition=condition,
start=timezone.now() - timedelta(hours=2),
end=timezone.now() + end_delta,
project_ids=[self.project.id],
organization_id=self.organization.id,
num_samples=100,
sample_rate=0.5,
query=f"environment:prod{idx}",
)
for i in range(10):
for is_old in [True, False]:
idx += 1
rule = create_rule(is_old, idx)
if is_old:
old_rules.append(rule)
else:
new_rules.append(rule)
CustomDynamicSamplingRule.deactivate_old_rules()
# check that all old rules are inactive and all new rules are active
inactive_rules = list(CustomDynamicSamplingRule.objects.filter(is_active=False))
assert len(inactive_rules) == 10
for rule in old_rules:
assert rule in inactive_rules
active_rules = list(CustomDynamicSamplingRule.objects.filter(is_active=True))
assert len(active_rules) == 10
for rule in new_rules:
assert rule in active_rules
def test_get_rule_for_org(self) -> None:
"""
Test the get_rule_for_org method
"""
condition = {"op": "equals", "name": "environment", "value": "prod"}
# check empty result
rule = CustomDynamicSamplingRule.get_rule_for_org(
condition, self.organization.id, [self.project.id]
)
assert rule is None
new_rule = CustomDynamicSamplingRule.update_or_create(
condition=condition,
start=timezone.now() - timedelta(hours=2),
end=timezone.now() + timedelta(hours=1),
project_ids=[self.project.id],
organization_id=self.organization.id,
num_samples=100,
sample_rate=0.5,
query="environment:prod",
)
rule = CustomDynamicSamplingRule.get_rule_for_org(
condition, self.organization.id, [self.project.id]
)
assert rule == new_rule
def test_get_project_rules(self) -> None:
"""
Tests that all valid rules (i.e. active and within the date range) that apply to a project
(i.e. that are either organization rules or apply to the project) are returned.
"""
idx = [1]
def create_rule(
project_ids: list[int],
org_id: int | None = None,
old: bool = False,
new: bool = False,
) -> CustomDynamicSamplingRule:
idx[0] += 1
condition = {"op": "equals", "name": "environment", "value": f"prod{idx[0]}"}
if old:
end_delta = -timedelta(hours=2)
else:
end_delta = timedelta(hours=2)
if new:
start_delta = timedelta(hours=1)
else:
start_delta = -timedelta(hours=1)
if org_id is None:
org_id = self.organization.id
return CustomDynamicSamplingRule.update_or_create(
condition=condition,
start=timezone.now() + start_delta,
end=timezone.now() + end_delta,
project_ids=project_ids,
organization_id=org_id,
num_samples=100,
sample_rate=0.5,
query=f"environment:prod{idx[0]}",
)
valid_project_rule = create_rule([self.project.id, self.second_project.id])
valid_org_rule = create_rule([])
# rule for another project
create_rule([self.second_project.id])
# rule for another org
create_rule([self.third_project.id], org_id=self.second_organization.id)
# old project rule ( already expired)
create_rule([self.project.id], old=True)
# new project rule ( not yet active)
create_rule([self.project.id], new=True)
# old org rule
create_rule([], old=True)
# new org rule
create_rule([], new=True)
# we should only get valid_project_rule and valid_org_rule
rules = list(CustomDynamicSamplingRule.get_project_rules(self.project))
assert len(rules) == 2
assert valid_project_rule in rules
assert valid_org_rule in rules
def test_separate_projects_create_different_rules(self) -> None:
"""
Tests that same condition for different projects create different rules
"""
condition = {"op": "equals", "name": "environment", "value": "prod"}
end1 = timezone.now() + timedelta(hours=1)
rule = CustomDynamicSamplingRule.update_or_create(
condition=condition,
start=timezone.now(),
end=end1,
project_ids=[self.project.id],
organization_id=self.organization.id,
num_samples=100,
sample_rate=0.5,
query="environment:prod",
)
end2 = timezone.now() + timedelta(hours=1)
second_rule = CustomDynamicSamplingRule.update_or_create(
condition=condition,
start=timezone.now() + timedelta(minutes=1),
end=end2,
project_ids=[self.second_project.id],
organization_id=self.organization.id,
num_samples=100,
sample_rate=0.5,
query="environment:prod",
)
assert rule.id != second_rule.id
first_projects = rule.projects.all()
assert len(first_projects) == 1
assert self.project == first_projects[0]
second_projects = second_rule.projects.all()
assert len(second_projects) == 1
assert self.second_project == second_projects[0]
def test_deactivate_expired_rules(self) -> None:
"""
Tests that expired, and only expired, rules are deactivated
"""
def create_rule(
env_idx: int, end: datetime, project_ids: list[int]
) -> CustomDynamicSamplingRule:
condition = {"op": "equals", "name": "environment", "value": f"prod{env_idx}"}
return CustomDynamicSamplingRule.update_or_create(
condition=condition,
start=timezone.now() - timedelta(hours=5),
end=end,
project_ids=project_ids,
organization_id=self.organization.id,
num_samples=100,
sample_rate=0.5,
query=f"environment:prod{env_idx}",
)
env_idx = 1
expired_rules: set[int] = set()
active_rules: set[int] = set()
for projects in [
[self.project],
[self.second_project],
[self.third_project],
[self.project, self.second_project, self.third_project],
[],
]:
# create some expired rules
project_ids = [p.id for p in projects]
rule = create_rule(env_idx, timezone.now() - timedelta(minutes=5), project_ids)
expired_rules.add(rule.id)
env_idx += 1
# create some active rules
rule = create_rule(env_idx, timezone.now() + timedelta(minutes=5), project_ids)
active_rules.add(rule.id)
env_idx += 1
# check that all rules are active before deactivation
for rule in CustomDynamicSamplingRule.objects.all():
assert rule.is_active
CustomDynamicSamplingRule.deactivate_expired_rules()
# check that all expired rules are inactive and all active rules are still active
for rule in CustomDynamicSamplingRule.objects.all():
if rule.id in expired_rules:
assert not rule.is_active
else:
assert rule.is_active
assert rule.id in active_rules
def test_per_project_limit(self) -> None:
"""
Tests that it is not possible to create more than MAX_CUSTOM_RULES_PER_PROJECT
for a project
"""
# a few org rules
num_org_rules = 10
for idx in range(num_org_rules):
_create_rule_for_env(idx, [], self.organization)
# now add project rules (up to MAX_CUSTOM_RULES_PER_PROJECT)
for idx in range(num_org_rules, MAX_CUSTOM_RULES_PER_PROJECT):
_create_rule_for_env(idx, [self.project], self.organization)
_create_rule_for_env(idx, [self.second_project], self.organization)
# we've reached the limit for both project and second_project next one should raise TooManyRules()
with pytest.raises(TooManyRules):
_create_rule_for_env(MAX_CUSTOM_RULES_PER_PROJECT, [self.project], self.organization)
with pytest.raises(TooManyRules):
_create_rule_for_env(
MAX_CUSTOM_RULES_PER_PROJECT, [self.second_project], self.organization
)
| TestCustomDynamicSamplingRuleProject |
python | wandb__wandb | wandb/apis/public/artifacts.py | {
"start": 2174,
"end": 3593
} | class ____(RelayPaginator["ArtifactAliasFragment", str]):
"""An internal iterator of collection alias names.
<!-- lazydoc-ignore-init: internal -->
"""
QUERY: ClassVar[Document | None] = None
last_response: Connection[ArtifactAliasFragment] | None
def __init__(self, client: Client, collection_id: str, per_page: int = 1_000):
if self.QUERY is None:
from wandb.sdk.artifacts._generated import ARTIFACT_COLLECTION_ALIASES_GQL
type(self).QUERY = gql(ARTIFACT_COLLECTION_ALIASES_GQL)
variables = {"id": collection_id}
super().__init__(client, variables=variables, per_page=per_page)
def _update_response(self) -> None:
from wandb.sdk.artifacts._generated import (
ArtifactAliasFragment,
ArtifactCollectionAliases,
)
data = self.client.execute(self.QUERY, variable_values=self.variables)
result = ArtifactCollectionAliases.model_validate(data)
# Extract the inner `*Connection` result for faster/easier access.
if not ((coll := result.artifact_collection) and (conn := coll.aliases)):
raise ValueError(f"Unable to parse {nameof(type(self))!r} response data")
self.last_response = Connection[ArtifactAliasFragment].model_validate(conn)
def _convert(self, node: ArtifactAliasFragment) -> str:
return node.alias
| _ArtifactCollectionAliases |
python | pytorch__pytorch | torch/ao/nn/intrinsic/modules/fused.py | {
"start": 6097,
"end": 6805
} | class ____(_FusedModule):
r"""This is a sequential container which calls the Conv 3d, Batch Norm 3d, and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, bn, relu):
assert (
type_before_parametrizations(conv) == Conv3d
and type_before_parametrizations(bn) == BatchNorm3d
and type_before_parametrizations(relu) == ReLU
), (
f"Incorrect types for input modules{type_before_parametrizations(conv)}"
f"{type_before_parametrizations(bn)}"
f"{type_before_parametrizations(relu)}"
)
super().__init__(conv, bn, relu)
| ConvBnReLU3d |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/input.py | {
"start": 16162,
"end": 20510
} | class ____(
NamedTuple(
"_In",
[
("dagster_type", PublicAttr[Union[DagsterType, type[NoValueSentinel]]]),
("description", PublicAttr[Optional[str]]),
("default_value", PublicAttr[Any]),
("metadata", PublicAttr[Optional[Mapping[str, Any]]]),
(
"asset_key",
PublicAttr[Optional[Union[AssetKey, Callable[["InputContext"], AssetKey]]]],
),
(
"asset_partitions",
PublicAttr[Optional[Union[set[str], Callable[["InputContext"], set[str]]]]],
),
("input_manager_key", PublicAttr[Optional[str]]),
],
)
):
"""Defines an argument to an op's compute function.
Inputs may flow from previous op's outputs, or be stubbed using config. They may optionally
be typed using the Dagster type system.
Args:
dagster_type (Optional[Union[Type, DagsterType]]]):
The type of this input. Should only be set if the correct type can not
be inferred directly from the type signature of the decorated function.
description (Optional[str]): Human-readable description of the input.
default_value (Optional[Any]): The default value to use if no input is provided.
metadata (Optional[Dict[str, RawMetadataValue]]): A dict of metadata for the input.
asset_key (Optional[Union[AssetKey, InputContext -> AssetKey]]): An AssetKey
(or function that produces an AssetKey from the InputContext) which should be associated
with this In. Used for tracking lineage information through Dagster.
asset_partitions (Optional[Union[Set[str], InputContext -> Set[str]]]): A
set of partitions of the given asset_key (or a function that produces this list of
partitions from the InputContext) which should be associated with this In.
input_manager_key (Optional[str]): The resource key for the
:py:class:`InputManager` used for loading this input when it is not connected to an
upstream output.
"""
def __new__(
cls,
dagster_type: Union[type, UnionType, DagsterType] = NoValueSentinel,
description: Optional[str] = None,
default_value: Any = NoValueSentinel,
metadata: Optional[Mapping[str, RawMetadataValue]] = None,
asset_key: Optional[Union[AssetKey, Callable[["InputContext"], AssetKey]]] = None,
asset_partitions: Optional[Union[set[str], Callable[["InputContext"], set[str]]]] = None,
input_manager_key: Optional[str] = None,
):
return super().__new__(
cls,
dagster_type=(
NoValueSentinel
if dagster_type is NoValueSentinel
else resolve_dagster_type(dagster_type)
),
description=check.opt_str_param(description, "description"),
default_value=default_value,
metadata=check.opt_mapping_param(metadata, "metadata", key_type=str),
asset_key=check.opt_inst_param(asset_key, "asset_key", (AssetKey, FunctionType)),
asset_partitions=asset_partitions,
input_manager_key=check.opt_str_param(input_manager_key, "input_manager_key"),
)
@staticmethod
def from_definition(input_def: InputDefinition) -> "In":
return In(
dagster_type=input_def.dagster_type,
description=input_def.description,
default_value=input_def._default_value, # noqa: SLF001
metadata=input_def.metadata,
asset_key=input_def._asset_key, # noqa: SLF001
asset_partitions=input_def._asset_partitions_fn, # noqa: SLF001
input_manager_key=input_def.input_manager_key,
)
def to_definition(self, name: str) -> InputDefinition:
dagster_type = self.dagster_type if self.dagster_type is not NoValueSentinel else None
return InputDefinition(
name=name,
dagster_type=dagster_type,
description=self.description,
default_value=self.default_value,
metadata=self.metadata,
asset_key=self.asset_key,
asset_partitions=self.asset_partitions,
input_manager_key=self.input_manager_key,
)
@public
| In |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 31989,
"end": 32260
} | class ____(AbstractTemplate):
def generic(self, args, kws):
assert not kws
if len(args) == 1:
it = args[0]
if isinstance(it, types.IterableType):
return signature(it.iterator_type, *args)
@infer_global(next)
| Iter |
python | graphql-python__graphene | graphene/relay/tests/test_mutation.py | {
"start": 291,
"end": 403
} | class ____(ObjectType):
# class Meta:
# interfaces = (Node, )
id = ID()
name = String()
| MyNode |
python | walkccc__LeetCode | solutions/416. Partition Equal Subset Sum/416-2.py | {
"start": 0,
"end": 474
} | class ____:
def canPartition(self, nums: list[int]) -> bool:
summ = sum(nums)
if summ % 2 == 1:
return False
return self.knapsack_(nums, summ // 2)
def knapsack_(self, nums: list[int], subsetSum: int) -> bool:
# dp[i] := True if i can be formed by nums so far
dp = [False] * (subsetSum + 1)
dp[0] = True
for num in nums:
for i in range(subsetSum, num - 1, -1):
dp[i] = dp[i] or dp[i - num]
return dp[subsetSum]
| Solution |
python | django-extensions__django-extensions | tests/management/commands/test_describe_form.py | {
"start": 1668,
"end": 2223
} | class ____(forms.Form):
title = forms.CharField(label='Title', max_length=50)"""
call_command("describe_form", "testapp.NonEditableModel", stdout=self.out)
self.assertIn(expected_result, self.out.getvalue())
def test_should_print_form_with_fields_for_TestModel(self):
not_expected = """body = forms.CharField(label='Body')"""
call_command(
"describe_form", "testapp.BaseModel", "--fields=title", stdout=self.out
)
self.assertNotIn(not_expected, self.out.getvalue())
| NonEditableModelForm |
python | apache__airflow | task-sdk/src/airflow/sdk/api/client.py | {
"start": 13155,
"end": 13993
} | class ____:
__slots__ = ("client",)
def __init__(self, client: Client):
self.client = client
def get(self, conn_id: str) -> ConnectionResponse | ErrorResponse:
"""Get a connection from the API server."""
try:
resp = self.client.get(f"connections/{conn_id}")
except ServerResponseError as e:
if e.response.status_code == HTTPStatus.NOT_FOUND:
log.debug(
"Connection not found",
conn_id=conn_id,
detail=e.detail,
status_code=e.response.status_code,
)
return ErrorResponse(error=ErrorType.CONNECTION_NOT_FOUND, detail={"conn_id": conn_id})
raise
return ConnectionResponse.model_validate_json(resp.read())
| ConnectionOperations |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/declarative_automation/operands/operands.py | {
"start": 11905,
"end": 12719
} | class ____(SubsetAutomationCondition[AssetCheckKey]):
passed: bool
@property
def name(self) -> str:
return "check_passed" if self.passed else "check_failed"
async def compute_subset( # pyright: ignore[reportIncompatibleMethodOverride]
self, context: AutomationContext[AssetCheckKey]
) -> EntitySubset[AssetCheckKey]:
from dagster._core.storage.asset_check_execution_record import (
AssetCheckExecutionResolvedStatus,
)
target_status = (
AssetCheckExecutionResolvedStatus.SUCCEEDED
if self.passed
else AssetCheckExecutionResolvedStatus.FAILED
)
return await context.asset_graph_view.compute_subset_with_status(
key=context.key, status=target_status
)
| CheckResultCondition |
python | huggingface__transformers | src/transformers/models/cohere2_vision/modeling_cohere2_vision.py | {
"start": 1772,
"end": 3656
} | class ____(nn.Module):
def __init__(self, config: Cohere2VisionConfig):
super().__init__()
self.config = config
self.downsample_factor = config.downsample_factor
self.intermediate_size = config.alignment_intermediate_size
self.linear_1 = nn.Linear(
config.vision_config.hidden_size * (config.downsample_factor**2), self.intermediate_size, bias=True
)
self.act = nn.SiLU()
self.linear_2 = nn.Linear(self.intermediate_size // 2, config.text_config.hidden_size, bias=True)
def pixel_shuffle(self, image_features): # B, S, D
batch_size, seq_length, feature_dim = image_features.shape
height = width = int(seq_length**0.5)
image_features = image_features.reshape(image_features.shape[0], width, height, -1)
channels = image_features.shape[-1]
image_features = image_features.reshape(
batch_size, width, int(height / self.downsample_factor), int(channels * self.downsample_factor)
)
image_features = image_features.permute(0, 2, 1, 3)
image_features = image_features.reshape(
batch_size, int(height / self.downsample_factor), int(width / self.downsample_factor), -1
)
image_features = image_features.permute(0, 2, 1, 3)
return image_features
def forward(self, image_features):
image_features = self.pixel_shuffle(image_features)
hidden_states = self.linear_1(image_features)
# Split along last dimension and apply SwiGLU
x, gate = hidden_states.chunk(2, dim=-1)
hidden_states = self.act(gate) * x
hidden_states = self.linear_2(hidden_states)
return hidden_states
@dataclass
@auto_docstring(
custom_intro="""
Base class for Cohere2Vision outputs, with hidden states and attentions.
"""
)
| Cohere2VisionMultiModalProjector |
python | django__django | tests/cache/tests.py | {
"start": 62342,
"end": 64060
} | class ____(BaseMemcachedTests, TestCase):
base_params = PyLibMCCache_params
# libmemcached manages its own connections.
should_disconnect_on_close = False
@property
def incr_decr_type_error(self):
return cache._lib.ClientError
@override_settings(
CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
OPTIONS={
"binary": True,
"behaviors": {"tcp_nodelay": True},
},
)
)
def test_pylibmc_options(self):
self.assertTrue(cache._cache.binary)
self.assertEqual(cache._cache.behaviors["tcp_nodelay"], int(True))
def test_pylibmc_client_servers(self):
backend = self.base_params["BACKEND"]
tests = [
("unix:/run/memcached/socket", "/run/memcached/socket"),
("/run/memcached/socket", "/run/memcached/socket"),
("localhost", "localhost"),
("localhost:11211", "localhost:11211"),
("[::1]", "[::1]"),
("[::1]:11211", "[::1]:11211"),
("127.0.0.1", "127.0.0.1"),
("127.0.0.1:11211", "127.0.0.1:11211"),
]
for location, expected in tests:
settings = {"default": {"BACKEND": backend, "LOCATION": location}}
with self.subTest(location), self.settings(CACHES=settings):
self.assertEqual(cache.client_servers, [expected])
@unittest.skipUnless(PyMemcacheCache_params, "PyMemcacheCache backend not configured")
@override_settings(
CACHES=caches_setting_for_tests(
base=PyMemcacheCache_params,
exclude=memcached_excluded_caches,
)
)
| PyLibMCCacheTests |
python | scrapy__scrapy | tests/test_utils_asyncio.py | {
"start": 469,
"end": 730
} | class ____:
def test_is_asyncio_available(self, reactor_pytest: str) -> None:
# the result should depend only on the pytest --reactor argument
assert is_asyncio_available() == (reactor_pytest == "asyncio")
@pytest.mark.only_asyncio
| TestAsyncio |
python | falconry__falcon | falcon/errors.py | {
"start": 12285,
"end": 15117
} | class ____(HTTPError):
"""403 Forbidden.
The server understood the request but refuses to authorize it.
A server that wishes to make public why the request has been
forbidden can describe that reason in the response payload (if any).
If authentication credentials were provided in the request, the
server considers them insufficient to grant access. The client
SHOULD NOT automatically repeat the request with the same
credentials. The client MAY repeat the request with new or different
credentials. However, a request might be forbidden for reasons
unrelated to the credentials.
An origin server that wishes to "hide" the current existence of a
forbidden target resource MAY instead respond with a status code of
404 Not Found.
(See also: RFC 7231, Section 6.5.4)
All the arguments are defined as keyword-only.
Keyword Args:
title (str): Error title (default '403 Forbidden').
description (str): Human-friendly description of the error, along with
a helpful suggestion or two.
headers (dict or list): A ``dict`` of header names and values
to set, or a ``list`` of (*name*, *value*) tuples. Both *name* and
*value* must be of type ``str`` or ``StringType``, and only
character values 0x00 through 0xFF may be used on platforms that
use wide characters.
Note:
The Content-Type header, if present, will be overridden. If
you wish to return custom error messages, you can create
your own HTTP error class, and install an error handler
to convert it into an appropriate HTTP response for the
client
Note:
Falcon can process a list of ``tuple`` slightly faster
than a ``dict``.
href (str): A URL someone can visit to find out more information
(default ``None``). Unicode characters are percent-encoded.
href_text (str): If href is given, use this as the friendly
title/description for the link (default 'API documentation
for this error').
code (int): An internal code that customers can reference in their
support request or to help them when searching for knowledge
base articles related to this error (default ``None``).
"""
def __init__(
self,
*,
title: str | None = None,
description: str | None = None,
headers: HeaderArg | None = None,
**kwargs: HTTPErrorKeywordArguments,
):
super().__init__(
status.HTTP_403,
title=title,
description=description,
headers=headers,
**kwargs, # type: ignore[arg-type]
)
| HTTPForbidden |
python | huggingface__transformers | src/transformers/models/owlv2/modular_owlv2.py | {
"start": 1214,
"end": 8161
} | class ____(OwlViTImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = OPENAI_CLIP_MEAN
image_std = OPENAI_CLIP_STD
size = {"height": 960, "width": 960}
rescale_factor = 1 / 255
do_resize = True
do_rescale = True
do_normalize = True
do_pad = True
crop_size = None
do_center_crop = None
def _pad_images(self, images: "torch.Tensor", constant_value: float = 0.0) -> "torch.Tensor":
"""
Pad an image with zeros to the given size.
"""
height, width = images.shape[-2:]
size = max(height, width)
pad_bottom = size - height
pad_right = size - width
padding = (0, 0, pad_right, pad_bottom)
padded_image = F.pad(images, padding, fill=constant_value)
return padded_image
def pad(
self,
images: list["torch.Tensor"],
disable_grouping: Optional[bool],
constant_value: float = 0.0,
**kwargs,
) -> list["torch.Tensor"]:
"""
Unlike the Base class `self.pad` where all images are padded to the maximum image size,
Owlv2 pads an image to square.
"""
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
processed_images_grouped = {}
for shape, stacked_images in grouped_images.items():
stacked_images = self._pad_images(
stacked_images,
constant_value=constant_value,
)
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index)
return processed_images
def resize(
self,
image: "torch.Tensor",
size: SizeDict,
anti_aliasing: bool = True,
anti_aliasing_sigma=None,
**kwargs,
) -> "torch.Tensor":
"""
Resize an image as per the original implementation.
Args:
image (`Tensor`):
Image to resize.
size (`dict[str, int]`):
Dictionary containing the height and width to resize the image to.
anti_aliasing (`bool`, *optional*, defaults to `True`):
Whether to apply anti-aliasing when downsampling the image.
anti_aliasing_sigma (`float`, *optional*, defaults to `None`):
Standard deviation for Gaussian kernel when downsampling the image. If `None`, it will be calculated
automatically.
"""
output_shape = (size.height, size.width)
input_shape = image.shape
# select height and width from input tensor
factors = torch.tensor(input_shape[2:]).to(image.device) / torch.tensor(output_shape).to(image.device)
if anti_aliasing:
if anti_aliasing_sigma is None:
anti_aliasing_sigma = ((factors - 1) / 2).clamp(min=0)
else:
anti_aliasing_sigma = torch.atleast_1d(anti_aliasing_sigma) * torch.ones_like(factors)
if torch.any(anti_aliasing_sigma < 0):
raise ValueError("Anti-aliasing standard deviation must be greater than or equal to zero")
elif torch.any((anti_aliasing_sigma > 0) & (factors <= 1)):
warnings.warn(
"Anti-aliasing standard deviation greater than zero but not down-sampling along all axes"
)
if torch.any(anti_aliasing_sigma == 0):
filtered = image
else:
kernel_sizes = 2 * torch.ceil(3 * anti_aliasing_sigma).int() + 1
filtered = F.gaussian_blur(
image, (kernel_sizes[0], kernel_sizes[1]), sigma=anti_aliasing_sigma.tolist()
)
else:
filtered = image
out = F.resize(filtered, size=(size.height, size.width), antialias=False)
return out
def _preprocess(
self,
images: list["torch.Tensor"],
do_resize: bool,
size: SizeDict,
interpolation: Optional["F.InterpolationMode"],
do_pad: bool,
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
disable_grouping: Optional[bool],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature:
# Group images by size for batched resizing
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
processed_images_grouped = {}
for shape, stacked_images in grouped_images.items():
# Rescale images before other operations as done in original implementation
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, False, image_mean, image_std
)
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index)
if do_pad:
processed_images = self.pad(processed_images, constant_value=0.0, disable_grouping=disable_grouping)
grouped_images, grouped_images_index = group_images_by_shape(
processed_images, disable_grouping=disable_grouping
)
resized_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_resize:
resized_stack = self.resize(
image=stacked_images,
size=size,
interpolation=interpolation,
input_data_format=ChannelDimension.FIRST,
)
resized_images_grouped[shape] = resized_stack
resized_images = reorder_images(resized_images_grouped, grouped_images_index)
# Group images by size for further processing
# Needed in case do_resize is False, or resize returns images with different sizes
grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
processed_images_grouped = {}
for shape, stacked_images in grouped_images.items():
# Fused rescale and normalize
stacked_images = self.rescale_and_normalize(
stacked_images, False, rescale_factor, do_normalize, image_mean, image_std
)
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index)
processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images
return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
__all__ = ["Owlv2ImageProcessorFast"]
| Owlv2ImageProcessorFast |
python | fluentpython__example-code | attic/functions/strkeydict2.py | {
"start": 932,
"end": 1870
} | class ____(collections.UserDict): # <1>
def __init__(self, args, normalize=str, **kwargs):
super().__init__(self, *args, **kwargs)
self.normalize = normalize
def __missing__(self, key): # <2>
if self.normalize(key) == key:
raise KeyError(key)
return self[self.normalize(key)]
def __contains__(self, key):
return self.normalize(key) in self.data # <3>
def __setitem__(self, key, item):
self.data[self.normalize(key)] = item # <4>
def update(self, iterable=None, **kwds):
if iterable is not None:
if isinstance(iterable, collections.abc.Mapping): # <5>
pairs = iterable.items()
else:
pairs = ((k, v) for k, v in iterable) # <6>
for key, value in pairs:
self[key] = value # <7>
if kwds:
self.update(kwds) # <8>
# END STRKEYDICT
| StrKeyDict |
python | tensorflow__tensorflow | tensorflow/python/debug/lib/debug_v2_ops_test.py | {
"start": 11376,
"end": 31250
} | class ____(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpReduceInfNanThreeSlots(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(
debug_event_pb2.TensorDebugMode.REDUCE_INF_NAN_THREE_SLOTS)))
self.assertAllEqual(
debug_summary(constant_op.constant([])), [0.0, 0.0, 0.0])
self.assertAllEqual(
debug_summary(constant_op.constant(42.0)), [0.0, 0.0, 0.0])
self.assertAllEqual(
debug_summary(constant_op.constant([3.0, 4.0])), [0.0, 0.0, 0.0])
self.assertAllEqual(
debug_summary(constant_op.constant(np.array([3.0, -np.inf]))),
[-np.inf, 0.0, 0.0])
self.assertAllEqual(
debug_summary(constant_op.constant(np.array([[0, 0], [np.nan, 0]]))),
[0.0, 0.0, np.nan])
self.assertAllEqual(
debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, np.inf]]))),
[0.0, np.inf, np.nan])
self.assertAllEqual(
debug_summary(
constant_op.constant(np.array([[0, np.inf], [np.nan, -np.inf]]))),
[-np.inf, np.inf, np.nan])
x = np.zeros([100, 100], dtype=np.float16)
x[32, 47] = np.nan
self.assertAllEqual(
debug_summary(constant_op.constant(x)), [0.0, 0.0, np.nan])
x = np.zeros([97, 97], dtype=np.float32)
x[50, 83] = -np.inf
self.assertAllEqual(
debug_summary(constant_op.constant(x)), [-np.inf, 0.0, 0.0])
x[1, 41] = np.nan
self.assertAllEqual(
debug_summary(constant_op.constant(x)), [-np.inf, 0.0, np.nan])
x = np.zeros([9701], dtype=np.float64)
x[9700] = np.nan
self.assertAllEqual(
debug_summary(constant_op.constant(x)), [0.0, 0.0, np.nan])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpLargeTensorIDError(self):
modes = [
debug_event_pb2.TensorDebugMode.CURT_HEALTH,
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH,
debug_event_pb2.TensorDebugMode.SHAPE,
]
# Maximum allowed tensor_id
tensor_id = np.power(2, 53, dtype=np.int64)
for mode in modes:
self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
constant_op.constant(42.0),
tensor_debug_mode=mode,
tensor_id=tensor_id,
output_dtype=dtypes.float64))
# Incrementing by one should error
tensor_id += 1
for mode in modes:
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
constant_op.constant(42.0),
tensor_debug_mode=mode,
tensor_id=tensor_id,
output_dtype=dtypes.float64))
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpCurtHealthValuesSmall(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.CURT_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
tensor, tensor_id = debug_summary(constant_op.constant([]))
self.assertAllEqual(tensor, [tensor_id, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant(42.0))
self.assertAllEqual(tensor, [tensor_id, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant([3.0, 4.0]))
self.assertAllEqual(tensor, [tensor_id, 0.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([3.0, -np.inf])))
self.assertAllEqual(tensor, [tensor_id, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, 0]])))
self.assertAllEqual(tensor, [tensor_id, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, np.inf]])))
self.assertAllEqual(tensor, [tensor_id, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, np.inf], [np.nan, -np.inf]])))
self.assertAllEqual(tensor, [tensor_id, 1.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpCurtHealthValuesLarge(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.CURT_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.zeros([100, 100], dtype=np.float16)
x[32, 47] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 1.0])
x = np.zeros([97, 97], dtype=np.float32)
x[50, 83] = -np.inf
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 1.0])
x[1, 41] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 1.0])
x = np.zeros([9701], dtype=np.float64)
x[9700] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 1.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpCurtHealthConsistency(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.CURT_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.zeros([100, 100], dtype=np.float16)
x[43, 99] = np.nan
c = constant_op.constant(x)
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
x = np.zeros([100, 100, 50], dtype=np.float64)
x[0, 0, 1] = np.inf
c = constant_op.constant(x)
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
c = constant_op.constant(np.ones((100, 200), np.double))
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpDeterminism(self):
x = np.zeros([100, 100, 50], dtype=np.float64)
x = constant_op.constant(x)
modes = (
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH,
debug_event_pb2.TensorDebugMode.FULL_HEALTH,
)
for mode in modes:
debug_mode = debug_event_pb2.TensorDebugMode.Name(mode)
with test_util.deterministic_ops():
if test_util.config.list_physical_devices("GPU"):
with self.assertRaisesRegex(
errors_impl.UnimplementedError, "Determinism is not yet "
"supported for DebugNumericSummaryV2 when tensor_debug_mode is "
+ debug_mode + "."):
self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=mode,
tensor_id=x._id,
output_dtype=dtypes.float64))
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpConciseHealthSmall(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
tensor, tensor_id = debug_summary(constant_op.constant([]))
self.assertAllEqual(tensor, [tensor_id, 0.0, 0.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant(42.0))
self.assertAllEqual(tensor, [tensor_id, 1.0, 0.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant([3.0, 4.0]))
self.assertAllEqual(tensor, [tensor_id, 2.0, 0.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([3.0, -np.inf])))
self.assertAllEqual(tensor, [tensor_id, 2.0, 1.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, 0]])))
self.assertAllEqual(tensor, [tensor_id, 4.0, 0.0, 0.0, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, np.inf]])))
self.assertAllEqual(tensor, [tensor_id, 4.0, 0.0, 1.0, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, np.inf], [np.nan, -np.inf]])))
self.assertAllEqual(tensor, [tensor_id, 4.0, 1.0, 1.0, 1.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpConciseHealthLarge(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.zeros([100, 100], dtype=np.float16)
x[32, :] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 10000.0, 0.0, 0.0, 100.0])
x = np.zeros([97, 97], dtype=np.float32)
x[50, 83:85] = -np.inf
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 97 * 97, 2.0, 0.0, 0.0])
x[1:9, 41] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 97 * 97, 2.0, 0.0, 8.0])
x = np.zeros([9701], dtype=np.float64)
x[9700] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 9701, 0.0, 0.0, 1.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpConciseHealthConsistency(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
# Assert the same op is returns a consistent value
x = np.zeros([100, 100], dtype=np.float16)
x[3, 4] = -np.inf
c = constant_op.constant(x)
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
c = constant_op.constant(np.ones((100, 200), np.double))
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpShapeEmpty(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.SHAPE),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
tensor, tensor_id = debug_summary(constant_op.constant(0.0))
self.assertAllEqual(
tensor, [tensor_id, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpShapeSmall(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.SHAPE),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.zeros([3, 4], dtype=np.float32)
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(
tensor, [tensor_id, 1.0, 2.0, 12.0, 3.0, 4.0, 0.0, 0.0, 0.0, 0.0])
x = np.ones([1, 2, 3, 4, 5, 6], dtype=np.float16)
x[0, 1, 2, 2, 2, 2] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(
tensor,
[tensor_id, 19, 6.0, 2 * 3 * 4 * 5 * 6, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
x = np.zeros([2], dtype=np.float32)
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(
tensor, [tensor_id, 1.0, 1.0, 2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant([]))
self.assertAllEqual(
tensor, [tensor_id, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpShapeLarge(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.SHAPE),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.ones([1, 2, 3, 4, 5, 6, 7], dtype=np.double)
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [
tensor_id, 2.0, 7.0, 2 * 3 * 4 * 5 * 6 * 7, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0
])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpFullHealthSmall(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.FULL_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
tensor, tensor_id = debug_summary(constant_op.constant([]))
expected = [tensor_id, -1, 1, 1, 0, 0, 0, 0, 0, 0, 0]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(constant_op.constant(42.0))
expected = [tensor_id, -1, 1, 0, 1, 0, 0, 0, 0, 0, 1]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(constant_op.constant([3.0, 4.0]))
expected = [tensor_id, -1, 1, 1, 2, 0, 0, 0, 0, 0, 2]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([3, -np.inf], dtype=np.float32)))
expected = [tensor_id, -1, 1, 1, 2, 1, 0, 0, 0, 0, 1]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, 0]], dtype=np.float64)))
expected = [tensor_id, -1, 2, 2, 4, 0, 0, 1, 0, 3, 0]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(
constant_op.constant(
np.array([[0, 0], [np.nan, np.inf]], dtype=np.float16)))
expected = [tensor_id, -1, 19, 2, 4, 0, 1, 1, 0, 2, 0]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(
constant_op.constant(
np.array([[0, np.inf], [np.nan, -np.inf]], dtype=np.float32)))
expected = [tensor_id, -1, 1, 2, 4, 1, 1, 1, 0, 1, 0]
self.assertAllEqual(tensor, expected)
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpFullHealthLarge(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.FULL_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
def tensor_counts(arr):
counts = [len(np.shape(arr)), np.size(arr), 0, 0, 0, 0, 0, 0]
for n in np.ravel(arr):
if np.isneginf(n):
counts[2] += 1
elif np.isposinf(n):
counts[3] += 1
elif np.isnan(n):
counts[4] += 1
elif n < 0.:
counts[5] += 1
elif n == 0.:
counts[6] += 1
else:
counts[7] += 1
return counts
x = np.zeros([50, 50], dtype=np.float16)
x[32, 47] = np.nan
x[0:4, 3] = np.inf
x[40:50, 40:50] = 10
x[3, 20] = -10
tensor, tensor_id = debug_summary(constant_op.constant(x))
expected = [tensor_id, -1, 19] + tensor_counts(x)
self.assertAllEqual(tensor, expected)
x = np.ones([25, 25, 50], dtype=np.float32) * np.inf
x[:, :, 1] = np.nan
x[:, :, 2] = -np.inf
x[:, :, 3] = -1
x[:, :, 4] = 0
x[:, :, 5] = 1
tensor, tensor_id = debug_summary(constant_op.constant(x))
expected = [tensor_id, -1, 1] + tensor_counts(x)
self.assertAllEqual(tensor, expected)
x[0, 0, 0] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
expected = [
tensor_id,
-1,
1,
] + tensor_counts(x)
self.assertAllEqual(tensor, expected)
x = np.zeros([9701], dtype=np.float64)
x[9700] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
expected = [tensor_id, -1, 2] + tensor_counts(x)
self.assertAllEqual(tensor, expected)
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpFullHealthConsistency(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.FULL_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
# Assert the same op is returns a consistent value
x = np.zeros([100, 100], dtype=np.float16)
x[32, 47] = np.nan
x[0:4, 3] = np.inf
x[90:100, 90:100] = 10
x[3, 20] = -10
c = constant_op.constant(x)
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
x = np.ones((100, 200, 3, 10), np.double)
x[1, 30, 2] = 10
x[5, :, 0, 1] = np.nan
x[90:100, 150, :, :] = np.inf
c = constant_op.constant(x)
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
def testCheckNumericsV2OpNegativeAndPositiveInf(self):
"""Test that CheckNumericsV2 op distinguishes negative and positive infs."""
with self.session(graph=ops.Graph()):
t1 = constant_op.constant([-1.0, 1.0])
t2 = constant_op.constant([0.0, 0.0])
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r"pass through test.*had -Inf and \+Inf values"):
self.evaluate(
array_ops.check_numerics_v2(t1 / t2, message="pass through test"))
def testCheckNumericsV2OpNegativeAndPositiveInfAndNaN(self):
"""CheckNumericsV2 op distinguishes - & + infs when nan is present."""
with self.session(graph=ops.Graph()):
t1 = constant_op.constant([-1.0, 1.0, 0.0])
t2 = constant_op.constant([0.0, 0.0, 0.0])
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r"pass through test.*had -Inf, \+Inf, and NaN values"):
self.evaluate(
array_ops.check_numerics_v2(t1 / t2, message="pass through test"))
def testCheckNumericsV2PositiveInfAndNaN(self):
"""Test that CheckNumericsV2 op shows sign of inf when nan is present."""
with self.session(graph=ops.Graph()):
t1 = constant_op.constant([0.0, 1.0])
t2 = constant_op.constant([0.0, 0.0])
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r"pass through test.*had \+Inf and NaN values"):
self.evaluate(
array_ops.check_numerics_v2(t1 / t2, message="pass through test"))
if __name__ == "__main__":
  # The run_in_graph_and_eager_modes tests above require eager execution to
  # be enabled before the test runner starts.
  ops.enable_eager_execution()
  googletest.main()
| DebugNumericSummaryV2Test |
python | PyCQA__pylint | tests/functional/u/used/used_before_assignment_class_nested_under_function.py | {
"start": 288,
"end": 336
} | class ____:
"""Module-level class"""
| ModuleClass |
python | allegroai__clearml | clearml/backend_api/session/response.py | {
"start": 363,
"end": 544
} | class ____(jsonmodels.models.Base):
name = jsonmodels.fields.StringField()
requested_version = FloatOrStringField()
actual_version = FloatOrStringField()
| _ResponseEndpoint |
python | pytorch__pytorch | torch/nn/modules/loss.py | {
"start": 50448,
"end": 52571
} | class ____(_Loss):
r"""Creates a criterion that optimizes a two-class classification
logistic loss between input tensor :math:`x` and target tensor :math:`y`
(containing 1 or -1).
.. math::
\text{loss}(x, y) = \sum_i \frac{\log(1 + \exp(-y[i]*x[i]))}{\text{x.nelement}()}
Args:
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Target: :math:`(*)`, same shape as the input.
- Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same
shape as input.
"""
__constants__ = ["reduction"]
def forward(self, input: Tensor, target: Tensor) -> Tensor:
"""Runs the forward pass."""
return F.soft_margin_loss(input, target, reduction=self.reduction)
| SoftMarginLoss |
python | ray-project__ray | python/ray/autoscaler/_private/gcp/node.py | {
"start": 5888,
"end": 6911
} | class ____(GCPNode):
"""Abstraction around tpu nodes"""
# https://cloud.google.com/tpu/docs/reference/rest/v2alpha1/projects.locations.nodes#State
NON_TERMINATED_STATUSES = {"CREATING", "STARTING", "RESTARTING", "READY"}
RUNNING_STATUSES = {"READY"}
STATUS_FIELD = "state"
def get_labels(self) -> dict:
return self.get("labels", {})
@property
def num_workers(self) -> int:
return len(self.get("networkEndpoints", [{}]))
def get_external_ips(self) -> List[str]:
return self.get("networkEndpoints", [{}])
def get_external_ip(self, worker_index: int = 0) -> str:
return (
self.get_external_ips()[worker_index]
.get("accessConfig", {})
.get("externalIp", None)
)
def get_internal_ips(self) -> List[str]:
return self.get("networkEndpoints", [{}])
def get_internal_ip(self, worker_index: int = 0) -> str:
return self.get_internal_ips()[worker_index].get("ipAddress", None)
| GCPTPUNode |
python | PrefectHQ__prefect | src/integrations/prefect-snowflake/prefect_snowflake/database.py | {
"start": 682,
"end": 46191
} | class ____(DatabaseBlock):
"""
Block used to manage connections with Snowflake.
Upon instantiating, a connection is created and maintained for the life of
the object until the close method is called.
It is recommended to use this block as a context manager, which will automatically
close the engine and its connections when the context is exited.
It is also recommended that this block is loaded and consumed within a single task
or flow because if the block is passed across separate tasks and flows,
the state of the block's connection and cursor will be lost.
Args:
credentials: The credentials to authenticate with Snowflake.
database: The name of the default database to use.
warehouse: The name of the default warehouse to use.
schema: The name of the default schema to use;
this attribute is accessible through `SnowflakeConnector(...).schema_`.
fetch_size: The number of rows to fetch at a time.
poll_frequency_s: The number of seconds before checking query.
Examples:
Load stored Snowflake connector as a context manager:
```python
from prefect_snowflake.database import SnowflakeConnector
snowflake_connector = SnowflakeConnector.load("BLOCK_NAME")
```
Insert data into database and fetch results.
```python
from prefect_snowflake.database import SnowflakeConnector
with SnowflakeConnector.load("BLOCK_NAME") as conn:
conn.execute(
"CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);"
)
conn.execute_many(
"INSERT INTO customers (name, address) VALUES (%(name)s, %(address)s);",
seq_of_parameters=[
{"name": "Ford", "address": "Highway 42"},
{"name": "Unknown", "address": "Space"},
{"name": "Me", "address": "Myway 88"},
],
)
results = conn.fetch_all(
"SELECT * FROM customers WHERE address = %(address)s",
parameters={"address": "Space"}
)
print(results)
```
""" # noqa
_block_type_name = "Snowflake Connector"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/bd359de0b4be76c2254bd329fe3a267a1a3879c2-250x250.png" # noqa
_documentation_url = "https://docs.prefect.io/integrations/prefect-snowflake" # noqa
_description = "Perform data operations against a Snowflake database."
credentials: SnowflakeCredentials = Field(
default=..., description="The credentials to authenticate with Snowflake."
)
database: str = Field(
default=..., description="The name of the default database to use."
)
warehouse: str = Field(
default=..., description="The name of the default warehouse to use."
)
schema_: str = Field(
default=...,
serialization_alias="schema",
# Handles cases where the model was dumped with `by_alias=False` or `by_alias=True`
validation_alias=AliasChoices("schema_", "schema"),
description="The name of the default schema to use.",
)
fetch_size: int = Field(
default=1, description="The default number of rows to fetch at a time."
)
poll_frequency_s: int = Field(
default=1,
title="Poll Frequency [seconds]",
description=(
"The number of seconds between checking query "
"status for long running queries."
),
)
_connection: Optional[SnowflakeConnection] = None
_unique_cursors: Dict[str, SnowflakeCursor] = None
def get_connection(self, **connect_kwargs: Any) -> SnowflakeConnection:
"""
Returns an authenticated connection that can be
used to query from Snowflake databases.
Args:
**connect_kwargs: Additional arguments to pass to
`snowflake.connector.connect`.
Returns:
The authenticated SnowflakeConnection.
Examples:
```python
from prefect_snowflake.credentials import SnowflakeCredentials
from prefect_snowflake.database import SnowflakeConnector
snowflake_credentials = SnowflakeCredentials(
account="account",
user="user",
password="password",
)
snowflake_connector = SnowflakeConnector(
database="database",
warehouse="warehouse",
schema="schema",
credentials=snowflake_credentials
)
with snowflake_connector.get_connection() as connection:
...
```
"""
if self._connection is not None and self._connection.is_closed() is False:
return self._connection
connect_params = {
"database": self.database,
"warehouse": self.warehouse,
"schema": self.schema_,
}
connection = self.credentials.get_client(**connect_kwargs, **connect_params)
self._connection = connection
self.logger.info("Started a new connection to Snowflake.")
return connection
def _start_connection(self):
"""
Starts Snowflake database connection.
"""
self.get_connection()
if self._unique_cursors is None:
self._unique_cursors = {}
def _get_cursor(
self,
inputs: Dict[str, Any],
cursor_type: Type[SnowflakeCursor] = SnowflakeCursor,
) -> Tuple[bool, SnowflakeCursor]:
"""
Get a Snowflake cursor.
Args:
inputs: The inputs to generate a unique hash, used to decide
whether a new cursor should be used.
cursor_type: The class of the cursor to use when creating a
Snowflake cursor.
Returns:
Whether a cursor is new and a Snowflake cursor.
"""
self._start_connection()
input_hash = hash_objects(inputs)
if input_hash is None:
raise RuntimeError(
f"We were not able to hash your inputs, {inputs!r}, "
"which resulted in an unexpected data return; "
"please open an issue with a reproducible example."
)
if input_hash not in self._unique_cursors.keys():
new_cursor = self._connection.cursor(cursor_type)
self._unique_cursors[input_hash] = new_cursor
return True, new_cursor
else:
existing_cursor = self._unique_cursors[input_hash]
return False, existing_cursor
async def _execute_async(self, cursor: SnowflakeCursor, inputs: Dict[str, Any]):
"""Helper method to execute operations asynchronously."""
response = await run_sync_in_worker_thread(cursor.execute_async, **inputs)
self.logger.info(
f"Executing the operation, {inputs['command']!r}, asynchronously; "
f"polling for the result every {self.poll_frequency_s} seconds."
)
query_id = response["queryId"]
while self._connection.is_still_running(
await run_sync_in_worker_thread(
self._connection.get_query_status_throw_if_error, query_id
)
):
await asyncio.sleep(self.poll_frequency_s)
await run_sync_in_worker_thread(cursor.get_results_from_sfqid, query_id)
def reset_cursors(self) -> None:
"""
Tries to close all opened cursors.
Examples:
Reset the cursors to refresh cursor position.
```python
from prefect_snowflake.database import SnowflakeConnector
with SnowflakeConnector.load("BLOCK_NAME") as conn:
conn.execute(
"CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);"
)
conn.execute_many(
"INSERT INTO customers (name, address) VALUES (%(name)s, %(address)s);",
seq_of_parameters=[
{"name": "Ford", "address": "Highway 42"},
{"name": "Unknown", "address": "Space"},
{"name": "Me", "address": "Myway 88"},
],
)
print(conn.fetch_one("SELECT * FROM customers")) # Ford
conn.reset_cursors()
print(conn.fetch_one("SELECT * FROM customers")) # should be Ford again
```
""" # noqa
if not self._unique_cursors:
self.logger.info("There were no cursors to reset.")
return
input_hashes = tuple(self._unique_cursors.keys())
for input_hash in input_hashes:
cursor = self._unique_cursors.pop(input_hash)
try:
cursor.close()
except Exception as exc:
self.logger.warning(
f"Failed to close cursor for input hash {input_hash!r}: {exc}"
)
self.logger.info("Successfully reset the cursors.")
def fetch_one(
self,
operation: str,
parameters: Optional[Dict[str, Any]] = None,
cursor_type: Type[SnowflakeCursor] = SnowflakeCursor,
**execute_kwargs: Any,
) -> Tuple[Any]:
"""
Fetch a single result from the database.
Repeated calls using the same inputs to *any* of the fetch methods of this
block will skip executing the operation again, and instead,
return the next set of results from the previous execution,
until the reset_cursors method is called.
Args:
operation: The SQL query or other operation to be executed.
parameters: The parameters for the operation.
cursor_type: The class of the cursor to use when creating a Snowflake cursor.
**execute_kwargs: Additional options to pass to `cursor.execute_async`.
Returns:
A tuple containing the data returned by the database,
where each row is a tuple and each column is a value in the tuple.
Examples:
Fetch one row from the database where address is Space.
```python
from prefect_snowflake.database import SnowflakeConnector
with SnowflakeConnector.load("BLOCK_NAME") as conn:
conn.execute(
"CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);"
)
conn.execute_many(
"INSERT INTO customers (name, address) VALUES (%(name)s, %(address)s);",
seq_of_parameters=[
{"name": "Ford", "address": "Highway 42"},
{"name": "Unknown", "address": "Space"},
{"name": "Me", "address": "Myway 88"},
],
)
result = conn.fetch_one(
"SELECT * FROM customers WHERE address = %(address)s",
parameters={"address": "Space"}
)
print(result)
```
""" # noqa
inputs = dict(
command=operation,
params=parameters,
**execute_kwargs,
)
new, cursor = self._get_cursor(inputs, cursor_type=cursor_type)
if new:
cursor.execute(operation, params=parameters, **execute_kwargs)
self.logger.debug("Preparing to fetch a row.")
return cursor.fetchone()
async def fetch_one_async(
self,
operation: str,
parameters: Optional[Dict[str, Any]] = None,
cursor_type: Type[SnowflakeCursor] = SnowflakeCursor,
**execute_kwargs: Any,
) -> Tuple[Any]:
"""
Fetch a single result from the database asynchronously.
Repeated calls using the same inputs to *any* of the fetch methods of this
block will skip executing the operation again, and instead,
return the next set of results from the previous execution,
until the reset_cursors method is called.
Args:
operation: The SQL query or other operation to be executed.
parameters: The parameters for the operation.
cursor_type: The class of the cursor to use when creating a Snowflake cursor.
**execute_kwargs: Additional options to pass to `cursor.execute_async`.
Returns:
A tuple containing the data returned by the database,
where each row is a tuple and each column is a value in the tuple.
Examples:
Fetch one row from the database where address is Space.
```python
from prefect_snowflake.database import SnowflakeConnector
with SnowflakeConnector.load("BLOCK_NAME") as conn:
conn.execute(
"CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);"
)
conn.execute_many(
"INSERT INTO customers (name, address) VALUES (%(name)s, %(address)s);",
seq_of_parameters=[
{"name": "Ford", "address": "Highway 42"},
{"name": "Unknown", "address": "Space"},
{"name": "Me", "address": "Myway 88"},
],
)
result = await conn.fetch_one_async(
"SELECT * FROM customers WHERE address = %(address)s",
parameters={"address": "Space"}
)
print(result)
```
""" # noqa
inputs = dict(
command=operation,
params=parameters,
**execute_kwargs,
)
new, cursor = self._get_cursor(inputs, cursor_type=cursor_type)
if new:
await self._execute_async(cursor, inputs)
self.logger.debug("Preparing to fetch a row.")
result = await run_sync_in_worker_thread(cursor.fetchone)
return result
def fetch_many(
self,
operation: str,
parameters: Optional[Sequence[Dict[str, Any]]] = None,
size: Optional[int] = None,
cursor_type: Type[SnowflakeCursor] = SnowflakeCursor,
**execute_kwargs: Any,
) -> List[Tuple[Any]]:
"""
Fetch a limited number of results from the database.
Repeated calls using the same inputs to *any* of the fetch methods of this
block will skip executing the operation again, and instead,
return the next set of results from the previous execution,
until the reset_cursors method is called.
Args:
operation: The SQL query or other operation to be executed.
parameters: The parameters for the operation.
size: The number of results to return; if None or 0, uses the value of
`fetch_size` configured on the block.
cursor_type: The class of the cursor to use when creating a Snowflake cursor.
**execute_kwargs: Additional options to pass to `cursor.execute_async`.
Returns:
A list of tuples containing the data returned by the database,
where each row is a tuple and each column is a value in the tuple.
Examples:
Repeatedly fetch two rows from the database where address is Highway 42.
```python
from prefect_snowflake.database import SnowflakeConnector
with SnowflakeConnector.load("BLOCK_NAME") as conn:
conn.execute(
"CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);"
)
conn.execute_many(
"INSERT INTO customers (name, address) VALUES (%(name)s, %(address)s);",
seq_of_parameters=[
{"name": "Marvin", "address": "Highway 42"},
{"name": "Ford", "address": "Highway 42"},
{"name": "Unknown", "address": "Highway 42"},
{"name": "Me", "address": "Highway 42"},
],
)
result = conn.fetch_many(
"SELECT * FROM customers WHERE address = %(address)s",
parameters={"address": "Highway 42"},
size=2
)
print(result) # Marvin, Ford
result = conn.fetch_many(
"SELECT * FROM customers WHERE address = %(address)s",
parameters={"address": "Highway 42"},
size=2
)
print(result) # Unknown, Me
```
""" # noqa
inputs = dict(
command=operation,
params=parameters,
**execute_kwargs,
)
new, cursor = self._get_cursor(inputs, cursor_type)
if new:
cursor.execute(operation, params=parameters, **execute_kwargs)
size = size or self.fetch_size
self.logger.debug(f"Preparing to fetch {size} rows.")
return cursor.fetchmany(size=size)
async def fetch_many_async(
self,
operation: str,
parameters: Optional[Sequence[Dict[str, Any]]] = None,
size: Optional[int] = None,
cursor_type: Type[SnowflakeCursor] = SnowflakeCursor,
**execute_kwargs: Any,
) -> List[Tuple[Any]]:
"""
Fetch a limited number of results from the database asynchronously.
Repeated calls using the same inputs to *any* of the fetch methods of this
block will skip executing the operation again, and instead,
return the next set of results from the previous execution,
until the reset_cursors method is called.
Args:
operation: The SQL query or other operation to be executed.
parameters: The parameters for the operation.
size: The number of results to return; if None or 0, uses the value of
`fetch_size` configured on the block.
cursor_type: The class of the cursor to use when creating a Snowflake cursor.
**execute_kwargs: Additional options to pass to `cursor.execute_async`.
Returns:
A list of tuples containing the data returned by the database,
where each row is a tuple and each column is a value in the tuple.
Examples:
Repeatedly fetch two rows from the database where address is Highway 42.
```python
from prefect_snowflake.database import SnowflakeConnector
with SnowflakeConnector.load("BLOCK_NAME") as conn:
conn.execute(
"CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);"
)
conn.execute_many(
"INSERT INTO customers (name, address) VALUES (%(name)s, %(address)s);",
seq_of_parameters=[
{"name": "Marvin", "address": "Highway 42"},
{"name": "Ford", "address": "Highway 42"},
{"name": "Unknown", "address": "Highway 42"},
{"name": "Me", "address": "Highway 42"},
],
)
result = conn.fetch_many(
"SELECT * FROM customers WHERE address = %(address)s",
parameters={"address": "Highway 42"},
size=2
)
print(result) # Marvin, Ford
result = conn.fetch_many(
"SELECT * FROM customers WHERE address = %(address)s",
parameters={"address": "Highway 42"},
size=2
)
print(result) # Unknown, Me
```
""" # noqa
inputs = dict(
command=operation,
params=parameters,
**execute_kwargs,
)
new, cursor = self._get_cursor(inputs, cursor_type)
if new:
await self._execute_async(cursor, inputs)
size = size or self.fetch_size
self.logger.debug(f"Preparing to fetch {size} rows.")
result = await run_sync_in_worker_thread(cursor.fetchmany, size=size)
return result
def fetch_all(
self,
operation: str,
parameters: Optional[Dict[str, Any]] = None,
cursor_type: Type[SnowflakeCursor] = SnowflakeCursor,
**execute_kwargs: Any,
) -> List[Tuple[Any]]:
"""
Fetch all results from the database.
Repeated calls using the same inputs to *any* of the fetch methods of this
block will skip executing the operation again, and instead,
return the next set of results from the previous execution,
until the reset_cursors method is called.
Args:
operation: The SQL query or other operation to be executed.
parameters: The parameters for the operation.
cursor_type: The class of the cursor to use when creating a Snowflake cursor.
**execute_kwargs: Additional options to pass to `cursor.execute_async`.
Returns:
A list of tuples containing the data returned by the database,
where each row is a tuple and each column is a value in the tuple.
""" # noqa
inputs = dict(
command=operation,
params=parameters,
**execute_kwargs,
)
new, cursor = self._get_cursor(inputs, cursor_type)
if new:
cursor.execute(operation, params=parameters, **execute_kwargs)
self.logger.debug("Preparing to fetch all rows.")
return cursor.fetchall()
async def fetch_all_async(
self,
operation: str,
parameters: Optional[Dict[str, Any]] = None,
cursor_type: Type[SnowflakeCursor] = SnowflakeCursor,
**execute_kwargs: Any,
) -> List[Tuple[Any]]:
"""
Fetch all results from the database.
Repeated calls using the same inputs to *any* of the fetch methods of this
block will skip executing the operation again, and instead,
return the next set of results from the previous execution,
until the reset_cursors method is called.
Args:
operation: The SQL query or other operation to be executed.
parameters: The parameters for the operation.
cursor_type: The class of the cursor to use when creating a Snowflake cursor.
**execute_kwargs: Additional options to pass to `cursor.execute_async`.
Returns:
A list of tuples containing the data returned by the database,
where each row is a tuple and each column is a value in the tuple.
Examples:
Fetch all rows from the database where address is Highway 42.
```python
from prefect_snowflake.database import SnowflakeConnector
with SnowflakeConnector.load("BLOCK_NAME") as conn:
await conn.execute_async(
"CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);"
)
await conn.execute_many_async(
"INSERT INTO customers (name, address) VALUES (%(name)s, %(address)s);",
seq_of_parameters=[
{"name": "Marvin", "address": "Highway 42"},
{"name": "Ford", "address": "Highway 42"},
{"name": "Unknown", "address": "Highway 42"},
{"name": "Me", "address": "Myway 88"},
],
)
result = await conn.fetch_all_async(
"SELECT * FROM customers WHERE address = %(address)s",
parameters={"address": "Highway 42"},
)
print(result) # Marvin, Ford, Unknown
```
""" # noqa
inputs = dict(
command=operation,
params=parameters,
**execute_kwargs,
)
new, cursor = self._get_cursor(inputs, cursor_type)
if new:
await self._execute_async(cursor, inputs)
self.logger.debug("Preparing to fetch all rows.")
result = await run_sync_in_worker_thread(cursor.fetchall)
return result
def execute(
self,
operation: str,
parameters: Optional[Dict[str, Any]] = None,
cursor_type: Type[SnowflakeCursor] = SnowflakeCursor,
**execute_kwargs: Any,
) -> None:
"""
Executes an operation on the database. This method is intended to be used
for operations that do not return data, such as INSERT, UPDATE, or DELETE.
Unlike the fetch methods, this method will always execute the operation
upon calling.
Args:
operation: The SQL query or other operation to be executed.
parameters: The parameters for the operation.
cursor_type: The class of the cursor to use when creating a Snowflake cursor.
**execute_kwargs: Additional options to pass to `cursor.execute_async`.
Examples:
Create table named customers with two columns, name and address.
```python
from prefect_snowflake.database import SnowflakeConnector
with SnowflakeConnector.load("BLOCK_NAME") as conn:
conn.execute(
"CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);"
)
```
""" # noqa
self._start_connection()
inputs = dict(
command=operation,
params=parameters,
**execute_kwargs,
)
with self._connection.cursor(cursor_type) as cursor:
run_coro_as_sync(self._execute_async(cursor, inputs))
self.logger.info(f"Executed the operation, {operation!r}.")
async def execute_async(
self,
operation: str,
parameters: Optional[Dict[str, Any]] = None,
cursor_type: Type[SnowflakeCursor] = SnowflakeCursor,
**execute_kwargs: Any,
) -> None:
"""
Executes an operation on the database. This method is intended to be used
for operations that do not return data, such as INSERT, UPDATE, or DELETE.
Unlike the fetch methods, this method will always execute the operation
upon calling.
Args:
operation: The SQL query or other operation to be executed.
parameters: The parameters for the operation.
cursor_type: The class of the cursor to use when creating a Snowflake cursor.
**execute_kwargs: Additional options to pass to `cursor.execute_async`.
Examples:
Create table named customers with two columns, name and address.
```python
from prefect_snowflake.database import SnowflakeConnector
with SnowflakeConnector.load("BLOCK_NAME") as conn:
conn.execute(
"CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);"
)
```
""" # noqa
self._start_connection()
inputs = dict(
command=operation,
params=parameters,
**execute_kwargs,
)
with self._connection.cursor(cursor_type) as cursor:
await run_sync_in_worker_thread(cursor.execute, **inputs)
self.logger.info(f"Executed the operation, {operation!r}.")
def execute_many(
self,
operation: str,
seq_of_parameters: List[Dict[str, Any]],
) -> None:
"""
Executes many operations on the database. This method is intended to be used
for operations that do not return data, such as INSERT, UPDATE, or DELETE.
Unlike the fetch methods, this method will always execute the operations
upon calling.
Args:
operation: The SQL query or other operation to be executed.
seq_of_parameters: The sequence of parameters for the operation.
Examples:
Create table and insert three rows into it.
```python
from prefect_snowflake.database import SnowflakeConnector
with SnowflakeConnector.load("BLOCK_NAME") as conn:
conn.execute(
"CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);"
)
conn.execute_many(
"INSERT INTO customers (name, address) VALUES (%(name)s, %(address)s);",
seq_of_parameters=[
{"name": "Marvin", "address": "Highway 42"},
{"name": "Ford", "address": "Highway 42"},
{"name": "Unknown", "address": "Space"},
],
)
```
""" # noqa
self._start_connection()
inputs = dict(
command=operation,
seqparams=seq_of_parameters,
)
with self._connection.cursor() as cursor:
cursor.executemany(**inputs)
self.logger.info(
f"Executed {len(seq_of_parameters)} operations off {operation!r}."
)
async def execute_many_async(
self,
operation: str,
seq_of_parameters: List[Dict[str, Any]],
) -> None:
"""
Executes many operations on the database. This method is intended to be used
for operations that do not return data, such as INSERT, UPDATE, or DELETE.
Unlike the fetch methods, this method will always execute the operations
upon calling.
Args:
operation: The SQL query or other operation to be executed.
seq_of_parameters: The sequence of parameters for the operation.
Examples:
Create table and insert three rows into it.
```python
from prefect_snowflake.database import SnowflakeConnector
with SnowflakeConnector.load("BLOCK_NAME") as conn:
conn.execute(
"CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);"
)
conn.execute_many(
"INSERT INTO customers (name, address) VALUES (%(name)s, %(address)s);",
seq_of_parameters=[
{"name": "Marvin", "address": "Highway 42"},
{"name": "Ford", "address": "Highway 42"},
{"name": "Unknown", "address": "Space"},
],
)
```
""" # noqa
self._start_connection()
inputs = dict(
command=operation,
seqparams=seq_of_parameters,
)
with self._connection.cursor() as cursor:
await run_sync_in_worker_thread(cursor.executemany, **inputs)
self.logger.info(
f"Executed {len(seq_of_parameters)} operations off {operation!r}."
)
def close(self):
"""
Closes connection and its cursors.
"""
try:
self.reset_cursors()
finally:
if self._connection is None:
self.logger.info("There was no connection open to be closed.")
return
self._connection.close()
self._connection = None
self.logger.info("Successfully closed the Snowflake connection.")
def __enter__(self):
"""
Start a connection upon entry.
"""
return self
def __exit__(self, *args):
"""
Closes connection and its cursors upon exit.
"""
self.close()
def __getstate__(self):
"""Allows block to be pickled and dumped."""
data = self.__dict__.copy()
data.update({k: None for k in {"_connection", "_unique_cursors"}})
return data
def __setstate__(self, data: dict):
"""Reset connection and cursors upon loading."""
self.__dict__.update(data)
self._start_connection()
@task
def snowflake_query(
query: str,
snowflake_connector: SnowflakeConnector,
params: Union[Tuple[Any], Dict[str, Any]] = None,
cursor_type: Type[SnowflakeCursor] = SnowflakeCursor,
poll_frequency_seconds: int = 1,
) -> List[Tuple[Any]]:
"""
Executes a query against a Snowflake database.
Args:
query: The query to execute against the database.
params: The params to replace the placeholders in the query.
snowflake_connector: The credentials to use to authenticate.
cursor_type: The type of database cursor to use for the query.
poll_frequency_seconds: Number of seconds to wait in between checks for
run completion.
Returns:
The output of `response.fetchall()`.
Examples:
Query Snowflake table with the ID value parameterized.
```python
from prefect import flow
from prefect_snowflake.credentials import SnowflakeCredentials
from prefect_snowflake.database import SnowflakeConnector, snowflake_query
@flow
def snowflake_query_flow():
snowflake_credentials = SnowflakeCredentials(
account="account",
user="user",
password="password",
)
snowflake_connector = SnowflakeConnector(
database="database",
warehouse="warehouse",
schema="schema",
credentials=snowflake_credentials
)
result = snowflake_query(
"SELECT * FROM table WHERE id=%{id_param}s LIMIT 8;",
snowflake_connector,
params={"id_param": 1}
)
return result
snowflake_query_flow()
```
"""
# context manager automatically rolls back failed transactions and closes
with snowflake_connector.get_connection() as connection:
with connection.cursor(cursor_type) as cursor:
response = cursor.execute_async(query, params=params)
query_id = response["queryId"]
while connection.is_still_running(
connection.get_query_status_throw_if_error(query_id)
):
sleep(poll_frequency_seconds)
cursor.get_results_from_sfqid(query_id)
result = cursor.fetchall()
return result
@task
async def snowflake_query_async(
query: str,
snowflake_connector: SnowflakeConnector,
params: Union[Tuple[Any], Dict[str, Any]] = None,
cursor_type: Type[SnowflakeCursor] = SnowflakeCursor,
poll_frequency_seconds: int = 1,
) -> List[Tuple[Any]]:
"""
Executes a query against a Snowflake database.
Args:
query: The query to execute against the database.
params: The params to replace the placeholders in the query.
snowflake_connector: The credentials to use to authenticate.
cursor_type: The type of database cursor to use for the query.
poll_frequency_seconds: Number of seconds to wait in between checks for
run completion.
Returns:
The output of `response.fetchall()`.
Examples:
Query Snowflake table with the ID value parameterized.
```python
from prefect import flow
from prefect_snowflake.credentials import SnowflakeCredentials
from prefect_snowflake.database import SnowflakeConnector, snowflake_query
@flow
def snowflake_query_flow():
snowflake_credentials = SnowflakeCredentials(
account="account",
user="user",
password="password",
)
snowflake_connector = SnowflakeConnector(
database="database",
warehouse="warehouse",
schema="schema",
credentials=snowflake_credentials
)
result = snowflake_query(
"SELECT * FROM table WHERE id=%{id_param}s LIMIT 8;",
snowflake_connector,
params={"id_param": 1}
)
return result
snowflake_query_flow()
```
"""
# context manager automatically rolls back failed transactions and closes
with snowflake_connector.get_connection() as connection:
with connection.cursor(cursor_type) as cursor:
response = cursor.execute_async(query, params=params)
query_id = response["queryId"]
while connection.is_still_running(
connection.get_query_status_throw_if_error(query_id)
):
await asyncio.sleep(poll_frequency_seconds)
cursor.get_results_from_sfqid(query_id)
result = cursor.fetchall()
return result
@task
def snowflake_multiquery(
queries: List[str],
snowflake_connector: SnowflakeConnector,
params: Union[Tuple[Any], Dict[str, Any]] = None,
cursor_type: Type[SnowflakeCursor] = SnowflakeCursor,
as_transaction: bool = False,
return_transaction_control_results: bool = False,
poll_frequency_seconds: int = 1,
) -> List[List[Tuple[Any]]]:
"""
Executes multiple queries against a Snowflake database in a shared session.
Allows execution in a transaction.
Args:
queries: The list of queries to execute against the database.
params: The params to replace the placeholders in the query.
snowflake_connector: The credentials to use to authenticate.
cursor_type: The type of database cursor to use for the query.
as_transaction: If True, queries are executed in a transaction.
return_transaction_control_results: Determines if the results of queries
controlling the transaction (BEGIN/COMMIT) should be returned.
poll_frequency_seconds: Number of seconds to wait in between checks for
run completion.
Returns:
List of the outputs of `response.fetchall()` for each query.
Examples:
Query Snowflake table with the ID value parameterized.
```python
from prefect import flow
from prefect_snowflake.credentials import SnowflakeCredentials
from prefect_snowflake.database import SnowflakeConnector, snowflake_multiquery
@flow
def snowflake_multiquery_flow():
snowflake_credentials = SnowflakeCredentials(
account="account",
user="user",
password="password",
)
snowflake_connector = SnowflakeConnector(
database="database",
warehouse="warehouse",
schema="schema",
credentials=snowflake_credentials
)
result = snowflake_multiquery(
["SELECT * FROM table WHERE id=%{id_param}s LIMIT 8;", "SELECT 1,2"],
snowflake_connector,
params={"id_param": 1},
as_transaction=True
)
return result
snowflake_multiquery_flow()
```
"""
with snowflake_connector.get_connection() as connection:
if as_transaction:
queries.insert(0, BEGIN_TRANSACTION_STATEMENT)
queries.append(END_TRANSACTION_STATEMENT)
with connection.cursor(cursor_type) as cursor:
results = []
for query in queries:
response = cursor.execute_async(query, params=params)
query_id = response["queryId"]
while connection.is_still_running(
connection.get_query_status_throw_if_error(query_id)
):
sleep(poll_frequency_seconds)
cursor.get_results_from_sfqid(query_id)
result = cursor.fetchall()
results.append(result)
# cut off results from BEGIN/COMMIT queries
if as_transaction and not return_transaction_control_results:
return results[1:-1]
else:
return results
@task
async def snowflake_multiquery_async(
queries: List[str],
snowflake_connector: SnowflakeConnector,
params: Union[Tuple[Any], Dict[str, Any]] = None,
cursor_type: Type[SnowflakeCursor] = SnowflakeCursor,
as_transaction: bool = False,
return_transaction_control_results: bool = False,
poll_frequency_seconds: int = 1,
) -> List[List[Tuple[Any]]]:
"""
Executes multiple queries against a Snowflake database in a shared session.
Allows execution in a transaction.
Args:
queries: The list of queries to execute against the database.
params: The params to replace the placeholders in the query.
snowflake_connector: The credentials to use to authenticate.
cursor_type: The type of database cursor to use for the query.
as_transaction: If True, queries are executed in a transaction.
return_transaction_control_results: Determines if the results of queries
controlling the transaction (BEGIN/COMMIT) should be returned.
poll_frequency_seconds: Number of seconds to wait in between checks for
run completion.
Returns:
List of the outputs of `response.fetchall()` for each query.
Examples:
Query Snowflake table with the ID value parameterized.
```python
from prefect import flow
from prefect_snowflake.credentials import SnowflakeCredentials
from prefect_snowflake.database import SnowflakeConnector, snowflake_multiquery
@flow
def snowflake_multiquery_flow():
snowflake_credentials = SnowflakeCredentials(
account="account",
user="user",
password="password",
)
snowflake_connector = SnowflakeConnector(
database="database",
warehouse="warehouse",
schema="schema",
credentials=snowflake_credentials
)
result = snowflake_multiquery(
["SELECT * FROM table WHERE id=%{id_param}s LIMIT 8;", "SELECT 1,2"],
snowflake_connector,
params={"id_param": 1},
as_transaction=True
)
return result
snowflake_multiquery_flow()
```
"""
with snowflake_connector.get_connection() as connection:
if as_transaction:
queries.insert(0, BEGIN_TRANSACTION_STATEMENT)
queries.append(END_TRANSACTION_STATEMENT)
with connection.cursor(cursor_type) as cursor:
results = []
for query in queries:
response = cursor.execute_async(query, params=params)
query_id = response["queryId"]
while connection.is_still_running(
connection.get_query_status_throw_if_error(query_id)
):
await asyncio.sleep(poll_frequency_seconds)
cursor.get_results_from_sfqid(query_id)
result = cursor.fetchall()
results.append(result)
# cut off results from BEGIN/COMMIT queries
if as_transaction and not return_transaction_control_results:
return results[1:-1]
else:
return results
@task
def snowflake_query_sync(
query: str,
snowflake_connector: SnowflakeConnector,
params: Union[Tuple[Any], Dict[str, Any]] = None,
cursor_type: Type[SnowflakeCursor] = SnowflakeCursor,
) -> List[Tuple[Any]]:
"""
Executes a query in sync mode against a Snowflake database.
Args:
query: The query to execute against the database.
params: The params to replace the placeholders in the query.
snowflake_connector: The credentials to use to authenticate.
cursor_type: The type of database cursor to use for the query.
Returns:
The output of `response.fetchall()`.
Examples:
Execute a put statement.
```python
from prefect import flow
from prefect_snowflake.credentials import SnowflakeCredentials
from prefect_snowflake.database import SnowflakeConnector, snowflake_query
@flow
def snowflake_query_sync_flow():
snowflake_credentials = SnowflakeCredentials(
account="account",
user="user",
password="password",
)
snowflake_connector = SnowflakeConnector(
database="database",
warehouse="warehouse",
schema="schema",
credentials=snowflake_credentials
)
result = snowflake_query_sync(
"put file://a_file.csv @mystage;",
snowflake_connector,
)
return result
snowflake_query_sync_flow()
```
"""
# context manager automatically rolls back failed transactions and closes
with snowflake_connector.get_connection() as connection:
with connection.cursor(cursor_type) as cursor:
cursor.execute(query, params=params)
result = cursor.fetchall()
return result
| SnowflakeConnector |
python | tornadoweb__tornado | tornado/test/httpclient_test.py | {
"start": 1087,
"end": 1282
} | class ____(RequestHandler):
def post(self):
self.finish(
"Post arg1: %s, arg2: %s"
% (self.get_argument("arg1"), self.get_argument("arg2"))
)
| PostHandler |
python | donnemartin__interactive-coding-challenges | linked_lists/delete_mid/test_delete_mid.py | {
"start": 18,
"end": 1402
} | class ____(unittest.TestCase):
def test_delete_node(self):
print('Test: Empty list, null node to delete')
linked_list = MyLinkedList(None)
linked_list.delete_node(None)
self.assertEqual(linked_list.get_all_data(), [])
print('Test: One node')
head = Node(2)
linked_list = MyLinkedList(head)
linked_list.delete_node(head)
self.assertEqual(linked_list.get_all_data(), [None])
print('Test: Multiple nodes')
linked_list = MyLinkedList(None)
node0 = linked_list.insert_to_front(2)
node1 = linked_list.insert_to_front(3)
node2 = linked_list.insert_to_front(4)
node3 = linked_list.insert_to_front(1)
linked_list.delete_node(node1)
self.assertEqual(linked_list.get_all_data(), [1, 4, 2])
print('Test: Multiple nodes, delete last element')
linked_list = MyLinkedList(None)
node0 = linked_list.insert_to_front(2)
node1 = linked_list.insert_to_front(3)
node2 = linked_list.insert_to_front(4)
node3 = linked_list.insert_to_front(1)
linked_list.delete_node(node0)
self.assertEqual(linked_list.get_all_data(), [1, 4, 3, None])
print('Success: test_delete_node')
def main():
test = TestDeleteNode()
test.test_delete_node()
if __name__ == '__main__':
main()
| TestDeleteNode |
python | joke2k__faker | faker/providers/ssn/en_US/__init__.py | {
"start": 67,
"end": 6848
} | class ____(BaseProvider):
INVALID_SSN_TYPE = "INVALID_SSN"
SSN_TYPE = "SSN"
ITIN_TYPE = "ITIN"
EIN_TYPE = "EIN"
def itin(self) -> str:
"""Generate a random United States Individual Taxpayer Identification Number (ITIN).
An United States Individual Taxpayer Identification Number
(ITIN) is a tax processing number issued by the Internal
Revenue Service. It is a nine-digit number that always begins
with the number 9 and has a range of 70-88 in the fourth and
fifth digit. Effective April 12, 2011, the range was extended
to include 900-70-0000 through 999-88-9999, 900-90-0000
through 999-92-9999 and 900-94-0000 through 999-99-9999.
https://www.irs.gov/individuals/international-taxpayers/general-itin-information
"""
area = self.random_int(min=900, max=999)
serial = self.random_int(min=0, max=9999)
# The group number must be between 70 and 99 inclusively but not 89 or 93
group: int = self.random_element([x for x in range(70, 100) if x not in [89, 93]])
itin = f"{area:03d}-{group:02d}-{serial:04d}"
return itin
def ein(self) -> str:
"""Generate a random United States Employer Identification Number (EIN).
An United States An Employer Identification Number (EIN) is
also known as a Federal Tax Identification Number, and is
used to identify a business entity. EINs follow a format of a
two-digit prefix followed by a hyphen and a seven-digit sequence:
##-######
https://www.irs.gov/businesses/small-businesses-self-employed/employer-id-numbers
"""
# Only certain EIN Prefix values are assigned:
#
# https://www.irs.gov/businesses/small-businesses-self-employed/how-eins-are-assigned-and-valid-ein-prefixes
ein_prefix_choices: List[str] = [
"01",
"02",
"03",
"04",
"05",
"06",
"10",
"11",
"12",
"13",
"14",
"15",
"16",
"20",
"21",
"22",
"23",
"24",
"25",
"26",
"27",
"30",
"31",
"32",
"33",
"34",
"35",
"36",
"37",
"38",
"39",
"40",
"41",
"42",
"43",
"44",
"45",
"46",
"47",
"48",
"50",
"51",
"52",
"53",
"54",
"55",
"56",
"57",
"58",
"59",
"60",
"61",
"62",
"63",
"64",
"65",
"66",
"67",
"68",
"71",
"72",
"73",
"74",
"75",
"76",
"77",
"80",
"81",
"82",
"83",
"84",
"85",
"86",
"87",
"88",
"90",
"91",
"92",
"93",
"94",
"95",
"98",
"99",
]
ein_prefix: str = self.random_element(ein_prefix_choices)
sequence = self.random_int(min=0, max=9999999)
ein = f"{ein_prefix:s}-{sequence:07d}"
return ein
def invalid_ssn(self) -> str:
"""Generate a random invalid United States Social Security Identification Number (SSN).
Invalid SSNs have the following characteristics:
Cannot begin with the number 9
Cannot begin with 666 in positions 1 - 3
Cannot begin with 000 in positions 1 - 3
Cannot contain 00 in positions 4 - 5
Cannot contain 0000 in positions 6 - 9
https://www.ssa.gov/kc/SSAFactSheet--IssuingSSNs.pdf
Additionally, return an invalid SSN that is NOT a valid ITIN by excluding certain ITIN related "group" values
"""
itin_group_numbers = [
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
80,
81,
82,
83,
84,
85,
86,
87,
88,
90,
91,
92,
94,
95,
96,
97,
98,
99,
]
area = self.random_int(min=0, max=999)
if area < 900 and area not in {666, 0}:
random_group_or_serial = self.random_int(min=1, max=1000)
if random_group_or_serial <= 500:
group = 0
serial = self.random_int(0, 9999)
else:
group = self.random_int(0, 99)
serial = 0
elif area in {666, 0}:
group = self.random_int(0, 99)
serial = self.random_int(0, 9999)
else:
group = self.random_element([x for x in range(0, 100) if x not in itin_group_numbers])
serial = self.random_int(0, 9999)
invalid_ssn = f"{area:03d}-{group:02d}-{serial:04d}"
return invalid_ssn
def ssn(self, taxpayer_identification_number_type: str = SSN_TYPE) -> str:
"""Generate a random United States Taxpayer Identification Number of the specified type.
If no type is specified, a US SSN is returned.
"""
if taxpayer_identification_number_type == self.ITIN_TYPE:
return self.itin()
elif taxpayer_identification_number_type == self.EIN_TYPE:
return self.ein()
elif taxpayer_identification_number_type == self.INVALID_SSN_TYPE:
return self.invalid_ssn()
elif taxpayer_identification_number_type == self.SSN_TYPE:
# Certain numbers are invalid for United States Social Security
# Numbers. The area (first 3 digits) cannot be 666 or 900-999.
# The group number (middle digits) cannot be 00. The serial
# (last 4 digits) cannot be 0000.
area = self.random_int(min=1, max=899)
if area == 666:
area += 1
group = self.random_int(1, 99)
serial = self.random_int(1, 9999)
ssn = f"{area:03d}-{group:02d}-{serial:04d}"
return ssn
else:
raise ValueError(
"taxpayer_identification_number_type must be one of 'SSN', 'EIN', 'ITIN', or 'INVALID_SSN'."
)
| Provider |
python | celery__celery | celery/backends/azureblockblob.py | {
"start": 719,
"end": 6071
} | class ____(KeyValueStoreBackend):
"""Azure Storage Block Blob backend for Celery."""
def __init__(self,
url=None,
container_name=None,
*args,
**kwargs):
"""
Supported URL formats:
azureblockblob://CONNECTION_STRING
azureblockblob://DefaultAzureCredential@STORAGE_ACCOUNT_URL
azureblockblob://ManagedIdentityCredential@STORAGE_ACCOUNT_URL
"""
super().__init__(*args, **kwargs)
if azurestorage is None or azurestorage.__version__ < '12':
raise ImproperlyConfigured(
"You need to install the azure-storage-blob v12 library to"
"use the AzureBlockBlob backend")
conf = self.app.conf
self._connection_string = self._parse_url(url)
self._container_name = (
container_name or
conf["azureblockblob_container_name"])
self.base_path = conf.get('azureblockblob_base_path', '')
self._connection_timeout = conf.get(
'azureblockblob_connection_timeout', 20
)
self._read_timeout = conf.get('azureblockblob_read_timeout', 120)
@classmethod
def _parse_url(cls, url, prefix=AZURE_BLOCK_BLOB_CONNECTION_PREFIX):
connection_string = url[len(prefix):]
if not connection_string:
raise ImproperlyConfigured("Invalid URL")
return connection_string
@cached_property
def _blob_service_client(self):
"""Return the Azure Storage Blob service client.
If this is the first call to the property, the client is created and
the container is created if it doesn't yet exist.
"""
if (
"DefaultAzureCredential" in self._connection_string or
"ManagedIdentityCredential" in self._connection_string
):
# Leveraging the work that Kombu already did for us
credential_, url = AzureStorageQueuesTransport.parse_uri(
self._connection_string
)
client = BlobServiceClient(
account_url=url,
credential=credential_,
connection_timeout=self._connection_timeout,
read_timeout=self._read_timeout,
)
else:
client = BlobServiceClient.from_connection_string(
self._connection_string,
connection_timeout=self._connection_timeout,
read_timeout=self._read_timeout,
)
try:
client.create_container(name=self._container_name)
msg = f"Container created with name {self._container_name}."
except ResourceExistsError:
msg = f"Container with name {self._container_name} already." \
"exists. This will not be created."
LOGGER.info(msg)
return client
def get(self, key):
"""Read the value stored at the given key.
Args:
key: The key for which to read the value.
"""
key = bytes_to_str(key)
LOGGER.debug("Getting Azure Block Blob %s/%s", self._container_name, key)
blob_client = self._blob_service_client.get_blob_client(
container=self._container_name,
blob=f'{self.base_path}{key}',
)
try:
return blob_client.download_blob().readall().decode()
except ResourceNotFoundError:
return None
def set(self, key, value):
"""Store a value for a given key.
Args:
key: The key at which to store the value.
value: The value to store.
"""
key = bytes_to_str(key)
LOGGER.debug(f"Creating azure blob at {self._container_name}/{key}")
blob_client = self._blob_service_client.get_blob_client(
container=self._container_name,
blob=f'{self.base_path}{key}',
)
blob_client.upload_blob(value, overwrite=True)
def mget(self, keys):
"""Read all the values for the provided keys.
Args:
keys: The list of keys to read.
"""
return [self.get(key) for key in keys]
def delete(self, key):
"""Delete the value at a given key.
Args:
key: The key of the value to delete.
"""
key = bytes_to_str(key)
LOGGER.debug(f"Deleting azure blob at {self._container_name}/{key}")
blob_client = self._blob_service_client.get_blob_client(
container=self._container_name,
blob=f'{self.base_path}{key}',
)
blob_client.delete_blob()
def as_uri(self, include_password=False):
if include_password:
return (
f'{AZURE_BLOCK_BLOB_CONNECTION_PREFIX}'
f'{self._connection_string}'
)
connection_string_parts = self._connection_string.split(';')
account_key_prefix = 'AccountKey='
redacted_connection_string_parts = [
f'{account_key_prefix}**' if part.startswith(account_key_prefix)
else part
for part in connection_string_parts
]
return (
f'{AZURE_BLOCK_BLOB_CONNECTION_PREFIX}'
f'{";".join(redacted_connection_string_parts)}'
)
| AzureBlockBlobBackend |
python | tornadoweb__tornado | tornado/test/util_test.py | {
"start": 7660,
"end": 8793
} | class ____(unittest.TestCase):
def setUp(self):
def function(x, y, callback=None, z=None):
pass
self.replacer = ArgReplacer(function, "callback")
def test_omitted(self):
args = (1, 2)
kwargs: Dict[str, Any] = dict()
self.assertIsNone(self.replacer.get_old_value(args, kwargs))
self.assertEqual(
self.replacer.replace("new", args, kwargs),
(None, (1, 2), dict(callback="new")),
)
def test_position(self):
args = (1, 2, "old", 3)
kwargs: Dict[str, Any] = dict()
self.assertEqual(self.replacer.get_old_value(args, kwargs), "old")
self.assertEqual(
self.replacer.replace("new", args, kwargs),
("old", [1, 2, "new", 3], dict()),
)
def test_keyword(self):
args = (1,)
kwargs = dict(y=2, callback="old", z=3)
self.assertEqual(self.replacer.get_old_value(args, kwargs), "old")
self.assertEqual(
self.replacer.replace("new", args, kwargs),
("old", (1,), dict(y=2, callback="new", z=3)),
)
| ArgReplacerTest |
python | realpython__materials | python-protocol/adder_v4.py | {
"start": 76,
"end": 144
} | class ____(Protocol[T]):
def add(self, x: T, y: T) -> T: ...
| Adder |
python | pydantic__pydantic | pydantic/v1/types.py | {
"start": 16357,
"end": 19299
} | class ____(list): # type: ignore
# Needed for pydantic to detect that this is a list
__origin__ = list
__args__: Tuple[Type[T], ...] # type: ignore
min_items: Optional[int] = None
max_items: Optional[int] = None
unique_items: Optional[bool] = None
item_type: Type[T] # type: ignore
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.list_length_validator
if cls.unique_items:
yield cls.unique_items_validator
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(field_schema, minItems=cls.min_items, maxItems=cls.max_items, uniqueItems=cls.unique_items)
@classmethod
def list_length_validator(cls, v: 'Optional[List[T]]') -> 'Optional[List[T]]':
if v is None:
return None
v = list_validator(v)
v_len = len(v)
if cls.min_items is not None and v_len < cls.min_items:
raise errors.ListMinLengthError(limit_value=cls.min_items)
if cls.max_items is not None and v_len > cls.max_items:
raise errors.ListMaxLengthError(limit_value=cls.max_items)
return v
@classmethod
def unique_items_validator(cls, v: 'Optional[List[T]]') -> 'Optional[List[T]]':
if v is None:
return None
for i, value in enumerate(v, start=1):
if value in v[i:]:
raise errors.ListUniqueItemsError()
return v
def conlist(
item_type: Type[T], *, min_items: Optional[int] = None, max_items: Optional[int] = None, unique_items: bool = None
) -> Type[List[T]]:
# __args__ is needed to conform to typing generics api
namespace = dict(
min_items=min_items, max_items=max_items, unique_items=unique_items, item_type=item_type, __args__=(item_type,)
)
# We use new_class to be able to deal with Generic types
return new_class('ConstrainedListValue', (ConstrainedList,), {}, lambda ns: ns.update(namespace))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PYOBJECT TYPE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if TYPE_CHECKING:
PyObject = Callable[..., Any]
else:
class PyObject:
validate_always = True
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, value: Any) -> Any:
if isinstance(value, Callable):
return value
try:
value = str_validator(value)
except errors.StrError:
raise errors.PyObjectError(error_message='value is neither a valid import path not a valid callable')
try:
return import_string(value)
except ImportError as e:
raise errors.PyObjectError(error_message=str(e))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DECIMAL TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| ConstrainedList |
python | PyCQA__pylint | tests/functional/u/used/used_before_assignment_typing.py | {
"start": 6646,
"end": 6785
} | class ____(NamedTuple):
"""Note: current false negative if outer() called before this declaration."""
field: int
outer()
| MyNamedTuple |
python | astropy__astropy | astropy/uncertainty/tests/test_distribution.py | {
"start": 21801,
"end": 22435
} | class ____(StructuredDtypeBase):
@classmethod
def setup_class(cls):
super().setup_class()
cls.unit = u.Unit("km, m")
cls.d_unit = cls.unit
def test_init_via_structured_samples(self):
distribution = self.distribution << self.unit
d = Distribution(distribution)
assert d.unit == self.d_unit
assert_array_equal(d.distribution, distribution)
assert_array_equal(d.value.distribution, self.distribution)
def test_init_via_structured_distribution(self):
d = self.d << self.unit
assert d.unit == self.d_unit
| TestStructuredQuantityDistributionInit |
python | great-expectations__great_expectations | tests/data_context/abstract_data_context/test_data_docs_config_crud.py | {
"start": 2768,
"end": 5213
} | class ____:
@pytest.mark.unit
def test_update_data_docs_site(
self,
ephemeral_context_with_defaults: EphemeralDataContext,
new_site_config: dict,
):
# Add a new site
new_site_name = "my_new_site"
ephemeral_context_with_defaults.add_data_docs_site(
site_name=new_site_name, site_config=new_site_config
)
# Update the new site
updated_site_config = copy.deepcopy(new_site_config)
updated_site_config["store_backend"]["base_directory"] = "/my_updated_site/"
ephemeral_context_with_defaults.update_data_docs_site(new_site_name, updated_site_config)
# Check the updated site config
sites = ephemeral_context_with_defaults.variables.data_docs_sites
assert sites[new_site_name]["store_backend"]["base_directory"] == "/my_updated_site/"
@pytest.mark.unit
def test_update_data_docs_site_persists(
self,
ephemeral_context_with_defaults: EphemeralDataContext,
new_site_config: dict,
):
# Add a new site
new_site_name = "my_new_site"
ephemeral_context_with_defaults.add_data_docs_site(
site_name=new_site_name, site_config=new_site_config
)
# Update the new site
updated_site_config = copy.deepcopy(new_site_config)
updated_site_config["store_backend"]["base_directory"] = "/my_updated_site/"
with mock.patch(
"great_expectations.data_context.EphemeralDataContext._save_project_config"
) as mock_save_project_config:
ephemeral_context_with_defaults.update_data_docs_site(
new_site_name, updated_site_config
)
mock_save_project_config.assert_called_once()
@pytest.mark.unit
def test_update_data_docs_site_missing_site_raises_exception(
self,
ephemeral_context_with_defaults: EphemeralDataContext,
new_site_config: dict,
):
# Check fixture configuration
assert "missing" not in ephemeral_context_with_defaults.get_site_names()
with pytest.raises(gx_exceptions.InvalidKeyError) as e:
ephemeral_context_with_defaults.update_data_docs_site(
site_name="missing", site_config=new_site_config
)
assert "Data Docs Site `missing` does not already exist in the Data Context." in str(
e.value
)
| TestUpdateDataDocsSite |
python | Lightning-AI__lightning | tests/tests_pytorch/helpers/advanced_models.py | {
"start": 7364,
"end": 8824
} | class ____(LightningModule):
def __init__(self):
super().__init__()
self.batch_size = 10
self.in_features = 10
self.out_features = 5
self.hidden_dim = 20
self.automatic_optimization = False
self.truncated_bptt_steps = 10
self.rnn = nn.LSTM(self.in_features, self.hidden_dim, batch_first=True)
self.linear_out = nn.Linear(in_features=self.hidden_dim, out_features=self.out_features)
def forward(self, x, hs):
seq, hs = self.rnn(x, hs)
return self.linear_out(seq), hs
def training_step(self, batch, batch_idx):
x, y = batch
split_x, split_y = [
x.tensor_split(self.truncated_bptt_steps, dim=1),
y.tensor_split(self.truncated_bptt_steps, dim=1),
]
hiddens = None
optimizer = self.optimizers()
losses = []
for x, y in zip(split_x, split_y):
y_pred, hiddens = self(x, hiddens)
loss = F.mse_loss(y_pred, y)
optimizer.zero_grad()
self.manual_backward(loss)
optimizer.step()
# "Truncate"
hiddens = [h.detach() for h in hiddens]
losses.append(loss.detach())
return
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=0.001)
def train_dataloader(self):
return DataLoader(AverageDataset(), batch_size=self.batch_size)
| TBPTTModule |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 21614,
"end": 25711
} | class ____:
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
@pytest.mark.filterwarnings(
"ignore:.*set_string_function.*:DeprecationWarning"
)
def test_unicode_assignment(self):
# gh-5049
from numpy._core.arrayprint import set_printoptions
@contextmanager
def inject_str(s):
""" replace ndarray.__str__ temporarily """
set_printoptions(formatter={"all": lambda x: s})
try:
yield
finally:
set_printoptions()
a1d = np.array(['test'])
a0d = np.array('done')
with inject_str('bad'):
a1d[0] = a0d # previously this would invoke __str__
assert_equal(a1d[0], 'done')
# this would crash for the same reason
np.array([np.array('\xe5\xe4\xf6')])
def test_stringlike_empty_list(self):
# gh-8902
u = np.array(['done'])
b = np.array([b'done'])
class bad_sequence:
def __getitem__(self, _, /): pass
def __len__(self): raise RuntimeError
assert_raises(ValueError, operator.setitem, u, 0, [])
assert_raises(ValueError, operator.setitem, b, 0, [])
assert_raises(ValueError, operator.setitem, u, 0, bad_sequence())
assert_raises(ValueError, operator.setitem, b, 0, bad_sequence())
def test_longdouble_assignment(self):
# only relevant if longdouble is larger than float
# we're looking for loss of precision
for dtype in (np.longdouble, np.clongdouble):
# gh-8902
tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype)
tinya = np.nextafter(np.longdouble(0), -1).astype(dtype)
# construction
tiny1d = np.array([tinya])
assert_equal(tiny1d[0], tinya)
# scalar = scalar
tiny1d[0] = tinyb
assert_equal(tiny1d[0], tinyb)
# 0d = scalar
tiny1d[0, ...] = tinya
assert_equal(tiny1d[0], tinya)
# 0d = 0d
tiny1d[0, ...] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
# scalar = 0d
tiny1d[0] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
arr = np.array([np.array(tinya)])
assert_equal(arr[0], tinya)
def test_cast_to_string(self):
# cast to str should do "str(scalar)", not "str(scalar.item())"
# When converting a float to a string via array assignment, we
# want to ensure that the conversion uses str(scalar) to preserve
# the expected precision.
a = np.zeros(1, dtype='S20')
a[:] = np.array(['1.12345678901234567890'], dtype='f8')
assert_equal(a[0], b"1.1234567890123457")
| TestAssignment |
python | weaviate__weaviate-python-client | weaviate/util.py | {
"start": 16110,
"end": 25019
} | class ____:
def __init__(self, major: int, minor: int, patch: int) -> None:
self.major = major
self.minor = minor
self.patch = patch
def __eq__(self, other: object) -> bool:
if not isinstance(other, _ServerVersion):
return NotImplemented
return self.major == other.major and self.minor == other.minor and self.patch == other.patch
def __neq__(self, other: object) -> bool:
return not self.__eq__(other)
def __gt__(self, other: "_ServerVersion") -> bool:
if self.major > other.major:
return True
elif self.major == other.major:
if self.minor > other.minor:
return True
elif self.minor == other.minor:
if self.patch > other.patch:
return True
return False
def __lt__(self, other: "_ServerVersion") -> bool:
return not self.__gt__(other) and not self.__eq__(other)
def __ge__(self, other: "_ServerVersion") -> bool:
return self.__gt__(other) or self.__eq__(other)
def __le__(self, other: "_ServerVersion") -> bool:
return self.__lt__(other) or self.__eq__(other)
def __repr__(self) -> str:
return f"{self.major}.{self.minor}.{self.patch}"
def __str__(self) -> str:
return f"{self.major}.{self.minor}.{self.patch}"
def is_at_least(self, major: int, minor: int, patch: int) -> bool:
return self >= _ServerVersion(major, minor, patch)
def is_lower_than(self, major: int, minor: int, patch: int) -> bool:
return self < _ServerVersion(major, minor, patch)
@classmethod
def from_string(cls, version: str) -> "_ServerVersion":
initial = version
if version == "":
version = "0"
if version.count(".") == 0:
version = version + ".0"
if version.count(".") == 1:
version = version + ".0"
pattern = r"v?(\d+)\.(\d+)\.(\d+)"
match = re.match(pattern, version)
if match:
ver_tup = tuple(map(int, match.groups()))
return cls(major=ver_tup[0], minor=ver_tup[1], patch=ver_tup[2])
else:
raise ValueError(
f"Unable to parse a version from the input string: {initial}. Is it in the format '(v)x.y.z' (e.g. 'v1.18.2' or '1.18.0')?"
)
def check_is_at_least_1_25_0(self, feature: str) -> None:
if not self >= _ServerVersion(1, 25, 0):
raise WeaviateUnsupportedFeatureError(feature, str(self), "1.25.0")
def check_is_at_least_1_32_0(self, feature: str) -> None:
if not self >= _ServerVersion(1, 32, 0):
raise WeaviateUnsupportedFeatureError(feature, str(self), "1.32.0")
@property
def supports_tenants_get_grpc(self) -> bool:
return self >= _ServerVersion(1, 25, 0)
def is_weaviate_too_old(current_version_str: str) -> bool:
"""Check if the user should be gently nudged to upgrade their Weaviate server version.
Args:
current_version_str: The version of the Weaviate server that the client is connected to. (e.g. "v1.18.2" or "1.18.0")
Returns:
True if the user should be nudged to upgrade.
"""
current_version = parse_version_string(current_version_str)
minimum_version = parse_version_string(MINIMUM_NO_WARNING_VERSION)
return minimum_version > current_version
def is_weaviate_client_too_old(current_version_str: str, latest_version_str: str) -> bool:
"""Check if the user should be gently nudged to upgrade their Weaviate client version.
Args:
current_version_str: The version of the Weaviate client that is being used (e.g. "v1.18.2" or "1.18.0")
latest_version_str: The latest version of the Weaviate client to compare against (e.g. "v1.18.2" or "1.18.0")
Returns:
`True` if the user should be nudged to upgrade. `False` if the user is using a valid version or if the version could not be parsed.
"""
try:
current_version = parse_version_string(current_version_str)
latest_major, latest_minor = parse_version_string(latest_version_str)
minimum_minor = max(latest_minor - MAXIMUM_MINOR_VERSION_DELTA, 0)
minimum_version = (latest_major, minimum_minor)
return minimum_version > current_version
except ValueError:
return False
def _get_valid_timeout_config(
timeout_config: Union[Tuple[NUMBER, NUMBER], NUMBER, None],
) -> Tuple[NUMBER, NUMBER]:
"""Validate and return TimeOut configuration.
Args:
timeout_config: Set the timeout configuration for all requests to the Weaviate server. It can be a
number or, a tuple of two numbers: (connect timeout, read timeout).
If only one number is passed then both connect and read timeout will be set to
that value.
Raises:
TypeError: If arguments are of a wrong data type.
ValueError: If 'timeout_config' is not a tuple of 2.
ValueError: If 'timeout_config' is/contains negative number/s.
"""
def check_number(num: Union[NUMBER, Tuple[NUMBER, NUMBER], None]) -> bool:
return isinstance(num, float) or isinstance(num, int)
if (isinstance(timeout_config, float) or isinstance(timeout_config, int)) and not isinstance(
timeout_config, bool
):
assert timeout_config is not None
if timeout_config <= 0.0:
raise ValueError("'timeout_config' cannot be non-positive number/s!")
return timeout_config, timeout_config
if not isinstance(timeout_config, tuple):
raise TypeError("'timeout_config' should be a (or tuple of) positive number/s!")
if len(timeout_config) != 2:
raise ValueError("'timeout_config' must be of length 2!")
if not (check_number(timeout_config[0]) and check_number(timeout_config[1])) or (
isinstance(timeout_config[0], bool) and isinstance(timeout_config[1], bool)
):
raise TypeError("'timeout_config' must be tuple of numbers")
if timeout_config[0] <= 0.0 or timeout_config[1] <= 0.0:
raise ValueError("'timeout_config' cannot be non-positive number/s!")
return timeout_config
def _type_request_response(json_response: Any) -> Optional[Dict[str, Any]]:
if json_response is None:
return None
assert isinstance(json_response, dict)
return json_response
def _to_beacons(uuids: UUIDS, to_class: str = "") -> List[Dict[str, str]]:
if isinstance(uuids, uuid_lib.UUID) or isinstance(
uuids, str
): # replace with isinstance(uuids, UUID) in 3.10
uuids = [uuids]
if len(to_class) > 0:
to_class = to_class + "/"
return [{"beacon": f"weaviate://localhost/{to_class}{uuid_to}"} for uuid_to in uuids]
def _decode_json_response_dict(response: httpx.Response, location: str) -> Optional[Dict[str, Any]]:
if response is None:
return None
if 200 <= response.status_code < 300:
try:
json_response = cast(Dict[str, Any], response.json())
return json_response
except (httpx.DecodingError, json.decoder.JSONDecodeError):
raise ResponseCannotBeDecodedError(location, response)
raise UnexpectedStatusCodeError(location, response)
def _decode_json_response_list(
response: httpx.Response, location: str
) -> Optional[List[Dict[str, Any]]]:
if response is None:
return None
if 200 <= response.status_code < 300:
try:
json_response = response.json()
return cast(list, json_response)
except (httpx.DecodingError, json.decoder.JSONDecodeError):
raise ResponseCannotBeDecodedError(location, response)
raise UnexpectedStatusCodeError(location, response)
def _datetime_to_string(value: TIME) -> str:
if value.tzinfo is None:
_Warnings.datetime_insertion_with_no_specified_timezone(value)
value = value.replace(tzinfo=datetime.timezone.utc)
return value.isoformat(sep="T", timespec="microseconds")
def _datetime_from_weaviate_str(string: str) -> datetime.datetime:
if string[-1] != "Z":
string = "".join(string.rsplit(":", 1))
# Weaviate can return up to 9 digits for milliseconds, but Python datetime only supports 6 digits.
string = re.sub(r"(?<=\.\d{6})\d+(?=[Z+-])", "", string)
# pick format with or without microseconds
date_format = "%Y-%m-%dT%H:%M:%S.%f%z" if "." in string else "%Y-%m-%dT%H:%M:%S%z"
try:
return datetime.datetime.strptime(string, date_format)
except ValueError as e:
# note that the year 9999 is valid and does not need to be handled. for 5 digit years only the first
# 4 digits are considered and it wrapps around
if "year 0 is out of range" in str(e):
_Warnings.datetime_year_zero(string)
return datetime.datetime.min
raise e
| _ServerVersion |
python | pypa__warehouse | warehouse/accounts/forms.py | {
"start": 2772,
"end": 3042
} | class ____:
username = wtforms.StringField(
validators=[
wtforms.validators.InputRequired(),
PreventNullBytesValidator(),
_check_for_email_in_username,
_check_for_existing_username,
],
)
| UsernameMixin |
python | ansible__ansible | lib/ansible/parsing/yaml/objects.py | {
"start": 389,
"end": 653
} | class ____(dict):
"""Backwards compatibility type."""
def __new__(cls, value=_UNSET, /, **kwargs):
if value is _UNSET:
return dict(**kwargs)
return _datatag.AnsibleTagHelper.tag_copy(value, dict(value, **kwargs))
| _AnsibleMapping |
python | ray-project__ray | doc/source/serve/doc_code/fault_tolerance/replica_health_check.py | {
"start": 190,
"end": 679
} | class ____:
def __init__(self, db_addr: str):
self._my_db_connection = connect_to_db(db_addr)
def __call__(self, request):
return self._do_something_cool()
# Called by Serve to check the replica's health.
def check_health(self):
if not self._my_db_connection.is_connected():
# The specific type of exception is not important.
raise RuntimeError("uh-oh, DB connection is broken.")
# __health_check_end__
| MyDeployment |
python | openai__openai-python | src/openai/types/beta/realtime/input_audio_buffer_cleared_event.py | {
"start": 206,
"end": 429
} | class ____(BaseModel):
event_id: str
"""The unique ID of the server event."""
type: Literal["input_audio_buffer.cleared"]
"""The event type, must be `input_audio_buffer.cleared`."""
| InputAudioBufferClearedEvent |
python | sympy__sympy | sympy/concrete/products.py | {
"start": 635,
"end": 18456
} | class ____(ExprWithIntLimits):
r"""
Represents unevaluated products.
Explanation
===========
``Product`` represents a finite or infinite product, with the first
argument being the general form of terms in the series, and the second
argument being ``(dummy_variable, start, end)``, with ``dummy_variable``
taking all integer values from ``start`` through ``end``. In accordance
with long-standing mathematical convention, the end term is included in
the product.
Finite products
===============
For finite products (and products with symbolic limits assumed to be finite)
we follow the analogue of the summation convention described by Karr [1],
especially definition 3 of section 1.4. The product:
.. math::
\prod_{m \leq i < n} f(i)
has *the obvious meaning* for `m < n`, namely:
.. math::
\prod_{m \leq i < n} f(i) = f(m) f(m+1) \cdot \ldots \cdot f(n-2) f(n-1)
with the upper limit value `f(n)` excluded. The product over an empty set is
one if and only if `m = n`:
.. math::
\prod_{m \leq i < n} f(i) = 1 \quad \mathrm{for} \quad m = n
Finally, for all other products over empty sets we assume the following
definition:
.. math::
\prod_{m \leq i < n} f(i) = \frac{1}{\prod_{n \leq i < m} f(i)} \quad \mathrm{for} \quad m > n
It is important to note that above we define all products with the upper
limit being exclusive. This is in contrast to the usual mathematical notation,
but does not affect the product convention. Indeed we have:
.. math::
\prod_{m \leq i < n} f(i) = \prod_{i = m}^{n - 1} f(i)
where the difference in notation is intentional to emphasize the meaning,
with limits typeset on the top being inclusive.
Examples
========
>>> from sympy.abc import a, b, i, k, m, n, x
>>> from sympy import Product, oo
>>> Product(k, (k, 1, m))
Product(k, (k, 1, m))
>>> Product(k, (k, 1, m)).doit()
factorial(m)
>>> Product(k**2,(k, 1, m))
Product(k**2, (k, 1, m))
>>> Product(k**2,(k, 1, m)).doit()
factorial(m)**2
Wallis' product for pi:
>>> W = Product(2*i/(2*i-1) * 2*i/(2*i+1), (i, 1, oo))
>>> W
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, oo))
Direct computation currently fails:
>>> W.doit()
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, oo))
But we can approach the infinite product by a limit of finite products:
>>> from sympy import limit
>>> W2 = Product(2*i/(2*i-1)*2*i/(2*i+1), (i, 1, n))
>>> W2
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, n))
>>> W2e = W2.doit()
>>> W2e
4**n*factorial(n)**2/(2**(2*n)*RisingFactorial(1/2, n)*RisingFactorial(3/2, n))
>>> limit(W2e, n, oo)
pi/2
By the same formula we can compute sin(pi/2):
>>> from sympy import combsimp, pi, gamma, simplify
>>> P = pi * x * Product(1 - x**2/k**2, (k, 1, n))
>>> P = P.subs(x, pi/2)
>>> P
pi**2*Product(1 - pi**2/(4*k**2), (k, 1, n))/2
>>> Pe = P.doit()
>>> Pe
pi**2*RisingFactorial(1 - pi/2, n)*RisingFactorial(1 + pi/2, n)/(2*factorial(n)**2)
>>> limit(Pe, n, oo).gammasimp()
sin(pi**2/2)
>>> Pe.rewrite(gamma)
(-1)**n*pi**2*gamma(pi/2)*gamma(n + 1 + pi/2)/(2*gamma(1 + pi/2)*gamma(-n + pi/2)*gamma(n + 1)**2)
Products with the lower limit being larger than the upper one:
>>> Product(1/i, (i, 6, 1)).doit()
120
>>> Product(i, (i, 2, 5)).doit()
120
The empty product:
>>> Product(i, (i, n, n-1)).doit()
1
An example showing that the symbolic result of a product is still
valid for seemingly nonsensical values of the limits. Then the Karr
convention allows us to give a perfectly valid interpretation to
those products by interchanging the limits according to the above rules:
>>> P = Product(2, (i, 10, n)).doit()
>>> P
2**(n - 9)
>>> P.subs(n, 5)
1/16
>>> Product(2, (i, 10, 5)).doit()
1/16
>>> 1/Product(2, (i, 6, 9)).doit()
1/16
An explicit example of the Karr summation convention applied to products:
>>> P1 = Product(x, (i, a, b)).doit()
>>> P1
x**(-a + b + 1)
>>> P2 = Product(x, (i, b+1, a-1)).doit()
>>> P2
x**(a - b - 1)
>>> simplify(P1 * P2)
1
And another one:
>>> P1 = Product(i, (i, b, a)).doit()
>>> P1
RisingFactorial(b, a - b + 1)
>>> P2 = Product(i, (i, a+1, b-1)).doit()
>>> P2
RisingFactorial(a + 1, -a + b - 1)
>>> P1 * P2
RisingFactorial(b, a - b + 1)*RisingFactorial(a + 1, -a + b - 1)
>>> combsimp(P1 * P2)
1
See Also
========
Sum, summation
product
References
==========
.. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
Volume 28 Issue 2, April 1981, Pages 305-350
https://dl.acm.org/doi/10.1145/322248.322255
.. [2] https://en.wikipedia.org/wiki/Multiplication#Capital_Pi_notation
.. [3] https://en.wikipedia.org/wiki/Empty_product
"""
__slots__ = ()
limits: tuple[tuple[Symbol, Expr, Expr]]
def __new__(cls, function, *symbols, **assumptions):
obj = ExprWithIntLimits.__new__(cls, function, *symbols, **assumptions)
return obj
def _eval_rewrite_as_Sum(self, *args, **kwargs):
return exp(Sum(log(self.function), *self.limits))
@property
def term(self):
return self._args[0]
function = term
def _eval_is_zero(self):
if self.has_empty_sequence:
return False
z = self.term.is_zero
if z is True:
return True
if self.has_finite_limits:
# A Product is zero only if its term is zero assuming finite limits.
return z
def _eval_is_extended_real(self):
if self.has_empty_sequence:
return True
return self.function.is_extended_real
def _eval_is_positive(self):
if self.has_empty_sequence:
return True
if self.function.is_positive and self.has_finite_limits:
return True
def _eval_is_nonnegative(self):
if self.has_empty_sequence:
return True
if self.function.is_nonnegative and self.has_finite_limits:
return True
def _eval_is_extended_nonnegative(self):
if self.has_empty_sequence:
return True
if self.function.is_extended_nonnegative:
return True
def _eval_is_extended_nonpositive(self):
if self.has_empty_sequence:
return True
def _eval_is_finite(self):
if self.has_finite_limits and self.function.is_finite:
return True
def doit(self, **hints):
# first make sure any definite limits have product
# variables with matching assumptions
reps = {}
for xab in self.limits:
d = _dummy_with_inherited_properties_concrete(xab)
if d:
reps[xab[0]] = d
if reps:
undo = {v: k for k, v in reps.items()}
did = self.xreplace(reps).doit(**hints)
if isinstance(did, tuple): # when separate=True
did = tuple([i.xreplace(undo) for i in did])
else:
did = did.xreplace(undo)
return did
from sympy.simplify.powsimp import powsimp
f = self.function
for index, limit in enumerate(self.limits):
i, a, b = limit
dif = b - a
if dif.is_integer and dif.is_negative:
a, b = b + 1, a - 1
f = 1 / f
g = self._eval_product(f, (i, a, b))
if g in (None, S.NaN):
return self.func(powsimp(f), *self.limits[index:])
else:
f = g
if hints.get('deep', True):
return f.doit(**hints)
else:
return powsimp(f)
def _eval_conjugate(self):
return self.func(self.function.conjugate(), *self.limits)
def _eval_product(self, term, limits):
(k, a, n) = limits
if k not in term.free_symbols:
if (term - 1).is_zero:
return S.One
return term**(n - a + 1)
if a == n:
return term.subs(k, a)
from .delta import deltaproduct, _has_simple_delta
if term.has(KroneckerDelta) and _has_simple_delta(term, limits[0]):
return deltaproduct(term, limits)
dif = n - a
definite = dif.is_Integer
if definite and (dif < 100):
return self._eval_product_direct(term, limits)
elif term.is_polynomial(k):
poly = term.as_poly(k)
A = B = Q = S.One
all_roots = roots(poly)
M = 0
for r, m in all_roots.items():
M += m
A *= RisingFactorial(a - r, n - a + 1)**m
Q *= (n - r)**m
if M < poly.degree():
arg = quo(poly, Q.as_poly(k))
B = self.func(arg, (k, a, n)).doit()
return poly.LC()**(n - a + 1) * A * B
elif term.is_Add:
factored = factor_terms(term, fraction=True)
if factored.is_Mul:
return self._eval_product(factored, (k, a, n))
elif term.is_Mul:
# Factor in part without the summation variable and part with
without_k, with_k = term.as_coeff_mul(k)
if len(with_k) >= 2:
# More than one term including k, so still a multiplication
exclude, include = [], []
for t in with_k:
p = self._eval_product(t, (k, a, n))
if p is not None:
exclude.append(p)
else:
include.append(t)
if not exclude:
return None
else:
arg = term._new_rawargs(*include)
A = Mul(*exclude)
B = self.func(arg, (k, a, n)).doit()
return without_k**(n - a + 1)*A * B
else:
# Just a single term
p = self._eval_product(with_k[0], (k, a, n))
if p is None:
p = self.func(with_k[0], (k, a, n)).doit()
return without_k**(n - a + 1)*p
elif term.is_Pow:
if not term.base.has(k):
s = summation(term.exp, (k, a, n))
return term.base**s
elif not term.exp.has(k):
p = self._eval_product(term.base, (k, a, n))
if p is not None:
return p**term.exp
elif isinstance(term, Product):
evaluated = term.doit()
f = self._eval_product(evaluated, limits)
if f is None:
return self.func(evaluated, limits)
else:
return f
if definite:
return self._eval_product_direct(term, limits)
def _eval_simplify(self, **kwargs):
from sympy.simplify.simplify import product_simplify
rv = product_simplify(self, **kwargs)
return rv.doit() if kwargs['doit'] else rv
def _eval_transpose(self):
if self.is_commutative:
return self.func(self.function.transpose(), *self.limits)
return None
def _eval_product_direct(self, term, limits):
(k, a, n) = limits
return Mul(*[term.subs(k, a + i) for i in range(n - a + 1)])
def _eval_derivative(self, x):
if isinstance(x, Symbol) and x not in self.free_symbols:
return S.Zero
f, limits = self.function, list(self.limits)
limit = limits.pop(-1)
if limits:
f = self.func(f, *limits)
i, a, b = limit
if x in a.free_symbols or x in b.free_symbols:
return None
h = Dummy()
rv = Sum( Product(f, (i, a, h - 1)) * Product(f, (i, h + 1, b)) * Derivative(f, x, evaluate=True).subs(i, h), (h, a, b))
return rv
def is_convergent(self):
r"""
See docs of :obj:`.Sum.is_convergent()` for explanation of convergence
in SymPy.
Explanation
===========
The infinite product:
.. math::
\prod_{1 \leq i < \infty} f(i)
is defined by the sequence of partial products:
.. math::
\prod_{i=1}^{n} f(i) = f(1) f(2) \cdots f(n)
as n increases without bound. The product converges to a non-zero
value if and only if the sum:
.. math::
\sum_{1 \leq i < \infty} \log{f(n)}
converges.
Examples
========
>>> from sympy import Product, Symbol, cos, pi, exp, oo
>>> n = Symbol('n', integer=True)
>>> Product(n/(n + 1), (n, 1, oo)).is_convergent()
False
>>> Product(1/n**2, (n, 1, oo)).is_convergent()
False
>>> Product(cos(pi/n), (n, 1, oo)).is_convergent()
True
>>> Product(exp(-n**2), (n, 1, oo)).is_convergent()
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Infinite_product
"""
sequence_term = self.function
log_sum = log(sequence_term)
lim = self.limits
try:
is_conv = Sum(log_sum, *lim).is_convergent()
except NotImplementedError:
if Sum(sequence_term - 1, *lim).is_absolutely_convergent() is S.true:
return S.true
raise NotImplementedError("The algorithm to find the product convergence of %s "
"is not yet implemented" % (sequence_term))
return is_conv
def reverse_order(expr, *indices):
"""
Reverse the order of a limit in a Product.
Explanation
===========
``reverse_order(expr, *indices)`` reverses some limits in the expression
``expr`` which can be either a ``Sum`` or a ``Product``. The selectors in
the argument ``indices`` specify some indices whose limits get reversed.
These selectors are either variable names or numerical indices counted
starting from the inner-most limit tuple.
Examples
========
>>> from sympy import gamma, Product, simplify, Sum
>>> from sympy.abc import x, y, a, b, c, d
>>> P = Product(x, (x, a, b))
>>> Pr = P.reverse_order(x)
>>> Pr
Product(1/x, (x, b + 1, a - 1))
>>> Pr = Pr.doit()
>>> Pr
1/RisingFactorial(b + 1, a - b - 1)
>>> simplify(Pr.rewrite(gamma))
Piecewise((gamma(b + 1)/gamma(a), b > -1), ((-1)**(-a + b + 1)*gamma(1 - a)/gamma(-b), True))
>>> P = P.doit()
>>> P
RisingFactorial(a, -a + b + 1)
>>> simplify(P.rewrite(gamma))
Piecewise((gamma(b + 1)/gamma(a), a > 0), ((-1)**(-a + b + 1)*gamma(1 - a)/gamma(-b), True))
While one should prefer variable names when specifying which limits
to reverse, the index counting notation comes in handy in case there
are several symbols with the same name.
>>> S = Sum(x*y, (x, a, b), (y, c, d))
>>> S
Sum(x*y, (x, a, b), (y, c, d))
>>> S0 = S.reverse_order(0)
>>> S0
Sum(-x*y, (x, b + 1, a - 1), (y, c, d))
>>> S1 = S0.reverse_order(1)
>>> S1
Sum(x*y, (x, b + 1, a - 1), (y, d + 1, c - 1))
Of course we can mix both notations:
>>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(x, 1)
Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))
>>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(y, x)
Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))
See Also
========
sympy.concrete.expr_with_intlimits.ExprWithIntLimits.index,
reorder_limit,
sympy.concrete.expr_with_intlimits.ExprWithIntLimits.reorder
References
==========
.. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
Volume 28 Issue 2, April 1981, Pages 305-350
https://dl.acm.org/doi/10.1145/322248.322255
"""
l_indices = list(indices)
for i, indx in enumerate(l_indices):
if not isinstance(indx, int):
l_indices[i] = expr.index(indx)
e = 1
limits = []
for i, limit in enumerate(expr.limits):
l = limit
if i in l_indices:
e = -e
l = (limit[0], limit[2] + 1, limit[1] - 1)
limits.append(l)
return Product(expr.function ** e, *limits)
def product(*args, **kwargs):
r"""
Compute the product.
Explanation
===========
The notation for symbols is similar to the notation used in Sum or
Integral. product(f, (i, a, b)) computes the product of f with
respect to i from a to b, i.e.,
::
b
_____
product(f(n), (i, a, b)) = | | f(n)
| |
i = a
If it cannot compute the product, it returns an unevaluated Product object.
Repeated products can be computed by introducing additional symbols tuples::
Examples
========
>>> from sympy import product, symbols
>>> i, n, m, k = symbols('i n m k', integer=True)
>>> product(i, (i, 1, k))
factorial(k)
>>> product(m, (i, 1, k))
m**k
>>> product(i, (i, 1, k), (k, 1, n))
Product(factorial(k), (k, 1, n))
"""
prod = Product(*args, **kwargs)
if isinstance(prod, Product):
return prod.doit(deep=False)
else:
return prod
| Product |
python | cython__cython | Cython/Compiler/AnalysedTreeTransforms.py | {
"start": 293,
"end": 3786
} | class ____(ScopeTrackingTransform):
# Handles autotestdict directive
excludelist = ['__cinit__', '__dealloc__', '__richcmp__',
'__nonzero__', '__bool__',
'__len__', '__contains__']
def visit_ModuleNode(self, node):
if node.is_pxd:
return node
self.scope_type = 'module'
self.scope_node = node
if not self.current_directives['autotestdict']:
return node
self.all_docstrings = self.current_directives['autotestdict.all']
self.cdef_docstrings = self.all_docstrings or self.current_directives['autotestdict.cdef']
assert isinstance(node.body, StatListNode)
# First see if __test__ is already created
if '__test__' in node.scope.entries:
# Do nothing
return node
pos = node.pos
self.tests = []
self.testspos = node.pos
test_dict_entry = node.scope.declare_var(EncodedString('__test__'),
py_object_type,
pos,
visibility='public')
create_test_dict_assignment = SingleAssignmentNode(pos,
lhs=NameNode(pos, name=EncodedString('__test__'),
entry=test_dict_entry),
rhs=DictNode(pos, key_value_pairs=self.tests))
self.visitchildren(node)
node.body.stats.append(create_test_dict_assignment)
return node
def add_test(self, testpos, path, doctest):
pos = self.testspos
keystr = EncodedString(f'{path} (line {testpos[1]:d})')
key = UnicodeNode(pos, value=keystr)
value = UnicodeNode(pos, value=doctest)
self.tests.append(DictItemNode(pos, key=key, value=value))
def visit_ExprNode(self, node):
# expressions cannot contain functions and lambda expressions
# do not have a docstring
return node
def visit_FuncDefNode(self, node):
if not node.doc or (isinstance(node, DefNode) and node.fused_py_func):
return node
if not self.cdef_docstrings:
if isinstance(node, CFuncDefNode) and not node.py_func:
return node
if not self.all_docstrings and '>>>' not in node.doc:
return node
pos = self.testspos
if self.scope_type == 'module':
path = node.entry.name
elif self.scope_type in ('pyclass', 'cclass'):
if isinstance(node, CFuncDefNode):
if node.py_func is not None:
name = node.py_func.name
else:
name = node.entry.name
else:
name = node.name
if self.scope_type == 'cclass' and name in self.excludelist:
return node
if self.scope_type == 'pyclass':
class_name = self.scope_node.name
else:
class_name = self.scope_node.class_name
if isinstance(node.entry.scope, Symtab.PropertyScope):
property_method_name = node.entry.scope.name
path = "%s.%s.%s" % (class_name, node.entry.scope.name,
node.entry.name)
else:
path = "%s.%s" % (class_name, node.entry.name)
else:
assert False
self.add_test(node.pos, path, node.doc)
return node
| AutoTestDictTransform |
python | getsentry__sentry | src/sentry/apidocs/examples/project_examples.py | {
"start": 14090,
"end": 17934
} | class ____:
CLIENT_KEY_RESPONSE = [
OpenApiExample(
"Client key with rate limiting",
value=KEY_RATE_LIMIT,
status_codes=["200", "201"],
response_only=True,
),
]
DETAILED_PROJECT = [
OpenApiExample(
"Get detailed view about a Project",
value=DETAILED_PROJECT,
status_codes=["200"],
response_only=True,
),
]
OVERVIEW_PROJECT = [
OpenApiExample(
"Get an overview of a Project",
summary=(
"Project overviews are high-level summaries of a project. They are intended to provide a "
"quick and lightweight way to get information about a project."
),
value=BASE_PROJECT,
status_codes=["200"],
response_only=True,
),
]
CREATE_PROJECT = [
OpenApiExample(
"Project successfully created",
value=PROJECT_SUMMARY,
status_codes=["201"],
response_only=True,
),
]
LIST_CLIENT_KEYS = [
OpenApiExample(
"List Client Keys for a Project",
value=[
KEY_RATE_LIMIT,
KEY_NO_RATE_LIMIT,
],
status_codes=["200"],
response_only=True,
),
]
ADD_TEAM_TO_PROJECT = [
OpenApiExample(
"Give a Team Access to a Project",
value=project_with_team(extra_team=True),
status_codes=["201"],
response_only=True,
),
]
DELETE_TEAM_FROM_PROJECT = [
OpenApiExample(
"Revoke a Team's Access to a Project",
value=project_with_team(),
status_codes=["200"],
response_only=True,
),
]
GET_SYMBOL_SOURCES = [
OpenApiExample(
"List custom symbol sources configured for a project.",
value=SYMBOL_SOURCES,
status_codes=["200"],
response_only=True,
),
]
ADD_SYMBOL_SOURCE = [
OpenApiExample(
"List custom symbol sources configured for a project.",
value=SYMBOL_SOURCES[0],
status_codes=["201"],
response_only=True,
),
]
ADD_SYMBOL_SOURCE = [
OpenApiExample(
"Add a custom symbol source to a project.",
value=SYMBOL_SOURCES[0],
status_codes=["201"],
response_only=True,
),
]
UPDATE_SYMBOL_SOURCE = [
OpenApiExample(
"Update a custom symbol source in a project.",
value=SYMBOL_SOURCES[0],
status_codes=["200"],
response_only=True,
),
]
DELETE_SYMBOL_SOURCE = [
OpenApiExample(
"Delete a custom symbol source from a project.",
status_codes=["204"],
response_only=True,
),
]
GET_PROJECT_FILTERS = [
OpenApiExample(
"List a project's filters",
value=[
{"id": "browser-extensions", "active": False},
{"id": "filtered-transaction", "active": True},
{
"id": "legacy-browsers",
"active": [
"opera",
"edge",
"safari",
"chrome",
"ie",
"opera_mini",
"firefox",
"android",
],
},
{"id": "localhost", "active": False},
{"id": "web-crawlers", "active": True},
],
status_codes=["200"],
response_only=True,
),
]
| ProjectExamples |
python | pandas-dev__pandas | pandas/tests/frame/indexing/test_xs.py | {
"start": 3991,
"end": 13520
} | class ____:
def test_xs_doc_example(self):
# TODO: more descriptive name
# based on example in advanced.rst
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
tuples = list(zip(*arrays))
index = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(
np.random.default_rng(2).standard_normal((3, 8)),
index=["A", "B", "C"],
columns=index,
)
result = df.xs(("one", "bar"), level=("second", "first"), axis=1)
expected = df.iloc[:, [0]]
tm.assert_frame_equal(result, expected)
def test_xs_integer_key(self):
# see GH#2107
dates = range(20111201, 20111205)
ids = list("abcde")
index = MultiIndex.from_product([dates, ids], names=["date", "secid"])
df = DataFrame(
np.random.default_rng(2).standard_normal((len(index), 3)),
index,
["X", "Y", "Z"],
)
result = df.xs(20111201, level="date")
expected = df.loc[20111201, :]
tm.assert_frame_equal(result, expected)
def test_xs_level(self, multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
result = df.xs("two", level="second")
expected = df[df.index.get_level_values(1) == "two"]
expected.index = Index(["foo", "bar", "baz", "qux"], name="first")
tm.assert_frame_equal(result, expected)
def test_xs_level_eq_2(self):
arr = np.random.default_rng(2).standard_normal((3, 5))
index = MultiIndex(
levels=[["a", "p", "x"], ["b", "q", "y"], ["c", "r", "z"]],
codes=[[2, 0, 1], [2, 0, 1], [2, 0, 1]],
)
df = DataFrame(arr, index=index)
expected = DataFrame(arr[1:2], index=[["a"], ["b"]])
result = df.xs("c", level=2)
tm.assert_frame_equal(result, expected)
def test_xs_setting_with_copy_error(self, multiindex_dataframe_random_data):
# this is a copy in 0.14
df = multiindex_dataframe_random_data
df_orig = df.copy()
result = df.xs("two", level="second")
result[:] = 10
tm.assert_frame_equal(df, df_orig)
def test_xs_setting_with_copy_error_multiple(self, four_level_index_dataframe):
# this is a copy in 0.14
df = four_level_index_dataframe
df_orig = df.copy()
result = df.xs(("a", 4), level=["one", "four"])
result[:] = 10
tm.assert_frame_equal(df, df_orig)
@pytest.mark.parametrize("key, level", [("one", "second"), (["one"], ["second"])])
def test_xs_with_duplicates(self, key, level, multiindex_dataframe_random_data):
# see GH#13719
frame = multiindex_dataframe_random_data
df = concat([frame] * 2)
assert df.index.is_unique is False
expected = concat([frame.xs("one", level="second")] * 2)
if isinstance(key, list):
result = df.xs(tuple(key), level=level)
else:
result = df.xs(key, level=level)
tm.assert_frame_equal(result, expected)
def test_xs_missing_values_in_index(self):
# see GH#6574
# missing values in returned index should be preserved
acc = [
("a", "abcde", 1),
("b", "bbcde", 2),
("y", "yzcde", 25),
("z", "xbcde", 24),
("z", None, 26),
("z", "zbcde", 25),
("z", "ybcde", 26),
]
df = DataFrame(acc, columns=["a1", "a2", "cnt"]).set_index(["a1", "a2"])
expected = DataFrame(
{"cnt": [24, 26, 25, 26]},
index=Index(["xbcde", np.nan, "zbcde", "ybcde"], name="a2"),
)
result = df.xs("z", level="a1")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"key, level, exp_arr, exp_index",
[
("a", "lvl0", lambda x: x[:, 0:2], Index(["bar", "foo"], name="lvl1")),
("foo", "lvl1", lambda x: x[:, 1:2], Index(["a"], name="lvl0")),
],
)
def test_xs_named_levels_axis_eq_1(self, key, level, exp_arr, exp_index):
# see GH#2903
arr = np.random.default_rng(2).standard_normal((4, 4))
index = MultiIndex(
levels=[["a", "b"], ["bar", "foo", "hello", "world"]],
codes=[[0, 0, 1, 1], [0, 1, 2, 3]],
names=["lvl0", "lvl1"],
)
df = DataFrame(arr, columns=index)
result = df.xs(key, level=level, axis=1)
expected = DataFrame(exp_arr(arr), columns=exp_index)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"indexer",
[
lambda df: df.xs(("a", 4), level=["one", "four"]),
lambda df: df.xs("a").xs(4, level="four"),
],
)
def test_xs_level_multiple(self, indexer, four_level_index_dataframe):
df = four_level_index_dataframe
expected_values = [[0.4473, 1.4152, 0.2834, 1.00661, 0.1744]]
expected_index = MultiIndex(
levels=[["q"], [20.0]], codes=[[0], [0]], names=["two", "three"]
)
expected = DataFrame(
expected_values, index=expected_index, columns=list("ABCDE")
)
result = indexer(df)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"indexer", [lambda df: df.xs("a", level=0), lambda df: df.xs("a")]
)
def test_xs_level0(self, indexer, four_level_index_dataframe):
df = four_level_index_dataframe
expected_values = [
[-0.5109, -2.3358, -0.4645, 0.05076, 0.364],
[0.4473, 1.4152, 0.2834, 1.00661, 0.1744],
]
expected_index = MultiIndex(
levels=[["b", "q"], [10.0032, 20.0], [4, 5]],
codes=[[0, 1], [0, 1], [1, 0]],
names=["two", "three", "four"],
)
expected = DataFrame(
expected_values, index=expected_index, columns=list("ABCDE")
)
result = indexer(df)
tm.assert_frame_equal(result, expected)
def test_xs_values(self, multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
result = df.xs(("bar", "two")).values
expected = df.values[4]
tm.assert_almost_equal(result, expected)
def test_xs_loc_equality(self, multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
result = df.xs(("bar", "two"))
expected = df.loc[("bar", "two")]
tm.assert_series_equal(result, expected)
def test_xs_IndexSlice_argument_not_implemented(self, frame_or_series):
# GH#35301
index = MultiIndex(
levels=[[("foo", "bar", 0), ("foo", "baz", 0), ("foo", "qux", 0)], [0, 1]],
codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
)
obj = DataFrame(np.random.default_rng(2).standard_normal((6, 4)), index=index)
if frame_or_series is Series:
obj = obj[0]
expected = obj.iloc[-2:].droplevel(0)
result = obj.xs(IndexSlice[("foo", "qux", 0), :])
tm.assert_equal(result, expected)
result = obj.loc[IndexSlice[("foo", "qux", 0), :]]
tm.assert_equal(result, expected)
def test_xs_levels_raises(self, frame_or_series):
obj = DataFrame({"A": [1, 2, 3]})
if frame_or_series is Series:
obj = obj["A"]
msg = "Index must be a MultiIndex"
with pytest.raises(TypeError, match=msg):
obj.xs(0, level="as")
def test_xs_multiindex_droplevel_false(self):
# GH#19056
mi = MultiIndex.from_tuples(
[("a", "x"), ("a", "y"), ("b", "x")], names=["level1", "level2"]
)
df = DataFrame([[1, 2, 3]], columns=mi)
result = df.xs("a", axis=1, drop_level=False)
expected = DataFrame(
[[1, 2]],
columns=MultiIndex.from_tuples(
[("a", "x"), ("a", "y")], names=["level1", "level2"]
),
)
tm.assert_frame_equal(result, expected)
def test_xs_droplevel_false(self):
# GH#19056
df = DataFrame([[1, 2, 3]], columns=Index(["a", "b", "c"]))
result = df.xs("a", axis=1, drop_level=False)
expected = DataFrame({"a": [1]})
tm.assert_frame_equal(result, expected)
def test_xs_droplevel_false_view(self):
# GH#37832
df = DataFrame([[1, 2, 3]], columns=Index(["a", "b", "c"]))
result = df.xs("a", axis=1, drop_level=False)
# check that result still views the same data as df
assert np.shares_memory(result.iloc[:, 0]._values, df.iloc[:, 0]._values)
df.iloc[0, 0] = 2
# The subset is never modified
expected = DataFrame({"a": [1]})
tm.assert_frame_equal(result, expected)
df = DataFrame([[1, 2.5, "a"]], columns=Index(["a", "b", "c"]))
result = df.xs("a", axis=1, drop_level=False)
df.iloc[0, 0] = 2
# The subset is never modified
expected = DataFrame({"a": [1]})
tm.assert_frame_equal(result, expected)
def test_xs_list_indexer_droplevel_false(self):
# GH#41760
mi = MultiIndex.from_tuples([("x", "m", "a"), ("x", "n", "b"), ("y", "o", "c")])
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=mi)
with pytest.raises(KeyError, match="y"):
df.xs(("x", "y"), drop_level=False, axis=1)
| TestXSWithMultiIndex |
python | joke2k__faker | faker/providers/date_time/es/__init__.py | {
"start": 46,
"end": 777
} | class ____(DateTimeProvider):
DAY_NAMES = {
"0": "domingo",
"1": "lunes",
"2": "martes",
"3": "miércoles",
"4": "jueves",
"5": "viernes",
"6": "sábado",
}
MONTH_NAMES = {
"01": "enero",
"02": "febrero",
"03": "marzo",
"04": "abril",
"05": "mayo",
"06": "junio",
"07": "julio",
"08": "agosto",
"09": "septiembre",
"10": "octubre",
"11": "noviembre",
"12": "diciembre",
}
def day_of_week(self):
day = self.date("%w")
return self.DAY_NAMES[day]
def month_name(self):
month = self.month()
return self.MONTH_NAMES[month]
| Provider |
python | numba__numba | numba/tests/test_record_dtype.py | {
"start": 37322,
"end": 41430
} | class ____(TestCase):
def setUp(self):
self.value = 2
a_dtype = np.dtype([('a', 'f8')])
ab_dtype = np.dtype([('a', 'f8'), ('b', 'f8')])
self.a_rec1 = np.array([1], dtype=a_dtype)[0]
self.a_rec2 = np.array([2], dtype=a_dtype)[0]
self.ab_rec1 = np.array([(self.value, 3)], dtype=ab_dtype)[0]
self.ab_rec2 = np.array([(self.value + 1, 3)], dtype=ab_dtype)[0]
self.func = lambda rec: rec['a']
def test_common_field(self):
# Test that subtypes do not require new compilations
njit_sig = njit(types.float64(typeof(self.a_rec1)))
functions = [
njit(self.func), # jitted function with open njit
njit_sig(self.func) # jitted fc with closed signature
]
for fc in functions:
fc(self.a_rec1)
fc.disable_compile()
y = fc(self.ab_rec1)
self.assertEqual(self.value, y)
def test_tuple_of_records(self):
@njit
def foo(rec_tup):
x = 0
for i in range(len(rec_tup)):
x += rec_tup[i]['a']
return x
foo((self.a_rec1, self.a_rec2))
foo.disable_compile()
y = foo((self.ab_rec1, self.ab_rec2))
self.assertEqual(2 * self.value + 1, y)
def test_array_field(self):
# Tests subtyping with array fields
rec1 = np.empty(1, dtype=[('a', 'f8', (4,))])[0]
rec1['a'][0] = 1
rec2 = np.empty(1, dtype=[('a', 'f8', (4,)), ('b', 'f8')])[0]
rec2['a'][0] = self.value
@njit
def foo(rec):
return rec['a'][0]
foo(rec1)
foo.disable_compile()
y = foo(rec2)
self.assertEqual(self.value, y)
def test_no_subtyping1(self):
# test that conversion rules don't allow subtypes with different field
# names
c_dtype = np.dtype([('c', 'f8')])
c_rec1 = np.array([1], dtype=c_dtype)[0]
@njit
def foo(rec):
return rec['c']
foo(c_rec1)
foo.disable_compile()
with self.assertRaises(TypeError) as err:
foo(self.a_rec1)
self.assertIn("No matching definition for argument type(s) Record",
str(err.exception))
def test_no_subtyping2(self):
# test that conversion rules don't allow smaller records as subtypes
jit_fc = njit(self.func)
jit_fc(self.ab_rec1)
jit_fc.disable_compile()
with self.assertRaises(TypeError) as err:
jit_fc(self.a_rec1)
self.assertIn("No matching definition for argument type(s) Record",
str(err.exception))
def test_no_subtyping3(self):
# test that conversion rules don't allow records with fields with same
# name but incompatible type
other_a_rec = np.array(['a'], dtype=np.dtype([('a', 'U25')]))[0]
jit_fc = njit(self.func)
jit_fc(self.a_rec1)
jit_fc.disable_compile()
with self.assertRaises(TypeError) as err:
jit_fc(other_a_rec)
self.assertIn("No matching definition for argument type(s) Record",
str(err.exception))
def test_branch_pruning(self):
# test subtyping behaviour in a case with a dead branch
@njit
def foo(rec, flag=None):
n = 0
n += rec['a']
if flag is not None:
# Dead branch pruning will hide this branch
n += rec['b']
rec['b'] += 20
return n
self.assertEqual(foo(self.a_rec1), self.a_rec1[0])
# storing value because it will be mutated
k = self.ab_rec1[1]
self.assertEqual(foo(self.ab_rec1, flag=1), self.ab_rec1[0] + k)
self.assertEqual(self.ab_rec1[1], k + 20)
foo.disable_compile()
self.assertEqual(len(foo.nopython_signatures), 2)
self.assertEqual(foo(self.a_rec1) + 1, foo(self.ab_rec1))
self.assertEqual(foo(self.ab_rec1, flag=1), self.ab_rec1[0] + k + 20)
| TestSubtyping |
python | pydata__xarray | xarray/tests/test_plugins.py | {
"start": 937,
"end": 10026
} | class ____(common.BackendEntrypoint):
def open_dataset(self, filename_or_obj, *, decoder): # type: ignore[override]
pass
@pytest.fixture
def dummy_duplicated_entrypoints():
specs = [
["engine1", "xarray.tests.test_plugins:backend_1", "xarray.backends"],
["engine1", "xarray.tests.test_plugins:backend_2", "xarray.backends"],
["engine2", "xarray.tests.test_plugins:backend_1", "xarray.backends"],
["engine2", "xarray.tests.test_plugins:backend_2", "xarray.backends"],
]
eps = list(starmap(EntryPoint, specs))
return eps
@pytest.mark.filterwarnings("ignore:Found")
def test_remove_duplicates(dummy_duplicated_entrypoints) -> None:
with pytest.warns(RuntimeWarning):
entrypoints = plugins.remove_duplicates(dummy_duplicated_entrypoints)
assert len(entrypoints) == 2
def test_broken_plugin() -> None:
broken_backend = EntryPoint(
"broken_backend",
"xarray.tests.test_plugins:backend_1",
"xarray.backends",
)
with pytest.warns(RuntimeWarning) as record:
_ = plugins.build_engines(EntryPoints([broken_backend]))
assert len(record) == 1
message = str(record[0].message)
assert "Engine 'broken_backend'" in message
def test_remove_duplicates_warnings(dummy_duplicated_entrypoints) -> None:
with pytest.warns(RuntimeWarning) as record:
_ = plugins.remove_duplicates(dummy_duplicated_entrypoints)
assert len(record) == 2
message0 = str(record[0].message)
message1 = str(record[1].message)
assert "entrypoints" in message0
assert "entrypoints" in message1
@mock.patch(
f"{importlib_metadata_mock}.EntryPoint.load", mock.MagicMock(return_value=None)
)
def test_backends_dict_from_pkg() -> None:
specs = [
["engine1", "xarray.tests.test_plugins:backend_1", "xarray.backends"],
["engine2", "xarray.tests.test_plugins:backend_2", "xarray.backends"],
]
entrypoints = list(starmap(EntryPoint, specs))
engines = plugins.backends_dict_from_pkg(entrypoints)
assert len(engines) == 2
assert engines.keys() == {"engine1", "engine2"}
def test_set_missing_parameters() -> None:
backend_1 = DummyBackendEntrypoint1
backend_2 = DummyBackendEntrypoint2
backend_2.open_dataset_parameters = ("filename_or_obj",)
engines = {"engine_1": backend_1, "engine_2": backend_2}
plugins.set_missing_parameters(engines)
assert len(engines) == 2
assert backend_1.open_dataset_parameters == ("filename_or_obj", "decoder")
assert backend_2.open_dataset_parameters == ("filename_or_obj",)
backend_kwargs = DummyBackendEntrypointKwargs
backend_kwargs.open_dataset_parameters = ("filename_or_obj", "decoder")
plugins.set_missing_parameters({"engine": backend_kwargs})
assert backend_kwargs.open_dataset_parameters == ("filename_or_obj", "decoder")
backend_args = DummyBackendEntrypointArgs
backend_args.open_dataset_parameters = ("filename_or_obj", "decoder")
plugins.set_missing_parameters({"engine": backend_args})
assert backend_args.open_dataset_parameters == ("filename_or_obj", "decoder")
# reset
backend_1.open_dataset_parameters = None
backend_1.open_dataset_parameters = None
backend_kwargs.open_dataset_parameters = None
backend_args.open_dataset_parameters = None
def test_set_missing_parameters_raise_error() -> None:
backend = DummyBackendEntrypointKwargs
with pytest.raises(TypeError):
plugins.set_missing_parameters({"engine": backend})
backend_args = DummyBackendEntrypointArgs
with pytest.raises(TypeError):
plugins.set_missing_parameters({"engine": backend_args})
@mock.patch(
f"{importlib_metadata_mock}.EntryPoint.load",
mock.MagicMock(return_value=DummyBackendEntrypoint1),
)
def test_build_engines() -> None:
dummy_pkg_entrypoint = EntryPoint(
"dummy", "xarray.tests.test_plugins:backend_1", "xarray_backends"
)
backend_entrypoints = plugins.build_engines(EntryPoints([dummy_pkg_entrypoint]))
assert isinstance(backend_entrypoints["dummy"], DummyBackendEntrypoint1)
assert backend_entrypoints["dummy"].open_dataset_parameters == (
"filename_or_obj",
"decoder",
)
@mock.patch(
f"{importlib_metadata_mock}.EntryPoint.load",
mock.MagicMock(return_value=DummyBackendEntrypoint1),
)
def test_build_engines_sorted() -> None:
dummy_pkg_entrypoints = EntryPoints(
[
EntryPoint(
"dummy2", "xarray.tests.test_plugins:backend_1", "xarray.backends"
),
EntryPoint(
"dummy1", "xarray.tests.test_plugins:backend_1", "xarray.backends"
),
]
)
backend_entrypoints = list(plugins.build_engines(dummy_pkg_entrypoints))
indices = []
for be in OPTIONS["netcdf_engine_order"]:
try:
index = backend_entrypoints.index(be)
backend_entrypoints.pop(index)
indices.append(index)
except ValueError:
pass
assert set(indices) < {0, -1}
assert list(backend_entrypoints) == sorted(backend_entrypoints)
@mock.patch(
"xarray.backends.plugins.list_engines",
mock.MagicMock(return_value={"dummy": DummyBackendEntrypointArgs()}),
)
def test_no_matching_engine_found() -> None:
with pytest.raises(ValueError, match=r"did not find a match in any"):
plugins.guess_engine("not-valid")
with pytest.raises(ValueError, match=r"found the following matches with the input"):
plugins.guess_engine("foo.nc")
@mock.patch(
"xarray.backends.plugins.list_engines",
mock.MagicMock(return_value={}),
)
def test_engines_not_installed() -> None:
with pytest.raises(ValueError, match=r"xarray is unable to open"):
plugins.guess_engine("not-valid")
with pytest.raises(ValueError, match=r"found the following matches with the input"):
plugins.guess_engine("foo.nc")
def test_lazy_import() -> None:
"""Test that some modules are imported in a lazy manner.
When importing xarray these should not be imported as well.
Only when running code for the first time that requires them.
"""
deny_list = [
"cubed",
"cupy",
# "dask", # TODO: backends.locks is not lazy yet :(
"dask.array",
"dask.distributed",
"flox",
"h5netcdf",
"matplotlib",
"nc_time_axis",
"netCDF4",
"numbagg",
"pint",
"pydap",
# "scipy", # TODO: xarray.backends.scipy_ is currently not lazy
"sparse",
"zarr",
]
# ensure that none of the above modules has been imported before
modules_backup = {}
for pkg in list(sys.modules.keys()):
for mod in deny_list + ["xarray"]:
if pkg.startswith(mod):
modules_backup[pkg] = sys.modules[pkg]
del sys.modules[pkg]
break
try:
import xarray # noqa: F401
from xarray.backends import list_engines
list_engines()
# ensure that none of the modules that are supposed to be
# lazy loaded are loaded when importing xarray
is_imported = set()
for pkg in sys.modules:
for mod in deny_list:
if pkg.startswith(mod):
is_imported.add(mod)
break
assert len(is_imported) == 0, (
f"{is_imported} have been imported but should be lazy"
)
finally:
# restore original
sys.modules.update(modules_backup)
def test_list_engines() -> None:
from xarray.backends import list_engines
engines = list_engines()
assert list_engines.cache_info().currsize == 1
assert ("scipy" in engines) == has_scipy
assert ("h5netcdf" in engines) == has_h5netcdf
assert ("netcdf4" in engines) == has_netCDF4
assert ("pydap" in engines) == has_pydap
assert ("zarr" in engines) == has_zarr
assert "store" in engines
def test_refresh_engines() -> None:
from xarray.backends import list_engines, refresh_engines
EntryPointMock1 = mock.MagicMock()
EntryPointMock1.name = "test1"
EntryPointMock1.load.return_value = DummyBackendEntrypoint1
return_value = EntryPoints([EntryPointMock1])
with mock.patch("xarray.backends.plugins.entry_points", return_value=return_value):
list_engines.cache_clear()
engines = list_engines()
assert "test1" in engines
assert isinstance(engines["test1"], DummyBackendEntrypoint1)
EntryPointMock2 = mock.MagicMock()
EntryPointMock2.name = "test2"
EntryPointMock2.load.return_value = DummyBackendEntrypoint2
return_value2 = EntryPoints([EntryPointMock2])
with mock.patch("xarray.backends.plugins.entry_points", return_value=return_value2):
refresh_engines()
engines = list_engines()
assert "test1" not in engines
assert "test2" in engines
assert isinstance(engines["test2"], DummyBackendEntrypoint2)
# reset to original
refresh_engines()
| DummyBackendEntrypoint2 |
python | encode__django-rest-framework | rest_framework/authtoken/migrations/0002_auto_20160226_1747.py | {
"start": 76,
"end": 994
} | class ____(migrations.Migration):
dependencies = [
('authtoken', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='token',
options={'verbose_name_plural': 'Tokens', 'verbose_name': 'Token'},
),
migrations.AlterField(
model_name='token',
name='created',
field=models.DateTimeField(verbose_name='Created', auto_now_add=True),
),
migrations.AlterField(
model_name='token',
name='key',
field=models.CharField(verbose_name='Key', max_length=40, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='token',
name='user',
field=models.OneToOneField(to=settings.AUTH_USER_MODEL, verbose_name='User', related_name='auth_token', on_delete=models.CASCADE),
),
]
| Migration |
python | apache__airflow | providers/google/tests/unit/google/marketing_platform/operators/test_search_ads.py | {
"start": 5527,
"end": 6569
} | class ____:
@mock.patch(
"airflow.providers.google.marketing_platform.operators.search_ads.GoogleSearchAdsReportingHook"
)
@mock.patch("airflow.providers.google.marketing_platform.operators.search_ads.BaseOperator")
def test_execute(self, mock_base_op, hook_mock):
customs_columns = [
{"id": "custom_column_id_1"},
{"id": "custom_column_id_2"},
{"id": "custom_column_id_3"},
]
hook_mock.return_value.list_custom_columns.return_value = {"customColumns": customs_columns}
op = GoogleSearchAdsListCustomColumnsOperator(
customer_id=CUSTOMER_ID,
api_version=API_VERSION,
task_id="test_task",
)
op.execute(context=None)
hook_mock.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
api_version="v0",
)
hook_mock.return_value.list_custom_columns.assert_called_once_with(
customer_id=CUSTOMER_ID,
)
| TestGoogleSearchAdsListCustomColumnsOperator |
python | sphinx-doc__sphinx | sphinx/pycode/parser.py | {
"start": 4553,
"end": 5959
} | class ____:
def __init__(self, buffers: list[str]) -> None:
lines = iter(buffers)
self.buffers = buffers
self.tokens = tokenize.generate_tokens(lambda: next(lines))
self.current: Token | None = None
self.previous: Token | None = None
def get_line(self, lineno: int) -> str:
"""Returns specified line."""
return self.buffers[lineno - 1]
def fetch_token(self) -> Token | None:
"""Fetch the next token from source code.
Returns ``None`` if sequence finished.
"""
try:
self.previous = self.current
self.current = Token(*next(self.tokens))
except StopIteration:
self.current = None
return self.current
def fetch_until(self, condition: Any) -> list[Token]:
"""Fetch tokens until specified token appeared.
.. note:: This also handles parenthesis well.
"""
tokens = []
while current := self.fetch_token():
tokens.append(current)
if current == condition:
break
if current == [OP, '(']:
tokens += self.fetch_until([OP, ')'])
elif current == [OP, '{']:
tokens += self.fetch_until([OP, '}'])
elif current == [OP, '[']:
tokens += self.fetch_until([OP, ']'])
return tokens
| TokenProcessor |
python | keras-team__keras | keras/src/layers/core/embedding.py | {
"start": 418,
"end": 21326
} | class ____(Layer):
"""Turns nonnegative integers (indexes) into dense vectors of fixed size.
e.g. `[[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]`
This layer can only be used on nonnegative integer inputs of a fixed range.
Example:
>>> model = keras.Sequential()
>>> model.add(keras.layers.Embedding(1000, 64))
>>> # The model will take as input an integer matrix of size (batch,
>>> # input_length), and the largest integer (i.e. word index) in the input
>>> # should be no larger than 999 (vocabulary size).
>>> # Now model.output_shape is (None, 10, 64), where `None` is the batch
>>> # dimension.
>>> input_array = np.random.randint(1000, size=(32, 10))
>>> model.compile('rmsprop', 'mse')
>>> output_array = model.predict(input_array)
>>> print(output_array.shape)
(32, 10, 64)
Args:
input_dim: Integer. Size of the vocabulary,
i.e. maximum integer index + 1.
output_dim: Integer. Dimension of the dense embedding.
embeddings_initializer: Initializer for the `embeddings`
matrix (see `keras.initializers`).
embeddings_regularizer: Regularizer function applied to
the `embeddings` matrix (see `keras.regularizers`).
embeddings_constraint: Constraint function applied to
the `embeddings` matrix (see `keras.constraints`).
mask_zero: Boolean, whether or not the input value 0 is a special
"padding" value that should be masked out.
This is useful when using recurrent layers which
may take variable length input. If this is `True`,
then all subsequent layers in the model need
to support masking or an exception will be raised.
If `mask_zero` is set to `True`, as a consequence,
index 0 cannot be used in the vocabulary (`input_dim` should
equal size of vocabulary + 1).
weights: Optional floating-point matrix of size
`(input_dim, output_dim)`. The initial embeddings values
to use.
lora_rank: Optional integer. If set, the layer's forward pass
will implement LoRA (Low-Rank Adaptation)
with the provided rank. LoRA sets the layer's embeddings
matrix to non-trainable and replaces it with a delta over the
original matrix, obtained via multiplying two lower-rank
trainable matrices. This can be useful to reduce the
computation cost of fine-tuning large embedding layers.
You can also enable LoRA on an existing
`Embedding` layer by calling `layer.enable_lora(rank)`.
lora_alpha: Optional integer. If set, this parameter scales the
low-rank adaptation delta (computed as the product of two lower-rank
trainable matrices) during the forward pass. The delta is scaled by
`lora_alpha / lora_rank`, allowing you to fine-tune the strength of
the LoRA adjustment independently of `lora_rank`.
Input shape:
2D tensor with shape: `(batch_size, input_length)`.
Output shape:
3D tensor with shape: `(batch_size, input_length, output_dim)`.
"""
def __init__(
self,
input_dim,
output_dim,
embeddings_initializer="uniform",
embeddings_regularizer=None,
embeddings_constraint=None,
mask_zero=False,
weights=None,
lora_rank=None,
lora_alpha=None,
**kwargs,
):
input_length = kwargs.pop("input_length", None)
if input_length is not None:
warnings.warn(
"Argument `input_length` is deprecated. Just remove it."
)
super().__init__(**kwargs)
self.input_dim = input_dim
self.output_dim = output_dim
self.embeddings_initializer = initializers.get(embeddings_initializer)
self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
self.embeddings_constraint = constraints.get(embeddings_constraint)
self.mask_zero = mask_zero
self.supports_masking = mask_zero
self.autocast = False
self.lora_rank = lora_rank
self.lora_alpha = lora_alpha if lora_alpha is not None else lora_rank
self.lora_enabled = False
if weights is not None:
self.build()
if not (isinstance(weights, list) and len(weights) == 1):
weights = [weights]
self.set_weights(weights)
def build(self, input_shape=None):
if self.built:
return
embeddings_shape = (self.input_dim, self.output_dim)
if self.quantization_mode:
self.quantized_build(embeddings_shape, mode=self.quantization_mode)
if self.quantization_mode not in ("int8", "int4"):
self._embeddings = self.add_weight(
shape=embeddings_shape,
initializer=self.embeddings_initializer,
name="embeddings",
regularizer=self.embeddings_regularizer,
constraint=self.embeddings_constraint,
trainable=True,
)
self.built = True
if self.lora_rank:
self.enable_lora(self.lora_rank)
@property
def embeddings(self):
if not self.built:
raise AttributeError(
"You must build the layer before accessing `embeddings`."
)
embeddings = self._embeddings
if self.quantization_mode == "int4":
embeddings = quantizers.unpack_int4(
embeddings, self._orig_output_dim, axis=-1
)
if self.lora_enabled:
return embeddings + (self.lora_alpha / self.lora_rank) * ops.matmul(
self.lora_embeddings_a, self.lora_embeddings_b
)
return embeddings
def call(self, inputs):
if inputs.dtype != "int32" and inputs.dtype != "int64":
inputs = ops.cast(inputs, "int32")
outputs = ops.take(self.embeddings, inputs, axis=0)
return ops.cast(outputs, dtype=self.compute_dtype)
def compute_mask(self, inputs, mask=None):
if not self.mask_zero:
return None
return ops.not_equal(inputs, 0)
def compute_output_shape(self, input_shape):
return (*input_shape, self.output_dim)
def compute_output_spec(self, inputs):
output_shape = self.compute_output_shape(inputs.shape)
ragged = getattr(inputs, "ragged", False)
return KerasTensor(
output_shape, dtype=self.compute_dtype, ragged=ragged
)
def enable_lora(
self,
rank,
lora_alpha=None,
a_initializer="he_uniform",
b_initializer="zeros",
):
if self.embeddings_constraint:
raise ValueError(
"Lora is incompatible with embedding constraints. "
"In order to enable lora on this layer, remove the "
"`embeddings_constraint` argument."
)
if not self.built:
raise ValueError(
"Cannot enable lora on a layer that isn't yet built."
)
if self.lora_enabled:
raise ValueError(
"lora is already enabled. This can only be done once per layer."
)
self._tracker.unlock()
self.lora_embeddings_a = self.add_weight(
name="lora_embeddings_a",
shape=(self.input_dim, rank),
initializer=initializers.get(a_initializer),
regularizer=self.embeddings_regularizer,
)
self.lora_embeddings_b = self.add_weight(
name="lora_embeddings_b",
shape=(rank, self.output_dim),
initializer=initializers.get(b_initializer),
regularizer=self.embeddings_regularizer,
)
self.embeddings.trainable = False
self._tracker.lock()
self.lora_enabled = True
self.lora_rank = rank
self.lora_alpha = lora_alpha if lora_alpha is not None else rank
def save_own_variables(self, store):
# Do nothing if the layer isn't yet built
if not self.built:
return
mode = self.quantization_mode
if mode not in self.variable_serialization_spec:
raise self._quantization_mode_error(mode)
# Embeddings plus optional merged LoRA-aware scale
# (returns (embeddings, None) for `None` mode).
embeddings_value, merged_kernel_scale = (
self._get_embeddings_with_merged_lora()
)
idx = 0
for name in self.variable_serialization_spec[mode]:
if name == "embeddings":
store[str(idx)] = embeddings_value
elif name == "embeddings_scale" and mode in ("int4", "int8"):
# For int4/int8, the merged LoRA scale (if any) comes from
# `_get_embeddings_with_merged_lora()`
store[str(idx)] = merged_kernel_scale
else:
store[str(idx)] = getattr(self, name)
idx += 1
def load_own_variables(self, store):
if not self.lora_enabled:
self._check_load_own_variables(store)
# Do nothing if the layer isn't yet built
if not self.built:
return
mode = self.quantization_mode
if mode not in self.variable_serialization_spec:
raise self._quantization_mode_error(mode)
idx = 0
for name in self.variable_serialization_spec[mode]:
if name == "embeddings":
self._embeddings.assign(store[str(idx)])
else:
getattr(self, name).assign(store[str(idx)])
idx += 1
if self.lora_enabled:
self.lora_embeddings_a.assign(
ops.zeros(self.lora_embeddings_a.shape)
)
self.lora_embeddings_b.assign(
ops.zeros(self.lora_embeddings_b.shape)
)
def get_config(self):
base_config = super().get_config()
config = {
"input_dim": self.input_dim,
"output_dim": self.output_dim,
"embeddings_initializer": initializers.serialize(
self.embeddings_initializer
),
"embeddings_regularizer": regularizers.serialize(
self.embeddings_regularizer
),
"activity_regularizer": regularizers.serialize(
self.activity_regularizer
),
"embeddings_constraint": constraints.serialize(
self.embeddings_constraint
),
"mask_zero": self.mask_zero,
}
if self.lora_rank:
config["lora_rank"] = self.lora_rank
config["lora_alpha"] = self.lora_alpha
return {**base_config, **config}
def _quantization_mode_error(self, mode):
return NotImplementedError(
"Invalid quantization mode. Expected one of ('int8', 'int4'). "
f"Received: quantization_mode={mode}"
)
@property
def variable_serialization_spec(self):
"""Returns a dict mapping quantization modes to variable names in order.
This spec is used by `save_own_variables` and `load_own_variables` to
determine the correct ordering of variables during serialization for
each quantization mode. `None` means no quantization.
"""
return {
None: [
"embeddings",
],
"int8": [
"embeddings",
"embeddings_scale",
],
"int4": [
"embeddings",
"embeddings_scale",
],
}
def quantized_build(self, embeddings_shape, mode):
if mode == "int8":
self._int8_build(embeddings_shape)
elif mode == "int4":
self._int4_build(embeddings_shape)
else:
raise self._quantization_mode_error(mode)
self._is_quantized = True
def _int8_build(self, embeddings_shape):
self._embeddings = self.add_weight(
name="embeddings",
shape=embeddings_shape,
initializer="zeros",
dtype="int8",
trainable=False,
)
# We choose to reduce the axis of `output_dim` because, typically,
# `input_dim` is larger than `output_dim`. This reduces quantization
# error.
self.embeddings_scale = self.add_weight(
name="embeddings_scale",
shape=(self.input_dim,),
initializer="ones",
trainable=False,
)
def _int4_build(self, embeddings_shape):
input_dim, output_dim = embeddings_shape
packed_rows = (output_dim + 1) // 2 # ceil for odd dims
# Embeddings are stored *packed*: each int8 byte contains two int4
# values.
self._embeddings = self.add_weight(
name="embeddings",
shape=(input_dim, packed_rows),
initializer="zeros",
dtype="int8",
trainable=False,
)
self.embeddings_scale = self.add_weight(
name="embeddings_scale",
shape=(self.input_dim,),
initializer="ones",
trainable=False,
)
# Record original output_dim for unpacking at runtime.
self._orig_output_dim = output_dim
def _int8_call(self, inputs, training=None):
# We cannot update quantized self._embeddings, so the custom gradient is
# not needed
if backend.standardize_dtype(inputs.dtype) not in ("int32", "int64"):
inputs = ops.cast(inputs, "int32")
embeddings_scale = ops.take(self.embeddings_scale, inputs, axis=0)
outputs = ops.take(self._embeddings, inputs, axis=0)
# De-scale outputs
outputs = ops.divide(
ops.cast(outputs, dtype=self.compute_dtype),
ops.expand_dims(embeddings_scale, axis=-1),
)
if self.lora_enabled:
lora_outputs = ops.take(self.lora_embeddings_a, inputs, axis=0)
lora_outputs = ops.matmul(lora_outputs, self.lora_embeddings_b)
outputs = ops.add(
outputs, (self.lora_alpha / self.lora_rank) * lora_outputs
)
return outputs
def _int4_call(self, inputs, training=None):
# We cannot update quantized self._embeddings, so the custom gradient is
# not needed
if backend.standardize_dtype(inputs.dtype) not in ("int32", "int64"):
inputs = ops.cast(inputs, "int32")
embeddings_scale = ops.take(self.embeddings_scale, inputs, axis=0)
unpacked_embeddings = quantizers.unpack_int4(
self._embeddings, self._orig_output_dim, axis=-1
)
outputs = ops.take(unpacked_embeddings, inputs, axis=0)
# De-scale outputs
outputs = ops.divide(
ops.cast(outputs, dtype=self.compute_dtype),
ops.expand_dims(embeddings_scale, axis=-1),
)
if self.lora_enabled:
lora_outputs = ops.take(self.lora_embeddings_a, inputs, axis=0)
lora_outputs = ops.matmul(lora_outputs, self.lora_embeddings_b)
outputs = ops.add(
outputs, (self.lora_alpha / self.lora_rank) * lora_outputs
)
return outputs
def quantize(self, mode, type_check=True, config=None):
# Prevent quantization of the subclasses.
if type_check and (type(self) is not Embedding):
raise self._not_implemented_error(self.quantize)
embeddings_shape = (self.input_dim, self.output_dim)
if mode == "int8":
# Quantize `self._embeddings` to int8 and compute corresponding
# scale.
embeddings_value, embeddings_scale = quantizers.abs_max_quantize(
self._embeddings, axis=-1, to_numpy=True
)
embeddings_scale = ops.squeeze(embeddings_scale, axis=-1)
del self._embeddings
self.quantized_build(embeddings_shape, mode)
self._embeddings.assign(embeddings_value)
self.embeddings_scale.assign(embeddings_scale)
elif mode == "int4":
# Quantize to int4 values (stored in int8 dtype, range [-8, 7]).
embeddings_value, embeddings_scale = quantizers.abs_max_quantize(
self._embeddings,
axis=-1,
value_range=(-8, 7),
dtype="int8",
to_numpy=True,
)
embeddings_scale = ops.squeeze(embeddings_scale, axis=-1)
# 2. Pack two int4 values into a single int8 byte.
packed_embeddings_value, _, _ = quantizers.pack_int4(
embeddings_value, axis=-1
)
del self._embeddings
self.quantized_build(embeddings_shape, mode)
self._embeddings.assign(packed_embeddings_value)
self.embeddings_scale.assign(embeddings_scale)
else:
raise self._quantization_mode_error(mode)
# Set new dtype policy.
if self.dtype_policy.quantization_mode is None:
policy = dtype_policies.get(f"{mode}_from_{self.dtype_policy.name}")
self.dtype_policy = policy
def _get_embeddings_with_merged_lora(self):
"""Returns the embeddings with LoRA matrices merged, for serialization.
This method is called by `save_own_variables` to produce a single
embeddings tensor that includes the adaptations from LoRA. This is
useful for deploying the model or for continuing training after
permanently applying the LoRA update.
If the layer is quantized (`int8` or `int4`), the process is:
1. Dequantize the base embeddings to float.
2. Compute the LoRA delta (`lora_embeddings_a @ lora_embeddings_b`) and
add it to the dequantized embeddings.
3. Re-quantize the merged result back to the original quantized
type (`int8` or packed `int4`), calculating a new scale factor.
If the layer is not quantized, this method returns the result of the
`embeddings` property (which computes the merge in floating-point) and a
scale of `None`.
If LoRA is not enabled, it returns the original embeddings and scale
without modification.
Returns:
A tuple `(embeddings_value, embeddings_scale)`:
`embeddings_value`: The merged embeddings. A quantized tensor if
quantization is active, otherwise a high precision tensor.
`embeddings_scale`: The quantization scale for the merged
embeddings. This is `None` if the layer is not quantized.
"""
if self.dtype_policy.quantization_mode in (None, "gptq"):
return self.embeddings, None
embeddings_value = self._embeddings
embeddings_scale = self.embeddings_scale
if not self.lora_enabled:
return embeddings_value, embeddings_scale
# Dequantize embeddings to float.
if self.quantization_mode == "int4":
unpacked_embeddings = quantizers.unpack_int4(
embeddings_value, self._orig_output_dim, axis=-1
)
float_embeddings = ops.divide(
ops.cast(unpacked_embeddings, self.compute_dtype),
ops.expand_dims(embeddings_scale, axis=-1),
)
quant_range = (-8, 7)
elif self.quantization_mode == "int8":
float_embeddings = ops.divide(
ops.cast(embeddings_value, self.compute_dtype),
ops.expand_dims(embeddings_scale, axis=-1),
)
quant_range = (-127, 127)
else:
raise ValueError(
f"Unsupported quantization mode: {self.quantization_mode}"
)
# Merge LoRA weights in float domain.
lora_delta = (self.lora_alpha / self.lora_rank) * ops.matmul(
self.lora_embeddings_a, self.lora_embeddings_b
)
merged_float_embeddings = ops.add(float_embeddings, lora_delta)
# Requantize.
requantized_embeddings, embeddings_scale = quantizers.abs_max_quantize(
merged_float_embeddings,
axis=-1,
value_range=quant_range,
dtype="int8",
to_numpy=True,
)
embeddings_scale = ops.squeeze(embeddings_scale, axis=-1)
# Pack if int4.
if self.quantization_mode == "int4":
embeddings_value, _, _ = quantizers.pack_int4(
requantized_embeddings, axis=-1
)
else:
embeddings_value = requantized_embeddings
return embeddings_value, embeddings_scale
| Embedding |
python | graphql-python__graphene | graphene/relay/tests/test_custom_global_id.py | {
"start": 2491,
"end": 4706
} | class ____:
def setup_method(self):
self.user_list = [
{"id": "my global primary key in clear 1", "name": "First"},
{"id": "my global primary key in clear 2", "name": "Second"},
{"id": "my global primary key in clear 3", "name": "Third"},
{"id": "my global primary key in clear 4", "name": "Fourth"},
]
self.users = {user["id"]: user for user in self.user_list}
class CustomNode(Node):
class Meta:
global_id_type = SimpleGlobalIDType
class User(ObjectType):
class Meta:
interfaces = [CustomNode]
name = String()
@classmethod
def get_node(cls, _type, _id):
return self.users[_id]
class RootQuery(ObjectType):
user = CustomNode.Field(User)
self.schema = Schema(query=RootQuery, types=[User])
self.graphql_schema = self.schema.graphql_schema
def test_str_schema_correct(self):
"""
Check that the schema has the expected and custom node interface and user type and that they both use UUIDs
"""
parsed = re.findall(r"(.+) \{\n\s*([\w\W]*?)\n\}", str(self.schema))
types = [t for t, f in parsed]
fields = [f for t, f in parsed]
custom_node_interface = "interface CustomNode"
assert custom_node_interface in types
assert (
'"""The ID of the object"""\n id: ID!'
== fields[types.index(custom_node_interface)]
)
user_type = "type User implements CustomNode"
assert user_type in types
assert (
'"""The ID of the object"""\n id: ID!\n name: String'
== fields[types.index(user_type)]
)
def test_get_by_id(self):
query = """query {
user(id: "my global primary key in clear 3") {
id
name
}
}"""
result = graphql_sync(self.graphql_schema, query)
assert not result.errors
assert result.data["user"]["id"] == self.user_list[2]["id"]
assert result.data["user"]["name"] == self.user_list[2]["name"]
| TestSimpleGlobalID |
python | xlwings__xlwings | xlwings/_xlwindows.py | {
"start": 24202,
"end": 28016
} | class ____(base_classes.Book):
def __init__(self, xl):
self.xl = xl
@property
def api(self):
return self.xl
def json(self):
raise NotImplementedError()
@property
def name(self):
return self.xl.Name
@property
def sheets(self):
return Sheets(xl=self.xl.Worksheets)
@property
def app(self):
return App(xl=self.xl.Application)
def close(self):
self.xl.Close(SaveChanges=False)
def save(self, path=None, password=None):
saved_path = self.xl.Path
source_ext = os.path.splitext(self.name)[1] if saved_path else None
target_ext = os.path.splitext(path)[1] if path else ".xlsx"
if saved_path and source_ext == target_ext:
file_format = self.xl.FileFormat
else:
ext_to_file_format = {
".xlsx": FileFormat.xlOpenXMLWorkbook,
".xlsm": FileFormat.xlOpenXMLWorkbookMacroEnabled,
".xlsb": FileFormat.xlExcel12,
".xltm": FileFormat.xlOpenXMLTemplateMacroEnabled,
".xltx": FileFormat.xlOpenXMLTemplateMacroEnabled,
".xlam": FileFormat.xlOpenXMLAddIn,
".xls": FileFormat.xlWorkbookNormal,
".xlt": FileFormat.xlTemplate,
".xla": FileFormat.xlAddIn,
".html": FileFormat.xlHtml,
}
file_format = ext_to_file_format[target_ext]
if (saved_path != "") and (path is None):
# Previously saved: Save under existing name
self.xl.Save()
elif (
(saved_path != "") and (path is not None) and (os.path.split(path)[0] == "")
):
# Save existing book under new name in cwd if no path has been provided
path = os.path.join(os.getcwd(), path)
self.xl.SaveAs(
os.path.realpath(path), FileFormat=file_format, Password=password
)
elif (saved_path == "") and (path is None):
# Previously unsaved: Save under current name in current working directory
path = os.path.join(os.getcwd(), self.xl.Name + ".xlsx")
alerts_state = self.xl.Application.DisplayAlerts
self.xl.Application.DisplayAlerts = False
self.xl.SaveAs(
os.path.realpath(path), FileFormat=file_format, Password=password
)
self.xl.Application.DisplayAlerts = alerts_state
elif path:
# Save under new name/location
alerts_state = self.xl.Application.DisplayAlerts
self.xl.Application.DisplayAlerts = False
self.xl.SaveAs(
os.path.realpath(path), FileFormat=file_format, Password=password
)
self.xl.Application.DisplayAlerts = alerts_state
@property
def fullname(self):
if "://" in self.xl.FullName:
config = read_config_sheet(xlwings.Book(impl=self))
return fullname_url_to_local_path(
url=self.xl.FullName,
sheet_onedrive_consumer_config=config.get("ONEDRIVE_CONSUMER_WIN"),
sheet_onedrive_commercial_config=config.get("ONEDRIVE_COMMERCIAL_WIN"),
sheet_sharepoint_config=config.get("SHAREPOINT_WIN"),
)
else:
return self.xl.FullName
@property
def names(self):
return Names(xl=self.xl.Names)
def activate(self):
self.xl.Activate()
def to_pdf(self, path, quality):
self.xl.ExportAsFixedFormat(
Type=FixedFormatType.xlTypePDF,
Filename=path,
Quality=quality_types[quality],
IncludeDocProperties=True,
IgnorePrintAreas=False,
OpenAfterPublish=False,
)
| Book |
python | apache__airflow | providers/apache/druid/tests/unit/apache/druid/hooks/test_druid.py | {
"start": 16121,
"end": 21102
} | class ____:
def setup_method(self):
self.cur = MagicMock(rowcount=0)
self.conn = conn = MagicMock()
self.conn.host = "host"
self.conn.port = "1000"
self.conn.schema = None
self.conn.conn_type = "druid"
self.conn.extra_dejson = {"endpoint": "druid/v2/sql"}
self.conn.cursor.return_value = self.cur
class TestDruidDBApiHook(DruidDbApiHook):
def get_conn(self):
return conn
def get_connection(self, conn_id):
return conn
self.db_hook = TestDruidDBApiHook
@patch("airflow.providers.apache.druid.hooks.druid.DruidDbApiHook.get_connection")
@patch("airflow.providers.apache.druid.hooks.druid.connect")
@pytest.mark.parametrize(
("specified_context", "passed_context"),
[
(None, {}),
({"query_origin": "airflow"}, {"query_origin": "airflow"}),
],
)
def test_get_conn_with_context(
self, mock_connect, mock_get_connection, specified_context, passed_context
):
get_conn_value = MagicMock()
get_conn_value.host = "test_host"
get_conn_value.conn_type = "https"
get_conn_value.login = "test_login"
get_conn_value.password = "test_password"
get_conn_value.port = 10000
get_conn_value.extra_dejson = {"endpoint": "/test/endpoint", "schema": "https"}
mock_get_connection.return_value = get_conn_value
hook = DruidDbApiHook(context=specified_context)
hook.get_conn()
mock_connect.assert_called_with(
host="test_host",
port=10000,
path="/test/endpoint",
scheme="https",
user="test_login",
password="test_password",
context=passed_context,
ssl_verify_cert=True,
)
@patch("airflow.providers.apache.druid.hooks.druid.DruidDbApiHook.get_connection")
@patch("airflow.providers.apache.druid.hooks.druid.connect")
def test_get_conn_respects_ssl_verify_cert(self, mock_connect, mock_get_connection):
get_conn_value = MagicMock()
get_conn_value.host = "test_host"
get_conn_value.conn_type = "https"
get_conn_value.login = "test_login"
get_conn_value.password = "test_password"
get_conn_value.port = 10000
get_conn_value.extra_dejson = {
"endpoint": "/test/endpoint",
"schema": "https",
"ssl_verify_cert": False,
}
mock_get_connection.return_value = get_conn_value
hook = DruidDbApiHook()
hook.get_conn()
mock_connect.assert_called_with(
host="test_host",
port=10000,
path="/test/endpoint",
scheme="https",
user="test_login",
password="test_password",
context={},
ssl_verify_cert=False,
)
def test_get_uri(self):
db_hook = self.db_hook()
assert db_hook.get_uri() == "druid://host:1000/druid/v2/sql"
def test_get_first_record(self):
statement = "SQL"
result_sets = [("row1",), ("row2",)]
self.cur.fetchone.return_value = result_sets[0]
assert result_sets[0] == self.db_hook().get_first(statement)
assert self.conn.close.call_count == 1
assert self.cur.close.call_count == 1
self.cur.execute.assert_called_once_with(statement)
def test_get_records(self):
statement = "SQL"
result_sets = [("row1",), ("row2",)]
self.cur.fetchall.return_value = result_sets
assert result_sets == self.db_hook().get_records(statement)
assert self.conn.close.call_count == 1
assert self.cur.close.call_count == 1
self.cur.execute.assert_called_once_with(statement)
def test_get_df_pandas(self):
statement = "SQL"
column = "col"
result_sets = [("row1",), ("row2",)]
self.cur.description = [(column,)]
self.cur.fetchall.return_value = result_sets
df = self.db_hook().get_df(statement, df_type="pandas")
assert column == df.columns[0]
for i, item in enumerate(result_sets):
assert item[0] == df.values.tolist()[i][0]
assert self.conn.close.call_count == 1
assert self.cur.close.call_count == 1
self.cur.execute.assert_called_once_with(statement)
def test_get_df_polars(self):
statement = "SQL"
column = "col"
result_sets = [("row1",), ("row2",)]
mock_execute = MagicMock()
mock_execute.description = [(column, None, None, None, None, None, None)]
mock_execute.fetchall.return_value = result_sets
self.cur.execute.return_value = mock_execute
df = self.db_hook().get_df(statement, df_type="polars")
assert column == df.columns[0]
assert result_sets[0][0] == df.row(0)[0]
assert result_sets[1][0] == df.row(1)[0]
| TestDruidDbApiHook |
python | skorch-dev__skorch | skorch/tests/callbacks/test_logging.py | {
"start": 15265,
"end": 22499
} | class ____:
@pytest.fixture
def print_log_cls(self):
from skorch.callbacks import PrintLog
keys_ignored = ['dur', 'event_odd']
return partial(PrintLog, sink=Mock(), keys_ignored=keys_ignored)
@pytest.fixture
def print_log(self, print_log_cls):
return print_log_cls().initialize()
@pytest.fixture
def scoring_cls(self):
from skorch.callbacks import EpochScoring
return EpochScoring
@pytest.fixture
def mse_scoring(self, scoring_cls):
return scoring_cls(
'neg_mean_squared_error',
name='nmse',
).initialize()
@pytest.fixture
def odd_epoch_callback(self):
from skorch.callbacks import Callback
class OddEpochCallback(Callback):
def on_epoch_end(self, net, **kwargs):
net.history[-1]['event_odd'] = bool(len(net.history) % 2)
return OddEpochCallback().initialize()
@pytest.fixture
def net(self, net_cls, module_cls, train_split, mse_scoring,
odd_epoch_callback, print_log, data):
net = net_cls(
module_cls, batch_size=1, train_split=train_split,
callbacks=[mse_scoring, odd_epoch_callback], max_epochs=2)
net.initialize()
# replace default PrintLog with test PrintLog
net.callbacks_[-1] = ('print_log', print_log)
return net.partial_fit(*data)
@pytest.fixture
def history(self, net):
return net.history
# pylint: disable=unused-argument
@pytest.fixture
def sink(self, history, print_log):
# note: the history fixture is required even if not used because it
# triggers the calls on print_log
return print_log.sink
@pytest.fixture
def ansi(self):
from skorch.utils import Ansi
return Ansi
def test_call_count(self, sink):
# header + lines + 2 epochs
assert sink.call_count == 4
def test_header(self, sink):
header = sink.call_args_list[0][0][0]
columns = header.split()
expected = ['epoch', 'nmse', 'train_loss', 'valid_loss']
assert columns == expected
def test_lines(self, sink):
lines = sink.call_args_list[1][0][0].split()
# Lines have length 2 + length of column, or 8 if the column
# name is short and the values are floats.
expected = [
'-' * (len('epoch') + 2),
'-' * 8,
'-' * (len('train_loss') + 2),
'-' * (len('valid_loss') + 2),
]
assert lines
assert lines == expected
@pytest.mark.parametrize('epoch', [0, 1])
def test_first_row(self, sink, ansi, epoch, history):
row = sink.call_args_list[epoch + 2][0][0]
items = row.split()
# epoch, nmse, valid, train
assert len(items) == 4
# epoch, starts at 1
assert items[0] == str(epoch + 1)
# is best
are_best = [
history[epoch, 'nmse_best'],
history[epoch, 'train_loss_best'],
history[epoch, 'valid_loss_best'],
]
# test that cycled colors are used if best
for item, color, is_best in zip(items[1:], list(ansi)[1:], are_best):
if is_best:
# if best, text colored
assert item.startswith(color.value)
assert item.endswith(ansi.ENDC.value)
else:
# if not best, text is only float, so converting possible
float(item)
def test_args_passed_to_tabulate(self, history):
with patch('skorch.callbacks.logging.tabulate') as tab:
from skorch.callbacks import PrintLog
print_log = PrintLog(
tablefmt='latex',
floatfmt='.9f',
).initialize()
print_log.table(history[-1])
assert tab.call_count == 1
assert tab.call_args_list[0][1]['tablefmt'] == 'latex'
assert tab.call_args_list[0][1]['floatfmt'] == '.9f'
def test_with_additional_key(self, history, print_log_cls):
keys_ignored = ['event_odd'] # 'dur' no longer ignored
print_log = print_log_cls(
sink=Mock(), keys_ignored=keys_ignored).initialize()
# does not raise
print_log.on_epoch_end(Mock(history=history))
header = print_log.sink.call_args_list[0][0][0]
columns = header.split()
expected = ['epoch', 'nmse', 'train_loss', 'valid_loss', 'dur']
assert columns == expected
def test_keys_ignored_as_str(self, print_log_cls):
print_log = print_log_cls(keys_ignored='a-key').initialize()
assert print_log.keys_ignored_ == {'a-key', 'batches'}
print_log.initialize()
assert print_log.keys_ignored_ == set(['a-key', 'batches'])
def test_keys_ignored_is_None(self, print_log_cls):
print_log = print_log_cls(keys_ignored=None)
assert print_log.keys_ignored is None
print_log.initialize()
assert print_log.keys_ignored_ == set(['batches'])
def test_with_event_key(self, history, print_log_cls):
print_log = print_log_cls(sink=Mock(), keys_ignored=None).initialize()
# history has two epochs, write them one by one
print_log.on_epoch_end(Mock(history=history[:-1]))
print_log.on_epoch_end(Mock(history=history))
header = print_log.sink.call_args_list[0][0][0]
columns = header.split()
expected = ['epoch', 'nmse', 'train_loss', 'valid_loss', 'odd', 'dur']
assert columns == expected
odd_row = print_log.sink.call_args_list[2][0][0].split()
even_row = print_log.sink.call_args_list[3][0][0].split()
assert len(odd_row) == 6 # odd row has entries in every column
assert odd_row[4] == '+' # including '+' sign for the 'event_odd'
assert len(even_row) == 5 # even row does not have 'event_odd' entry
def test_witout_valid_data(
self, net_cls, module_cls, mse_scoring, print_log, data):
net = net_cls(
module_cls, batch_size=1, train_split=None,
callbacks=[mse_scoring], max_epochs=2)
net.initialize()
# replace default PrintLog with test PrintLog
net.callbacks_[-1] = ('print_log', print_log)
net.partial_fit(*data)
sink = print_log.sink
row = sink.call_args_list[2][0][0]
items = row.split()
assert len(items) == 2 # no valid, only epoch and train
def test_print_not_skipped_if_verbose(self, capsys):
from skorch.callbacks import PrintLog
print_log = PrintLog().initialize()
net = Mock(history=[{'loss': 123}], verbose=1)
print_log.on_epoch_end(net)
stdout = capsys.readouterr()[0]
result = [x.strip() for x in stdout.split()]
expected = ['loss', '------', '123']
assert result == expected
def test_print_skipped_if_not_verbose(self, capsys):
from skorch.callbacks import PrintLog
print_log = PrintLog().initialize()
net = Mock(history=[{'loss': 123}], verbose=0)
print_log.on_epoch_end(net)
stdout = capsys.readouterr()[0]
assert not stdout
| TestPrintLog |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/experimental/base.py | {
"start": 4186,
"end": 4337
} | class ____(NamedTuple):
"""Pair of table-source and column-name information."""
table_source: DataSourceInfo
column_name: str
| DataSourcePair |
python | pytorch__pytorch | test/dynamo/cpython/3_13/seq_tests.py | {
"start": 3604,
"end": 3668
} | class ____(list):
def __iter__(self):
yield 1
| LyingList |
python | dask__distributed | distributed/shuffle/tests/test_merge.py | {
"start": 14213,
"end": 16652
} | class ____(_ShuffleRunManager):
seen: set[ShuffleId]
block_get_or_create: asyncio.Event
blocking_get_or_create: asyncio.Event
def __init__(self, *args: Any, **kwargs: Any):
super().__init__(*args, **kwargs)
self.seen = set()
self.limit = 1
self.blocking_get_or_create = asyncio.Event()
self.block_get_or_create = asyncio.Event()
async def get_or_create(self, shuffle_id: ShuffleId, key: Key) -> ShuffleRun:
if len(self.seen) >= self.limit and shuffle_id not in self.seen:
self.blocking_get_or_create.set()
await self.block_get_or_create.wait()
self.seen.add(shuffle_id)
return await super().get_or_create(shuffle_id, key)
@mock.patch(
"distributed.shuffle._worker_plugin._ShuffleRunManager",
LimitedGetOrCreateShuffleRunManager,
)
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_merge_does_not_deadlock_if_worker_joins(c, s, a):
"""Regression test for https://github.com/dask/distributed/issues/8411"""
pdf1 = pd.DataFrame({"a": range(100), "b": range(0, 200, 2)})
pdf2 = pd.DataFrame({"x": range(200), "y": [1, 2, 3, 4] * 50})
df1 = dd.from_pandas(pdf1, npartitions=10)
df2 = dd.from_pandas(pdf2, npartitions=20)
run_manager_A = a.plugins["shuffle"].shuffle_runs
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
joined = dd.merge(df1, df2, left_on="a", right_on="x")
result = c.compute(joined)
await run_manager_A.blocking_get_or_create.wait()
async with Worker(s.address) as b:
run_manager_A.block_get_or_create.set()
run_manager_B = b.plugins["shuffle"].shuffle_runs
run_manager_B.block_get_or_create.set()
result = await result
expected = pd.merge(pdf1, pdf2, left_on="a", right_on="x")
assert_eq(result, expected, check_index=False)
@gen_cluster(client=True)
async def test_merge_indicator(c, s, a, b):
data = {
"id": [1, 2, 3],
"test": [4, 5, 6],
}
pdf = pd.DataFrame(data)
df = dd.from_pandas(pdf, npartitions=2)
result = df.merge(df, on="id", how="outer", indicator=True)
x = c.compute(result)
x = await x
expected = pdf.merge(pdf, on="id", how="outer", indicator=True)
pd.testing.assert_frame_equal(
x.sort_values("id", ignore_index=True),
expected.sort_values("id", ignore_index=True),
)
| LimitedGetOrCreateShuffleRunManager |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels32.py | {
"start": 315,
"end": 1980
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_data_labels32.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [71374336, 71414144]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
[10, 20, 30, 40, 50],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
worksheet.write_column("D1", data[3])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$5",
"data_labels": {
"value": True,
"custom": [
{
"value": 33,
"font": {
"bold": True,
"italic": True,
"color": "red",
"baseline": -1,
},
}
],
},
}
)
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/plan/inputs.py | {
"start": 22534,
"end": 24722
} | class ____(IHaveNew):
"""This step input source models being directly downstream of a step with dynamic output.
Once that step completes successfully, this will resolve once per DynamicOutput.
"""
step_output_handle: StepOutputHandle
# deprecated, preserved for back-compat
node_handle: NodeHandle
input_name: str
def __new__(
cls,
step_output_handle: StepOutputHandle,
# deprecated, preserved for back-compat
node_handle: Optional[NodeHandle] = None,
input_name: Optional[str] = None,
):
# Model the unknown mapping key from known execution step
# using a StepOutputHandle with None mapping_key.
check.inst_param(step_output_handle, "step_output_handle", StepOutputHandle)
check.invariant(step_output_handle.mapping_key is None)
return super().__new__(
cls,
step_output_handle=step_output_handle,
# add placeholder values for back-compat
node_handle=node_handle or NodeHandle("", None),
input_name=input_name or "",
)
@property
def resolved_by_step_key(self) -> str:
return self.step_output_handle.step_key
@property
def resolved_by_output_name(self) -> str:
return self.step_output_handle.output_name
def resolve(self, mapping_key: str) -> FromStepOutput:
check.str_param(mapping_key, "mapping_key")
return FromStepOutput(
step_output_handle=StepOutputHandle(
step_key=self.step_output_handle.step_key,
output_name=self.step_output_handle.output_name,
mapping_key=mapping_key,
),
fan_in=False,
)
def get_step_output_handle_dep_with_placeholder(self) -> StepOutputHandle:
# None mapping_key on StepOutputHandle acts as placeholder
return self.step_output_handle
def required_resource_keys(
self, _job_def: JobDefinition, op_handle: NodeHandle, op_input_name: str
) -> set[str]:
return set()
@whitelist_for_serdes(storage_field_names={"node_handle": "solid_handle"})
@record_custom
| FromPendingDynamicStepOutput |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 219078,
"end": 220095
} | class ____(Operation):
def call(self, x1, x2):
return backend.numpy.true_divide(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
output_dtype = dtypes.result_type(
getattr(x1, "dtype", type(x1)),
getattr(x2, "dtype", type(x2)),
float,
)
x1_sparse = getattr(x1, "sparse", False)
x2_sparse = getattr(x2, "sparse", False)
output_sparse = x1_sparse and not x2_sparse
return KerasTensor(
output_shape, dtype=output_dtype, sparse=output_sparse
)
@keras_export(
[
"keras.ops.true_divide",
"keras.ops.numpy.true_divide",
]
)
def true_divide(x1, x2):
"""Alias for `keras.ops.divide`."""
if any_symbolic_tensors((x1, x2)):
return TrueDivide().symbolic_call(x1, x2)
return backend.numpy.true_divide(x1, x2)
| TrueDivide |
python | pandas-dev__pandas | pandas/core/computation/pytables.py | {
"start": 2456,
"end": 2743
} | class ____(Term):
def __init__(self, name, env: PyTablesScope, side=None, encoding=None) -> None:
assert isinstance(env, PyTablesScope), type(env)
super().__init__(name, env, side=side, encoding=encoding)
def _resolve_name(self):
return self._name
| Constant |
python | cython__cython | Tools/make_dataclass_tests.py | {
"start": 9293,
"end": 9833
} | class ____(ast.NodeVisitor):
found = False
def visit_Name(self, node):
if node.id == "dataclass":
self.found = True
return self.generic_visit(node)
def generic_visit(self, node):
if self.found:
return # skip
return super().generic_visit(node)
def dataclass_in_decorators(decorator_list):
finder = DataclassInDecorators()
for dec in decorator_list:
finder.visit(dec)
if finder.found:
return True
return False
| DataclassInDecorators |
python | streamlit__streamlit | lib/streamlit/testing/v1/element_tree.py | {
"start": 44614,
"end": 53014
} | class ____:
"""A container of other elements.
Elements within a Block can be inspected and interacted with. This follows
the same syntax as inspecting and interacting within an ``AppTest`` object.
For all container classes, parameters of the original element can be
obtained as properties. For example, ``ChatMessage.avatar`` and
``Tab.label``.
"""
type: str
children: dict[int, Node]
proto: Any = field(repr=False)
root: ElementTree = field(repr=False)
def __init__(
self,
proto: BlockProto | None,
root: ElementTree,
) -> None:
self.children = {}
self.proto = proto
if proto:
ty = proto.WhichOneof("type")
if ty is not None:
self.type = ty
else:
# `st.container` has no sub-message
self.type = "container"
else:
self.type = "unknown"
self.root = root
def __len__(self) -> int:
return len(self.children)
def __iter__(self) -> Iterator[Node]:
yield self
for child_idx in self.children:
yield from self.children[child_idx]
def __getitem__(self, k: int) -> Node:
return self.children[k]
@property
def key(self) -> str | None:
return None
# We could implement these using __getattr__ but that would have
# much worse type information.
@property
def button(self) -> WidgetList[Button]:
return WidgetList(self.get("button")) # type: ignore
@property
def button_group(self) -> WidgetList[ButtonGroup[Any]]:
return WidgetList(self.get("button_group")) # type: ignore
@property
def caption(self) -> ElementList[Caption]:
return ElementList(self.get("caption")) # type: ignore
@property
def chat_input(self) -> WidgetList[ChatInput]:
return WidgetList(self.get("chat_input")) # type: ignore
@property
def chat_message(self) -> Sequence[ChatMessage]:
return self.get("chat_message") # type: ignore
@property
def checkbox(self) -> WidgetList[Checkbox]:
return WidgetList(self.get("checkbox")) # type: ignore
@property
def code(self) -> ElementList[Code]:
return ElementList(self.get("code")) # type: ignore
@property
def color_picker(self) -> WidgetList[ColorPicker]:
return WidgetList(self.get("color_picker")) # type: ignore
@property
def columns(self) -> Sequence[Column]:
return self.get("column") # type: ignore
@property
def dataframe(self) -> ElementList[Dataframe]:
return ElementList(self.get("arrow_data_frame")) # type: ignore
@property
def date_input(self) -> WidgetList[DateInput]:
return WidgetList(self.get("date_input")) # type: ignore
@property
def datetime_input(self) -> WidgetList[DateTimeInput]:
return WidgetList(self.get("date_time_input")) # type: ignore
@property
def divider(self) -> ElementList[Divider]:
return ElementList(self.get("divider")) # type: ignore
@property
def error(self) -> ElementList[Error]:
return ElementList(self.get("error")) # type: ignore
@property
def exception(self) -> ElementList[Exception]:
return ElementList(self.get("exception")) # type: ignore
@property
def expander(self) -> Sequence[Expander]:
return self.get("expander") # type: ignore
@property
def header(self) -> ElementList[Header]:
return ElementList(self.get("header")) # type: ignore
@property
def info(self) -> ElementList[Info]:
return ElementList(self.get("info")) # type: ignore
@property
def json(self) -> ElementList[Json]:
return ElementList(self.get("json")) # type: ignore
@property
def latex(self) -> ElementList[Latex]:
return ElementList(self.get("latex")) # type: ignore
@property
def markdown(self) -> ElementList[Markdown]:
return ElementList(self.get("markdown")) # type: ignore
@property
def metric(self) -> ElementList[Metric]:
return ElementList(self.get("metric")) # type: ignore
@property
def multiselect(self) -> WidgetList[Multiselect[Any]]:
return WidgetList(self.get("multiselect")) # type: ignore
@property
def number_input(self) -> WidgetList[NumberInput]:
return WidgetList(self.get("number_input")) # type: ignore
@property
def radio(self) -> WidgetList[Radio[Any]]:
return WidgetList(self.get("radio")) # type: ignore
@property
def select_slider(self) -> WidgetList[SelectSlider[Any]]:
return WidgetList(self.get("select_slider")) # type: ignore
@property
def selectbox(self) -> WidgetList[Selectbox[Any]]:
return WidgetList(self.get("selectbox")) # type: ignore
@property
def slider(self) -> WidgetList[Slider[Any]]:
return WidgetList(self.get("slider")) # type: ignore
@property
def status(self) -> Sequence[Status]:
return self.get("status") # type: ignore
@property
def subheader(self) -> ElementList[Subheader]:
return ElementList(self.get("subheader")) # type: ignore
@property
def success(self) -> ElementList[Success]:
return ElementList(self.get("success")) # type: ignore
@property
def table(self) -> ElementList[Table]:
return ElementList(self.get("arrow_table")) # type: ignore
@property
def tabs(self) -> Sequence[Tab]:
return self.get("tab") # type: ignore
@property
def text(self) -> ElementList[Text]:
return ElementList(self.get("text")) # type: ignore
@property
def text_area(self) -> WidgetList[TextArea]:
return WidgetList(self.get("text_area")) # type: ignore
@property
def text_input(self) -> WidgetList[TextInput]:
return WidgetList(self.get("text_input")) # type: ignore
@property
def time_input(self) -> WidgetList[TimeInput]:
return WidgetList(self.get("time_input")) # type: ignore
@property
def title(self) -> ElementList[Title]:
return ElementList(self.get("title")) # type: ignore
@property
def toast(self) -> ElementList[Toast]:
return ElementList(self.get("toast")) # type: ignore
@property
def toggle(self) -> WidgetList[Toggle]:
return WidgetList(self.get("toggle")) # type: ignore
@property
def warning(self) -> ElementList[Warning]:
return ElementList(self.get("warning")) # type: ignore
def get(self, element_type: str) -> Sequence[Node]:
return [e for e in self if e.type == element_type]
def run(self, *, timeout: float | None = None) -> AppTest:
"""Run the script with updated widget values.
Parameters
----------
timeout
The maximum number of seconds to run the script. None means
use the AppTest's default.
"""
return self.root.run(timeout=timeout)
def __repr__(self) -> str:
return repr_(self)
def repr_(self: object) -> str:
"""A custom repr similar to `streamlit.util.repr_` but that shows tree
structure using indentation.
"""
classname = self.__class__.__name__
defaults: list[Any] = [None, "", False, [], set(), {}]
if is_dataclass(self):
fields_vals = (
(f.name, getattr(self, f.name))
for f in fields(self)
if f.repr
and getattr(self, f.name) != f.default
and getattr(self, f.name) not in defaults
)
else:
fields_vals = ((f, v) for (f, v) in self.__dict__.items() if v not in defaults)
reprs = []
for field_name, value in fields_vals:
line = (
f"{field_name}={format_dict(value)}"
if isinstance(value, dict)
else f"{field_name}={value!r}"
)
reprs.append(line)
reprs[0] = "\n" + reprs[0]
field_reprs = ",\n".join(reprs)
field_reprs = textwrap.indent(field_reprs, " " * 4)
return f"{classname}({field_reprs}\n)"
def format_dict(d: dict[Any, Any]) -> str:
lines = []
for k, v in d.items():
line = f"{k}: {v!r}"
lines.append(line)
r = ",\n".join(lines)
r = textwrap.indent(r, " " * 4)
return f"{{\n{r}\n}}"
@dataclass(repr=False)
| Block |
python | apache__airflow | dev/breeze/src/airflow_breeze/utils/selective_checks.py | {
"start": 5133,
"end": 16055
} | class ____(dict[T, list[str]]):
def __hash__(self):
return hash(frozenset(self))
CI_FILE_GROUP_MATCHES: HashableDict[FileGroupForCi] = HashableDict(
{
FileGroupForCi.ENVIRONMENT_FILES: [
r"^.github/workflows",
r"^dev/breeze",
r"^dev/.*\.py$",
r"^Dockerfile",
r"^scripts/ci/docker-compose",
r"^scripts/ci/kubernetes",
r"^scripts/docker",
r"^scripts/in_container",
r"^generated/provider_dependencies.json$",
],
FileGroupForCi.PYTHON_PRODUCTION_FILES: [
r"^airflow-core/src/airflow/.*\.py",
r"^providers/.*\.py",
r"^pyproject.toml",
r"^hatch_build.py",
],
FileGroupForCi.JAVASCRIPT_PRODUCTION_FILES: [
r"^airflow-core/src/airflow/.*\.[jt]sx?",
r"^airflow-core/src/airflow/.*\.lock",
r"^airflow-core/src/airflow/ui/.*\.yaml$",
r"^airflow-core/src/airflow/api_fastapi/auth/managers/simple/ui/.*\.yaml$",
],
FileGroupForCi.API_FILES: [
r"^airflow-core/src/airflow/api/",
r"^airflow-core/src/airflow/api_fastapi/",
r"^airflow-core/tests/unit/api/",
r"^airflow-core/tests/unit/api_fastapi/",
],
FileGroupForCi.GIT_PROVIDER_FILES: [
r"^providers/git/src/",
],
FileGroupForCi.STANDARD_PROVIDER_FILES: [
r"^providers/standard/src/",
],
FileGroupForCi.API_CODEGEN_FILES: [
r"^airflow-core/src/airflow/api_fastapi/core_api/openapi/.*generated\.yaml",
r"^clients/gen",
],
FileGroupForCi.HELM_FILES: [
r"^chart",
r"^airflow-core/src/airflow/kubernetes",
r"^airflow-core/tests/unit/kubernetes",
r"^helm-tests",
],
FileGroupForCi.DOC_FILES: [
r"^docs",
r"^devel-common/src/docs",
r"^\.github/SECURITY\.md",
r"^airflow-core/src/.*\.py$",
r"^airflow-core/docs/",
r"^providers/.*/src/",
r"^providers/.*/tests/",
r"^providers/.*/docs/",
r"^providers-summary-docs",
r"^docker-stack-docs",
r"^chart",
r"^task-sdk/docs/",
r"^task-sdk/src/",
r"^airflow-ctl/src/",
r"^airflow-core/tests/system",
r"^airflow-ctl/src",
r"^airflow-ctl/docs",
r"^CHANGELOG\.txt",
r"^airflow-core/src/airflow/config_templates/config\.yml",
r"^chart/RELEASE_NOTES\.rst",
r"^chart/values\.schema\.json",
r"^chart/values\.json",
r"^RELEASE_NOTES\.rst",
],
FileGroupForCi.UI_FILES: [
r"^airflow-core/src/airflow/ui/",
r"^airflow-core/src/airflow/api_fastapi/auth/managers/simple/ui/",
],
FileGroupForCi.KUBERNETES_FILES: [
r"^chart",
r"^kubernetes-tests",
r"^providers/cncf/kubernetes/",
],
FileGroupForCi.ALL_PYTHON_FILES: [
r".*\.py$",
],
FileGroupForCi.ALL_AIRFLOW_PYTHON_FILES: [
r"^airflow-core/.*\.py$",
],
FileGroupForCi.ALL_AIRFLOW_CTL_PYTHON_FILES: [
r"^airflow-ctl/.*\.py$",
],
FileGroupForCi.ALL_PROVIDERS_PYTHON_FILES: [
r"^providers/.*\.py$",
],
FileGroupForCi.ALL_PROVIDERS_DISTRIBUTION_CONFIG_FILES: [
r"^providers/.*/pyproject\.toml$",
r"^providers/.*/provider\.yaml$",
],
FileGroupForCi.ALL_DEV_PYTHON_FILES: [
r"^dev/.*\.py$",
],
FileGroupForCi.ALL_DEVEL_COMMON_PYTHON_FILES: [
r"^devel-common/.*\.py$",
],
FileGroupForCi.ALL_SOURCE_FILES: [
r"^.pre-commit-config.yaml$",
r"^airflow-core/.*",
r"^airflow-ctl/.*",
r"^chart/.*",
r"^providers/.*",
r"^task-sdk/.*",
r"^devel-common/.*",
r"^kubernetes-tests/.*",
r"^docker-tests/.*",
r"^dev/.*",
],
FileGroupForCi.SYSTEM_TEST_FILES: [
r"^airflow-core/tests/system/",
],
FileGroupForCi.ALWAYS_TESTS_FILES: [
r"^airflow-core/tests/unit/always/",
],
FileGroupForCi.ALL_PROVIDER_YAML_FILES: [
r".*/provider\.yaml$",
],
FileGroupForCi.ALL_PYPROJECT_TOML_FILES: [
r".*pyproject\.toml$",
],
FileGroupForCi.TESTS_UTILS_FILES: [
r"^airflow-core/tests/unit/utils/",
r"^devel-common/.*\.py$",
],
FileGroupForCi.TASK_SDK_FILES: [
r"^task-sdk/src/airflow/sdk/.*\.py$",
r"^task-sdk/tests/.*\.py$",
],
FileGroupForCi.TASK_SDK_INTEGRATION_TEST_FILES: [
r"^task-sdk-integration-tests/.*\.py$",
],
FileGroupForCi.GO_SDK_FILES: [
r"^go-sdk/.*\.go$",
],
FileGroupForCi.ASSET_FILES: [
r"^airflow-core/src/airflow/assets/",
r"^airflow-core/src/airflow/models/assets/",
r"^airflow-core/src/airflow/datasets/",
r"^task-sdk/src/airflow/sdk/definitions/asset/",
],
FileGroupForCi.UNIT_TEST_FILES: [
r"^airflow-core/tests/unit/",
r"^task-sdk/tests/",
r"^providers/.*/tests/unit/",
r"^dev/breeze/tests/",
r"^airflow-ctl/tests/",
],
FileGroupForCi.AIRFLOW_CTL_FILES: [
r"^airflow-ctl/src/airflowctl/.*\.py$",
r"^airflow-ctl/tests/.*\.py$",
],
FileGroupForCi.DEVEL_TOML_FILES: [
r"^devel-common/pyproject\.toml$",
],
FileGroupForCi.UI_ENGLISH_TRANSLATION_FILES: [
r"^airflow-core/src/airflow/ui/public/i18n/locales/en/.*\.json$",
],
}
)
PYTHON_OPERATOR_FILES = [
r"^providers/tests/standard/operators/test_python.py",
]
TEST_TYPE_MATCHES: HashableDict[SelectiveCoreTestType] = HashableDict(
{
SelectiveCoreTestType.API: [
r"^airflow-core/src/airflow/api/",
r"^airflow-core/src/airflow/api_fastapi/",
r"^airflow-core/tests/unit/api/",
r"^airflow-core/tests/unit/api_fastapi/",
],
SelectiveCoreTestType.CLI: [
r"^airflow-core/src/airflow/cli/",
r"^airflow-core/tests/unit/cli/",
],
SelectiveProvidersTestType.PROVIDERS: [
r"^providers/.*/src/airflow/providers/",
r"^providers/.*/tests/",
],
SelectiveTaskSdkTestType.TASK_SDK: [
r"^task-sdk/src/",
r"^task-sdk/tests/",
],
SelectiveCoreTestType.SERIALIZATION: [
r"^airflow-core/src/airflow/serialization/",
r"^airflow-core/tests/unit/serialization/",
],
SelectiveAirflowCtlTestType.AIRFLOW_CTL: [
r"^airflow-ctl/src/",
r"^airflow-ctl/tests/",
],
}
)
def find_provider_affected(changed_file: str, include_docs: bool) -> str | None:
file_path = AIRFLOW_ROOT_PATH / changed_file
if not include_docs:
for parent_dir_path in file_path.parents:
if parent_dir_path.name == "docs" and (parent_dir_path.parent / "provider.yaml").exists():
# Skip Docs changes if include_docs is not set
return None
# Find if the path under src/system tests/tests belongs to provider or is a common code across
# multiple providers
for parent_dir_path in file_path.parents:
if parent_dir_path == AIRFLOW_PROVIDERS_ROOT_PATH:
# We have not found any provider specific path up to the root of the provider base folder
break
if parent_dir_path.is_relative_to(AIRFLOW_PROVIDERS_ROOT_PATH):
relative_path = parent_dir_path.relative_to(AIRFLOW_PROVIDERS_ROOT_PATH)
# check if this path belongs to a specific provider
if (parent_dir_path / "provider.yaml").exists():
# new providers structure
return str(relative_path).replace(os.sep, ".")
if file_path.is_relative_to(AIRFLOW_DEVEL_COMMON_PATH):
# if devel-common changes, we want to run tests for all providers, as they might start failing
return "Providers"
return None
def _match_files_with_regexps(files: tuple[str, ...], matched_files, matching_regexps):
for file in files:
if any(re.match(regexp, file) for regexp in matching_regexps):
matched_files.append(file)
def _exclude_files_with_regexps(files: tuple[str, ...], matched_files, exclude_regexps):
for file in files:
if any(re.match(regexp, file) for regexp in exclude_regexps):
if file in matched_files:
matched_files.remove(file)
@clearable_cache
def _matching_files(
files: tuple[str, ...], match_group: FileGroupForCi, match_dict: HashableDict
) -> list[str]:
matched_files: list[str] = []
match_regexps = match_dict[match_group]
_match_files_with_regexps(files, matched_files, match_regexps)
count = len(matched_files)
if count > 0:
get_console().print(f"[warning]{match_group} matched {count} files.[/]")
get_console().print(matched_files)
else:
get_console().print(f"[warning]{match_group} did not match any file.[/]")
return matched_files
# TODO: In Python 3.12 we will be able to use itertools.batched
def _split_list(input_list, n) -> list[list[str]]:
"""Splits input_list into n sub-lists."""
it = iter(input_list)
return [
list(itertools.islice(it, i))
for i in [len(input_list) // n + (1 if x < len(input_list) % n else 0) for x in range(n)]
]
def _get_test_type_description(provider_test_types: list[str]) -> str:
if not provider_test_types:
return ""
first_provider = provider_test_types[0]
last_provider = provider_test_types[-1]
if first_provider.startswith("Providers["):
first_provider = first_provider.replace("Providers[", "").replace("]", "")
if last_provider.startswith("Providers["):
last_provider = last_provider.replace("Providers[", "").replace("]", "")
return (
f"{first_provider[:13]}...{last_provider[:13]}"
if first_provider != last_provider
else (first_provider[:29])
)
def _get_test_list_as_json(list_of_list_of_types: list[list[str]]) -> list[dict[str, str]] | None:
if len(list_of_list_of_types) == 1 and len(list_of_list_of_types[0]) == 0:
return None
return [
{"description": _get_test_type_description(list_of_types), "test_types": " ".join(list_of_types)}
for list_of_types in list_of_list_of_types
]
| HashableDict |
python | ray-project__ray | python/ray/autoscaler/v2/sdk.py | {
"start": 396,
"end": 3928
} | class ____(NamedTuple):
resources: dict
label_selector: dict
def request_cluster_resources(
gcs_address: str,
to_request: List[dict],
timeout: int = DEFAULT_RPC_TIMEOUT_S,
):
"""Request resources from the autoscaler.
This will add a cluster resource constraint to GCS. GCS will asynchronously
pass the constraint to the autoscaler, and the autoscaler will try to provision the
requested minimal bundles in `to_request`.
If the cluster already has `to_request` resources, this will be an no-op.
Future requests submitted through this API will overwrite the previous requests.
Args:
gcs_address: The GCS address to query.
to_request: A list of resource requests to request the cluster to have.
Each resource request is a tuple of resources and a label_selector
to apply per-bundle. e.g.: [{"resources": {"CPU": 1, "GPU": 1}, "label_selector": {"accelerator-type": "A100"}}]
timeout: Timeout in seconds for the request to be timeout
"""
assert len(gcs_address) > 0, "GCS address is not specified."
# Convert bundle dicts to ResourceRequest tuples.
normalized: List[ResourceRequest] = []
for r in to_request:
assert isinstance(
r, dict
), f"Internal Error: Expected a dict, but got {type(r)}"
resources = r.get("resources", {})
selector = r.get("label_selector", {})
normalized.append(ResourceRequest(resources, selector))
to_request = normalized
# Aggregate bundle by shape
def keyfunc(r):
return (
frozenset(r.resources.items()),
frozenset(r.label_selector.items()),
)
grouped_requests = Counter(keyfunc(r) for r in to_request)
bundles: List[dict] = []
label_selectors: List[dict] = []
counts: List[int] = []
for (bundle, selector), count in grouped_requests.items():
bundles.append(dict(bundle))
label_selectors.append(dict(selector))
counts.append(count)
GcsClient(gcs_address).request_cluster_resource_constraint(
bundles, label_selectors, counts, timeout_s=timeout
)
def get_cluster_status(
gcs_address: str, timeout: int = DEFAULT_RPC_TIMEOUT_S
) -> ClusterStatus:
"""
Get the cluster status from the autoscaler.
Args:
gcs_address: The GCS address to query.
timeout: Timeout in seconds for the request to be timeout
Returns:
A ClusterStatus object.
"""
assert len(gcs_address) > 0, "GCS address is not specified."
req_time = time.time()
str_reply = GcsClient(gcs_address).get_cluster_status(timeout_s=timeout)
reply_time = time.time()
reply = GetClusterStatusReply()
reply.ParseFromString(str_reply)
# TODO(rickyx): To be more accurate, we could add a timestamp field from the reply.
return ClusterStatusParser.from_get_cluster_status_reply(
reply,
stats=Stats(gcs_request_time_s=reply_time - req_time, request_ts_s=req_time),
)
def get_cluster_resource_state(gcs_client: GcsClient) -> ClusterResourceState:
"""
Get the cluster resource state from GCS.
Args:
gcs_client: The GCS client to query.
Returns:
A ClusterResourceState object
Raises:
Exception: If the request times out or failed.
"""
str_reply = gcs_client.get_cluster_resource_state()
reply = GetClusterResourceStateReply()
reply.ParseFromString(str_reply)
return reply.cluster_resource_state
| ResourceRequest |
python | bokeh__bokeh | src/bokeh/model/model.py | {
"start": 2836,
"end": 21865
} | class ____(HasProps, HasDocumentRef, PropertyCallbackManager, EventCallbackManager):
''' Base class for all objects stored in Bokeh |Document| instances.
'''
# a canonical order for positional args that can be
# used for any functions derived from this class
_args = ()
_extra_kws = {}
@classmethod
def __init_subclass__(cls):
super().__init_subclass__()
if cls.__module__.startswith("bokeh.models"):
assert "__init__" in cls.__dict__, str(cls)
parameters = [x[0] for x in cls.parameters()]
cls.__init__.__signature__ = Signature(parameters=parameters)
process_example(cls)
_id: ID
def __new__(cls, *args: Any, id: ID | None = None, **kwargs: Any) -> Self:
obj = super().__new__(cls)
# Setting 'id' implies deferred initialization, which means properties
# will be initialized in a separate step by a deserializer, etc.
if id is not None:
if args or kwargs:
raise ValueError("'id' cannot be used together with property initializers")
obj._id = id
else:
obj._id = make_id()
return obj
def __init__(self, *args: Any, **kwargs: Any) -> None:
if args:
raise ValueError("positional arguments are not allowed")
if "id" in kwargs:
raise ValueError("initializing 'id' is not allowed")
super().__init__(**kwargs)
default_theme.apply_to_model(self)
def __str__(self) -> str:
name = self.__class__.__name__
return f"{name}(id={self.id!r}, ...)"
__repr__ = __str__
def destroy(self) -> None:
''' Clean up references to the document and property
'''
self._document = None
self._temp_document = None
self._property_values.clear()
@property
def id(self) -> ID:
return self._id
name = Nullable(String, help="""
An arbitrary, user-supplied name for this model.
This name can be useful when querying the document to retrieve specific
Bokeh models.
.. code:: python
>>> plot.scatter([1,2,3], [4,5,6], name="temp")
>>> plot.select(name="temp")
[GlyphRenderer(id='399d53f5-73e9-44d9-9527-544b761c7705', ...)]
.. note::
No uniqueness guarantees or other conditions are enforced on any names
that are provided, nor is the name used directly by Bokeh for any
reason.
""")
tags = List(AnyRef, help="""
An optional list of arbitrary, user-supplied values to attach to this
model.
This data can be useful when querying the document to retrieve specific
Bokeh models:
.. code:: python
>>> r = plot.scatter([1,2,3], [4,5,6])
>>> r.tags = ["foo", 10]
>>> plot.select(tags=['foo', 10])
[GlyphRenderer(id='1de4c3df-a83d-480a-899b-fb263d3d5dd9', ...)]
Or simply a convenient way to attach any necessary metadata to a model
that can be accessed by ``CustomJS`` callbacks, etc.
.. note::
No uniqueness guarantees or other conditions are enforced on any tags
that are provided, nor are the tags used directly by Bokeh for any
reason.
""")
js_event_callbacks = Dict(String, List(Instance("bokeh.models.callbacks.Callback")), help="""
A mapping of event names to lists of ``CustomJS`` callbacks.
Typically, rather then modifying this property directly, callbacks should be
added using the ``Model.js_on_event`` method:
.. code:: python
callback = CustomJS(code="console.log('tap event occurred')")
plot.js_on_event('tap', callback)
""")
js_property_callbacks = Dict(String, List(Instance("bokeh.models.callbacks.Callback")), help="""
A mapping of attribute names to lists of ``CustomJS`` callbacks, to be set up on
BokehJS side when the document is created.
Typically, rather then modifying this property directly, callbacks should be
added using the ``Model.js_on_change`` method:
.. code:: python
callback = CustomJS(code="console.log('stuff')")
plot.x_range.js_on_change('start', callback)
""")
subscribed_events = Set(String, help="""
Collection of events that are subscribed to by Python callbacks. This is
the set of events that will be communicated from BokehJS back to Python
for this model.
""")
syncable: bool = Bool(default=True, help="""
Indicates whether this model should be synchronized back to a Bokeh server when
updated in a web browser. Setting to ``False`` may be useful to reduce network
traffic when dealing with frequently updated objects whose updated values we
don't need.
.. note::
Setting this property to ``False`` will prevent any ``on_change()`` callbacks
on this object from triggering. However, any JS-side callbacks will still
work.
""")
# Properties --------------------------------------------------------------
@property
def ref(self) -> Ref:
return Ref(id=self._id)
# Public methods ----------------------------------------------------------
@classmethod
def clear_extensions(cls) -> None:
""" Clear any currently defined custom extensions.
Serialization calls will result in any currently defined custom
extensions being included with the generated Document, whether or not
there are utilized. This method can be used to clear out all existing
custom extension definitions.
"""
_default_resolver.clear_extensions()
@classmethod
@without_property_validation
def parameters(cls: type[Model]) -> list[Parameter]:
''' Generate Python ``Parameter`` values suitable for functions that are
derived from the glyph.
Returns:
list(Parameter)
'''
arg_params = []
no_more_defaults = False
for arg in reversed(cls._args):
descriptor = cls.lookup(arg)
default = descriptor.class_default(cls, no_eval=True)
if default is None:
no_more_defaults = True
# simplify field(x) defaults to just present the column name
if isinstance(default, dict) and set(default) == {"field"}:
default = default["field"]
# make sure built-ins don't hold on to references to actual Models
if cls.__module__.startswith("bokeh.models"):
assert not isinstance(default, Model)
param = Parameter(
name=arg,
kind=Parameter.POSITIONAL_OR_KEYWORD,
# For positional arg properties, default=None means no default.
default=Parameter.empty if no_more_defaults else default,
)
if default:
del default
typ = type_link(descriptor.property)
arg_params.insert(0, (param, typ, descriptor.__doc__))
# these are not really useful, and should also really be private, just skip them
omissions = {'js_event_callbacks', 'js_property_callbacks', 'subscribed_events'}
kwarg_params = []
kws = set(cls.properties()) - set(cls._args) - omissions
for kw in kws:
descriptor = cls.lookup(kw)
default = descriptor.class_default(cls, no_eval=True)
# simplify field(x) defaults to just present the column name
if isinstance(default, dict) and set(default) == {"field"}:
default = default["field"]
# make sure built-ins don't hold on to references to actual Models
if cls.__module__.startswith("bokeh.models"):
assert not isinstance(default, Model)
param = Parameter(
name=kw,
kind=Parameter.KEYWORD_ONLY,
default=default,
)
del default
typ = type_link(descriptor.property)
kwarg_params.append((param, typ, descriptor.__doc__))
for kw, (typ, doc) in cls._extra_kws.items():
param = Parameter(
name=kw,
kind=Parameter.KEYWORD_ONLY,
)
kwarg_params.append((param, typ, doc))
kwarg_params.sort(key=lambda x: x[0].name)
return arg_params + kwarg_params
def js_on_event(self, event: str | type[Event], *callbacks: JSEventCallback) -> None:
if isinstance(event, str):
event_name = Event.cls_for(event).event_name
elif isinstance(event, type) and issubclass(event, Event):
event_name = event.event_name
else:
raise ValueError(f"expected string event name or event class, got {event}")
all_callbacks = list(self.js_event_callbacks.get(event_name, []))
for callback in callbacks:
if callback not in all_callbacks:
all_callbacks.append(callback)
self.js_event_callbacks[event_name] = all_callbacks
def js_link(self, attr: str, other: Model, other_attr: str, attr_selector: int | str | None = None) -> None:
''' Link two Bokeh model properties using JavaScript.
This is a convenience method that simplifies adding a
:class:`~bokeh.models.CustomJS` callback to update one Bokeh model
property whenever another changes value.
Args:
attr (str) :
The name of a Bokeh property on this model
other (Model):
A Bokeh model to link to self.attr
other_attr (str) :
The property on ``other`` to link together
attr_selector (int | str) :
The index to link an item in a subscriptable ``attr``
Added in version 1.1
Raises:
ValueError
Examples:
This code with ``js_link``:
.. code :: python
select.js_link('value', plot, 'sizing_mode')
is equivalent to the following:
.. code:: python
from bokeh.models import CustomJS
select.js_on_change('value',
CustomJS(args=dict(other=plot),
code="other.sizing_mode = this.value"
)
)
Additionally, to use attr_selector to attach the left side of a range slider to a plot's x_range:
.. code :: python
range_slider.js_link('value', plot.x_range, 'start', attr_selector=0)
which is equivalent to:
.. code :: python
from bokeh.models import CustomJS
range_slider.js_on_change('value',
CustomJS(args=dict(other=plot.x_range),
code="other.start = this.value[0]"
)
)
'''
descriptor = self.lookup(attr, raises=False)
if descriptor is None:
raise ValueError(f"{attr!r} is not a property of self ({self!r})")
if not isinstance(other, Model):
raise ValueError(f"'other' is not a Bokeh model: {other!r}")
other_descriptor = other.lookup(other_attr, raises=False)
if other_descriptor is None:
raise ValueError(f"{other_attr!r} is not a property of other ({other!r})")
from bokeh.models import CustomJS
selector = f"[{attr_selector!r}]" if attr_selector is not None else ""
cb = CustomJS(args=dict(other=other), code=f"other.{other_descriptor.name} = this.{descriptor.name}{selector}")
self.js_on_change(attr, cb)
def js_on_change(self, event: str, *callbacks: JSChangeCallback) -> None:
''' Attach a :class:`~bokeh.models.CustomJS` callback to an arbitrary
BokehJS model event.
On the BokehJS side, change events for model properties have the
form ``"change:property_name"``. As a convenience, if the event name
passed to this method is also the name of a property on the model,
then it will be prefixed with ``"change:"`` automatically:
.. code:: python
# these two are equivalent
source.js_on_change('data', callback)
source.js_on_change('change:data', callback)
However, there are other kinds of events that can be useful to respond
to, in addition to property change events. For example to run a
callback whenever data is streamed to a ``ColumnDataSource``, use the
``"stream"`` event on the source:
.. code:: python
source.js_on_change('streaming', callback)
'''
if len(callbacks) == 0:
raise ValueError("js_on_change takes an event name and one or more callbacks, got only one parameter")
# handle any CustomJS callbacks here
from bokeh.models.callbacks import CustomCode
if not all(isinstance(x, CustomCode) for x in callbacks):
raise ValueError("not all callback values are CustomCode instances")
descriptor = self.lookup(event, raises=False)
if descriptor is not None:
event = f"change:{descriptor.name}"
old = {k: [cb for cb in cbs] for k, cbs in self.js_property_callbacks.items()}
if event not in self.js_property_callbacks:
self.js_property_callbacks[event] = []
for callback in callbacks:
if callback in self.js_property_callbacks[event]:
continue
self.js_property_callbacks[event].append(callback)
self.trigger('js_property_callbacks', old, self.js_property_callbacks)
def on_change(self, attr: str, *callbacks: PropertyCallback) -> None:
''' Add a callback on this object to trigger when ``attr`` changes.
Args:
attr (str) : an attribute name on this object
*callbacks (callable) : callback functions to register
Returns:
None
Examples:
.. code-block:: python
widget.on_change('value', callback1, callback2, ..., callback_n)
'''
descriptor = self.lookup(attr)
super().on_change(descriptor.name, *callbacks)
def references(self) -> set[Model]:
''' Returns all ``Models`` that this object has references to.
'''
return set(collect_models(self))
def select(self, selector: SelectorType) -> Iterable[Model]:
''' Query this object and all of its references for objects that
match the given selector.
Args:
selector (JSON-like) :
Returns:
seq[Model]
'''
from ..core.query import find
return find(self.references(), selector)
def select_one(self, selector: SelectorType) -> Model | None:
''' Query this object and all of its references for objects that
match the given selector. Raises an error if more than one object
is found. Returns single matching object, or None if nothing is found
Args:
selector (JSON-like) :
Returns:
Model
'''
result = list(self.select(selector))
if len(result) > 1:
raise ValueError(f"Found more than one object matching {selector}: {result!r}")
if len(result) == 0:
return None
return result[0]
def set_select(self, selector: type[Model] | SelectorType, updates: dict[str, Any]) -> None:
''' Update objects that match a given selector with the specified
attribute/value updates.
Args:
selector (JSON-like) :
updates (dict) :
Returns:
None
'''
if isclass(selector) and issubclass(selector, Model):
selector = dict(type=selector)
for obj in self.select(selector):
for key, val in updates.items():
setattr(obj, key, val)
def to_serializable(self, serializer: Serializer) -> ObjectRefRep:
serializer.add_ref(self, self.ref)
super_rep = super().to_serializable(serializer)
rep = ObjectRefRep(
type="object",
name=super_rep["name"],
id=self.id,
)
attributes = super_rep.get("attributes")
if attributes is not None:
rep["attributes"] = attributes
return rep
def trigger(self, attr: str, old: Any, new: Any,
hint: DocumentPatchedEvent | None = None, setter: Setter | None = None) -> None:
'''
'''
# The explicit assumption here is that hinted events do not need to
# go through all the same invalidation steps. Currently this is the
# case for ColumnsStreamedEvent and ColumnsPatchedEvent. However,
# this may need to be further refined in the future, if the
# assumption does not hold for future hinted events (e.g. the hint
# could specify explicitly whether to do normal invalidation or not)
if hint is None:
dirty_count = 0
def mark_dirty(_: HasProps):
nonlocal dirty_count
dirty_count += 1
if self._document is not None:
visit_value_and_its_immediate_references(new, mark_dirty)
visit_value_and_its_immediate_references(old, mark_dirty)
if dirty_count > 0:
self.document.models.invalidate()
# chain up to invoke callbacks
descriptor = self.lookup(attr)
super().trigger(descriptor.name, old, new, hint=hint, setter=setter)
def _attach_document(self, doc: Document) -> None:
''' Attach a model to a Bokeh |Document|.
This private interface should only ever called by the Document
implementation to set the private ._document field properly
'''
if self.document is doc: # nothing to do
return
if self.document is not None:
raise RuntimeError(f"Models must be owned by only a single document, {self!r} is already in a doc")
doc.theme.apply_to_model(self)
self.document = doc
self._update_event_callbacks()
def _detach_document(self) -> None:
''' Detach a model from a Bokeh |Document|.
This private interface should only ever called by the Document
implementation to unset the private ._document field properly
'''
self.document = None
default_theme.apply_to_model(self)
def _repr_html_(self) -> str:
return html_repr(self)
def _sphinx_height_hint(self) -> int|None:
return None
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| Model |
python | mlflow__mlflow | mlflow/store/jobs/abstract_store.py | {
"start": 231,
"end": 4116
} | class ____(ABC):
"""
Abstract class that defines API interfaces for storing Job metadata.
"""
@abstractmethod
def create_job(self, function_fullname: str, params: str, timeout: float | None = None) -> Job:
"""
Create a new job with the specified function and parameters.
Args:
function_fullname: The full name of the function to execute
params: The job parameters that are serialized as a JSON string
timeout: The job execution timeout in seconds
Returns:
Job entity instance
"""
@abstractmethod
def start_job(self, job_id: str) -> None:
"""
Start a job by setting its status to RUNNING.
Args:
job_id: The ID of the job to start
"""
@abstractmethod
def reset_job(self, job_id: str) -> None:
"""
Reset a job by setting its status to PENDING.
Args:
job_id: The ID of the job to re-enqueue.
"""
@abstractmethod
def finish_job(self, job_id: str, result: str) -> None:
"""
Finish a job by setting its status to DONE and setting the result.
Args:
job_id: The ID of the job to finish
result: The job result as a string
"""
@abstractmethod
def mark_job_timed_out(self, job_id: str) -> None:
"""
Set a job status to Timeout.
Args:
job_id: The ID of the job
"""
@abstractmethod
def fail_job(self, job_id: str, error: str) -> None:
"""
Fail a job by setting its status to FAILED and setting the error message.
Args:
job_id: The ID of the job to fail
error: The error message as a string
"""
@abstractmethod
def retry_or_fail_job(self, job_id: str, error: str) -> int | None:
"""
If the job retry_count is less than maximum allowed retry count,
increment the retry_count and reset the job to PENDING status,
otherwise set the job to FAILED status and fill the job's error field.
Args:
job_id: The ID of the job to fail
error: The error message as a string
Returns:
If the job is allowed to retry, returns the retry count,
otherwise returns None.
"""
@abstractmethod
def list_jobs(
self,
function_fullname: str | None = None,
statuses: list[JobStatus] | None = None,
begin_timestamp: int | None = None,
end_timestamp: int | None = None,
params: dict[str, Any] | None = None,
) -> Iterator[Job]:
"""
List jobs based on the provided filters.
Args:
function_fullname: Filter by function full name (exact match)
statuses: Filter by a list of job status (PENDING, RUNNING, DONE, FAILED, TIMEOUT)
begin_timestamp: Filter jobs created after this timestamp (inclusive)
end_timestamp: Filter jobs created before this timestamp (inclusive)
params: Filter jobs by matching job params dict with the provided params dict
e.g., if `params` is ``{'a': 3, 'b': 4}``, it can match the following job params:
``{'a': 3, 'b': 4}``, ``{'a': 3, 'b': 4, 'c': 5}``, but it does not match the
following job params: ``{'a': 3, 'b': 6}``, ``{'a': 3, 'c': 5}``.
Returns:
Iterator of Job entities that match the filters, ordered by creation time (oldest first)
"""
@abstractmethod
def get_job(self, job_id: str) -> Job:
"""
Get a job by its ID.
Args:
job_id: The ID of the job to retrieve
Returns:
Job entity
Raises:
MlflowException: If job with the given ID is not found
"""
| AbstractJobStore |
python | sympy__sympy | sympy/matrices/expressions/permutation.py | {
"start": 4352,
"end": 8050
} | class ____(MatrixExpr):
r"""Symbolic representation for permuting matrix rows or columns.
Parameters
==========
perm : Permutation, PermutationMatrix
The permutation to use for permuting the matrix.
The permutation can be resized to the suitable one,
axis : 0 or 1
The axis to permute alongside.
If `0`, it will permute the matrix rows.
If `1`, it will permute the matrix columns.
Notes
=====
This follows the same notation used in
:meth:`sympy.matrices.matrixbase.MatrixBase.permute`.
Examples
========
>>> from sympy import Matrix, MatrixPermute
>>> from sympy.combinatorics import Permutation
Permuting the matrix rows:
>>> p = Permutation(1, 2, 0)
>>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> B = MatrixPermute(A, p, axis=0)
>>> B.as_explicit()
Matrix([
[4, 5, 6],
[7, 8, 9],
[1, 2, 3]])
Permuting the matrix columns:
>>> B = MatrixPermute(A, p, axis=1)
>>> B.as_explicit()
Matrix([
[2, 3, 1],
[5, 6, 4],
[8, 9, 7]])
See Also
========
sympy.matrices.matrixbase.MatrixBase.permute
"""
def __new__(cls, mat, perm, axis=S.Zero):
from sympy.combinatorics.permutations import Permutation
mat = _sympify(mat)
if not mat.is_Matrix:
raise ValueError(
"{} must be a SymPy matrix instance.".format(perm))
perm = _sympify(perm)
if isinstance(perm, PermutationMatrix):
perm = perm.args[0]
if not isinstance(perm, Permutation):
raise ValueError(
"{} must be a SymPy Permutation or a PermutationMatrix " \
"instance".format(perm))
axis = _sympify(axis)
if axis not in (0, 1):
raise ValueError("The axis must be 0 or 1.")
mat_size = mat.shape[axis]
if mat_size != perm.size:
try:
perm = perm.resize(mat_size)
except ValueError:
raise ValueError(
"Size does not match between the permutation {} "
"and the matrix {} threaded over the axis {} "
"and cannot be converted."
.format(perm, mat, axis))
return super().__new__(cls, mat, perm, axis)
def doit(self, deep=True, **hints):
mat, perm, axis = self.args
if deep:
mat = mat.doit(deep=deep, **hints)
perm = perm.doit(deep=deep, **hints)
if perm.is_Identity:
return mat
if mat.is_Identity:
if axis is S.Zero:
return PermutationMatrix(perm)
elif axis is S.One:
return PermutationMatrix(perm**-1)
if isinstance(mat, (ZeroMatrix, OneMatrix)):
return mat
if isinstance(mat, MatrixPermute) and mat.args[2] == axis:
return MatrixPermute(mat.args[0], perm * mat.args[1], axis)
return self
@property
def shape(self):
return self.args[0].shape
def _entry(self, i, j, **kwargs):
mat, perm, axis = self.args
if axis == 0:
return mat[perm.apply(i), j]
elif axis == 1:
return mat[i, perm.apply(j)]
def _eval_rewrite_as_MatMul(self, *args, **kwargs):
from .matmul import MatMul
mat, perm, axis = self.args
deep = kwargs.get("deep", True)
if deep:
mat = mat.rewrite(MatMul)
if axis == 0:
return MatMul(PermutationMatrix(perm), mat)
elif axis == 1:
return MatMul(mat, PermutationMatrix(perm**-1))
| MatrixPermute |
python | pytorch__pytorch | torch/testing/_internal/common_quantization.py | {
"start": 52336,
"end": 54607
} | class ____(QuantizationTestCase):
def _create_quantized_model(self, model_class: type[torch.nn.Module], **kwargs):
# Creates quantized model for testing mobile script modules
qengine = "qnnpack"
with override_quantized_engine(qengine):
# FIXME(rec): shouldn't qconfig be passed to quantize?
qconfig = torch.ao.quantization.get_default_qconfig(qengine) # noqa: F841
model = model_class(**kwargs)
model = quantize(model, test_only_eval_fn, [self.calib_data])
return model
def _compare_script_and_mobile(self, model: torch.nn.Module, input: torch.Tensor):
# Compares the numerical outputs for script and lite modules
qengine = "qnnpack"
with override_quantized_engine(qengine):
script_module = torch.jit.script(model)
script_module_result = script_module(input)
max_retry = 5
for retry in range(1, max_retry + 1):
# retries `max_retry` times; breaks iff succeeds else throws exception
try:
buffer = io.BytesIO(
script_module._save_to_buffer_for_lite_interpreter()
)
buffer.seek(0)
mobile_module = _load_for_lite_interpreter(buffer)
mobile_module_result = mobile_module(input)
torch.testing.assert_close(
script_module_result, mobile_module_result
)
mobile_module_forward_result = mobile_module.forward(input)
torch.testing.assert_close(
script_module_result, mobile_module_forward_result
)
mobile_module_run_method_result = mobile_module.run_method(
"forward", input
)
torch.testing.assert_close(
script_module_result, mobile_module_run_method_result
)
except AssertionError as e:
if retry == max_retry:
raise e
else:
continue
break
| QuantizationLiteTestCase |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 303885,
"end": 307686
} | class ____(MutableBox):
"""
StorageBox allow in-place mutation of Tensors
"""
def is_input_buffer(self) -> bool:
if isinstance(self.data, (InputBuffer, ReinterpretView)):
return self.data.get_name() in V.graph.graph_inputs
return False
def is_module_buffer(self) -> bool:
return (
isinstance(self.data, (ConstantBuffer))
and self.data.get_name() in V.graph.constants
)
def realize(self) -> Optional[str]:
if IRNode.is_realized_node(self.data):
return self.data.get_name()
assert isinstance(self.data, (Pointwise, Reduction, Scan, Sort)), type(
self.data
)
origin_node = self.data.get_origin_node()
traceback = self.data.get_traceback()
device = self.data.get_device()
assert device is not None
self.data = ComputedBuffer(
name=None,
layout=FlexibleLayout(
device=device,
dtype=self.data.get_dtype(),
size=self.data.get_size(),
is_pinned=False,
),
data=self.data,
)
self.data.name = V.graph.register_buffer(self.data)
V.graph.register_operation(self.data)
self.data.origins = self.origins
self.data.origin_node = origin_node
self.data.traceback = traceback
return self.data.name
def realize_hint(self) -> None:
"""
Called on buffers we expect to be forced to realize later.
"""
if (
isinstance(self.data, (Pointwise, Reduction))
and self.data.inner_fn_opcount().nontrivial_read_count > 1
):
self.realize()
def has_accumulated_enough_reads_by_size(self, threshold: int) -> bool:
from torch._inductor.utils import is_nonfreeable_buffers
size_of_reads = [
V.graph.get_dep_size_hint(dep)
for dep in self.get_reads()
if not is_nonfreeable_buffers(dep)
]
if not size_of_reads:
return False
total_size = sum(size_of_reads)
max_size = max(size_of_reads)
min_size = min(size_of_reads)
return (
total_size >= threshold
and total_size / max_size >= 2
and max_size == min_size
)
def has_exceeded_max_reads(self) -> bool:
return isinstance(self.data, Pointwise) and (
self.num_reads() > config.realize_acc_reads_threshold
or self.has_large_inner_fn()
or (
config.realize_acc_reads_size_threshold is not None
and self.has_accumulated_enough_reads_by_size(
config.realize_acc_reads_size_threshold
)
)
)
def should_realize_on_reuse(self, users: int) -> bool:
"""
A heuristic to decide if we should realize a tensor
that is used multiple times.
"""
if users > 1 and isinstance(self.data, (Pointwise, Reduction)):
if is_cpu(self.data):
# Heuristic for realizing reused result of heavy ops on cpu
opcount = self.data.inner_fn_opcount()
heavy_ops = ["exp", "sigmoid"] # a list of heavy ops
if any(x in opcount.used_ops for x in heavy_ops):
return True
return (
self.num_reads() > config.realize_reads_threshold
or self.has_large_inner_fn()
)
return False
def mark_reuse(self, users: int) -> None:
if self.should_realize_on_reuse(users):
self.realize()
def num_reads(self) -> int:
return self.data.num_reads()
@ir_dataclass(frozen=False)
| StorageBox |
python | django__django | tests/template_tests/syntax_tests/test_static.py | {
"start": 3963,
"end": 4411
} | class ____(SimpleTestCase):
def test_repr(self):
static_node = StaticNode(varname="named-var", path="named-path")
self.assertEqual(
repr(static_node),
"StaticNode(varname='named-var', path='named-path')",
)
static_node = StaticNode(path="named-path")
self.assertEqual(
repr(static_node),
"StaticNode(varname=None, path='named-path')",
)
| StaticNodeTests |
python | getsentry__sentry | src/sentry_plugins/opsgenie/plugin.py | {
"start": 1365,
"end": 3929
} | class ____(CorePluginMixin, notify.NotificationPlugin):
author = "Sentry Team"
author_url = "https://github.com/getsentry"
title = "Opsgenie"
slug = "opsgenie"
description = DESCRIPTION
conf_key = "opsgenie"
version = sentry.VERSION
project_conf_form = OpsGenieOptionsForm
required_field = "api_key"
feature_descriptions = [
FeatureDescription(
"""
Manage incidents and outages by sending Sentry notifications to Opsgenie.
""",
IntegrationFeatures.INCIDENT_MANAGEMENT,
),
FeatureDescription(
"""
Configure Sentry rules to trigger notifications based on conditions you set.
""",
IntegrationFeatures.ALERT_RULE,
),
]
logger = logging.getLogger("sentry.plugins.opsgenie")
def is_configured(self, project) -> bool:
return all(self.get_option(k, project) for k in ("api_key", "alert_url"))
@staticmethod
def build_payload(group, event, triggering_rules):
return {
"message": event.message or event.title,
"alias": f"sentry: {group.id}",
"source": "Sentry",
"details": {
"Sentry ID": str(group.id),
"Sentry Group": getattr(group, "title", group.message).encode("utf-8"),
"Project ID": group.project.slug,
"Project Name": group.project.name,
"Logger": group.logger,
"Level": group.get_level_display(),
"URL": group.get_absolute_url(),
# TODO(ecosystem): We need to eventually change the key on this
"Triggering Rules": json.dumps(triggering_rules),
},
"entity": group.culprit,
"tags": [f'{str(x).replace(",", "")}:{str(y).replace(",", "")}' for x, y in event.tags],
}
def notify_users(self, group, event, triggering_rules) -> None:
if not self.is_configured(group.project):
return
client = self.get_client(group.project)
payload = self.build_payload(group, event, triggering_rules)
try:
client.trigger_incident(payload)
except Exception as e:
self.raise_error(e)
def get_client(self, project):
api_key = self.get_option("api_key", project)
alert_url = self.get_option("alert_url", project)
recipients = self.get_option("recipients", project)
return OpsGenieApiClient(api_key, alert_url, recipients)
| OpsGeniePlugin |
python | tiangolo__fastapi | scripts/notify_translations.py | {
"start": 2206,
"end": 2265
} | class ____(BaseModel):
edges: List[CommentsEdge]
| Comments |
python | huggingface__transformers | src/transformers/models/swinv2/modeling_swinv2.py | {
"start": 33180,
"end": 35326
} | class ____(GradientCheckpointingLayer):
def __init__(
self, config, dim, input_resolution, depth, num_heads, drop_path, downsample, pretrained_window_size=0
):
super().__init__()
self.config = config
self.dim = dim
blocks = []
for i in range(depth):
block = Swinv2Layer(
config=config,
dim=dim,
input_resolution=input_resolution,
num_heads=num_heads,
drop_path_rate=drop_path[i],
shift_size=0 if (i % 2 == 0) else config.window_size // 2,
pretrained_window_size=pretrained_window_size,
)
blocks.append(block)
self.blocks = nn.ModuleList(blocks)
# patch merging layer
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm)
else:
self.downsample = None
self.pointing = False
def forward(
self,
hidden_states: torch.Tensor,
input_dimensions: tuple[int, int],
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
height, width = input_dimensions
for i, layer_module in enumerate(self.blocks):
layer_outputs = layer_module(
hidden_states,
input_dimensions,
output_attentions,
)
hidden_states = layer_outputs[0]
hidden_states_before_downsampling = hidden_states
if self.downsample is not None:
height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2
output_dimensions = (height, width, height_downsampled, width_downsampled)
hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions)
else:
output_dimensions = (height, width, height, width)
stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions)
if output_attentions:
stage_outputs += layer_outputs[1:]
return stage_outputs
| Swinv2Stage |
python | scrapy__scrapy | tests/test_utils_trackref.py | {
"start": 131,
"end": 174
} | class ____(trackref.object_ref):
pass
| Foo |
python | python-openxml__python-docx | src/docx/styles/style.py | {
"start": 5131,
"end": 6141
} | class ____(BaseStyle):
"""A character style.
A character style is applied to a |Run| object and primarily provides character-
level formatting via the |Font| object in its :attr:`.font` property.
"""
@property
def base_style(self):
"""Style object this style inherits from or |None| if this style is not based on
another style."""
base_style = self._element.base_style
if base_style is None:
return None
return StyleFactory(base_style)
@base_style.setter
def base_style(self, style):
style_id = style.style_id if style is not None else None
self._element.basedOn_val = style_id
@property
def font(self):
"""The |Font| object providing access to the character formatting properties for
this style, such as font name and size."""
return Font(self._element)
# -- just in case someone uses the old name in an extension function --
_CharacterStyle = CharacterStyle
| CharacterStyle |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeParams1.py | {
"start": 568,
"end": 607
} | class ____[T](list["T"]):
pass
| ClassG |
python | boto__boto3 | boto3/docs/docstring.py | {
"start": 1042,
"end": 1178
} | class ____(LazyLoadedDocstring):
def _write_docstring(self, *args, **kwargs):
document_action(*args, **kwargs)
| ActionDocstring |
python | getsentry__sentry | tests/sentry/integrations/vsts/test_repository.py | {
"start": 599,
"end": 4748
} | class ____(TestCase):
def setUp(self) -> None:
self.base_url = "https://visualstudio.com/"
self.vsts_external_id = "654321"
@cached_property
def provider(self):
return VstsRepositoryProvider("integrations:vsts")
@responses.activate
def test_compare_commits(self) -> None:
responses.add(
responses.POST,
"https://visualstudio.com/_apis/git/repositories/None/commitsBatch",
body=COMPARE_COMMITS_EXAMPLE,
)
responses.add(
responses.GET,
"https://visualstudio.com/_apis/git/repositories/None/commits/6c36052c58bde5e57040ebe6bdb9f6a52c906fff/changes",
body=FILE_CHANGES_EXAMPLE,
)
responses.add(
responses.GET,
"https://visualstudio.com/_apis/git/repositories/None/commits/6c36052c58bde5e57040ebe6bdb9f6a52c906fff",
body=COMMIT_DETAILS_EXAMPLE,
)
integration = self.create_provider_integration(
provider="vsts",
external_id=self.vsts_external_id,
name="Hello world",
metadata={"domain_name": self.base_url},
)
default_auth = Identity.objects.create(
idp=self.create_identity_provider(type="vsts"),
user=self.user,
external_id="123",
data={
"access_token": "123456789",
"expires": int(time()) + 3600,
"refresh_token": "rxxx-xxxx",
"token_type": "jwt-bearer",
},
)
integration.add_organization(self.organization, self.user, default_auth.id)
with assume_test_silo_mode(SiloMode.REGION):
repo = Repository.objects.create(
provider="visualstudio",
name="example",
organization_id=self.organization.id,
config={"instance": self.base_url, "project": "project-name", "name": "example"},
integration_id=integration.id,
)
res = self.provider.compare_commits(repo, "a", "b")
assert res == [
{
"patch_set": [{"path": "/README.md", "type": "M"}],
"author_email": "max@sentry.io",
"author_name": "max bittker",
"message": "Updated README.md\n\nSecond line\n\nFixes SENTRY-1",
"id": "6c36052c58bde5e57040ebe6bdb9f6a52c906fff",
"repository": "example",
"timestamp": datetime.datetime(2018, 4, 24, 0, 3, 18, tzinfo=timezone.utc),
}
]
@responses.activate
def test_build_repository_config(self) -> None:
organization = self.create_organization()
integration = self.create_provider_integration(
provider="vsts",
external_id=self.vsts_external_id,
name="Hello world",
metadata={"domain_name": self.base_url},
)
data = {
"name": "MyFirstProject",
"external_id": "654321",
"url": "https://mbittker.visualstudio.com/_git/MyFirstProject/",
"instance": self.base_url,
"project": "MyFirstProject",
"installation": integration.id,
}
data = self.provider.build_repository_config(organization, data)
assert data == {
"name": "MyFirstProject",
"external_id": self.vsts_external_id,
"url": "https://mbittker.visualstudio.com/_git/MyFirstProject/",
"config": {
"project": "MyFirstProject",
"name": "MyFirstProject",
"instance": self.base_url,
},
"integration_id": integration.id,
}
def test_repository_external_slug(self) -> None:
repo = Repository(
name="MyFirstProject",
url="https://mbittker.visualstudio.com/_git/MyFirstProject/",
external_id=self.vsts_external_id,
)
result = self.provider.repository_external_slug(repo)
assert result == repo.external_id
@control_silo_test
| VisualStudioRepositoryProviderTest |
python | huggingface__transformers | src/transformers/models/mbart/modeling_mbart.py | {
"start": 53133,
"end": 59703
} | class ____(MBartPreTrainedModel):
def __init__(self, config: MBartConfig, **kwargs):
super().__init__(config, **kwargs)
self.model = MBartModel(config)
self.classification_head = MBartClassificationHead(
config.d_model,
config.d_model,
config.num_labels,
config.classifier_dropout,
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
# Copied from transformers.models.bart.modeling_bart.BartForSequenceClassification.forward
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[list[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[tuple, Seq2SeqSequenceClassifierOutput]:
r"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
Bart uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
For translation and summarization training, `decoder_input_ids` should be provided. If no
`decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
for denoising pre-training following the paper.
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should read [`modeling_bart._prepare_decoder_attention_mask`]
and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more
information on the default strategy.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
if input_ids is None and inputs_embeds is not None:
raise NotImplementedError(
f"Passing input embeddings is currently not supported for {self.__class__.__name__}"
)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
hidden_states = outputs[0] # last hidden state
eos_mask = input_ids.eq(self.config.eos_token_id).to(hidden_states.device)
if len(torch.unique_consecutive(eos_mask.sum(1))) > 1:
raise ValueError("All examples must have the same number of <eos> tokens.")
sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[
:, -1, :
]
logits = self.classification_head(sentence_representation)
loss = None
if labels is not None:
labels = labels.to(logits.device)
if self.config.problem_type is None:
if self.config.num_labels == 1:
self.config.problem_type = "regression"
elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.config.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return Seq2SeqSequenceClassifierOutput(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
@auto_docstring
| MBartForSequenceClassification |
python | huggingface__transformers | src/transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py | {
"start": 1240,
"end": 1337
} | class ____(DepthAnythingConfig):
model_type = "prompt_depth_anything"
| PromptDepthAnythingConfig |
python | pandas-dev__pandas | asv_bench/benchmarks/io/csv.py | {
"start": 18527,
"end": 18813
} | class ____(StringIORewind):
def setup(self):
count_elem = 100_000
data = "a,b\n" + "1,2\n" * count_elem
self.StringIO_input = StringIO(data)
def time_read_csv_index_col(self):
read_csv(self.data(self.StringIO_input), index_col="a")
| ReadCSVIndexCol |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 28127,
"end": 28566
} | class ____(VOTableSpecWarning):
"""
The ``name`` and ``value`` attributes are required on all ``INFO``
elements.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC32>`__
"""
message_template = "'{}' attribute required for INFO elements"
default_args = ("x",)
| W35 |
python | jina-ai__jina | tests/unit/orchestrate/flow/flow-orchestrate/test_flow_docarray_return.py | {
"start": 154,
"end": 2181
} | class ____(Executor):
@requests
def add_text(self, docs, **kwargs):
docs[0].text = 'Hello World!'
def test_simple_docarray_return():
f = Flow().add(uses=SimplExecutor)
with f:
docs = f.post(on='/index', inputs=[Document()])
assert docs[0].text == 'Hello World!'
def test_flatten_docarrays():
f = Flow().add(uses=SimplExecutor)
with f:
docs = f.post(
on='/index',
inputs=[Document() for _ in range(100)],
request_size=10,
)
assert isinstance(docs, DocumentArray)
assert len(docs) == 100
assert docs[0].text == 'Hello World!'
def my_cb(resp):
return resp
@pytest.mark.parametrize('on_done', [None, my_cb])
@pytest.mark.parametrize('on_always', [None, my_cb])
@pytest.mark.parametrize('on_error', [None, my_cb])
def test_automatically_set_returnresults(on_done, on_always, on_error):
f = Flow().add(uses=SimplExecutor)
with f:
docs = f.post(
on='/index',
inputs=[Document() for _ in range(100)],
request_size=10,
on_done=on_done,
on_always=on_always,
on_error=on_error,
)
if on_done is None and on_always is None:
assert isinstance(docs, DocumentArray)
assert len(docs) == 100
assert docs[0].text == 'Hello World!'
else:
assert docs is None
def test_empty_docarray():
f = Flow().add(uses=SimplExecutor)
with pytest.raises(BadServer):
with f:
f.post(on='/')
def test_flow_client_defaults(port_generator):
exposed_port = port_generator()
f = Flow(port=exposed_port).add(uses=SimplExecutor)
c = Client(port=exposed_port)
with f:
docs = f.post(on='/index', inputs=[Document()])
results = c.post(on='/index', inputs=[Document()])
assert isinstance(docs, DocumentArray)
assert docs[0].text == 'Hello World!'
assert isinstance(results, DocumentArray)
assert results[0].text == 'Hello World!'
| SimplExecutor |
python | huggingface__transformers | src/transformers/models/glm4v_moe/modular_glm4v_moe.py | {
"start": 22468,
"end": 22540
} | class ____(Glm4vVisionModel):
pass
@auto_docstring
| Glm4vMoeVisionModel |
python | streamlit__streamlit | lib/tests/streamlit/components/v2/test_bidi_presentation.py | {
"start": 1283,
"end": 8966
} | class ____:
def __init__(self) -> None:
self._new_widget_state = _FakeWStates()
def test_bidi_presenter_merges_events_when_present() -> None:
"""Test that the presenter correctly merges event payloads into the base state."""
ss = _FakeSession()
agg_id = "$$_internal__wid__events"
presenter = make_bidi_component_presenter(agg_id)
ss._new_widget_state.widget_metadata[agg_id] = SimpleNamespace(
value_type="json_trigger_value"
)
ss._new_widget_state._payloads[agg_id] = [
{"event": "foo", "value": True},
{"event": "bar", "value": 123},
]
base = {"alpha": 1}
out = presenter(base, ss)
assert dict(out) == {"alpha": 1, "foo": True, "bar": 123}
def test_bidi_presenter_handles_non_list_payload() -> None:
"""Test that the presenter can handle a single, non-list event payload."""
ss = _FakeSession()
agg_id = "$$_internal__wid__events"
presenter = make_bidi_component_presenter(agg_id)
ss._new_widget_state.widget_metadata[agg_id] = SimpleNamespace(
value_type="json_trigger_value"
)
ss._new_widget_state._payloads[agg_id] = {"event": "foo", "value": "x"}
base = {}
out = presenter(base, ss)
assert dict(out) == {"foo": "x"}
def test_bidi_presenter_returns_base_on_missing_meta_or_wrong_type() -> None:
"""Test that the presenter returns the base value if metadata is missing or incorrect."""
ss = _FakeSession()
agg_id = "$$_internal__wid__events"
presenter = make_bidi_component_presenter(agg_id)
base = {"value": {"beta": 2}}
# No metadata
assert presenter(base, ss) == base
# Wrong value type
ss._new_widget_state.widget_metadata[agg_id] = SimpleNamespace(value_type="json")
assert presenter(base, ss) == base
def test_bidi_presenter_returns_base_on_non_canonical_state_shape() -> None:
"""Test that the presenter returns the base value if the state shape is not canonical."""
ss = _FakeSession()
agg_id = "$$_internal__wid__events"
presenter = make_bidi_component_presenter(agg_id)
ss._new_widget_state.widget_metadata[agg_id] = SimpleNamespace(
value_type="json_trigger_value"
)
base = {"not_value": {}}
assert presenter(base, ss) == base
def test_setitem_disallows_setting_created_widget():
"""Test that __setitem__ disallows setting a created widget."""
mock_session_state = MagicMock(spec=SessionState)
mock_session_state._key_id_mapper = MagicMock()
mock_session_state._key_id_mapper.get_key_from_id.return_value = "test_key"
mock_session_state._new_widget_state = MagicMock()
mock_session_state._new_widget_state.widget_metadata.get.return_value = MagicMock(
value_type="json_trigger_value"
)
mock_ctx = MagicMock()
mock_ctx.widget_ids_this_run = {"test_component_id"}
mock_ctx.form_ids_this_run = set()
presenter = make_bidi_component_presenter(
aggregator_id="test_aggregator_id",
component_id="test_component_id",
)
write_through_dict = presenter({}, mock_session_state)
with patch(
"streamlit.components.v2.presentation.get_script_run_ctx",
return_value=mock_ctx,
):
with pytest.raises(StreamlitAPIException) as e:
write_through_dict["value"] = "new_value"
assert (
"`st.session_state.test_key.value` cannot be modified after the component"
in str(e.value)
)
def test_delitem_disallows_deleting_from_created_widget():
"""Test that __delitem__ disallows deleting from a created widget."""
mock_session_state = MagicMock(spec=SessionState)
mock_session_state._key_id_mapper = MagicMock()
mock_session_state._key_id_mapper.get_key_from_id.return_value = "test_key"
mock_session_state._new_widget_state = MagicMock()
mock_session_state._new_widget_state.widget_metadata.get.return_value = MagicMock(
value_type="json_trigger_value"
)
mock_ctx = MagicMock()
mock_ctx.widget_ids_this_run = {"test_component_id"}
mock_ctx.form_ids_this_run = set()
presenter = make_bidi_component_presenter(
aggregator_id="test_aggregator_id",
component_id="test_component_id",
)
write_through_dict = presenter({"value": "old_value"}, mock_session_state)
with patch(
"streamlit.components.v2.presentation.get_script_run_ctx",
return_value=mock_ctx,
):
with pytest.raises(StreamlitAPIException) as e:
del write_through_dict["value"]
assert (
"`st.session_state.test_key.value` cannot be modified after the component"
in str(e.value)
)
def test_setitem_disallows_setting_widget_in_form():
"""Test that __setitem__ disallows setting a widget in a form."""
mock_session_state = MagicMock(spec=SessionState)
mock_session_state._key_id_mapper = MagicMock()
mock_session_state._key_id_mapper.get_key_from_id.return_value = "test_key"
mock_session_state._new_widget_state = MagicMock()
mock_session_state._new_widget_state.widget_metadata.get.return_value = MagicMock(
value_type="json_trigger_value"
)
mock_ctx = MagicMock()
mock_ctx.widget_ids_this_run = set()
mock_ctx.form_ids_this_run = {"test_key"}
presenter = make_bidi_component_presenter(
aggregator_id="test_aggregator_id",
component_id="test_component_id",
)
write_through_dict = presenter({}, mock_session_state)
with patch(
"streamlit.components.v2.presentation.get_script_run_ctx",
return_value=mock_ctx,
):
with pytest.raises(StreamlitAPIException) as e:
write_through_dict["value"] = "new_value"
assert (
"`st.session_state.test_key.value` cannot be modified after the component"
in str(e.value)
)
def test_setitem_allows_setting_before_widget_creation():
"""Test that __setitem__ allows setting state before widget creation."""
mock_session_state = MagicMock(spec=SessionState)
mock_session_state._key_id_mapper = MagicMock()
mock_session_state._key_id_mapper.get_key_from_id.return_value = "test_key"
mock_session_state._new_widget_state = MagicMock()
mock_session_state._new_widget_state.widget_metadata.get.return_value = MagicMock(
value_type="json_trigger_value"
)
mock_ctx = MagicMock()
mock_ctx.widget_ids_this_run = set()
mock_ctx.form_ids_this_run = set()
presenter = make_bidi_component_presenter(
aggregator_id="test_aggregator_id",
component_id="test_component_id",
)
write_through_dict = presenter({}, mock_session_state)
with patch(
"streamlit.components.v2.presentation.get_script_run_ctx",
return_value=mock_ctx,
):
try:
write_through_dict["value"] = "new_value"
except StreamlitAPIException as e:
pytest.fail(f"Setting state before creation raised an exception: {e}")
def test_deepcopy_returns_self():
    """Test that deepcopy returns the same object."""
    session_state = MagicMock(spec=SessionState)
    session_state._key_id_mapper = MagicMock()
    session_state._new_widget_state = MagicMock()
    session_state._new_widget_state.widget_metadata.get.return_value = MagicMock(
        value_type="json_trigger_value"
    )

    presenter = make_bidi_component_presenter(
        aggregator_id="test_aggregator_id",
        component_id="test_component_id",
    )
    write_through = presenter({}, session_state)

    # deepcopy must hand back the identical object, not a clone.
    assert copy.deepcopy(write_through) is write_through
| _FakeSession |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_foreign_keys_in_column_a_to_exist_in_column_b.py | {
"start": 1749,
"end": 9980
} | class ____(ColumnMapExpectation):
"""Expect foreign keys in a column to exist in another specified column.
Ensure that values in the column of interest (ColumnA) are in a valueset provided as a dataframe (df parameter) + column (column_B parameter) or as a list of elements supported by pandas.DataFrame() (e.g. list of dicts [{"col_name": value},], list of tuples [(value, value), (value, value)]. This is a very experimental implementation to describe the functionality, but this expectation should be revisited once cross-table expectation templates are available.
"""
examples = [
{
# "type": "expect_column_values_to_be_in_set",
"data": {
"x": [1, 2, 4],
"y": [1.1, 2.2, 5.5],
"z": ["hello", "jello", "mello"],
},
"tests": [
{
"title": "basic_positive_test_case_number_set",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "x",
"df": [{"fk_col": 1}, {"fk_col": 2}, {"fk_col": 4}],
"column_B": "fk_col",
},
"out": {"success": True},
},
{
"title": "basic_negative_test_case_number_set",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "x",
"df": [{"fk_col": 1}, {"fk_col": 2}, {"fk_col": 7}],
"column_B": "fk_col",
},
"out": {"success": False},
},
],
}
]
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"experimental",
"help_wanted",
], # Tags for this Expectation in the gallery
"contributors": ["@robertparker"],
}
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.foreign_key_in_other_col"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
# Please see {some doc} for more information about domain and success keys, and other arguments to Expectations
success_keys = ("df", "column_B")
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This method defines a question Renderer
# For more info on Renderers, see {some doc}
#!!! This example renderer should render RenderedStringTemplateContent, not just a string
# @classmethod
# @renderer(renderer_type="renderer.question")
# def _question_renderer(
# cls, configuration, result=None, runtime_configuration=None
# ):
# column = configuration.kwargs.get("column")
# mostly = configuration.kwargs.get("mostly")
# return f'Do at least {mostly * 100}% of values in column "{column}" equal 3?'
# This method defines an answer Renderer
#!!! This example renderer should render RenderedStringTemplateContent, not just a string
# @classmethod
# @renderer(renderer_type="renderer.answer")
# def _answer_renderer(
# cls, configuration=None, result=None, runtime_configuration=None
# ):
# column = result.expectation_config.kwargs.get("column")
# mostly = result.expectation_config.kwargs.get("mostly")
# regex = result.expectation_config.kwargs.get("regex")
# if result.success:
# return f'At least {mostly * 100}% of values in column "{column}" equal 3.'
# else:
# return f'Less than {mostly * 100}% of values in column "{column}" equal 3.'
# This method defines a prescriptive Renderer
# @classmethod
# @renderer(renderer_type="renderer.prescriptive")
# @render_suite_parameter_string
# def _prescriptive_renderer(
# cls,
# configuration=None,
# result=None,
# runtime_configuration=None,
# **kwargs,
# ):
#!!! This example renderer should be shorter
# runtime_configuration = runtime_configuration or {}
# include_column_name = False if runtime_configuration.get("include_column_name") is False else True
# styling = runtime_configuration.get("styling")
# params = substitute_none_for_missing(
# configuration.kwargs,
# ["column", "regex", "mostly", "row_condition", "condition_parser"],
# )
# template_str = "values must be equal to 3"
# if params["mostly"] is not None:
# params["mostly_pct"] = num_to_str(
# params["mostly"] * 100, precision=15, no_scientific=True
# )
# # params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
# template_str += ", at least $mostly_pct % of the time."
# else:
# template_str += "."
# if include_column_name:
# template_str = "$column " + template_str
# if params["row_condition"] is not None:
# (
# conditional_template_str,
# conditional_params,
# ) = parse_row_condition_string_pandas_engine(params["row_condition"])
# template_str = conditional_template_str + ", then " + template_str
# params.update(conditional_params)
# return [
# RenderedStringTemplateContent(
# **{
# "content_block_type": "string_template",
# "string_template": {
# "template": template_str,
# "params": params,
# "styling": styling,
# },
# }
# )
# ]
# def get_validation_dependencies(
# self,
# configuration: Optional[ExpectationConfiguration] = None,
# execution_engine: Optional[ExecutionEngine] = None,
# runtime_configuration: Optional[dict] = None,
# ):
# dependencies = super().get_validation_dependencies(
# configuration, execution_engine, runtime_configuration
# )
# # get other_table_name kwarg
# # get the column_B kwarg
# # get metric expect_column_values_to_be_unique
# other_table_name = configuration.kwargs.get("other_table_name")
# # create copy of table.row_count metric and modify "table" metric domain kwarg to be other table name
# table_row_count_metric_config_other = deepcopy(
# dependencies["metrics"]["table.row_count"]
# )
# table_row_count_metric_config_other.metric_domain_kwargs[
# "table"
# ] = other_table_name
# # rename original "table.row_count" metric to "table.row_count.self"
# dependencies["metrics"]["table.row_count.self"] = dependencies["metrics"].pop(
# "table.row_count"
# )
# # add a new metric dependency named "table.row_count.other" with modified metric config
# dependencies["metrics"][
# "table.row_count.other"
# ] = table_row_count_metric_config_other
# return dependencies
# def _validate(
# self,
# configuration: ExpectationConfiguration,
# metrics: Dict,
# runtime_configuration: dict = None,
# execution_engine: ExecutionEngine = None,
# ):
# table_row_count_self = metrics["table.row_count.self"]
# table_row_count_other = metrics["table.row_count.other"]
# return {
# "success": table_row_count_self == table_row_count_other,
# "result": {
# "observed_value": {
# "self": table_row_count_self,
# "other": table_row_count_other,
# }
# },
# }
if __name__ == "__main__":
ExpectForeignKeysInColumnAToExistInColumnB().print_diagnostic_checklist()
| ExpectForeignKeysInColumnAToExistInColumnB |
python | scipy__scipy | scipy/linalg/tests/test_special_matrices.py | {
"start": 791,
"end": 2214
} | class ____:
def test_basic(self):
y = toeplitz([1, 2, 3])
assert_array_equal(y, [[1, 2, 3], [2, 1, 2], [3, 2, 1]])
y = toeplitz([1, 2, 3], [1, 4, 5])
assert_array_equal(y, [[1, 4, 5], [2, 1, 4], [3, 2, 1]])
def test_complex_01(self):
data = (1.0 + arange(3.0)) * (1.0 + 1.0j)
x = copy(data)
t = toeplitz(x)
# Calling toeplitz should not change x.
assert_array_equal(x, data)
# According to the docstring, x should be the first column of t.
col0 = t[:, 0]
assert_array_equal(col0, data)
assert_array_equal(t[0, 1:], data[1:].conj())
def test_scalar_00(self):
"""Scalar arguments still produce a 2D array."""
t = toeplitz(10)
assert_array_equal(t, [[10]])
t = toeplitz(10, 20)
assert_array_equal(t, [[10]])
def test_scalar_01(self):
c = array([1, 2, 3])
t = toeplitz(c, 1)
assert_array_equal(t, [[1], [2], [3]])
def test_scalar_02(self):
c = array([1, 2, 3])
t = toeplitz(c, array(1))
assert_array_equal(t, [[1], [2], [3]])
def test_scalar_03(self):
c = array([1, 2, 3])
t = toeplitz(c, array([1]))
assert_array_equal(t, [[1], [2], [3]])
def test_scalar_04(self):
r = array([10, 2, 3])
t = toeplitz(1, r)
assert_array_equal(t, [[1, 2, 3]])
| TestToeplitz |
python | scipy__scipy | scipy/sparse/tests/test_base.py | {
"start": 221578,
"end": 221658
} | class ____(TestCSRNonCanonical, TestCSRMatrix):
    # Combines the non-canonical-CSR behavior checks with the spmatrix-based
    # CSR fixture; all test methods are inherited from the two base classes.
    pass
| TestCSRNonCanonicalMatrix |
python | great-expectations__great_expectations | tests/integration/metrics/column/test_values_not_match_regex_values.py | {
"start": 823,
"end": 3342
} | class ____:
@parameterize_batch_for_data_sources(
data_source_configs=SQL_DATA_SOURCES_EXCEPT_SNOWFLAKE,
data=DATA_FRAME,
)
def test_partial_match_characters(self, batch_for_datasource: Batch) -> None:
metric = ColumnValuesNotMatchRegexValues(column=COLUMN_NAME, regex="ab")
metric_result = batch_for_datasource.compute_metrics(metric)
assert isinstance(metric_result, ColumnValuesNotMatchRegexValuesResult)
# Expect values that DO NOT contain 'ab'
assert sorted(metric_result.value) == ["def", "ghi"]
@parameterize_batch_for_data_sources(
data_source_configs=SQL_DATA_SOURCES,
data=DATA_FRAME,
)
def test_special_characters(self, batch_for_datasource: Batch) -> None:
metric = ColumnValuesNotMatchRegexValues(column=COLUMN_NAME, regex="^(a|d).+")
metric_result = batch_for_datasource.compute_metrics(metric)
assert isinstance(metric_result, ColumnValuesNotMatchRegexValuesResult)
# Expect values that DO NOT start with 'a' or 'd'
assert sorted(metric_result.value) == ["1ab2", "ghi"]
@parameterize_batch_for_data_sources(
data_source_configs=SQL_DATA_SOURCES,
data=DATA_FRAME_WITH_LOTS_OF_VALUES,
)
def test_default_limit(self, batch_for_datasource: Batch) -> None:
# Use a regex that matches nothing to get all values back
metric = ColumnValuesNotMatchRegexValues(column=COLUMN_NAME, regex=MATCH_NONE_REGEX)
metric_result = batch_for_datasource.compute_metrics(metric)
assert isinstance(metric_result, ColumnValuesNotMatchRegexValuesResult)
# Should return up to the default limit (20)
assert len(metric_result.value) == 20
assert all(val == "A" for val in metric_result.value)
@parameterize_batch_for_data_sources(
data_source_configs=SQL_DATA_SOURCES,
data=DATA_FRAME_WITH_LOTS_OF_VALUES,
)
def test_custom_limit(self, batch_for_datasource: Batch) -> None:
limit = 7
# Use a regex that matches nothing to get all values back
metric = ColumnValuesNotMatchRegexValues(
column=COLUMN_NAME, regex=MATCH_NONE_REGEX, limit=limit
)
metric_result = batch_for_datasource.compute_metrics(metric)
assert isinstance(metric_result, ColumnValuesNotMatchRegexValuesResult)
assert len(metric_result.value) == limit
assert all(val == "A" for val in metric_result.value)
| TestColumnValuesNotMatchRegexValues |
python | django__django | tests/expressions/models.py | {
"start": 1783,
"end": 2237
} | class ____(models.Model):
    """Test model mixing date, datetime, duration and nullable integer fields."""

    # Display label; also the default ordering key (see Meta.ordering).
    name = models.CharField(max_length=24)
    assigned = models.DateField()
    completed = models.DateField()
    estimated_time = models.DurationField()
    start = models.DateTimeField()
    end = models.DateTimeField()
    # Nullable integer used for expression arithmetic in tests.
    scalar = models.IntegerField(null=True)

    class Meta:
        # NOTE(review): mixed-case table name looks deliberate — presumably
        # exercises identifier quoting/case handling; confirm against callers.
        db_table = "expressions_ExPeRiMeNt"
        ordering = ("name",)

    def duration(self):
        """Return the elapsed time between start and end as a timedelta."""
        return self.end - self.start
| Experiment |
python | astropy__astropy | astropy/units/tests/test_format.py | {
"start": 11987,
"end": 12472
} | class ____(RoundtripBase):
format_ = u_format.Generic
@pytest.mark.parametrize(
"unit",
[
unit
for unit in u.__dict__.values()
if (isinstance(unit, UnitBase) and not isinstance(unit, PrefixUnit))
],
ids=str,
)
def test_roundtrip(self, unit):
self.check_roundtrip(unit)
self.check_roundtrip(unit, output_format="unicode")
self.check_roundtrip_decompose(unit)
| TestRoundtripGeneric |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.