language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | aio-libs__aiohttp | aiohttp/compression_utils.py | {
"start": 1902,
"end": 1999
} | class ____(TypedDict, total=False):
wbits: int
strategy: int
level: int
| CompressObjArgs |
python | optuna__optuna | optuna/distributions.py | {
"start": 17111,
"end": 27625
} | class ____(BaseDistribution):
"""A categorical distribution.
This object is instantiated by :func:`~optuna.trial.Trial.suggest_categorical`, and
passed to :mod:`~optuna.samplers` in general.
Args:
choices:
Parameter value candidates. ``choices`` must have one element at least.
.. note::
Not all types are guaranteed to be compatible with all storages. It is recommended to
restrict the types of the choices to :obj:`None`, :class:`bool`, :class:`int`,
:class:`float` and :class:`str`.
Attributes:
choices:
Parameter value candidates.
"""
def __init__(self, choices: Sequence[CategoricalChoiceType]) -> None:
if len(choices) == 0:
raise ValueError("The `choices` must contain one or more elements.")
for choice in choices:
if choice is not None and not isinstance(choice, (bool, int, float, str)):
message = (
"Choices for a categorical distribution should be a tuple of None, bool, int, "
"float and str for persistent storage but contains "
f"{choice} which is of type {type(choice).__name__}."
)
optuna_warn(message)
self.choices = tuple(choices)
def to_external_repr(self, param_value_in_internal_repr: float) -> CategoricalChoiceType:
return self.choices[int(param_value_in_internal_repr)]
def to_internal_repr(self, param_value_in_external_repr: CategoricalChoiceType) -> float:
try:
# NOTE(nabenabe): With this implementation, we cannot distinguish some values
# such as True and 1, or 1.0 and 1. For example, if choices=[True, 1] and external_repr
# is 1, this method wrongly returns 0 instead of 1. However, we decided to accept this
# bug for such exceptional choices for less complexity and faster processing.
return self.choices.index(param_value_in_external_repr)
except ValueError: # ValueError: param_value_in_external_repr is not in choices.
# ValueError also happens if external_repr is nan or includes precision error in float.
for index, choice in enumerate(self.choices):
if _categorical_choice_equal(param_value_in_external_repr, choice):
return index
raise ValueError(f"'{param_value_in_external_repr}' not in {self.choices}.")
def single(self) -> bool:
return len(self.choices) == 1
def _contains(self, param_value_in_internal_repr: float) -> bool:
index = int(param_value_in_internal_repr)
return 0 <= index < len(self.choices)
def __eq__(self, other: Any) -> bool:
if not isinstance(other, BaseDistribution):
return NotImplemented
if not isinstance(other, self.__class__):
return False
if self.__dict__.keys() != other.__dict__.keys():
return False
for key, value in self.__dict__.items():
if key == "choices":
if len(value) != len(getattr(other, key)):
return False
for choice, other_choice in zip(value, getattr(other, key)):
if not _categorical_choice_equal(choice, other_choice):
return False
else:
if value != getattr(other, key):
return False
return True
__hash__ = BaseDistribution.__hash__
DISTRIBUTION_CLASSES = (
IntDistribution,
IntLogUniformDistribution,
IntUniformDistribution,
FloatDistribution,
UniformDistribution,
LogUniformDistribution,
DiscreteUniformDistribution,
CategoricalDistribution,
)
def json_to_distribution(json_str: str) -> BaseDistribution:
"""Deserialize a distribution in JSON format.
Args:
json_str: A JSON-serialized distribution.
Returns:
A deserialized distribution.
"""
json_dict = json.loads(json_str)
if "name" in json_dict:
if json_dict["name"] == CategoricalDistribution.__name__:
json_dict["attributes"]["choices"] = tuple(json_dict["attributes"]["choices"])
for cls in DISTRIBUTION_CLASSES:
if json_dict["name"] == cls.__name__:
return cls(**json_dict["attributes"])
raise ValueError(f"Unknown distribution class: {json_dict['name']}")
else:
# Deserialize a distribution from an abbreviated format.
if json_dict["type"] == "categorical":
return CategoricalDistribution(json_dict["choices"])
elif json_dict["type"] in ("float", "int"):
low = json_dict["low"]
high = json_dict["high"]
step = json_dict.get("step")
log = json_dict.get("log", False)
if json_dict["type"] == "float":
return FloatDistribution(low, high, log=log, step=step)
else:
if step is None:
step = 1
return IntDistribution(low=low, high=high, log=log, step=step)
raise ValueError(f"Unknown distribution type: {json_dict['type']}")
def distribution_to_json(dist: BaseDistribution) -> str:
"""Serialize a distribution to JSON format.
Args:
dist: A distribution to be serialized.
Returns:
A JSON string of a given distribution.
"""
return json.dumps({"name": dist.__class__.__name__, "attributes": dist._asdict()})
def check_distribution_compatibility(
dist_old: BaseDistribution, dist_new: BaseDistribution
) -> None:
"""A function to check compatibility of two distributions.
It checks whether ``dist_old`` and ``dist_new`` are the same kind of distributions.
If ``dist_old`` is :class:`~optuna.distributions.CategoricalDistribution`,
it further checks ``choices`` are the same between ``dist_old`` and ``dist_new``.
Note that this method is not supposed to be called by library users.
Args:
dist_old:
A distribution previously recorded in storage.
dist_new:
A distribution newly added to storage.
"""
if dist_old.__class__ != dist_new.__class__:
raise ValueError("Cannot set different distribution kind to the same parameter name.")
if isinstance(dist_old, (FloatDistribution, IntDistribution)):
# For mypy.
assert isinstance(dist_new, (FloatDistribution, IntDistribution))
if dist_old.log != dist_new.log:
raise ValueError("Cannot set different log configuration to the same parameter name.")
if not isinstance(dist_old, CategoricalDistribution):
return
if not isinstance(dist_new, CategoricalDistribution):
return
if dist_old != dist_new:
raise ValueError(
CategoricalDistribution.__name__ + " does not support dynamic value space."
)
def _adjust_discrete_uniform_high(low: float, high: float, step: float) -> float:
d_high = decimal.Decimal(str(high))
d_low = decimal.Decimal(str(low))
d_step = decimal.Decimal(str(step))
d_r = d_high - d_low
if d_r % d_step != decimal.Decimal("0"):
old_high = high
high = float((d_r // d_step) * d_step + d_low)
optuna_warn(
f"The distribution is specified by [{low}, {old_high}] and {step=}, but the range is "
f"not divisible by `step`. It will be replaced with [{low}, {high}]."
)
return high
def _adjust_int_uniform_high(low: int, high: int, step: int) -> int:
r = high - low
if r % step != 0:
old_high = high
high = r // step * step + low
optuna_warn(
f"The distribution is specified by [{low}, {old_high}] and {step=}, but the range is "
f"not divisible by `step`. It will be replaced with [{low}, {high}]."
)
return high
def _get_single_value(distribution: BaseDistribution) -> int | float | CategoricalChoiceType:
assert distribution.single()
if isinstance(
distribution,
(
FloatDistribution,
IntDistribution,
),
):
return distribution.low
elif isinstance(distribution, CategoricalDistribution):
return distribution.choices[0]
assert False
# TODO(himkt): Remove this method with the deletion of deprecated distributions.
# https://github.com/optuna/optuna/issues/2941
def _convert_old_distribution_to_new_distribution(
distribution: BaseDistribution,
suppress_warning: bool = False,
) -> BaseDistribution:
new_distribution: BaseDistribution
# Float distributions.
if isinstance(distribution, UniformDistribution):
new_distribution = FloatDistribution(
low=distribution.low,
high=distribution.high,
log=False,
step=None,
)
elif isinstance(distribution, LogUniformDistribution):
new_distribution = FloatDistribution(
low=distribution.low,
high=distribution.high,
log=True,
step=None,
)
elif isinstance(distribution, DiscreteUniformDistribution):
new_distribution = FloatDistribution(
low=distribution.low,
high=distribution.high,
log=False,
step=distribution.q,
)
# Integer distributions.
elif isinstance(distribution, IntUniformDistribution):
new_distribution = IntDistribution(
low=distribution.low,
high=distribution.high,
log=False,
step=distribution.step,
)
elif isinstance(distribution, IntLogUniformDistribution):
new_distribution = IntDistribution(
low=distribution.low,
high=distribution.high,
log=True,
step=distribution.step,
)
# Categorical distribution.
else:
new_distribution = distribution
if new_distribution != distribution and not suppress_warning:
message = (
f"{distribution} is deprecated and internally converted to"
f" {new_distribution}. See https://github.com/optuna/optuna/issues/2941."
)
optuna_warn(message, FutureWarning)
return new_distribution
def _is_distribution_log(distribution: BaseDistribution) -> bool:
if isinstance(distribution, (FloatDistribution, IntDistribution)):
return distribution.log
return False
| CategoricalDistribution |
python | numpy__numpy | numpy/lib/tests/test_recfunctions.py | {
"start": 796,
"end": 16876
} | class ____:
# Misc tests
def test_zip_descr(self):
# Test zip_descr
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array([('A', 1.), ('B', 2.)],
dtype=[('A', '|S3'), ('B', float)])
w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
# Std array
test = zip_descr((x, x), flatten=True)
assert_equal(test,
np.dtype([('', int), ('', int)]))
test = zip_descr((x, x), flatten=False)
assert_equal(test,
np.dtype([('', int), ('', int)]))
# Std & flexible-dtype
test = zip_descr((x, z), flatten=True)
assert_equal(test,
np.dtype([('', int), ('A', '|S3'), ('B', float)]))
test = zip_descr((x, z), flatten=False)
assert_equal(test,
np.dtype([('', int),
('', [('A', '|S3'), ('B', float)])]))
# Standard & nested dtype
test = zip_descr((x, w), flatten=True)
assert_equal(test,
np.dtype([('', int),
('a', int),
('ba', float), ('bb', int)]))
test = zip_descr((x, w), flatten=False)
assert_equal(test,
np.dtype([('', int),
('', [('a', int),
('b', [('ba', float), ('bb', int)])])]))
def test_drop_fields(self):
# Test drop_fields
a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
# A basic field
test = drop_fields(a, 'a')
control = np.array([((2, 3.0),), ((5, 6.0),)],
dtype=[('b', [('ba', float), ('bb', int)])])
assert_equal(test, control)
# Another basic field (but nesting two fields)
test = drop_fields(a, 'b')
control = np.array([(1,), (4,)], dtype=[('a', int)])
assert_equal(test, control)
# A nested sub-field
test = drop_fields(a, ['ba', ])
control = np.array([(1, (3.0,)), (4, (6.0,))],
dtype=[('a', int), ('b', [('bb', int)])])
assert_equal(test, control)
# All the nested sub-field from a field: zap that field
test = drop_fields(a, ['ba', 'bb'])
control = np.array([(1,), (4,)], dtype=[('a', int)])
assert_equal(test, control)
# dropping all fields results in an array with no fields
test = drop_fields(a, ['a', 'b'])
control = np.array([(), ()], dtype=[])
assert_equal(test, control)
def test_rename_fields(self):
# Test rename fields
a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
dtype=[('a', int),
('b', [('ba', float), ('bb', (float, 2))])])
test = rename_fields(a, {'a': 'A', 'bb': 'BB'})
newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])]
control = a.view(newdtype)
assert_equal(test.dtype, newdtype)
assert_equal(test, control)
def test_get_names(self):
# Test get_names
ndtype = np.dtype([('A', '|S3'), ('B', float)])
test = get_names(ndtype)
assert_equal(test, ('A', 'B'))
ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
test = get_names(ndtype)
assert_equal(test, ('a', ('b', ('ba', 'bb'))))
ndtype = np.dtype([('a', int), ('b', [])])
test = get_names(ndtype)
assert_equal(test, ('a', ('b', ())))
ndtype = np.dtype([])
test = get_names(ndtype)
assert_equal(test, ())
def test_get_names_flat(self):
# Test get_names_flat
ndtype = np.dtype([('A', '|S3'), ('B', float)])
test = get_names_flat(ndtype)
assert_equal(test, ('A', 'B'))
ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
test = get_names_flat(ndtype)
assert_equal(test, ('a', 'b', 'ba', 'bb'))
ndtype = np.dtype([('a', int), ('b', [])])
test = get_names_flat(ndtype)
assert_equal(test, ('a', 'b'))
ndtype = np.dtype([])
test = get_names_flat(ndtype)
assert_equal(test, ())
def test_get_fieldstructure(self):
# Test get_fieldstructure
# No nested fields
ndtype = np.dtype([('A', '|S3'), ('B', float)])
test = get_fieldstructure(ndtype)
assert_equal(test, {'A': [], 'B': []})
# One 1-nested field
ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
test = get_fieldstructure(ndtype)
assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']})
# One 2-nested fields
ndtype = np.dtype([('A', int),
('B', [('BA', int),
('BB', [('BBA', int), ('BBB', int)])])])
test = get_fieldstructure(ndtype)
control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'],
'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
assert_equal(test, control)
# 0 fields
ndtype = np.dtype([])
test = get_fieldstructure(ndtype)
assert_equal(test, {})
def test_find_duplicates(self):
# Test find_duplicates
a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')),
(1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))],
mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)),
(0, (0, 0)), (1, (0, 0)), (0, (1, 0))],
dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])])
test = find_duplicates(a, ignoremask=False, return_index=True)
control = [0, 2]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, key='A', return_index=True)
control = [0, 1, 2, 3, 5]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, key='B', return_index=True)
control = [0, 1, 2, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, key='BA', return_index=True)
control = [0, 1, 2, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, key='BB', return_index=True)
control = [0, 1, 2, 3, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
def test_find_duplicates_ignoremask(self):
# Test the ignoremask option of find_duplicates
ndtype = [('a', int)]
a = ma.array([1, 1, 1, 2, 2, 3, 3],
mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
test = find_duplicates(a, ignoremask=True, return_index=True)
control = [0, 1, 3, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, ignoremask=False, return_index=True)
control = [0, 1, 2, 3, 4, 6]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
def test_repack_fields(self):
dt = np.dtype('u1,f4,i8', align=True)
a = np.zeros(2, dtype=dt)
assert_equal(repack_fields(dt), np.dtype('u1,f4,i8'))
assert_equal(repack_fields(a).itemsize, 13)
assert_equal(repack_fields(repack_fields(dt), align=True), dt)
# make sure type is preserved
dt = np.dtype((np.record, dt))
assert_(repack_fields(dt).type is np.record)
@pytest.mark.thread_unsafe(reason="memmap is thread-unsafe (gh-29126)")
def test_structured_to_unstructured(self, tmp_path):
a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
out = structured_to_unstructured(a)
assert_equal(out, np.zeros((4, 5), dtype='f8'))
b = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)
assert_equal(out, np.array([3., 5.5, 9., 11.]))
out = np.mean(structured_to_unstructured(b[['x']]), axis=-1)
assert_equal(out, np.array([1., 4. , 7., 10.])) # noqa: E203
c = np.arange(20).reshape((4, 5))
out = unstructured_to_structured(c, a.dtype)
want = np.array([( 0, ( 1., 2), [ 3., 4.]),
( 5, ( 6., 7), [ 8., 9.]),
(10, (11., 12), [13., 14.]),
(15, (16., 17), [18., 19.])],
dtype=[('a', 'i4'),
('b', [('f0', 'f4'), ('f1', 'u2')]),
('c', 'f4', (2,))])
assert_equal(out, want)
d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
assert_equal(apply_along_fields(np.mean, d),
np.array([ 8.0 / 3, 16.0 / 3, 26.0 / 3, 11.]))
assert_equal(apply_along_fields(np.mean, d[['x', 'z']]),
np.array([ 3., 5.5, 9., 11.]))
# check that for uniform field dtypes we get a view, not a copy:
d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')])
dd = structured_to_unstructured(d)
ddd = unstructured_to_structured(dd, d.dtype)
assert_(np.shares_memory(dd, d))
assert_(np.shares_memory(ddd, d))
# check that reversing the order of attributes works
dd_attrib_rev = structured_to_unstructured(d[['z', 'x']])
assert_equal(dd_attrib_rev, [[5, 1], [7, 4], [11, 7], [12, 10]])
assert_(np.shares_memory(dd_attrib_rev, d))
# including uniform fields with subarrays unpacked
d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]),
(8, [9, 10], [[11, 12], [13, 14]])],
dtype=[('x0', 'i4'), ('x1', ('i4', 2)),
('x2', ('i4', (2, 2)))])
dd = structured_to_unstructured(d)
ddd = unstructured_to_structured(dd, d.dtype)
assert_(np.shares_memory(dd, d))
assert_(np.shares_memory(ddd, d))
# check that reversing with sub-arrays works as expected
d_rev = d[::-1]
dd_rev = structured_to_unstructured(d_rev)
assert_equal(dd_rev, [[8, 9, 10, 11, 12, 13, 14],
[1, 2, 3, 4, 5, 6, 7]])
# check that sub-arrays keep the order of their values
d_attrib_rev = d[['x2', 'x1', 'x0']]
dd_attrib_rev = structured_to_unstructured(d_attrib_rev)
assert_equal(dd_attrib_rev, [[4, 5, 6, 7, 2, 3, 1],
[11, 12, 13, 14, 9, 10, 8]])
# with ignored field at the end
d = np.array([(1, [2, 3], [[4, 5], [6, 7]], 32),
(8, [9, 10], [[11, 12], [13, 14]], 64)],
dtype=[('x0', 'i4'), ('x1', ('i4', 2)),
('x2', ('i4', (2, 2))), ('ignored', 'u1')])
dd = structured_to_unstructured(d[['x0', 'x1', 'x2']])
assert_(np.shares_memory(dd, d))
assert_equal(dd, [[1, 2, 3, 4, 5, 6, 7],
[8, 9, 10, 11, 12, 13, 14]])
# test that nested fields with identical names don't break anything
point = np.dtype([('x', int), ('y', int)])
triangle = np.dtype([('a', point), ('b', point), ('c', point)])
arr = np.zeros(10, triangle)
res = structured_to_unstructured(arr, dtype=int)
assert_equal(res, np.zeros((10, 6), dtype=int))
# test nested combinations of subarrays and structured arrays, gh-13333
def subarray(dt, shape):
return np.dtype((dt, shape))
def structured(*dts):
return np.dtype([(f'x{i}', dt) for i, dt in enumerate(dts)])
def inspect(dt, dtype=None):
arr = np.zeros((), dt)
ret = structured_to_unstructured(arr, dtype=dtype)
backarr = unstructured_to_structured(ret, dt)
return ret.shape, ret.dtype, backarr.dtype
dt = structured(subarray(structured(np.int32, np.int32), 3))
assert_equal(inspect(dt), ((6,), np.int32, dt))
dt = structured(subarray(subarray(np.int32, 2), 2))
assert_equal(inspect(dt), ((4,), np.int32, dt))
dt = structured(np.int32)
assert_equal(inspect(dt), ((1,), np.int32, dt))
dt = structured(np.int32, subarray(subarray(np.int32, 2), 2))
assert_equal(inspect(dt), ((5,), np.int32, dt))
dt = structured()
assert_raises(ValueError, structured_to_unstructured, np.zeros(3, dt))
# these currently don't work, but we may make it work in the future
assert_raises(NotImplementedError, structured_to_unstructured,
np.zeros(3, dt), dtype=np.int32)
assert_raises(NotImplementedError, unstructured_to_structured,
np.zeros((3, 0), dtype=np.int32))
# test supported ndarray subclasses
d_plain = np.array([(1, 2), (3, 4)], dtype=[('a', 'i4'), ('b', 'i4')])
dd_expected = structured_to_unstructured(d_plain, copy=True)
# recarray
d = d_plain.view(np.recarray)
dd = structured_to_unstructured(d, copy=False)
ddd = structured_to_unstructured(d, copy=True)
assert_(np.shares_memory(d, dd))
assert_(type(dd) is np.recarray)
assert_(type(ddd) is np.recarray)
assert_equal(dd, dd_expected)
assert_equal(ddd, dd_expected)
# memmap
d = np.memmap(tmp_path / 'memmap',
mode='w+',
dtype=d_plain.dtype,
shape=d_plain.shape)
d[:] = d_plain
dd = structured_to_unstructured(d, copy=False)
ddd = structured_to_unstructured(d, copy=True)
assert_(np.shares_memory(d, dd))
assert_(type(dd) is np.memmap)
assert_(type(ddd) is np.memmap)
assert_equal(dd, dd_expected)
assert_equal(ddd, dd_expected)
def test_unstructured_to_structured(self):
# test if dtype is the args of np.dtype
a = np.zeros((20, 2))
test_dtype_args = [('x', float), ('y', float)]
test_dtype = np.dtype(test_dtype_args)
field1 = unstructured_to_structured(a, dtype=test_dtype_args) # now
field2 = unstructured_to_structured(a, dtype=test_dtype) # before
assert_equal(field1, field2)
def test_field_assignment_by_name(self):
a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
newdt = [('b', 'f4'), ('c', 'u1')]
assert_equal(require_fields(a, newdt), np.ones(2, newdt))
b = np.array([(1, 2), (3, 4)], dtype=newdt)
assign_fields_by_name(a, b, zero_unassigned=False)
assert_equal(a, np.array([(1, 1, 2), (1, 3, 4)], dtype=a.dtype))
assign_fields_by_name(a, b)
assert_equal(a, np.array([(0, 1, 2), (0, 3, 4)], dtype=a.dtype))
# test nested fields
a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])])
newdt = [('a', [('c', 'u1')])]
assert_equal(require_fields(a, newdt), np.ones(2, newdt))
b = np.array([((2,),), ((3,),)], dtype=newdt)
assign_fields_by_name(a, b, zero_unassigned=False)
assert_equal(a, np.array([((1, 2),), ((1, 3),)], dtype=a.dtype))
assign_fields_by_name(a, b)
assert_equal(a, np.array([((0, 2),), ((0, 3),)], dtype=a.dtype))
# test unstructured code path for 0d arrays
a, b = np.array(3), np.array(0)
assign_fields_by_name(b, a)
assert_equal(b[()], 3)
| TestRecFunctions |
python | getsentry__sentry | src/sentry/integrations/github/handlers/github_handler.py | {
"start": 404,
"end": 554
} | class ____(TicketingActionHandler):
group = ActionHandler.Group.TICKET_CREATION
provider_slug = IntegrationProviderSlug.GITHUB
| GithubActionHandler |
python | doocs__leetcode | solution/0200-0299/0276.Paint Fence/Solution.py | {
"start": 0,
"end": 257
} | class ____:
def numWays(self, n: int, k: int) -> int:
f = [0] * n
g = [0] * n
f[0] = k
for i in range(1, n):
f[i] = (f[i - 1] + g[i - 1]) * (k - 1)
g[i] = f[i - 1]
return f[-1] + g[-1]
| Solution |
python | kennethreitz__tablib | src/tablib/packages/dbfpy/header.py | {
"start": 696,
"end": 9221
} | class ____:
"""Dbf header definition.
For more information about dbf header format visit
`http://www.clicketyclick.dk/databases/xbase/format/dbf.html#DBF_STRUCT`
Examples:
Create an empty dbf header and add some field definitions:
dbfh = DbfHeader()
dbfh.addField(("name", "C", 10))
dbfh.addField(("date", "D"))
dbfh.addField(DbfNumericFieldDef("price", 5, 2))
Create a dbf header with field definitions:
dbfh = DbfHeader([
("name", "C", 10),
("date", "D"),
DbfNumericFieldDef("price", 5, 2),
])
"""
__slots__ = ("signature", "fields", "lastUpdate", "recordLength",
"recordCount", "headerLength", "changed", "_ignore_errors")
# instance construction and initialization methods
def __init__(self, fields=None, headerLength=0, recordLength=0,
recordCount=0, signature=0x03, lastUpdate=None, ignoreErrors=False):
"""Initialize instance.
Arguments:
fields:
a list of field definitions;
recordLength:
size of the records;
headerLength:
size of the header;
recordCount:
number of records stored in DBF;
signature:
version number (aka signature). using 0x03 as a default meaning
"File without DBT". for more information about this field visit
``http://www.clicketyclick.dk/databases/xbase/format/dbf.html#DBF_NOTE_1_TARGET``
lastUpdate:
date of the DBF's update. this could be a string ('yymmdd' or
'yyyymmdd'), timestamp (int or float), datetime/date value,
a sequence (assuming (yyyy, mm, dd, ...)) or an object having
callable ``ticks`` field.
ignoreErrors:
error processing mode for DBF fields (boolean)
"""
self.signature = signature
if fields is None:
self.fields = []
else:
self.fields = list(fields)
self.lastUpdate = getDate(lastUpdate)
self.recordLength = recordLength
self.headerLength = headerLength
self.recordCount = recordCount
self.ignoreErrors = ignoreErrors
# XXX: I'm not sure this is safe to
# initialize `self.changed` in this way
self.changed = bool(self.fields)
# @classmethod
def fromString(cls, string):
"""Return header instance from the string object."""
return cls.fromStream(io.StringIO(str(string)))
fromString = classmethod(fromString)
# @classmethod
def fromStream(cls, stream):
"""Return header object from the stream."""
stream.seek(0)
first_32 = stream.read(32)
if type(first_32) != bytes:
_data = bytes(first_32, sys.getfilesystemencoding())
_data = first_32
(_cnt, _hdrLen, _recLen) = struct.unpack("<I2H", _data[4:12])
# reserved = _data[12:32]
_year = _data[1]
if _year < 80:
# dBase II started at 1980. It is quite unlikely
# that actual last update date is before that year.
_year += 2000
else:
_year += 1900
# create header object
_obj = cls(None, _hdrLen, _recLen, _cnt, _data[0],
(_year, _data[2], _data[3]))
# append field definitions
# position 0 is for the deletion flag
_pos = 1
_data = stream.read(1)
while _data != b'\r':
_data += stream.read(31)
_fld = fields.lookupFor(_data[11]).fromString(_data, _pos)
_obj._addField(_fld)
_pos = _fld.end
_data = stream.read(1)
return _obj
fromStream = classmethod(fromStream)
# properties
year = property(lambda self: self.lastUpdate.year)
month = property(lambda self: self.lastUpdate.month)
day = property(lambda self: self.lastUpdate.day)
def ignoreErrors(self, value):
"""Update `ignoreErrors` flag on self and all fields"""
self._ignore_errors = value = bool(value)
for _field in self.fields:
_field.ignoreErrors = value
ignoreErrors = property(
lambda self: self._ignore_errors,
ignoreErrors,
doc="""Error processing mode for DBF field value conversion
if set, failing field value conversion will return
``INVALID_VALUE`` instead of raising conversion error.
""")
# object representation
def __repr__(self):
_rv = """\
Version (signature): 0x%02x
Last update: %s
Header length: %d
Record length: %d
Record count: %d
FieldName Type Len Dec
""" % (self.signature, self.lastUpdate, self.headerLength,
self.recordLength, self.recordCount)
_rv += "\n".join(
["%10s %4s %3s %3s" % _fld.fieldInfo() for _fld in self.fields]
)
return _rv
# internal methods
def _addField(self, *defs):
"""Internal variant of the `addField` method.
This method doesn't set `self.changed` field to True.
Return value is a length of the appended records.
Note: this method doesn't modify ``recordLength`` and
``headerLength`` fields. Use `addField` instead of this
method if you don't exactly know what you're doing.
"""
# insure we have dbf.DbfFieldDef instances first (instantiation
# from the tuple could raise an error, in such a case I don't
# wanna add any of the definitions -- all will be ignored)
_defs = []
_recordLength = 0
for _def in defs:
if isinstance(_def, fields.DbfFieldDef):
_obj = _def
else:
(_name, _type, _len, _dec) = (tuple(_def) + (None,) * 4)[:4]
_cls = fields.lookupFor(_type)
_obj = _cls(_name, _len, _dec, ignoreErrors=self._ignore_errors)
_recordLength += _obj.length
_defs.append(_obj)
# and now extend field definitions and
# update record length
self.fields += _defs
return _recordLength
# interface methods
def addField(self, *defs):
"""Add field definition to the header.
Examples:
dbfh.addField(
("name", "C", 20),
dbf.DbfCharacterFieldDef("surname", 20),
dbf.DbfDateFieldDef("birthdate"),
("member", "L"),
)
dbfh.addField(("price", "N", 5, 2))
dbfh.addField(dbf.DbfNumericFieldDef("origprice", 5, 2))
"""
_oldLen = self.recordLength
self.recordLength += self._addField(*defs)
if not _oldLen:
self.recordLength += 1
# XXX: may be just use:
# self.recordeLength += self._addField(*defs) + bool(not _oldLen)
# recalculate headerLength
self.headerLength = 32 + (32 * len(self.fields)) + 1
self.changed = True
def write(self, stream):
"""Encode and write header to the stream."""
stream.seek(0)
stream.write(self.toString())
fields = [_fld.toString() for _fld in self.fields]
stream.write(''.join(fields).encode(sys.getfilesystemencoding()))
stream.write(b'\x0D') # cr at end of all header data
self.changed = False
def toString(self):
"""Returned 32 chars length string with encoded header."""
return struct.pack("<4BI2H",
self.signature,
self.year - 1900,
self.month,
self.day,
self.recordCount,
self.headerLength,
self.recordLength) + (b'\x00' * 20)
# TODO: figure out if bytes(utf-8) is correct here.
def setCurrentDate(self):
"""Update ``self.lastUpdate`` field with current date value."""
self.lastUpdate = datetime.date.today()
def __getitem__(self, item):
"""Return a field definition by numeric index or name string"""
if isinstance(item, str):
_name = item.upper()
for _field in self.fields:
if _field.name == _name:
return _field
else:
raise KeyError(item)
else:
# item must be field index
return self.fields[item]
# vim: et sts=4 sw=4 :
| DbfHeader |
python | ansible__ansible | lib/ansible/executor/playbook_executor.py | {
"start": 1565,
"end": 14270
} | class ____:
"""
This is the primary class for executing playbooks, and thus the
basis for bin/ansible-playbook operation.
"""
def __init__(self, playbooks, inventory, variable_manager, loader, passwords):
self._playbooks = playbooks
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
self.passwords = passwords
self._unreachable_hosts = dict()
if context.CLIARGS.get('listhosts') or context.CLIARGS.get('listtasks') or \
context.CLIARGS.get('listtags') or context.CLIARGS.get('syntax'):
self._tqm = None
else:
self._tqm = TaskQueueManager(
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
passwords=self.passwords,
forks=context.CLIARGS.get('forks'),
)
def run(self):
"""
Run the given playbook, based on the settings in the play which
may limit the runs to serialized groups, etc.
"""
result = 0
entrylist = []
entry = {}
try:
# preload become/connection/shell to set config defs cached
list(connection_loader.all(class_only=True))
list(shell_loader.all(class_only=True))
list(become_loader.all(class_only=True))
for playbook in self._playbooks:
# deal with FQCN
resource = _get_collection_playbook_path(playbook)
if resource is not None:
playbook_path = resource[1]
playbook_collection = resource[2]
else:
playbook_path = playbook
# not fqcn, but might still be collection playbook
playbook_collection = _get_collection_name_from_path(playbook)
if playbook_collection:
display.v("running playbook inside collection {0}".format(playbook_collection))
AnsibleCollectionConfig.default_collection = playbook_collection
else:
AnsibleCollectionConfig.default_collection = None
pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
# FIXME: move out of inventory self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path)))
if self._tqm is None: # we are doing a listing
entry = {'playbook': playbook_path}
entry['plays'] = []
else:
# make sure the tqm has callbacks loaded
self._tqm.load_callbacks()
self._tqm.send_callback('v2_playbook_on_start', pb)
i = 1
plays = pb.get_plays()
display.vv(u'%d plays in %s' % (len(plays), to_text(playbook_path)))
for play in plays:
if play._included_path is not None:
self._loader.set_basedir(play._included_path)
else:
self._loader.set_basedir(pb._basedir)
# clear any filters which may have been applied to the inventory
self._inventory.remove_restriction()
# Allow variables to be used in vars_prompt fields.
all_vars = self._variable_manager.get_vars(play=play)
templar = TemplateEngine(loader=self._loader, variables=all_vars)
setattr(play, 'vars_prompt', templar.template(play.vars_prompt))
# FIXME: this should be a play 'sub object' like loop_control
if play.vars_prompt:
for var in play.vars_prompt:
vname = var['name']
prompt = var.get("prompt", vname)
default = var.get("default", None)
private = boolean(var.get("private", True))
confirm = boolean(var.get("confirm", False))
encrypt = var.get("encrypt", None)
salt_size = var.get("salt_size", None)
salt = var.get("salt", None)
unsafe = boolean(var.get("unsafe", False))
if vname not in self._variable_manager.extra_vars:
if self._tqm:
self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt,
default, unsafe)
play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default, unsafe)
else: # we are either in --list-<option> or syntax check
play.vars[vname] = default
# Post validate so any play level variables are templated
all_vars = self._variable_manager.get_vars(play=play)
templar = TemplateEngine(loader=self._loader, variables=all_vars)
play.post_validate(templar)
if context.CLIARGS['syntax']:
continue
if self._tqm is None:
# we are just doing a listing
entry['plays'].append(play)
else:
self._tqm._unreachable_hosts.update(self._unreachable_hosts)
previously_failed = len(self._tqm._failed_hosts)
previously_unreachable = len(self._tqm._unreachable_hosts)
break_play = False
# we are actually running plays
batches = self._get_serialized_batches(play)
if len(batches) == 0:
self._tqm.send_callback('v2_playbook_on_play_start', play)
self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
for batch in batches:
# restrict the inventory to the hosts in the serialized batch
self._inventory.restrict_to_hosts(batch)
# and run it...
try:
result = self._tqm.run(play=play)
except AnsibleEndPlay as e:
result = e.result
break
# break the play if the result equals the special return code
if result & self._tqm.RUN_FAILED_BREAK_PLAY != 0:
result = self._tqm.RUN_FAILED_HOSTS
break_play = True
# check the number of failures here and break out if the entire batch failed
failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \
(previously_failed + previously_unreachable)
if len(batch) == failed_hosts_count:
break_play = True
break
# update the previous counts so they don't accumulate incorrectly
# over multiple serial batches
previously_failed += len(self._tqm._failed_hosts) - previously_failed
previously_unreachable += len(self._tqm._unreachable_hosts) - previously_unreachable
# save the unreachable hosts from this batch
self._unreachable_hosts.update(self._tqm._unreachable_hosts)
if break_play:
break
i = i + 1 # per play
if entry:
entrylist.append(entry) # per playbook
# send the stats callback for this playbook
if self._tqm is not None:
if C.RETRY_FILES_ENABLED:
retries = set(self._tqm._failed_hosts.keys())
retries.update(self._tqm._unreachable_hosts.keys())
retries = sorted(retries)
if len(retries) > 0:
if C.RETRY_FILES_SAVE_PATH:
basedir = C.RETRY_FILES_SAVE_PATH
elif playbook_path:
basedir = os.path.dirname(os.path.abspath(playbook_path))
else:
basedir = '~/'
(retry_name, ext) = os.path.splitext(os.path.basename(playbook_path))
filename = os.path.join(basedir, "%s.retry" % retry_name)
if self._generate_retry_inventory(filename, retries):
display.display("\tto retry, use: --limit @%s\n" % filename)
self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
# if the last result wasn't zero, break out of the playbook file name loop
if result != 0:
break
if entrylist:
return entrylist
finally:
if self._tqm is not None:
self._tqm.cleanup()
if self._loader:
self._loader.cleanup_all_tmp_files()
if context.CLIARGS['syntax']:
display.display("No issues encountered")
return result
if context.CLIARGS['start_at_task'] and not self._tqm._start_at_done:
display.error(
"No matching task \"%s\" found."
" Note: --start-at-task can only follow static includes."
% context.CLIARGS['start_at_task']
)
return result
def _get_serialized_batches(self, play):
"""
Returns a list of hosts, subdivided into batches based on
the serial size specified in the play.
"""
# make sure we have a unique list of hosts
all_hosts = self._inventory.get_hosts(play.hosts, order=play.order)
all_hosts_len = len(all_hosts)
# the serial value can be listed as a scalar or a list of
# scalars, so we make sure it's a list here
serial_batch_list = play.serial
if len(serial_batch_list) == 0:
serial_batch_list = [-1]
cur_item = 0
serialized_batches = []
while len(all_hosts) > 0:
# get the serial value from current item in the list
serial = pct_to_int(serial_batch_list[cur_item], all_hosts_len)
# if the serial count was not specified or is invalid, default to
# a list of all hosts, otherwise grab a chunk of the hosts equal
# to the current serial item size
if serial <= 0:
serialized_batches.append(all_hosts)
break
else:
play_hosts = []
for x in range(serial):
if len(all_hosts) > 0:
play_hosts.append(all_hosts.pop(0))
serialized_batches.append(play_hosts)
# increment the current batch list item number, and if we've hit
# the end keep using the last element until we've consumed all of
# the hosts in the inventory
cur_item += 1
if cur_item > len(serial_batch_list) - 1:
cur_item = len(serial_batch_list) - 1
return serialized_batches
def _generate_retry_inventory(self, retry_path, replay_hosts):
"""
Called when a playbook run fails. It generates an inventory which allows
re-running on ONLY the failed hosts. This may duplicate some variable
information in group_vars/host_vars but that is ok, and expected.
"""
try:
makedirs_safe(os.path.dirname(retry_path))
with open(retry_path, 'w') as fd:
for x in replay_hosts:
fd.write("%s\n" % x)
except Exception as e:
display.warning("Could not create retry file '%s'.\n\t%s" % (retry_path, to_text(e)))
return False
return True
| PlaybookExecutor |
python | pytorch__pytorch | test/test_transformers.py | {
"start": 10990,
"end": 67412
} | class ____(NNTestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
@onlyCUDA
@unittest.skip("4D mask not supported yet - activate when 4D mask supported")
def test_self_attn_TxT_attn_mask(self, device):
embed_dim = 16
num_heads = 4
batch_size = 10
tgt_len = 16
query = torch.rand(batch_size, tgt_len, embed_dim, device=device) # [N, T, D]
attn_mask = torch.randint(0, 2, (tgt_len, tgt_len)).cuda().float() # [T, T]
attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, 0.0)
attn_mask_4d = attn_mask.expand(batch_size, num_heads, tgt_len, tgt_len)
mta_model = torch.nn.MultiheadAttention(embed_dim, num_heads, batch_first=True).cuda()
mta_model.eval()
# Generate 3D results
with torch.inference_mode():
output_mask_4d = mta_model(query, query, query, attn_mask=attn_mask_4d)[0]
output_mask_4d = output_mask_4d.transpose(0, 1) # [N, T, D]
output_mask_TxT = mta_model(query, query, query, attn_mask=attn_mask)[0]
output_mask_TxT = output_mask_TxT.transpose(0, 1) # [N, T, D]
self.assertEqual(output_mask_4d, output_mask_TxT)
@slowTest
def test_train_with_pad_and_catch_error(self, device):
iters = 100
pad_mask = torch.tensor([[1, 1, 0, 0]], dtype=torch.bool).to(device)
layer = nn.TransformerEncoderLayer(
d_model=2,
dim_feedforward=4,
nhead=2,
batch_first=True,
activation="gelu",
dropout=0,
)
criterion = nn.MSELoss()
encoder = nn.TransformerEncoder(layer, 2).to(device)
optimizer = optim.SGD(encoder.parameters(), lr=0.1, momentum=0.9)
encoder.train()
for _ in range(iters):
encoder.train()
optimizer.zero_grad()
inputs = torch.cat([torch.randn(1, 2, 2), torch.zeros(1, 2, 2)], dim=1).to(device)
outputs = encoder(inputs, src_key_padding_mask=pad_mask)
loss = criterion(outputs[:, 0:2, :], inputs[:, 0:2, :])
loss.backward()
optimizer.step()
with torch.no_grad():
test = torch.cat([torch.randn(1, 2, 2), torch.zeros(1, 2, 2)], dim=1).to(device)
# Expect uint8 type not supported
e = None
try:
encoder(test, src_key_padding_mask=pad_mask.to(torch.uint8))
except AssertionError:
continue
self.assertFalse(e, "Failed to catch unsupported uint8 type exception")
test_train_bool = encoder(test, src_key_padding_mask=pad_mask)
encoder.eval()
# Expect long type not supported
e = None
try:
encoder(test, src_key_padding_mask=pad_mask.to(torch.int64))
except AssertionError as e:
continue
self.assertFalse(e, "Failed to catch unsupported Long type exception")
test_eval_bool = encoder(test, src_key_padding_mask=pad_mask)
l1_bool = nn.L1Loss()(test_train_bool[:, 0:2, :], test_eval_bool[:, 0:2, :]).item()
self.assertTrue(l1_bool < 1e-4, "Eval/Train difference in pad_mask BOOL")
@tf32_on_and_off(0.001)
@parametrize("attn_mask_dim", [2, 3, None])
@parametrize("key_padding_mask_dim", [2, None])
@parametrize("mask_dtype", [torch.bool, torch.float32])
def test_multiheadattention_fastpath_attn_mask(self, device, attn_mask_dim, key_padding_mask_dim, mask_dtype):
# MHA converts all
with torch.no_grad():
B = 2
L = 4
D = 8
H = 4
if attn_mask_dim == 2:
attn_mask = make_tensor((L, L), dtype=mask_dtype, device=device)
elif attn_mask_dim == 3:
attn_mask = make_tensor((B, 1, L, L), dtype=mask_dtype, device=device).expand(B, H, L, L).reshape(B * H, L, L)
elif attn_mask_dim is None:
attn_mask = None
if key_padding_mask_dim == 2:
key_padding_mask = make_tensor((B, L), dtype=mask_dtype, device=device)
elif key_padding_mask_dim is None:
key_padding_mask = None
mha = nn.MultiheadAttention(D, H, batch_first=True, device=device)
X = torch.randn(B, L, D, device=device)
mha.train() # disable fast path
out, _ = mha(X, X, X, attn_mask=attn_mask, key_padding_mask=key_padding_mask, need_weights=False)
mha.eval() # enable fast path
out_fp, _ = mha(X, X, X, attn_mask=attn_mask, key_padding_mask=key_padding_mask, need_weights=False)
# The FP kernel will return NaNs while the sdpa kernel which is ran when the fast path is turned off returns 0 instead
# of NaNs for fully masked rows
self.assertEqual(out, out_fp.nan_to_num())
@parametrize("nhead", [1, 4, 8])
def test_transformerencoderlayer_src_mask(self, device, nhead):
batch_size = 2
seqlen = 4
d_model = 8
dim_feedforward = 32
model = torch.nn.TransformerEncoderLayer(
d_model=d_model,
nhead=nhead,
dim_feedforward=dim_feedforward,
batch_first=True).to(device)
src = torch.rand(batch_size, seqlen, d_model).to(device) # bs, seqlen, d_model
src_mask = torch.zeros(seqlen, seqlen).to(torch.bool).to(device)
model(src, src_mask=src_mask)
model.eval()
with torch.no_grad():
model(src, src_mask=src_mask)
@parametrize("nhead", [3, 4])
def test_transformerencoderlayer_no_fastpath_with_hooks(self, device, nhead):
batch_size = 2
seqlen = 4
d_model = 12
model = torch.nn.TransformerEncoderLayer(
d_model=d_model,
nhead=nhead,
dim_feedforward=d_model,
batch_first=True).to(device).eval()
src = torch.rand(batch_size, seqlen, d_model).to(device) # bs, seqlen, d_model
cache = []
# forward hook to save output
def hook(module, inputs, output):
cache.append(output[0].detach())
# register hook to get the output of the self-attention layer
handle = model.self_attn.register_forward_hook(hook)
# forward pass
with torch.inference_mode():
model(src)
# output of the self-attention layer
assert len(cache) == 1, f"Expected 1 output, got {len(cache)}"
# remove hook
handle.remove()
@skipIfRocmArch(MI300_ARCH)
@tf32_on_and_off(0.001)
@parametrize("use_torchscript", [False])
@parametrize("enable_nested_tensor", [True, False])
@parametrize("use_autocast", [True, False])
@parametrize("d_model", [12, 256])
def test_transformerencoder_fastpath(self, device, use_torchscript, enable_nested_tensor, use_autocast, d_model):
"""
Test TransformerEncoder fastpath output matches slowpath output
"""
torch.manual_seed(1234)
nhead = 4
dim_feedforward = d_model
batch_first = True
model = torch.nn.TransformerEncoder(
torch.nn.TransformerEncoderLayer(
d_model=d_model,
nhead=nhead,
dim_feedforward=dim_feedforward,
batch_first=batch_first),
num_layers=2,
enable_nested_tensor=enable_nested_tensor
).to(device).eval()
if use_torchscript:
model = torch.jit.script(model)
# each input is (input, mask)
input_mask_pairs = [
(
torch.rand(3, 2, d_model),
[
[0, 1],
[0, 1],
[1, 1]
]
),
(
torch.rand(2, 100, d_model),
[
[0] * 98 + [1] * 2,
[0] * 90 + [1] * 10
]
),
# softmax.cu switches from fast->slowpath at masked seqlen 1024. test 1024.
(
torch.rand(2, 1024, d_model),
[
[0] * 1020 + [1] * 4,
[0] * 1024,
]
),
(
torch.rand(1, 1026, d_model),
[[0] * 1024 + [1] * 2]
),
# softmax.cu switches from fast->slowpath at masked seqlen 1024. test range of masks above 1024.
(
torch.rand(4, 1040, d_model),
[
[0] * 1024 + [1] * 16,
[0] * 1025 + [1] * 15,
[0] * 1031 + [1] * 9,
[0] * 1040,
]
)
]
input_mask_pairs = [
(
torch.tensor(pair[0], device=device, dtype=torch.get_default_dtype()), # float input
torch.tensor(pair[1], device=device, dtype=torch.bool) # bool mask
) for pair in input_mask_pairs
]
maybe_autocast = torch.autocast("cuda", dtype=torch.float16) if use_autocast else contextlib.nullcontext()
with maybe_autocast:
for input, src_key_padding_mask in input_mask_pairs:
with torch.no_grad():
fastpath_output = model(input, src_key_padding_mask=src_key_padding_mask)
slowpath_output = model(input, src_key_padding_mask=src_key_padding_mask) # reference
# Make sure fastpath_output is same shape as slowpath_output and mask.
# When enable_nested_tensor=true, fastpath_output may be smaller than input tensor.
# Eg if input bs=1, seqlen=6, and we mask out 2 tokens, fastpath_output will have bs=1, seqlen=4.
# Expand back to old size to match.
bs, true_seqlen, embed_dim = fastpath_output.shape
expanded_seqlen = src_key_padding_mask.shape[1]
fastpath_output_expanded = torch.zeros(bs, expanded_seqlen, embed_dim, device=device)
fastpath_output_expanded[:, :true_seqlen, :] = fastpath_output
# no garauntees on output corresponding to masked tokens, so they may vary between slow/fast path. set all to 0.
fastpath_output_expanded = fastpath_output_expanded.masked_fill(src_key_padding_mask.unsqueeze(-1), 0)
slowpath_output = slowpath_output.masked_fill(src_key_padding_mask.unsqueeze(-1), 0)
self.assertEqual(fastpath_output_expanded, slowpath_output)
@tf32_on_and_off(0.001)
@parametrize("with_no_grad", [True, False])
@parametrize("training", [True, False])
@parametrize("enable_nested_tensor", [False])
def test_transformerencoder_square_input(self, with_no_grad, training, enable_nested_tensor, device):
"""
Test for edge cases when input of shape (batch size, sequence length, embedding dimension) has
batch size == sequence length
"""
model = torch.nn.TransformerEncoder(
torch.nn.TransformerEncoderLayer(d_model=4, nhead=2, dim_feedforward=16, dropout=0.0, batch_first=True),
num_layers=2,
enable_nested_tensor=enable_nested_tensor
).to(device)
with torch.no_grad():
# set constant weights of the model
for p in model.parameters():
x = p.data
sz = x.view(-1).size(0)
shape = x.shape
x = torch.cos(torch.arange(0, sz).float().view(shape))
p.data.copy_(x)
if training:
model = model.train()
else:
model = model.eval()
x = torch.arange(0, 16).reshape(2, 2, 4).to(torch.get_default_dtype()).to(device)
src_mask = torch.Tensor([[0, 1], [0, 0]]).to(torch.bool).to(device)
if with_no_grad:
cm = torch.no_grad()
else:
cm = contextlib.nullcontext()
with cm:
result = model(x, mask=src_mask)
ref_output = torch.Tensor([[[2.420306205749512, 0.017629241570830, -0.607857942581177, -0.085519507527351],
[2.420306205749512, 0.017629241570830, -0.607857942581177, -0.085519507527351]],
[[2.419836044311523, 0.017548924311996, -0.608187675476074, -0.085347734391689],
[2.419836044311523, 0.017548924311996, -0.608187675476074, -0.085347734391689]]]
).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
self.assertEqual(result, ref_output)
@parametrize("batch_first", [True, False])
@parametrize("training", [True, False])
@parametrize("enable_nested_tensor", [True, False])
def test_transformerencoder(self, batch_first, training, enable_nested_tensor, device):
def get_a_test_layer(activation, batch_first=False):
d_model = 4
nhead = 2
dim_feedforward = 16
dropout = 0.0
layer = nn.TransformerEncoderLayer(
d_model,
nhead,
dim_feedforward=dim_feedforward,
dropout=dropout,
activation=activation,
batch_first=batch_first,
).to(device)
with torch.no_grad():
# set constant weights of the model
for p in layer.parameters():
x = p.data
sz = x.view(-1).size(0)
shape = x.shape
x = torch.cos(torch.arange(0, sz).float().view(shape))
p.data.copy_(x)
return layer
# this is a deterministic test for TransformerEncoder
activation = F.relu
def _test(batch_first, training, enable_nested_tensor):
def perm_fn(x):
return x.transpose(1, 0) if batch_first else x
encoder_layer = get_a_test_layer(activation=activation,
batch_first=batch_first)
model = nn.TransformerEncoder(
encoder_layer, 1, enable_nested_tensor=enable_nested_tensor
).to(device)
if not training:
model = model.eval()
# deterministic input
encoder_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
[0.5387, 0.1655, 0.3565, 0.0471]],
[[0.8335, 0.2799, 0.5031, 0.2947],
[0.1402, 0.0318, 0.7636, 0.1346]],
[[0.6333, 0.9344, 0.1376, 0.9938],
[0.8924, 0.2872, 0.6692, 0.2944]],
[[0.9897, 0.6915, 0.3154, 0.1733],
[0.8645, 0.3513, 0.3064, 0.0767]],
[[0.8117, 0.2366, 0.4838, 0.7881],
[0.3718, 0.4945, 0.9511, 0.0864]]]
)).to(device)
result = model(encoder_input)
ref_output = perm_fn(torch.tensor([[[2.428589, 0.020835, -0.602055, -0.085249],
[2.427987, 0.021213, -0.602496, -0.084103]],
[[2.424689, 0.019155, -0.604793, -0.085672],
[2.413863, 0.022211, -0.612486, -0.072490]],
[[2.433774, 0.021598, -0.598343, -0.087548],
[2.425104, 0.019748, -0.604515, -0.084839]],
[[2.436185, 0.022682, -0.596625, -0.087261],
[2.433556, 0.021891, -0.598509, -0.086832]],
[[2.416246, 0.017512, -0.610712, -0.082961],
[2.422901, 0.024187, -0.606178, -0.074929]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
# all 0 src_mask
src_mask = torch.zeros([5, 5]).to(device) == 1
result = model(encoder_input, mask=src_mask)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
# all 0
mask = torch.zeros([2, 5]).to(device) == 1
result = model(encoder_input, src_key_padding_mask=mask)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
mask[0, 1] = 1
mask[1, 3] = 1
mask[1, 4] = 1
result = model(encoder_input, src_key_padding_mask=mask)
ref_output = perm_fn(torch.tensor([[[2.429026, 0.020793, -0.601741, -0.085642],
[2.428811, 0.021445, -0.601912, -0.084252]],
[[2.425009, 0.019155, -0.604566, -0.085899],
[2.415408, 0.02249, -0.611415, -0.073]],
[[2.434199, 0.021682, -0.598039, -0.087699],
[2.42598, 0.019941, -0.603896, -0.085091]],
[[2.436457, 0.022736, -0.59643, -0.08736],
[2.434021, 0.022093, -0.598179, -0.08679]],
[[2.416531, 0.017498, -0.610513, -0.083181],
[2.4242, 0.024653, -0.605266, -0.074959]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
# test case 2, multiple layers no norm
model = nn.TransformerEncoder(encoder_layer, 2, enable_nested_tensor=enable_nested_tensor).to(device)
if not training:
model = model.eval()
result = model(encoder_input, src_key_padding_mask=mask)
ref_output = perm_fn(torch.tensor([[[2.419051, 0.017446, -0.608738, -0.085003],
[2.419102, 0.017452, -0.608703, -0.085026]],
[[2.419043, 0.017445, -0.608744, -0.084999],
[2.419052, 0.017446, -0.608738, -0.085004]],
[[2.419067, 0.017448, -0.608727, -0.085010],
[2.419098, 0.017452, -0.608706, -0.085024]],
[[2.419072, 0.017449, -0.608724, -0.085012],
[2.419119, 0.017455, -0.608691, -0.085034]],
[[2.419019, 0.017442, -0.608761, -0.084989],
[2.419075, 0.017449, -0.608722, -0.085014]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
model = nn.TransformerEncoder(encoder_layer, 6, enable_nested_tensor=enable_nested_tensor).to(device)
if not training:
model = model.eval()
result = model(encoder_input, src_key_padding_mask=mask)
ref_output = perm_fn(torch.tensor([[[2.419101, 0.017453, -0.608703, -0.085025],
[2.419101, 0.017453, -0.608704, -0.085025]],
[[2.419101, 0.017453, -0.608703, -0.085025],
[2.419101, 0.017453, -0.608704, -0.085025]],
[[2.419101, 0.017453, -0.608703, -0.085025],
[2.419101, 0.017453, -0.608704, -0.085025]],
[[2.419101, 0.017453, -0.608703, -0.085025],
[2.419101, 0.017453, -0.608704, -0.085025]],
[[2.419101, 0.017453, -0.608703, -0.085025],
[2.419101, 0.017453, -0.608704, -0.085025]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
# test case 3, multiple layers with norm
# d_model = 4
norm = nn.LayerNorm(4)
model = nn.TransformerEncoder(encoder_layer, 2, norm=norm,
enable_nested_tensor=enable_nested_tensor).to(device)
if not training:
model = model.eval()
result = model(encoder_input, src_key_padding_mask=mask)
ref_output = perm_fn(torch.tensor([[[1.695949, -0.357635, -0.893077, -0.445238],
[1.695955, -0.357639, -0.893050, -0.445266]],
[[1.695948, -0.357634, -0.893082, -0.445233],
[1.695950, -0.357635, -0.893077, -0.445238]],
[[1.695951, -0.357636, -0.893069, -0.445246],
[1.695955, -0.357639, -0.893052, -0.445264]],
[[1.695952, -0.357636, -0.893066, -0.445249],
[1.695957, -0.357641, -0.893041, -0.445276]],
[[1.695946, -0.357632, -0.893095, -0.445220],
[1.695952, -0.357637, -0.893065, -0.445251]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
model = nn.TransformerEncoder(encoder_layer, 6, norm=norm,
enable_nested_tensor=enable_nested_tensor).to(device)
if not training:
model = model.eval()
result = model(encoder_input, src_key_padding_mask=mask)
ref_output = perm_fn(torch.tensor([[[1.695955, -0.357639, -0.893051, -0.445265],
[1.695955, -0.357639, -0.893051, -0.445265]],
[[1.695955, -0.357639, -0.893051, -0.445265],
[1.695955, -0.357639, -0.893051, -0.445265]],
[[1.695955, -0.357639, -0.893051, -0.445265],
[1.695955, -0.357639, -0.893051, -0.445265]],
[[1.695955, -0.357639, -0.893051, -0.445265],
[1.695955, -0.357639, -0.893051, -0.445265]],
[[1.695955, -0.357639, -0.893051, -0.445265],
[1.695955, -0.357639, -0.893051, -0.445265]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
# TODO: remove set default dtype to double by making ref_output more precise.
# Added because this test was copied from test_nn.py, which has default
# dtype double. If default dtype is float, tests will say tensors not close because
# ref output precision too low
with set_default_dtype(torch.double):
if training:
cm = contextlib.nullcontext()
else:
cm = torch.no_grad() # transformer fast path requires no grad
with cm:
_test(batch_first, training, enable_nested_tensor)
@unittest.skipIf(sys.version_info < (3, 11), "not supported on pre-3.11 Python")
def test_encoder_padding_and_src_mask_bool(self):
encoder_layer = nn.TransformerEncoderLayer(
d_model=16,
nhead=2,
dim_feedforward=32,
dropout=0.1,
activation='relu',
batch_first=True,
)
encoder_norm = nn.LayerNorm(16)
encoder = nn.TransformerEncoder(
encoder_layer, 2, encoder_norm
)
inputs = torch.randn(2, 3, 16)
src_mask = torch.ones(3, 3, dtype=torch.bool).triu_(diagonal=1)
input_seq_len = torch.tensor([3, 2])
padding_mask = (
torch.arange(3)[None, :].cpu() >= input_seq_len[:, None]
)
with (self.assertNoLogs(None) if not TEST_WITH_TORCHDYNAMO else contextlib.nullcontext()):
encoder(
inputs,
mask=src_mask,
src_key_padding_mask=padding_mask,
)
@unittest.skipIf(sys.version_info < (3, 11), "not supported on pre-3.11 Python")
def test_decoder_padding_and_src_mask_bool(self):
def transformer_decoder(inputs, input_seq_len, memory):
decoder_layer = nn.TransformerDecoderLayer(
d_model=16,
nhead=2,
dim_feedforward=32,
dropout=0.1,
activation='relu',
batch_first=True,
)
decoder_norm = nn.LayerNorm(16)
decoder = nn.TransformerDecoder(
decoder_layer, 2, decoder_norm
)
src_mask = torch.ones(
inputs.shape[1], inputs.shape[1], dtype=torch.bool
).triu_(diagonal=1)
padding_mask = (
torch.arange(inputs.shape[1])[None, :].cpu()
>= input_seq_len[:, None]
)
return decoder(
inputs,
memory,
tgt_mask=src_mask,
tgt_key_padding_mask=padding_mask,
memory_key_padding_mask=padding_mask,
)
inputs = torch.randn(2, 3, 16)
memory = torch.randn(2, 3, 16)
input_seq_len = torch.tensor([3, 2])
with self.assertNoLogs(None):
transformer_decoder(inputs, input_seq_len, memory)
def test_encoder_is_causal(self):
d_model = 3
layer = torch.nn.TransformerEncoderLayer(d_model, 1, 6, batch_first=True)
layer.eval()
x = torch.randn(1, 5, d_model)
mask = torch.nn.Transformer.generate_square_subsequent_mask(x.size(1))
is_causal_output = layer(x, src_mask=mask, is_causal=True)
masked_output = layer(x, src_mask=mask)
self.assertEqual(masked_output, is_causal_output)
@onlyCUDA
@unittest.skipIf(
not PLATFORM_SUPPORTS_FLASH_ATTENTION, "Platform does not supposrt pre-SM80 hardware"
)
def test_math_backend_high_precision(self):
xq = torch.rand([1, 128, 2, 80], device="cuda", dtype=torch.bfloat16) * 5
xk = torch.rand([1, 128, 2, 80], device="cuda", dtype=torch.bfloat16) * 5
xv = torch.randn([1, 128, 2, 80], device="cuda", dtype=torch.bfloat16)
mask = None
def scaled_dot_product_attention(
xq: torch.Tensor, xk: torch.Tensor, xv: torch.Tensor, mask: Optional[torch.Tensor], backend: SDPBackend
) -> torch.Tensor:
n_rep = 1
xq, xk, xv = (tensor.transpose(1, 2) for tensor in (xq, xk, xv))
xk = xk.repeat_interleave(n_rep, dim=1)
xv = xv.repeat_interleave(n_rep, dim=1)
with sdpa_kernel(backends=[backend]):
attn_output = F.scaled_dot_product_attention(
xq, xk, xv, attn_mask=mask, dropout_p=0.0
)
return attn_output.transpose(1, 2)
torch.backends.cuda.allow_fp16_bf16_reduction_math_sdp(True)
sdp_math_low_prec_out = scaled_dot_product_attention(xq, xk, xv, mask, SDPBackend.MATH)
torch.backends.cuda.allow_fp16_bf16_reduction_math_sdp(False)
sdp_math_high_prec_out = scaled_dot_product_attention(xq, xk, xv, mask, SDPBackend.MATH)
sdp_math_fp64_out_ref = scaled_dot_product_attention(
xq.double(), xk.double(), xv.double(), mask, SDPBackend.MATH
).bfloat16()
torch.testing.assert_close(sdp_math_high_prec_out, sdp_math_fp64_out_ref, atol=1e-2, rtol=1e-2)
with self.assertRaisesRegex(AssertionError, "Tensor-likes are not close"):
torch.testing.assert_close(sdp_math_low_prec_out, sdp_math_fp64_out_ref, atol=1e-2, rtol=1e-2)
@onlyCUDA
@parametrize("nb_heads", [1, 8])
@parametrize("bias", [True, False])
def test_mha_native_args(self, nb_heads, bias):
B, L, F = 8, 100, 128
batch_first = True
fast_path = True
use_pad_mask = (bias % 2) == 1
mha = nn.MultiheadAttention(
embed_dim=F,
num_heads=nb_heads,
batch_first=batch_first,
bias=bias
).cuda()
mha.eval()
ctx = torch.no_grad if fast_path else contextlib.nullcontext
with ctx():
x = torch.randn(B, L, F).cuda()
if not batch_first:
x = x.transpose(0, 1)
pad_mask = None
if use_pad_mask:
pad_mask = torch.zeros((B, L), dtype=torch.bool).cuda()
mha(query=x, key=x, value=x, key_padding_mask=pad_mask)
def test_kpm_mask_trailing_column_with_nested_tensor(self, device):
encoder_layer = nn.TransformerEncoderLayer(
d_model=256,
nhead=4,
dim_feedforward=512,
activation='gelu',
norm_first=False,
batch_first=False,
)
transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=3, enable_nested_tensor=True).to(device)
x = torch.randn(10, 6, 256).to(device)
mask = torch.ones(6, 10)
mask[0, :] = 0 # here I masked 5 columns instead of just one
mask = mask.bool().to(device)
out = transformer_encoder(src=x, src_key_padding_mask=mask)
self.assertEqual(out.shape[1], 6)
# CPU unit test has_torch_functions in test environment,
# preventing successful completion
@onlyCUDA
def test_with_nested_tensor_input(self, device):
encoder_layer = nn.TransformerEncoderLayer(
d_model=256,
nhead=4,
dim_feedforward=512,
activation='gelu',
norm_first=False,
batch_first=True,
)
transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=3, enable_nested_tensor=True).to(device)
transformer_encoder.eval()
with torch.no_grad():
x = torch.randn(6, 10, 256).to(device)
mask = torch.ones(6, 10)
mask[0, 0:] = 0 # here I masked 5 columns instead of just one
mask[2, 2:] = 0 # here I masked 5 columns instead of just one
mask[4, 4:] = 0 # here I masked 5 columns instead of just one
mask[5, 8:] = 0 # here I masked 5 columns instead of just one
mask = mask.bool().to(device)
x = torch._nested_tensor_from_mask(x, mask.logical_not(), mask_check=False)
out = transformer_encoder(src=x, src_key_padding_mask=None)
self.assertEqual(out.is_nested, True)
def test_script_encoder_subclass(self, device):
class MyCustomLayer(nn.TransformerEncoderLayer):
pass
encoder = nn.TransformerEncoder(
MyCustomLayer(d_model=256, nhead=8), num_layers=6
).to(device=device)
torch.jit.script(encoder)
# brazenly adapted from test_transformerencoderlayer_src_mask to test execution of
# torchscripted transformerencoderlayer subclass
def test_transformerencoderlayer_subclass(self, device):
class MyCustomLayer(nn.TransformerEncoderLayer):
pass
nhead = 4
batch_size = 2
seqlen = 4
d_model = 8
dim_feedforward = 32
model = MyCustomLayer(
d_model=d_model,
nhead=nhead,
dim_feedforward=dim_feedforward,
batch_first=True).to(device)
script_model = torch.jit.script(model)
src = torch.rand(batch_size, seqlen, d_model).to(device) # bs, seqlen, d_model
src_mask = torch.zeros(seqlen, seqlen).to(torch.bool).to(device)
torch.manual_seed(42)
result = model(src, src_mask=src_mask)
torch.manual_seed(42)
scripted_result = script_model(src, src_mask=src_mask)
self.assertEqual(result, scripted_result)
model.eval()
script_model = torch.jit.script(model)
with torch.no_grad():
result = model(src, src_mask=src_mask)
scripted_result = script_model(src, src_mask=src_mask)
self.assertEqual(result, scripted_result)
def test_transformerencoderlayer_subclass_model(self, device):
class MyCustomLayer(nn.TransformerEncoderLayer):
pass
nhead = 4
batch_size = 2
seqlen = 4
d_model = 8
dim_feedforward = 32
layer = MyCustomLayer(
d_model=d_model,
nhead=nhead,
dim_feedforward=dim_feedforward,
batch_first=True)
model = nn.TransformerEncoder(
layer, num_layers=6
).to(device=device)
script_model = torch.jit.script(model)
src = torch.rand(batch_size, seqlen, d_model).to(device) # bs, seqlen, d_model
src_mask = torch.zeros(seqlen, seqlen).to(torch.bool).to(device)
torch.manual_seed(42)
result = model(src, mask=src_mask)
torch.manual_seed(42)
scripted_result = script_model(src, mask=src_mask)
self.assertEqual(result, scripted_result)
model.eval()
script_model = torch.jit.script(model)
with torch.no_grad():
result = model(src, mask=src_mask)
scripted_result = script_model(src, mask=src_mask)
self.assertEqual(result, scripted_result)
@onlyCUDA
@unittest.skipIf(not TEST_FAIRSEQ, "Fairseq not found")
def test_decoder_only_layer(self):
class FairseqDecoder(torch.nn.Module):
def __init__(
self,
embed_dim,
attention_heads,
ffn_embed_dim,
num_layers,
embedding_layer, # torch.nn.Embedding. Must have a padding_idx field
dropout=0,
normalize_before=False,
torch_encoder=None, # torch encoder that you can map weights from
activation="relu",
):
super().__init__()
cfg = fairseq_transformer.TransformerConfig()
cfg.decoder.embed_dim = embed_dim
cfg.decoder.output_dim = embed_dim
cfg.decoder.attention_heads = attention_heads
cfg.decoder.ffn_embed_dim = ffn_embed_dim
cfg.dropout = dropout
cfg.decoder.normalize_before = normalize_before
cfg.decoder.layers = num_layers
# make embedding behavior same as other encoders
cfg.no_token_positional_embeddings = True
cfg.no_scale_embedding = True
cfg.activation_fn = activation
dictionary = {} # TODO: verify what this is
self.decoder = fairseq_transformer.TransformerDecoder(
cfg,
dictionary,
embedding_layer,
no_encoder_attn=True,
output_projection=None,
)
if torch_encoder is not None:
self.decoder = torch_to_fairseq(torch_encoder, self.decoder) # noqa: F821
self.decoder = self.decoder.eval().cuda().half()
def forward(
self,
tokens,
src_lengths=None,
with_triangle_mask=False,
incremental_state=None,
):
return self.decoder(
prev_output_tokens=tokens,
encoder_out=None,
incremental_state=incremental_state,
features_only=True,
full_context_alignment=not with_triangle_mask,
alignment_layer=None,
alignment_heads=None,
src_lengths=src_lengths,
return_all_hiddens=False,
)[0]
@tf32_on_and_off(0.003)
@parametrize("batch_size", [0, 5])
@parametrize("input_dim,attn_mask_dim,is_causal",
[(3, None, False), (3, 2, False), (3, 2, True), (3, 3, False), (3, 3, True),
(4, None, False), (4, 2, False), (4, 2, True), (4, 4, False), (4, 4, True)],
name_fn=lambda input_dim, attn_dim, is_causal: (
f"{input_dim}D_input_dim_" + (
f"{attn_dim}D_{'causal_' if is_causal else ''}attn_mask"
if attn_dim is not None else "no_attn_mask")))
@parametrize("dropout_p", [0.0, 0.2, 0.5])
@sdpa_kernel(backends=[SDPBackend.MATH])
def test_scaled_dot_product_attention(self, device, batch_size, input_dim, attn_mask_dim, is_causal, dropout_p):
def sdp_ref(
q,
k,
v,
attn_mask=None,
dropout_p=0.0):
E = q.size(-1)
q = q / math.sqrt(E)
# (B, Nt, E) x (B, E, Ns) -> (B, Nt, Ns)
if attn_mask is not None:
attn = torch.baddbmm(attn_mask, q, k.transpose(-2, -1))
else:
attn = torch.bmm(q, k.transpose(-2, -1))
attn = torch.nn.functional.softmax(attn, dim=-1)
if dropout_p > 0.0:
attn = torch.nn.functional.dropout(attn, p=dropout_p)
# (B, Nt, Ns) x (B, Ns, E) -> (B, Nt, E)
output = torch.bmm(attn, v)
return output
# TODO: Support cross-device / dtype testing properly when instantiate_device_type_tests() is used.
dtypes = [torch.double, torch.float]
for dtype in dtypes:
N = batch_size
def rand_tensor(*shape):
return torch.randn(shape, device=device, dtype=dtype)
# This test compares python and C++ implementations of SDP.
N_prime, L, S, E = 2, 4, 3, 6
if input_dim == 3:
query = rand_tensor(N, L, E)
key = rand_tensor(N, S, E)
value = rand_tensor(N, S, E)
elif input_dim == 4:
query = rand_tensor(N, N_prime, L, E)
key = rand_tensor(N, N_prime, S, E)
value = rand_tensor(N, N_prime, S, E)
else:
self.fail(f'Invalid input_dim {input_dim} encountered in SDP test')
attn_mask = None
if attn_mask_dim is not None:
assert attn_mask_dim in [2, input_dim]
mask_size = (L, S) if attn_mask_dim == 2 else ((N, L, S) if input_dim == 3 else (N, N_prime, L, S))
attn_mask = (torch.ones(mask_size, device=device, dtype=torch.bool).tril() if is_causal
else torch.randint(0, 2, size=mask_size, device=device, dtype=torch.bool))
with freeze_rng_state():
# Python impl only supports float mask and 3D inputs.
attn_mask_float = attn_mask
if attn_mask_float is not None:
attn_mask_float = torch.zeros_like(attn_mask, dtype=query.dtype)
attn_mask_float.masked_fill_(attn_mask.logical_not(), float("-inf"))
q, k, v = query.view(-1, L, E), key.view(-1, S, E), value.view(-1, S, E)
a = attn_mask_float
if a is not None and attn_mask_dim > 3:
a = a.view(-1, L, S)
expected = sdp_ref(q, k, v, attn_mask=a, dropout_p=dropout_p)
if input_dim > 3:
expected = expected.view(-1, N_prime, L, E)
with freeze_rng_state():
if is_causal:
# NB: Don't pass attn_mask here
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, None, dropout_p, is_causal)
# Error case: both explicit attn_mask and is_causal are set
with self.assertRaisesRegex(RuntimeError,
"Explicit attn_mask should not be set when is_causal=True"):
torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask, dropout_p, is_causal)
else:
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask, dropout_p, is_causal)
# This test the fully masked out rows case
if torch.isnan(expected).any():
row_sums = attn_mask.sum(dim=-1)
masked_out_rows = (row_sums == 0)
for _ in range((input_dim - attn_mask_dim) - 1):
masked_out_rows = masked_out_rows.unsqueeze(0)
masked_out_rows = masked_out_rows.expand(expected.shape[:-1])
# Slice out the fully masked rows from expected and actual
expected_masked_out = expected[masked_out_rows]
actual_masked_out = actual[masked_out_rows]
expected_all_nan = torch.isnan(expected_masked_out).all()
actual_all_zero = (actual_masked_out.abs().sum() == 0)
self.assertTrue(expected_all_nan)
self.assertTrue(actual_all_zero)
return
self.assertEqual(actual, expected)
if attn_mask_dim is None:
q = q.double().clone()
k = k.double().clone()
v = v.double().clone()
q.requires_grad_()
k.requires_grad_()
v.requires_grad_()
assert gradcheck(lambda *args, **kwargs: wrapper_set_seed(sdp_ref, *args, **kwargs),
(q, k, v, attn_mask, dropout_p))
assert gradcheck(lambda *args, **kwargs:
wrapper_set_seed(torch.nn.functional.scaled_dot_product_attention, *args, **kwargs),
(q, k, v, attn_mask, dropout_p))
def test_incompatible_mask(self, device):
def ones_tensor(*shape):
return torch.ones(shape, dtype=torch.float32)
S, L, E, H = 1, 2, 4, 1
qkv = ones_tensor(S, L, E)
mha = nn.MultiheadAttention(E, H)
mha.in_proj_weight = Parameter(torch.ones((E * 3, E)))
mha.out_proj.weight = Parameter(torch.ones((E, E)))
qkv = qkv.to(float)
kpm = ones_tensor(S, L) * float("-inf")
am = ones_tensor(L, L).to(bool)
def func():
return mha(qkv, qkv, qkv, need_weights=False, key_padding_mask=kpm, attn_mask=am)
self.assertRaises(RuntimeError, func)
@unittest.skipIf(TEST_WITH_CROSSREF, 'Fastpath not available with crossref')
@torch.no_grad()
def test_mask_check_fastpath(self):
"""
Test that fastpath is executed independently of the masks that are passed.
If the passed key padding mask is left aligned or mask_check=False, test that nested tensors are used
(sparsity fastpath), otherwise use fastpath with traditional tensors.
Also test that fast path is executed with both key padding mask and attention mask passed at the same time.
"""
x = torch.Tensor([[[1, 2], [3, 4], [5, 6]]]).to(torch.float)
def _test_fastpath(model, key_padding_mask, mock_return_value, attn_mask=None, nested_tensors=True):
with patch('torch._transformer_encoder_layer_fwd') as fastpath_mock:
fastpath_mock.return_value = mock_return_value
model(x, src_key_padding_mask=key_padding_mask, mask=attn_mask)
# If mock was called, fastpath was taken
self.assertTrue(fastpath_mock.called)
# If mock was called with nested tensors, sparsity fastpath was taken
for call_args, _ in fastpath_mock.call_args_list:
self.assertEqual(call_args[0].is_nested, nested_tensors)
encoder_layer = torch.nn.TransformerEncoderLayer(d_model=2, nhead=2, dim_feedforward=8, batch_first=True)
model = torch.nn.TransformerEncoder(encoder_layer, num_layers=2, enable_nested_tensor=True, mask_check=True)
model.eval()
aligned_key_padding_mask = torch.Tensor([[0, 0, 1]]).to(torch.bool)
not_aligned_key_padding_mask = torch.Tensor([[1, 0, 1]]).to(torch.bool)
attn_mask = torch.Tensor([[1, 0, 1], [0, 1, 0], [1, 0, 1]]).to(torch.bool)
nested_tensor_return_value = torch.nested.nested_tensor([torch.ones((2, 2), dtype=torch.float)])
tensor_return_value = torch.ones((1, 3, 2), dtype=torch.float)
# Left aligned mask results in sparsity fastpath
_test_fastpath(model, aligned_key_padding_mask, nested_tensor_return_value, nested_tensors=True)
# Not aligned mask results in fastpath
_test_fastpath(model, not_aligned_key_padding_mask, tensor_return_value, nested_tensors=False)
model = torch.nn.TransformerEncoder(encoder_layer, num_layers=2, enable_nested_tensor=False, mask_check=True)
model.eval()
# If nested tensor disabled, fastpath is always taken
_test_fastpath(model, aligned_key_padding_mask, tensor_return_value, nested_tensors=False)
_test_fastpath(model, not_aligned_key_padding_mask, tensor_return_value, nested_tensors=False)
# Fast path is taken if both attention mask and key padding mask are present
_test_fastpath(model, aligned_key_padding_mask, tensor_return_value, attn_mask=attn_mask, nested_tensors=False)
model = torch.nn.TransformerEncoder(encoder_layer, num_layers=2, enable_nested_tensor=True, mask_check=False)
model.eval()
# Mask check disabled results in sparisty fastpath, independently of the mask
_test_fastpath(model, aligned_key_padding_mask, nested_tensor_return_value, nested_tensors=True)
_test_fastpath(model, not_aligned_key_padding_mask, nested_tensor_return_value, nested_tensors=True)
# Test failing MHA when bias was NoneType
def test_bias_is_none(self):
x = torch.rand((1, 5, 10))
model = torch.nn.modules.activation.MultiheadAttention(10, 1, bias=False, batch_first=True)
model.eval()
model(x, x, x)
# completes without error
def test_transformer_bias_is_none(self, device):
batch_size = 2
seqlen = 3
d_model = 8
nhead = 4
encoder_layer = torch.nn.TransformerEncoderLayer(d_model, nhead, bias=False, batch_first=True, device=device)
encoder_layer.eval()
x = torch.randn(batch_size, seqlen, d_model, device=device)
# runs without error
encoder_layer(x)
with self.assertWarnsRegex(UserWarning, "encoder_layer.self_attn was passed bias=False"):
encoder = torch.nn.TransformerEncoder(encoder_layer, num_layers=1).eval()
encoder(x)
with self.assertWarnsRegex(UserWarning, "self_attn was passed bias=False"):
transformer = torch.nn.Transformer(
d_model=d_model, nhead=nhead, bias=False, batch_first=True, device=device
).eval()
transformer(x, x)
def test_train_with_is_causal(self, device):
# training with is_causal
S, L, E, H = 1, 2, 2, 1
layer = nn.TransformerEncoderLayer(
d_model=2,
dim_feedforward=4,
nhead=H,
batch_first=True,
activation="gelu",
dropout=0,
)
criterion = nn.MSELoss()
encoder = nn.TransformerEncoder(layer, 2).to(device)
optimizer = optim.SGD(encoder.parameters(), lr=0.1, momentum=0.9)
encoder.train()
encoder.train()
optimizer.zero_grad()
inputs = torch.randn(S, L, E).to(device)
mask = torch.nn.Transformer.generate_square_subsequent_mask(
inputs.size(1), device=device
)
outputs = encoder(inputs, mask=mask, is_causal=True)
loss = criterion(outputs[:, 0:2, :], inputs[:, 0:2, :])
loss.backward()
optimizer.step()
# inference with is_causal
t_qvk = torch.randn((S, L, E), device=device, dtype=torch.float32)
mha = nn.MultiheadAttention(E, H).to(device)
mask = torch.nn.Transformer.generate_square_subsequent_mask(
S, device=device
)
attn_out, _ = mha(t_qvk, t_qvk, t_qvk, attn_mask=mask, is_causal=True)
# Can't give only is_causal
with self.assertRaises(RuntimeError):
mha(t_qvk, t_qvk, t_qvk, is_causal=True)
# # Passing a causal mask sets is_causal to 1
causal_mask = torch.triu(
torch.ones(L, L, device=inputs.device) * float('-inf'), diagonal=1
).to(torch.bool)
mock_layer = MagicMock(torch.nn.MultiheadAttention(E, H), return_value=inputs)
encoder.layers[1] = mock_layer
outputs = encoder(inputs, mask=causal_mask)
mock_layer.assert_called_with(ANY, src_mask=ANY, is_causal=True, src_key_padding_mask=ANY)
# check expected numerical values with all kernels
self.is_causal_kernels([SDPBackend.MATH], device)
def is_causal_kernels(self, kernels, device):
def ones_tensor(*shape):
return torch.ones(shape, device=device, dtype=torch.float32).to(device)
S, L, E, H = 1, 2, 4, 1
qkv = ones_tensor(S, L, E)
mha = nn.MultiheadAttention(E, H).to(device)
mha.in_proj_weight = Parameter(torch.ones((E * 3, E), device=device))
mha.out_proj.weight = Parameter(torch.ones((E, E), device=device))
expected = torch.ones(size=(S, L, E)).to(device) * 16
mask = torch.nn.Transformer.generate_square_subsequent_mask(
qkv.size(1), device=device
)
for kernel in kernels:
with sdpa_kernel(backends=[kernel]):
actual, _ = mha(qkv, qkv, qkv, attn_mask=mask, need_weights=False, is_causal=True)
self.assertTrue(torch.equal(actual, expected))
if kernel != SDPBackend.MATH:
# fails with embedding size not multiple of 4
with self.assertRaisesRegex(RuntimeError, "No available kernel"):
qkv_f, mha_f = ones_tensor(S, L, 2), nn.MultiheadAttention(2, H).to(device)
mask = torch.nn.Transformer.generate_square_subsequent_mask(
qkv_f.size(1), device=device
)
_ = mha_f(qkv_f, qkv_f, qkv_f, attn_mask=mask, need_weights=False, is_causal=True)
torch.cuda.synchronize()
@unittest.skipIf(
not PLATFORM_SUPPORTS_FLASH_ATTENTION, "Platform does not supposrt fused SDPA or pre-SM80 hardware"
)
def test_is_causal_gpu(self):
device = 'cuda'
self.is_causal_kernels([SDPBackend.MATH, SDPBackend.EFFICIENT_ATTENTION], device)
def test_script_mha_in_proj_weight_none(self):
mha = torch.nn.MultiheadAttention(
embed_dim=128, num_heads=8, kdim=256, vdim=256
).eval()
torch.jit.script(mha)
@unittest.skipIf(TEST_WITH_CROSSREF, 'Fastpath not available with crossref')
@torch.no_grad()
def test_disable_fastpath(self, device):
def _test_te_fastpath_called(model, args, kwargs=None, return_value=None, is_called=True):
if kwargs is None:
kwargs = {}
with patch('torch._transformer_encoder_layer_fwd') as fastpath_mock:
fastpath_mock.return_value = return_value
model(*args, **kwargs)
self.assertTrue(fastpath_mock.called == is_called)
def _test_mha_fastpath_called(model, args, kwargs=None, return_value=None, is_called=True):
if kwargs is None:
kwargs = {}
with patch('torch._native_multi_head_attention') as fastpath_mock:
fastpath_mock.return_value = return_value
model(*args, **kwargs)
self.assertTrue(fastpath_mock.called == is_called)
inp = torch.tensor([[[1, 2], [3, 4], [5, 6]]], dtype=torch.float32, device=device)
src_key_padding_mask = torch.tensor([[1, 0, 1]], dtype=torch.bool, device=device)
te_return_value = torch.ones((1, 3, 2), dtype=torch.float32)
encoder_layer = torch.nn.TransformerEncoderLayer(d_model=2, nhead=2, dim_feedforward=8, batch_first=True)
te = torch.nn.TransformerEncoder(encoder_layer, num_layers=2, enable_nested_tensor=True, mask_check=True)
te = te.to(device).eval()
t = torch.nn.Transformer(d_model=2, nhead=2, batch_first=True, device=device).eval()
src = torch.tensor([[[0, 1], [2, 3], [4, 5]]], dtype=torch.float32, device=device)
tgt = torch.tensor([[[0, 1], [2, 3], [4, 5], [6, 7]]], dtype=torch.float32, device=device)
t_return_value = torch.ones((1, 3, 2), dtype=torch.float32, device=device)
mha = nn.MultiheadAttention(2, 2, batch_first=True, device=device).eval()
q = torch.tensor([[[0, 1], [2, 3]]], dtype=torch.float32, device=device)
mha_return_value = torch.ones((1, 3, 2), dtype=torch.float32, device=device)
_test_te_fastpath_called(
te, (inp,), kwargs={'src_key_padding_mask': src_key_padding_mask},
return_value=te_return_value, is_called=True
)
_test_te_fastpath_called(t, (src, tgt), return_value=t_return_value, is_called=True)
_test_mha_fastpath_called(mha, (q, q, q,), return_value=mha_return_value, is_called=True)
torch.backends.mha.set_fastpath_enabled(False)
_test_te_fastpath_called(
te, (inp,), kwargs={'src_key_padding_mask': src_key_padding_mask},
return_value=te_return_value, is_called=False
)
_test_te_fastpath_called(t, (src, tgt), return_value=t_return_value, is_called=False)
_test_mha_fastpath_called(mha, (q, q, q,), return_value=mha_return_value, is_called=False)
torch.backends.mha.set_fastpath_enabled(True)
_test_te_fastpath_called(
te, (inp,), kwargs={'src_key_padding_mask': src_key_padding_mask},
return_value=te_return_value, is_called=True
)
_test_te_fastpath_called(t, (src, tgt), return_value=t_return_value, is_called=True)
_test_mha_fastpath_called(mha, (q, q, q,), return_value=mha_return_value, is_called=True)
| TestTransformers |
python | pandas-dev__pandas | pandas/tests/indexing/test_loc.py | {
"start": 71172,
"end": 79921
} | class ____:
@td.skip_if_no("pyarrow")
def test_loc_setitem_with_expansion_preserves_ea_dtype(self):
# GH#41626 retain index.dtype in setitem-with-expansion
idx = Index([Timestamp(0).date()], dtype="date32[pyarrow]")
df = DataFrame({"A": range(1)}, index=idx)
item = Timestamp("1970-01-02").date()
df.loc[item] = 1
exp_index = Index([idx[0], item], dtype=idx.dtype)
tm.assert_index_equal(df.index, exp_index)
ser = df["A"].iloc[:-1]
ser.loc[item] = 1
tm.assert_index_equal(ser.index, exp_index)
def test_loc_setitem_with_expansion_large_dataframe(self, monkeypatch):
# GH#10692
size_cutoff = 50
with monkeypatch.context():
monkeypatch.setattr(libindex, "_SIZE_CUTOFF", size_cutoff)
result = DataFrame({"x": range(size_cutoff)}, dtype="int64")
result.loc[size_cutoff] = size_cutoff
expected = DataFrame({"x": range(size_cutoff + 1)}, dtype="int64")
tm.assert_frame_equal(result, expected)
def test_loc_setitem_empty_series(self):
# GH#5226
# partially set with an empty object series
ser = Series(dtype=object)
ser.loc[1] = 1
tm.assert_series_equal(ser, Series([1], index=range(1, 2)))
ser.loc[3] = 3
tm.assert_series_equal(ser, Series([1, 3], index=[1, 3]))
def test_loc_setitem_empty_series_float(self):
# GH#5226
# partially set with an empty object series
ser = Series(dtype=object)
ser.loc[1] = 1.0
tm.assert_series_equal(ser, Series([1.0], index=range(1, 2)))
ser.loc[3] = 3.0
tm.assert_series_equal(ser, Series([1.0, 3.0], index=[1, 3]))
def test_loc_setitem_empty_series_str_idx(self):
# GH#5226
# partially set with an empty object series
ser = Series(dtype=object)
ser.loc["foo"] = 1
tm.assert_series_equal(ser, Series([1], index=Index(["foo"])))
ser.loc["bar"] = 3
tm.assert_series_equal(ser, Series([1, 3], index=Index(["foo", "bar"])))
ser.loc[3] = 4
tm.assert_series_equal(ser, Series([1, 3, 4], index=Index(["foo", "bar", 3])))
def test_loc_setitem_incremental_with_dst(self):
# GH#20724
base = datetime(2015, 11, 1, tzinfo=gettz("US/Pacific"))
idxs = [base + timedelta(seconds=i * 900) for i in range(16)]
result = Series([0], index=[idxs[0]])
for ts in idxs:
result.loc[ts] = 1
expected = Series(1, index=idxs)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"conv",
[
lambda x: x,
lambda x: x.to_datetime64(),
lambda x: x.to_pydatetime(),
lambda x: np.datetime64(x),
],
ids=["self", "to_datetime64", "to_pydatetime", "np.datetime64"],
)
def test_loc_setitem_datetime_keys_cast(self, conv, using_infer_string):
# GH#9516, GH#51363 changed in 3.0 to not cast on Index.insert
dt1 = Timestamp("20130101 09:00:00")
dt2 = Timestamp("20130101 10:00:00")
df = DataFrame()
df.loc[conv(dt1), "one"] = 100
df.loc[conv(dt2), "one"] = 200
expected = DataFrame(
{"one": [100.0, 200.0]},
index=Index(
[conv(dt1), conv(dt2)], dtype=None if using_infer_string else object
),
columns=Index(["one"]),
)
tm.assert_frame_equal(df, expected)
def test_loc_setitem_categorical_column_retains_dtype(self, ordered):
# GH16360
result = DataFrame({"A": [1]})
result.loc[:, "B"] = Categorical(["b"], ordered=ordered)
expected = DataFrame({"A": [1], "B": Categorical(["b"], ordered=ordered)})
tm.assert_frame_equal(result, expected)
def test_loc_setitem_with_expansion_and_existing_dst(self):
# GH#18308
start = Timestamp("2017-10-29 00:00:00+0200", tz="Europe/Madrid")
end = Timestamp("2017-10-29 03:00:00+0100", tz="Europe/Madrid")
ts = Timestamp("2016-10-10 03:00:00", tz="Europe/Madrid")
idx = date_range(start, end, inclusive="left", freq="h")
assert ts not in idx # i.e. result.loc setitem is with-expansion
result = DataFrame(index=idx, columns=["value"])
result.loc[ts, "value"] = 12
expected = DataFrame(
[np.nan] * len(idx) + [12],
index=idx.append(DatetimeIndex([ts])),
columns=["value"],
dtype=object,
)
tm.assert_frame_equal(result, expected)
def test_setitem_with_expansion(self):
# indexing - setting an element
df = DataFrame(
data=to_datetime(["2015-03-30 20:12:32", "2015-03-12 00:11:11"]),
columns=["time"],
)
df["new_col"] = ["new", "old"]
df.time = df.set_index("time").index.tz_localize("UTC")
v = df[df.new_col == "new"].set_index("time").index.tz_convert("US/Pacific")
# pre-2.0 trying to set a single element on a part of a different
# timezone converted to object; in 2.0 it retains dtype
df2 = df.copy()
df2.loc[df2.new_col == "new", "time"] = v
expected = Series([v[0].tz_convert("UTC"), df.loc[1, "time"]], name="time")
tm.assert_series_equal(df2.time, expected)
v = df.loc[df.new_col == "new", "time"] + Timedelta("1s").as_unit("s")
df.loc[df.new_col == "new", "time"] = v
tm.assert_series_equal(df.loc[df.new_col == "new", "time"], v)
def test_loc_setitem_with_expansion_inf_upcast_empty(self):
# Test with np.inf in columns
df = DataFrame()
df.loc[0, 0] = 1
df.loc[1, 1] = 2
df.loc[0, np.inf] = 3
result = df.columns
expected = Index([0, 1, np.inf], dtype=np.float64)
tm.assert_index_equal(result, expected)
@pytest.mark.filterwarnings("ignore:indexing past lexsort depth")
@pytest.mark.parametrize("has_ref", [True, False])
def test_loc_setitem_with_expansion_nonunique_index(self, index, has_ref):
# GH#40096
if not len(index):
pytest.skip("Not relevant for empty Index")
index = index.repeat(2) # ensure non-unique
N = len(index)
arr = np.arange(N).astype(np.int64)
orig = DataFrame(arr, index=index)
# key that will requiring object-dtype casting in the index
key = "kapow"
assert key not in index # otherwise test is invalid
# TODO: using a tuple key breaks here in many cases
exp_index = index.insert(len(index), key)
if isinstance(index, MultiIndex):
assert exp_index[-1][0] == key
else:
assert exp_index[-1] == key
exp_data = np.arange(N + 1).astype(np.float64)
expected = DataFrame(exp_data, index=exp_index)
# Add new row, but no new columns
df = orig.copy()
if has_ref:
view = df[:]
df.loc[key, 0] = N
tm.assert_frame_equal(df, expected)
# add new row on a Series
ser = orig.copy()[0]
if has_ref:
view = ser[:]
ser.loc[key] = N
# the series machinery lets us preserve int dtype instead of float
expected = expected[0].astype(np.int64)
tm.assert_series_equal(ser, expected)
# add new row and new column
df = orig.copy()
if has_ref:
view = df[:] # noqa: F841
df.loc[key, 1] = N
expected = DataFrame(
{0: list(arr) + [np.nan], 1: [np.nan] * N + [float(N)]},
index=exp_index,
)
tm.assert_frame_equal(df, expected)
def test_loc_setitem_with_expansion_preserves_nullable_int(
self, any_numeric_ea_dtype
):
# GH#42099
ser = Series([0, 1, 2, 3], dtype=any_numeric_ea_dtype)
df = DataFrame({"data": ser})
result = DataFrame(index=df.index)
result.loc[df.index, "data"] = ser
tm.assert_frame_equal(result, df, check_column_type=False)
result = DataFrame(index=df.index)
result.loc[df.index, "data"] = ser._values
tm.assert_frame_equal(result, df, check_column_type=False)
def test_loc_setitem_ea_not_full_column(self):
# GH#39163
df = DataFrame({"A": range(5)})
val = date_range("2016-01-01", periods=3, tz="US/Pacific")
df.loc[[0, 1, 2], "B"] = val
bex = val.append(DatetimeIndex([pd.NaT, pd.NaT], dtype=val.dtype))
expected = DataFrame({"A": range(5), "B": bex})
assert expected.dtypes["B"] == val.dtype
tm.assert_frame_equal(df, expected)
| TestLocSetitemWithExpansion |
python | walkccc__LeetCode | solutions/2904. Shortest and Lexicographically Smallest Beautiful String/2904.py | {
"start": 0,
"end": 616
} | class ____:
# Same as 76. Minimum Window Substring
def shortestBeautifulSubstring(self, s: str, k: int) -> str:
bestLeft = -1
minLength = len(s) + 1
ones = 0
l = 0
for r, c in enumerate(s):
if c == '1':
ones += 1
while ones == k:
if r - l + 1 < minLength:
bestLeft = l
minLength = r - l + 1
elif r - l + 1 == minLength and s[l:l + minLength] < s[bestLeft:bestLeft + minLength]:
bestLeft = l
if s[l] == '1':
ones -= 1
l += 1
return "" if bestLeft == -1 else s[bestLeft:bestLeft + minLength]
| Solution |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/datastore.py | {
"start": 6215,
"end": 10197
} | class ____(GoogleCloudBaseOperator):
"""
Import entities from Cloud Storage to Google Cloud Datastore.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDatastoreImportEntitiesOperator`
.. seealso::
https://cloud.google.com/datastore/docs/export-import-entities
:param bucket: container in Cloud Storage to store data
:param file: path of the backup metadata file in the specified Cloud Storage bucket.
It should have the extension .overall_export_metadata
:param namespace: optional namespace of the backup metadata file in
the specified Cloud Storage bucket.
:param entity_filter: description of what data from the project is included in
the export, refer to
https://cloud.google.com/datastore/docs/reference/rest/Shared.Types/EntityFilter
:param labels: client-assigned labels for cloud storage
:param datastore_conn_id: the name of the connection id to use
:param polling_interval_in_seconds: number of seconds to wait before polling for
execution status again
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"bucket",
"file",
"namespace",
"entity_filter",
"labels",
"impersonation_chain",
)
operator_extra_links = (CloudDatastoreImportExportLink(),)
def __init__(
self,
*,
bucket: str,
file: str,
namespace: str | None = None,
entity_filter: dict | None = None,
labels: dict | None = None,
datastore_conn_id: str = "google_cloud_default",
polling_interval_in_seconds: float = 10,
project_id: str = PROVIDE_PROJECT_ID,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.datastore_conn_id = datastore_conn_id
self.bucket = bucket
self.file = file
self.namespace = namespace
self.entity_filter = entity_filter
self.labels = labels
self.polling_interval_in_seconds = polling_interval_in_seconds
self.project_id = project_id
self.impersonation_chain = impersonation_chain
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"project_id": self.project_id,
}
def execute(self, context: Context):
self.log.info("Importing data from Cloud Storage bucket %s", self.bucket)
ds_hook = DatastoreHook(
self.datastore_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = ds_hook.import_from_storage_bucket(
bucket=self.bucket,
file=self.file,
namespace=self.namespace,
entity_filter=self.entity_filter,
labels=self.labels,
project_id=self.project_id,
)
operation_name = result["name"]
result = ds_hook.poll_operation_until_done(operation_name, self.polling_interval_in_seconds)
state = result["metadata"]["common"]["state"]
if state != "SUCCESSFUL":
raise AirflowException(f"Operation failed: result={result}")
CloudDatastoreImportExportLink.persist(context=context)
return result
| CloudDatastoreImportEntitiesOperator |
python | modin-project__modin | modin/config/envvars.py | {
"start": 32741,
"end": 33440
} | class ____(EnvironmentVariable, type=ExactStr):
"""
Set ``MetricsMode`` value to disable/enable metrics collection.
Metric handlers are registered through `add_metric_handler` and can
be used to record graphite-style timings or values. It is the
responsibility of the handler to define how those emitted metrics
are handled.
"""
varname = "MODIN_METRICS_MODE"
choices = ("enable", "disable")
default = "enable"
@classmethod
def enable(cls) -> None:
"""Enable all metric collection."""
cls.put("enable")
@classmethod
def disable(cls) -> None:
"""Disable all metric collection."""
cls.put("disable")
| MetricsMode |
python | ApeWorX__ape | src/ape_test/config.py | {
"start": 1040,
"end": 2280
} | class ____(PluginConfig):
"""
Configuration related to test gas reports.
"""
exclude: list[GasExclusion] = []
"""
Contract methods patterns to skip. Specify ``contract_name:`` and not
``method_name:`` to skip all methods in the contract. Only specify
``method_name:`` to skip all methods across all contracts. Specify
both to skip methods in a certain contracts. Entries use glob-rules;
use ``prefix_*`` to skip all items with a certain prefix.
"""
reports: list[str] = []
"""
Report-types to use. Currently, only supports `terminal`.
"""
model_config = SettingsConfigDict(extra="allow", env_prefix="APE_TEST_")
@field_validator("reports", mode="before")
@classmethod
def validate_reports(cls, values):
values = list(set(values or []))
valid = ("terminal",)
for val in values:
if val not in valid:
valid_str = ", ".join(valid)
raise ValueError(f"Invalid gas-report format '{val}'. Valid: {valid_str}")
return values
@property
def show(self) -> bool:
return "terminal" in self.reports
_ReportType = Union[bool, dict]
"""Dict is for extra report settings."""
| GasConfig |
python | numba__numba | numba/misc/gdb_print_extension.py | {
"start": 5823,
"end": 6176
} | class ____:
def __init__(self, val):
self.val = val
def to_string(self):
# unituples are arrays
fields = self.val.type.fields()
lo, hi = fields[0].type.range()
buf = []
for i in range(lo, hi + 1):
buf.append(str(self.val[i]))
return "(%s)" % ', '.join(buf)
| NumbaUniTuplePrinter |
python | encode__django-rest-framework | rest_framework/validators.py | {
"start": 9217,
"end": 12010
} | class ____:
message = None
missing_message = _('This field is required.')
requires_context = True
def __init__(self, queryset, field, date_field, message=None):
self.queryset = queryset
self.field = field
self.date_field = date_field
self.message = message or self.message
def enforce_required_fields(self, attrs):
"""
The `UniqueFor<Range>Validator` classes always force an implied
'required' state on the fields they are applied to.
"""
missing_items = {
field_name: self.missing_message
for field_name in [self.field, self.date_field]
if field_name not in attrs
}
if missing_items:
raise ValidationError(missing_items, code='required')
def filter_queryset(self, attrs, queryset, field_name, date_field_name):
raise NotImplementedError('`filter_queryset` must be implemented.')
def exclude_current_instance(self, attrs, queryset, instance):
"""
If an instance is being updated, then do not include
that instance itself as a uniqueness conflict.
"""
if instance is not None:
return queryset.exclude(pk=instance.pk)
return queryset
def __call__(self, attrs, serializer):
# Determine the underlying model field names. These may not be the
# same as the serializer field names if `source=<>` is set.
field_name = serializer.fields[self.field].source_attrs[-1]
date_field_name = serializer.fields[self.date_field].source_attrs[-1]
self.enforce_required_fields(attrs)
queryset = self.queryset
queryset = self.filter_queryset(attrs, queryset, field_name, date_field_name)
queryset = self.exclude_current_instance(attrs, queryset, serializer.instance)
if qs_exists(queryset):
message = self.message.format(date_field=self.date_field)
raise ValidationError({
self.field: message
}, code='unique')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return (self.message == other.message
and self.missing_message == other.missing_message
and self.requires_context == other.requires_context
and self.queryset == other.queryset
and self.field == other.field
and self.date_field == other.date_field
)
def __repr__(self):
return '<%s(queryset=%s, field=%s, date_field=%s)>' % (
self.__class__.__name__,
smart_repr(self.queryset),
smart_repr(self.field),
smart_repr(self.date_field)
)
| BaseUniqueForValidator |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/operators/msgraph.py | {
"start": 2686,
"end": 14188
} | class ____(BaseOperator):
"""
A Microsoft Graph API operator which allows you to execute REST call to the Microsoft Graph API.
https://learn.microsoft.com/en-us/graph/use-the-api
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:MSGraphAsyncOperator`
:param url: The url being executed on the Microsoft Graph API (templated).
:param response_type: The expected return type of the response as a string. Possible value are: `bytes`,
`str`, `int`, `float`, `bool` and `datetime` (default is None).
:param method: The HTTP method being used to do the REST call (default is GET).
:param conn_id: The HTTP Connection ID to run the operator against (templated).
:param key: The key that will be used to store `XCom's` ("return_value" is default).
:param timeout: The HTTP timeout being used by the `KiotaRequestAdapter` (default is None).
When no timeout is specified or set to None then there is no HTTP timeout on each request.
:param proxies: A dict defining the HTTP proxies to be used (default is None).
:param scopes: The scopes to be used (default is ["https://graph.microsoft.com/.default"]).
:param api_version: The API version of the Microsoft Graph API to be used (default is v1).
You can pass an enum named APIVersion which has 2 possible members v1 and beta,
or you can pass a string as `v1.0` or `beta`.
:param result_processor: Function to further process the response from MS Graph API
(default is lambda: response, context: response). When the response returned by the
`KiotaRequestAdapterHook` are bytes, then those will be base64 encoded into a string.
:param event_handler: Function to process the event returned from `MSGraphTrigger`. By default, when the
event returned by the `MSGraphTrigger` has a failed status, an AirflowException is being raised with
the message from the event, otherwise the response from the event payload is returned.
:param serializer: Class which handles response serialization (default is ResponseSerializer).
Bytes will be base64 encoded into a string, so it can be stored as an XCom.
"""
template_fields: Sequence[str] = (
"url",
"response_type",
"path_parameters",
"url_template",
"query_parameters",
"headers",
"data",
"conn_id",
)
def __init__(
self,
*,
url: str,
response_type: str | None = None,
path_parameters: dict[str, Any] | None = None,
url_template: str | None = None,
method: str = "GET",
query_parameters: dict[str, Any] | None = None,
headers: dict[str, str] | None = None,
data: dict[str, Any] | str | BytesIO | None = None,
conn_id: str = KiotaRequestAdapterHook.default_conn_name,
key: str = XCOM_RETURN_KEY,
timeout: float | None = None,
proxies: dict | None = None,
scopes: str | list[str] | None = None,
api_version: APIVersion | str | None = None,
pagination_function: Callable[[MSGraphAsyncOperator, dict, Context], tuple[str, dict]] | None = None,
result_processor: Callable[[Any, Context], Any] = lambda result, **context: result,
event_handler: Callable[[dict[Any, Any] | None, Context], Any] | None = None,
serializer: type[ResponseSerializer] = ResponseSerializer,
**kwargs: Any,
):
super().__init__(**kwargs)
self.url = url
self.response_type = response_type
self.path_parameters = path_parameters
self.url_template = url_template
self.method = method
self.query_parameters = query_parameters
self.headers = headers
self.data = data
self.conn_id = conn_id
self.key = key
self.timeout = timeout
self.proxies = proxies
self.scopes = scopes
self.api_version = api_version
self.pagination_function = pagination_function or self.paginate
self.result_processor = result_processor
self.event_handler = event_handler or default_event_handler
self.serializer: ResponseSerializer = serializer()
def execute(self, context: Context) -> None:
self.defer(
trigger=MSGraphTrigger(
url=self.url,
response_type=self.response_type,
path_parameters=self.path_parameters,
url_template=self.url_template,
method=self.method,
query_parameters=self.query_parameters,
headers=self.headers,
data=self.data,
conn_id=self.conn_id,
timeout=self.timeout,
proxies=self.proxies,
scopes=self.scopes,
api_version=self.api_version,
serializer=type(self.serializer),
),
method_name=self.execute_complete.__name__,
)
def execute_complete(
self,
context: Context,
event: dict[Any, Any] | None = None,
) -> Any:
"""
Execute callback when MSGraphTrigger finishes execution.
This method gets executed automatically when MSGraphTrigger completes its execution.
"""
self.log.debug("context: %s", context)
if event:
self.log.debug("%s completed with %s: %s", self.task_id, event.get("status"), event)
response = execute_callable(
self.event_handler, # type: ignore
event,
context,
"event_handler signature has changed, event parameter should be defined before context!",
)
self.log.debug("response: %s", response)
results = self.pull_xcom(context=context)
if response:
response = self.serializer.deserialize(response)
self.log.debug("deserialize response: %s", response)
result = execute_callable(
self.result_processor,
response,
context,
"result_processor signature has changed, result parameter should be defined before context!",
)
self.log.debug("processed response: %s", result)
try:
self.trigger_next_link(
response=response, method_name=self.execute_complete.__name__, context=context
)
except TaskDeferred as exception:
self.append_result(
results=results,
result=result,
append_result_as_list_if_absent=True,
)
self.push_xcom(context=context, value=results)
raise exception
if not results:
return result
self.append_result(results=results, result=result)
return results
return None
@classmethod
def append_result(
cls,
results: Any,
result: Any,
append_result_as_list_if_absent: bool = False,
) -> list[Any]:
if isinstance(results, list):
if isinstance(result, list):
results.extend(result)
else:
results.append(result)
else:
if append_result_as_list_if_absent:
if isinstance(result, list):
return result
return [result]
return result
return results
def pull_xcom(self, context: Context | dict[str, Any]) -> list:
map_index = context["ti"].map_index
value = list(
context["ti"].xcom_pull(
key=self.key,
task_ids=self.task_id,
dag_id=self.dag_id,
map_indexes=map_index,
)
or []
)
if map_index:
self.log.info(
"Pulled XCom with task_id '%s' and dag_id '%s' and key '%s' and map_index %s: %s",
self.task_id,
self.dag_id,
self.key,
map_index,
value,
)
else:
self.log.info(
"Pulled XCom with task_id '%s' and dag_id '%s' and key '%s': %s",
self.task_id,
self.dag_id,
self.key,
value,
)
return value
def push_xcom(self, context: Any, value) -> None:
self.log.debug("do_xcom_push: %s", self.do_xcom_push)
if self.do_xcom_push:
self.log.info(
"Pushing XCom with task_id '%s' and dag_id '%s' and key '%s': %s",
self.task_id,
self.dag_id,
self.key,
value,
)
context["ti"].xcom_push(key=self.key, value=value)
@staticmethod
def paginate(
operator: MSGraphAsyncOperator, response: dict, **context
) -> tuple[Any, dict[str, Any] | None]:
odata_count = response.get("@odata.count")
if odata_count and operator.query_parameters:
query_parameters = deepcopy(operator.query_parameters)
top = query_parameters.get("$top")
if top and odata_count:
if len(response.get("value", [])) == top and context:
results = operator.pull_xcom(context)
skip = sum([len(result["value"]) for result in results]) + top if results else top
query_parameters["$skip"] = skip
return operator.url, query_parameters
return response.get("@odata.nextLink"), operator.query_parameters
def trigger_next_link(self, response, method_name: str, context: Context) -> None:
if isinstance(response, dict):
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)
url, query_parameters = self.pagination_function(self, response, **context) # type: ignore
except TypeError:
warnings.warn(
"pagination_function signature has changed, context parameter should be a kwargs argument!",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
url, query_parameters = self.pagination_function(self, response, context) # type: ignore
self.log.debug("url: %s", url)
self.log.debug("query_parameters: %s", query_parameters)
if url:
self.defer(
trigger=MSGraphTrigger(
url=url,
method=self.method,
query_parameters=query_parameters,
response_type=self.response_type,
conn_id=self.conn_id,
timeout=self.timeout,
proxies=self.proxies,
api_version=self.api_version,
serializer=type(self.serializer),
),
method_name=method_name,
)
| MSGraphAsyncOperator |
python | tensorflow__tensorflow | tensorflow/python/keras/metrics.py | {
"start": 32636,
"end": 33709
} | class ____(MeanMetricWrapper):
"""Computes how often integer targets are in the top `K` predictions.
Args:
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to 5.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1)
>>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
>>> m.result().numpy()
0.5
>>> m.reset_state()
>>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
... sample_weight=[0.7, 0.3])
>>> m.result().numpy()
0.3
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.SparseTopKCategoricalAccuracy()])
```
"""
def __init__(self, k=5, name='sparse_top_k_categorical_accuracy', dtype=None):
super(SparseTopKCategoricalAccuracy, self).__init__(
sparse_top_k_categorical_accuracy, name, dtype=dtype, k=k)
| SparseTopKCategoricalAccuracy |
python | jina-ai__jina | tests/integration/gateway_clients/test_clients_gateways.py | {
"start": 3069,
"end": 17539
} | class ____:
def send_requests_once(
self,
requests,
deployment: str,
head: bool,
metadata: dict = None,
shard_id=None,
endpoint: str = None,
timeout: float = 1.0,
retries: int = -1,
) -> asyncio.Task:
assert head
request = requests[0]
response_msg = copy.deepcopy(request)
new_docs = DocumentArray()
docs = request.docs
for doc in docs:
clientid = doc.text[0:7]
new_doc = Document(id=doc.id, text=doc.text + f'-{clientid}-{deployment}')
new_docs.append(new_doc)
response_msg.data.docs = new_docs
async def task_wrapper():
import random
await asyncio.sleep(1 / (random.randint(1, 3) * 10))
return response_msg, {}
return asyncio.create_task(task_wrapper())
def send_discover_endpoint(self, *args, **kwargs):
async def task_wrapper():
from jina.constants import __default_endpoint__
from jina.proto import jina_pb2
ep = jina_pb2.EndpointsProto()
ep.endpoints.extend([__default_endpoint__])
return ep, None
return asyncio.create_task(task_wrapper())
def create_runtime(
graph_dict: Dict, protocol: str, port: int, call_counts=None, monkeypatch=None
):
import json
graph_description = json.dumps(graph_dict)
if call_counts:
def decompress_wo_data(self):
from jina.proto import jina_pb2
call_counts.put_nowait('called')
self._pb_body = jina_pb2.DataRequestProtoWoData()
self._pb_body.ParseFromString(self.buffer)
self.buffer = None
def decompress(self):
from jina.proto import jina_pb2
call_counts.put_nowait('called')
if self.buffer:
self._pb_body = jina_pb2.DataRequestProto()
self._pb_body.ParseFromString(self.buffer)
self.buffer = None
elif self.is_decompressed_wo_data:
self._pb_body_old = self._pb_body
self._pb_body = jina_pb2.DataRequestProto()
self._pb_body.ParseFromString(
self._pb_body_old.SerializePartialToString()
)
else:
raise ValueError('the buffer is already decompressed')
monkeypatch.setattr(
DataRequest,
'_decompress',
decompress,
)
monkeypatch.setattr(
DataRequest,
'_decompress_wo_data',
decompress_wo_data,
)
with AsyncNewLoopRuntime(
set_gateway_parser().parse_args(
[
'--port',
f'{port}',
'--graph-description',
f'{graph_description}',
'--deployments-addresses',
'{}',
'--protocol',
protocol,
]
),
req_handler_cls=GatewayRequestHandler,
) as runtime:
runtime.run_forever()
def client_send(client_id: int, port: int, protocol: str):
from jina.clients import Client
c = Client(protocol=protocol, port=port)
# send requests
return c.post(
on='/',
inputs=DocumentArray([Document(text=f'client{client_id}-Request')]),
return_responses=True,
)
NUM_PARALLEL_CLIENTS = 10
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_grpc_gateway_runtime_handle_messages_linear(
linear_graph_dict, monkeypatch, protocol
):
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyMockConnectionPool.send_requests_once,
)
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_discover_endpoint',
DummyMockConnectionPool.send_discover_endpoint,
)
port = random_port()
def client_validate(client_id: int):
responses = client_send(client_id, port, protocol)
assert len(responses) > 0
assert len(responses[0].docs) == 1
assert (
responses[0].docs[0].text
== f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment1-client{client_id}-deployment2-client{client_id}-deployment3'
)
p = multiprocessing.Process(
target=create_runtime,
kwargs={
'protocol': protocol,
'port': port,
'graph_dict': linear_graph_dict,
},
)
p.start()
time.sleep(1.0)
client_processes = []
for i in range(NUM_PARALLEL_CLIENTS):
cp = multiprocessing.Process(target=client_validate, kwargs={'client_id': i})
cp.start()
client_processes.append(cp)
for cp in client_processes:
cp.join()
p.terminate()
p.join()
for cp in client_processes:
assert cp.exitcode == 0
def test_grpc_gateway_runtime_lazy_request_access(linear_graph_dict, monkeypatch):
call_counts = multiprocessing.Queue()
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyNoDocAccessMockConnectionPool.send_requests_once,
)
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_discover_endpoint',
DummyMockConnectionPool.send_discover_endpoint,
)
port = random_port()
def client_validate(client_id: int):
responses = client_send(client_id, port, 'grpc')
assert len(responses) > 0
p = multiprocessing.Process(
target=create_runtime,
kwargs={
'protocol': 'grpc',
'port': port,
'graph_dict': linear_graph_dict,
'call_counts': call_counts,
'monkeypatch': monkeypatch,
},
)
p.start()
time.sleep(1.0)
client_processes = []
for i in range(NUM_PARALLEL_CLIENTS):
cp = multiprocessing.Process(target=client_validate, kwargs={'client_id': i})
cp.start()
client_processes.append(cp)
for cp in client_processes:
cp.join()
p.terminate()
p.join()
assert (
_queue_length(call_counts) == NUM_PARALLEL_CLIENTS * 3
) # request should be decompressed at start and end and when accessing parameters
for cp in client_processes:
assert cp.exitcode == 0
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_grpc_gateway_runtime_handle_messages_bifurcation(
bifurcation_graph_dict, monkeypatch, protocol
):
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyMockConnectionPool.send_requests_once,
)
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_discover_endpoint',
DummyMockConnectionPool.send_discover_endpoint,
)
port = random_port()
def client_validate(client_id: int):
responses = client_send(client_id, port, protocol)
assert len(responses) > 0
# reducing is supposed to happen in the deployments, in the test it will get a single doc in non deterministic order
assert len(responses[0].docs) == 1
assert (
responses[0].docs[0].text
== f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment2-client{client_id}-deployment3'
or responses[0].docs[0].text
== f'client{client_id}-Request-client{client_id}-deployment4-client{client_id}-deployment5'
)
p = multiprocessing.Process(
target=create_runtime,
kwargs={
'protocol': protocol,
'port': port,
'graph_dict': bifurcation_graph_dict,
},
)
p.start()
time.sleep(1.0)
client_processes = []
for i in range(NUM_PARALLEL_CLIENTS):
cp = multiprocessing.Process(target=client_validate, kwargs={'client_id': i})
cp.start()
client_processes.append(cp)
for cp in client_processes:
cp.join()
p.terminate()
p.join()
for cp in client_processes:
assert cp.exitcode == 0
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_grpc_gateway_runtime_handle_messages_merge_in_gateway(
merge_graph_dict_directly_merge_in_gateway, monkeypatch, protocol
):
# TODO: Test incomplete until merging of responses is ready
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyMockConnectionPool.send_requests_once,
)
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_discover_endpoint',
DummyMockConnectionPool.send_discover_endpoint,
)
port = random_port()
def client_validate(client_id: int):
responses = client_send(client_id, port, protocol)
assert len(responses) > 0
assert len(responses[0].docs) == 1
deployment1_path = (
f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment1-client{client_id}-merger'
in responses[0].docs[0].text
)
deployment2_path = (
f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment2-client{client_id}-merger'
in responses[0].docs[0].text
)
assert deployment1_path or deployment2_path
p = multiprocessing.Process(
target=create_runtime,
kwargs={
'protocol': protocol,
'port': port,
'graph_dict': merge_graph_dict_directly_merge_in_gateway,
},
)
p.start()
time.sleep(1.0)
client_processes = []
for i in range(NUM_PARALLEL_CLIENTS):
cp = multiprocessing.Process(target=client_validate, kwargs={'client_id': i})
cp.start()
client_processes.append(cp)
for cp in client_processes:
cp.join()
p.terminate()
p.join()
for cp in client_processes:
assert cp.exitcode == 0
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_grpc_gateway_runtime_handle_messages_merge_in_last_deployment(
merge_graph_dict_directly_merge_in_last_deployment, monkeypatch, protocol
):
# TODO: Test incomplete until merging of responses is ready
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyMockConnectionPool.send_requests_once,
)
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_discover_endpoint',
DummyMockConnectionPool.send_discover_endpoint,
)
port = random_port()
def client_validate(client_id: int):
responses = client_send(client_id, port, protocol)
assert len(responses) > 0
assert len(responses[0].docs) == 1
deployment1_path = (
f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment1-client{client_id}-merger-client{client_id}-deployment_last'
in responses[0].docs[0].text
)
deployment2_path = (
f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment2-client{client_id}-merger-client{client_id}-deployment_last'
in responses[0].docs[0].text
)
assert deployment1_path or deployment2_path
p = multiprocessing.Process(
target=create_runtime,
kwargs={
'protocol': protocol,
'port': port,
'graph_dict': merge_graph_dict_directly_merge_in_last_deployment,
},
)
p.start()
time.sleep(1.0)
client_processes = []
for i in range(NUM_PARALLEL_CLIENTS):
cp = multiprocessing.Process(target=client_validate, kwargs={'client_id': i})
cp.start()
client_processes.append(cp)
for cp in client_processes:
cp.join()
p.terminate()
p.join()
for cp in client_processes:
assert cp.exitcode == 0
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_grpc_gateway_runtime_handle_messages_complete_graph_dict(
complete_graph_dict, monkeypatch, protocol
):
# TODO: Test incomplete until merging of responses is ready
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyMockConnectionPool.send_requests_once,
)
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_discover_endpoint',
DummyMockConnectionPool.send_discover_endpoint,
)
port = random_port()
def client_validate(client_id: int):
responses = client_send(client_id, port, protocol)
assert len(responses) > 0
assert len(responses[0].docs) == 1
# there are 3 incoming paths to merger, it could be any
assert (
f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment1-client{client_id}-merger-client{client_id}-deployment_last'
== responses[0].docs[0].text
or f'client{client_id}-Request-client{client_id}-deployment0-client{client_id}-deployment2-client{client_id}-deployment3-client{client_id}-merger-client{client_id}-deployment_last'
== responses[0].docs[0].text
or f'client{client_id}-Request-client{client_id}-deployment4-client{client_id}-deployment5-client{client_id}-merger-client{client_id}-deployment_last'
== responses[0].docs[0].text
)
p = multiprocessing.Process(
target=create_runtime,
kwargs={
'protocol': protocol,
'port': port,
'graph_dict': complete_graph_dict,
},
)
p.start()
time.sleep(1.0)
client_processes = []
for i in range(NUM_PARALLEL_CLIENTS):
cp = multiprocessing.Process(target=client_validate, kwargs={'client_id': i})
cp.start()
client_processes.append(cp)
for cp in client_processes:
cp.join()
p.terminate()
p.join()
for cp in client_processes:
assert cp.exitcode == 0
def _queue_length(queue: 'multiprocessing.Queue'):
# Pops elements from the queue and counts them
# Used if the underlying queue is sensitive to ordering
# This is used instead of multiprocessing.Queue.qsize() since it is not supported on MacOS
length = 0
while not queue.empty():
queue.get()
length += 1
return length
| DummyMockConnectionPool |
python | mlflow__mlflow | mlflow/data/filesystem_dataset_source.py | {
"start": 110,
"end": 2540
} | class ____(DatasetSource):
"""
Represents the source of a dataset stored on a filesystem, e.g. a local UNIX filesystem,
blob storage services like S3, etc.
"""
@property
@abstractmethod
def uri(self):
"""The URI referring to the dataset source filesystem location.
Returns:
The URI referring to the dataset source filesystem location,
e.g "s3://mybucket/path/to/mydataset", "/tmp/path/to/my/dataset" etc.
"""
@staticmethod
@abstractmethod
def _get_source_type() -> str:
"""
Returns:
A string describing the filesystem containing the dataset, e.g. "local", "s3", ...
"""
@abstractmethod
def load(self, dst_path=None) -> str:
"""Downloads the dataset source to the local filesystem.
Args:
dst_path: Path of the local filesystem destination directory to which to download the
dataset source. If the directory does not exist, it is created. If
unspecified, the dataset source is downloaded to a new uniquely-named
directory on the local filesystem, unless the dataset source already
exists on the local filesystem, in which case its local path is returned
directly.
Returns:
The path to the downloaded dataset source on the local filesystem.
"""
@staticmethod
@abstractmethod
def _can_resolve(raw_source: Any) -> bool:
"""
Args:
raw_source: The raw source, e.g. a string like "s3://mybucket/path/to/iris/data".
Returns:
True if this DatasetSource can resolve the raw source, False otherwise.
"""
@classmethod
@abstractmethod
def _resolve(cls, raw_source: Any) -> "FileSystemDatasetSource":
"""
Args:
raw_source: The raw source, e.g. a string like "s3://mybucket/path/to/iris/data".
"""
@abstractmethod
def to_dict(self) -> dict[Any, Any]:
"""
Returns:
A JSON-compatible dictionary representation of the FileSystemDatasetSource.
"""
@classmethod
@abstractmethod
def from_dict(cls, source_dict: dict[Any, Any]) -> "FileSystemDatasetSource":
"""
Args:
source_dict: A dictionary representation of the FileSystemDatasetSource.
"""
| FileSystemDatasetSource |
python | ray-project__ray | python/ray/train/tests/test_torch_predictor.py | {
"start": 643,
"end": 782
} | class ____(torch.nn.Module):
def forward(self, input_tensor):
return {"a": input_tensor, "b": input_tensor}
| DummyModelMultiOutput |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/test_organization_workflow_group_history.py | {
"start": 685,
"end": 5155
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-workflow-group-history"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.group = self.create_group()
self.project = self.group.project
self.organization = self.project.organization
self.history: list[WorkflowFireHistory] = []
self.workflow = self.create_workflow(organization=self.organization)
self.detector = self.create_detector(
project=self.project,
type=ErrorGroupType.slug,
)
DetectorGroup.objects.create(
detector=self.detector,
group=self.group,
)
for i in range(3):
self.history.append(
WorkflowFireHistory(
workflow=self.workflow,
group=self.group,
event_id=uuid4().hex,
)
)
self.group_2 = self.create_group()
self.detector_2 = self.create_detector(
project=self.project,
type=MetricIssue.slug,
)
DetectorGroup.objects.create(
detector=self.detector_2,
group=self.group_2,
)
self.history.append(
WorkflowFireHistory(
workflow=self.workflow,
group=self.group_2,
event_id=uuid4().hex,
)
)
histories: list[WorkflowFireHistory] = WorkflowFireHistory.objects.bulk_create(self.history)
# manually update date_added
for i in range(3):
histories[i].update(date_added=before_now(days=i + 1))
histories[-1].update(date_added=before_now(days=1))
self.base_triggered_date = before_now(days=1)
self.login_as(self.user)
def test_simple(self) -> None:
resp = self.get_success_response(
self.organization.slug,
self.workflow.id,
start=before_now(days=6),
end=before_now(days=0),
)
assert resp.data == serialize(
[
WorkflowGroupHistory(
self.group,
3,
self.base_triggered_date,
self.history[0].event_id,
detector=self.detector,
),
WorkflowGroupHistory(
self.group_2,
1,
self.base_triggered_date,
self.history[-1].event_id,
detector=self.detector_2,
),
],
self.user,
WorkflowGroupHistorySerializer(),
)
def test_pagination(self) -> None:
resp = self.get_success_response(
self.organization.slug,
self.workflow.id,
start=before_now(days=6),
end=before_now(days=0),
per_page=1,
)
assert resp.data == serialize(
[
WorkflowGroupHistory(
self.group,
3,
self.base_triggered_date,
self.history[0].event_id,
detector=self.detector,
)
],
self.user,
WorkflowGroupHistorySerializer(),
)
assert resp["X-Hits"] == "2" # 2 unique groups, not 4 total history records
resp = self.get_success_response(
self.organization.slug,
self.workflow.id,
start=before_now(days=6),
end=before_now(days=0),
per_page=1,
cursor=self.get_cursor_headers(resp)[1],
)
assert resp.data == serialize(
[
WorkflowGroupHistory(
self.group_2,
1,
self.base_triggered_date,
self.history[-1].event_id,
detector=self.detector_2,
)
],
self.user,
WorkflowGroupHistorySerializer(),
)
assert resp["X-Hits"] == "2" # 2 unique groups, not 4 total history records
def test_invalid_dates_error(self) -> None:
self.get_error_response(
self.organization.slug,
self.workflow.id,
start=before_now(days=0),
end=before_now(days=6),
status_code=400,
)
| WorkflowGroupHistoryEndpointTest |
python | pytorch__pytorch | torch/fx/experimental/unification/multipledispatch/variadic.py | {
"start": 2276,
"end": 2962
} | class ____(metaclass=VariadicSignatureMeta):
"""A class whose getitem method can be used to generate a new type
representing a specific variadic signature.
Examples
--------
>>> # xdoctest: +SKIP
>>> Variadic[int] # any number of int arguments
<class 'multipledispatch.variadic.Variadic[int]'>
>>> Variadic[(int, str)] # any number of one of int or str arguments
<class 'multipledispatch.variadic.Variadic[(int, str)]'>
>>> issubclass(int, Variadic[int])
True
>>> issubclass(int, Variadic[(int, str)])
True
>>> issubclass(str, Variadic[(int, str)])
True
>>> issubclass(float, Variadic[(int, str)])
False
"""
| Variadic |
python | PyCQA__pylint | tests/functional/s/super/super_checks.py | {
"start": 4126,
"end": 4399
} | class ____(Child):
def method(self):
print("Grandchild")
super(GrandChild, self).method()
super(Child, self).method()
super(Niece, self).method() # [bad-super-call]
# Reported in https://github.com/pylint-dev/pylint/issues/4922
| GrandChild |
python | fastai__fastai | fastai/medical/imaging.py | {
"start": 1670,
"end": 5879
} | class ____(PILBase):
_open_args,_tensor_cls,_show_args = {},TensorDicom,TensorDicom._show_args
@classmethod
def create(cls, fn:Path|str|bytes, mode=None)->None:
"Open a `DICOM file` from path `fn` or bytes `fn` and load it as a `PIL Image`"
if isinstance(fn,bytes): im = Image.fromarray(pydicom.dcmread(pydicom.filebase.DicomBytesIO(fn)).pixel_array)
if isinstance(fn,(Path,str)): im = Image.fromarray(pydicom.dcmread(fn).pixel_array)
im.load()
im = im._new(im.im)
return cls(im.convert(mode) if mode else im)
PILDicom._tensor_cls = TensorDicom
# %% ../../nbs/60_medical.imaging.ipynb 16
@patch
def png16read(self:Path): return array(Image.open(self), dtype=np.uint16)
# %% ../../nbs/60_medical.imaging.ipynb 17
@patch(as_prop=True)
def pixels(self:DcmDataset):
"`pixel_array` as a tensor"
return tensor(self.pixel_array.astype(np.float32))
# %% ../../nbs/60_medical.imaging.ipynb 19
@patch(as_prop=True)
def scaled_px(self:DcmDataset):
"`pixels` scaled by `RescaleSlope` and `RescaleIntercept`"
img = self.pixels
if hasattr(self, 'RescaleSlope') and hasattr(self, 'RescaleIntercept') is not None:
return img * self.RescaleSlope + self.RescaleIntercept
else: return img
# %% ../../nbs/60_medical.imaging.ipynb 25
def array_freqhist_bins(self, n_bins=100):
"A numpy based function to split the range of pixel values into groups, such that each group has around the same number of pixels"
imsd = np.sort(self.flatten())
t = np.array([0.001])
t = np.append(t, np.arange(n_bins)/n_bins+(1/2/n_bins))
t = np.append(t, 0.999)
t = (len(imsd)*t+0.5).astype(int)
return np.unique(imsd[t])
# %% ../../nbs/60_medical.imaging.ipynb 26
@patch
def freqhist_bins(self:Tensor, n_bins=100):
"A function to split the range of pixel values into groups, such that each group has around the same number of pixels"
imsd = self.view(-1).sort()[0]
t = torch.cat([tensor([0.001]),
torch.arange(n_bins).float()/n_bins+(1/2/n_bins),
tensor([0.999])])
t = (len(imsd)*t).long()
return imsd[t].unique()
# %% ../../nbs/60_medical.imaging.ipynb 33
@patch
def hist_scaled_pt(self:Tensor, brks=None):
# Pytorch-only version - switch to this if/when interp_1d can be optimized
if brks is None: brks = self.freqhist_bins()
brks = brks.to(self.device)
ys = torch.linspace(0., 1., len(brks)).to(self.device)
return self.flatten().interp_1d(brks, ys).reshape(self.shape).clamp(0.,1.)
# %% ../../nbs/60_medical.imaging.ipynb 34
@patch
def hist_scaled(self:Tensor, brks=None):
"Scales a tensor using `freqhist_bins` to values between 0 and 1"
if self.device.type=='cuda': return self.hist_scaled_pt(brks)
if brks is None: brks = self.freqhist_bins()
ys = np.linspace(0., 1., len(brks))
x = self.numpy().flatten()
x = np.interp(x, brks.numpy(), ys)
return tensor(x).reshape(self.shape).clamp(0.,1.)
# %% ../../nbs/60_medical.imaging.ipynb 39
@patch
def hist_scaled(self:DcmDataset, brks=None, min_px=None, max_px=None):
"Pixels scaled to a `min_px` and `max_px` value"
px = self.scaled_px
if min_px is not None: px[px<min_px] = min_px
if max_px is not None: px[px>max_px] = max_px
return px.hist_scaled(brks=brks)
# %% ../../nbs/60_medical.imaging.ipynb 43
@patch
def windowed(self:Tensor, w, l):
"Scale pixel intensity by window width and window level"
px = self.clone()
px_min = l - w//2
px_max = l + w//2
px[px<px_min] = px_min
px[px>px_max] = px_max
return (px-px_min) / (px_max-px_min)
# %% ../../nbs/60_medical.imaging.ipynb 44
@patch
def windowed(self:DcmDataset, w, l):
return self.scaled_px.windowed(w,l)
# %% ../../nbs/60_medical.imaging.ipynb 45
# From https://radiopaedia.org/articles/windowing-ct
dicom_windows = types.SimpleNamespace(
brain=(80,40),
subdural=(254,100),
stroke=(8,32),
brain_bone=(2800,600),
brain_soft=(375,40),
lungs=(1500,-600),
mediastinum=(350,50),
abdomen_soft=(400,50),
liver=(150,30),
spine_soft=(250,50),
spine_bone=(1800,400)
)
# %% ../../nbs/60_medical.imaging.ipynb 47
| PILDicom |
python | pandas-dev__pandas | pandas/core/resample.py | {
"start": 63531,
"end": 67319
} | class ____(Resampler):
ax: DatetimeIndex
@property
def _resampler_for_grouping(self) -> type[DatetimeIndexResamplerGroupby]:
return DatetimeIndexResamplerGroupby
def _get_binner_for_time(self):
# this is how we are actually creating the bins
return self._timegrouper._get_time_bins(self.ax)
def _downsample(self, how, **kwargs):
"""
Downsample the cython defined function.
Parameters
----------
how : string / cython mapped function
**kwargs : kw args passed to how function
"""
ax = self.ax
# Excludes `on` column when provided
obj = self._obj_with_exclusions
if not len(ax):
# reset to the new freq
obj = obj.copy()
obj.index = obj.index._with_freq(self.freq)
assert obj.index.freq == self.freq, (obj.index.freq, self.freq)
return obj
# we are downsampling
# we want to call the actual grouper method here
result = obj.groupby(self._grouper).aggregate(how, **kwargs)
return self._wrap_result(result)
def _adjust_binner_for_upsample(self, binner):
"""
Adjust our binner when upsampling.
The range of a new index should not be outside specified range
"""
if self.closed == "right":
binner = binner[1:]
else:
binner = binner[:-1]
return binner
def _upsample(self, method, limit: int | None = None, fill_value=None):
"""
Parameters
----------
method : string {'backfill', 'bfill', 'pad',
'ffill', 'asfreq'} method for upsampling
limit : int, default None
Maximum size gap to fill when reindexing
fill_value : scalar, default None
Value to use for missing values
"""
if self._from_selection:
raise ValueError(
"Upsampling from level= or on= selection "
"is not supported, use .set_index(...) "
"to explicitly set index to datetime-like"
)
ax = self.ax
obj = self._selected_obj
binner = self.binner
res_index = self._adjust_binner_for_upsample(binner)
# if index exactly matches target grid (same freq & alignment), use fast path
if (
limit is None
and to_offset(ax.inferred_freq) == self.freq
and len(obj) == len(res_index)
and obj.index.equals(res_index)
):
result = obj.copy()
result.index = res_index
else:
if method == "asfreq":
method = None
result = obj.reindex(
res_index, method=method, limit=limit, fill_value=fill_value
)
return self._wrap_result(result)
def _wrap_result(self, result):
    """Post-process ``result``; when this resampler was built over a
    PeriodIndex, convert the result's index back to period form."""
    # Standard resampler post-processing first.
    result = super()._wrap_result(result)

    # we may have a different kind that we were asked originally
    # convert if needed
    if isinstance(self.ax, PeriodIndex) and not isinstance(
        result.index, PeriodIndex
    ):
        if isinstance(result.index, MultiIndex):
            # GH 24103 - e.g. groupby resample: only the innermost
            # (datetime-like) level needs conversion.
            if not isinstance(result.index.levels[-1], PeriodIndex):
                new_level = result.index.levels[-1].to_period(self.freq)
                result.index = result.index.set_levels(new_level, level=-1)
        else:
            result.index = result.index.to_period(self.freq)
    return result
@set_module("pandas.api.typing")
# error: Definition of "ax" in base class "_GroupByMixin" is incompatible
# with definition in base class "DatetimeIndexResampler"
| DatetimeIndexResampler |
python | sqlalchemy__sqlalchemy | test/orm/test_eager_relations.py | {
"start": 1894,
"end": 107152
} | class ____(_fixtures.FixtureTest, testing.AssertsCompiledSQL):
run_inserts = "once"
run_deletes = None
__dialect__ = "default"
def test_basic(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="joined",
order_by=Address.id,
)
},
)
sess = fixture_session()
q = sess.query(User)
eq_(
[
User(
id=7,
addresses=[Address(id=1, email_address="jack@bean.com")],
)
],
q.filter(User.id == 7).all(),
)
eq_(self.static.user_address_result, q.order_by(User.id).all())
@testing.combinations(True, False)
def test_from_statement(self, legacy):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
order_by=Address.id,
)
},
)
sess = fixture_session()
stmt = select(User).where(User.id == 7)
def go():
if legacy:
ret = (
sess.query(User)
.from_statement(stmt)
.options(joinedload(User.addresses))
.all()
)
else:
ret = sess.scalars(
select(User)
.from_statement(stmt)
.options(joinedload(User.addresses))
).all()
eq_(self.static.user_address_result[0:1], ret)
# joinedload can't be applied here so this necessarily
# has to lazy load the addresses
self.assert_sql_count(testing.db, go, 2)
@testing.combinations(True, False)
def test_from_statement_contains_eager(self, legacy):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
order_by=Address.id,
)
},
)
sess = fixture_session()
# for contains_eager, Address.id is enough for it to be picked up
stmt = (
select(User, Address.id).where(User.id == 7).join(User.addresses)
)
def go():
if legacy:
ret = (
sess.query(User)
.from_statement(stmt)
.options(contains_eager(User.addresses))
.all()
)
else:
ret = sess.scalars(
select(User)
.from_statement(stmt)
.options(contains_eager(User.addresses))
).all()
eq_(self.static.user_address_result[0:1], ret)
# joinedload can't be applied here so this necessarily
# has to lazy load the addresses
self.assert_sql_count(testing.db, go, 1)
def test_column_property_adaptation(self, decl_base):
"""test #2316 in support of #8064"""
class A(decl_base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
type = Column(String(40), nullable=False)
__mapper_args__ = {"polymorphic_on": type}
A.anything = column_property(A.id + 1000)
class B(A):
__tablename__ = "b"
account_id = Column(Integer, ForeignKey("a.id"), primary_key=True)
x_id = Column(Integer, ForeignKey("x.id"), nullable=False)
__mapper_args__ = {"polymorphic_identity": "named"}
class X(decl_base):
__tablename__ = "x"
id = Column(Integer, primary_key=True)
b = relationship("B")
self.assert_compile(
select(X).options(joinedload(X.b)),
"SELECT x.id, a_1.id AS id_1, a_1.type, a_1.id + :id_2 AS anon_1, "
"b_1.account_id, b_1.x_id FROM x "
"LEFT OUTER JOIN "
"(a AS a_1 JOIN b AS b_1 ON a_1.id = b_1.account_id) "
"ON x.id = b_1.x_id",
)
def test_no_render_in_subquery(self):
"""test #6378"""
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="joined",
order_by=Address.id,
)
},
)
stmt = select(User)
self.assert_compile(
select(stmt.subquery()),
"SELECT anon_1.id, anon_1.name FROM (SELECT users.id AS id, "
"users.name AS name FROM users) AS anon_1",
)
self.assert_compile(
stmt,
"SELECT users.id, users.name, addresses_1.id AS id_1, "
"addresses_1.user_id, addresses_1.email_address FROM users "
"LEFT OUTER JOIN addresses AS addresses_1 "
"ON users.id = addresses_1.user_id ORDER BY addresses_1.id",
)
def test_late_compile(self):
User, Address, addresses, users = (
self.classes.User,
self.classes.Address,
self.tables.addresses,
self.tables.users,
)
m = self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
sess.query(User).all()
m.add_property(
"addresses",
relationship(
self.mapper_registry.map_imperatively(Address, addresses)
),
)
sess.expunge_all()
def go():
eq_(
[
User(
id=7,
addresses=[
Address(id=1, email_address="jack@bean.com")
],
)
],
sess.query(User)
.options(joinedload(User.addresses))
.filter(User.id == 7)
.all(),
)
self.assert_sql_count(testing.db, go, 1)
def test_no_orphan(self):
"""An eagerly loaded child object is not marked as an orphan"""
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address, cascade="all,delete-orphan", lazy="joined"
)
},
)
self.mapper_registry.map_imperatively(Address, addresses)
sess = fixture_session()
user = sess.get(User, 7)
assert getattr(User, "addresses").hasparent(
sa.orm.attributes.instance_state(user.addresses[0]),
optimistic=True,
)
assert not sa.orm.class_mapper(Address)._is_orphan(
sa.orm.attributes.instance_state(user.addresses[0])
)
def test_orderby(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="joined",
order_by=addresses.c.email_address,
)
},
)
q = fixture_session().query(User)
eq_(
[
User(id=7, addresses=[Address(id=1)]),
User(
id=8,
addresses=[
Address(id=3, email_address="ed@bettyboop.com"),
Address(id=4, email_address="ed@lala.com"),
Address(id=2, email_address="ed@wood.com"),
],
),
User(id=9, addresses=[Address(id=5)]),
User(id=10, addresses=[]),
],
q.order_by(User.id).all(),
)
def test_orderby_multi(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="joined",
order_by=[addresses.c.email_address, addresses.c.id],
)
},
)
q = fixture_session().query(User)
eq_(
[
User(id=7, addresses=[Address(id=1)]),
User(
id=8,
addresses=[
Address(id=3, email_address="ed@bettyboop.com"),
Address(id=4, email_address="ed@lala.com"),
Address(id=2, email_address="ed@wood.com"),
],
),
User(id=9, addresses=[Address(id=5)]),
User(id=10, addresses=[]),
],
q.order_by(User.id).all(),
)
def test_orderby_related(self):
"""A regular mapper select on a single table can
order by a relationship to a second table"""
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
Address, lazy="joined", order_by=addresses.c.id
)
),
)
q = fixture_session().query(User)
result = (
q.filter(User.id == Address.user_id)
.order_by(Address.email_address)
.all()
)
eq_(
[
User(
id=8,
addresses=[
Address(id=2, email_address="ed@wood.com"),
Address(id=3, email_address="ed@bettyboop.com"),
Address(id=4, email_address="ed@lala.com"),
],
),
User(id=9, addresses=[Address(id=5)]),
User(id=7, addresses=[Address(id=1)]),
],
result,
)
def test_orderby_desc(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
Address,
lazy="joined",
order_by=[sa.desc(addresses.c.email_address)],
)
),
)
sess = fixture_session()
eq_(
[
User(id=7, addresses=[Address(id=1)]),
User(
id=8,
addresses=[
Address(id=2, email_address="ed@wood.com"),
Address(id=4, email_address="ed@lala.com"),
Address(id=3, email_address="ed@bettyboop.com"),
],
),
User(id=9, addresses=[Address(id=5)]),
User(id=10, addresses=[]),
],
sess.query(User).order_by(User.id).all(),
)
def test_no_ad_hoc_orderby(self):
"""part of #2992; make sure string label references can't
access an eager loader, else an eager load can corrupt the query.
This behavior relies upon the allow_label_resolve flag to disable
a column expression from being resolvable in an "order by label"
context.
"""
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
User, users, properties=dict(addresses=relationship(Address))
)
sess = fixture_session()
q = (
sess.query(User)
.join(User.addresses)
.options(joinedload(User.addresses))
.order_by("email_address")
)
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name, "
"addresses_1.id AS addresses_1_id, addresses_1.user_id AS "
"addresses_1_user_id, addresses_1.email_address AS "
"addresses_1_email_address FROM users JOIN addresses "
"ON users.id = addresses.user_id LEFT OUTER JOIN addresses "
"AS addresses_1 ON users.id = addresses_1.user_id "
"ORDER BY addresses.email_address",
)
q = (
sess.query(User)
.options(joinedload(User.addresses))
.order_by("email_address")
)
assert_raises_message(
sa.exc.CompileError,
"Can't resolve label reference for ORDER BY / GROUP BY.",
q.all,
)
def test_deferred_fk_col(self):
users, Dingaling, User, dingalings, Address, addresses = (
self.tables.users,
self.classes.Dingaling,
self.classes.User,
self.tables.dingalings,
self.classes.Address,
self.tables.addresses,
)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={
"user_id": deferred(addresses.c.user_id),
"user": relationship(User, lazy="joined"),
},
)
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
for q in [
sess.query(Address)
.filter(Address.id.in_([1, 4, 5]))
.order_by(Address.id),
sess.query(Address)
.filter(Address.id.in_([1, 4, 5]))
.order_by(Address.id)
.limit(3),
]:
sess.expunge_all()
eq_(
q.all(),
[
Address(id=1, user=User(id=7)),
Address(id=4, user=User(id=8)),
Address(id=5, user=User(id=9)),
],
)
sess.expunge_all()
a = sess.query(Address).filter(Address.id == 1).all()[0]
# 1.0 change! we don't automatically undefer user_id here.
# if the user wants a column undeferred, add the option.
def go():
eq_(a.user_id, 7)
# self.assert_sql_count(testing.db, go, 0)
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
a = sess.query(Address).filter(Address.id == 1).first()
def go():
eq_(a.user_id, 7)
# same, 1.0 doesn't check these
# self.assert_sql_count(testing.db, go, 0)
self.assert_sql_count(testing.db, go, 1)
# do the mapping in reverse
# (we would have just used an "addresses" backref but the test
# fixtures then require the whole backref to be set up, lazy loaders
# trigger, etc.)
sa.orm.clear_mappers()
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={"user_id": deferred(addresses.c.user_id)},
)
self.mapper_registry.map_imperatively(
User,
users,
properties={"addresses": relationship(Address, lazy="joined")},
)
for q in [
sess.query(User).filter(User.id == 7),
sess.query(User).filter(User.id == 7).limit(1),
]:
sess.expunge_all()
eq_(q.all(), [User(id=7, addresses=[Address(id=1)])])
sess.expunge_all()
u = sess.get(User, 7)
def go():
eq_(u.addresses[0].user_id, 7)
# assert that the eager loader didn't have to affect 'user_id' here
# and that its still deferred
self.assert_sql_count(testing.db, go, 1)
sa.orm.clear_mappers()
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address, lazy="joined", order_by=addresses.c.id
)
},
)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={
"user_id": deferred(addresses.c.user_id),
"dingalings": relationship(Dingaling, lazy="joined"),
},
)
self.mapper_registry.map_imperatively(
Dingaling,
dingalings,
properties={"address_id": deferred(dingalings.c.address_id)},
)
sess.expunge_all()
def go():
u = sess.get(User, 8)
eq_(
User(
id=8,
addresses=[
Address(id=2, dingalings=[Dingaling(id=1)]),
Address(id=3),
Address(id=4),
],
),
u,
)
self.assert_sql_count(testing.db, go, 1)
def test_aliased_stmt_includes_unnamed_fn(self):
User, Address = self.classes("User", "Address")
users, addresses = self.tables("users", "addresses")
self.mapper_registry.map_imperatively(
User,
users,
properties={"addresses": relationship(Address, lazy="joined")},
)
self.mapper_registry.map_imperatively(Address, addresses)
s = fixture_session()
# issue #6086
# statement wrapped in a subquery by limit() and group_by()
# func.count() is unlabeled (in 1.3 the _ColumnEntity would label it,
# in the ORM layer, hence there was no problem here).
# the _ColumnEntity needs to adapt func.count(User.id) to the anon
# count_1 label on the outside, corresponding_column can do it.
# but ClauseAdapter has to treat the FunctionElement as a ColumnElement
# whereas previously it was treating it as a FromClause (and
# FunctionElement should really not even be a FromClause but there's
# legacy baggage on that)
q = (
s.query(User, func.count(User.id))
.order_by(User.id)
.group_by(User.id, User.name)
.limit(1)
)
eq_(q.first(), (User(id=7), 1))
def test_we_adapt_for_compound_for_getter(self):
"""test #6596.
Ensure loading.py uses the compound eager adapter on the target
column before looking for a populator, rather than creating
a new populator.
"""
User, Address = self.classes("User", "Address")
users, addresses = self.tables("users", "addresses")
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(Address, order_by=addresses.c.id)
},
)
self.mapper_registry.map_imperatively(Address, addresses)
s = fixture_session()
q = (
select(User)
.options(joinedload(User.addresses))
.order_by(User.id)
.limit(2)
)
def strict_getter(self, key, raiseerr=True):
try:
rec = self._keymap[key]
except KeyError:
assert False
index = rec[0]
return operator.itemgetter(index)
with mock.patch(
"sqlalchemy.engine.result.ResultMetaData._getter", strict_getter
):
result = s.execute(q).unique().scalars().all()
eq_(result, self.static.user_address_result[0:2])
def test_options_pathing(self):
(
users,
Keyword,
orders,
items,
order_items,
Order,
Item,
User,
keywords,
item_keywords,
) = (
self.tables.users,
self.classes.Keyword,
self.tables.orders,
self.tables.items,
self.tables.order_items,
self.classes.Order,
self.classes.Item,
self.classes.User,
self.tables.keywords,
self.tables.item_keywords,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"orders": relationship(Order, order_by=orders.c.id) # o2m, m2o
},
)
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"items": relationship(
Item, secondary=order_items, order_by=items.c.id
) # m2m
},
)
self.mapper_registry.map_imperatively(
Item,
items,
properties={
"keywords": relationship(
Keyword, secondary=item_keywords, order_by=keywords.c.id
) # m2m
},
)
self.mapper_registry.map_imperatively(Keyword, keywords)
for opt, count in [
((joinedload(User.orders, Order.items),), 10),
(
(
joinedload(User.orders),
joinedload(User.orders, Order.items),
joinedload(User.orders, Order.items, Item.keywords),
),
1,
),
((joinedload(User.orders, Order.items, Item.keywords),), 10),
(
(
joinedload(User.orders, Order.items),
joinedload(User.orders, Order.items, Item.keywords),
),
5,
),
]:
with fixture_session() as sess:
def go():
eq_(
sess.query(User).options(*opt).order_by(User.id).all(),
self.static.user_item_keyword_result,
)
self.assert_sql_count(testing.db, go, count)
def test_disable_dynamic(self):
"""test no joined option on a dynamic."""
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={"addresses": relationship(Address, lazy="dynamic")},
)
self.mapper_registry.map_imperatively(Address, addresses)
sess = fixture_session()
assert_raises_message(
sa.exc.InvalidRequestError,
"User.addresses' does not support object "
"population - eager loading cannot be applied.",
sess.query(User).options(joinedload(User.addresses)).first,
)
def test_many_to_many(self):
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item,
)
self.mapper_registry.map_imperatively(Keyword, keywords)
self.mapper_registry.map_imperatively(
Item,
items,
properties=dict(
keywords=relationship(
Keyword,
secondary=item_keywords,
lazy="joined",
order_by=keywords.c.id,
)
),
)
q = fixture_session().query(Item).order_by(Item.id)
def go():
eq_(self.static.item_keyword_result, q.all())
self.assert_sql_count(testing.db, go, 1)
def go():
eq_(
self.static.item_keyword_result[0:2],
q.join(Item.keywords).filter(Keyword.name == "red").all(),
)
self.assert_sql_count(testing.db, go, 1)
def go():
ka = aliased(Keyword)
eq_(
self.static.item_keyword_result[0:2],
(
q.join(Item.keywords.of_type(ka)).filter(ka.name == "red")
).all(),
)
self.assert_sql_count(testing.db, go, 1)
def test_eager_option(self):
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item,
)
self.mapper_registry.map_imperatively(Keyword, keywords)
self.mapper_registry.map_imperatively(
Item,
items,
properties=dict(
keywords=relationship(
Keyword,
secondary=item_keywords,
lazy="select",
order_by=keywords.c.id,
)
),
)
q = fixture_session().query(Item)
def go():
eq_(
self.static.item_keyword_result[0:2],
(
q.options(joinedload(Item.keywords))
.join(Item.keywords)
.filter(keywords.c.name == "red")
)
.order_by(Item.id)
.all(),
)
self.assert_sql_count(testing.db, go, 1)
def test_cyclical(self):
"""A circular eager relationship breaks the cycle with a lazy loader"""
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
Address,
lazy="joined",
backref=sa.orm.backref("user", lazy="joined"),
order_by=Address.id,
)
),
)
eq_(sa.orm.class_mapper(User).get_property("addresses").lazy, "joined")
eq_(sa.orm.class_mapper(Address).get_property("user").lazy, "joined")
sess = fixture_session()
eq_(
self.static.user_address_result,
sess.query(User).order_by(User.id).all(),
)
def test_double_w_ac(self):
"""Eager loading with two relationships simultaneously,
from the same table, using aliases."""
(
users,
orders,
User,
Address,
Order,
addresses,
Item,
items,
order_items,
) = (
self.tables.users,
self.tables.orders,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
self.classes.Item,
self.tables.items,
self.tables.order_items,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"items": relationship(
Item,
secondary=order_items,
lazy="joined",
order_by=items.c.id,
)
},
)
self.mapper_registry.map_imperatively(Item, items)
open_mapper = aliased(Order, orders)
closed_mapper = aliased(Order, orders)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
Address, lazy="joined", order_by=addresses.c.id
),
open_orders=relationship(
open_mapper,
primaryjoin=sa.and_(
open_mapper.isopen == 1,
users.c.id == open_mapper.user_id,
),
lazy="joined",
order_by=open_mapper.id,
viewonly=True,
),
closed_orders=relationship(
closed_mapper,
primaryjoin=sa.and_(
closed_mapper.isopen == 0,
users.c.id == closed_mapper.user_id,
),
lazy="joined",
order_by=closed_mapper.id,
viewonly=True,
),
),
)
self._run_double_test()
def test_double_w_ac_against_subquery(self):
"""Eager loading with two relationships simultaneously,
from the same table, using aliases."""
(
users,
orders,
User,
Address,
Order,
addresses,
Item,
items,
order_items,
) = (
self.tables.users,
self.tables.orders,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
self.classes.Item,
self.tables.items,
self.tables.order_items,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"items": relationship(
Item,
secondary=order_items,
lazy="joined",
order_by=items.c.id,
)
},
)
self.mapper_registry.map_imperatively(Item, items)
open_mapper = aliased(
Order, select(orders).where(orders.c.isopen == 1).alias()
)
closed_mapper = aliased(
Order, select(orders).where(orders.c.isopen == 0).alias()
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
Address, lazy="joined", order_by=addresses.c.id
),
open_orders=relationship(
open_mapper, lazy="joined", order_by=open_mapper.id
),
closed_orders=relationship(
closed_mapper, lazy="joined", order_by=closed_mapper.id
),
),
)
self._run_double_test()
def test_double_same_mappers(self):
"""Eager loading with two relationships simultaneously,
from the same table, using aliases."""
(
addresses,
items,
order_items,
orders,
Item,
User,
Address,
Order,
users,
) = (
self.tables.addresses,
self.tables.items,
self.tables.order_items,
self.tables.orders,
self.classes.Item,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.users,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"items": relationship(
Item,
secondary=order_items,
lazy="joined",
order_by=items.c.id,
)
},
)
self.mapper_registry.map_imperatively(Item, items)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
Address, lazy="joined", order_by=addresses.c.id
),
open_orders=relationship(
Order,
primaryjoin=sa.and_(
orders.c.isopen == 1, users.c.id == orders.c.user_id
),
lazy="joined",
order_by=orders.c.id,
viewonly=True,
),
closed_orders=relationship(
Order,
primaryjoin=sa.and_(
orders.c.isopen == 0, users.c.id == orders.c.user_id
),
lazy="joined",
order_by=orders.c.id,
viewonly=True,
),
),
)
self._run_double_test()
def _run_double_test(self, no_items=False):
User, Address, Order, Item = self.classes(
"User", "Address", "Order", "Item"
)
q = fixture_session().query(User).order_by(User.id)
def items(*ids):
if no_items:
return {}
else:
return {"items": [Item(id=id_) for id_ in ids]}
def go():
eq_(
[
User(
id=7,
addresses=[Address(id=1)],
open_orders=[Order(id=3, **items(3, 4, 5))],
closed_orders=[
Order(id=1, **items(1, 2, 3)),
Order(id=5, **items(5)),
],
),
User(
id=8,
addresses=[
Address(id=2),
Address(id=3),
Address(id=4),
],
open_orders=[],
closed_orders=[],
),
User(
id=9,
addresses=[Address(id=5)],
open_orders=[Order(id=4, **items(1, 5))],
closed_orders=[Order(id=2, **items(1, 2, 3))],
),
User(id=10),
],
q.all(),
)
self.assert_sql_count(testing.db, go, 1)
@testing.combinations(
("plain",), ("cte", testing.requires.ctes), ("subquery",), id_="s"
)
def test_map_to_cte_subq(self, type_):
User, Address = self.classes("User", "Address")
users, addresses = self.tables("users", "addresses")
if type_ == "plain":
target = users
elif type_ == "cte":
target = select(users).cte()
elif type_ == "subquery":
target = select(users).subquery()
self.mapper_registry.map_imperatively(
User,
target,
properties={"addresses": relationship(Address, backref="user")},
)
self.mapper_registry.map_imperatively(Address, addresses)
sess = fixture_session()
q = (
sess.query(Address)
.options(joinedload(Address.user))
.order_by(Address.id)
)
eq_(q.all(), self.static.address_user_result)
def test_no_false_hits(self):
"""Eager loaders don't interpret main table columns as
part of their eager load."""
addresses, orders, User, Address, Order, users = (
self.tables.addresses,
self.tables.orders,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.users,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(Address, lazy="joined"),
"orders": relationship(Order, lazy="joined"),
},
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(Order, orders)
self.allusers = fixture_session().query(User).all()
# using a textual select, the columns will be 'id' and 'name'. the
# eager loaders have aliases which should not hit on those columns,
# they should be required to locate only their aliased/fully table
# qualified column name.
noeagers = (
fixture_session()
.query(User)
.from_statement(text("select * from users"))
.all()
)
assert "orders" not in noeagers[0].__dict__
assert "addresses" not in noeagers[0].__dict__
def test_limit(self):
"""Limit operations combined with lazy-load relationships."""
(
users,
items,
order_items,
orders,
Item,
User,
Address,
Order,
addresses,
) = (
self.tables.users,
self.tables.items,
self.tables.order_items,
self.tables.orders,
self.classes.Item,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
)
self.mapper_registry.map_imperatively(Item, items)
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"items": relationship(
Item,
secondary=order_items,
lazy="joined",
order_by=items.c.id,
)
},
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="joined",
order_by=addresses.c.id,
),
"orders": relationship(
Order, lazy="select", order_by=orders.c.id
),
},
)
sess = fixture_session()
q = sess.query(User)
result = q.order_by(User.id).limit(2).offset(1).all()
eq_(self.static.user_all_result[1:3], result)
def test_distinct(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
# this is an involved 3x union of the users table to get a lot of rows.
# then see if the "distinct" works its way out. you actually get
# the same result with or without the distinct, just via less or
# more rows.
u2 = users.alias("u2")
s = sa.union_all(
u2.select(),
u2.select(),
u2.select(),
).alias("u")
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="joined",
order_by=addresses.c.id,
)
},
)
sess = fixture_session()
q = sess.query(User)
def go():
result = (
q.filter(s.c.id == User.id).distinct().order_by(User.id).all()
)
eq_(self.static.user_address_result, result)
self.assert_sql_count(testing.db, go, 1)
def test_group_by_only(self):
# like distinct(), a group_by() has a similar effect so the
# joined eager load needs to subquery for this as well
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="joined",
order_by=addresses.c.email_address,
)
},
)
q = fixture_session().query(User)
eq_(
[
User(id=7, addresses=[Address(id=1)]),
User(
id=8,
addresses=[
Address(id=3, email_address="ed@bettyboop.com"),
Address(id=4, email_address="ed@lala.com"),
Address(id=2, email_address="ed@wood.com"),
],
),
User(id=9, addresses=[Address(id=5)]),
User(id=10, addresses=[]),
],
q.order_by(User.id).group_by(User).all(), # group by all columns
)
def test_limit_2(self):
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item,
)
self.mapper_registry.map_imperatively(Keyword, keywords)
self.mapper_registry.map_imperatively(
Item,
items,
properties=dict(
keywords=relationship(
Keyword,
secondary=item_keywords,
lazy="joined",
order_by=[keywords.c.id],
)
),
)
sess = fixture_session()
q = sess.query(Item)
result = (
q.filter(
(Item.description == "item 2")
| (Item.description == "item 5")
| (Item.description == "item 3")
)
.order_by(Item.id)
.limit(2)
.all()
)
eq_(self.static.item_keyword_result[1:3], result)
def test_limit_3(self):
"""test that the ORDER BY is propagated from the inner
select to the outer select, when using the
'wrapped' select statement resulting from the combination of
eager loading and limit/offset clauses."""
(
addresses,
items,
order_items,
orders,
Item,
User,
Address,
Order,
users,
) = (
self.tables.addresses,
self.tables.items,
self.tables.order_items,
self.tables.orders,
self.classes.Item,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.users,
)
self.mapper_registry.map_imperatively(Item, items)
self.mapper_registry.map_imperatively(
Order,
orders,
properties=dict(
items=relationship(Item, secondary=order_items, lazy="joined")
),
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
Address, lazy="joined", order_by=addresses.c.id
),
orders=relationship(
Order, lazy="joined", order_by=orders.c.id
),
),
)
sess = fixture_session()
q = sess.query(User)
if not testing.against("mssql"):
result = (
q.join(User.orders)
.order_by(Order.user_id.desc())
.limit(2)
.offset(1)
)
eq_(
[
User(
id=9,
orders=[Order(id=2), Order(id=4)],
addresses=[Address(id=5)],
),
User(
id=7,
orders=[Order(id=1), Order(id=3), Order(id=5)],
addresses=[Address(id=1)],
),
],
result.all(),
)
result = (
q.join(User.addresses)
.order_by(Address.email_address.desc())
.limit(1)
.offset(0)
)
eq_(
[
User(
id=7,
orders=[Order(id=1), Order(id=3), Order(id=5)],
addresses=[Address(id=1)],
)
],
result.all(),
)
def test_limit_4(self):
User, Order, addresses, users, orders = (
self.classes.User,
self.classes.Order,
self.tables.addresses,
self.tables.users,
self.tables.orders,
)
# tests the LIMIT/OFFSET aliasing on a mapper
# against a select. original issue from ticket #904
sel = (
sa.select(users, addresses.c.email_address)
.where(
users.c.id == addresses.c.user_id,
)
.alias("useralias")
)
self.mapper_registry.map_imperatively(
User,
sel,
properties={
"orders": relationship(
Order,
primaryjoin=sel.c.id == orders.c.user_id,
lazy="joined",
order_by=orders.c.id,
)
},
)
self.mapper_registry.map_imperatively(Order, orders)
sess = fixture_session()
eq_(
sess.query(User).first(),
User(
name="jack",
orders=[
Order(
address_id=1,
description="order 1",
isopen=0,
user_id=7,
id=1,
),
Order(
address_id=1,
description="order 3",
isopen=1,
user_id=7,
id=3,
),
Order(
address_id=None,
description="order 5",
isopen=0,
user_id=7,
id=5,
),
],
email_address="jack@bean.com",
id=7,
),
)
    def test_useget_cancels_eager(self):
        """test that a one to many lazyload cancels the unnecessary
        eager many-to-one join on the other side."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        self.mapper_registry.map_imperatively(User, users)
        self.mapper_registry.map_imperatively(
            Address,
            addresses,
            properties={
                "user": relationship(User, lazy="joined", backref="addresses")
            },
        )
        sess = fixture_session()
        u1 = sess.query(User).filter(User.id == 8).one()
        def go():
            eq_(u1.addresses[0].user, u1)
        # only the addresses SELECT is expected below: the joined-eager
        # Address.user is satisfied from the identity map (u1 is already
        # loaded) rather than joining back out to the users table
        # NOTE(review): bare expect_warnings() tolerates warnings raised
        # during the load — confirm which warning is anticipated here
        with testing.expect_warnings():
            self.assert_sql_execution(
                testing.db,
                go,
                CompiledSQL(
                    "SELECT addresses.id, "
                    "addresses.user_id, "
                    "addresses.email_address "
                    "FROM addresses WHERE :param_1 = "
                    "addresses.user_id",
                    {"param_1": 8},
                ),
            )
    def test_useget_cancels_eager_propagated_present(self):
        """test that a one to many lazyload cancels the unnecessary
        eager many-to-one join on the other side, even when a propagated
        option is present."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        self.mapper_registry.map_imperatively(User, users)
        self.mapper_registry.map_imperatively(
            Address,
            addresses,
            properties={
                "user": relationship(User, lazy="joined", backref="addresses")
            },
        )
        from sqlalchemy.orm.interfaces import MapperOption
        # a do-nothing option that propagates to lazy loaders; its mere
        # presence must not defeat the "use get" shortcut for Address.user
        class MyBogusOption(MapperOption):
            propagate_to_loaders = True
        sess = fixture_session()
        u1 = (
            sess.query(User)
            .options(MyBogusOption())
            .filter(User.id == 8)
            .one()
        )
        def go():
            eq_(u1.addresses[0].user, u1)
        # as in test_useget_cancels_eager: only the addresses SELECT runs
        with testing.expect_warnings():
            self.assert_sql_execution(
                testing.db,
                go,
                CompiledSQL(
                    "SELECT addresses.id, "
                    "addresses.user_id, "
                    "addresses.email_address "
                    "FROM addresses WHERE :param_1 = "
                    "addresses.user_id",
                    {"param_1": 8},
                ),
            )
    def test_manytoone_limit(self):
        """test that the subquery wrapping only occurs with
        limit/offset and m2m or o2m joins present."""
        (
            users,
            items,
            order_items,
            Order,
            Item,
            User,
            Address,
            orders,
            addresses,
        ) = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.classes.Address,
            self.tables.orders,
            self.tables.addresses,
        )
        self.mapper_registry.map_imperatively(
            User,
            users,
            properties=odict(orders=relationship(Order, backref="user")),
        )
        self.mapper_registry.map_imperatively(
            Order,
            orders,
            properties=odict(
                [
                    (
                        "items",
                        relationship(
                            Item, secondary=order_items, backref="orders"
                        ),
                    ),
                    ("address", relationship(Address)),
                ]
            ),
        )
        self.mapper_registry.map_imperatively(Address, addresses)
        self.mapper_registry.map_imperatively(Item, items)
        sess = fixture_session()
        # o2m joined load + LIMIT: the users SELECT is wrapped in a
        # subquery so LIMIT counts User rows, not joined rows
        self.assert_compile(
            sess.query(User).options(joinedload(User.orders)).limit(10),
            "SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name "
            "AS anon_1_users_name, orders_1.id AS orders_1_id, "
            "orders_1.user_id AS orders_1_user_id, orders_1.address_id "
            "AS orders_1_address_id, orders_1.description AS "
            "orders_1_description, orders_1.isopen AS orders_1_isopen "
            "FROM (SELECT users.id AS users_id, users.name AS users_name "
            "FROM users "
            "LIMIT :param_1) AS anon_1 LEFT OUTER JOIN orders AS "
            "orders_1 ON anon_1.users_id = orders_1.user_id",
            {"param_1": 10},
        )
        # m2o joined load + LIMIT: no wrapping is needed, LIMIT applies
        # directly to the joined statement
        self.assert_compile(
            sess.query(Order).options(joinedload(Order.user)).limit(10),
            "SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, "
            "orders.address_id AS "
            "orders_address_id, orders.description AS orders_description, "
            "orders.isopen AS orders_isopen, "
            "users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM orders LEFT OUTER JOIN users AS "
            "users_1 ON users_1.id = orders.user_id LIMIT :param_1",
            {"param_1": 10},
        )
        # same m2o with innerjoin=True: plain JOIN, still no wrapping
        self.assert_compile(
            sess.query(Order)
            .options(joinedload(Order.user, innerjoin=True))
            .limit(10),
            "SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, "
            "orders.address_id AS "
            "orders_address_id, orders.description AS orders_description, "
            "orders.isopen AS orders_isopen, "
            "users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM orders JOIN users AS "
            "users_1 ON users_1.id = orders.user_id LIMIT :param_1",
            {"param_1": 10},
        )
        # o2m -> m2o chain + LIMIT: wrapped; addresses joins against the
        # orders_1 alias
        self.assert_compile(
            sess.query(User)
            .options(joinedload(User.orders).joinedload(Order.address))
            .limit(10),
            "SELECT anon_1.users_id AS anon_1_users_id, "
            "anon_1.users_name AS anon_1_users_name, "
            "addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS addresses_1_email_address, "
            "orders_1.id AS orders_1_id, "
            "orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS orders_1_address_id, "
            "orders_1.description AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen FROM "
            "(SELECT users.id AS users_id, users.name AS users_name "
            "FROM users LIMIT :param_1) AS anon_1 "
            "LEFT OUTER JOIN orders AS orders_1 "
            "ON anon_1.users_id = orders_1.user_id LEFT OUTER JOIN "
            "addresses AS addresses_1 ON addresses_1.id = orders_1.address_id",
            {"param_1": 10},
        )
        # no LIMIT present: even with m2m and m2o eager loads combined,
        # no subquery wrapping occurs
        self.assert_compile(
            sess.query(User).options(
                joinedload(User.orders).joinedload(Order.items),
                joinedload(User.orders).joinedload(Order.address),
            ),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS items_1_id, "
            "items_1.description AS items_1_description, "
            "addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS "
            "addresses_1_email_address, orders_1.id AS orders_1_id, "
            "orders_1.user_id AS "
            "orders_1_user_id, orders_1.address_id AS orders_1_address_id, "
            "orders_1.description "
            "AS orders_1_description, orders_1.isopen AS orders_1_isopen "
            "FROM users LEFT OUTER JOIN orders AS orders_1 "
            "ON users.id = orders_1.user_id "
            "LEFT OUTER JOIN (order_items AS order_items_1 "
            "JOIN items AS items_1 ON items_1.id = order_items_1.item_id) "
            "ON orders_1.id = order_items_1.order_id "
            "LEFT OUTER JOIN addresses AS addresses_1 "
            "ON addresses_1.id = orders_1.address_id",
        )
        # innerjoin on the inner m2o only: the JOIN to addresses nests
        # inside the parenthesized right side of the LEFT OUTER JOIN
        self.assert_compile(
            sess.query(User)
            .options(
                joinedload(User.orders),
                joinedload(User.orders, Order.address, innerjoin=True),
            )
            .limit(10),
            "SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name "
            "AS anon_1_users_name, addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS addresses_1_email_address, "
            "orders_1.id AS orders_1_id, orders_1.user_id AS "
            "orders_1_user_id, orders_1.address_id AS orders_1_address_id, "
            "orders_1.description AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM (SELECT users.id AS users_id, users.name AS users_name "
            "FROM users"
            " LIMIT :param_1) AS anon_1 LEFT OUTER JOIN "
            "(orders AS orders_1 JOIN addresses AS addresses_1 "
            "ON addresses_1.id = orders_1.address_id) ON "
            "anon_1.users_id = orders_1.user_id",
            {"param_1": 10},
        )
        # innerjoin at both levels: plain JOINs out of the limited subquery
        self.assert_compile(
            sess.query(User)
            .options(
                joinedload(User.orders, innerjoin=True),
                joinedload(User.orders, Order.address, innerjoin=True),
            )
            .limit(10),
            "SELECT anon_1.users_id AS anon_1_users_id, "
            "anon_1.users_name AS anon_1_users_name, "
            "addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS addresses_1_email_address, "
            "orders_1.id AS orders_1_id, "
            "orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS orders_1_address_id, "
            "orders_1.description AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM (SELECT users.id AS users_id, users.name AS users_name "
            "FROM users "
            "LIMIT :param_1) AS anon_1 JOIN orders "
            "AS orders_1 ON anon_1.users_id = "
            "orders_1.user_id JOIN addresses AS addresses_1 "
            "ON addresses_1.id = orders_1.address_id",
            {"param_1": 10},
        )
def test_one_to_many_scalar(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
address=relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="joined",
uselist=False,
)
),
)
q = fixture_session().query(User)
def go():
result = q.filter(users.c.id == 7).all()
eq_([User(id=7, address=Address(id=1))], result)
self.assert_sql_count(testing.db, go, 1)
    def test_one_to_many_scalar_subq_wrapping(self):
        """A uselist=False joined load combined with LIMIT compiles
        without subquery wrapping; the LIMIT applies to the whole joined
        statement."""
        Address, addresses, users, User = (
            self.classes.Address,
            self.tables.addresses,
            self.tables.users,
            self.classes.User,
        )
        self.mapper_registry.map_imperatively(
            User,
            users,
            properties=dict(
                address=relationship(
                    self.mapper_registry.map_imperatively(Address, addresses),
                    lazy="joined",
                    uselist=False,
                )
            ),
        )
        q = fixture_session().query(User)
        q = q.filter(users.c.id == 7).limit(1)
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS addresses_1_email_address "
            "FROM users LEFT OUTER JOIN addresses AS addresses_1 "
            "ON users.id = addresses_1.user_id "
            "WHERE users.id = :id_1 "
            "LIMIT :param_1",
            checkparams={"id_1": 7, "param_1": 1},
        )
def test_many_to_one(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties=dict(
user=relationship(
self.mapper_registry.map_imperatively(User, users),
lazy="joined",
)
),
)
sess = fixture_session()
q = sess.query(Address)
def go():
a = q.filter(addresses.c.id == 1).one()
is_not(a.user, None)
u1 = sess.get(User, 7)
is_(a.user, u1)
self.assert_sql_count(testing.db, go, 1)
def test_many_to_one_null(self):
"""test that a many-to-one eager load which loads None does
not later trigger a lazy load.
"""
Order, Address, addresses, orders = (
self.classes.Order,
self.classes.Address,
self.tables.addresses,
self.tables.orders,
)
# use a primaryjoin intended to defeat SA's usage of
# query.get() for a many-to-one lazyload
self.mapper_registry.map_imperatively(
Order,
orders,
properties=dict(
address=relationship(
self.mapper_registry.map_imperatively(Address, addresses),
primaryjoin=and_(
addresses.c.id == orders.c.address_id,
addresses.c.email_address != None, # noqa
),
lazy="joined",
)
),
)
sess = fixture_session()
def go():
o1 = (
sess.query(Order)
.options(lazyload(Order.address))
.filter(Order.id == 5)
.one()
)
eq_(o1.address, None)
self.assert_sql_count(testing.db, go, 2)
sess.expunge_all()
def go():
o1 = sess.query(Order).filter(Order.id == 5).one()
eq_(o1.address, None)
self.assert_sql_count(testing.db, go, 1)
def test_one_and_many(self):
"""tests eager load for a parent object with a child object that
contains a many-to-many relationship to a third object."""
users, items, order_items, orders, Item, User, Order = (
self.tables.users,
self.tables.items,
self.tables.order_items,
self.tables.orders,
self.classes.Item,
self.classes.User,
self.classes.Order,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"orders": relationship(
Order, lazy="joined", order_by=orders.c.id
)
},
)
self.mapper_registry.map_imperatively(Item, items)
self.mapper_registry.map_imperatively(
Order,
orders,
properties=dict(
items=relationship(
Item,
secondary=order_items,
lazy="joined",
order_by=items.c.id,
)
),
)
q = fixture_session().query(User)
result = q.filter(text("users.id in (7, 8, 9)")).order_by(
text("users.id")
)
def go():
eq_(self.static.user_order_result[0:3], result.all())
self.assert_sql_count(testing.db, go, 1)
    def test_double_with_aggregate(self):
        """Two joined-eager relationships on User — the full "orders"
        collection plus a scalar "max_order" mapped against an
        aggregate-derived alias — load in a single statement."""
        User, users, orders, Order = (
            self.classes.User,
            self.tables.users,
            self.tables.orders,
            self.classes.Order,
        )
        # per-user maximum order id
        max_orders_by_user = (
            sa.select(sa.func.max(orders.c.id).label("order_id"))
            .group_by(orders.c.user_id)
            .alias("max_orders_by_user")
        )
        # full orders rows restricted to those maximum ids
        max_orders = (
            orders.select()
            .where(orders.c.id == max_orders_by_user.c.order_id)
            .alias("max_orders")
        )
        self.mapper_registry.map_imperatively(Order, orders)
        self.mapper_registry.map_imperatively(
            User,
            users,
            properties={
                "orders": relationship(
                    Order, backref="user", lazy="joined", order_by=orders.c.id
                ),
                # scalar relationship targeting the aliased subquery
                "max_order": relationship(
                    aliased(Order, max_orders), lazy="joined", uselist=False
                ),
            },
        )
        q = fixture_session().query(User)
        def go():
            eq_(
                [
                    User(
                        id=7,
                        orders=[Order(id=1), Order(id=3), Order(id=5)],
                        max_order=Order(id=5),
                    ),
                    User(id=8, orders=[]),
                    User(
                        id=9,
                        orders=[Order(id=2), Order(id=4)],
                        max_order=Order(id=4),
                    ),
                    User(id=10),
                ],
                q.order_by(User.id).all(),
            )
        self.assert_sql_count(testing.db, go, 1)
def test_uselist_false_warning(self):
"""test that multiple rows received by a
uselist=False raises a warning."""
User, users, orders, Order = (
self.classes.User,
self.tables.users,
self.tables.orders,
self.classes.Order,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={"order": relationship(Order, uselist=False)},
)
self.mapper_registry.map_imperatively(Order, orders)
s = fixture_session()
assert_warns(
sa.exc.SAWarning, s.query(User).options(joinedload(User.order)).all
)
def test_wide(self):
(
users,
items,
order_items,
Order,
Item,
User,
Address,
orders,
addresses,
) = (
self.tables.users,
self.tables.items,
self.tables.order_items,
self.classes.Order,
self.classes.Item,
self.classes.User,
self.classes.Address,
self.tables.orders,
self.tables.addresses,
)
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"items": relationship(
Item,
secondary=order_items,
lazy="joined",
order_by=items.c.id,
)
},
)
self.mapper_registry.map_imperatively(Item, items)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy=False,
order_by=addresses.c.id,
),
orders=relationship(Order, lazy=False, order_by=orders.c.id),
),
)
q = fixture_session().query(User)
def go():
eq_(self.static.user_all_result, q.order_by(User.id).all())
self.assert_sql_count(testing.db, go, 1)
def test_against_select(self):
"""test eager loading of a mapper which is against a select"""
users, items, order_items, orders, Item, User, Order = (
self.tables.users,
self.tables.items,
self.tables.order_items,
self.tables.orders,
self.classes.Item,
self.classes.User,
self.classes.Order,
)
s = sa.select(orders).where(orders.c.isopen == 1).alias("openorders")
self.mapper_registry.map_imperatively(
Order, s, properties={"user": relationship(User, lazy="joined")}
)
self.mapper_registry.map_imperatively(User, users)
self.mapper_registry.map_imperatively(Item, items)
q = fixture_session().query(Order)
eq_(
[Order(id=3, user=User(id=7)), Order(id=4, user=User(id=9))],
q.all(),
)
q = q.select_from(s.join(order_items).join(items)).filter(
~Item.id.in_([1, 2, 5])
)
eq_([Order(id=3, user=User(id=7))], q.all())
def test_aliasing(self):
"""test that eager loading uses aliases to insulate the eager
load from regular criterion against those tables."""
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="joined",
order_by=addresses.c.id,
)
),
)
q = fixture_session().query(User)
result = (
q.filter(addresses.c.email_address == "ed@lala.com")
.filter(Address.user_id == User.id)
.order_by(User.id)
)
eq_(self.static.user_address_result[1:2], result.all())
    def test_inner_join(self):
        """innerjoin=True on the relationship compiles to JOIN rather
        than LEFT OUTER JOIN; only users that have addresses appear in
        the result."""
        Address, addresses, users, User = (
            self.classes.Address,
            self.tables.addresses,
            self.tables.users,
            self.classes.User,
        )
        self.mapper_registry.map_imperatively(
            User,
            users,
            properties=dict(
                addresses=relationship(
                    self.mapper_registry.map_imperatively(Address, addresses),
                    lazy="joined",
                    innerjoin=True,
                    order_by=addresses.c.id,
                )
            ),
        )
        sess = fixture_session()
        eq_(
            [
                User(id=7, addresses=[Address(id=1)]),
                User(
                    id=8,
                    addresses=[
                        Address(id=2, email_address="ed@wood.com"),
                        Address(id=3, email_address="ed@bettyboop.com"),
                        Address(id=4, email_address="ed@lala.com"),
                    ],
                ),
                User(id=9, addresses=[Address(id=5)]),
            ],
            sess.query(User).all(),
        )
        self.assert_compile(
            sess.query(User),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS addresses_1_email_address "
            "FROM users JOIN "
            "addresses AS addresses_1 ON users.id = addresses_1.user_id "
            "ORDER BY addresses_1.id",
        )
    def test_inner_join_unnested_chaining_options(self):
        """Relationships mapped with innerjoin="unnested": per-query
        joinedload options can demote individual links of the chain back
        to LEFT OUTER JOIN."""
        users, items, order_items, Order, Item, User, orders = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.tables.orders,
        )
        self.mapper_registry.map_imperatively(
            User,
            users,
            properties=dict(
                orders=relationship(Order, innerjoin="unnested", lazy=False)
            ),
        )
        self.mapper_registry.map_imperatively(
            Order,
            orders,
            properties=dict(
                items=relationship(
                    Item,
                    secondary=order_items,
                    lazy=False,
                    innerjoin="unnested",
                )
            ),
        )
        self.mapper_registry.map_imperatively(Item, items)
        sess = fixture_session()
        # no options: the whole chain renders as inner JOINs
        self.assert_compile(
            sess.query(User),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, orders_1.description "
            "AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen FROM users "
            "JOIN orders AS orders_1 ON "
            "users.id = orders_1.user_id JOIN order_items AS order_items_1 "
            "ON orders_1.id = "
            "order_items_1.order_id JOIN items AS items_1 ON items_1.id = "
            "order_items_1.item_id",
        )
        # innerjoin=False on User.orders: both links become LEFT OUTER
        # JOINs, the m2m link via a parenthesized right side
        self.assert_compile(
            sess.query(User).options(joinedload(User.orders, innerjoin=False)),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, orders_1.description "
            "AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM users LEFT OUTER JOIN orders AS orders_1 "
            "ON users.id = orders_1.user_id "
            "LEFT OUTER JOIN (order_items AS order_items_1 "
            "JOIN items AS items_1 ON items_1.id = order_items_1.item_id) "
            "ON orders_1.id = order_items_1.order_id",
        )
        # innerjoin=False on Order.items only: orders stays an inner
        # JOIN; just the items link is demoted
        self.assert_compile(
            sess.query(User).options(
                joinedload(User.orders, Order.items, innerjoin=False)
            ),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, "
            "orders_1.description AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM users JOIN orders AS orders_1 ON "
            "users.id = orders_1.user_id "
            "LEFT OUTER JOIN (order_items AS order_items_1 "
            "JOIN items AS items_1 ON items_1.id = order_items_1.item_id) "
            "ON orders_1.id = order_items_1.order_id",
        )
@testing.fixture
def _inner_join_nested_fixture(self):
users, items, order_items, Order, Item, User, orders = (
self.tables.users,
self.tables.items,
self.tables.order_items,
self.classes.Order,
self.classes.Item,
self.classes.User,
self.tables.orders,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
orders=relationship(
Order, innerjoin=True, lazy=False, order_by=orders.c.id
)
),
)
self.mapper_registry.map_imperatively(
Order,
orders,
properties=dict(
items=relationship(
Item,
secondary=order_items,
lazy=False,
innerjoin=True,
order_by=items.c.id,
)
),
)
self.mapper_registry.map_imperatively(Item, items)
return User, Order, Item
    def test_inner_join_nested_chaining_negative_options_one(
        self, _inner_join_nested_fixture
    ):
        """With innerjoin=True at both levels and no overriding options,
        the whole chain compiles as plain inner JOINs."""
        User, Order, Item = _inner_join_nested_fixture
        sess = fixture_session()
        self.assert_compile(
            sess.query(User),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, orders_1.description "
            "AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen FROM users "
            "JOIN orders AS orders_1 ON "
            "users.id = orders_1.user_id JOIN order_items "
            "AS order_items_1 ON orders_1.id = "
            "order_items_1.order_id JOIN items AS items_1 ON items_1.id = "
            "order_items_1.item_id ORDER BY orders_1.id, items_1.id",
        )
    def test_inner_join_nested_chaining_negative_options_two(
        self, _inner_join_nested_fixture
    ):
        """innerjoin=False on User.orders demotes the entire nested join
        (orders JOIN order_items JOIN items) to a single LEFT OUTER JOIN;
        users with no orders are then included in the result."""
        User, Order, Item = _inner_join_nested_fixture
        sess = fixture_session()
        q = sess.query(User).options(joinedload(User.orders, innerjoin=False))
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, orders_1.description "
            "AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM users LEFT OUTER JOIN "
            "(orders AS orders_1 JOIN order_items AS order_items_1 "
            "ON orders_1.id = order_items_1.order_id "
            "JOIN items AS items_1 ON items_1.id = order_items_1.item_id) "
            "ON users.id = orders_1.user_id ORDER BY orders_1.id, items_1.id",
        )
        # users 8 and 10 come back with empty "orders" collections
        eq_(
            [
                User(
                    id=7,
                    orders=[
                        Order(
                            id=1, items=[Item(id=1), Item(id=2), Item(id=3)]
                        ),
                        Order(
                            id=3, items=[Item(id=3), Item(id=4), Item(id=5)]
                        ),
                        Order(id=5, items=[Item(id=5)]),
                    ],
                ),
                User(id=8, orders=[]),
                User(
                    id=9,
                    orders=[
                        Order(
                            id=2, items=[Item(id=1), Item(id=2), Item(id=3)]
                        ),
                        Order(id=4, items=[Item(id=1), Item(id=5)]),
                    ],
                ),
                User(id=10, orders=[]),
            ],
            q.order_by(User.id).all(),
        )
    def test_inner_join_nested_chaining_negative_options_three(
        self, _inner_join_nested_fixture
    ):
        """innerjoin=False on only the inner Order.items link: orders
        stays an inner JOIN while the m2m link becomes a LEFT OUTER JOIN
        with a parenthesized right side."""
        User, Order, Item = _inner_join_nested_fixture
        sess = fixture_session()
        self.assert_compile(
            sess.query(User).options(
                joinedload(User.orders, Order.items, innerjoin=False)
            ),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, orders_1.description AS "
            "orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM users JOIN orders AS orders_1 ON users.id = "
            "orders_1.user_id "
            "LEFT OUTER JOIN (order_items AS order_items_1 "
            "JOIN items AS items_1 ON items_1.id = order_items_1.item_id) "
            "ON orders_1.id = order_items_1.order_id ORDER BY "
            "orders_1.id, items_1.id",
        )
    def test_inner_join_nested_chaining_positive_options(self):
        """Chained joinedload options with innerjoin=False at the outer
        level and innerjoin=True at the inner level: the inner JOIN nests
        inside the parenthesized right side of the LEFT OUTER JOIN."""
        users, items, order_items, Order, Item, User, orders = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.tables.orders,
        )
        self.mapper_registry.map_imperatively(
            User,
            users,
            properties=dict(orders=relationship(Order, order_by=orders.c.id)),
        )
        self.mapper_registry.map_imperatively(
            Order,
            orders,
            properties=dict(
                items=relationship(
                    Item, secondary=order_items, order_by=items.c.id
                )
            ),
        )
        self.mapper_registry.map_imperatively(Item, items)
        sess = fixture_session()
        q = sess.query(User).options(
            joinedload(User.orders, innerjoin=False).joinedload(
                Order.items, innerjoin=True
            )
        )
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS items_1_id, items_1.description "
            "AS items_1_description, "
            "orders_1.id AS orders_1_id, orders_1.user_id "
            "AS orders_1_user_id, "
            "orders_1.address_id AS orders_1_address_id, "
            "orders_1.description AS "
            "orders_1_description, orders_1.isopen AS orders_1_isopen "
            "FROM users LEFT OUTER JOIN (orders AS orders_1 "
            "JOIN order_items AS "
            "order_items_1 ON orders_1.id = order_items_1.order_id "
            "JOIN items AS "
            "items_1 ON items_1.id = order_items_1.item_id) "
            "ON users.id = orders_1.user_id "
            "ORDER BY orders_1.id, items_1.id",
        )
        # users with no orders (8 and 10) still appear, with empty lists
        eq_(
            [
                User(
                    id=7,
                    orders=[
                        Order(
                            id=1, items=[Item(id=1), Item(id=2), Item(id=3)]
                        ),
                        Order(
                            id=3, items=[Item(id=3), Item(id=4), Item(id=5)]
                        ),
                        Order(id=5, items=[Item(id=5)]),
                    ],
                ),
                User(id=8, orders=[]),
                User(
                    id=9,
                    orders=[
                        Order(
                            id=2, items=[Item(id=1), Item(id=2), Item(id=3)]
                        ),
                        Order(id=4, items=[Item(id=1), Item(id=5)]),
                    ],
                ),
                User(id=10, orders=[]),
            ],
            q.order_by(User.id).all(),
        )
    def test_unnested_outerjoin_propagation_only_on_correct_path(self):
        """The LEFT OUTER JOIN produced for User.orders must not leak
        onto the sibling User.addresses path, which keeps its inner JOIN
        (innerjoin="unnested")."""
        # test #3131
        User, users = self.classes.User, self.tables.users
        Order, orders = self.classes.Order, self.tables.orders
        Address, addresses = self.classes.Address, self.tables.addresses
        self.mapper_registry.map_imperatively(
            User,
            users,
            properties=odict(
                [
                    ("orders", relationship(Order)),
                    ("addresses", relationship(Address)),
                ]
            ),
        )
        self.mapper_registry.map_imperatively(Order, orders)
        self.mapper_registry.map_imperatively(Address, addresses)
        sess = fixture_session()
        q = sess.query(User).options(
            joinedload(User.orders),
            joinedload(User.addresses, innerjoin="unnested"),
        )
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "orders_1.id AS orders_1_id, "
            "orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS orders_1_address_id, "
            "orders_1.description AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen, "
            "addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS addresses_1_email_address "
            "FROM users LEFT OUTER JOIN orders AS orders_1 "
            "ON users.id = orders_1.user_id JOIN addresses AS addresses_1 "
            "ON users.id = addresses_1.user_id",
        )
    def test_nested_outerjoin_propagation_only_on_correct_path(self):
        """Same as the "unnested" variant but with innerjoin=True on the
        addresses path: the orders LEFT OUTER JOIN must not demote the
        addresses inner JOIN."""
        # test #3131
        User, users = self.classes.User, self.tables.users
        Order, orders = self.classes.Order, self.tables.orders
        Address, addresses = self.classes.Address, self.tables.addresses
        self.mapper_registry.map_imperatively(
            User,
            users,
            properties=odict(
                [
                    ("orders", relationship(Order)),
                    ("addresses", relationship(Address)),
                ]
            ),
        )
        self.mapper_registry.map_imperatively(Order, orders)
        self.mapper_registry.map_imperatively(Address, addresses)
        sess = fixture_session()
        q = sess.query(User).options(
            joinedload(User.orders), joinedload(User.addresses, innerjoin=True)
        )
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "orders_1.id AS orders_1_id, "
            "orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS orders_1_address_id, "
            "orders_1.description AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen, "
            "addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS addresses_1_email_address "
            "FROM users LEFT OUTER JOIN orders AS orders_1 "
            "ON users.id = orders_1.user_id JOIN addresses AS addresses_1 "
            "ON users.id = addresses_1.user_id",
        )
    def test_catch_the_right_target(self):
        """The joined-eager chain for Item.keywords must attach to the
        eager-load aliases (orders_1/items_1), not to the explicit
        query.join() targets."""
        # test eager join chaining to the "nested" join on the left,
        # a new feature as of [ticket:2369]
        (
            users,
            Keyword,
            orders,
            items,
            order_items,
            Order,
            Item,
            User,
            keywords,
            item_keywords,
        ) = (
            self.tables.users,
            self.classes.Keyword,
            self.tables.orders,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.tables.keywords,
            self.tables.item_keywords,
        )
        self.mapper_registry.map_imperatively(
            User,
            users,
            properties={
                "orders": relationship(Order, backref="user")
            },  # o2m, m2o
        )
        self.mapper_registry.map_imperatively(
            Order,
            orders,
            properties={
                "items": relationship(
                    Item, secondary=order_items, order_by=items.c.id
                )  # m2m
            },
        )
        self.mapper_registry.map_imperatively(
            Item,
            items,
            properties={
                "keywords": relationship(
                    Keyword, secondary=item_keywords, order_by=keywords.c.id
                )  # m2m
            },
        )
        self.mapper_registry.map_imperatively(Keyword, keywords)
        sess = fixture_session()
        q = (
            sess.query(User)
            .join(User.orders)
            .join(Order.items)
            .options(
                joinedload(User.orders)
                .joinedload(Order.items)
                .joinedload(Item.keywords)
            )
        )
        # here, the eager join for keywords can catch onto
        # join(Order.items) or the nested (orders LEFT OUTER JOIN items),
        # it should catch the latter
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "keywords_1.id AS keywords_1_id, keywords_1.name "
            "AS keywords_1_name, "
            "items_1.id AS items_1_id, items_1.description AS "
            "items_1_description, "
            "orders_1.id AS orders_1_id, orders_1.user_id AS "
            "orders_1_user_id, "
            "orders_1.address_id AS orders_1_address_id, "
            "orders_1.description AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM users JOIN orders ON users.id = orders.user_id "
            "JOIN order_items AS order_items_1 ON orders.id = "
            "order_items_1.order_id "
            "JOIN items ON items.id = order_items_1.item_id "
            "LEFT OUTER JOIN orders AS orders_1 ON users.id = "
            "orders_1.user_id "
            "LEFT OUTER JOIN (order_items AS order_items_2 "
            "JOIN items AS items_1 ON items_1.id = order_items_2.item_id) "
            "ON orders_1.id = order_items_2.order_id "
            "LEFT OUTER JOIN (item_keywords AS item_keywords_1 "
            "JOIN keywords AS keywords_1 ON keywords_1.id = "
            "item_keywords_1.keyword_id) "
            "ON items_1.id = item_keywords_1.item_id "
            "ORDER BY items_1.id, keywords_1.id",
        )
    def test_inner_join_unnested_chaining_fixed(self):
        """An innerjoin="unnested" m2m chained behind an outer-joined
        o2m is demoted to LEFT OUTER JOIN when querying from User, but
        keeps its inner JOIN when querying from Order directly."""
        users, items, order_items, Order, Item, User, orders = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.tables.orders,
        )
        self.mapper_registry.map_imperatively(
            User,
            users,
            properties=dict(orders=relationship(Order, lazy=False)),
        )
        self.mapper_registry.map_imperatively(
            Order,
            orders,
            properties=dict(
                items=relationship(
                    Item,
                    secondary=order_items,
                    lazy=False,
                    innerjoin="unnested",
                )
            ),
        )
        self.mapper_registry.map_imperatively(Item, items)
        sess = fixture_session()
        # joining from user, its all LEFT OUTER JOINs
        self.assert_compile(
            sess.query(User),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, orders_1.description AS "
            "orders_1_description, "
            "orders_1.isopen AS orders_1_isopen FROM users LEFT OUTER JOIN "
            "orders AS orders_1 ON "
            "users.id = orders_1.user_id LEFT OUTER JOIN "
            "(order_items AS order_items_1 JOIN items AS items_1 ON "
            "items_1.id = "
            "order_items_1.item_id) ON orders_1.id = "
            "order_items_1.order_id",
        )
        # joining just from Order, innerjoin=True can be respected
        self.assert_compile(
            sess.query(Order),
            "SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, "
            "orders.address_id AS orders_address_id, orders.description AS "
            "orders_description, orders.isopen AS orders_isopen, items_1.id "
            "AS items_1_id, items_1.description AS items_1_description FROM "
            "orders JOIN order_items AS order_items_1 ON orders.id = "
            "order_items_1.order_id JOIN items AS items_1 ON items_1.id = "
            "order_items_1.item_id",
        )
    def test_inner_join_nested_chaining_fixed(self):
        """With innerjoin="nested", the m2m inner JOIN survives behind an
        outer-joined o2m by nesting inside the parenthesized right side
        of the LEFT OUTER JOIN."""
        users, items, order_items, Order, Item, User, orders = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.tables.orders,
        )
        self.mapper_registry.map_imperatively(
            User,
            users,
            properties=dict(orders=relationship(Order, lazy=False)),
        )
        self.mapper_registry.map_imperatively(
            Order,
            orders,
            properties=dict(
                items=relationship(
                    Item, secondary=order_items, lazy=False, innerjoin="nested"
                )
            ),
        )
        self.mapper_registry.map_imperatively(Item, items)
        sess = fixture_session()
        self.assert_compile(
            sess.query(User),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, orders_1.description AS "
            "orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM users LEFT OUTER JOIN "
            "(orders AS orders_1 JOIN order_items AS order_items_1 "
            "ON orders_1.id = order_items_1.order_id "
            "JOIN items AS items_1 ON items_1.id = order_items_1.item_id) "
            "ON users.id = orders_1.user_id",
        )
def test_inner_join_options(self):
users, items, order_items, Order, Item, User, orders = (
self.tables.users,
self.tables.items,
self.tables.order_items,
self.classes.Order,
self.classes.Item,
self.classes.User,
self.tables.orders,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
orders=relationship(
Order,
backref=backref("user", innerjoin=True),
order_by=orders.c.id,
)
),
)
self.mapper_registry.map_imperatively(
Order,
orders,
properties=dict(
items=relationship(
Item, secondary=order_items, order_by=items.c.id
)
),
)
self.mapper_registry.map_imperatively(Item, items)
sess = fixture_session()
self.assert_compile(
sess.query(User).options(joinedload(User.orders, innerjoin=True)),
"SELECT users.id AS users_id, users.name AS users_name, "
"orders_1.id AS orders_1_id, "
"orders_1.user_id AS orders_1_user_id, orders_1.address_id AS "
"orders_1_address_id, "
"orders_1.description AS orders_1_description, orders_1.isopen "
"AS orders_1_isopen "
"FROM users JOIN orders AS orders_1 ON users.id = "
"orders_1.user_id ORDER BY orders_1.id",
)
self.assert_compile(
sess.query(User).options(
joinedload(User.orders, innerjoin=True).joinedload(
Order.items, innerjoin=True
)
),
"SELECT users.id AS users_id, users.name AS users_name, "
"items_1.id AS items_1_id, "
"items_1.description AS items_1_description, "
"orders_1.id AS orders_1_id, "
"orders_1.user_id AS orders_1_user_id, orders_1.address_id "
"AS orders_1_address_id, "
"orders_1.description AS orders_1_description, orders_1.isopen "
"AS orders_1_isopen "
"FROM users JOIN orders AS orders_1 ON users.id = "
"orders_1.user_id JOIN order_items AS "
"order_items_1 ON orders_1.id = order_items_1.order_id "
"JOIN items AS items_1 ON "
"items_1.id = order_items_1.item_id ORDER BY orders_1.id, "
"items_1.id",
)
def go():
eq_(
sess.query(User)
.options(
joinedload(User.orders, innerjoin=True),
joinedload(User.orders, Order.items, innerjoin=True),
)
.order_by(User.id)
.all(),
[
User(
id=7,
orders=[
Order(
id=1,
items=[Item(id=1), Item(id=2), Item(id=3)],
),
Order(
id=3,
items=[Item(id=3), Item(id=4), Item(id=5)],
),
Order(id=5, items=[Item(id=5)]),
],
),
User(
id=9,
orders=[
Order(
id=2,
items=[Item(id=1), Item(id=2), Item(id=3)],
),
Order(id=4, items=[Item(id=1), Item(id=5)]),
],
),
],
)
self.assert_sql_count(testing.db, go, 1)
# test that default innerjoin setting is used for options
self.assert_compile(
sess.query(Order)
.options(joinedload(Order.user))
.filter(Order.description == "foo"),
"SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, "
"orders.address_id AS "
"orders_address_id, orders.description AS orders_description, "
"orders.isopen AS "
"orders_isopen, users_1.id AS users_1_id, users_1.name "
"AS users_1_name "
"FROM orders JOIN users AS users_1 ON users_1.id = orders.user_id "
"WHERE orders.description = :description_1",
)
def test_propagated_lazyload_wildcard_unbound(self):
self._test_propagated_lazyload_wildcard(False)
def test_propagated_lazyload_wildcard_bound(self):
self._test_propagated_lazyload_wildcard(True)
def _test_propagated_lazyload_wildcard(self, use_load):
users, items, order_items, Order, Item, User, orders = (
self.tables.users,
self.tables.items,
self.tables.order_items,
self.classes.Order,
self.classes.Item,
self.classes.User,
self.tables.orders,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(orders=relationship(Order, lazy="select")),
)
self.mapper_registry.map_imperatively(
Order,
orders,
properties=dict(
items=relationship(Item, secondary=order_items, lazy="joined")
),
)
self.mapper_registry.map_imperatively(Item, items)
sess = fixture_session()
if use_load:
opt = Load(User).defaultload(User.orders).lazyload("*")
else:
opt = defaultload(User.orders).lazyload("*")
q = sess.query(User).filter(User.id == 7).options(opt)
def go():
for u in q:
u.orders
self.sql_eq_(
go,
[
(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :id_1",
{"id_1": 7},
),
(
"SELECT orders.id, "
"orders.user_id, "
"orders.address_id, "
"orders.description, "
"orders.isopen FROM orders "
"WHERE :param_1 = orders.user_id",
{"param_1": 7},
),
],
)
@testing.fixture
def issue_11226_fixture(self):
users, items, order_items, Order, Item, User, orders = (
self.tables.users,
self.tables.items,
self.tables.order_items,
self.classes.Order,
self.classes.Item,
self.classes.User,
self.tables.orders,
)
self.mapper_registry.map_imperatively(
User,
users,
)
self.mapper_registry.map_imperatively(
Order,
orders,
properties=dict(
items=relationship(
Item, secondary=order_items, order_by=items.c.id
),
user=relationship(User),
),
)
self.mapper_registry.map_imperatively(Item, items)
def test_nested_for_group_by(self, issue_11226_fixture):
"""test issue #11226"""
Order, Item = self.classes("Order", "Item")
stmt = (
select(Order, func.count(Item.id))
.join(Order.items)
.group_by(Order.id)
.options(joinedload(Order.user))
)
# the query has a many-to-one joinedload, but also a GROUP BY.
# eager loading needs to use nested form so that the eager joins
# can be added to the outside of the GROUP BY query.
# change #11226 liberalizes the conditions where we do nested form
# to include non-multi-row eager loads, when the columns list is
# otherwise sensitive to more columns being added.
self.assert_compile(
stmt,
"SELECT anon_1.id, anon_1.user_id, anon_1.address_id, "
"anon_1.description, anon_1.isopen, anon_1.count_1, "
"users_1.id AS id_1, users_1.name "
"FROM (SELECT orders.id AS id, orders.user_id AS user_id, "
"orders.address_id AS address_id, "
"orders.description AS description, orders.isopen AS isopen, "
"count(items.id) AS count_1 "
"FROM orders "
"JOIN order_items AS order_items_1 "
"ON orders.id = order_items_1.order_id "
"JOIN items ON items.id = order_items_1.item_id "
"GROUP BY orders.id) "
"AS anon_1 "
"LEFT OUTER JOIN users AS users_1 ON users_1.id = anon_1.user_id",
)
def test_nested_for_distinct(self, issue_11226_fixture):
"""test issue #11226"""
Order, Item = self.classes("Order", "Item")
stmt = select(Order).distinct().options(joinedload(Order.user))
self.assert_compile(
stmt,
"SELECT anon_1.id, anon_1.user_id, anon_1.address_id, "
"anon_1.description, anon_1.isopen, "
"users_1.id AS id_1, users_1.name "
"FROM (SELECT DISTINCT orders.id AS id, "
"orders.user_id AS user_id, orders.address_id AS address_id, "
"orders.description AS description, orders.isopen AS isopen "
"FROM orders) AS anon_1 "
"LEFT OUTER JOIN users AS users_1 ON users_1.id = anon_1.user_id",
)
| EagerTest |
python | getsentry__sentry | src/sentry/seer/models.py | {
"start": 2249,
"end": 2490
} | class ____(Exception):
def __init__(self, message: str, status: int):
self.message = message
self.status = status
def __str__(self):
return f"Seer API error: {self.message} (status: {self.status})"
| SeerApiError |
python | Textualize__textual | src/textual/binding.py | {
"start": 1055,
"end": 1123
} | class ____(Exception):
"""A binding related error."""
| BindingError |
python | kamyu104__LeetCode-Solutions | Python/find-winner-on-a-tic-tac-toe-game.py | {
"start": 29,
"end": 614
} | class ____(object):
def tictactoe(self, moves):
"""
:type moves: List[List[int]]
:rtype: str
"""
row, col = [[0]*3 for _ in xrange(2)], [[0]*3 for _ in xrange(2)]
diag, anti_diag = [0]*2, [0]*2
p = 0
for r, c in moves:
row[p][r] += 1
col[p][c] += 1
diag[p] += r == c
anti_diag[p] += r+c == 2
if 3 in (row[p][r], col[p][c], diag[p], anti_diag[p]):
return "AB"[p]
p ^= 1
return "Draw" if len(moves) == 9 else "Pending"
| Solution |
python | python__mypy | mypy/test/testreports.py | {
"start": 315,
"end": 1773
} | class ____(Suite):
@pytest.mark.skipif(lxml is None, reason="Cannot import lxml. Is it installed?")
def test_get_line_rate(self) -> None:
assert_equal("1.0", get_line_rate(0, 0))
assert_equal("0.3333", get_line_rate(1, 3))
@pytest.mark.skipif(lxml is None, reason="Cannot import lxml. Is it installed?")
def test_as_xml(self) -> None:
import lxml.etree as etree # type: ignore[import-untyped]
cobertura_package = CoberturaPackage("foobar")
cobertura_package.covered_lines = 21
cobertura_package.total_lines = 42
child_package = CoberturaPackage("raz")
child_package.covered_lines = 10
child_package.total_lines = 10
child_package.classes["class"] = etree.Element("class")
cobertura_package.packages["raz"] = child_package
expected_output = textwrap.dedent(
"""\
<package complexity="1.0" name="foobar" branch-rate="0" line-rate="0.5000">
<classes/>
<packages>
<package complexity="1.0" name="raz" branch-rate="0" line-rate="1.0000">
<classes>
<class/>
</classes>
</package>
</packages>
</package>
"""
).encode("ascii")
assert_equal(
expected_output, etree.tostring(cobertura_package.as_xml(), pretty_print=True)
)
| CoberturaReportSuite |
python | redis__redis-py | redis/multidb/healthcheck.py | {
"start": 536,
"end": 696
} | class ____(ABC):
@abstractmethod
def check_health(self, database) -> bool:
"""Function to determine the health status."""
pass
| HealthCheck |
python | conda__conda | conda/base/constants.py | {
"start": 6746,
"end": 7010
} | class ____(ValueEnum, metaclass=ChannelPriorityMeta):
__name__ = "ChannelPriority"
STRICT = "strict"
# STRICT_OR_FLEXIBLE = 'strict_or_flexible' # TODO: consider implementing if needed
FLEXIBLE = "flexible"
DISABLED = "disabled"
| ChannelPriority |
python | jschneier__django-storages | storages/backends/ftp.py | {
"start": 852,
"end": 918
} | class ____(Exception):
pass
@deconstructible
| FTPStorageException |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/language/ast.py | {
"start": 22014,
"end": 22906
} | class ____(Node):
__slots__ = ('loc', 'operation', 'type',)
_fields = ('operation', 'type',)
def __init__(self, operation, type, loc=None):
self.operation = operation
self.type = type
self.loc = loc
def __eq__(self, other):
return (
self is other or (
isinstance(other, OperationTypeDefinition) and
self.operation == other.operation and
self.type == other.type
)
)
def __repr__(self):
return ('OperationTypeDefinition('
'operation={self.operation!r}'
', type={self.type!r}'
')').format(self=self)
def __copy__(self):
return type(self)(
self.operation,
self.type,
self.loc
)
def __hash__(self):
return id(self)
| OperationTypeDefinition |
python | getsentry__sentry | src/sentry/feedback/apps.py | {
"start": 36,
"end": 133
} | class ____(AppConfig):
name = "sentry.feedback"
def ready(self) -> None:
pass
| Config |
python | matplotlib__matplotlib | lib/matplotlib/streamplot.py | {
"start": 11721,
"end": 14070
} | class ____:
"""
Map representing different coordinate systems.
Coordinate definitions:
* axes-coordinates goes from 0 to 1 in the domain.
* data-coordinates are specified by the input x-y coordinates.
* grid-coordinates goes from 0 to N and 0 to M for an N x M grid,
where N and M match the shape of the input data.
* mask-coordinates goes from 0 to N and 0 to M for an N x M mask,
where N and M are user-specified to control the density of streamlines.
This class also has methods for adding trajectories to the StreamMask.
Before adding a trajectory, run `start_trajectory` to keep track of regions
crossed by a given trajectory. Later, if you decide the trajectory is bad
(e.g., if the trajectory is very short) just call `undo_trajectory`.
"""
def __init__(self, grid, mask):
self.grid = grid
self.mask = mask
# Constants for conversion between grid- and mask-coordinates
self.x_grid2mask = (mask.nx - 1) / (grid.nx - 1)
self.y_grid2mask = (mask.ny - 1) / (grid.ny - 1)
self.x_mask2grid = 1. / self.x_grid2mask
self.y_mask2grid = 1. / self.y_grid2mask
self.x_data2grid = 1. / grid.dx
self.y_data2grid = 1. / grid.dy
def grid2mask(self, xi, yi):
"""Return nearest space in mask-coords from given grid-coords."""
return round(xi * self.x_grid2mask), round(yi * self.y_grid2mask)
def mask2grid(self, xm, ym):
return xm * self.x_mask2grid, ym * self.y_mask2grid
def data2grid(self, xd, yd):
return xd * self.x_data2grid, yd * self.y_data2grid
def grid2data(self, xg, yg):
return xg / self.x_data2grid, yg / self.y_data2grid
def start_trajectory(self, xg, yg, broken_streamlines=True):
xm, ym = self.grid2mask(xg, yg)
self.mask._start_trajectory(xm, ym, broken_streamlines)
def reset_start_point(self, xg, yg):
xm, ym = self.grid2mask(xg, yg)
self.mask._current_xy = (xm, ym)
def update_trajectory(self, xg, yg, broken_streamlines=True):
if not self.grid.within_grid(xg, yg):
raise InvalidIndexError
xm, ym = self.grid2mask(xg, yg)
self.mask._update_trajectory(xm, ym, broken_streamlines)
def undo_trajectory(self):
self.mask._undo_trajectory()
| DomainMap |
python | streamlit__streamlit | lib/streamlit/elements/widgets/button_group.py | {
"start": 2967,
"end": 3891
} | class ____(Generic[T]):
"""Only meant to be used internally for the button_group element.
This serde is inspired by the MultiSelectSerde from multiselect.py. That serde has
been updated since then to support the accept_new_options parameter, which is not
required by the button_group element. If this changes again at some point,
the two elements can share the same serde again.
"""
options: Sequence[T]
default_value: list[int] = field(default_factory=list)
def serialize(self, value: list[T]) -> list[int]:
indices = check_and_convert_to_indices(self.options, value)
return indices if indices is not None else []
def deserialize(self, ui_value: list[int] | None) -> list[T]:
current_value: list[int] = (
ui_value if ui_value is not None else self.default_value
)
return [self.options[i] for i in current_value]
| _MultiSelectSerde |
python | tensorflow__tensorflow | tensorflow/python/keras/losses.py | {
"start": 8815,
"end": 10795
} | class ____(Loss):
"""Wraps a loss function in the `Loss` class."""
def __init__(self,
fn,
reduction=losses_utils.ReductionV2.AUTO,
name=None,
**kwargs):
"""Initializes `LossFunctionWrapper` class.
Args:
fn: The loss function to wrap, with signature `fn(y_true, y_pred,
**kwargs)`.
reduction: Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the instance.
**kwargs: The keyword arguments that are passed on to `fn`.
"""
super().__init__(reduction=reduction, name=name)
self.fn = fn
self._fn_kwargs = kwargs
def call(self, y_true, y_pred):
"""Invokes the `LossFunctionWrapper` instance.
Args:
y_true: Ground truth values.
y_pred: The predicted values.
Returns:
Loss values per sample.
"""
if tensor_util.is_tf_type(y_pred) and tensor_util.is_tf_type(y_true):
y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)
ag_fn = autograph.tf_convert(self.fn, ag_ctx.control_status_ctx())
return ag_fn(y_true, y_pred, **self._fn_kwargs)
def get_config(self):
config = {}
for k, v in self._fn_kwargs.items():
config[k] = backend.eval(v) if tf_utils.is_tensor_or_variable(v) else v
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| LossFunctionWrapper |
python | run-llama__llama_index | llama-index-instrumentation/src/llama_index_instrumentation/event_handlers/base.py | {
"start": 156,
"end": 680
} | class ____(BaseModel):
"""Base callback handler that can be used to track event starts and ends."""
model_config = ConfigDict(arbitrary_types_allowed=True)
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "BaseEventHandler"
@abstractmethod
def handle(self, event: BaseEvent, **kwargs: Any) -> Any:
"""Logic for handling event."""
async def ahandle(self, event: BaseEvent, **kwargs: Any) -> Any:
return self.handle(event, **kwargs)
| BaseEventHandler |
python | huggingface__transformers | src/transformers/activations.py | {
"start": 5712,
"end": 6259
} | class ____(nn.Module):
"""
Applies GELU approximation that is faster than default and more accurate than QuickGELU. See:
https://github.com/hendrycks/GELUs
Implemented along with MEGA (Moving Average Equipped Gated Attention)
"""
def __init__(self):
super().__init__()
self.precomputed_constant = math.sqrt(2 / math.pi)
def forward(self, input: Tensor) -> Tensor:
return 0.5 * input * (1 + torch.tanh(self.precomputed_constant * (input + 0.044715 * torch.pow(input, 3))))
| AccurateGELUActivation |
python | google__pytype | pytype/overlays/chex_overlay.py | {
"start": 839,
"end": 3154
} | class ____(dataclass_overlay.Dataclass):
"""Implements the @dataclass decorator."""
DEFAULT_ARGS = {
**dataclass_overlay.Dataclass.DEFAULT_ARGS,
"mappable_dataclass": True,
}
def _add_replace_method(self, node, cls):
cls.members["replace"] = classgen.make_replace_method(
self.ctx, node, cls, kwargs_name="changes"
)
def _add_from_tuple_method(self, node, cls):
# from_tuple is discouraged anyway, so we provide only bare-bones types.
cls.members["from_tuple"] = overlay_utils.make_method(
ctx=self.ctx,
node=node,
name="from_tuple",
params=[overlay_utils.Param("args")],
return_type=cls,
kind=pytd.MethodKind.STATICMETHOD,
)
def _add_to_tuple_method(self, node, cls):
# to_tuple is discouraged anyway, so we provide only bare-bones types.
cls.members["to_tuple"] = overlay_utils.make_method(
ctx=self.ctx,
node=node,
name="to_tuple",
return_type=self.ctx.convert.tuple_type,
)
def _add_mapping_methods(self, node, cls):
if "__getitem__" not in cls.members:
cls.members["__getitem__"] = overlay_utils.make_method(
ctx=self.ctx,
node=node,
name="__getitem__",
params=[overlay_utils.Param("key")],
return_type=self.ctx.convert.unsolvable,
)
if "__iter__" not in cls.members:
cls.members["__iter__"] = overlay_utils.make_method(
ctx=self.ctx,
node=node,
name="__iter__",
return_type=self.ctx.convert.lookup_value("typing", "Iterator"),
)
if "__len__" not in cls.members:
cls.members["__len__"] = overlay_utils.make_method(
ctx=self.ctx,
node=node,
name="__len__",
return_type=self.ctx.convert.int_type,
)
def decorate(self, node, cls):
super().decorate(node, cls)
if not isinstance(cls, abstract.InterpreterClass):
return
self._add_replace_method(node, cls)
self._add_from_tuple_method(node, cls)
self._add_to_tuple_method(node, cls)
if not self.args[cls]["mappable_dataclass"]:
return
mapping = self.ctx.convert.lookup_value("typing", "Mapping")
overlay_utils.add_base_class(node, cls, mapping)
self._add_mapping_methods(node, cls)
| Dataclass |
python | wandb__wandb | wandb/sdk/artifacts/_generated/run_output_artifacts.py | {
"start": 240,
"end": 328
} | class ____(GQLResult):
project: Optional[RunOutputArtifactsProject]
| RunOutputArtifacts |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/exc.py | {
"start": 12121,
"end": 12618
} | class ____(NoReferenceError):
"""Raised by ``ForeignKey`` when the referred ``Column`` cannot be
located.
"""
def __init__(self, message: str, tname: str, cname: str):
NoReferenceError.__init__(self, message)
self.table_name = tname
self.column_name = cname
def __reduce__(self) -> Union[str, Tuple[Any, ...]]:
return (
self.__class__,
(self.args[0], self.table_name, self.column_name),
)
| NoReferencedColumnError |
python | walkccc__LeetCode | solutions/2052. Minimum Cost to Separate Sentence Into Rows/2052.py | {
"start": 0,
"end": 817
} | class ____:
def minimumCost(self, sentence: str, k: int) -> int:
if len(sentence) <= k:
return 0
words = sentence.split()
# dp[i] := the minimum cost of the first i words
dp = [0] * (len(words) + 1)
for i in range(1, len(words) + 1):
n = len(words[i - 1]) # the length of the current row
dp[i] = dp[i - 1] + (k - n)**2
# Gradually add words[j - 1], words[j - 2], ....
for j in range(i - 1, 0, -1):
n += len(words[j - 1]) + 1
if n > k:
break
dp[i] = min(dp[i], dp[j - 1] + (k - n)**2)
lastRowLen = len(words[-1])
i = len(words) - 2 # Greedily put words into last row
while i > 0 and lastRowLen + len(words[i]) + 1 <= k:
lastRowLen += len(words[i]) + 1
i -= 1
return min(dp[i + 1:len(words)])
| Solution |
python | nedbat__coveragepy | tests/test_data.py | {
"start": 26496,
"end": 37321
} | class ____(CoverageTest):
"""Tests of CoverageData file handling."""
def test_reading_missing(self) -> None:
self.assert_doesnt_exist(".coverage")
covdata = DebugCoverageData()
covdata.read()
assert_line_counts(covdata, {})
def test_writing_and_reading(self) -> None:
covdata1 = DebugCoverageData()
covdata1.add_lines(LINES_1)
covdata1.write()
covdata2 = DebugCoverageData()
covdata2.read()
assert_line_counts(covdata2, SUMMARY_1)
def test_debug_output_with_debug_option(self) -> None:
# With debug option dataio, we get debug output about reading and
# writing files.
debug = DebugControlString(options=["dataio"])
covdata1 = CoverageData(debug=debug)
covdata1.add_lines(LINES_1)
covdata1.write()
covdata2 = CoverageData(debug=debug)
covdata2.read()
assert_line_counts(covdata2, SUMMARY_1)
print(debug.get_output())
assert re.search(
r"^"
+ r"Closing dbs, force=False: {}\n"
+ r"Erasing data file '.*\.coverage' \(does not exist\)\n"
+ r"Opening data file '.*\.coverage' \(does not exist\)\n"
+ r"Initing data file '.*\.coverage' \(0 bytes, modified [-:. 0-9]+\)\n"
+ r"Writing \(no-op\) data file '.*\.coverage' \(\d+ bytes, modified [-:. 0-9]+\)\n"
+ r"Opening data file '.*\.coverage' \(\d+ bytes, modified [-:. 0-9]+\)\n"
+ r"$",
debug.get_output(),
)
def test_debug_output_without_debug_option(self) -> None:
# With a debug object, but not the dataio option, we don't get debug
# output.
debug = DebugControlString(options=[])
covdata1 = CoverageData(debug=debug)
covdata1.add_lines(LINES_1)
covdata1.write()
covdata2 = CoverageData(debug=debug)
covdata2.read()
assert_line_counts(covdata2, SUMMARY_1)
assert debug.get_output() == ""
def test_explicit_suffix(self) -> None:
self.assert_doesnt_exist(".coverage.SUFFIX")
covdata = DebugCoverageData(suffix="SUFFIX")
covdata.add_lines(LINES_1)
covdata.write()
self.assert_exists(".coverage.SUFFIX")
self.assert_doesnt_exist(".coverage")
def test_true_suffix(self) -> None:
self.assert_file_count(".coverage.*", 0)
# suffix=True will make a randomly named data file.
covdata1 = DebugCoverageData(suffix=True)
covdata1.add_lines(LINES_1)
covdata1.write()
self.assert_doesnt_exist(".coverage")
data_files1 = glob.glob(".coverage.*")
assert len(data_files1) == 1
# Another suffix=True will choose a different name.
covdata2 = DebugCoverageData(suffix=True)
covdata2.add_lines(LINES_1)
covdata2.write()
self.assert_doesnt_exist(".coverage")
data_files2 = glob.glob(".coverage.*")
assert len(data_files2) == 2
# In addition to being different, the suffixes have the pid in them.
assert all(str(os.getpid()) in fn for fn in data_files2)
def test_combining(self) -> None:
self.assert_file_count(".coverage.*", 0)
covdata1 = DebugCoverageData(suffix="1")
covdata1.add_lines(LINES_1)
covdata1.write()
self.assert_exists(".coverage.1")
self.assert_file_count(".coverage.*", 1)
covdata2 = DebugCoverageData(suffix="2")
covdata2.add_lines(LINES_2)
covdata2.write()
self.assert_exists(".coverage.2")
self.assert_file_count(".coverage.*", 2)
covdata3 = DebugCoverageData()
combine_parallel_data(covdata3)
assert_line_counts(covdata3, SUMMARY_1_2)
assert_measured_files(covdata3, MEASURED_FILES_1_2)
self.assert_file_count(".coverage.*", 0)
def test_erasing(self) -> None:
covdata1 = DebugCoverageData()
covdata1.add_lines(LINES_1)
covdata1.write()
covdata1.erase()
assert_line_counts(covdata1, {})
covdata2 = DebugCoverageData()
covdata2.read()
assert_line_counts(covdata2, {})
def test_erasing_parallel(self) -> None:
self.make_file("datafile.1")
self.make_file("datafile.2")
self.make_file(".coverage")
data = DebugCoverageData("datafile")
data.erase(parallel=True)
self.assert_file_count("datafile.*", 0)
self.assert_exists(".coverage")
def test_combining_with_aliases(self) -> None:
covdata1 = DebugCoverageData(suffix="1")
covdata1.add_lines(
{
"/home/ned/proj/src/a.py": {1, 2},
"/home/ned/proj/src/sub/b.py": {3},
"/home/ned/proj/src/template.html": {10},
}
)
covdata1.add_file_tracers(
{
"/home/ned/proj/src/template.html": "html.plugin",
}
)
covdata1.write()
covdata2 = DebugCoverageData(suffix="2")
covdata2.add_lines(
{
r"c:\ned\test\a.py": {4, 5},
r"c:\ned\test\sub\b.py": {3, 6},
}
)
covdata2.write()
self.assert_file_count(".coverage.*", 2)
self.make_file("a.py", "")
self.make_file("sub/b.py", "")
self.make_file("template.html", "")
covdata3 = DebugCoverageData()
aliases = PathAliases()
aliases.add("/home/ned/proj/src/", "./")
aliases.add(r"c:\ned\test", "./")
combine_parallel_data(covdata3, aliases=aliases)
self.assert_file_count(".coverage.*", 0)
self.assert_exists(".coverage")
apy = canonical_filename("./a.py")
sub_bpy = canonical_filename("./sub/b.py")
template_html = canonical_filename("./template.html")
assert_line_counts(covdata3, {apy: 4, sub_bpy: 2, template_html: 1}, fullpath=True)
assert_measured_files(covdata3, [apy, sub_bpy, template_html])
assert covdata3.file_tracer(template_html) == "html.plugin"
def test_combining_from_different_directories(self) -> None:
os.makedirs("cov1")
covdata1 = DebugCoverageData("cov1/.coverage.1")
covdata1.add_lines(LINES_1)
covdata1.write()
os.makedirs("cov2")
covdata2 = DebugCoverageData("cov2/.coverage.2")
covdata2.add_lines(LINES_2)
covdata2.write()
# This data won't be included.
covdata_xxx = DebugCoverageData(".coverage.xxx")
covdata_xxx.add_arcs(ARCS_3)
covdata_xxx.write()
covdata3 = DebugCoverageData()
combine_parallel_data(covdata3, data_paths=["cov1", "cov2"])
assert_line_counts(covdata3, SUMMARY_1_2)
assert_measured_files(covdata3, MEASURED_FILES_1_2)
self.assert_doesnt_exist("cov1/.coverage.1")
self.assert_doesnt_exist("cov2/.coverage.2")
self.assert_exists(".coverage.xxx")
def test_combining_from_files(self) -> None:
os.makedirs("cov1")
covdata1 = DebugCoverageData("cov1/.coverage.1")
covdata1.add_lines(LINES_1)
covdata1.write()
# Journal files should never be included in the combining.
self.make_file("cov1/.coverage.1-journal", "xyzzy")
os.makedirs("cov2")
covdata2 = DebugCoverageData("cov2/.coverage.2")
covdata2.add_lines(LINES_2)
covdata2.write()
# This data won't be included.
covdata_xxx = DebugCoverageData(".coverage.xxx")
covdata_xxx.add_arcs(ARCS_3)
covdata_xxx.write()
covdata_2xxx = DebugCoverageData("cov2/.coverage.xxx")
covdata_2xxx.add_arcs(ARCS_3)
covdata_2xxx.write()
covdata3 = DebugCoverageData()
combine_parallel_data(covdata3, data_paths=["cov1", "cov2/.coverage.2"])
assert_line_counts(covdata3, SUMMARY_1_2)
assert_measured_files(covdata3, MEASURED_FILES_1_2)
self.assert_doesnt_exist("cov1/.coverage.1")
self.assert_doesnt_exist("cov2/.coverage.2")
self.assert_exists(".coverage.xxx")
self.assert_exists("cov2/.coverage.xxx")
def test_combining_from_nonexistent_directories(self) -> None:
covdata = DebugCoverageData()
msg = "Couldn't combine from non-existent path 'xyzzy'"
with pytest.raises(NoDataError, match=msg):
combine_parallel_data(covdata, data_paths=["xyzzy"])
def test_interleaved_erasing_bug716(self) -> None:
# pytest-cov could produce this scenario. #716
covdata1 = DebugCoverageData()
covdata2 = DebugCoverageData()
# this used to create the .coverage database file..
covdata2.set_context("")
# then this would erase it all..
covdata1.erase()
# then this would try to use tables that no longer exist.
# "no such table: meta"
covdata2.add_lines(LINES_1)
@pytest.mark.parametrize(
"dpart, fpart",
[
("", "[b-a]"),
("[3-1]", ""),
("[3-1]", "[b-a]"),
],
)
def test_combining_with_crazy_filename(self, dpart: str, fpart: str) -> None:
dirname = f"py{dpart}"
basename = f"{dirname}/.coverage{fpart}"
os.makedirs(dirname)
covdata1 = CoverageData(basename=basename, suffix="1")
covdata1.add_lines(LINES_1)
covdata1.write()
covdata2 = CoverageData(basename=basename, suffix="2")
covdata2.add_lines(LINES_2)
covdata2.write()
covdata3 = CoverageData(basename=basename)
combine_parallel_data(covdata3)
assert_line_counts(covdata3, SUMMARY_1_2)
assert_measured_files(covdata3, MEASURED_FILES_1_2)
self.assert_file_count(glob.escape(basename) + ".*", 0)
def test_meta_data(self) -> None:
# The metadata written to the data file shouldn't interfere with
# hashing to remove duplicates, except for debug=process, which
# writes debugging info as metadata.
debug = DebugControlString(options=[])
covdata1 = CoverageData(basename="meta.1", debug=debug)
covdata1.add_lines(LINES_1)
covdata1.write()
with sqlite3.connect("meta.1") as con:
data = sorted(k for (k,) in con.execute("select key from meta"))
assert data == ["has_arcs", "version"]
debug = DebugControlString(options=["process"])
covdata2 = CoverageData(basename="meta.2", debug=debug)
covdata2.add_lines(LINES_1)
covdata2.write()
with sqlite3.connect("meta.2") as con:
data = sorted(k for (k,) in con.execute("select key from meta"))
assert data == ["has_arcs", "sys_argv", "version", "when"]
| CoverageDataFilesTest |
python | TheAlgorithms__Python | data_compression/huffman.py | {
"start": 49,
"end": 297
} | class ____:
def __init__(self, letter: str, freq: int):
self.letter: str = letter
self.freq: int = freq
self.bitstring: dict[str, str] = {}
def __repr__(self) -> str:
return f"{self.letter}:{self.freq}"
| Letter |
python | run-llama__llama_index | llama-index-integrations/callbacks/llama-index-callbacks-agentops/llama_index/callbacks/agentops/base.py | {
"start": 5723,
"end": 8660
} | class ____(BaseEventHandler):
_shared_handler_state: AgentOpsHandlerState = PrivateAttr()
_ao_client: AOClient = PrivateAttr()
def __init__(
self, shared_handler_state: AgentOpsHandlerState, ao_client: AOClient
) -> None:
super().__init__()
self._shared_handler_state = shared_handler_state
self._ao_client = ao_client
@classmethod
def class_name(cls) -> str:
return "AgentOpsEventHandler"
def handle(self, event: BaseEvent) -> None:
# We only track chat events that are emitted while using an agent
is_agent_chat_event = self._shared_handler_state.check_is_agent_chat_span(
event.span_id
)
if isinstance(event, AgentRunStepStartEvent):
self._shared_handler_state.is_agent_chat_span[event.span_id] = True
if isinstance(event, LLMChatStartEvent) and is_agent_chat_event:
self._shared_handler_state.agent_chat_start_event[event.span_id] = event
elif isinstance(event, LLMChatEndEvent) and is_agent_chat_event:
message_dicts = []
for message in event.messages:
message_dicts.append(
{
"content": message.content,
"role": message.role,
}
)
result_dict = None
usage = {
"prompt_tokens": None,
"completion_tokens": None,
}
if event.response:
result_dict = {
"content": event.response.message.content,
"role": event.response.message.role,
}
if event.response.raw:
usage = dict(event.response.raw.get("usage", {}))
completion_tokens = usage.get("completion_tokens", None)
prompt_tokens = usage.get("prompt_tokens", None)
usage["prompt_tokens"] = prompt_tokens
usage["completion_tokens"] = completion_tokens
event_params: Dict[str, Any] = {
"prompt": message_dicts,
"completion": result_dict,
**usage,
}
# Get model info from chat start event corresponding to this chat end event
start_event = self._shared_handler_state.get_chat_start_event(event.span_id)
if start_event:
event_params["model"] = (
start_event.model_dict["model"]
if "model" in start_event.model_dict
else None
)
self._ao_client.record(LLMEvent(**event_params))
elif isinstance(event, AgentToolCallEvent):
params = json.loads(event.arguments) if event.arguments else None
self._ao_client.record(ToolEvent(name=event.tool.name, params=params))
| AgentOpsEventHandler |
python | encode__django-rest-framework | rest_framework/permissions.py | {
"start": 3882,
"end": 4100
} | class ____(BasePermission):
"""
Allows access only to authenticated users.
"""
def has_permission(self, request, view):
return bool(request.user and request.user.is_authenticated)
| IsAuthenticated |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 471054,
"end": 471486
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of AddUpvote"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "subject")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
subject = sgqlc.types.Field(Votable, graphql_name="subject")
"""The votable subject."""
| AddUpvotePayload |
python | hynek__structlog | tests/processors/test_renderers.py | {
"start": 14688,
"end": 15347
} | class ____:
def test_overwrite(self):
"""
If there is a timestamp, leave it.
"""
mts = MaybeTimeStamper()
assert {"timestamp": 42} == mts(None, None, {"timestamp": 42})
def test_overwrite_custom_key(self):
"""
If there is a timestamp with a custom key, leave it.
"""
mts = MaybeTimeStamper(key="timestamp2")
assert {"timestamp2": 42} == mts(None, None, {"timestamp2": 42})
def test_none(self):
"""
If there is no timestamp, add one.
"""
mts = MaybeTimeStamper()
assert "timestamp" in mts(None, None, {})
| TestMaybeTimeStamper |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 59305,
"end": 60002
} | class ____(RootModel[str]):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(frozen=True)
TaskKeyStr = Annotated[
str,
Field(
...,
description=(
"A unique name for the task. This field is used to refer to this task from"
" other tasks.\nThis field is required and must be unique within its parent"
" job.\nOn Update or Reset, this field is used to reference the tasks to be"
" updated or reset.\nThe maximum length is 100 characters."
),
examples=["Task_Key"],
max_length=100,
min_length=1,
pattern="^[\\w\\-]+$",
),
]
| TaskDescription |
python | huggingface__transformers | src/transformers/models/visual_bert/modeling_visual_bert.py | {
"start": 19716,
"end": 21092
} | class ____(ModelOutput):
r"""
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
Total loss as the sum of the masked language modeling loss and the sentence-image prediction
(classification) loss.
prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
Prediction scores of the sentence-image prediction (classification) head (scores of True/False continuation
before SoftMax).
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: Optional[torch.FloatTensor] = None
seq_relationship_logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@auto_docstring(
custom_intro="""
The model can behave as an encoder (with only self-attention) following the architecture described in [Attention is
all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
"""
)
| VisualBertForPreTrainingOutput |
python | astropy__astropy | astropy/modeling/functional_models.py | {
"start": 92621,
"end": 96591
} | class ____(Fittable2DModel):
"""
Two dimensional Airy disk model.
Parameters
----------
amplitude : float
Amplitude of the Airy function.
x_0 : float
x position of the maximum of the Airy function.
y_0 : float
y position of the maximum of the Airy function.
radius : float
The radius of the Airy disk (radius of the first zero).
See Also
--------
Box2D, TrapezoidDisk2D, Gaussian2D
Notes
-----
Model formula:
.. math:: f(r) = A \\left[
\\frac{2 J_1(\\frac{\\pi r}{R/R_z})}{\\frac{\\pi r}{R/R_z}}
\\right]^2
Where :math:`J_1` is the first order Bessel function of the first
kind, :math:`r` is radial distance from the maximum of the Airy
function (:math:`r = \\sqrt{(x - x_0)^2 + (y - y_0)^2}`), :math:`R`
is the input ``radius`` parameter, and :math:`R_z =
1.2196698912665045`).
For an optical system, the radius of the first zero represents the
limiting angular resolution and is approximately 1.22 * lambda / D,
where lambda is the wavelength of the light and D is the diameter of
the aperture.
For reference, the total power integrated radially to infinity [1]_
over the plane is given by:
.. math:: P = \\int_0^{2 \\pi} \\int_0^\\infty f(r) r dr d\\theta
= \\frac{A 4 R^2}{\\pi R_z^2}
One may therefore calculate the amplitude for a given power and
radius as:
.. math:: A = \\frac{\\pi P R_z^2}{4 R^2}
See [2]_ for more details about the Airy disk.
References
----------
.. [1] https://www.wolframalpha.com
.. [2] https://en.wikipedia.org/wiki/Airy_disk
"""
amplitude = Parameter(
default=1, description="Amplitude (peak value) of the Airy function"
)
x_0 = Parameter(default=0, description="X position of the peak")
y_0 = Parameter(default=0, description="Y position of the peak")
radius = Parameter(
default=1,
description="The radius of the Airy disk (radius of first zero crossing)",
)
_rz = None
_j1 = None
@classmethod
def evaluate(cls, x, y, amplitude, x_0, y_0, radius):
"""Two dimensional Airy model function."""
if cls._rz is None:
from scipy.special import j1, jn_zeros
cls._rz = jn_zeros(1, 1)[0] / np.pi
cls._j1 = j1
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) / (radius / cls._rz)
if isinstance(r, Quantity):
# scipy function cannot handle Quantity, so turn into array.
r = r.to_value(u.dimensionless_unscaled)
# Since r can be zero, we have to take care to treat that case
# separately so as not to raise a numpy warning
z = np.ones(r.shape)
rt = np.pi * r[r > 0]
z[r > 0] = (2.0 * cls._j1(rt) / rt) ** 2
if isinstance(amplitude, Quantity):
# make z quantity too, otherwise in-place multiplication fails.
z = Quantity(z, u.dimensionless_unscaled, copy=False, subok=True)
z *= amplitude
return z
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {
self.inputs[0]: self.x_0.input_unit,
self.inputs[1]: self.y_0.input_unit,
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"radius": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
| AiryDisk2D |
python | viewflow__viewflow | viewflow/contrib/admin/__init__.py | {
"start": 435,
"end": 1382
} | class ____(AppMenuMixin, Application):
"""
Django administration Viewset adapter::
from django.contrib import admin
from viewflow.contrib.admin import Admin
site = Site(viewsets=[
Admin()
])
urls = [
path("admin/", admin.site.urls),
path('', site.urls)
]
"""
app_name = "admin"
namespace = "admin"
prefix = "admin"
title = _("Administration")
icon = Icon("build")
turbo_disabled = True
def __init__(self, *, admin_site=None, **kwargs):
self.admin_site = admin_site or admin.site
super().__init__(**kwargs)
def has_perm(self, user):
return user.is_staff
@property
def urls(self):
url_patterns, app_name, namespace = self.admin_site.urls
assert self.app_name == app_name
assert self.namespace == namespace
return url_patterns, app_name, namespace
| Admin |
python | huggingface__transformers | src/transformers/models/xlm/modeling_xlm.py | {
"start": 66878,
"end": 73795
} | class ____(XLMPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = XLMModel(config)
self.sequence_summary = XLMSequenceSummary(config)
self.logits_proj = nn.Linear(config.num_labels, 1)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
langs: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
lengths: Optional[torch.Tensor] = None,
cache: Optional[dict[str, torch.Tensor]] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, MultipleChoiceModelOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
langs (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
languages ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`.
cache (`dict[str, torch.FloatTensor]`, *optional*):
Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential
decoding.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
langs = langs.view(-1, langs.size(-1)) if langs is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
if lengths is not None:
logger.warning(
"The `lengths` parameter cannot be used with the XLM multiple choice models. Please use the "
"attention mask instead."
)
lengths = None
transformer_outputs = self.transformer(
input_ids=input_ids,
attention_mask=attention_mask,
langs=langs,
token_type_ids=token_type_ids,
position_ids=position_ids,
lengths=lengths,
cache=cache,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
output = transformer_outputs[0]
logits = self.sequence_summary(output)
logits = self.logits_proj(logits)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
__all__ = [
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
| XLMForMultipleChoice |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/sqlite/base.py | {
"start": 46337,
"end": 50606
} | class ____(_DateTimeMixin, sqltypes.Time):
r"""Represent a Python time object in SQLite using a string.
The default string storage format is::
"%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
e.g.:
.. sourcecode:: text
12:05:57.10558
The incoming storage format is by default parsed using the
Python ``time.fromisoformat()`` function.
.. versionchanged:: 2.0 ``time.fromisoformat()`` is used for default
time string parsing.
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import TIME
t = TIME(
storage_format="%(hour)02d-%(minute)02d-%(second)02d-%(microsecond)06d",
regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?"),
)
:param truncate_microseconds: when ``True`` microseconds will be truncated
from the time. Can't be specified together with ``storage_format``
or ``regexp``.
:param storage_format: format string which will be applied to the dict
with keys hour, minute, second, and microsecond.
:param regexp: regular expression which will be applied to incoming result
rows, replacing the use of ``datetime.fromisoformat()`` to parse incoming
strings. If the regexp contains named groups, the resulting match dict is
applied to the Python time() constructor as keyword arguments. Otherwise,
if positional groups are used, the time() constructor is called with
positional arguments via ``*map(int, match_obj.groups(0))``.
"""
_storage_format = "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
def __init__(self, *args, **kwargs):
truncate_microseconds = kwargs.pop("truncate_microseconds", False)
super().__init__(*args, **kwargs)
if truncate_microseconds:
assert "storage_format" not in kwargs, (
"You can specify only "
"one of truncate_microseconds or storage_format."
)
assert "regexp" not in kwargs, (
"You can specify only one of "
"truncate_microseconds or regexp."
)
self._storage_format = "%(hour)02d:%(minute)02d:%(second)02d"
def bind_processor(self, dialect):
datetime_time = datetime.time
format_ = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_time):
return format_ % {
"hour": value.hour,
"minute": value.minute,
"second": value.second,
"microsecond": value.microsecond,
}
else:
raise TypeError(
"SQLite Time type only accepts Python "
"time objects as input."
)
return process
def result_processor(self, dialect, coltype):
if self._reg:
return processors.str_to_datetime_processor_factory(
self._reg, datetime.time
)
else:
return processors.str_to_time
colspecs = {
sqltypes.Date: DATE,
sqltypes.DateTime: DATETIME,
sqltypes.JSON: _SQliteJson,
sqltypes.JSON.JSONIndexType: JSONIndexType,
sqltypes.JSON.JSONPathType: JSONPathType,
sqltypes.Time: TIME,
}
ischema_names = {
"BIGINT": sqltypes.BIGINT,
"BLOB": sqltypes.BLOB,
"BOOL": sqltypes.BOOLEAN,
"BOOLEAN": sqltypes.BOOLEAN,
"CHAR": sqltypes.CHAR,
"DATE": sqltypes.DATE,
"DATE_CHAR": sqltypes.DATE,
"DATETIME": sqltypes.DATETIME,
"DATETIME_CHAR": sqltypes.DATETIME,
"DOUBLE": sqltypes.DOUBLE,
"DECIMAL": sqltypes.DECIMAL,
"FLOAT": sqltypes.FLOAT,
"INT": sqltypes.INTEGER,
"INTEGER": sqltypes.INTEGER,
"JSON": JSON,
"NUMERIC": sqltypes.NUMERIC,
"REAL": sqltypes.REAL,
"SMALLINT": sqltypes.SMALLINT,
"TEXT": sqltypes.TEXT,
"TIME": sqltypes.TIME,
"TIME_CHAR": sqltypes.TIME,
"TIMESTAMP": sqltypes.TIMESTAMP,
"VARCHAR": sqltypes.VARCHAR,
"NVARCHAR": sqltypes.NVARCHAR,
"NCHAR": sqltypes.NCHAR,
}
| TIME |
python | great-expectations__great_expectations | tests/core/test_config_substitutor.py | {
"start": 393,
"end": 695
} | class ____:
def __init__(self, secret_response):
self.secret_response = secret_response
def get_secret_value(self, *args, **kwargs):
return self.secret_response
def get_parameter(self, *args, **kwargs):
return self.get_secret_value(*args, **kwargs)
| MockedBoto3Client |
python | RaRe-Technologies__gensim | gensim/topic_coherence/text_analysis.py | {
"start": 10634,
"end": 13787
} | class ____(WindowedTextsAnalyzer):
"""Accumulate word occurrences and co-occurrences from a sequence of corpus texts."""
def __init__(self, *args):
super(WordOccurrenceAccumulator, self).__init__(*args)
self._occurrences = np.zeros(self._vocab_size, dtype='uint32')
self._co_occurrences = sps.lil_matrix((self._vocab_size, self._vocab_size), dtype='uint32')
self._uniq_words = np.zeros((self._vocab_size + 1,), dtype=bool) # add 1 for none token
self._counter = Counter()
def __str__(self):
return self.__class__.__name__
def accumulate(self, texts, window_size):
self._co_occurrences = self._co_occurrences.tolil()
self.partial_accumulate(texts, window_size)
self._symmetrize()
return self
def partial_accumulate(self, texts, window_size):
"""Meant to be called several times to accumulate partial results.
Notes
-----
The final accumulation should be performed with the `accumulate` method as opposed to this one.
This method does not ensure the co-occurrence matrix is in lil format and does not
symmetrize it after accumulation.
"""
self._current_doc_num = -1
self._token_at_edge = None
self._counter.clear()
super(WordOccurrenceAccumulator, self).accumulate(texts, window_size)
for combo, count in self._counter.items():
self._co_occurrences[combo] += count
return self
def analyze_text(self, window, doc_num=None):
self._slide_window(window, doc_num)
mask = self._uniq_words[:-1] # to exclude none token
if mask.any():
self._occurrences[mask] += 1
self._counter.update(itertools.combinations(np.nonzero(mask)[0], 2))
def _slide_window(self, window, doc_num):
if doc_num != self._current_doc_num:
self._uniq_words[:] = False
self._uniq_words[np.unique(window)] = True
self._current_doc_num = doc_num
else:
self._uniq_words[self._token_at_edge] = False
self._uniq_words[window[-1]] = True
self._token_at_edge = window[0]
def _symmetrize(self):
"""Word pairs may have been encountered in (i, j) and (j, i) order.
Notes
-----
Rather than enforcing a particular ordering during the update process,
we choose to symmetrize the co-occurrence matrix after accumulation has completed.
"""
co_occ = self._co_occurrences
co_occ.setdiag(self._occurrences) # diagonal should be equal to occurrence counts
self._co_occurrences = \
co_occ + co_occ.T - sps.diags(co_occ.diagonal(), offsets=0, dtype='uint32')
def _get_occurrences(self, word_id):
return self._occurrences[word_id]
def _get_co_occurrences(self, word_id1, word_id2):
return self._co_occurrences[word_id1, word_id2]
def merge(self, other):
self._occurrences += other._occurrences
self._co_occurrences += other._co_occurrences
self._num_docs += other._num_docs
| WordOccurrenceAccumulator |
python | huggingface__transformers | tests/models/swin2sr/test_modeling_swin2sr.py | {
"start": 5606,
"end": 10941
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (Swin2SRModel, Swin2SRForImageSuperResolution) if is_torch_available() else ()
pipeline_model_mapping = (
{"image-feature-extraction": Swin2SRModel, "image-to-image": Swin2SRForImageSuperResolution}
if is_torch_available()
else {}
)
test_resize_embeddings = False
test_torch_exportable = True
def setUp(self):
self.model_tester = Swin2SRModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=Swin2SRConfig,
embed_dim=37,
has_text_modality=False,
common_properties=["image_size", "patch_size", "num_channels"],
)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_for_image_super_resolution(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_super_resolution(*config_and_inputs)
# TODO: check if this works again for PyTorch 2.x.y
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip(reason="Swin2SR does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Swin2SR does not support training yet")
def test_training(self):
pass
@unittest.skip(reason="Swin2SR does not support training yet")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
@slow
def test_model_from_pretrained(self):
model_name = "caidas/swin2SR-classical-sr-x2-64"
model = Swin2SRModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
expected_num_attentions = len(self.model_tester.depths)
self.assertEqual(len(attentions), expected_num_attentions)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
window_size_squared = config.window_size**2
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), expected_num_attentions)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_heads[0], window_size_squared, window_size_squared],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + 1, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), expected_num_attentions)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_heads[0], window_size_squared, window_size_squared],
)
@require_vision
@require_torch
@slow
| Swin2SRModelTest |
python | django__django | tests/expressions/models.py | {
"start": 2565,
"end": 2891
} | class ____(models.Model):
start = models.ForeignKey(Time, models.CASCADE, null=True, related_name="+")
end = models.ForeignKey(Time, models.CASCADE, null=True, related_name="+")
midpoint = models.TimeField()
def __str__(self):
return "%s (%s to %s)" % (self.midpoint, self.start, self.end)
| SimulationRun |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/instance.py | {
"start": 3794,
"end": 4606
} | class ____(graphene.ObjectType):
runId = graphene.NonNull(graphene.String)
stepKey = graphene.NonNull(graphene.String)
enqueuedTimestamp = graphene.NonNull(graphene.Float)
assignedTimestamp = graphene.Float()
priority = graphene.Int()
class Meta:
name = "PendingConcurrencyStep"
def __init__(self, pending_step_info: PendingStepInfo):
super().__init__(
runId=pending_step_info.run_id,
stepKey=pending_step_info.step_key,
enqueuedTimestamp=pending_step_info.enqueued_timestamp.timestamp(),
assignedTimestamp=pending_step_info.assigned_timestamp.timestamp()
if pending_step_info.assigned_timestamp
else None,
priority=pending_step_info.priority,
)
| GraphenePendingConcurrencyStep |
python | TheAlgorithms__Python | matrix/count_islands_in_matrix.py | {
"start": 174,
"end": 1480
} | class ____: # Public class to implement a graph
def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
self.ROW = row
self.COL = col
self.graph = graph
def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
# Checking all 8 elements surrounding nth element
row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
visited[i][j] = True # Make those cells visited
for k in range(8):
if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
self.diffs(i + row_nbr[k], j + col_nbr[k], visited)
def count_islands(self) -> int: # And finally, count all islands.
visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
count = 0
for i in range(self.ROW):
for j in range(self.COL):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(i, j, visited)
count += 1
return count
| Matrix |
python | pyca__cryptography | src/cryptography/hazmat/primitives/asymmetric/ec.py | {
"start": 7253,
"end": 7507
} | class ____(EllipticCurve):
name = "sect571k1"
key_size = 570
group_order = 0x20000000000000000000000000000000000000000000000000000000000000000000000131850E1F19A63E4B391A8DB917F4138B630D84BE5D639381E91DEB45CFE778F637C1001 # noqa: E501
| SECT571K1 |
python | python-attrs__attrs | bench/test_benchmarks.py | {
"start": 1127,
"end": 1914
} | class ____:
x: int = 0
y: str = "foo"
z: dict[str, int] = attrs.Factory(dict)
def test_instantiate_no_defaults():
"""
Benchmark instantiating a class without using any defaults.
"""
for _ in range(ROUNDS):
C(1, "2", {})
def test_instantiate_with_defaults():
"""
Benchmark instantiating a class relying on defaults.
"""
for _ in range(ROUNDS):
C()
def test_eq_equal():
"""
Benchmark comparing two equal instances for equality.
"""
c1 = C()
c2 = C()
for _ in range(ROUNDS):
c1 == c2
def test_eq_unequal():
"""
Benchmark comparing two unequal instances for equality.
"""
c1 = C()
c2 = C(1, "bar", {"baz": 42})
for _ in range(ROUNDS):
c1 == c2
@attrs.frozen
| C |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/ranges.py | {
"start": 30318,
"end": 30448
} | class ____(AbstractSingleRange[int]):
"""Represent the PostgreSQL INT4RANGE type."""
__visit_name__ = "INT4RANGE"
| INT4RANGE |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/remote_representation/grpc_server_registry.py | {
"start": 2023,
"end": 12048
} | class ____(AbstractContextManager):
def __init__(
self,
instance_ref: Optional[InstanceRef],
server_command: "GrpcServerCommand",
# How long the process can live without a heartbeat before it dies. You should ensure
# that any processes returned by this registry have at least one
# GrpcServerCodeLocation hitting the server with a heartbeat while you want the
# process to stay running.
heartbeat_ttl: int,
# How long to wait for the server to start up and receive connections before timing out
startup_timeout: int,
wait_for_processes_on_shutdown: bool,
log_level: str = "INFO",
inject_env_vars_from_instance: bool = True,
container_image: Optional[str] = None,
container_context: Optional[dict[str, Any]] = None,
additional_timeout_msg: Optional[str] = None,
defs_state_info: Optional[DefsStateInfo] = None,
):
self.instance_ref = instance_ref
self.server_command = server_command
# map of servers being currently returned, keyed by origin ID
self._active_entries: dict[str, Union[ServerRegistryEntry, ErrorRegistryEntry]] = {}
self._waited_for_processes = False
self._heartbeat_ttl = check.int_param(heartbeat_ttl, "heartbeat_ttl")
self._startup_timeout = check.int_param(startup_timeout, "startup_timeout")
self._additional_timeout_msg = check.opt_str_param(
additional_timeout_msg, "additional_timeout_msg"
)
self._defs_state_info = check.opt_inst_param(
defs_state_info, "defs_state_info", DefsStateInfo
)
self._lock = threading.Lock()
self._all_processes: list[GrpcServerProcess] = []
self._cleanup_thread_shutdown_event: Optional[threading.Event] = None
self._cleanup_thread: Optional[threading.Thread] = None
self._log_level = check.str_param(log_level, "log_level")
self._inject_env_vars_from_instance = inject_env_vars_from_instance
self._container_image = container_image
self._container_context = container_context
self._wait_for_processes_on_shutdown = wait_for_processes_on_shutdown
self._cleanup_thread_shutdown_event = threading.Event()
self._cleanup_thread = threading.Thread(
target=self._clear_old_processes,
name="grpc-server-registry-cleanup",
args=(self._cleanup_thread_shutdown_event,),
daemon=True,
)
self._cleanup_thread.start()
def supports_origin(
self, code_location_origin: CodeLocationOrigin
) -> TypeGuard[ManagedGrpcPythonEnvCodeLocationOrigin]:
return isinstance(code_location_origin, ManagedGrpcPythonEnvCodeLocationOrigin)
@property
def supports_reload(self) -> bool:
return True
def clear_all_grpc_endpoints(self):
# Free the map entry for all origins so that subsequent calls to _get_grpc_endpoint wil
# create a new process
with self._lock:
self._active_entries.clear()
def reload_grpc_endpoint(
self, code_location_origin: ManagedGrpcPythonEnvCodeLocationOrigin
) -> GrpcServerEndpoint:
check.inst_param(code_location_origin, "code_location_origin", CodeLocationOrigin)
with self._lock:
origin_id = code_location_origin.get_id()
if origin_id in self._active_entries:
# Free the map entry for this origin so that _get_grpc_endpoint will create
# a new process
del self._active_entries[origin_id]
return self._get_grpc_endpoint(code_location_origin)
def get_grpc_endpoint(
self, code_location_origin: ManagedGrpcPythonEnvCodeLocationOrigin
) -> GrpcServerEndpoint:
check.inst_param(code_location_origin, "code_location_origin", CodeLocationOrigin)
with self._lock:
return self._get_grpc_endpoint(code_location_origin)
def get_grpc_server_entry(
self, code_location_origin: ManagedGrpcPythonEnvCodeLocationOrigin
) -> Union[ServerRegistryEntry, ErrorRegistryEntry]:
check.inst_param(code_location_origin, "code_location_origin", CodeLocationOrigin)
with self._lock:
return self._get_grpc_server_entry(code_location_origin)
def _get_loadable_target_origin(
self, code_location_origin: ManagedGrpcPythonEnvCodeLocationOrigin
) -> LoadableTargetOrigin:
check.inst_param(
code_location_origin,
"code_location_origin",
ManagedGrpcPythonEnvCodeLocationOrigin,
)
return code_location_origin.loadable_target_origin
def _get_grpc_server_entry(
self, code_location_origin: ManagedGrpcPythonEnvCodeLocationOrigin
) -> Union[ServerRegistryEntry, ErrorRegistryEntry]:
# deferred for import perf
from dagster._grpc.server import GrpcServerProcess
origin_id = code_location_origin.get_id()
loadable_target_origin = self._get_loadable_target_origin(code_location_origin)
if not loadable_target_origin:
raise Exception(
"No Python file/module information available for location"
f" {code_location_origin.location_name}"
)
if origin_id not in self._active_entries:
refresh_server = True
else:
active_entry = self._active_entries[origin_id]
refresh_server = loadable_target_origin != active_entry.loadable_target_origin
if refresh_server:
try:
server_process = GrpcServerProcess(
instance_ref=self.instance_ref,
server_command=self.server_command,
location_name=code_location_origin.location_name,
loadable_target_origin=loadable_target_origin,
heartbeat=True,
heartbeat_timeout=self._heartbeat_ttl,
startup_timeout=self._startup_timeout,
log_level=self._log_level,
inject_env_vars_from_instance=self._inject_env_vars_from_instance,
container_image=self._container_image,
container_context=self._container_context,
additional_timeout_msg=self._additional_timeout_msg,
defs_state_info=self._defs_state_info,
)
self._all_processes.append(server_process)
self._active_entries[origin_id] = ServerRegistryEntry(
process=server_process,
loadable_target_origin=loadable_target_origin,
creation_timestamp=get_current_timestamp(),
)
except Exception:
self._active_entries[origin_id] = ErrorRegistryEntry(
error=serializable_error_info_from_exc_info(sys.exc_info()),
loadable_target_origin=loadable_target_origin,
creation_timestamp=get_current_timestamp(),
)
return self._active_entries[origin_id]
def _get_grpc_endpoint(
self, code_location_origin: ManagedGrpcPythonEnvCodeLocationOrigin
) -> GrpcServerEndpoint:
active_entry = self._get_grpc_server_entry(code_location_origin)
if isinstance(active_entry, ErrorRegistryEntry):
raise DagsterUserCodeProcessError(
active_entry.error.to_string(),
user_code_process_error_infos=[active_entry.error],
)
return GrpcServerEndpoint(
host="localhost",
port=active_entry.process.port,
socket=active_entry.process.socket,
)
# Clear out processes from the map periodically so that they'll be re-created the next
# time the origins are requested. Lack of any heartbeats will ensure that the server will
# eventually die once they're no longer being held by any threads.
def _clear_old_processes(self, shutdown_event: threading.Event) -> None:
while True:
shutdown_event.wait(5)
if shutdown_event.is_set():
break
with self._lock:
# Remove any dead processes from the all_processes map
dead_process_indexes: list[int] = []
for index in range(len(self._all_processes)):
process = self._all_processes[index]
if process.server_process.poll() is not None:
dead_process_indexes.append(index)
for index in reversed(dead_process_indexes):
self._all_processes[index].wait()
del self._all_processes[index]
def __exit__(self, exception_type, exception_value, traceback):
if self._cleanup_thread:
cast("threading.Event", self._cleanup_thread_shutdown_event).set()
self._cleanup_thread.join()
self.shutdown_all_processes()
if self._wait_for_processes_on_shutdown:
self.wait_for_processes()
def shutdown_all_processes(self):
for process in self._all_processes:
process.shutdown_server()
def are_all_servers_shut_down(self) -> bool:
for process in self._all_processes:
try:
process.create_client().ping("")
return False
except DagsterUserCodeUnreachableError:
pass
return True
def wait_for_processes(self) -> None:
# Wait for any processes created by this registry. Generally not needed outside
# of tests, since the processes have heartbeats and will end on their own once
# they finish any outstanding executions.
if self._waited_for_processes:
return
self._waited_for_processes = True
for process in self._all_processes:
process.wait()
| GrpcServerRegistry |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/typing_extensions/test_backported_types.py | {
"start": 6751,
"end": 6800
} | class ____(Story, total=False):
pages: int
| Book |
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_docs_tests/test_python_ast_rule.py | {
"start": 8478,
"end": 9733
} | class ____:
def __init__(self, name):
self.name = name
def method(self):
for i in range(10):
if i % 2 == 0:
yield i
try:
result = self.complex_operation()
except Exception as e:
print(f"Error: {e}")
return None
return result
def complex_operation(self):
return [x**2 for x in range(5) if x > 2]
"""
result = _validate_python_syntax(code)
assert result is None # Complex but valid code
def test_async_def_syntax(self):
"""Test validation of async/await syntax."""
code = """
async def async_function():
result = await some_async_call()
return result
"""
result = _validate_python_syntax(code)
assert result is None # Async syntax is valid
def test_syntax_error_with_line_info(self):
"""Test that syntax errors include line information."""
code = """
line1 = "valid"
line2 = "also valid"
line3 = invalid syntax here
line4 = "this won't be reached"
"""
result = _validate_python_syntax(code)
assert result is not None
assert "line 3" in result or "line 4" in result # Error location reported
| MyClass |
python | run-llama__llama_index | llama-index-core/llama_index/core/postprocessor/node.py | {
"start": 12628,
"end": 13785
} | class ____(BaseNodePostprocessor):
"""
Models struggle to access significant details found
in the center of extended contexts. A study
(https://arxiv.org/abs/2307.03172) observed that the best
performance typically arises when crucial data is positioned
at the start or conclusion of the input context. Additionally,
as the input context lengthens, performance drops notably, even
in models designed for long contexts.".
"""
@classmethod
def class_name(cls) -> str:
return "LongContextReorder"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
"""Postprocess nodes."""
reordered_nodes: List[NodeWithScore] = []
ordered_nodes: List[NodeWithScore] = sorted(
nodes, key=lambda x: x.score if x.score is not None else 0
)
for i, node in enumerate(ordered_nodes):
if i % 2 == 0:
reordered_nodes.insert(0, node)
else:
reordered_nodes.append(node)
return reordered_nodes
| LongContextReorder |
python | readthedocs__readthedocs.org | readthedocs/builds/migrations/0005_remove-version-alias.py | {
"start": 189,
"end": 3075
} | class ____(migrations.Migration):
safe = Safe.always()
dependencies = [
("builds", "0004_add-apiversion-proxy-model"),
]
operations = [
migrations.RemoveField(
model_name="versionalias",
name="project",
),
migrations.AlterField(
model_name="build",
name="error",
field=models.TextField(blank=True, default="", verbose_name="Error"),
),
migrations.AlterField(
model_name="build",
name="output",
field=models.TextField(blank=True, default="", verbose_name="Output"),
),
migrations.AlterField(
model_name="build",
name="state",
field=models.CharField(
choices=[
("triggered", "Triggered"),
("cloning", "Cloning"),
("installing", "Installing"),
("building", "Building"),
("finished", "Finished"),
],
default="finished",
max_length=55,
verbose_name="State",
),
),
migrations.AlterField(
model_name="build",
name="type",
field=models.CharField(
choices=[
("html", "HTML"),
("pdf", "PDF"),
("epub", "Epub"),
("man", "Manpage"),
("dash", "Dash"),
],
default="html",
max_length=55,
verbose_name="Type",
),
),
migrations.AlterField(
model_name="version",
name="privacy_level",
field=models.CharField(
choices=[
("public", "Public"),
("protected", "Protected"),
("private", "Private"),
],
default="public",
help_text="Level of privacy for this Version.",
max_length=20,
verbose_name="Privacy Level",
),
),
migrations.AlterField(
model_name="version",
name="slug",
field=readthedocs.builds.version_slug.VersionSlugField(
db_index=True,
max_length=255,
verbose_name="Slug",
),
),
migrations.AlterField(
model_name="version",
name="type",
field=models.CharField(
choices=[("branch", "Branch"), ("tag", "Tag"), ("unknown", "Unknown")],
default="unknown",
max_length=20,
verbose_name="Type",
),
),
migrations.DeleteModel(
name="VersionAlias",
),
]
| Migration |
python | streamlit__streamlit | lib/tests/streamlit/form_test.py | {
"start": 10449,
"end": 17556
} | class ____(DeltaGeneratorTestCase):
"""Test form submit button."""
def test_disabled_submit_button(self):
"""Test that a submit button can be disabled."""
with st.form("foo"):
st.form_submit_button(disabled=True)
last_delta = self.get_delta_from_queue()
assert last_delta.new_element.button.disabled
def test_submit_button_outside_form(self):
"""Test that a submit button is not allowed outside a form."""
with pytest.raises(StreamlitAPIException) as ctx:
st.form_submit_button()
assert "`st.form_submit_button()` must be used inside an `st.form()`" in str(
ctx.value
)
def test_submit_button_inside_form(self):
"""Test that a submit button is allowed inside a form."""
with st.form("foo"):
st.form_submit_button()
last_delta = self.get_delta_from_queue()
assert last_delta.new_element.button.form_id == "foo"
def test_submit_button_called_directly_on_form_block(self):
"""Test that a submit button can be called directly on a form block."""
form = st.form("foo")
form.form_submit_button()
last_delta = self.get_delta_from_queue()
assert last_delta.new_element.button.form_id == "foo"
def test_submit_button_default_type(self):
"""Test that a submit button with no explicit type has default of "secondary"."""
form = st.form("foo")
form.form_submit_button()
last_delta = self.get_delta_from_queue()
assert last_delta.new_element.button.type == "secondary"
def test_submit_button_with_key(self):
"""Test that a submit button can have a custom key."""
form = st.form("foo")
form.form_submit_button(key="submit_button")
last_delta = self.get_delta_from_queue()
assert "submit_button" in last_delta.new_element.button.id
@parameterized.expand(["primary", "secondary", "tertiary"])
def test_submit_button_types(self, type):
"""Test that a submit button can be called with different types."""
form = st.form("foo")
form.form_submit_button(type=type)
last_delta = self.get_delta_from_queue()
assert type == last_delta.new_element.button.type
def test_submit_button_emoji_icon(self):
"""Test that a submit button can be called with an emoji icon."""
form = st.form("foo")
form.form_submit_button(icon="⚡")
last_delta = self.get_delta_from_queue()
assert last_delta.new_element.button.icon == "⚡"
def test_submit_button_material_icon(self):
"""Test that a submit button can be called with a Material icon."""
form = st.form("foo")
form.form_submit_button(icon=":material/thumb_up:")
last_delta = self.get_delta_from_queue()
assert last_delta.new_element.button.icon == ":material/thumb_up:"
def test_submit_button_does_not_use_container_width_by_default(self):
"""Test that a submit button does not use_use_container width by default."""
form = st.form("foo")
form.form_submit_button(type="primary")
last_delta = self.get_delta_from_queue()
assert not last_delta.new_element.button.use_container_width
def test_return_false_when_not_submitted(self):
with st.form("form1"):
submitted = st.form_submit_button("Submit")
assert not submitted
@patch(
"streamlit.elements.widgets.button.register_widget",
MagicMock(return_value=RegisterWidgetResult(True, False)),
)
def test_return_true_when_submitted(self):
with st.form("form"):
submitted = st.form_submit_button("Submit")
assert submitted
def test_shows_cached_widget_replay_warning(self):
"""Test that a warning is shown when this widget is used inside a cached function."""
@st.cache_data
def cached_function():
with st.form("form"):
st.form_submit_button("Submit")
cached_function()
# The widget itself is still created, so we need to go back one element more:
el = self.get_delta_from_queue(-2).new_element.exception
assert el.type == "CachedWidgetWarning"
assert el.is_warning
def test_use_container_width_true(self):
"""Test use_container_width=True is mapped to width='stretch'."""
for width in ["stretch", "content", 200]:
with self.subTest(f"width={width}"):
with st.form(f"test_form {width} use_container_width = true"):
st.form_submit_button(
"Submit use_container_width=true",
use_container_width=True,
width=width,
)
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert el.width_config.use_stretch is True
with self.subTest("no width"):
with st.form("test_form no width and use_container_width = true"):
st.form_submit_button(
"Submit no width but use_container_width=true",
use_container_width=True,
)
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert el.width_config.use_stretch is True
def test_use_container_width_false(self):
"""Test use_container_width=False is mapped to width='content'."""
for width in ["stretch", "content", 200]:
with self.subTest(f"width={width}"):
with st.form(f"test_form {width} use_container_width = false"):
st.form_submit_button(
"Submit use_container_width = false",
use_container_width=False,
width=width,
)
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_CONTENT.value
)
assert el.width_config.use_content is True
with self.subTest("no width"):
with st.form("test_form no width and use_container_width = false"):
st.form_submit_button(
"Submit no width and use_container_width = false",
use_container_width=False,
)
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_CONTENT.value
)
assert el.width_config.use_content is True
@patch("streamlit.runtime.Runtime.exists", MagicMock(return_value=True))
| FormSubmitButtonTest |
python | mamba-org__mamba | docs/source/tools/mermaid_inheritance.py | {
"start": 4613,
"end": 10205
} | class ____(InheritanceDiagram):
"""
Run when the mermaid_inheritance directive is first encountered.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {
"parts": int,
"private-bases": directives.flag,
"caption": directives.unchanged,
"top-classes": directives.unchanged_required,
}
def run(self) -> list[Node]:
node = mermaid_inheritance()
node.document = self.state.document
class_names = self.arguments[0].split()
class_role = self.env.get_domain("py").role("class")
# Store the original content for use as a hash
node["parts"] = self.options.get("parts", 0)
node["content"] = ", ".join(class_names)
node["top-classes"] = []
for cls in self.options.get("top-classes", "").split(","):
cls = cls.strip()
if cls:
node["top-classes"].append(cls)
# Create a graph starting with the list of classes
try:
graph = MermaidGraph(
class_names,
self.env.ref_context.get("py:module"),
parts=node["parts"],
private_bases="private-bases" in self.options,
aliases=self.config.inheritance_alias,
top_classes=node["top-classes"],
)
except InheritanceException as err:
return [node.document.reporter.warning(err, line=self.lineno)]
# Create xref nodes for each target of the graph's image map and
# add them to the doc tree so that Sphinx can resolve the
# references to real URLs later. These nodes will eventually be
# removed from the doctree after we're done with them.
for name in graph.get_all_class_names():
refnodes, x = class_role( # type: ignore
"class", ":class:`%s`" % name, name, 0, self.state
) # type: ignore
node.extend(refnodes)
# Store the graph object so we can use it to generate the
# dot file later
node["graph"] = graph
if "caption" not in self.options:
self.add_name(node)
return [node]
else:
figure = figure_wrapper(self, node, self.options["caption"])
self.add_name(figure)
return [figure]
def html_visit_mermaid_inheritance(self: HTMLTranslator, node: inheritance_diagram) -> None:
"""
Output the graph for HTML. This will insert a PNG with clickable
image map.
"""
graph = node["graph"]
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
# Create a mapping from fully-qualified class names to URLs.
mermaid_output_format = self.builder.env.config.mermaid_output_format.upper()
current_filename = self.builder.current_docname + self.builder.out_suffix
urls = {}
pending_xrefs = cast(Iterable[addnodes.pending_xref], node)
for child in pending_xrefs:
if child.get("refuri") is not None:
if mermaid_output_format == "SVG":
urls[child["reftitle"]] = "../" + child.get("refuri")
else:
urls[child["reftitle"]] = child.get("refuri")
elif child.get("refid") is not None:
if mermaid_output_format == "SVG":
urls[child["reftitle"]] = "../" + current_filename + "#" + child.get("refid")
else:
urls[child["reftitle"]] = "#" + child.get("refid")
dotcode = graph.generate_dot(name, urls, env=self.builder.env)
render_mm_html(
self,
node,
dotcode,
{},
"inheritance",
"inheritance",
alt="Inheritance diagram of " + node["content"],
)
raise nodes.SkipNode
def latex_visit_mermaid_inheritance(self: LaTeXTranslator, node: inheritance_diagram) -> None:
"""
Output the graph for LaTeX. This will insert a PDF.
"""
graph = node["graph"]
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
dotcode = graph.generate_dot(
name,
env=self.builder.env,
)
# graph_attrs={'size': '"6.0,6.0"'})
render_mm_latex(self, node, dotcode, {}, "inheritance")
raise nodes.SkipNode
def texinfo_visit_mermaid_inheritance(self: TexinfoTranslator, node: inheritance_diagram) -> None:
"""
Output the graph for Texinfo. This will insert a PNG.
"""
graph = node["graph"]
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
dotcode = graph.generate_dot(
name,
env=self.builder.env,
)
# graph_attrs={'size': '"6.0,6.0"'})
render_mm_texinfo(self, node, dotcode, {}, "inheritance")
raise nodes.SkipNode
def setup(app: Sphinx) -> dict[str, Any]:
app.setup_extension("mermaid")
app.add_node(
mermaid_inheritance,
latex=(latex_visit_mermaid_inheritance, None),
html=(html_visit_mermaid_inheritance, None),
text=(skip, None),
man=(skip, None),
texinfo=(texinfo_visit_mermaid_inheritance, None),
)
app.add_directive("mermaid-inheritance", MermaidDiagram)
# app.add_config_value('mermaid_inheritance_graph_attrs', {}, False)
# app.add_config_value('mermaid_inheritance_node_attrs', {}, False)
# app.add_config_value('mermaid_inheritance_edge_attrs', {}, False)
app.add_config_value("inheritance_alias", {}, False)
return {"version": sphinx.__display_version__, "parallel_read_safe": True}
| MermaidDiagram |
python | walkccc__LeetCode | solutions/3393. Count Paths With the Given XOR Value/3393-3.py | {
"start": 0,
"end": 773
} | class ____:
def countPathsWithXorValue(self, grid, k):
MOD = 1_000_000_007
MAX = 15
m = len(grid)
n = len(grid[0])
# dp[i][j][xors] := the number of paths from (0, 0) to (i, j) with XOR
# value `xors`
dp = [[[0] * (MAX + 1)
for _ in range(n)]
for _ in range(m)]
dp[0][0][grid[0][0]] = 1
for i in range(m):
for j in range(n):
for xors in range(MAX + 1):
if i + 1 < m:
newXor = xors ^ grid[i + 1][j]
dp[i + 1][j][newXor] += dp[i][j][xors]
dp[i + 1][j][newXor] %= MOD
if j + 1 < n:
newXor = xors ^ grid[i][j + 1]
dp[i][j + 1][newXor] += dp[i][j][xors]
dp[i][j + 1][newXor] %= MOD
return dp[-1][-1][k]
| Solution |
python | openai__openai-python | src/openai/resources/videos.py | {
"start": 1428,
"end": 14958
} | class ____(SyncAPIResource):
@cached_property
def with_raw_response(self) -> VideosWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return VideosWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> VideosWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return VideosWithStreamingResponse(self)
def create(
self,
*,
prompt: str,
input_reference: FileTypes | Omit = omit,
model: VideoModel | Omit = omit,
seconds: VideoSeconds | Omit = omit,
size: VideoSize | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Video:
"""
Create a video
Args:
prompt: Text prompt that describes the video to generate.
input_reference: Optional image reference that guides generation.
model: The video generation model to use. Defaults to `sora-2`.
seconds: Clip duration in seconds. Defaults to 4 seconds.
size: Output resolution formatted as width x height. Defaults to 720x1280.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
body = deepcopy_minimal(
{
"prompt": prompt,
"input_reference": input_reference,
"model": model,
"seconds": seconds,
"size": size,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["input_reference"]])
if files:
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return self._post(
"/videos",
body=maybe_transform(body, video_create_params.VideoCreateParams),
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Video,
)
def create_and_poll(
self,
*,
prompt: str,
input_reference: FileTypes | Omit = omit,
model: VideoModel | Omit = omit,
seconds: VideoSeconds | Omit = omit,
size: VideoSize | Omit = omit,
poll_interval_ms: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Video:
"""Create a video and wait for it to be processed."""
video = self.create(
model=model,
prompt=prompt,
input_reference=input_reference,
seconds=seconds,
size=size,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
)
return self.poll(
video.id,
poll_interval_ms=poll_interval_ms,
)
def poll(
self,
video_id: str,
*,
poll_interval_ms: int | Omit = omit,
) -> Video:
"""Wait for the vector store file to finish processing.
Note: this will return even if the file failed to process, you need to check
file.last_error and file.status to handle these cases
"""
headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
if is_given(poll_interval_ms):
headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
while True:
response = self.with_raw_response.retrieve(
video_id,
extra_headers=headers,
)
video = response.parse()
if video.status == "in_progress" or video.status == "queued":
if not is_given(poll_interval_ms):
from_header = response.headers.get("openai-poll-after-ms")
if from_header is not None:
poll_interval_ms = int(from_header)
else:
poll_interval_ms = 1000
self._sleep(poll_interval_ms / 1000)
elif video.status == "completed" or video.status == "failed":
return video
else:
if TYPE_CHECKING: # type: ignore[unreachable]
assert_never(video.status)
else:
return video
def retrieve(
self,
video_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Video:
"""
Retrieve a video
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not video_id:
raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
return self._get(
f"/videos/{video_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Video,
)
def list(
self,
*,
after: str | Omit = omit,
limit: int | Omit = omit,
order: Literal["asc", "desc"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncConversationCursorPage[Video]:
"""
List videos
Args:
after: Identifier for the last item from the previous pagination request
limit: Number of items to retrieve
order: Sort order of results by timestamp. Use `asc` for ascending order or `desc` for
descending order.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._get_api_list(
"/videos",
page=SyncConversationCursorPage[Video],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
},
video_list_params.VideoListParams,
),
),
model=Video,
)
def delete(
self,
video_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> VideoDeleteResponse:
"""
Delete a video
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not video_id:
raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
return self._delete(
f"/videos/{video_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=VideoDeleteResponse,
)
def download_content(
self,
video_id: str,
*,
variant: Literal["video", "thumbnail", "spritesheet"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> _legacy_response.HttpxBinaryResponseContent:
"""Download video content
Args:
variant: Which downloadable asset to return.
Defaults to the MP4 video.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not video_id:
raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
return self._get(
f"/videos/{video_id}/content",
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform({"variant": variant}, video_download_content_params.VideoDownloadContentParams),
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
def remix(
self,
video_id: str,
*,
prompt: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Video:
"""
Create a video remix
Args:
prompt: Updated text prompt that directs the remix generation.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not video_id:
raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
return self._post(
f"/videos/{video_id}/remix",
body=maybe_transform({"prompt": prompt}, video_remix_params.VideoRemixParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Video,
)
| Videos |
python | django__django | tests/forms_tests/widget_tests/test_multiwidget.py | {
"start": 456,
"end": 998
} | class ____(MultiWidget):
def __init__(self, attrs=None):
widgets = (
TextInput(),
SelectMultiple(choices=WidgetTest.beatles),
SplitDateTimeWidget(),
)
super().__init__(widgets, attrs)
def decompress(self, value):
if value:
data = value.split(",")
return [
data[0],
list(data[1]),
datetime.strptime(data[2], "%Y-%m-%d %H:%M:%S"),
]
return [None, None, None]
| ComplexMultiWidget |
python | Netflix__metaflow | metaflow/client/core.py | {
"start": 6252,
"end": 26551
} | class ____(object):
"""
Base class for all Metaflow objects.
Creates a new object of a specific type (Flow, Run, Step, Task, DataArtifact) given
a path to it (its `pathspec`).
Accessing Metaflow objects is done through one of two methods:
- either by directly instantiating it with this class
- or by accessing it through its parent (iterating over
all children or accessing directly using the [] operator)
With this class, you can:
- Get a `Flow`; use `Flow('FlowName')`.
- Get a `Run` of a flow; use `Run('FlowName/RunID')`.
- Get a `Step` of a run; use `Step('FlowName/RunID/StepName')`.
- Get a `Task` of a step, use `Task('FlowName/RunID/StepName/TaskID')`
- Get a `DataArtifact` of a task; use
`DataArtifact('FlowName/RunID/StepName/TaskID/ArtifactName')`.
Attributes
----------
tags : FrozenSet[str]
Tags associated with the run this object belongs to (user and system tags).
user_tags: FrozenSet[str]
User tags associated with the run this object belongs to.
system_tags: FrozenSet[str]
System tags associated with the run this object belongs to.
created_at : datetime
Date and time this object was first created.
parent : MetaflowObject
Parent of this object. The parent of a `Run` is a `Flow` for example
pathspec : str
Pathspec of this object (for example: 'FlowName/RunID' for a `Run`)
path_components : List[str]
Components of the pathspec
origin_pathspec : str, optional
Pathspec of the original object this object was cloned from (in the case of a resume).
None if not applicable.
"""
_NAME = "base"
_CHILD_CLASS = None
_PARENT_CLASS = None
def __init__(
self,
pathspec: Optional[str] = None,
attempt: Optional[int] = None,
_object: Optional["MetaflowObject"] = None,
_parent: Optional["MetaflowObject"] = None,
_namespace_check: bool = True,
_metaflow: Optional["Metaflow"] = None,
_current_namespace: Optional[str] = None,
_current_metadata: Optional[str] = None,
):
# the default namespace is activated lazily at the first
# get_namespace(). The other option of activating
# the namespace at the import time is problematic, since there
# may be other modules that alter environment variables etc.
# which may affect the namespace setting.
self._metaflow = Metaflow(_current_metadata) or _metaflow
self._parent = _parent
self._path_components = None
self._attempt = attempt
self._current_namespace = _current_namespace or get_namespace()
self._namespace_check = _namespace_check
# If the current namespace is False, we disable checking for namespace for this
# and all children objects. Not setting namespace_check to False has the consequence
# of preventing access to children objects after the namespace changes
if self._current_namespace is None:
self._namespace_check = False
if self._attempt is not None:
if self._NAME not in ["task", "artifact"]:
raise MetaflowNotFound(
"Attempts can only be specified for Task or DataArtifact"
)
try:
self._attempt = int(self._attempt)
except ValueError:
raise MetaflowNotFound("Attempt can only be an integer")
if self._attempt < 0:
raise MetaflowNotFound("Attempt can only be non-negative")
elif self._attempt >= MAX_ATTEMPTS:
raise MetaflowNotFound(
"Attempt can only be smaller than %d" % MAX_ATTEMPTS
)
# NOTE: It is possible that no attempt exists, but we can't
# distinguish between "attempt will happen" and "no such
# attempt exists".
if pathspec and _object is None:
ids = pathspec.split("/")
if self._NAME == "flow" and len(ids) != 1:
raise MetaflowInvalidPathspec("Expects Flow('FlowName')")
elif self._NAME == "run" and len(ids) != 2:
raise MetaflowInvalidPathspec("Expects Run('FlowName/RunID')")
elif self._NAME == "step" and len(ids) != 3:
raise MetaflowInvalidPathspec("Expects Step('FlowName/RunID/StepName')")
elif self._NAME == "task" and len(ids) != 4:
raise MetaflowInvalidPathspec(
"Expects Task('FlowName/RunID/StepName/TaskID')"
)
elif self._NAME == "artifact" and len(ids) != 5:
raise MetaflowInvalidPathspec(
"Expects DataArtifact('FlowName/RunID/StepName/TaskID/ArtifactName')"
)
self.id = ids[-1]
self._pathspec = pathspec
self._object = self._get_object(*ids)
else:
self._object = _object
self._pathspec = pathspec
if self._NAME in ("flow", "task"):
self.id = str(self._object[self._NAME + "_id"])
elif self._NAME == "run":
self.id = str(self._object["run_number"])
elif self._NAME == "step":
self.id = str(self._object["step_name"])
elif self._NAME == "artifact":
self.id = str(self._object["name"])
else:
raise MetaflowInternalError(msg="Unknown type: %s" % self._NAME)
self._created_at = datetime.fromtimestamp(self._object["ts_epoch"] / 1000.0)
self._tags = frozenset(
chain(self._object.get("system_tags") or [], self._object.get("tags") or [])
)
self._user_tags = frozenset(self._object.get("tags") or [])
self._system_tags = frozenset(self._object.get("system_tags") or [])
if self._namespace_check and not self._is_in_namespace(self._current_namespace):
raise MetaflowNamespaceMismatch(self._current_namespace)
def _get_object(self, *path_components):
result = self._metaflow.metadata.get_object(
self._NAME, "self", None, self._attempt, *path_components
)
if not result:
raise MetaflowNotFound("%s does not exist" % self)
return result
def __iter__(self) -> Iterator["MetaflowObject"]:
"""
Iterate over all child objects of this object if any.
Note that only children present in the current namespace are returned if and
only if _namespace_check is set.
Yields
------
MetaflowObject
Children of this object
"""
query_filter = {}
# skip namespace filtering if _namespace_check is unset.
if self._namespace_check and self._current_namespace:
query_filter = {"any_tags": self._current_namespace}
unfiltered_children = self._metaflow.metadata.get_object(
self._NAME,
_CLASSES[self._CHILD_CLASS]._NAME,
query_filter,
self._attempt,
*self.path_components,
)
unfiltered_children = unfiltered_children if unfiltered_children else []
children = filter(
lambda x: self._iter_filter(x),
(
_CLASSES[self._CHILD_CLASS](
attempt=self._attempt,
_object=obj,
_parent=self,
_metaflow=self._metaflow,
_namespace_check=self._namespace_check,
_current_namespace=(
self._current_namespace if self._namespace_check else None
),
)
for obj in unfiltered_children
),
)
if children:
return iter(sorted(children, reverse=True, key=lambda x: x.created_at))
else:
return iter([])
def _iter_filter(self, x):
return True
def _filtered_children(self, *tags):
"""
Returns an iterator over all children.
If tags are specified, only children associated with all specified tags
are returned.
"""
for child in self:
if all(tag in child.tags for tag in tags):
yield child
def _ipython_key_completions_(self):
"""Returns available options for ipython auto-complete."""
return [child.id for child in self._filtered_children()]
@classmethod
def _url_token(cls):
return "%ss" % cls._NAME
def is_in_namespace(self) -> bool:
"""
Returns whether this object is in the current namespace.
If the current namespace is None, this will always return True.
Returns
-------
bool
Whether or not the object is in the current namespace
"""
return self._is_in_namespace(current_namespace)
def _is_in_namespace(self, ns: str) -> bool:
"""
Returns whether this object is in namespace passed in.
If the current namespace is None, this will always return True.
Parameters
----------
ns : str
Namespace to check if the object is in.
Returns
-------
bool
Whether or not the object is in the current namespace
"""
if self._NAME == "flow":
return any(True for _ in self)
else:
return ns is None or ns in self._tags
def __str__(self):
if self._attempt is not None:
return "%s('%s', attempt=%d)" % (
self.__class__.__name__,
self.pathspec,
self._attempt,
)
return "%s('%s')" % (self.__class__.__name__, self.pathspec)
def __repr__(self):
return str(self)
def _get_child(self, id):
result = []
for p in self.path_components:
result.append(p)
result.append(id)
return self._metaflow.metadata.get_object(
_CLASSES[self._CHILD_CLASS]._NAME, "self", None, self._attempt, *result
)
def __getitem__(self, id: str) -> "MetaflowObject":
"""
Returns the child object named 'id'.
Parameters
----------
id : str
Name of the child object
Returns
-------
MetaflowObject
Child object
Raises
------
KeyError
If the name does not identify a valid child object
"""
obj = self._get_child(id)
if obj:
return _CLASSES[self._CHILD_CLASS](
attempt=self._attempt,
_object=obj,
_parent=self,
_metaflow=self._metaflow,
_namespace_check=self._namespace_check,
_current_namespace=(
self._current_namespace if self._namespace_check else None
),
)
else:
raise KeyError(id)
def __contains__(self, id: str):
"""
Tests whether a child named 'id' exists.
Parameters
----------
id : str
Name of the child object
Returns
-------
bool
True if the child exists or False otherwise
"""
return bool(self._get_child(id))
def _unpickle_284(self, data):
if len(data) != 3:
raise MetaflowInternalError(
"Unexpected size of array: {}".format(len(data))
)
pathspec, attempt, namespace_check = data
self.__init__(
pathspec=pathspec, attempt=attempt, _namespace_check=namespace_check
)
def _unpickle_2124(self, data):
if len(data) != 4:
raise MetaflowInternalError(
"Unexpected size of array: {}".format(len(data))
)
pathspec, attempt, ns, namespace_check = data
self.__init__(
pathspec=pathspec,
attempt=attempt,
_namespace_check=namespace_check,
_current_namespace=ns,
)
def _unpickle_21227(self, data):
if len(data) != 5:
raise MetaflowInternalError(
"Unexpected size of array: {}".format(len(data))
)
pathspec, attempt, md, ns, namespace_check = data
self.__init__(
pathspec=pathspec,
attempt=attempt,
_namespace_check=namespace_check,
_current_metadata=md,
_current_namespace=ns,
)
_UNPICKLE_FUNC = {
"2.8.4": _unpickle_284,
"2.12.4": _unpickle_2124,
"2.12.27": _unpickle_21227,
}
def __setstate__(self, state):
"""
This function is used during the unpickling operation.
More info here https://docs.python.org/3/library/pickle.html#object.__setstate__
"""
if "version" in state and "data" in state:
version = state["version"]
if version not in self._UNPICKLE_FUNC:
# this happens when an object pickled using a newer version of Metaflow is
# being un-pickled using an older version of Metaflow
raise MetaflowInternalError(
"Unpickling this object requires a Metaflow version greater than or equal to {}".format(
version
)
)
self._UNPICKLE_FUNC[version](self, state["data"])
else:
# For backward compatibility: handles pickled objects that were serialized without a __getstate__ override
# We set namespace_check to False if it doesn't exist so that the user can
# continue accessing this object once unpickled.
self.__init__(
pathspec=state.get("_pathspec", None),
attempt=state.get("_attempt", None),
_namespace_check=state.get("_namespace_check", False),
_current_namespace=None,
)
def __getstate__(self):
"""
This function is used during the pickling operation.
More info here https://docs.python.org/3/library/pickle.html#object.__getstate__
This function is not forward compatible i.e., if this object (or any of the objects deriving
from this object) are pickled (serialized) in a later version of Metaflow, it may not be possible
to unpickle (deserialize) them in a previous version of Metaflow.
"""
# Note that we now record the namespace at the time of the object creation so
# we don't need to force namespace_check to be False and can properly continue
# checking for the namespace even after unpickling since we will know which
# namespace to check.
return {
"version": "2.12.27",
"data": [
self.pathspec,
self._attempt,
self._metaflow.metadata.metadata_str(),
self._current_namespace,
self._namespace_check,
],
}
@property
def tags(self) -> FrozenSet[str]:
"""
Tags associated with this object.
Tags can be user defined or system defined. This returns all tags associated
with the object.
Returns
-------
Set[str]
Tags associated with the object
"""
return self._tags
@property
def system_tags(self) -> FrozenSet[str]:
"""
System defined tags associated with this object.
Returns
-------
Set[str]
System tags associated with the object
"""
return self._system_tags
@property
def user_tags(self) -> FrozenSet[str]:
"""
User defined tags associated with this object.
Returns
-------
Set[str]
User tags associated with the object
"""
return self._user_tags
@property
def created_at(self) -> datetime:
"""
Creation time for this object.
This corresponds to the time the object's existence was first created which typically means
right before any code is run.
Returns
-------
datetime
Date time of this object's creation.
"""
return self._created_at
@property
def origin_pathspec(self) -> Optional[str]:
"""
The pathspec of the object from which the current object was cloned.
Returns:
str, optional
pathspec of the origin object from which current object was cloned.
"""
origin_pathspec = None
if self._NAME == "run":
latest_step = next(self.steps())
if latest_step and latest_step.task:
# If we had a step
task = latest_step.task
origin_run_id = [
m.value for m in task.metadata if m.name == "origin-run-id"
]
if origin_run_id:
origin_pathspec = "%s/%s" % (self.parent.id, origin_run_id[0])
else:
parent_pathspec = self.parent.origin_pathspec if self.parent else None
if parent_pathspec:
my_id = self.id
origin_task_id = None
if self._NAME == "task":
origin_task_id = [
m.value for m in self.metadata if m.name == "origin-task-id"
]
if origin_task_id:
my_id = origin_task_id[0]
else:
my_id = None
if my_id is not None:
origin_pathspec = "%s/%s" % (parent_pathspec, my_id)
return origin_pathspec
@property
def parent(self) -> Optional["MetaflowObject"]:
"""
Returns the parent object of this object or None if none exists.
Returns
-------
MetaflowObject, optional
The parent of this object
"""
if self._NAME == "flow":
return None
# Compute parent from pathspec and cache it.
if self._parent is None:
pathspec = self.pathspec
parent_pathspec = pathspec[: pathspec.rfind("/")]
# Only artifacts and tasks have attempts right now, so we get the
# right parent if we are an artifact.
attempt_to_pass = self._attempt if self._NAME == "artifact" else None
# We can skip the namespace check because if self._NAME = 'run',
# the parent object is guaranteed to be in namespace.
# Otherwise the check is moot for Flow since parent is singular.
self._parent = _CLASSES[self._PARENT_CLASS](
parent_pathspec, attempt=attempt_to_pass, _namespace_check=False
)
return self._parent
@property
def pathspec(self) -> str:
"""
Returns a string representation uniquely identifying this object.
The string is the same as the one you would pass into the constructor
to build this object except if you are looking for a specific attempt of
a task or a data artifact (in which case you need to add `attempt=<attempt>`
in the constructor).
Returns
-------
str
Unique representation of this object
"""
if self._pathspec is None:
if self.parent is None:
self._pathspec = self.id
else:
parent_pathspec = self.parent.pathspec
self._pathspec = os.path.join(parent_pathspec, self.id)
return self._pathspec
@property
def path_components(self) -> List[str]:
"""
List of individual components of the pathspec.
Returns
-------
List[str]
Individual components of the pathspec
"""
if self._path_components is None:
ids = self.pathspec.split("/")
self._path_components = ids
return list(self._path_components)
| MetaflowObject |
python | dagster-io__dagster | python_modules/libraries/dagster-shared/dagster_shared/serdes/objects/package_entry.py | {
"start": 1742,
"end": 1913
} | class ____(ABC):
@property
@abstractmethod
def feature(self) -> EnvRegistryObjectFeature:
pass
@whitelist_for_serdes
@record
| EnvRegistryObjectFeatureData |
python | python-excel__xlrd | xlrd/xldate.py | {
"start": 1342,
"end": 1415
} | class ____(XLDateError):
"Gregorian year 10000 or later"
| XLDateTooLarge |
python | PrefectHQ__prefect | src/integrations/prefect-docker/tests/test_images.py | {
"start": 153,
"end": 2530
} | class ____:
async def test_tag_and_all_tags(self, mock_docker_client_from_env: MagicMock):
pull_kwargs = dict(repository="prefecthq/prefect", tag="latest", all_tags=True)
with pytest.raises(
ValueError, match="Cannot pass `tags` and `all_tags` together"
):
with disable_run_logger():
await pull_docker_image.fn(**pull_kwargs)
async def test_defaults(self, mock_docker_client_from_env: MagicMock):
with disable_run_logger():
image = await pull_docker_image.fn(repository="prefecthq/prefect")
assert image.id == "id_1"
async def test_host(self, mock_docker_host: MagicMock):
pull_kwargs = dict(
repository="prefecthq/prefect",
)
with disable_run_logger():
image = await pull_docker_image.fn(
docker_host=mock_docker_host, **pull_kwargs
)
assert image.id == "id_1"
client = mock_docker_host.get_client()
client.__enter__.return_value.images.pull.assert_called_once_with(
all_tags=False, **pull_kwargs
)
async def test_login(
self,
mock_docker_host: MagicMock,
mock_docker_registry_credentials: MagicMock,
):
pull_kwargs = dict(
repository="prefecthq/prefect",
tag="latest",
)
with disable_run_logger():
image = await pull_docker_image.fn(
docker_host=mock_docker_host,
docker_registry_credentials=mock_docker_registry_credentials,
**pull_kwargs,
)
assert image.id == "id_1"
client = mock_docker_host.get_client()
client.__enter__.return_value.images.pull.assert_called_once_with(
all_tags=False, **pull_kwargs
)
async def test_all_tags(self, mock_docker_host: MagicMock):
pull_kwargs = dict(repository="prefecthq/prefect", all_tags=True)
with disable_run_logger():
images = await pull_docker_image.fn(
docker_host=mock_docker_host, **pull_kwargs
)
images = [image.id for image in images]
assert images == ["id_1", "id_2"]
client = mock_docker_host.get_client()
client.__enter__.return_value.images.pull.assert_called_once_with(**pull_kwargs)
| TestPullDockerImage |
python | django__django | tests/contenttypes_tests/models.py | {
"start": 517,
"end": 720
} | class ____(models.Model):
title = models.CharField(max_length=100)
slug = models.SlugField()
author = models.ForeignKey(Author, models.CASCADE)
date_created = models.DateTimeField()
| Article |
python | kamyu104__LeetCode-Solutions | Python/number-of-distinct-subarrays-with-at-most-k-odd-integers.py | {
"start": 108,
"end": 909
} | class ____(object):
def distinctSubarraysWithAtMostKOddIntegers(self, A, K):
def countDistinct(A, left, right, trie): # Time: O(n), Space: O(t)
result = 0
for i in reversed(xrange(left, right+1)):
if A[i] not in trie:
result += 1
trie = trie[A[i]]
return result
_trie = lambda: collections.defaultdict(_trie)
trie = _trie()
result, left, count = 0, 0, 0
for right in xrange(len(A)):
count += A[right]%2
while count > K:
count -= A[left]%2
left += 1
result += countDistinct(A, left, right, trie)
return result
# Time: O(n^2)
# Space: O(t), t is the size of trie
# suffix tree solution
| Solution |
python | bokeh__bokeh | tests/unit/bokeh/embed/test_server__embed.py | {
"start": 10944,
"end": 11262
} | class ____:
def test_True(self) -> None:
assert bes._process_relative_urls(True, "") == ""
assert bes._process_relative_urls(True, "/stuff") == ""
def test_Flase(self) -> None:
assert bes._process_relative_urls(False, "/stuff") == "&bokeh-absolute-url=/stuff"
| Test__process_relative_urls |
python | kamyu104__LeetCode-Solutions | Python/create-components-with-same-value.py | {
"start": 53,
"end": 1293
} | class ____(object):
def componentValue(self, nums, edges):
"""
:type nums: List[int]
:type edges: List[List[int]]
:rtype: int
"""
def bfs(target):
total = nums[:]
lookup = [len(adj[u]) for u in xrange(len(adj))]
q = [u for u in xrange(len(adj)) if lookup[u] == 1]
while q:
new_q = []
for u in q:
if total[u] > target:
return False
if total[u] == target:
total[u] = 0
for v in adj[u]:
total[v] += total[u]
lookup[v] -= 1
if lookup[v] == 1:
new_q.append(v)
q = new_q
return True
result = 0
adj = [[] for _ in xrange(len(nums))]
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
total = sum(nums)
for cnt in reversed(xrange(2, len(nums)+1)):
if total%cnt == 0 and bfs(total//cnt):
return cnt-1
return 0
# Time: O(n * sqrt(n))
# Space: O(n)
# iterative dfs, greedy
| Solution |
python | kamyu104__LeetCode-Solutions | Python/count-the-number-of-substrings-with-dominant-ones.py | {
"start": 83,
"end": 800
} | class ____(object):
def numberOfSubstrings(self, s):
"""
:type s: str
:rtype: int
"""
result = 0
idxs = [-1]+[i for i, x in enumerate(s) if x == '0']+[len(s)]
curr = 1
for i in xrange(len(s)):
if idxs[curr] == i:
curr += 1
for c in xrange(min(int((-1+(1+4*(i+1))**0.5)/2)+1, curr)): # since c^2 <= (i+1)-c, thus c <= (-1+(1+4*(i+1))**0.5)/2
if c**2 <= (i-idxs[(curr-c)-1])-c:
result += min(min(idxs[curr-c], i)-idxs[(curr-c)-1], ((i-idxs[(curr-c)-1])-c)-c**2+1)
return result
# Time: O(n * sqrt(n)) = O(n^(3/2))
# Space: O(n)
# two pointers, sliding window
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefaultClass3.py | {
"start": 2565,
"end": 3143
} | class ____[T1 = str, T2 = T1, *Ts1 = Unpack[tuple[T1, T2]]]: ...
ta1 = ClassTA()
reveal_type(ta1, expected_text="ClassTA[str, str, str, str]")
ta2 = ClassTA[int]()
reveal_type(ta2, expected_text="ClassTA[int, int, int, int]")
ta3 = ClassTA[int, float]()
reveal_type(ta3, expected_text="ClassTA[int, float, int, float]")
ta4 = ClassTA[int, float, *tuple[None, ...]]()
reveal_type(ta4, expected_text="ClassTA[int, float, *tuple[None, ...]]")
# This should generate an error because Ts1 depends on T2.
# It will generate a second error because T2 follows a TypeVarTuple.
| ClassTA |
python | kamyu104__LeetCode-Solutions | Python/steps-to-make-array-non-decreasing.py | {
"start": 539,
"end": 1024
} | class ____(object):
def totalSteps(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
dp = [0]*len(nums) # dp[i]: number of rounds for nums[i] to be removed
stk = []
for i in xrange(len(nums)):
curr = 0
while stk and nums[stk[-1]] <= nums[i]:
curr = max(curr, dp[stk.pop()])
if stk:
dp[i] = curr+1
stk.append(i)
return max(dp)
| Solution2 |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess8.py | {
"start": 396,
"end": 655
} | class ____:
bar: Column[str] = Column()
baz: Column[list[int]] = Column()
foo = Foo()
v1 = foo.bar
reveal_type(v1, expected_text="str")
foo.bar = ""
del foo.bar
v2 = foo.baz
reveal_type(v2, expected_text="list[int]")
foo.baz = [1]
del foo.baz
| Foo |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/compute.py | {
"start": 23759,
"end": 26286
} | class ____(ComputeEngineBaseOperator):
"""
Starts an instance in Google Compute Engine.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ComputeEngineStartInstanceOperator`
:param zone: Google Cloud zone where the instance exists.
:param resource_id: Name of the Compute Engine instance resource.
:param project_id: Optional, Google Cloud Project ID where the Compute
Engine Instance exists. If set to None or missing, the default project_id from the Google Cloud
connection is used.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param api_version: Optional, API version used (for example v1 - or beta). Defaults
to v1.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
operator_extra_links = (ComputeInstanceDetailsLink(),)
# [START gce_instance_start_template_fields]
template_fields: Sequence[str] = (
"project_id",
"zone",
"resource_id",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gce_instance_start_template_fields]
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.resource_id:
raise AirflowException("The required parameter 'resource_id' is missing. ")
def execute(self, context: Context) -> None:
hook = ComputeEngineHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
ComputeInstanceDetailsLink.persist(
context=context,
project_id=self.project_id or hook.project_id,
)
hook.start_instance(zone=self.zone, resource_id=self.resource_id, project_id=self.project_id)
| ComputeEngineStartInstanceOperator |
python | sympy__sympy | sympy/sets/contains.py | {
"start": 264,
"end": 1829
} | class ____(Boolean):
"""
Asserts that x is an element of the set S.
Examples
========
>>> from sympy import Symbol, Integer, S, Contains
>>> Contains(Integer(2), S.Integers)
True
>>> Contains(Integer(-2), S.Naturals)
False
>>> i = Symbol('i', integer=True)
>>> Contains(i, S.Naturals)
Contains(i, Naturals)
References
==========
.. [1] https://en.wikipedia.org/wiki/Element_%28mathematics%29
"""
def __new__(cls, x, s, evaluate=None):
x = sympify(x)
s = sympify(s)
if evaluate is None:
evaluate = global_parameters.evaluate
if not isinstance(s, Set):
raise TypeError('expecting Set, not %s' % func_name(s))
if evaluate:
# _contains can return symbolic booleans that would be returned by
# s.contains(x) but here for Contains(x, s) we only evaluate to
# true, false or return the unevaluated Contains.
result = s._contains(x)
if isinstance(result, Boolean):
if result in (S.true, S.false):
return result
elif result is not None:
raise TypeError("_contains() should return Boolean or None")
return super().__new__(cls, x, s)
@property
def binary_symbols(self):
return set().union(*[i.binary_symbols
for i in self.args[1].args
if i.is_Boolean or i.is_Symbol or
isinstance(i, (Eq, Ne))])
def as_set(self):
return self.args[1]
| Contains |
python | getsentry__sentry | src/sentry/integrations/vsts/client.py | {
"start": 6814,
"end": 19372
} | class ____(IntegrationProxyClient, RepositoryClient):
integration_name = IntegrationProviderSlug.AZURE_DEVOPS.value
api_version = "4.1" # TODO: update api version
api_version_preview = "-preview.1"
_identity: Identity | None = None
def __init__(
self,
base_url: str,
oauth_redirect_url: str,
org_integration_id: int,
identity_id: int | None = None,
) -> None:
self.base_url = base_url
self.identity_id = identity_id
self.oauth_redirect_url = oauth_redirect_url
super().__init__(org_integration_id=org_integration_id)
@property
def identity(self):
if self._identity:
return self._identity
self._identity = Identity.objects.get(id=self.identity_id)
return self._identity
def request(self, method: str, *args: Any, **kwargs: Any) -> Any:
api_preview = kwargs.pop("api_preview", False)
base_headers = prepare_headers(
api_version=self.api_version,
method=method,
api_version_preview=self.api_version_preview if api_preview else "",
)
kwargs["headers"] = {**base_headers, **(kwargs.get("headers", {}))}
return self._request(method, *args, **kwargs)
@control_silo_function
def _refresh_auth_if_expired(self) -> None:
"""
Checks if auth is expired and if so refreshes it
"""
time_expires = self.identity.data.get("expires")
if time_expires is None:
raise InvalidIdentity("VstsApiClient requires identity with specified expired time")
if int(time_expires) <= int(time()):
# TODO(iamrajjoshi): Remove this after migration
# Need this here because there is no way to get any identifier which would tell us which method we should use to refresh the token
from sentry.identity.vsts.provider import VSTSNewIdentityProvider
from sentry.integrations.vsts.integration import VstsIntegrationProvider
integration = integration_service.get_integration(
organization_integration_id=self.org_integration_id, status=ObjectStatus.ACTIVE
)
if integration is None:
return
# check if integration has migrated to new identity provider
migration_version = integration.metadata.get("integration_migration_version", 0)
if migration_version < VstsIntegrationProvider.CURRENT_MIGRATION_VERSION:
self.identity.get_provider().refresh_identity(
self.identity, redirect_url=self.oauth_redirect_url
)
else:
VSTSNewIdentityProvider().refresh_identity(
self.identity, redirect_url=self.oauth_redirect_url
)
@control_silo_function
def authorize_request(
self,
prepared_request: PreparedRequest,
) -> PreparedRequest:
self._refresh_auth_if_expired()
access_token = self.identity.data["access_token"]
headers = prepare_auth_header(
access_token=access_token,
)
prepared_request.headers.update(headers)
return prepared_request
def create_subscription(self, shared_secret: str) -> dict[str, Any]:
return self.post(
VstsApiPath.subscriptions.format(instance=self.base_url),
data=_create_subscription_data(shared_secret),
)
def create_work_item(
self,
project: Project,
item_type: str | None = None,
title: str | None = None,
description: str | None = None,
comment: str | None = None,
) -> dict[str, Any]:
data = []
if title:
data.append({"op": "add", "path": FIELD_MAP["title"], "value": title})
if description:
data.append({"op": "add", "path": FIELD_MAP["description"], "value": description})
if comment:
data.append({"op": "add", "path": FIELD_MAP["comment"], "value": comment})
return self.patch(
VstsApiPath.work_items_create.format(
instance=self.base_url, project=project, type=item_type
),
data=data,
)
def update_work_item(
self,
id: str,
title: UnsettableString = UNSET,
description: UnsettableString = UNSET,
link: UnsettableString = UNSET,
comment: UnsettableString = UNSET,
assigned_to: UnsettableString = UNSET,
state: UnsettableString = UNSET,
) -> dict[str, Any]:
data: list[Mapping[str, Any]] = []
for f_name, f_value in (
("title", title),
("description", description),
("link", link),
("assigned_to", assigned_to),
("state", state),
):
if f_name == "link":
# XXX: Link is not yet used, as we can't explicitly bind it to Sentry.
continue
elif f_value is None:
data.append({"op": "remove", "path": FIELD_MAP[f_name]})
elif f_value is not UNSET:
data.append(
{
# TODO(dcramer): this is problematic when the link already exists
"op": "replace" if f_name != "link" else "add",
"path": FIELD_MAP[f_name],
"value": (
{"rel": "Hyperlink", "url": f_value} if f_name == "link" else f_value
),
}
)
if comment is not UNSET and comment:
data.append({"op": "add", "path": FIELD_MAP["comment"], "value": comment})
return self.patch(VstsApiPath.work_items.format(instance=self.base_url, id=id), data=data)
def get_work_item(self, id: int) -> dict[str, Any]:
return self.get(VstsApiPath.work_items.format(instance=self.base_url, id=id))
def get_work_item_states(self, project: str) -> dict[str, Any]:
# XXX: Until we add the option to enter the 'WorkItemType' for syncing status changes from
# Sentry to Azure DevOps, we need will attempt to use the sequence below. There are certain
# ADO configurations which don't have 'Bug' or 'Issue', hence iterating until we find a match.
for check_type in ("Bug", "Issue", "Task"):
response = self.get(
VstsApiPath.work_item_states.format(
instance=self.base_url,
project=project,
type=check_type,
),
api_preview=True,
)
if response.get("count", 0) > 0:
break
return response
def get_work_item_categories(self, project: str) -> dict[str, Any]:
return self.get(
VstsApiPath.work_item_categories.format(instance=self.base_url, project=project)
)
def get_repo(self, name_or_id: str, project: str | None = None) -> dict[str, Any]:
return self.get(
VstsApiPath.repository.format(
instance=self.base_url,
project=f"{project}/" if project else "",
repo_id=name_or_id,
)
)
def get_repos(self, project: str | None = None) -> dict[str, Any]:
return self.get(
VstsApiPath.repositories.format(
instance=self.base_url, project=f"{project}/" if project else ""
),
timeout=5,
)
def get_commits(self, repo_id: str, commit: str, limit: int = 100) -> dict[str, Any]:
return self.get(
VstsApiPath.commits.format(instance=self.base_url, repo_id=repo_id),
params={"commit": commit, "$top": limit},
)
def get_commit(self, repo_id: str, commit: str) -> dict[str, Any]:
return self.get(
VstsApiPath.commit.format(instance=self.base_url, repo_id=repo_id, commit_id=commit)
)
def get_commit_filechanges(self, repo_id: str, commit: str) -> list[dict[str, Any]]:
resp = self.get(
VstsApiPath.commits_changes.format(
instance=self.base_url, repo_id=repo_id, commit_id=commit
)
)
changes = resp["changes"]
return changes
def get_commit_range(self, repo_id: str, start_sha: str, end_sha: str) -> dict[str, Any]:
return self.post(
VstsApiPath.commits_batch.format(instance=self.base_url, repo_id=repo_id),
data={
"itemVersion": {"versionType": "commit", "version": start_sha},
"compareVersion": {"versionType": "commit", "version": end_sha},
},
)
def get_project(self, project_id: str) -> dict[str, Any]:
return self.get(
VstsApiPath.project.format(instance=self.base_url, project_id=project_id),
params={"stateFilter": "WellFormed"},
)
def get_projects(self) -> list[dict[str, Any]]:
def gen_params(page_number: int, page_size: int) -> Mapping[str, str | int]:
# ADO supports a continuation token in the response but only in the newer API version (
# https://docs.microsoft.com/en-us/rest/api/azure/devops/core/projects/list?view=azure-devops-rest-6.1
# ). The token comes as a response header instead of the body and our API clients
# currently only return the body we can use count, $skip, and $top to get the same result.
offset = self.page_size * page_number
return {"stateFilter": "WellFormed", "$skip": offset, "$top": page_size}
def get_results(resp: Response) -> Sequence[Any]:
return resp["value"]
return self.get_with_pagination(
VstsApiPath.projects.format(instance=self.base_url),
gen_params=gen_params,
get_results=get_results,
)
def get_users(self, account_name: str, continuation_token: str | None = None) -> dict[str, Any]:
"""
Gets Users with access to a given account/organization
https://docs.microsoft.com/en-us/rest/api/azure/devops/graph/users/list?view=azure-devops-rest-4.1
"""
return self.get(
VstsApiPath.users.format(account_name=account_name),
api_preview=True,
params={"continuationToken": continuation_token},
)
def get_subscription(self, subscription_id: str) -> dict[str, Any]:
return self.get(
VstsApiPath.subscription.format(instance=self.base_url, subscription_id=subscription_id)
)
def delete_subscription(self, subscription_id: str) -> dict[str, Any]:
return self.delete(
VstsApiPath.subscription.format(instance=self.base_url, subscription_id=subscription_id)
)
def update_subscription(self, subscription_id: str) -> dict[str, Any]:
return self.put(
VstsApiPath.subscription.format(instance=self.base_url, subscription_id=subscription_id)
)
def search_issues(self, account_name: str, query: str | None = None) -> dict[str, Any]:
return self.post(
VstsApiPath.work_item_search.format(account_name=account_name),
data={"searchText": query, "$top": 1000},
api_preview=True,
)
def check_file(self, repo: Repository, path: str, version: str | None) -> object | None:
return self.get_cached(
path=VstsApiPath.items.format(
instance=repo.config["instance"],
project=quote(repo.config["project"]),
repo_id=quote(repo.config["name"]),
),
params={
"path": path,
"api-version": "7.0",
"versionDescriptor.version": version,
},
)
def get_file(
self, repo: Repository, path: str, ref: str | None, codeowners: bool = False
) -> str:
response = self.get_cached(
path=VstsApiPath.items.format(
instance=repo.config["instance"],
project=quote(repo.config["project"]),
repo_id=quote(repo.config["name"]),
),
params={
"path": path,
"api-version": "7.0",
"versionDescriptor.version": ref,
"download": "true",
},
headers={"Accept": "*/*"},
raw_response=True,
)
return response.text
| VstsApiClient |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/strategies/_internal/random.py | {
"start": 12779,
"end": 13419
} | class ____(SearchStrategy[HypothesisRandom]):
def __init__(self, *, note_method_calls: bool, use_true_random: bool) -> None:
super().__init__()
self.__note_method_calls = note_method_calls
self.__use_true_random = use_true_random
def do_draw(self, data: ConjectureData) -> HypothesisRandom:
if self.__use_true_random:
seed = data.draw_integer(0, 2**64 - 1)
return TrueRandom(seed=seed, note_method_calls=self.__note_method_calls)
else:
return ArtificialRandom(
note_method_calls=self.__note_method_calls, data=data
)
| RandomStrategy |
python | getsentry__sentry | tests/sentry/api/endpoints/test_project_rule_enable.py | {
"start": 578,
"end": 7120
} | class ____(APITestCase):
endpoint = "sentry-api-0-project-rule-enable"
method = "PUT"
def setUp(self) -> None:
self.rule = self.create_project_rule(project=self.project)
self.login_as(user=self.user)
@patch("sentry.analytics.record")
def test_simple(self, record_analytics: MagicMock) -> None:
self.rule.status = ObjectStatus.DISABLED
self.rule.save()
with outbox_runner():
self.get_success_response(
self.organization.slug,
self.project.slug,
self.rule.id,
status_code=status.HTTP_202_ACCEPTED,
)
assert Rule.objects.filter(id=self.rule.id, status=ObjectStatus.ACTIVE).exists()
with assume_test_silo_mode(SiloMode.CONTROL):
assert AuditLogEntry.objects.filter(
organization_id=self.organization.id,
target_object=self.rule.id,
event=audit_log.get_event_id("RULE_EDIT"),
).exists()
assert_any_analytics_event(
record_analytics,
RuleReenableExplicit(
rule_id=self.rule.id,
user_id=self.user.id,
organization_id=self.organization.id,
),
)
def test_rule_enabled(self) -> None:
"""Test that we do not accept an enabled rule"""
response = self.get_error_response(
self.organization.slug,
self.project.slug,
self.rule.id,
status_code=status.HTTP_400_BAD_REQUEST,
)
assert response.data["detail"] == "Rule is not disabled."
def test_duplicate_rule(self) -> None:
"""Test that we do not allow enabling a rule that is an exact duplicate of another rule in the same project"""
conditions = [
{
"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition",
}
]
actions = [
{
"targetType": "IssueOwners",
"fallthroughType": "ActiveMembers",
"id": "sentry.mail.actions.NotifyEmailAction",
"targetIdentifier": "",
}
]
rule = self.create_project_rule(
project=self.project, action_data=actions, condition_data=conditions
)
rule2 = self.create_project_rule(
project=self.project, action_data=actions, condition_data=conditions
)
rule2.status = ObjectStatus.DISABLED
rule2.save()
response = self.get_error_response(
self.organization.slug,
self.project.slug,
rule2.id,
status_code=status.HTTP_400_BAD_REQUEST,
)
assert (
response.data["detail"]
== f"This rule is an exact duplicate of '{rule.label}' in this project and may not be enabled unless it's edited."
)
def test_duplicate_rule_diff_env(self) -> None:
"""Test that we do allow enabling a rule that's the exact duplicate of another
rule in the same project EXCEPT that the environment is different"""
conditions = [
{
"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition",
}
]
actions = [
{
"targetType": "IssueOwners",
"fallthroughType": "ActiveMembers",
"id": "sentry.mail.actions.NotifyEmailAction",
"targetIdentifier": "",
}
]
dev_env = self.create_environment(self.project, name="dev", organization=self.organization)
prod_env = self.create_environment(
self.project, name="prod", organization=self.organization
)
self.create_project_rule(
project=self.project,
action_data=actions,
condition_data=conditions,
environment_id=dev_env.id,
)
rule2 = self.create_project_rule(
project=self.project,
action_data=actions,
condition_data=conditions,
environment_id=prod_env.id,
)
rule2.status = ObjectStatus.DISABLED
rule2.save()
self.get_success_response(
self.organization.slug,
self.project.slug,
rule2.id,
status_code=status.HTTP_202_ACCEPTED,
)
def test_duplicate_rule_one_env_one_not(self) -> None:
"""Test that we do allow enabling a rule that's the exact duplicate of another
rule in the same project EXCEPT that the environment is set for only one"""
conditions = [
{
"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition",
}
]
actions = [
{
"targetType": "IssueOwners",
"fallthroughType": "ActiveMembers",
"id": "sentry.mail.actions.NotifyEmailAction",
"targetIdentifier": "",
}
]
dev_env = self.create_environment(self.project, name="dev", organization=self.organization)
self.create_project_rule(
project=self.project,
action_data=actions,
condition_data=conditions,
environment_id=dev_env.id,
)
rule2 = self.create_project_rule(
project=self.project,
action_data=actions,
condition_data=conditions,
)
rule2.status = ObjectStatus.DISABLED
rule2.save()
self.get_success_response(
self.organization.slug,
self.project.slug,
rule2.id,
status_code=status.HTTP_202_ACCEPTED,
)
def test_no_action_rule(self) -> None:
"""Test that we do not allow enabling a rule that has no action(s)"""
conditions = [
{
"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition",
}
]
rule = Rule.objects.create(
project=self.project,
data={"conditions": conditions, "action_match": "all"},
)
rule.status = ObjectStatus.DISABLED
rule.save()
response = self.get_error_response(
self.organization.slug,
self.project.slug,
rule.id,
status_code=status.HTTP_400_BAD_REQUEST,
)
assert response.data["detail"] == "Cannot enable a rule with no action."
| ProjectRuleEnableTestCase |
python | getsentry__sentry | tests/sentry/api/endpoints/test_organization_releases.py | {
"start": 88483,
"end": 94996
} | class ____(APITestCase):
def setUp(self) -> None:
self.login_as(user=self.user)
org = self.create_organization(owner=self.user)
team = self.create_team(organization=org, members=[self.user])
project1 = self.create_project(organization=org, teams=[team], name="foo")
project2 = self.create_project(organization=org, teams=[team], name="bar")
env1 = self.make_environment("prod", project1)
env2 = self.make_environment("staging", project2)
release1 = Release.objects.create(
organization_id=org.id,
version="1",
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386, tzinfo=UTC),
)
release1.add_project(project1)
ReleaseProjectEnvironment.objects.create(
project_id=project1.id, release_id=release1.id, environment_id=env1.id
)
release2 = Release.objects.create(
organization_id=org.id,
version="2",
date_added=datetime(2013, 8, 14, 3, 8, 24, 880386, tzinfo=UTC),
)
release2.add_project(project2)
ReleaseProjectEnvironment.objects.create(
project_id=project2.id, release_id=release2.id, environment_id=env2.id
)
release3 = Release.objects.create(
organization_id=org.id,
version="3",
date_added=datetime(2013, 8, 12, 3, 8, 24, 880386, tzinfo=UTC),
date_released=datetime(2013, 8, 15, 3, 8, 24, 880386, tzinfo=UTC),
)
release3.add_project(project1)
ReleaseProjectEnvironment.objects.create(
project_id=project1.id, release_id=release3.id, environment_id=env2.id
)
release4 = Release.objects.create(organization_id=org.id, version="4")
release4.add_project(project2)
release5 = Release.objects.create(organization_id=org.id, version="5")
release5.add_project(project1)
release5.add_project(project2)
ReleaseProjectEnvironment.objects.create(
project_id=project1.id, release_id=release5.id, environment_id=env1.id
)
ReleaseProjectEnvironment.objects.create(
project_id=project2.id, release_id=release5.id, environment_id=env2.id
)
self.project1 = project1
self.project2 = project2
self.release1 = release1
self.release2 = release2
self.release3 = release3
self.release4 = release4
self.release5 = release5
self.env1 = env1
self.env2 = env2
self.org = org
def make_environment(self, name, project):
env = Environment.objects.create(organization_id=project.organization_id, name=name)
env.add_project(project)
return env
def assert_releases(self, response, releases):
assert response.status_code == 200, response.content
assert len(response.data) == len(releases)
response_versions = sorted(r["version"] for r in response.data)
releases_versions = sorted(r.version for r in releases)
assert response_versions == releases_versions
def test_environments_filter(self) -> None:
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": self.org.slug}
)
response = self.client.get(url + "?environment=" + self.env1.name, format="json")
self.assert_releases(response, [self.release1, self.release5])
response = self.client.get(url + "?environment=" + self.env2.name, format="json")
self.assert_releases(response, [self.release2, self.release3, self.release5])
def test_empty_environment(self) -> None:
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": self.org.slug}
)
env = self.make_environment("", self.project2)
ReleaseProjectEnvironment.objects.create(
project_id=self.project2.id, release_id=self.release4.id, environment_id=env.id
)
response = self.client.get(url + "?environment=", format="json")
self.assert_releases(response, [self.release4])
def test_all_environments(self) -> None:
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": self.org.slug}
)
response = self.client.get(url, format="json")
self.assert_releases(
response, [self.release1, self.release2, self.release3, self.release4, self.release5]
)
def test_invalid_environment(self) -> None:
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": self.org.slug}
)
response = self.client.get(url + "?environment=" + "invalid_environment", format="json")
assert response.status_code == 404
def test_specify_project_ids(self) -> None:
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": self.org.slug}
)
response = self.client.get(url, format="json", data={"project": self.project1.id})
self.assert_releases(response, [self.release1, self.release3, self.release5])
response = self.client.get(url, format="json", data={"project": self.project2.id})
self.assert_releases(response, [self.release2, self.release4, self.release5])
response = self.client.get(
url, format="json", data={"project": [self.project1.id, self.project2.id]}
)
self.assert_releases(
response, [self.release1, self.release2, self.release3, self.release4, self.release5]
)
def test_date_range(self) -> None:
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": self.org.slug}
)
response = self.client.get(
url,
format="json",
data={
"start": (datetime.now() - timedelta(days=1)).isoformat() + "Z",
"end": datetime.now().isoformat() + "Z",
},
)
self.assert_releases(response, [self.release4, self.release5])
def test_invalid_date_range(self) -> None:
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": self.org.slug}
)
response = self.client.get(url, format="json", data={"start": "null", "end": "null"})
assert response.status_code == 400
| OrganizationReleaseListEnvironmentsTest |
python | getsentry__sentry | src/sentry/tagstore/types.py | {
"start": 4652,
"end": 4984
} | class ____(TagValueSerializerResponseOptional):
key: str
name: str
value: str
count: int
# Empty values do not have last seen timestamps.
lastSeen: str | None
# Empty values do not have first seen timestamps.
firstSeen: str | None
@register(GroupTagValue)
@register(TagValue)
| TagValueSerializerResponse |
python | spack__spack | lib/spack/spack/config.py | {
"start": 35795,
"end": 39319
} | class ____:
"""Base properties for all includes."""
name: str
when: str
optional: bool
prefer_modify: bool
_scopes: List[ConfigScope]
def __init__(self, entry: dict):
self.name = entry.get("name", "")
self.when = entry.get("when", "")
self.optional = entry.get("optional", False)
self.prefer_modify = entry.get("prefer_modify", False)
self._scopes = []
def _scope(
self, path: str, config_path: str, parent_scope: ConfigScope
) -> Optional[ConfigScope]:
"""Instantiate a configuration scope for the configuration path.
Args:
path: raw include path
config_path: configuration path
parent_scope: including scope
Returns: configuration scopes
Raises:
ValueError: the required configuration path does not exist
"""
# use specified name if there is one
config_name = self.name
if not config_name:
# Try to use the relative path to create the included scope name
parent_path = getattr(parent_scope, "path", None)
if parent_path and str(parent_path) == os.path.commonprefix(
[parent_path, config_path]
):
included_name = os.path.relpath(config_path, parent_path)
else:
included_name = config_path
if sys.platform == "win32":
# Clean windows path for use in config name that looks nicer
# ie. The path: C:\\some\\path\\to\\a\\file
# becomes C/some/path/to/a/file
included_name = included_name.replace("\\", "/")
included_name = included_name.replace(":", "")
config_name = f"{parent_scope.name}:{included_name}"
if os.path.isdir(config_path):
# directories are treated as regular ConfigScopes
tty.debug(f"Creating DirectoryConfigScope {config_name} for '{config_path}'")
return DirectoryConfigScope(config_name, config_path, prefer_modify=self.prefer_modify)
if os.path.exists(config_path):
# files are assumed to be SingleFileScopes
tty.debug(f"Creating SingleFileScope {config_name} for '{config_path}'")
return SingleFileScope(
config_name,
config_path,
spack.schema.merged.schema,
prefer_modify=self.prefer_modify,
)
if not self.optional:
dest = f" at ({config_path})" if config_path != path else ""
raise ValueError(f"Required path ({path}) does not exist{dest}")
return None
def evaluate_condition(self) -> bool:
# circular dependencies
import spack.spec
return (not self.when) or spack.spec.eval_conditional(self.when)
def scopes(self, parent_scope: ConfigScope) -> List[ConfigScope]:
"""Instantiate configuration scopes.
Args:
parent_scope: including scope
Returns: configuration scopes IF the when condition is satisfied;
otherwise, an empty list.
Raises:
ValueError: the required configuration path does not exist
"""
raise NotImplementedError("must be implemented in derived classes")
@property
def paths(self) -> List[str]:
"""Path(s) associated with the include."""
raise NotImplementedError("must be implemented in derived classes")
| OptionalInclude |
python | huggingface__transformers | tests/models/mobilevitv2/test_modeling_mobilevitv2.py | {
"start": 10671,
"end": 14682
} | class ____(unittest.TestCase):
@cached_property
def default_image_processor(self):
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
if is_vision_available()
else None
)
@slow
def test_inference_image_classification_head(self):
model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
torch_device
)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expectations = Expectations(
{
(None, None): [-1.6336e00, -7.3204e-02, -5.1883e-01],
("cuda", 8): [-1.6336, -0.0732, -0.5188],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=2e-4, atol=2e-4)
@slow
def test_inference_semantic_segmentation(self):
model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
model = model.to(torch_device)
image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
logits = outputs.logits
# verify the logits
expected_shape = torch.Size((1, 21, 32, 32))
self.assertEqual(logits.shape, expected_shape)
expectations = Expectations(
{
(None, None): [
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
],
("cuda", 8): [
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(logits[0, :3, :3, :3], expected_slice, rtol=2e-4, atol=2e-4)
@slow
def test_post_processing_semantic_segmentation(self):
model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
model = model.to(torch_device)
image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
outputs.logits = outputs.logits.detach().cpu()
segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
expected_shape = torch.Size((50, 60))
self.assertEqual(segmentation[0].shape, expected_shape)
segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
expected_shape = torch.Size((32, 32))
self.assertEqual(segmentation[0].shape, expected_shape)
| MobileViTV2ModelIntegrationTest |
python | numba__llvmlite | llvmlite/tests/test_ir.py | {
"start": 432,
"end": 3407
} | class ____(TestCase):
"""
Utilities for IR tests.
"""
def assertInText(self, pattern, text):
"""
Assert *pattern* is in *text*, ignoring any whitespace differences
(including newlines).
"""
def escape(c):
if not c.isalnum() and not c.isspace():
return '\\' + c
return c
pattern = ''.join(map(escape, pattern))
regex = re.sub(r'\s+', r'\\s*', pattern)
self.assertRegex(text, regex)
def assert_ir_line(self, line, mod):
lines = [line.strip() for line in str(mod).splitlines()]
self.assertIn(line, lines)
def assert_valid_ir(self, mod):
llvm.parse_assembly(str(mod))
def assert_pickle_correctly(self, irobject):
"""Assert that the IR object pickles and unpickles correctly.
The IR string is equal and that their type is equal
"""
newobject = pickle.loads(pickle.dumps(irobject, protocol=-1))
self.assertIs(irobject.__class__, newobject.__class__)
self.assertEqual(str(irobject), str(newobject))
return newobject
def module(self):
return ir.Module()
def function(self, module=None, name='my_func'):
module = module or self.module()
fnty = ir.FunctionType(int32, (int32, int32, dbl,
ir.PointerType(int32)))
return ir.Function(module, fnty, name)
def block(self, func=None, name=''):
func = func or self.function()
return func.append_basic_block(name)
def descr(self, thing):
buf = []
thing.descr(buf)
return "".join(buf)
def _normalize_asm(self, asm):
asm = textwrap.dedent(asm)
# Normalize indent
asm = asm.replace("\n ", "\n ")
return asm
def check_descr_regex(self, descr, asm):
expected = self._normalize_asm(asm)
self.assertRegex(descr, expected)
def check_descr(self, descr, asm):
expected = self._normalize_asm(asm)
self.assertEqual(descr, expected)
def check_block(self, block, asm):
self.check_descr(self.descr(block), asm)
def check_block_regex(self, block, asm):
self.check_descr_regex(self.descr(block), asm)
def check_module_body(self, module, asm):
expected = self._normalize_asm(asm)
actual = module._stringify_body()
self.assertEqual(actual.strip(), expected.strip())
def check_metadata(self, module, asm):
"""
Check module metadata against *asm*.
"""
expected = self._normalize_asm(asm)
actual = module._stringify_metadata()
self.assertEqual(actual.strip(), expected.strip())
def check_func_body(self, func, asm):
expected = self._normalize_asm(asm)
actual = self.descr(func)
actual = actual.partition('{')[2].rpartition('}')[0]
self.assertEqual(actual.strip(), expected.strip())
| TestBase |
python | scipy__scipy | scipy/sparse/linalg/_eigen/arpack/arpack.py | {
"start": 13029,
"end": 13675
} | class ____(ArpackError):
"""
ARPACK iteration did not converge
Attributes
----------
eigenvalues : ndarray
Partial result. Converged eigenvalues.
eigenvectors : ndarray
Partial result. Converged eigenvectors.
"""
def __init__(self, msg, eigenvalues, eigenvectors):
ArpackError.__init__(self, -1, {-1: msg})
self.eigenvalues = eigenvalues
self.eigenvectors = eigenvectors
def choose_ncv(k):
"""
Choose number of lanczos vectors based on target number
of singular/eigen values and vectors to compute, k.
"""
return max(2 * k + 1, 20)
| ArpackNoConvergence |
python | huggingface__transformers | src/transformers/models/qwen3_next/modular_qwen3_next.py | {
"start": 27615,
"end": 30383
} | class ____(Qwen3MoeDecoderLayer):
def __init__(self, config: Qwen3NextConfig, layer_idx: int):
nn.Module.__init__(self)
self.hidden_size = config.hidden_size
# token mixer
self.layer_type = config.layer_types[layer_idx]
if self.layer_type == "linear_attention":
self.linear_attn = Qwen3NextGatedDeltaNet(config, layer_idx)
elif self.layer_type == "full_attention":
self.self_attn = Qwen3NextAttention(config, layer_idx)
if (layer_idx not in config.mlp_only_layers) and (
config.num_experts > 0 and (layer_idx + 1) % config.decoder_sparse_step == 0
):
self.mlp = Qwen3NextSparseMoeBlock(config)
else:
self.mlp = Qwen3NextMLP(config, intermediate_size=config.intermediate_size)
self.input_layernorm = Qwen3NextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = Qwen3NextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> torch.FloatTensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Token Mixer
if self.layer_type == "linear_attention":
hidden_states = self.linear_attn(
hidden_states=hidden_states,
cache_params=past_key_values,
cache_position=cache_position,
attention_mask=attention_mask,
)
elif self.layer_type == "full_attention":
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
# For the MoE layers, we need to unpack
if isinstance(hidden_states, tuple):
hidden_states, _ = hidden_states
hidden_states = residual + hidden_states
return hidden_states
| Qwen3NextDecoderLayer |
python | streamlit__streamlit | lib/streamlit/elements/widgets/text_widgets.py | {
"start": 2457,
"end": 26665
} | class ____:
@overload
def text_input(
self,
label: str,
value: str = "",
max_chars: int | None = None,
key: Key | None = None,
type: Literal["default", "password"] = "default",
help: str | None = None,
autocomplete: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
placeholder: str | None = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
icon: str | None = None,
width: WidthWithoutContent = "stretch",
) -> str:
pass
@overload
def text_input(
self,
label: str,
value: SupportsStr | None = None,
max_chars: int | None = None,
key: Key | None = None,
type: Literal["default", "password"] = "default",
help: str | None = None,
autocomplete: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
placeholder: str | None = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
icon: str | None = None,
width: WidthWithoutContent = "stretch",
) -> str | None:
pass
@gather_metrics("text_input")
def text_input(
self,
label: str,
value: str | SupportsStr | None = "",
max_chars: int | None = None,
key: Key | None = None,
type: Literal["default", "password"] = "default",
help: str | None = None,
autocomplete: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
placeholder: str | None = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
icon: str | None = None,
width: WidthWithoutContent = "stretch",
) -> str | None:
r"""Display a single-line text input widget.
Parameters
----------
label : str
A short label explaining to the user what this input is for.
The label can optionally contain GitHub-flavored Markdown of the
following types: Bold, Italics, Strikethroughs, Inline Code, Links,
and Images. Images display like icons, with a max height equal to
the font height.
Unsupported Markdown elements are unwrapped so only their children
(text contents) render. Display unsupported elements as literal
characters by backslash-escaping them. E.g.,
``"1\. Not an ordered list"``.
See the ``body`` parameter of |st.markdown|_ for additional,
supported Markdown directives.
For accessibility reasons, you should never set an empty label, but
you can hide it with ``label_visibility`` if needed. In the future,
we may disallow empty labels by raising an exception.
.. |st.markdown| replace:: ``st.markdown``
.. _st.markdown: https://docs.streamlit.io/develop/api-reference/text/st.markdown
value : object or None
The text value of this widget when it first renders. This will be
cast to str internally. If ``None``, will initialize empty and
return ``None`` until the user provides input. Defaults to empty string.
max_chars : int or None
Max number of characters allowed in text input.
key : str or int
An optional string or integer to use as the unique key for the widget.
If this is omitted, a key will be generated for the widget
based on its content. No two widgets may have the same key.
type : "default" or "password"
The type of the text input. This can be either "default" (for
a regular text input), or "password" (for a text input that
masks the user's typed value). Defaults to "default".
help : str or None
A tooltip that gets displayed next to the widget label. Streamlit
only displays the tooltip when ``label_visibility="visible"``. If
this is ``None`` (default), no tooltip is displayed.
The tooltip can optionally contain GitHub-flavored Markdown,
including the Markdown directives described in the ``body``
parameter of ``st.markdown``.
autocomplete : str
An optional value that will be passed to the <input> element's
autocomplete property. If unspecified, this value will be set to
"new-password" for "password" inputs, and the empty string for
"default" inputs. For more details, see https://developer.mozilla.org/en-US/docs/Web/HTML/Attributes/autocomplete
on_change : callable
An optional callback invoked when this text input's value changes.
args : list or tuple
An optional list or tuple of args to pass to the callback.
kwargs : dict
An optional dict of kwargs to pass to the callback.
placeholder : str or None
An optional string displayed when the text input is empty. If None,
no text is displayed.
disabled : bool
An optional boolean that disables the text input if set to
``True``. The default is ``False``.
label_visibility : "visible", "hidden", or "collapsed"
The visibility of the label. The default is ``"visible"``. If this
is ``"hidden"``, Streamlit displays an empty spacer instead of the
label, which can help keep the widget aligned with other widgets.
icon : str, None
An optional emoji or icon to display within the input field to the
left of the value. If ``icon`` is ``None`` (default), no icon is
displayed. If ``icon`` is a string, the following options are
valid:
- A single-character emoji. For example, you can set ``icon="🚨"``
or ``icon="🔥"``. Emoji short codes are not supported.
- An icon from the Material Symbols library (rounded style) in the
format ``":material/icon_name:"`` where "icon_name" is the name
of the icon in snake case.
For example, ``icon=":material/thumb_up:"`` will display the
Thumb Up icon. Find additional icons in the `Material Symbols \
<https://fonts.google.com/icons?icon.set=Material+Symbols&icon.style=Rounded>`_
font library.
- ``"spinner"``: Displays a spinner as an icon.
width : "stretch" or int
The width of the text input widget. This can be one of the
following:
- ``"stretch"`` (default): The width of the widget matches the
width of the parent container.
- An integer specifying the width in pixels: The widget has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the widget matches the width
of the parent container.
Returns
-------
str or None
The current value of the text input widget or ``None`` if no value has been
provided by the user.
Example
-------
>>> import streamlit as st
>>>
>>> title = st.text_input("Movie title", "Life of Brian")
>>> st.write("The current movie title is", title)
.. output::
https://doc-text-input.streamlit.app/
height: 260px
"""
ctx = get_script_run_ctx()
return self._text_input(
label=label,
value=value,
max_chars=max_chars,
key=key,
type=type,
help=help,
autocomplete=autocomplete,
on_change=on_change,
args=args,
kwargs=kwargs,
placeholder=placeholder,
disabled=disabled,
label_visibility=label_visibility,
icon=icon,
width=width,
ctx=ctx,
)
    def _text_input(
        self,
        label: str,
        value: SupportsStr | None = "",
        max_chars: int | None = None,
        key: Key | None = None,
        type: str = "default",
        help: str | None = None,
        autocomplete: str | None = None,
        on_change: WidgetCallback | None = None,
        args: WidgetArgs | None = None,
        kwargs: WidgetKwargs | None = None,
        *,  # keyword-only arguments:
        placeholder: str | None = None,
        disabled: bool = False,
        label_visibility: LabelVisibility = "visible",
        icon: str | None = None,
        width: WidthWithoutContent = "stretch",
        ctx: ScriptRunContext | None = None,
    ) -> str | None:
        """Internal implementation of ``st.text_input``.

        Validates the widget configuration, computes a stable element ID,
        builds the ``TextInputProto``, registers the widget's state/callback
        with the script run context, and enqueues the element for rendering.

        Returns the widget's current string value, or ``None`` if the user
        has not provided a value.

        Raises
        ------
        StreamlitAPIException
            If ``type`` is neither ``"default"`` nor ``"password"``.
        """
        key = to_key(key)
        # Enforce widget policies (e.g. default value vs. session state,
        # callback usage) before doing any work.
        check_widget_policies(
            self.dg,
            key,
            on_change,
            default_value=None if value == "" else value,
        )
        maybe_raise_label_warnings(label, label_visibility)

        # Make sure value is always string or None:
        value = str(value) if value is not None else None

        element_id = compute_and_register_element_id(
            "text_input",
            user_key=key,
            # Explicitly whitelist max_chars to make sure the ID changes when it changes
            # since the widget value might become invalid based on a different max_chars
            key_as_main_identity={"max_chars"},
            dg=self.dg,
            label=label,
            value=value,
            max_chars=max_chars,
            type=type,
            help=help,
            autocomplete=autocomplete,
            # NOTE(review): str(placeholder) maps None to the literal "None"
            # in the ID inputs — presumably intentional for ID stability;
            # confirm before changing.
            placeholder=str(placeholder),
            icon=icon,
            width=width,
        )

        # If the widget's session-state entry was explicitly set to None,
        # treat the widget's default as None as well.
        session_state = get_session_state().filtered_state
        if key is not None and key in session_state and session_state[key] is None:
            value = None

        text_input_proto = TextInputProto()
        text_input_proto.id = element_id
        text_input_proto.label = label
        # Proto `default` is only set when a non-None default exists.
        if value is not None:
            text_input_proto.default = value
        text_input_proto.form_id = current_form_id(self.dg)
        text_input_proto.disabled = disabled
        text_input_proto.label_visibility.value = get_label_visibility_proto_value(
            label_visibility
        )

        if help is not None:
            text_input_proto.help = dedent(help)

        if max_chars is not None:
            text_input_proto.max_chars = max_chars

        if placeholder is not None:
            text_input_proto.placeholder = str(placeholder)

        if icon is not None:
            text_input_proto.icon = validate_icon_or_emoji(icon)

        if type == "default":
            text_input_proto.type = TextInputProto.DEFAULT
        elif type == "password":
            text_input_proto.type = TextInputProto.PASSWORD
        else:
            raise StreamlitAPIException(
                f"'{type}' is not a valid text_input type. Valid types are 'default' and 'password'."
            )

        # Marshall the autocomplete param. If unspecified, this will be
        # set to "new-password" for password inputs.
        if autocomplete is None:
            autocomplete = "new-password" if type == "password" else ""
        text_input_proto.autocomplete = autocomplete

        serde = TextInputSerde(value)

        widget_state = register_widget(
            text_input_proto.id,
            on_change_handler=on_change,
            args=args,
            kwargs=kwargs,
            deserializer=serde.deserialize,
            serializer=serde.serialize,
            ctx=ctx,
            value_type="string_value",
        )
        # If the widget value was changed (e.g. via session state), push the
        # new value back into the proto so the frontend picks it up.
        if widget_state.value_changed:
            if widget_state.value is not None:
                text_input_proto.value = widget_state.value
            text_input_proto.set_value = True

        validate_width(width)
        layout_config = LayoutConfig(width=width)

        self.dg._enqueue("text_input", text_input_proto, layout_config=layout_config)
        return widget_state.value
    # Overload: when ``value`` is typed as a plain ``str`` (including the
    # default empty string), the declared return type narrows to ``str``.
    @overload
    def text_area(
        self,
        label: str,
        value: str = "",
        height: Height | None = None,
        max_chars: int | None = None,
        key: Key | None = None,
        help: str | None = None,
        on_change: WidgetCallback | None = None,
        args: WidgetArgs | None = None,
        kwargs: WidgetKwargs | None = None,
        *,  # keyword-only arguments:
        placeholder: str | None = None,
        disabled: bool = False,
        label_visibility: LabelVisibility = "visible",
        width: WidthWithoutContent = "stretch",
    ) -> str:
        pass
    # Overload: when ``value`` may be ``None`` (or any ``SupportsStr``), the
    # widget can return ``None`` until the user provides input, so the
    # declared return type is ``str | None``.
    @overload
    def text_area(
        self,
        label: str,
        value: SupportsStr | None = None,
        height: Height | None = None,
        max_chars: int | None = None,
        key: Key | None = None,
        help: str | None = None,
        on_change: WidgetCallback | None = None,
        args: WidgetArgs | None = None,
        kwargs: WidgetKwargs | None = None,
        *,  # keyword-only arguments:
        placeholder: str | None = None,
        disabled: bool = False,
        label_visibility: LabelVisibility = "visible",
        width: WidthWithoutContent = "stretch",
    ) -> str | None:
        pass
@gather_metrics("text_area")
def text_area(
self,
label: str,
value: str | SupportsStr | None = "",
height: Height | None = None,
max_chars: int | None = None,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
placeholder: str | None = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
width: WidthWithoutContent = "stretch",
) -> str | None:
r"""Display a multi-line text input widget.
Parameters
----------
label : str
A short label explaining to the user what this input is for.
The label can optionally contain GitHub-flavored Markdown of the
following types: Bold, Italics, Strikethroughs, Inline Code, Links,
and Images. Images display like icons, with a max height equal to
the font height.
Unsupported Markdown elements are unwrapped so only their children
(text contents) render. Display unsupported elements as literal
characters by backslash-escaping them. E.g.,
``"1\. Not an ordered list"``.
See the ``body`` parameter of |st.markdown|_ for additional,
supported Markdown directives.
For accessibility reasons, you should never set an empty label, but
you can hide it with ``label_visibility`` if needed. In the future,
we may disallow empty labels by raising an exception.
.. |st.markdown| replace:: ``st.markdown``
.. _st.markdown: https://docs.streamlit.io/develop/api-reference/text/st.markdown
value : object or None
The text value of this widget when it first renders. This will be
cast to str internally. If ``None``, will initialize empty and
return ``None`` until the user provides input. Defaults to empty string.
height : "content", "stretch", int, or None
The height of the text area widget. This can be one of the
following:
- ``None`` (default): The height of the widget fits three lines.
- ``"content"``: The height of the widget matches the
height of its content.
- ``"stretch"``: The height of the widget matches the height of
its content or the height of the parent container, whichever is
larger. If the widget is not in a parent container, the height
of the widget matches the height of its content.
- An integer specifying the height in pixels: The widget has a
fixed height. If the content is larger than the specified
height, scrolling is enabled.
The widget's height can't be smaller than the height of two lines.
When ``label_visibility="collapsed"``, the minimum height is 68
pixels. Otherwise, the minimum height is 98 pixels.
max_chars : int or None
Maximum number of characters allowed in text area.
key : str or int
An optional string or integer to use as the unique key for the widget.
If this is omitted, a key will be generated for the widget
based on its content. No two widgets may have the same key.
help : str or None
A tooltip that gets displayed next to the widget label. Streamlit
only displays the tooltip when ``label_visibility="visible"``. If
this is ``None`` (default), no tooltip is displayed.
The tooltip can optionally contain GitHub-flavored Markdown,
including the Markdown directives described in the ``body``
parameter of ``st.markdown``.
on_change : callable
An optional callback invoked when this text_area's value changes.
args : list or tuple
An optional list or tuple of args to pass to the callback.
kwargs : dict
An optional dict of kwargs to pass to the callback.
placeholder : str or None
An optional string displayed when the text area is empty. If None,
no text is displayed.
disabled : bool
An optional boolean that disables the text area if set to ``True``.
The default is ``False``.
label_visibility : "visible", "hidden", or "collapsed"
The visibility of the label. The default is ``"visible"``. If this
is ``"hidden"``, Streamlit displays an empty spacer instead of the
label, which can help keep the widget aligned with other widgets.
If this is ``"collapsed"``, Streamlit displays no label or spacer.
width : "stretch" or int
The width of the text area widget. This can be one of the
following:
- ``"stretch"`` (default): The width of the widget matches the
width of the parent container.
- An integer specifying the width in pixels: The widget has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the widget matches the width
of the parent container.
Returns
-------
str or None
The current value of the text area widget or ``None`` if no value has been
provided by the user.
Example
-------
>>> import streamlit as st
>>>
>>> txt = st.text_area(
... "Text to analyze",
... "It was the best of times, it was the worst of times, it was the age of "
... "wisdom, it was the age of foolishness, it was the epoch of belief, it "
... "was the epoch of incredulity, it was the season of Light, it was the "
... "season of Darkness, it was the spring of hope, it was the winter of "
... "despair, (...)",
... )
>>>
>>> st.write(f"You wrote {len(txt)} characters.")
.. output::
https://doc-text-area.streamlit.app/
height: 300px
"""
ctx = get_script_run_ctx()
return self._text_area(
label=label,
value=value,
height=height,
max_chars=max_chars,
key=key,
help=help,
on_change=on_change,
args=args,
kwargs=kwargs,
placeholder=placeholder,
disabled=disabled,
label_visibility=label_visibility,
width=width,
ctx=ctx,
)
    def _text_area(
        self,
        label: str,
        value: SupportsStr | None = "",
        height: Height | None = None,
        max_chars: int | None = None,
        key: Key | None = None,
        help: str | None = None,
        on_change: WidgetCallback | None = None,
        args: WidgetArgs | None = None,
        kwargs: WidgetKwargs | None = None,
        *,  # keyword-only arguments:
        placeholder: str | None = None,
        disabled: bool = False,
        label_visibility: LabelVisibility = "visible",
        width: WidthWithoutContent = "stretch",
        ctx: ScriptRunContext | None = None,
    ) -> str | None:
        """Internal implementation of ``st.text_area``.

        Validates the widget configuration, computes a stable element ID,
        builds the ``TextAreaProto``, registers the widget's state/callback
        with the script run context, and enqueues the element for rendering.

        Returns the widget's current string value, or ``None`` if the user
        has not provided a value.
        """
        key = to_key(key)
        # Enforce widget policies (e.g. default value vs. session state,
        # callback usage) before doing any work.
        check_widget_policies(
            self.dg,
            key,
            on_change,
            default_value=None if value == "" else value,
        )
        maybe_raise_label_warnings(label, label_visibility)

        # Make sure value is always string or None.
        value = str(value) if value is not None else None

        element_id = compute_and_register_element_id(
            "text_area",
            user_key=key,
            # Explicitly whitelist max_chars to make sure the ID changes when it changes
            # since the widget value might become invalid based on a different max_chars
            key_as_main_identity={"max_chars"},
            dg=self.dg,
            label=label,
            value=value,
            height=height,
            max_chars=max_chars,
            help=help,
            # NOTE(review): str(placeholder) maps None to the literal "None"
            # in the ID inputs — presumably intentional for ID stability;
            # confirm before changing.
            placeholder=str(placeholder),
            width=width,
        )

        # If the widget's session-state entry was explicitly set to None,
        # treat the widget's default as None as well.
        session_state = get_session_state().filtered_state
        if key is not None and key in session_state and session_state[key] is None:
            value = None

        text_area_proto = TextAreaProto()
        text_area_proto.id = element_id
        text_area_proto.label = label
        # Proto `default` is only set when a non-None default exists.
        if value is not None:
            text_area_proto.default = value
        text_area_proto.form_id = current_form_id(self.dg)
        text_area_proto.disabled = disabled
        text_area_proto.label_visibility.value = get_label_visibility_proto_value(
            label_visibility
        )

        if help is not None:
            text_area_proto.help = dedent(help)

        if max_chars is not None:
            text_area_proto.max_chars = max_chars

        if placeholder is not None:
            text_area_proto.placeholder = str(placeholder)

        serde = TextAreaSerde(value)

        widget_state = register_widget(
            text_area_proto.id,
            on_change_handler=on_change,
            args=args,
            kwargs=kwargs,
            deserializer=serde.deserialize,
            serializer=serde.serialize,
            ctx=ctx,
            value_type="string_value",
        )
        # If the widget value was changed (e.g. via session state), push the
        # new value back into the proto so the frontend picks it up.
        if widget_state.value_changed:
            if widget_state.value is not None:
                text_area_proto.value = widget_state.value
            text_area_proto.set_value = True

        validate_width(width)
        if height is not None:
            validate_height(height, allow_content=True)
        else:
            # We want to maintain the same approximately three lines of text height
            # for the text input when the label is collapsed.
            # These numbers are for the entire element including the label and
            # padding.
            height = 122 if label_visibility != "collapsed" else 94
        layout_config = LayoutConfig(width=width, height=height)

        self.dg._enqueue("text_area", text_area_proto, layout_config=layout_config)
        return widget_state.value
@property
def dg(self) -> DeltaGenerator:
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self)
| TextWidgetsMixin |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.