language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | numba__numba | numba/tests/test_record_dtype.py | {
"start": 34872,
"end": 37322
} | class ____(TestCase):
"""
Test setitem when index is Literal[str]
"""
def test_literal_variable(self):
arr = np.array([1, 2], dtype=recordtype2)
pyfunc = set_field1
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(arr[0].copy()), jitfunc(arr[0].copy()))
def test_literal_unroll(self):
arr = np.array([1, 2], dtype=recordtype2)
pyfunc = set_field2
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(arr[0].copy()), jitfunc(arr[0].copy()))
def test_literal_variable_global_tuple(self):
# This tests the setitem of record array when the indexes come from a
# global tuple. It tests getitem behaviour but also tests that a global
# tuple is being typed as a tuple of constants.
arr = np.array([1, 2], dtype=recordtype2)
pyfunc = set_field3
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(arr[0].copy()), jitfunc(arr[0].copy()))
def test_literal_unroll_global_tuple(self):
# This tests the setitem of record array when the indexes come from a
# global tuple and are being unrolled.
# It tests setitem behaviour but also tests that literal_unroll accepts
# a global tuple as argument
arr = np.array([1, 2], dtype=recordtype2)
pyfunc = set_field4
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(arr[0].copy()), jitfunc(arr[0].copy()))
def test_literal_unroll_free_var_tuple(self):
# This tests the setitem of record array when the indexes come from a
# free variable tuple (not local, not global) and are being unrolled.
# It tests setitem behaviour but also tests that literal_unroll accepts
# a free variable tuple as argument
arr = np.array([1, 2], dtype=recordtype2)
fs = arr.dtype.names
def set_field(rec):
for f in literal_unroll(fs):
rec[f] = 10
return rec
jitfunc = njit(set_field)
self.assertEqual(set_field(arr[0].copy()), jitfunc(arr[0].copy()))
def test_error_w_invalid_field(self):
arr = np.array([1, 2], dtype=recordtype3)
jitfunc = njit(set_field1)
with self.assertRaises(TypingError) as raises:
jitfunc(arr[0])
self.assertIn("Field 'f' was not found in record with fields "
"('first', 'second')", str(raises.exception))
| TestRecordArraySetItem |
python | numba__llvmlite | llvmlite/binding/module.py | {
"start": 7305,
"end": 7521
} | class ____(_Iterator):
kind = 'function'
def _dispose(self):
self._capi.LLVMPY_DisposeFunctionsIter(self)
def _next(self):
return ffi.lib.LLVMPY_FunctionsIterNext(self)
| _FunctionsIterator |
python | jazzband__django-polymorphic | src/polymorphic/tests/test_multidb.py | {
"start": 324,
"end": 4575
} | class ____(TestCase):
databases = ["default", "secondary"]
def test_save_to_non_default_database(self):
Model2A.objects.db_manager("secondary").create(field1="A1")
Model2C(field1="C1", field2="C2", field3="C3").save(using="secondary")
Model2B.objects.create(field1="B1", field2="B2")
Model2D(field1="D1", field2="D2", field3="D3", field4="D4").save()
self.assertQuerySetEqual(
Model2A.objects.order_by("id"),
[Model2B, Model2D],
transform=lambda o: o.__class__,
)
self.assertQuerySetEqual(
Model2A.objects.db_manager("secondary").order_by("id"),
[Model2A, Model2C],
transform=lambda o: o.__class__,
)
def test_instance_of_filter_on_non_default_database(self):
Base.objects.db_manager("secondary").create(field_b="B1")
ModelX.objects.db_manager("secondary").create(field_b="B", field_x="X")
ModelY.objects.db_manager("secondary").create(field_b="Y", field_y="Y")
objects = Base.objects.db_manager("secondary").filter(instance_of=Base)
self.assertQuerySetEqual(
objects,
[Base, ModelX, ModelY],
transform=lambda o: o.__class__,
ordered=False,
)
self.assertQuerySetEqual(
Base.objects.db_manager("secondary").filter(instance_of=ModelX),
[ModelX],
transform=lambda o: o.__class__,
)
self.assertQuerySetEqual(
Base.objects.db_manager("secondary").filter(instance_of=ModelY),
[ModelY],
transform=lambda o: o.__class__,
)
self.assertQuerySetEqual(
Base.objects.db_manager("secondary").filter(
Q(instance_of=ModelX) | Q(instance_of=ModelY)
),
[ModelX, ModelY],
transform=lambda o: o.__class__,
ordered=False,
)
def test_forward_many_to_one_descriptor_on_non_default_database(self):
def func():
blog = BlogA.objects.db_manager("secondary").create(name="Blog", info="Info")
entry = BlogEntry.objects.db_manager("secondary").create(blog=blog, text="Text")
ContentType.objects.clear_cache()
entry = BlogEntry.objects.db_manager("secondary").get(pk=entry.id)
assert blog == entry.blog
# Ensure no queries are made using the default database.
self.assertNumQueries(0, func)
def test_reverse_many_to_one_descriptor_on_non_default_database(self):
def func():
blog = BlogA.objects.db_manager("secondary").create(name="Blog", info="Info")
entry = BlogEntry.objects.db_manager("secondary").create(blog=blog, text="Text")
ContentType.objects.clear_cache()
blog = BlogA.objects.db_manager("secondary").get(pk=blog.id)
assert entry == blog.blogentry_set.using("secondary").get()
# Ensure no queries are made using the default database.
self.assertNumQueries(0, func)
def test_reverse_one_to_one_descriptor_on_non_default_database(self):
def func():
m2a = Model2A.objects.db_manager("secondary").create(field1="A1")
one2one = One2OneRelatingModel.objects.db_manager("secondary").create(
one2one=m2a, field1="121"
)
ContentType.objects.clear_cache()
m2a = Model2A.objects.db_manager("secondary").get(pk=m2a.id)
assert one2one == m2a.one2onerelatingmodel
# Ensure no queries are made using the default database.
self.assertNumQueries(0, func)
def test_many_to_many_descriptor_on_non_default_database(self):
def func():
m2a = Model2A.objects.db_manager("secondary").create(field1="A1")
rm = RelatingModel.objects.db_manager("secondary").create()
rm.many2many.add(m2a)
ContentType.objects.clear_cache()
m2a = Model2A.objects.db_manager("secondary").get(pk=m2a.id)
assert rm == m2a.relatingmodel_set.using("secondary").get()
# Ensure no queries are made using the default database.
self.assertNumQueries(0, func)
| MultipleDatabasesTests |
python | tensorflow__tensorflow | tensorflow/python/keras/initializers/initializers_v1.py | {
"start": 1192,
"end": 1414
} | class ____(init_ops.RandomNormal):
def __init__(self, mean=0.0, stddev=0.05, seed=None, dtype=dtypes.float32):
super(RandomNormal, self).__init__(
mean=mean, stddev=stddev, seed=seed, dtype=dtype)
| RandomNormal |
python | altair-viz__altair | altair/utils/schemapi.py | {
"start": 38331,
"end": 41485
} | class ____:
"""A singleton object for marking undefined parameters."""
__instance = None
def __new__(cls, *args, **kwargs) -> Self:
if not isinstance(cls.__instance, cls):
cls.__instance = object.__new__(cls, *args, **kwargs)
return cls.__instance
def __repr__(self) -> str:
return "Undefined"
Undefined = UndefinedType()
T = TypeVar("T")
Optional: TypeAlias = Union[T, UndefinedType]
"""One of ``T`` specified type(s), or the ``Undefined`` singleton.
Examples
--------
The parameters ``short``, ``long`` accept the same range of types::
# ruff: noqa: UP006, UP007
from altair.typing import Optional
def func_1(
short: Optional[str | bool | float | dict[str, Any] | SchemaBase] = Undefined,
long: Union[
str, bool, float, Dict[str, Any], SchemaBase, UndefinedType
] = Undefined,
): ...
This is distinct from `typing.Optional <https://typing.readthedocs.io/en/latest/spec/historical.html#union-and-optional>`__.
``altair.typing.Optional`` treats ``None`` like any other type::
# ruff: noqa: UP006, UP007
from altair.typing import Optional
def func_2(
short: Optional[str | float | dict[str, Any] | None | SchemaBase] = Undefined,
long: Union[
str, float, Dict[str, Any], None, SchemaBase, UndefinedType
] = Undefined,
): ...
"""
def is_undefined(obj: Any) -> TypeIs[UndefinedType]:
"""
Type-safe singleton check for `UndefinedType`.
Notes
-----
- Using `obj is Undefined` does not narrow from `UndefinedType` in a union.
- Due to the assumption that other `UndefinedType`'s could exist.
- Current [typing spec advises](https://typing.readthedocs.io/en/latest/spec/concepts.html#support-for-singleton-types-in-unions) using an `Enum`.
- Otherwise, requires an explicit guard to inform the type checker.
"""
return obj is Undefined
@overload
def _shallow_copy(obj: _CopyImpl) -> _CopyImpl: ...
@overload
def _shallow_copy(obj: Any) -> Any: ...
def _shallow_copy(obj: _CopyImpl | Any) -> _CopyImpl | Any:
if isinstance(obj, SchemaBase):
return obj.copy(deep=False)
elif isinstance(obj, (list, dict)):
return obj.copy()
else:
return obj
@overload
def _deep_copy(obj: _CopyImpl, by_ref: set[str]) -> _CopyImpl: ...
@overload
def _deep_copy(obj: Any, by_ref: set[str]) -> Any: ...
def _deep_copy(obj: _CopyImpl | Any, by_ref: set[str]) -> _CopyImpl | Any:
copy = partial(_deep_copy, by_ref=by_ref)
if isinstance(obj, SchemaBase):
if copier := getattr(obj, "__deepcopy__", None):
with debug_mode(False):
return copier(obj)
args = (copy(arg) for arg in obj._args)
kwds = {k: (copy(v) if k not in by_ref else v) for k, v in obj._kwds.items()}
with debug_mode(False):
return obj.__class__(*args, **kwds)
elif isinstance(obj, list):
return [copy(v) for v in obj]
elif isinstance(obj, dict):
return {k: (copy(v) if k not in by_ref else v) for k, v in obj.items()}
else:
return obj
| UndefinedType |
python | python-openxml__python-docx | tests/oxml/unitdata/text.py | {
"start": 649,
"end": 750
} | class ____(BaseBuilder):
__tag__ = "w:pPr"
__nspfxs__ = ("w",)
__attrs__ = ()
| CT_PPrBuilder |
python | protocolbuffers__protobuf | python/google/protobuf/internal/enum_type_wrapper.py | {
"start": 542,
"end": 3747
} | class ____(object):
"""A utility for finding the names of enum values."""
DESCRIPTOR = None
# This is a type alias, which mypy typing stubs can type as
# a genericized parameter constrained to an int, allowing subclasses
# to be typed with more constraint in .pyi stubs
# Eg.
# def MyGeneratedEnum(Message):
# ValueType = NewType('ValueType', int)
# def Name(self, number: MyGeneratedEnum.ValueType) -> str
ValueType = int
def __init__(self, enum_type):
"""Inits EnumTypeWrapper with an EnumDescriptor."""
self._enum_type = enum_type
self.DESCRIPTOR = enum_type # pylint: disable=invalid-name
def Name(self, number): # pylint: disable=invalid-name
"""Returns a string containing the name of an enum value."""
try:
return self._enum_type.values_by_number[number].name
except KeyError:
pass # fall out to break exception chaining
if not isinstance(number, int):
raise TypeError(
'Enum value for {} must be an int, but got {} {!r}.'.format(
self._enum_type.name, type(number), number))
else:
# repr here to handle the odd case when you pass in a boolean.
raise ValueError('Enum {} has no name defined for value {!r}'.format(
self._enum_type.name, number))
def Value(self, name): # pylint: disable=invalid-name
"""Returns the value corresponding to the given enum name."""
try:
return self._enum_type.values_by_name[name].number
except KeyError:
pass # fall out to break exception chaining
raise ValueError('Enum {} has no value defined for name {!r}'.format(
self._enum_type.name, name))
def keys(self):
"""Return a list of the string names in the enum.
Returns:
A list of strs, in the order they were defined in the .proto file.
"""
return [value_descriptor.name
for value_descriptor in self._enum_type.values]
def values(self):
"""Return a list of the integer values in the enum.
Returns:
A list of ints, in the order they were defined in the .proto file.
"""
return [value_descriptor.number
for value_descriptor in self._enum_type.values]
def items(self):
"""Return a list of the (name, value) pairs of the enum.
Returns:
A list of (str, int) pairs, in the order they were defined
in the .proto file.
"""
return [(value_descriptor.name, value_descriptor.number)
for value_descriptor in self._enum_type.values]
def __getattr__(self, name):
"""Returns the value corresponding to the given enum name."""
try:
return super(
EnumTypeWrapper,
self).__getattribute__('_enum_type').values_by_name[name].number
except KeyError:
pass # fall out to break exception chaining
raise AttributeError('Enum {} has no value defined for name {!r}'.format(
self._enum_type.name, name))
def __or__(self, other):
"""Returns the union type of self and other."""
if sys.version_info >= (3, 10):
return type(self) | other
else:
raise NotImplementedError(
'You may not use | on EnumTypes (or classes) below python 3.10'
)
| EnumTypeWrapper |
python | facebookresearch__faiss | tests/test_fast_scan_ivf.py | {
"start": 28513,
"end": 30843
} | class ____(unittest.TestCase):
def subtest_accuracy(self, paq):
"""
Compare IndexIVFAdditiveQuantizerFastScan with
IndexIVFAdditiveQuantizer
"""
nlist, d = 16, 8
ds = datasets.SyntheticDataset(d, 1000, 1000, 500)
gt = ds.get_groundtruth(k=1)
index = faiss.index_factory(d, f'IVF{nlist},{paq}2x3x4_Nqint8')
index.train(ds.get_train())
index.add(ds.get_database())
index.nprobe = 4
Dref, Iref = index.search(ds.get_queries(), 1)
indexfs = faiss.index_factory(d, f'IVF{nlist},{paq}2x3x4fsr_Nlsq2x4')
indexfs.train(ds.get_train())
indexfs.add(ds.get_database())
indexfs.nprobe = 4
D1, I1 = indexfs.search(ds.get_queries(), 1)
nq = Iref.shape[0]
recall_ref = (Iref == gt).sum() / nq
recall1 = (I1 == gt).sum() / nq
assert abs(recall_ref - recall1) < 0.05
def test_accuracy_PLSQ(self):
self.subtest_accuracy("PLSQ")
def test_accuracy_PRQ(self):
self.subtest_accuracy("PRQ")
def subtest_factory(self, paq):
nlist, d = 128, 16
index = faiss.index_factory(d, f'IVF{nlist},{paq}2x3x4fsr_Nlsq2x4')
q = faiss.downcast_Quantizer(index.aq)
self.assertEqual(index.nlist, nlist)
self.assertEqual(q.nsplits, 2)
self.assertEqual(q.subquantizer(0).M, 3)
self.assertTrue(index.by_residual)
def test_factory(self):
self.subtest_factory('PLSQ')
self.subtest_factory('PRQ')
def subtest_io(self, factory_str):
d = 8
ds = datasets.SyntheticDataset(d, 1000, 2000, 1000)
index = faiss.index_factory(d, factory_str)
index.train(ds.get_train())
index.add(ds.get_database())
D1, I1 = index.search(ds.get_queries(), 1)
fd, fname = tempfile.mkstemp()
os.close(fd)
try:
faiss.write_index(index, fname)
index2 = faiss.read_index(fname)
D2, I2 = index2.search(ds.get_queries(), 1)
np.testing.assert_array_equal(I1, I2)
finally:
if os.path.exists(fname):
os.unlink(fname)
def test_io(self):
self.subtest_io('IVF16,PLSQ2x3x4fsr_Nlsq2x4')
self.subtest_io('IVF16,PRQ2x3x4fs_Nrq2x4')
| TestIVFPAQFastScan |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/components_tests/test_utils/test_cases.py | {
"start": 137,
"end": 7084
} | class ____:
"""A test case for validating a component, used to
test raw YAML error messages as well as the check CLI.
"""
component_path: str
component_type_filepath: Optional[Path]
should_error: bool
validate_error_msg: Optional[Callable[[str], None]] = None
check_error_msg: Optional[Callable[[str], None]] = None
def msg_includes_all_of(*substrings: str) -> Callable[[str], None]:
def _validate_error_msg(msg: str) -> None:
for substring in substrings:
assert substring in msg, f"Expected '{substring}' to be in error message '{msg}'"
return _validate_error_msg
BASIC_COMPONENT_TYPE_FILEPATH = Path(__file__).parent / "basic_components.py"
BASIC_INVALID_VALUE = ComponentValidationTestCase(
component_path="validation/basic_component_invalid_value",
component_type_filepath=BASIC_COMPONENT_TYPE_FILEPATH,
should_error=True,
validate_error_msg=msg_includes_all_of(
"defs.yaml:5", "attributes.an_int", "Input should be a valid integer"
),
check_error_msg=msg_includes_all_of(
"defs.yaml:5",
"attributes.an_int",
"{} is not of type 'integer'",
),
)
BASIC_MISSING_VALUE = ComponentValidationTestCase(
component_path="validation/basic_component_missing_value",
component_type_filepath=BASIC_COMPONENT_TYPE_FILEPATH,
should_error=True,
validate_error_msg=msg_includes_all_of("defs.yaml:3", "attributes.an_int", "required"),
check_error_msg=msg_includes_all_of(
"defs.yaml:3",
"attributes",
"'an_int' is a required property",
),
)
BASIC_VALID_VALUE = ComponentValidationTestCase(
component_path="validation/basic_component_success",
component_type_filepath=BASIC_COMPONENT_TYPE_FILEPATH,
should_error=False,
)
COMPONENT_VALIDATION_TEST_CASES = [
BASIC_VALID_VALUE,
BASIC_INVALID_VALUE,
BASIC_MISSING_VALUE,
ComponentValidationTestCase(
component_path="validation/basic_component_extra_value",
component_type_filepath=BASIC_COMPONENT_TYPE_FILEPATH,
should_error=True,
validate_error_msg=msg_includes_all_of(
"defs.yaml:7", "attributes.a_bool", "Extra inputs are not permitted"
),
check_error_msg=msg_includes_all_of(
"defs.yaml:7",
"'a_bool' was unexpected",
),
),
ComponentValidationTestCase(
component_path="validation/basic_component_extra_value_in_a_subfolder",
component_type_filepath=None,
should_error=True,
validate_error_msg=msg_includes_all_of("defs.yaml:4", "attributes.path"),
check_error_msg=msg_includes_all_of(
"defs.yaml:4",
"attributes.path",
),
),
ComponentValidationTestCase(
component_path="validation/nested_component_invalid_values",
component_type_filepath=BASIC_COMPONENT_TYPE_FILEPATH,
should_error=True,
validate_error_msg=msg_includes_all_of(
"defs.yaml:7",
"attributes.nested.foo.an_int",
"Input should be a valid integer",
"defs.yaml:12",
"attributes.nested.baz.a_string",
"Input should be a valid string",
),
check_error_msg=msg_includes_all_of(
"defs.yaml:7",
"attributes.nested.foo.an_int",
"{} is not of type 'integer'",
"defs.yaml:12",
"attributes.nested.baz.a_string",
"{} is not of type 'string'",
),
),
ComponentValidationTestCase(
component_path="validation/nested_component_missing_values",
component_type_filepath=BASIC_COMPONENT_TYPE_FILEPATH,
should_error=True,
validate_error_msg=msg_includes_all_of(
"defs.yaml:5", "attributes.nested.foo.an_int", "required"
),
check_error_msg=msg_includes_all_of(
"defs.yaml:5",
"attributes.nested.foo",
"'an_int' is a required property",
"defs.yaml:10",
"attributes.nested.baz",
"'a_string' is a required property",
),
),
ComponentValidationTestCase(
component_path="validation/nested_component_extra_values",
component_type_filepath=BASIC_COMPONENT_TYPE_FILEPATH,
should_error=True,
validate_error_msg=msg_includes_all_of(
"defs.yaml:8",
"attributes.nested.foo.a_bool",
"Extra inputs are not permitted",
"defs.yaml:15",
"attributes.nested.baz.another_bool",
),
check_error_msg=msg_includes_all_of(
"defs.yaml:8",
"attributes.nested.foo",
"'a_bool' was unexpected",
"defs.yaml:15",
"attributes.nested.baz",
"'another_bool' was unexpected",
),
),
ComponentValidationTestCase(
component_path="validation/invalid_component_file_model",
component_type_filepath=BASIC_COMPONENT_TYPE_FILEPATH,
should_error=True,
validate_error_msg=msg_includes_all_of(
"defs.yaml:1",
"type",
"Input should be a valid string",
"defs.yaml:3",
"attributes",
"Input should be an object",
),
check_error_msg=msg_includes_all_of(
"defs.yaml:1",
"type",
"{} is not of type 'string'",
"defs.yaml:3",
"attributes",
"'asdfasdf' is not of type 'object'",
),
),
ComponentValidationTestCase(
component_path="validation/other_invalid_component_file_model",
component_type_filepath=BASIC_COMPONENT_TYPE_FILEPATH,
should_error=True,
validate_error_msg=msg_includes_all_of(
"defs.yaml:3",
"attributez",
),
check_error_msg=msg_includes_all_of(
"defs.yaml:3",
"attributez",
),
),
ComponentValidationTestCase(
component_path="validation/invalid_yaml_missing_quote",
component_type_filepath=None,
should_error=True,
validate_error_msg=msg_includes_all_of(
"line 2",
"found unexpected end of stream",
),
check_error_msg=msg_includes_all_of(
"defs.yaml:2",
"Unable to parse YAML",
"found unexpected end of stream",
),
),
ComponentValidationTestCase(
component_path="validation/invalid_yaml_invalid_char",
component_type_filepath=None,
should_error=True,
validate_error_msg=msg_includes_all_of(
"line 1",
"found character '@' that cannot start any token",
),
check_error_msg=msg_includes_all_of(
"defs.yaml:1",
"Unable to parse YAML",
"found character '@' that cannot start any token",
),
),
]
| ComponentValidationTestCase |
python | gevent__gevent | src/greentest/3.12/test_socket.py | {
"start": 96277,
"end": 98748
} | class ____(unittest.TestCase):
def testHyperVConstants(self):
socket.HVSOCKET_CONNECT_TIMEOUT
socket.HVSOCKET_CONNECT_TIMEOUT_MAX
socket.HVSOCKET_CONNECTED_SUSPEND
socket.HVSOCKET_ADDRESS_FLAG_PASSTHRU
socket.HV_GUID_ZERO
socket.HV_GUID_WILDCARD
socket.HV_GUID_BROADCAST
socket.HV_GUID_CHILDREN
socket.HV_GUID_LOOPBACK
socket.HV_GUID_PARENT
def testCreateHyperVSocketWithUnknownProtoFailure(self):
expected = r"\[WinError 10041\]"
with self.assertRaisesRegex(OSError, expected):
socket.socket(socket.AF_HYPERV, socket.SOCK_STREAM)
def testCreateHyperVSocketAddrNotTupleFailure(self):
expected = "connect(): AF_HYPERV address must be tuple, not str"
with socket.socket(socket.AF_HYPERV, socket.SOCK_STREAM, socket.HV_PROTOCOL_RAW) as s:
with self.assertRaisesRegex(TypeError, re.escape(expected)):
s.connect(socket.HV_GUID_ZERO)
def testCreateHyperVSocketAddrNotTupleOf2StrsFailure(self):
expected = "AF_HYPERV address must be a str tuple (vm_id, service_id)"
with socket.socket(socket.AF_HYPERV, socket.SOCK_STREAM, socket.HV_PROTOCOL_RAW) as s:
with self.assertRaisesRegex(TypeError, re.escape(expected)):
s.connect((socket.HV_GUID_ZERO,))
def testCreateHyperVSocketAddrNotTupleOfStrsFailure(self):
expected = "AF_HYPERV address must be a str tuple (vm_id, service_id)"
with socket.socket(socket.AF_HYPERV, socket.SOCK_STREAM, socket.HV_PROTOCOL_RAW) as s:
with self.assertRaisesRegex(TypeError, re.escape(expected)):
s.connect((1, 2))
def testCreateHyperVSocketAddrVmIdNotValidUUIDFailure(self):
expected = "connect(): AF_HYPERV address vm_id is not a valid UUID string"
with socket.socket(socket.AF_HYPERV, socket.SOCK_STREAM, socket.HV_PROTOCOL_RAW) as s:
with self.assertRaisesRegex(ValueError, re.escape(expected)):
s.connect(("00", socket.HV_GUID_ZERO))
def testCreateHyperVSocketAddrServiceIdNotValidUUIDFailure(self):
expected = "connect(): AF_HYPERV address service_id is not a valid UUID string"
with socket.socket(socket.AF_HYPERV, socket.SOCK_STREAM, socket.HV_PROTOCOL_RAW) as s:
with self.assertRaisesRegex(ValueError, re.escape(expected)):
s.connect((socket.HV_GUID_ZERO, "00"))
| BasicHyperVTest |
python | automl__auto-sklearn | autosklearn/metalearning/metafeatures/metafeatures.py | {
"start": 8799,
"end": 9096
} | class ____(MetaFeature):
def _calculate(self, X, y, logger, feat_type):
return float(metafeatures.get_value("NumberOfMissingValues")) / float(
X.shape[0] * X.shape[1]
)
# TODO: generalize this!
@metafeatures.define("NumberOfNumericFeatures")
| PercentageOfMissingValues |
python | pytest-dev__pytest | src/_pytest/warning_types.py | {
"start": 348,
"end": 496
} | class ____(PytestWarning):
"""Warning emitted by the pytest assert rewrite module."""
__module__ = "pytest"
@final
| PytestAssertRewriteWarning |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/solver15.py | {
"start": 140,
"end": 438
} | class ____: ...
T = TypeVar("T", bound=Base)
def register(state_name: str, state: type[T]): ...
def register_state(state_name: str) -> Callable[[type[T]], type[T]]:
def decorator(state: type[T]) -> type[T]:
register(state_name, state)
return state
return decorator
| Base |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 11649,
"end": 11784
} | class ____(PydanticValueError):
code = 'decimal.not_finite'
msg_template = 'value is not a valid decimal'
| DecimalIsNotFiniteError |
python | django__django | tests/select_related_onetoone/models.py | {
"start": 1463,
"end": 1522
} | class ____(Child2):
value3 = models.IntegerField()
| Child3 |
python | apache__airflow | helm-tests/tests/helm_tests/security/test_scc_rolebinding.py | {
"start": 914,
"end": 6796
} | class ____:
"""Tests SCCs."""
@pytest.mark.parametrize(
("rbac_enabled", "scc_enabled", "created"),
[
(False, False, False),
(False, True, False),
(True, True, True),
(True, False, False),
],
)
def test_create_scc(self, rbac_enabled, scc_enabled, created):
docs = render_chart(
values={
"multiNamespaceMode": False,
"webserver": {"defaultUser": {"enabled": True}},
"cleanup": {"enabled": True},
"databaseCleanup": {"enabled": True},
"flower": {"enabled": True},
"rbac": {"create": rbac_enabled, "createSCCRoleBinding": scc_enabled},
"dagProcessor": {"enabled": True},
},
show_only=["templates/rbac/security-context-constraint-rolebinding.yaml"],
)
assert bool(docs) is created
if created:
assert jmespath.search("kind", docs[0]) == "RoleBinding"
assert jmespath.search("roleRef.kind", docs[0]) == "ClusterRole"
assert jmespath.search("metadata.name", docs[0]) == "release-name-scc-rolebinding"
assert jmespath.search("roleRef.name", docs[0]) == "system:openshift:scc:anyuid"
assert jmespath.search("subjects[0].name", docs[0]) == "release-name-airflow-webserver"
assert jmespath.search("subjects[1].name", docs[0]) == "release-name-airflow-worker"
assert jmespath.search("subjects[2].name", docs[0]) == "release-name-airflow-scheduler"
assert jmespath.search("subjects[3].name", docs[0]) == "release-name-airflow-api-server"
assert jmespath.search("subjects[4].name", docs[0]) == "release-name-airflow-statsd"
assert jmespath.search("subjects[5].name", docs[0]) == "release-name-airflow-flower"
assert jmespath.search("subjects[6].name", docs[0]) == "release-name-airflow-redis"
assert jmespath.search("subjects[7].name", docs[0]) == "release-name-airflow-triggerer"
assert jmespath.search("subjects[8].name", docs[0]) == "release-name-airflow-migrate-database-job"
assert jmespath.search("subjects[9].name", docs[0]) == "release-name-airflow-create-user-job"
assert jmespath.search("subjects[10].name", docs[0]) == "release-name-airflow-cleanup"
assert jmespath.search("subjects[11].name", docs[0]) == "release-name-airflow-database-cleanup"
assert jmespath.search("subjects[12].name", docs[0]) == "release-name-airflow-dag-processor"
@pytest.mark.parametrize(
("rbac_enabled", "scc_enabled", "created", "namespace", "expected_name"),
[
(True, True, True, "default", "default-release-name-scc-rolebinding"),
(True, True, True, "other-ns", "other-ns-release-name-scc-rolebinding"),
],
)
def test_create_scc_multinamespace(self, rbac_enabled, scc_enabled, created, namespace, expected_name):
docs = render_chart(
namespace=namespace,
values={
"multiNamespaceMode": True,
"webserver": {"defaultUser": {"enabled": False}},
"cleanup": {"enabled": False},
"databaseCleanup": {"enabled": False},
"flower": {"enabled": False},
"rbac": {"create": rbac_enabled, "createSCCRoleBinding": scc_enabled},
},
show_only=["templates/rbac/security-context-constraint-rolebinding.yaml"],
)
assert bool(docs) is created
if created:
assert jmespath.search("kind", docs[0]) == "ClusterRoleBinding"
assert jmespath.search("roleRef.kind", docs[0]) == "ClusterRole"
assert expected_name == jmespath.search("metadata.name", docs[0])
assert jmespath.search("roleRef.name", docs[0]) == "system:openshift:scc:anyuid"
@pytest.mark.parametrize(
("rbac_enabled", "scc_enabled", "created"),
[
(True, True, True),
],
)
def test_create_scc_worker_only(self, rbac_enabled, scc_enabled, created):
docs = render_chart(
values={
"multiNamespaceMode": False,
"webserver": {"defaultUser": {"enabled": False}},
"cleanup": {"enabled": False},
"databaseCleanup": {"enabled": False},
"flower": {"enabled": False},
"statsd": {"enabled": False},
"rbac": {"create": rbac_enabled, "createSCCRoleBinding": scc_enabled},
},
show_only=["templates/rbac/security-context-constraint-rolebinding.yaml"],
)
assert bool(docs) is created
if created:
assert jmespath.search("kind", docs[0]) == "RoleBinding"
assert jmespath.search("roleRef.kind", docs[0]) == "ClusterRole"
assert jmespath.search("metadata.name", docs[0]) == "release-name-scc-rolebinding"
assert jmespath.search("roleRef.name", docs[0]) == "system:openshift:scc:anyuid"
assert jmespath.search("subjects[0].name", docs[0]) == "release-name-airflow-webserver"
assert jmespath.search("subjects[1].name", docs[0]) == "release-name-airflow-worker"
assert jmespath.search("subjects[2].name", docs[0]) == "release-name-airflow-scheduler"
assert jmespath.search("subjects[3].name", docs[0]) == "release-name-airflow-api-server"
assert jmespath.search("subjects[4].name", docs[0]) == "release-name-airflow-redis"
assert jmespath.search("subjects[5].name", docs[0]) == "release-name-airflow-triggerer"
assert jmespath.search("subjects[6].name", docs[0]) == "release-name-airflow-migrate-database-job"
assert len(docs[0]["subjects"]) == 7
| TestSCCActivation |
python | ray-project__ray | python/ray/dashboard/modules/reporter/gpu_providers.py | {
"start": 2411,
"end": 13522
} | class ____(GpuProvider):
"""NVIDIA GPU provider using pynvml."""
def __init__(self):
super().__init__()
self._pynvml = None
# Maintain per-GPU sampling timestamps when using process utilization API
self._gpu_process_last_sample_ts: Dict[int, int] = {}
def get_provider_name(self) -> GpuProviderType:
return GpuProviderType.NVIDIA
def is_available(self) -> bool:
"""Check if NVIDIA GPUs are available."""
try:
import ray._private.thirdparty.pynvml as pynvml
pynvml.nvmlInit()
pynvml.nvmlShutdown()
return True
except Exception as e:
logger.debug(f"NVIDIA GPU not available: {e}")
return False
def _initialize(self) -> bool:
"""Initialize the NVIDIA GPU provider."""
if self._initialized:
return True
try:
import ray._private.thirdparty.pynvml as pynvml
self._pynvml = pynvml
self._pynvml.nvmlInit()
self._initialized = True
return True
except Exception as e:
logger.debug(f"Failed to initialize NVIDIA GPU provider: {e}")
return False
def _shutdown(self):
"""Shutdown the NVIDIA GPU provider."""
if self._initialized and self._pynvml:
try:
self._pynvml.nvmlShutdown()
except Exception as e:
logger.debug(f"Error shutting down NVIDIA GPU provider: {e}")
finally:
self._initialized = False
def get_gpu_utilization(self) -> List[GpuUtilizationInfo]:
"""Get GPU utilization information for all NVIDIA GPUs and MIG devices."""
return self._get_pynvml_gpu_usage()
def _get_pynvml_gpu_usage(self) -> List[GpuUtilizationInfo]:
if not self._initialized:
if not self._initialize():
return []
gpu_utilizations = []
try:
num_gpus = self._pynvml.nvmlDeviceGetCount()
for i in range(num_gpus):
gpu_handle = self._pynvml.nvmlDeviceGetHandleByIndex(i)
# Check if MIG mode is enabled on this GPU
try:
mig_mode = self._pynvml.nvmlDeviceGetMigMode(gpu_handle)
if mig_mode[0]: # MIG mode is enabled
# Get MIG device instances
mig_devices = self._get_mig_devices(gpu_handle, i)
gpu_utilizations.extend(mig_devices)
continue
except (self._pynvml.NVMLError, AttributeError):
# MIG not supported or not enabled, continue with regular GPU
pass
# Process regular GPU (non-MIG)
gpu_info = self._get_gpu_info(gpu_handle, i)
if gpu_info:
gpu_utilizations.append(gpu_info)
except Exception as e:
logger.warning(f"Error getting NVIDIA GPU utilization: {e}")
finally:
self._shutdown()
return gpu_utilizations
def _get_mig_devices(self, gpu_handle, gpu_index: int) -> List[GpuUtilizationInfo]:
"""Get MIG device information for a GPU with MIG enabled."""
mig_devices = []
try:
# Get all MIG device instances
mig_count = self._pynvml.nvmlDeviceGetMaxMigDeviceCount(gpu_handle)
for mig_idx in range(mig_count):
try:
# Get MIG device handle
mig_handle = self._pynvml.nvmlDeviceGetMigDeviceHandleByIndex(
gpu_handle, mig_idx
)
# Get MIG device info
mig_info = self._get_mig_device_info(mig_handle, gpu_index, mig_idx)
if mig_info:
mig_devices.append(mig_info)
except self._pynvml.NVMLError:
# MIG device not available at this index
continue
except (self._pynvml.NVMLError, AttributeError) as e:
logger.debug(f"Error getting MIG devices: {e}")
return mig_devices
    def _get_mig_device_info(
        self, mig_handle, gpu_index: int, mig_index: int
    ) -> Optional[GpuUtilizationInfo]:
        """Get utilization info for a single MIG device.

        Each metric degrades independently: utilization falls back to -1,
        the process map to empty, and the UUID/name to synthesized values
        derived from the parent GPU.

        Args:
            mig_handle: NVML handle of the MIG device instance.
            gpu_index: Zero-based NVML index of the parent GPU.
            mig_index: Zero-based index of the MIG instance on that GPU.

        Returns:
            A ``GpuUtilizationInfo`` for the MIG device, or ``None`` if any
            unexpected error occurs while querying NVML.
        """
        try:
            memory_info = self._pynvml.nvmlDeviceGetMemoryInfo(mig_handle)
            # Device utilization; -1 signals "unavailable" to consumers.
            utilization = -1
            try:
                utilization_info = self._pynvml.nvmlDeviceGetUtilizationRates(
                    mig_handle
                )
                utilization = int(utilization_info.gpu)
            except self._pynvml.NVMLError as e:
                logger.debug(f"Failed to retrieve MIG device utilization: {e}")
            # Map of pid -> per-process GPU info for processes running on
            # this MIG device (compute + graphics).
            processes_pids = {}
            try:
                nv_comp_processes = self._pynvml.nvmlDeviceGetComputeRunningProcesses(
                    mig_handle
                )
                nv_graphics_processes = (
                    self._pynvml.nvmlDeviceGetGraphicsRunningProcesses(mig_handle)
                )
                for nv_process in nv_comp_processes + nv_graphics_processes:
                    processes_pids[int(nv_process.pid)] = ProcessGPUInfo(
                        pid=int(nv_process.pid),
                        # usedGpuMemory may be None on some drivers; report 0
                        # in that case, otherwise convert bytes -> MB.
                        gpu_memory_usage=(
                            int(nv_process.usedGpuMemory) // MB
                            if nv_process.usedGpuMemory
                            else 0
                        ),
                        # NOTE: According to nvml, this is not currently available in MIG mode
                        gpu_utilization=None,
                    )
            except self._pynvml.NVMLError as e:
                logger.debug(f"Failed to retrieve MIG device processes: {e}")
            # MIG device UUID and name; older drivers cannot query these on a
            # MIG handle, so synthesize them from the parent GPU as a fallback.
            try:
                mig_uuid = self._decode(self._pynvml.nvmlDeviceGetUUID(mig_handle))
                mig_name = self._decode(self._pynvml.nvmlDeviceGetName(mig_handle))
            except self._pynvml.NVMLError:
                # Fallback for older drivers
                try:
                    parent_name = self._decode(
                        self._pynvml.nvmlDeviceGetName(
                            self._pynvml.nvmlDeviceGetHandleByIndex(gpu_index)
                        )
                    )
                    mig_name = f"{parent_name} MIG {mig_index}"
                    mig_uuid = f"MIG-GPU-{gpu_index}-{mig_index}"
                except Exception:
                    # Last resort: fully synthetic identifiers.
                    mig_name = f"NVIDIA MIG Device {gpu_index}.{mig_index}"
                    mig_uuid = f"MIG-{gpu_index}-{mig_index}"
            return GpuUtilizationInfo(
                # gpu_index * 1000 + mig_index keeps MIG indices unique and
                # disjoint from regular GPU indices (assumes < 1000 MIG
                # instances per GPU — always true for current hardware).
                index=gpu_index * 1000 + mig_index,  # Unique index for MIG devices
                name=mig_name,
                uuid=mig_uuid,
                utilization_gpu=utilization,
                memory_used=int(memory_info.used) // MB,
                memory_total=int(memory_info.total) // MB,
                processes_pids=processes_pids,
            )
        except Exception as e:
            logger.debug(f"Error getting MIG device info: {e}")
            return None
def _get_gpu_info(self, gpu_handle, gpu_index: int) -> Optional[GpuUtilizationInfo]:
"""Get utilization info for a regular (non-MIG) GPU."""
try:
memory_info = self._pynvml.nvmlDeviceGetMemoryInfo(gpu_handle)
# Get GPU utilization
utilization = -1
try:
utilization_info = self._pynvml.nvmlDeviceGetUtilizationRates(
gpu_handle
)
utilization = int(utilization_info.gpu)
except self._pynvml.NVMLError as e:
logger.debug(f"Failed to retrieve GPU utilization: {e}")
# Get running processes
processes_pids = {}
try:
# Try to use the newer API first (available in driver version 550+)
current_ts_ms = int(time.time() * 1000)
last_ts_ms = self._gpu_process_last_sample_ts.get(gpu_index, 0)
nv_processes = self._pynvml.nvmlDeviceGetProcessesUtilizationInfo(
gpu_handle, last_ts_ms
)
self._gpu_process_last_sample_ts[gpu_index] = current_ts_ms
for nv_process in nv_processes:
processes_pids[int(nv_process.pid)] = ProcessGPUInfo(
pid=int(nv_process.pid),
gpu_memory_usage=int(nv_process.memUtil)
/ 100
* int(memory_info.total)
// MB,
gpu_utilization=int(nv_process.smUtil),
)
except self._pynvml.NVMLError as e:
logger.debug(
f"Failed to retrieve GPU processes using `nvmlDeviceGetProcessesUtilizationInfo`, fallback to `nvmlDeviceGetComputeRunningProcesses` and `nvmlDeviceGetGraphicsRunningProcesses`: {e}"
)
# Fallback to older API for compatibility with older drivers
try:
nv_comp_processes = (
self._pynvml.nvmlDeviceGetComputeRunningProcesses(gpu_handle)
)
nv_graphics_processes = (
self._pynvml.nvmlDeviceGetGraphicsRunningProcesses(gpu_handle)
)
for nv_process in nv_comp_processes + nv_graphics_processes:
processes_pids[int(nv_process.pid)] = ProcessGPUInfo(
pid=int(nv_process.pid),
gpu_memory_usage=(
int(nv_process.usedGpuMemory) // MB
if nv_process.usedGpuMemory
else 0
),
gpu_utilization=None, # Not available with older API
)
except self._pynvml.NVMLError as fallback_e:
logger.debug(
f"Failed to retrieve GPU processes using `nvmlDeviceGetComputeRunningProcesses` and `nvmlDeviceGetGraphicsRunningProcesses`: {fallback_e}"
)
return GpuUtilizationInfo(
index=gpu_index,
name=self._decode(self._pynvml.nvmlDeviceGetName(gpu_handle)),
uuid=self._decode(self._pynvml.nvmlDeviceGetUUID(gpu_handle)),
utilization_gpu=utilization,
memory_used=int(memory_info.used) // MB,
memory_total=int(memory_info.total) // MB,
processes_pids=processes_pids,
)
except Exception as e:
logger.debug(f"Error getting GPU info: {e}")
return None
| NvidiaGpuProvider |
python | simonw__datasette | datasette/utils/asgi.py | {
"start": 1164,
"end": 1209
} | class ____(Base400):
status = 403
| Forbidden |
python | mitmproxy__pdoc | test/testdata/misc.py | {
"start": 5088,
"end": 5185
} | class ____(type):
def __call__(cls, *args, **kwargs):
"""Meta.__call__"""
| Issue352aMeta |
python | huggingface__transformers | src/transformers/models/squeezebert/modeling_squeezebert.py | {
"start": 3880,
"end": 4399
} | class ____(nn.LayerNorm):
"""
This is a nn.LayerNorm subclass that accepts NCW data layout and performs normalization in the C dimension.
N = batch C = channels W = sequence length
"""
def __init__(self, hidden_size, eps=1e-12):
nn.LayerNorm.__init__(self, normalized_shape=hidden_size, eps=eps) # instantiates self.{weight, bias, eps}
def forward(self, x):
x = x.permute(0, 2, 1)
x = nn.LayerNorm.forward(self, x)
return x.permute(0, 2, 1)
| SqueezeBertLayerNorm |
python | streamlit__streamlit | lib/tests/streamlit/auth_util_test.py | {
"start": 1556,
"end": 2462
} | class ____(unittest.TestCase):
"""Test auth utils."""
def test_auth_cache(self):
"""Test AuthCache basic functionality."""
cache = AuthCache()
cache.set("key1", "value1", 3600)
assert cache.get("key1") == "value1"
cache.delete("key1")
assert cache.get("key1") is None
@patch(
"streamlit.auth_util.secrets_singleton",
MagicMock(
load_if_toml_exists=MagicMock(return_value=True),
get=MagicMock(return_value=SECRETS_MOCK),
),
)
@patch(
"streamlit.auth_util.config",
MagicMock(
get_option=MagicMock(return_value="CONFIG_COOKIE_SECRET"),
),
)
def test_get_signing_secret(self):
"""Get the cookie signing secret from the configuration or secrets.toml."""
x = get_signing_secret()
assert x == "your_cookie_secret_here"
| AuthUtilTest |
python | ray-project__ray | python/ray/llm/tests/serve/cpu/deployments/llm/test_llm_server.py | {
"start": 2157,
"end": 17523
} | class ____:
@pytest.mark.parametrize("api_type", ["chat", "completion"])
@pytest.mark.parametrize("stream", [False, True])
@pytest.mark.parametrize("max_tokens", [5])
@pytest.mark.parametrize("stream_batching_interval_ms", [0, 10000])
@pytest.mark.asyncio
async def test_unified_llm_server(
self,
serve_handle,
mock_llm_config,
mock_chat_request,
mock_completion_request,
api_type: str,
stream: bool,
max_tokens: int,
stream_batching_interval_ms: int,
):
"""Unified test for both chat and completion APIs, streaming and non-streaming."""
# Create request based on API type
if api_type == "chat":
request = mock_chat_request
batched_chunks = serve_handle.chat.remote(request)
elif api_type == "completion":
request = mock_completion_request
batched_chunks = serve_handle.completions.remote(request)
print(
f"\n\n_____ {api_type.upper()} ({'STREAMING' if stream else 'NON-STREAMING'}) max_tokens={max_tokens} batching_interval_ms={stream_batching_interval_ms} _____\n\n"
)
if stream:
# Collect responses from the stream
chunks = []
async for batch in batched_chunks:
chunks.extend(batch)
# Check that we got responses
assert len(chunks) > 0
# Validate streaming response
LLMResponseValidator.validate_streaming_chunks(chunks, api_type, max_tokens)
else:
# Collect non-streaming response
chunks = []
async for batch in batched_chunks:
chunks.append(batch)
# Check that we got one response
assert len(chunks) == 1
# Validate non-streaming response
LLMResponseValidator.validate_non_streaming_response(
chunks[0], api_type, max_tokens
)
@pytest.mark.parametrize("dimensions", [None, 512])
@pytest.mark.asyncio
async def test_embedding_llm_server(
self,
serve_handle,
mock_llm_config,
mock_embedding_request,
dimensions: Optional[int],
):
"""Test embedding API from LLMServer perspective."""
# Create embedding request
request = mock_embedding_request
print(f"\n\n_____ EMBEDDING SERVER dimensions={dimensions} _____\n\n")
# Get the response
batched_chunks = serve_handle.embeddings.remote(request)
# Collect responses (should be just one)
chunks = []
async for batch in batched_chunks:
chunks.append(batch)
# Check that we got one response
assert len(chunks) == 1
# Validate embedding response
LLMResponseValidator.validate_embedding_response(chunks[0], dimensions)
@pytest.mark.parametrize("stream", [False, True])
@pytest.mark.parametrize("temperature", [0.0])
@pytest.mark.parametrize("language", ["en", "hi"])
@pytest.mark.asyncio
async def test_transcription_llm_server(
self,
serve_handle,
mock_llm_config,
mock_transcription_request,
stream: bool,
temperature: float,
language: Optional[str],
):
"""Test transcription API from LLMServer perspective."""
# Create transcription request
request = mock_transcription_request
print(
f"\n\n_____ TRANSCRIPTION SERVER ({'STREAMING' if stream else 'NON-STREAMING'}) language={language} temperature={temperature} _____\n\n"
)
# Get the response
batched_chunks = serve_handle.transcriptions.remote(request)
if stream:
# Collect streaming responses
chunks = []
async for batch in batched_chunks:
if isinstance(batch, list):
chunks.extend(batch)
else:
chunks.append(batch)
# Check that we got responses
assert len(chunks) > 0
# Validate streaming response
LLMResponseValidator.validate_transcription_response(
chunks, temperature, language
)
else:
# Collect non-streaming response
chunks = []
async for batch in batched_chunks:
chunks.append(batch)
# Check that we got one response
assert len(chunks) == 1
# Validate non-streaming response
LLMResponseValidator.validate_transcription_response(
chunks[0], temperature, language
)
@pytest.mark.asyncio
async def test_score_llm_server(
self,
serve_handle,
mock_llm_config,
mock_score_request,
):
"""Test score API from LLMServer perspective."""
# Create score request
request = mock_score_request
print("\n\n_____ SCORE SERVER _____\n\n")
# Get the response
batched_chunks = serve_handle.score.remote(request)
# Collect responses (should be just one)
chunks = []
async for batch in batched_chunks:
chunks.append(batch)
# Check that we got one response
assert len(chunks) == 1
# Validate score response
LLMResponseValidator.validate_score_response(chunks[0])
@pytest.mark.asyncio
async def test_check_health(self, mock_llm_config):
"""Test health check functionality."""
# Mock the engine's check_health method
class LocalMockEngine(MockVLLMEngine):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.check_health_called = False
async def check_health(self):
self.check_health_called = True
# Create a server with a mocked engine
server = LLMServer.sync_init(mock_llm_config, engine_cls=LocalMockEngine)
await server.start()
# Perform the health check, no exceptions should be raised
await server.check_health()
# Check that the health check method was called
assert server.engine.check_health_called
@pytest.mark.asyncio
async def test_reset_prefix_cache(self, mock_llm_config):
"""Test reset prefix cache functionality."""
# Mock the engine's reset_prefix_cache method
class LocalMockEngine(MockVLLMEngine):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.reset_prefix_cache_called = False
async def reset_prefix_cache(self):
self.reset_prefix_cache_called = True
# Create a server with a mocked engine
server = LLMServer.sync_init(mock_llm_config, engine_cls=LocalMockEngine)
await server.start()
# Reset prefix cache, no exceptions should be raised
await server.reset_prefix_cache()
# Check that the reset prefix cache method was called
assert server.engine.reset_prefix_cache_called
@pytest.mark.asyncio
async def test_start_profile(self, mock_llm_config):
"""Test start profile functionality."""
# Mock the engine's start_profile method
class LocalMockEngine(MockVLLMEngine):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.start_profile_called = False
async def start_profile(self):
self.start_profile_called = True
# Create a server with a mocked engine
server = LLMServer.sync_init(mock_llm_config, engine_cls=LocalMockEngine)
await server.start()
# Start profile, no exceptions should be raised
await server.start_profile()
# Check that the start profile method was called
assert server.engine.start_profile_called
@pytest.mark.asyncio
async def test_stop_profile(self, mock_llm_config):
"""Test stop profile functionality."""
# Mock the engine's stop_profile method
class LocalMockEngine(MockVLLMEngine):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.stop_profile_called = False
async def stop_profile(self):
self.stop_profile_called = True
# Create a server with a mocked engine
server = LLMServer.sync_init(mock_llm_config, engine_cls=LocalMockEngine)
await server.start()
# Stop profile, no exceptions should be raised
await server.stop_profile()
# Check that the stop profile method was called
assert server.engine.stop_profile_called
@pytest.mark.asyncio
async def test_llm_config_property(self, mock_llm_config):
"""Test the llm_config property."""
server = LLMServer.sync_init(mock_llm_config, engine_cls=MockVLLMEngine)
await server.start()
llm_config = await server.llm_config()
assert isinstance(llm_config, type(mock_llm_config))
@pytest.mark.parametrize("stream", [False])
@pytest.mark.parametrize("max_tokens", [5])
@pytest.mark.asyncio
async def test_request_id_handling(
self,
serve_handle,
mock_llm_config,
mock_chat_request,
stream: bool,
max_tokens: int,
):
"""Test that the request id is handled correctly."""
# Create a chat completion request
# We should patch get_server_request_id to return a test_request_id
serve.context._serve_request_context.set(
serve.context._RequestContext(**{"request_id": "test_request_id"})
)
# Get the response
chunks = []
async for chunk in serve_handle.chat.remote(mock_chat_request):
chunks.append(chunk)
assert len(chunks) == 1
assert chunks[0].id == "test_request_id"
@pytest.mark.parametrize("api_type", ["chat", "completion"])
@pytest.mark.parametrize("stream", [False, True])
@pytest.mark.parametrize("max_tokens", [5])
@pytest.mark.parametrize("stream_batching_interval_ms", [0, 10000])
@pytest.mark.asyncio
async def test_multiplexed_request_handling(
self,
multiplexed_serve_handle,
mock_chat_request,
mock_completion_request,
api_type: str,
stream: bool,
max_tokens: int,
stream_batching_interval_ms: int,
):
"""Unified test for multiplexed (LoRA) requests - both chat and completion APIs, streaming and non-streaming."""
# Create request based on API type and set model ID for multiplexing
if api_type == "chat":
request = mock_chat_request
batched_chunks = multiplexed_serve_handle.chat.remote(request)
elif api_type == "completion":
request = mock_completion_request
batched_chunks = multiplexed_serve_handle.completions.remote(request)
request.model = "test_model_id"
print(
f"\n\n_____ MULTIPLEXED {api_type.upper()} ({'STREAMING' if stream else 'NON-STREAMING'}) max_tokens={max_tokens} batching_interval_ms={stream_batching_interval_ms} _____\n\n"
)
if stream:
# Collect responses from the stream
chunks = []
async for batch in batched_chunks:
if isinstance(batch, list):
chunks.extend(batch)
else:
chunks.append(batch)
# Check that we got responses
assert len(chunks) > 0
# Validate streaming response with LoRA model ID
LLMResponseValidator.validate_streaming_chunks(
chunks, api_type, max_tokens, lora_model_id=request.model
)
else:
# Collect non-streaming response
chunks = []
async for batch in batched_chunks:
if isinstance(batch, list):
chunks.extend(batch)
else:
chunks.append(batch)
# Check that we got one response
assert len(chunks) == 1
# Validate non-streaming response with LoRA model ID
LLMResponseValidator.validate_non_streaming_response(
chunks[0], api_type, max_tokens, lora_model_id=request.model
)
@pytest.mark.asyncio
async def test_push_telemetry(self, mock_llm_config):
"""Test that the telemetry push is called properly."""
with patch(
"ray.llm._internal.serve.core.server.llm_server.push_telemetry_report_for_all_models"
) as mock_push_telemetry:
server = LLMServer.sync_init(mock_llm_config, engine_cls=MockVLLMEngine)
await server.start()
mock_push_telemetry.assert_called_once()
@pytest.mark.parametrize("api_type", ["chat", "completions"])
@pytest.mark.parametrize("stream", [True])
@pytest.mark.parametrize("max_tokens", [64])
@pytest.mark.parametrize("concurrency", [1, 16])
@pytest.mark.parametrize("stream_batching_interval_ms", [0])
@pytest.mark.asyncio
async def test_stable_streaming_tpot(
self,
serve_handle,
mock_llm_config,
mock_chat_request,
mock_completion_request,
api_type: str,
stream: bool,
max_tokens: int,
concurrency: int,
stream_batching_interval_ms: int,
):
"""Test that the streaming TPOT is stable when batching is disabled."""
# Create request based on API type
if api_type == "chat":
request = mock_chat_request
elif api_type == "completions":
request = mock_completion_request
batched_chunks: list[AsyncGenerator] = [
getattr(serve_handle, api_type).remote(request) for _ in range(concurrency)
]
print(
f"\n\n_____ {api_type.upper()} ({'STREAMING' if stream else 'NON-STREAMING'}) max_tokens={max_tokens} batching_interval_ms={stream_batching_interval_ms} _____\n\n"
)
# Collect responses from llm_server
tpots_ms = await asyncio.gather(
*[
count_tpot_ms_from_stream(server_stream)
for server_stream in batched_chunks
]
)
mean_llm_server = np.mean(tpots_ms)
std_var_llm_server = np.std(tpots_ms)
# Run same request with vllm engine
vllm_engine = MockVLLMEngine(llm_config=mock_llm_config)
await vllm_engine.start()
engine_streams: list[AsyncGenerator] = [
getattr(vllm_engine, api_type)(request) for _ in range(concurrency)
]
tpots_ms_engine = await asyncio.gather(
*[
count_tpot_ms_from_stream(engine_stream)
for engine_stream in engine_streams
]
)
mean_engine = np.mean(tpots_ms_engine)
std_var_engine = np.std(tpots_ms_engine)
assert np.isclose(
mean_llm_server, mean_engine, rtol=0.1
), f"{mean_llm_server=}, {mean_engine=}"
assert np.isclose(
std_var_llm_server, std_var_engine, atol=1.0
), f"{std_var_llm_server=}, {std_var_engine=}"
| TestLLMServer |
python | tensorflow__tensorflow | tensorflow/python/framework/constant_op.py | {
"start": 14701,
"end": 15906
} | class ____:
"""Codec for Tensor."""
def can_encode(self, pyobj):
return isinstance(pyobj, tensor_lib.Tensor)
def do_encode(self, tensor_value, encode_fn):
"""Returns an encoded `TensorProto` for the given `tf.Tensor`."""
del encode_fn
encoded_tensor = struct_pb2.StructuredValue()
if isinstance(tensor_value, ops.EagerTensor):
encoded_tensor.tensor_value.CopyFrom(
tensor_util.make_tensor_proto(tensor_value.numpy())
)
else:
if tensor_value.op.type == "Const":
encoded_tensor.tensor_value.CopyFrom(tensor_value.op.get_attr("value"))
else:
raise nested_structure_coder.NotEncodableError(
f"No encoder for object {str(tensor_value)} of type"
f" {type(tensor_value)}."
)
return encoded_tensor
def can_decode(self, value):
return value.HasField("tensor_value")
def do_decode(self, value, decode_fn):
"""Returns the `tf.Tensor` encoded by the proto `value`."""
del decode_fn
tensor_proto = value.tensor_value
tensor = constant(tensor_util.MakeNdarray(tensor_proto))
return tensor
nested_structure_coder.register_codec(_ConstantTensorCodec())
| _ConstantTensorCodec |
python | realpython__materials | python-guitar-synthesizer/source_code_final/demo/play_diablo.py | {
"start": 663,
"end": 772
} | class ____:
BEAT = Time(seconds=60 / BEATS_PER_MINUTE)
MEASURE = BEAT * BEATS_PER_MEASURE
| MeasureTiming |
python | sqlalchemy__sqlalchemy | test/orm/test_versioning.py | {
"start": 52098,
"end": 54326
} | class ____(fixtures.MappedTest):
run_define_tables = "each"
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"a",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
Column("vid", Integer),
)
@classmethod
def setup_classes(cls):
class A(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
cls.mapper_registry.map_imperatively(
cls.classes.A,
cls.tables.a,
version_id_col=cls.tables.a.c.vid,
version_id_generator=False,
)
def test_insert(self):
sess = fixture_session()
a1 = self.classes.A()
a1.vid = 1
sess.add(a1)
sess.commit()
eq_(a1.vid, 1)
def test_update(self):
sess = fixture_session()
a1 = self.classes.A()
a1.vid = 1
a1.data = "d1"
sess.add(a1)
sess.commit()
a1.vid = 2
a1.data = "d2"
with conditional_sane_rowcount_warnings(update=True):
sess.commit()
eq_(a1.vid, 2)
@testing.requires.sane_rowcount
def test_update_concurrent_check(self):
sess = fixture_session()
a1 = self.classes.A()
a1.vid = 1
a1.data = "d1"
sess.add(a1)
sess.commit()
a1.vid = 2
sess.execute(self.tables.a.update().values(vid=3))
a1.data = "d2"
assert_raises(orm_exc.StaleDataError, sess.commit)
def test_update_version_conditional(self):
sess = fixture_session()
a1 = self.classes.A()
a1.vid = 1
a1.data = "d1"
sess.add(a1)
sess.commit()
# change the data and UPDATE without
# incrementing version id
a1.data = "d2"
with conditional_sane_rowcount_warnings(update=True):
sess.commit()
eq_(a1.vid, 1)
a1.data = "d3"
a1.vid = 2
with conditional_sane_rowcount_warnings(update=True):
sess.commit()
eq_(a1.vid, 2)
| ManualVersionTest |
python | great-expectations__great_expectations | tests/integration/conftest.py | {
"start": 10614,
"end": 14000
} | class ____:
comparison: DataSourceTestConfig
base: DataSourceTestConfig
def multi_source_batch_setup(
multi_source_test_configs: list[MultiSourceTestConfig],
base_data: pd.DataFrame,
comparison_data: pd.DataFrame,
) -> Callable[[_F], _F]:
def decorator(func: _F) -> _F:
pytest_params = []
for multi_source_test_config in multi_source_test_configs:
pytest_params.append(
pytest.param(
TestConfig(
data_source_config=multi_source_test_config.base,
data=base_data,
extra_data={},
secondary_source_config=multi_source_test_config.comparison,
secondary_data=comparison_data,
),
id=f"{multi_source_test_config.comparison.test_id}->{multi_source_test_config.base.test_id}",
marks=_get_multi_source_marks(multi_source_test_config),
)
)
parameterize_decorator = pytest.mark.parametrize(
_batch_setup_for_datasource.__name__,
pytest_params,
indirect=True,
)
return parameterize_decorator(func)
return decorator
def _get_multi_source_marks(multi_source_test_config: MultiSourceTestConfig) -> list[MarkDecorator]:
if multi_source_test_config.base.pytest_mark == multi_source_test_config.comparison.pytest_mark:
return [multi_source_test_config.base.pytest_mark]
# our test setup restricts us to testing a single backend at a time.
# sqlite doesn't require any extra setup, so it's an exception.
marks = [
mark
for mark in [
multi_source_test_config.comparison.pytest_mark,
multi_source_test_config.base.pytest_mark,
]
if mark != pytest.mark.sqlite
]
if len(marks) == 1:
return marks
elif not marks:
return [pytest.mark.sqlite]
else:
raise ValueError(
"MultiSourceBatch tests must either use the same backend or include sqlite."
)
@pytest.fixture(scope="session")
def session_sql_engine_manager():
logger.info("SessionSqlEngineManager: Starting setup.")
manager = SessionSQLEngineManager()
yield manager
logger.info("SessionSqlEngineManager: Starting teardown.")
pre_cleanup_stats = manager.get_all_pool_statistics()
# We temporarily log a warning so we can see this in the pytest output without turning on info
# logging across the whole test run.
logger.warning(
"SessionSqlEngineManager: Pool statistics before explicit cleanup:\n"
f"{pprint.pformat(pre_cleanup_stats)}"
)
# Check for any immediately obvious issues before cleanup
for key, stat in pre_cleanup_stats.items():
if "error" not in stat and stat.get("checked_out", 0) > 0:
logger.warning(
f"SessionSqlEngineManager: Engine {key} has {stat['checked_out']} connections "
"still checked out BEFORE manager disposal."
)
manager.dispose_all_engines()
logger.info("SessionSqlEngineManager: All engines disposed by manager.")
assert not manager._engine_cache, "Engine cache should be empty after dispose_all_engines."
logger.info("SessionSqlEngineManager: Teardown complete.")
| MultiSourceTestConfig |
python | dagster-io__dagster | python_modules/libraries/dagster-celery/dagster_celery/config.py | {
"start": 375,
"end": 646
} | class ____:
"""Wraps a dict to convert `obj['attr']` to `obj.attr`."""
def __init__(self, dictionary):
self.__dict__ = dictionary
TASK_EXECUTE_PLAN_NAME = "execute_plan"
TASK_EXECUTE_JOB_NAME = "execute_job"
TASK_RESUME_JOB_NAME = "resume_job"
| dict_wrapper |
python | scipy__scipy | scipy/linalg/_matfuncs_inv_ssq.py | {
"start": 681,
"end": 820
} | class ____(np.linalg.LinAlgError):
pass
#TODO renovate or move this class when scipy operators are more mature
| FractionalMatrixPowerError |
python | ray-project__ray | rllib/core/learner/tests/test_learner.py | {
"start": 523,
"end": 9363
} | class ____(unittest.TestCase):
ENV = gym.make("CartPole-v1")
@classmethod
def setUp(cls) -> None:
ray.init()
@classmethod
def tearDown(cls) -> None:
ray.shutdown()
def test_end_to_end_update(self):
config = BaseTestingAlgorithmConfig()
learner = config.build_learner(env=self.ENV)
reader = get_cartpole_dataset_reader(batch_size=512)
min_loss = float("inf")
for iter_i in range(1000):
batch = reader.next().as_multi_agent()
batch = learner._convert_batch_type(batch)
results = learner.update(batch=batch)
loss = results[DEFAULT_MODULE_ID][Learner.TOTAL_LOSS_KEY].peek()
min_loss = min(loss, min_loss)
print(f"[iter = {iter_i}] Loss: {loss:.3f}, Min Loss: {min_loss:.3f}")
self.assertLess(min_loss, 0.58)
def test_compute_gradients(self):
"""Tests the compute_gradients correctness.
Tests that if we sum all the trainable variables the gradient of output w.r.t.
the weights is all ones.
"""
config = BaseTestingAlgorithmConfig()
learner = config.build_learner(env=self.ENV)
params = learner.get_parameters(learner.module[DEFAULT_MODULE_ID])
tape = None
loss_per_module = {ALL_MODULES: sum(param.sum() for param in params)}
gradients = learner.compute_gradients(loss_per_module, gradient_tape=tape)
# Type should be a mapping from ParamRefs to gradients.
self.assertIsInstance(gradients, dict)
for grad in gradients.values():
check(grad, np.ones(grad.shape))
def test_postprocess_gradients(self):
"""Tests the base grad clipping logic in `postprocess_gradients()`."""
# Clip by value only.
config = BaseTestingAlgorithmConfig().training(
lr=0.0003, grad_clip=0.75, grad_clip_by="value"
)
learner = config.build_learner(env=self.ENV)
# Pretend our computed gradients are our weights + 1.0.
grads = {
learner.get_param_ref(v): v + 1.0
for v in learner.get_parameters(learner.module[DEFAULT_MODULE_ID])
}
# Call the learner's postprocessing method.
processed_grads = list(learner.postprocess_gradients(grads).values())
# Check clipped gradients.
# No single gradient must be larger than 0.1 or smaller than -0.1:
self.assertTrue(
all(
np.max(grad) <= config.grad_clip and np.min(grad) >= -config.grad_clip
for grad in convert_to_numpy(processed_grads)
)
)
# Clip by norm.
config.grad_clip = 1.0
config.grad_clip_by = "norm"
learner = config.build_learner(env=self.ENV)
# Pretend our computed gradients are our weights + 1.0.
grads = {
learner.get_param_ref(v): v + 1.0
for v in learner.get_parameters(learner.module[DEFAULT_MODULE_ID])
}
# Call the learner's postprocessing method.
processed_grads = list(learner.postprocess_gradients(grads).values())
# Check clipped gradients.
for proc_grad, grad in zip(
convert_to_numpy(processed_grads),
convert_to_numpy(list(grads.values())),
):
l2_norm = np.sqrt(np.sum(grad**2.0))
if l2_norm > config.grad_clip:
check(proc_grad, grad * (config.grad_clip / l2_norm))
# Clip by global norm.
config.grad_clip = 5.0
config.grad_clip_by = "global_norm"
learner = config.build_learner(env=self.ENV)
# Pretend our computed gradients are our weights + 1.0.
grads = {
learner.get_param_ref(v): v + 1.0
for v in learner.get_parameters(learner.module[DEFAULT_MODULE_ID])
}
# Call the learner's postprocessing method.
processed_grads = list(learner.postprocess_gradients(grads).values())
# Check clipped gradients.
global_norm = np.sqrt(
np.sum(
np.sum(grad**2.0) for grad in convert_to_numpy(list(grads.values()))
)
)
if global_norm > config.grad_clip:
for proc_grad, grad in zip(
convert_to_numpy(processed_grads),
grads.values(),
):
check(proc_grad, grad * (config.grad_clip / global_norm))
def test_apply_gradients(self):
"""Tests the apply_gradients correctness.
Tests that if we apply gradients of all ones, the new params are equal to the
standard SGD/Adam update rule.
"""
config = BaseTestingAlgorithmConfig().training(lr=0.0003)
learner = config.build_learner(env=self.ENV)
# calculated the expected new params based on gradients of all ones.
params = learner.get_parameters(learner.module[DEFAULT_MODULE_ID])
n_steps = 100
expected = [
(
convert_to_numpy(param)
- n_steps * learner.config.lr * np.ones(param.shape)
)
for param in params
]
for _ in range(n_steps):
gradients = {learner.get_param_ref(p): torch.ones_like(p) for p in params}
learner.apply_gradients(gradients)
check(params, expected)
def test_add_remove_module(self):
"""Tests the compute/apply_gradients with add/remove modules.
Tests that if we add a module with SGD optimizer with a known lr (different
from default), and remove the default module, with a loss that is the sum of
all variables the updated parameters follow the SGD update rule.
"""
config = BaseTestingAlgorithmConfig().training(lr=0.0003)
learner = config.build_learner(env=self.ENV)
rl_module_spec = config.get_default_rl_module_spec()
rl_module_spec.observation_space = self.ENV.observation_space
rl_module_spec.action_space = self.ENV.action_space
learner.add_module(
module_id="test",
module_spec=rl_module_spec,
)
learner.remove_module(DEFAULT_MODULE_ID)
# only test module should be left
self.assertEqual(set(learner.module.keys()), {"test"})
# calculated the expected new params based on gradients of all ones.
params = learner.get_parameters(learner.module["test"])
n_steps = 100
expected = [
convert_to_numpy(param) - n_steps * learner.config.lr * np.ones(param.shape)
for param in params
]
for _ in range(n_steps):
tape = None
loss_per_module = {ALL_MODULES: sum(param.sum() for param in params)}
gradients = learner.compute_gradients(loss_per_module, gradient_tape=tape)
learner.apply_gradients(gradients)
check(params, expected)
def test_save_to_path_and_restore_from_path(self):
"""Tests, whether a Learner's state is properly saved and restored."""
config = BaseTestingAlgorithmConfig()
# Get a Learner instance for the framework and env.
learner1 = config.build_learner(env=self.ENV)
with tempfile.TemporaryDirectory() as tmpdir:
learner1.save_to_path(tmpdir)
learner2 = config.build_learner(env=self.ENV)
learner2.restore_from_path(tmpdir)
self._check_learner_states("torch", learner1, learner2)
# Add a module then save/load and check states.
with tempfile.TemporaryDirectory() as tmpdir:
rl_module_spec = config.get_default_rl_module_spec()
rl_module_spec.observation_space = self.ENV.observation_space
rl_module_spec.action_space = self.ENV.action_space
learner1.add_module(
module_id="test",
module_spec=rl_module_spec,
)
learner1.save_to_path(tmpdir)
learner2 = Learner.from_checkpoint(tmpdir)
self._check_learner_states("torch", learner1, learner2)
# Remove a module then save/load and check states.
with tempfile.TemporaryDirectory() as tmpdir:
learner1.remove_module(module_id=DEFAULT_MODULE_ID)
learner1.save_to_path(tmpdir)
learner2 = Learner.from_checkpoint(tmpdir)
self._check_learner_states("torch", learner1, learner2)
def _check_learner_states(self, framework, learner1, learner2):
check(learner1.module.get_state(), learner2.module.get_state())
check(learner1._get_optimizer_state(), learner2._get_optimizer_state())
check(learner1._module_optimizers, learner2._module_optimizers)
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| TestLearner |
python | doocs__leetcode | solution/0600-0699/0689.Maximum Sum of 3 Non-Overlapping Subarrays/Solution.py | {
"start": 0,
"end": 828
} | class ____:
def maxSumOfThreeSubarrays(self, nums: List[int], k: int) -> List[int]:
s = s1 = s2 = s3 = 0
mx1 = mx12 = 0
idx1, idx12 = 0, ()
ans = []
for i in range(k * 2, len(nums)):
s1 += nums[i - k * 2]
s2 += nums[i - k]
s3 += nums[i]
if i >= k * 3 - 1:
if s1 > mx1:
mx1 = s1
idx1 = i - k * 3 + 1
if mx1 + s2 > mx12:
mx12 = mx1 + s2
idx12 = (idx1, i - k * 2 + 1)
if mx12 + s3 > s:
s = mx12 + s3
ans = [*idx12, i - k + 1]
s1 -= nums[i - k * 3 + 1]
s2 -= nums[i - k * 2 + 1]
s3 -= nums[i - k + 1]
return ans
| Solution |
python | Textualize__textual | src/textual/_on.py | {
"start": 287,
"end": 442
} | class ____(Exception):
"""Errors related to the `on` decorator.
Typically raised at import time as an early warning system.
"""
| OnDecoratorError |
python | coleifer__peewee | tests/migrations.py | {
"start": 912,
"end": 1077
} | class ____(TestModel):
id = CharField(primary_key=True, max_length=20)
password = CharField(default='secret')
class Meta:
table_name = 'users'
| User |
python | numba__numba | numba/core/datamodel/models.py | {
"start": 28194,
"end": 28450
} | class ____(StructModel):
def __init__(self, dmm, fe_type):
members = [
('parent', fe_type.array_type),
]
super(ArrayFlagsModel, self).__init__(dmm, fe_type, members)
@register_default(types.NestedArray)
| ArrayFlagsModel |
python | doocs__leetcode | solution/2700-2799/2779.Maximum Beauty of an Array After Applying Operation/Solution.py | {
"start": 0,
"end": 244
} | class ____:
def maximumBeauty(self, nums: List[int], k: int) -> int:
m = max(nums) + k * 2 + 2
d = [0] * m
for x in nums:
d[x] += 1
d[x + k * 2 + 1] -= 1
return max(accumulate(d))
| Solution |
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/ruff/range_formatting/regressions.py | {
"start": 0,
"end": 340
} | class ____:
event_name: ClassVar[str]
@staticmethod
def cls_for(event_name: str) -> type[Event]:
event_cls = _CONCRETE_EVENT_CLASSES.get(event_name)
if event_cls is not <RANGE_START>None:
return event_cls<RANGE_END>
else:
raise ValueError(f"unknown event name '{event_name}'")
| Event |
python | kamyu104__LeetCode-Solutions | Python/smallest-index-with-equal-value.py | {
"start": 29,
"end": 232
} | class ____(object):
def smallestEqual(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return next((i for i, x in enumerate(nums) if i%10 == x), -1)
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-appsflyer/source_appsflyer/source.py | {
"start": 968,
"end": 4826
} | class ____(HttpStream, ABC):
primary_key = None
additional_fields = ()
maximum_rows = 1_000_000
transformer: TypeTransformer = TypeTransformer(TransformConfig.DefaultSchemaNormalization | TransformConfig.CustomSchemaNormalization)
def __init__(
self, app_id: str, api_token: str, timezone: str, start_date: Union[date, str] = None, end_date: Union[date, str] = None, **kwargs
):
super().__init__(**kwargs)
self.app_id = app_id
self.api_token = api_token
self.start_date = start_date
self.end_date = end_date
self.timezone = pendulum.timezone(timezone)
@property
def url_base(self) -> str:
return "https://hq1.appsflyer.com/api/"
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None
def request_params(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
params = {
"from": pendulum.yesterday(self.timezone).to_date_string(),
"to": pendulum.today(self.timezone).to_date_string(),
"timezone": self.timezone.name,
"maximum_rows": self.maximum_rows,
}
if self.additional_fields:
additional_fields = ",".join(self.additional_fields)
params["additional_fields"] = additional_fields
return params
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
csv_data = map(lambda x: x.decode("utf-8"), response.iter_lines())
reader = csv.DictReader(csv_data)
known_keys = mapper.field_map.keys()
for record in reader:
yield {mapper.field_map[k]: v for k, v in record.items() if k in known_keys}
def is_aggregate_reports_reached_limit(self, response: requests.Response) -> bool:
template = "Limit reached for "
is_forbidden = response.status_code == HTTPStatus.FORBIDDEN
is_template_match = template in response.text
return is_forbidden and is_template_match
def is_raw_data_reports_reached_limit(self, response: requests.Response) -> bool:
template = "Your API calls limit has been reached for report type"
is_bad_request = response.status_code == HTTPStatus.BAD_REQUEST
is_template_match = template in response.text
return is_bad_request and is_template_match
def should_retry(self, response: requests.Response) -> bool:
is_aggregate_reports_reached_limit = self.is_aggregate_reports_reached_limit(response)
is_raw_data_reports_reached_limit = self.is_raw_data_reports_reached_limit(response)
is_rejected = is_aggregate_reports_reached_limit or is_raw_data_reports_reached_limit
return is_rejected or super().should_retry(response)
def backoff_time(self, response: requests.Response) -> Optional[float]:
if self.is_raw_data_reports_reached_limit(response):
now = pendulum.now("UTC")
midnight = pendulum.tomorrow("UTC")
wait_time = (midnight - now).seconds
elif self.is_aggregate_reports_reached_limit(response):
wait_time = 60
else:
return super().backoff_time(response)
logging.getLogger("airbyte").log(logging.INFO, f"Rate limit exceeded. Retry in {wait_time} seconds.")
return wait_time
@transformer.registerCustomTransform
def transform_function(original_value: Any, field_schema: Dict[str, Any]) -> Any:
if original_value == "" or original_value == "N/A" or original_value == "NULL":
return None
if isinstance(original_value, float):
return Decimal(original_value)
return original_value
# Basic incremental stream
| AppsflyerStream |
python | django__django | django/db/models/lookups.py | {
"start": 26887,
"end": 26996
} | class ____(YearLookup, LessThan):
def get_bound_params(self, start, finish):
return (start,)
| YearLt |
python | pytorch__pytorch | test/inductor/test_kernel_benchmark.py | {
"start": 676,
"end": 17838
} | class ____(TestCase):
device_type = GPU_TYPE
# to make sure the subprocess runs on the exact same path as the parent process
# we augment the PYTHONPATH env var
python_path = ""
@classmethod
def setUpClass(cls):
cls.exit_stack = contextlib.ExitStack()
cls.exit_stack.enter_context(patch.object(config, "benchmark_kernel", True))
# setup the augmented PYTHONPATH to pass to the subprocess calls
augmented_pp = ":".join(sys.path)
if os.environ.get("PYTHONPATH"):
augmented_pp = f"{os.environ.get('PYTHONPATH')}:{augmented_pp}"
cls.python_path = augmented_pp
@classmethod
def tearDownClass(cls):
cls.exit_stack.close()
def setUp(self):
super().setUp()
PyCodeCache.cache_clear()
def get_compiled_module(self):
compiled_module = None
for v in PyCodeCache.modules:
if hasattr(v, "benchmark_compiled_module"):
self.assertTrue(
compiled_module is None, "Found multiple compiled modules"
)
compiled_module = v
self.assertTrue(compiled_module is not None)
return compiled_module
def verify_compiled_kernels(self, GB_count=1):
compiled_module = self.get_compiled_module()
# now run the compiled module in subprocess and check its output
try:
bench_out = subprocess.check_output(
f"{sys.executable} {compiled_module.__file__} -kc".split(),
stderr=subprocess.STDOUT,
env={**os.environ, "PYTHONPATH": self.python_path},
).decode()
except subprocess.CalledProcessError as e:
print("Failed when running output code", e)
print(e.output.decode())
raise e
# make sure we have the bandwidth information in the output
FileCheck().check_count(
"GB/s",
GB_count,
exactly=1,
).run(bench_out)
def verify_remove_inductor_deps(self, compiled_module):
try:
out = subprocess.check_output(
f"{sys.executable} {compiled_module.__file__}".split(),
env={
**os.environ.copy(),
"TORCHINDUCTOR_DUMP_LAUNCH_PARAMS": "1",
"PYTHONPATH": self.python_path,
},
stderr=subprocess.STDOUT,
)
except subprocess.CalledProcessError as e:
print(
"Failed when runinng triton code with TORCHINDUCTOR_DUMP_LAUNCH_PARAMS=1",
e,
)
print(e.output.decode())
raise e
from torch.utils._get_clean_triton import get_clean_triton
cleaned_triton = get_clean_triton(
compiled_module.__file__, f"{compiled_module.__file__}.cleaned"
)
self.assertTrue("@triton_heuristics" not in cleaned_triton)
self.assertTrue(".run(" not in cleaned_triton)
try:
out = subprocess.check_output(
f"{sys.executable} {compiled_module.__file__}.cleaned".split(),
stderr=subprocess.STDOUT,
env={**os.environ, "PYTHONPATH": self.python_path},
)
except subprocess.CalledProcessError as e:
print("Failed when when running cleaned triton", e)
print(e.output.decode())
print(cleaned_triton)
raise e
return cleaned_triton
def check_bandwidth(self, compiled_module, num_gb):
# now run the compiled module in subprocess and check its output
try:
bench_out = subprocess.check_output(
f"{sys.executable} {compiled_module.__file__} -k".split(),
stderr=subprocess.STDOUT,
env={**os.environ, "PYTHONPATH": self.python_path},
).decode()
except subprocess.CalledProcessError as e:
print("Failed when running output code", e)
print(e.output.decode())
raise e
# make sure we have the bandwidth information in the output
FileCheck().check_count(
f"{num_gb} GB ",
1,
exactly=1,
).run(bench_out)
def test_pw_kernel_benchmark(self):
@torch.compile
def f(x):
return torch.sin(x) + torch.cos(x)
inp = torch.rand(2, 3).to(device=GPU_TYPE)
out = f(inp)
self.verify_compiled_kernels()
# TODO: Currently the Triton mm template + relu fusion causes slowdown on XPU,
# Need to refine the template and config for XPU.
@config.patch(
max_autotune=True, max_autotune_gemm_backends="TRITON", force_shape_pad=True
)
@unittest.skipIf(
not IS_BIG_GPU, "Skipping triton backend only since not big GPU (not enough SM)"
)
@fresh_cache()
def test_matmul_triton_kernel_benchmark(self):
M = 12544
N = 256
K = 64
a = torch.rand(M, K, dtype=torch.float16, device=GPU_TYPE)
b = torch.rand(N, K, dtype=torch.float16, device=GPU_TYPE).t()
@torch.compile
def f(a, b):
return torch.relu(a @ b)
f(a, b)
self.verify_compiled_kernels()
@config.patch(
max_autotune=True, max_autotune_gemm_backends="TRITON", shape_padding=False
)
@fresh_cache()
def test_mm_triton_kernel_benchmark(self):
M = 2048
N = 2432
K = 1949
K_2 = 3581
a = rand_strided((M, K_2), (K_2, 1), device=GPU_TYPE, dtype=torch.float16)
b = rand_strided((K, N), (1, K), device=GPU_TYPE, dtype=torch.float16)
@torch.compile
def f(a, b):
a_1 = torch.narrow(a, 1, 0, K)
c = torch.mm(a_1, b)
return c
f(a, b)
self.verify_compiled_kernels(GB_count=1)
def test_matmul_bandwidth_computation(self):
"""
The test does a matmul and then mul. Without max-autotune, we use
the matmul in aten. So there is a single triton kernel for mul.
The kernel we generated is like:
@triton.jit
def triton_(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
Note the in_out_ptr0 argument. It's for a 1000x1000 tensor, but it's
inplace updated, so when computing the bandwidth, we should count
the total memory access as 2 * 1000 * 1000 * 4 = 8MB. This amount is
what this test asserts.
"""
torch.set_float32_matmul_precision("high") # suggested by a warning
@torch.compile
def f(x, y):
z = x @ y
w = z * z
return w
M, N, K = 1000, 1000, 10
x = torch.rand(M, K).to(device=GPU_TYPE)
y = torch.rand(K, N).to(device=GPU_TYPE)
out = f(x, y)
compiled_module = self.get_compiled_module()
self.check_bandwidth(compiled_module, 0.008)
def test_unused_input_bandwidth_computation(self):
M, N = 5, 1000000
@torch.compile
def f(a, b, c):
return a + c
a = torch.rand(M, N, dtype=torch.float16, device=GPU_TYPE)
b = torch.rand(M, N, dtype=torch.float16, device=GPU_TYPE)
c = torch.rand(M, N, dtype=torch.float16, device=GPU_TYPE)
torch._dynamo.mark_dynamic(a, 0)
torch._dynamo.mark_dynamic(b, 0)
torch._dynamo.mark_dynamic(c, 0)
inputs = (a, b, c)
out = f(*inputs)
compiled_module = self.get_compiled_module()
# num_gb = size_a + size_c + size_out
# num_gb = (5 * 1000000 + 5 * 1000000 + 5 * 1000000) * 2 / 1e9
# = 0.030
self.check_bandwidth(compiled_module, "0.030")
def test_reduction_bandwidth_computation(self):
@torch.compile
def f(a):
return torch.sum(a, dim=1)
a = torch.rand(1000, 20, 1000, dtype=torch.float16, device=GPU_TYPE)
inputs = (a,)
out = f(*inputs)
compiled_module = self.get_compiled_module()
# num_gb = size_a + size_out
# num_gb = (1000 * 20 * 1000 + 1000 * 1000) * 2 / 1e9
# = 0.042
self.check_bandwidth(compiled_module, "0.042")
@config.patch(max_autotune=True)
def test_fused_layernorm_bandwidth_computation(self):
M, N = 10, 1000000
@torch.compile
def f(a, b, c, d):
x0 = a + b
x1 = torch.nn.functional.layer_norm(
x0, normalized_shape=(N,), weight=c, bias=d, eps=1e-05
)
x2 = torch.sigmoid(x1)
return x0 * x2
a = torch.rand(M, N, dtype=torch.float16, device=GPU_TYPE)
b = torch.rand(N, dtype=torch.float16, device=GPU_TYPE)
c = torch.rand(N, dtype=torch.float16, device=GPU_TYPE)
d = torch.rand(N, dtype=torch.float16, device=GPU_TYPE)
inputs = (a, b, c, d)
out = f(*inputs)
compiled_module = self.get_compiled_module()
# num_gb = size_a + size_b + size_c + size_d + size_out
# num_gb = (10 * 1000000 + 1000000 + 1000000 + 1000000 + 10 * 1000000) * 2 / 1e9
# = 0.046
self.check_bandwidth(compiled_module, "0.046")
def test_slice_add_cat_bandwidth_computation(self):
M, N = 5, 1000000
@torch.compile
def f(a, b, c):
x0 = torch.narrow(b, 1, N, N)
# broadcasting
x1 = x0 + c
return torch.cat([a, x1], dim=1)
a = torch.rand(M, N, dtype=torch.float16, device=GPU_TYPE)
b = torch.rand(M, N * 5, dtype=torch.float16, device=GPU_TYPE)
c = torch.rand(N, dtype=torch.float16, device=GPU_TYPE)
torch._dynamo.mark_dynamic(a, 0)
torch._dynamo.mark_dynamic(b, 0)
inputs = (a, b, c)
out = f(*inputs)
compiled_module = self.get_compiled_module()
# we overestimate the size of "slice_b" due to torch.cat
# num_gp = size_a + size_slice_b + size_c + size_out
# num_gb = (5 * 1000000 + 5 * 2000000 + 1000000 + 5 * 2000000) * 2 / 1e9
# = 0.052
self.check_bandwidth(compiled_module, "0.052")
def test_slice_add_bandwidth_computation(self):
M, N = 5, 1000000
@torch.compile
def f(a, b, c):
x0 = torch.narrow(b, 1, N, N)
return a + x0 + c
a = torch.rand(M, N, dtype=torch.float16, device=GPU_TYPE)
b = torch.rand(M, N * 5, dtype=torch.float16, device=GPU_TYPE)
c = torch.rand(N, dtype=torch.float16, device=GPU_TYPE)
torch._dynamo.mark_dynamic(a, 0)
torch._dynamo.mark_dynamic(b, 0)
inputs = (a, b, c)
out = f(*inputs)
compiled_module = self.get_compiled_module()
# num_gb = size_a + size_slice_b + size_c + out_size
# num_gb = (5 * 1000000 + 5 * 1000000 + 1000000 + 5 * 1000000) * 2 / 1e9
# = 0.032
self.check_bandwidth(compiled_module, "0.032")
def test_mm_slice_add_bandwidth_computation(self):
M, N, K = 1000, 1000, 30
@torch.compile
def f(a, b, c):
x0 = torch.mm(a, b)
x1 = torch.narrow(c, 1, 20 * N, N)
x2 = torch.narrow(c, 1, 21 * N, N)
return x0 + x1 + x2
a = torch.rand(M, K, dtype=torch.float16, device=GPU_TYPE)
b = torch.rand(K, N, dtype=torch.float16, device=GPU_TYPE)
c = torch.rand(N, N * 100, dtype=torch.float16, device=GPU_TYPE)
inputs = (a, b, c)
out = f(*inputs)
compiled_module = self.get_compiled_module()
# torch.mm becomes an extern kernel, so we measure the nbytes
# for the pointwise add kernel:
# num_gb = x0 + 2 * size_slice_c + size_out
# num_gb = (1000 * 1000 + 2 * 1000 * 1000 + 1000 * 1000) * 2/ 1e9
# = 0.008
num_gb = "0.008"
self.check_bandwidth(compiled_module, num_gb)
def test_mm_slice_add_bandwidth_computation_2(self):
M, N, K = 1000, 1000, 30
@torch.compile
def f(a, b, c):
x0 = torch.mm(a, b)
x1 = torch.narrow(c, 1, 20 * N, N)
x2 = torch.narrow(c, 1, 20 * N, N)
return x0 + x1 + x2
a = torch.rand(M, K, dtype=torch.float16, device=GPU_TYPE)
b = torch.rand(K, N, dtype=torch.float16, device=GPU_TYPE)
c = torch.rand(N, N * 100, dtype=torch.float16, device=GPU_TYPE)
inputs = (a, b, c)
out = f(*inputs)
compiled_module = self.get_compiled_module()
# torch.mm becomes an extern kernel, so we measure the nbytes
# for the pointwise add kernel:
# num_gb = x0 + size_slice_c + size_out
# num_gb = (1000 * 1000 + 1000 * 1000 + 1000 * 1000) * 2 / 1e9
# = 0.006
# note that we only count one size_slice_c because two accesses
# have the same index.
self.check_bandwidth(compiled_module, "0.006")
@xfailIfSM89
@config.patch(
max_autotune=True, max_autotune_gemm_backends="TRITON", force_shape_pad=True
)
def test_slice_mm_bandwidth_computation(self):
if GPU_TYPE == "xpu" and not torch._inductor.utils.is_big_gpu():
raise unittest.SkipTest("unsupported device")
M, N, K = 1000, 2000, 3000
@torch.compile
def f(a, b):
x = torch.narrow(a, 1, K, K)
return torch.mm(x, b)
a = torch.rand(M, 3 * K, dtype=torch.float16, device=GPU_TYPE)
b = torch.rand(K, N, dtype=torch.float16, device=GPU_TYPE)
torch._dynamo.mark_dynamic(a, 0)
inputs = (a, b)
out = f(*inputs)
compiled_module = self.get_compiled_module()
# c[1000, 2000] = x[1000, 3000] @ b[3000, 2000]
# num_gb = (1000 * 2000 + 1000 * 3000 + 3000 * 2000) * 2 / 1e9
# = 0.022
self.check_bandwidth(compiled_module, "0.022")
def test_star_dep(self):
"""
Test the bandwidth estimation for StarDep
"""
@torch.compile
def f(a, b):
a[b] = 3.0
a = torch.rand(10000, 5000, device=GPU_TYPE)
b = torch.randint(
0, 10000, [20000], device=GPU_TYPE, dtype=torch.int32
).unsqueeze(1)
f(a, b)
compiled_module = self.get_compiled_module()
# 20000 * 4 = 80KB for b
# 20000 * 5000 * 4 = 200MB for a
self.check_bandwidth(compiled_module, "0.200")
def test_split_scan(self):
@torch.compile
def f(a):
return a.cumsum(-1)
a = torch.rand(10000, 5000, device=GPU_TYPE)
f(a.reshape(-1))
compiled_module = self.get_compiled_module()
# 10000 * 5000 * 4 = 200 MB for a
# Double that for output as well
self.check_bandwidth(compiled_module, "0.400")
@config.patch("triton.unique_kernel_names", True)
@config.patch(benchmark_kernel=False)
@config.patch(compile_threads=1)
def test_remove_inductor_deps(self):
@torch.compile
def f(a):
return a.cos().sin()
a = torch.randn(5, device=GPU_TYPE)
f(a)
compiled_module = self.get_compiled_module()
cleaned_triton = self.verify_remove_inductor_deps(compiled_module)
@config.patch("triton.unique_kernel_names", True)
@config.patch(benchmark_kernel=False)
@config.patch(compile_threads=1)
def test_remove_inductor_deps_multiple_kernels(self):
@torch.compile
def f(a):
a = torch.mm(a, a)
a = a.cos().sin()
a = torch.mm(a, a)
a = torch.softmax(a, dim=-1)
return a
a = torch.randn(5, 5, device=GPU_TYPE)
f(a)
compiled_module = self.get_compiled_module()
self.verify_remove_inductor_deps(compiled_module)
@unittest.skipIf(
not IS_BIG_GPU, "Skipping triton backend only since not big GPU (not enough SM)"
)
@config.patch("triton.unique_kernel_names", True)
@config.patch("triton.unique_kernel_names", True)
@config.patch(benchmark_kernel=False)
@config.patch(compile_threads=1)
@config.patch(max_autotune=True, max_autotune_gemm_backends="TRITON")
def test_remove_inductor_deps_templates(self):
@torch.compile
def f(a):
a = torch.mm(a, a)
a = a.cos()
a = torch.mm(a, a)
a = a.sin()
return a
a = torch.randn(128, 128, device=GPU_TYPE)
f(a)
compiled_module = self.get_compiled_module()
self.verify_remove_inductor_deps(compiled_module)
@config.patch("triton.unique_kernel_names", True)
@config.patch(benchmark_kernel=False)
@config.patch(compile_threads=1)
def test_remove_inductor_deps_scalar(self):
@torch.compile
def f(a, b):
return a + b
a = torch.tensor(1.0, device=GPU_TYPE)
b = torch.tensor(2.0, device=GPU_TYPE)
f(a, b)
compiled_module = self.get_compiled_module()
self.verify_remove_inductor_deps(compiled_module)
if __name__ == "__main__":
if HAS_GPU:
run_tests()
| TestKernelBenchmark |
python | pytorch__pytorch | torch/autograd/profiler.py | {
"start": 29580,
"end": 34936
} | class ____(_ContextDecorator):
"""Context manager/function decorator that adds a label to a code block/function when running autograd profiler.
Label will only appear if CPU activity tracing is enabled.
It is useful when tracing the code profile.
Args:
name (str): Label assigned to the block of code.
node_id (int): ID of node, for distributed profiling. Unset in
non-distributed cases.
Example:
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD_PROFILER)
>>> x = torch.randn((1, 1), requires_grad=True)
>>> with torch.autograd.profiler.profile() as prof:
... y = x**2
... with torch.autograd.profiler.record_function(
... "label-z"
... ): # label the block
... z = y**3
... y.backward()
>>> # xdoctest: +IGNORE_WANT
>>> # NOTE: some columns were removed for brevity
>>> print(prof.key_averages().table(sort_by="self_cpu_time_total"))
----------------------------------- --------------- --------------- ---------------
Name Self CPU total % CPU time avg Number of Calls
----------------------------------- --------------- --------------- ---------------
pow 60.77% 47.470us 3
mul 21.73% 25.465us 2
PowBackward0 12.03% 121.891us 1
torch::autograd::AccumulateGrad 2.70% 6.324us 1
label-z 2.13% 12.421us 1
torch::autograd::GraphRoot 0.64% 1.503us 1
----------------------------------- --------------- --------------- ---------------
Self CPU time total: 234.344us
CUDA time total: 0.000us
"""
def __init__(self, name: str, args: Optional[str] = None):
self.name: str = name
self.args: Optional[str] = args
# Whether or not we should run record function's end callbacks when exiting.
self.run_callbacks_on_exit: bool = True
# TODO: TorchScript ignores standard type annotation here
# self.record: Optional["torch.classes.profiler._RecordFunction"] = None
self.record = torch.jit.annotate(
# pyrefly: ignore [not-a-type]
Optional["torch.classes.profiler._RecordFunction"],
None,
)
def __enter__(self):
self.record = torch.ops.profiler._record_function_enter_new(
self.name, self.args
)
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any):
if not self.run_callbacks_on_exit:
return
# Local variable is needed by TorchScript to refine Optional[T] to T
record = self.record
if record is None:
raise AssertionError("Expected record to be set")
# TODO: Too slow with __torch_function__ handling enabled
# See https://github.com/pytorch/pytorch/issues/76410
if not torch.jit.is_scripting():
with torch._C.DisableTorchFunctionSubclass():
torch.ops.profiler._record_function_exit._RecordFunction(record)
else:
torch.ops.profiler._record_function_exit(record)
def _call_end_callbacks_on_future(self, fut: Future[Any]) -> Future[Any]:
"""Use for profiling async calls that return a future.
Calling this function will extend recording beyond this scope, until the future is
satisfied. It is useful for profiling the end to end time of asynchronous calls.
This function should only be called once to attach the callback onto the future, and
will throw if called multiple times.
Args:
fut: (torch._C.Future): future for which to schedule
callback for.
Returns:
A future that completes with the value of the passed in future when
the profiling callbacks have ran.
"""
# Throw if we have already attached a callback onto the future.
if not self.run_callbacks_on_exit:
raise RuntimeError("_call_end_callbacks_on_future can only be called once.")
# We are scheduling to run this RecordFunction's end callbacks when the
# passed in future completes, so don't run end callbacks on exit.
self.run_callbacks_on_exit = False
# Local variable is needed by TorchScript to refine Optional[T] to T
record = self.record
if record is None:
raise AssertionError("Expected record to be set")
# TODO: Too slow with __torch_function__ handling enabled
# See https://github.com/pytorch/pytorch/issues/76410
if not torch.jit.is_scripting():
with torch._C.DisableTorchFunctionSubclass():
profiled_future = (
torch.ops.profiler._call_end_callbacks_on_jit_fut._RecordFunction(
record, fut
)
)
else:
profiled_future = torch.ops.profiler._call_end_callbacks_on_jit_fut(
record, fut
)
return profiled_future
| record_function |
python | PyCQA__pylint | tests/functional/u/unused/unused_import_assigned_to.py | {
"start": 220,
"end": 289
} | class ____:
x = x[0]
def test(default=None):
return default
| Y |
python | getsentry__sentry | src/sentry/issues/endpoints/group_hashes.py | {
"start": 890,
"end": 1006
} | class ____(TypedDict):
id: str
latestEvent: Any
mergedBySeer: bool
@region_silo_endpoint
| GroupHashesResult |
python | oauthlib__oauthlib | tests/oauth2/rfc6749/endpoints/test_scope_handling.py | {
"start": 507,
"end": 8768
} | class ____(TestCase):
DEFAULT_REDIRECT_URI = 'http://i.b./path'
def set_scopes(self, scopes):
def set_request_scopes(client_id, code, client, request):
request.scopes = scopes
return True
return set_request_scopes
def set_user(self, request):
request.user = 'foo'
request.client_id = 'bar'
request.client = mock.MagicMock()
request.client.client_id = 'mocked'
return True
def set_client(self, request):
request.client = mock.MagicMock()
request.client.client_id = 'mocked'
return True
def setUp(self):
self.validator = mock.MagicMock(spec=RequestValidator)
self.validator.get_default_redirect_uri.return_value = TestScopeHandling.DEFAULT_REDIRECT_URI
self.validator.get_code_challenge.return_value = None
self.validator.authenticate_client.side_effect = self.set_client
self.server = Server(self.validator)
self.web = WebApplicationServer(self.validator)
self.mobile = MobileApplicationServer(self.validator)
self.legacy = LegacyApplicationServer(self.validator)
self.backend = BackendApplicationServer(self.validator)
def test_scope_extraction(self):
scopes = (
('images', ['images']),
('images+videos', ['images', 'videos']),
('images+videos+openid', ['images', 'videos', 'openid']),
('http%3A%2f%2fa.b%2fvideos', ['http://a.b/videos']),
('http%3A%2f%2fa.b%2fvideos+pics', ['http://a.b/videos', 'pics']),
('pics+http%3A%2f%2fa.b%2fvideos', ['pics', 'http://a.b/videos']),
('http%3A%2f%2fa.b%2fvideos+https%3A%2f%2fc.d%2Fsecret', ['http://a.b/videos', 'https://c.d/secret']),
)
uri = 'http://example.com/path?client_id=abc&scope=%s&response_type=%s'
for scope, correct_scopes in scopes:
scopes, _ = self.web.validate_authorization_request(
uri % (scope, 'code'))
self.assertCountEqual(scopes, correct_scopes)
scopes, _ = self.mobile.validate_authorization_request(
uri % (scope, 'token'))
self.assertCountEqual(scopes, correct_scopes)
scopes, _ = self.server.validate_authorization_request(
uri % (scope, 'code'))
self.assertCountEqual(scopes, correct_scopes)
def test_scope_preservation(self):
scope = 'pics+http%3A%2f%2fa.b%2fvideos'
decoded_scope = 'pics http://a.b/videos'
auth_uri = 'http://example.com/path?client_id=abc&response_type='
token_uri = 'http://example.com/path'
# authorization grant
for backend_server_type in ['web', 'server']:
h, _, s = getattr(self, backend_server_type).create_authorization_response(
auth_uri + 'code', scopes=decoded_scope.split(' '))
self.validator.validate_code.side_effect = self.set_scopes(decoded_scope.split(' '))
self.assertEqual(s, 302)
self.assertIn('Location', h)
code = get_query_credentials(h['Location'])['code'][0]
_, body, _ = getattr(self, backend_server_type).create_token_response(token_uri,
body='client_id=me&redirect_uri=http://back.to/me&grant_type=authorization_code&code=%s' % code)
self.assertEqual(json.loads(body)['scope'], decoded_scope)
# implicit grant
for backend_server_type in ['mobile', 'server']:
h, _, s = getattr(self, backend_server_type).create_authorization_response(
auth_uri + 'token', scopes=decoded_scope.split(' '))
self.assertEqual(s, 302)
self.assertIn('Location', h)
self.assertEqual(get_fragment_credentials(h['Location'])['scope'][0], decoded_scope)
# resource owner password credentials grant
for backend_server_type in ['legacy', 'server']:
body = 'grant_type=password&username=abc&password=secret&scope=%s'
_, body, _ = getattr(self, backend_server_type).create_token_response(token_uri,
body=body % scope)
self.assertEqual(json.loads(body)['scope'], decoded_scope)
# client credentials grant
for backend_server_type in ['backend', 'server']:
body = 'grant_type=client_credentials&scope=%s'
self.validator.authenticate_client.side_effect = self.set_user
_, body, _ = getattr(self, backend_server_type).create_token_response(token_uri,
body=body % scope)
self.assertEqual(json.loads(body)['scope'], decoded_scope)
def test_scope_changed(self):
scope = 'pics+http%3A%2f%2fa.b%2fvideos'
scopes = ['images', 'http://a.b/videos']
decoded_scope = 'images http://a.b/videos'
auth_uri = 'http://example.com/path?client_id=abc&response_type='
token_uri = 'http://example.com/path'
# authorization grant
h, _, s = self.web.create_authorization_response(
auth_uri + 'code', scopes=scopes)
self.assertEqual(s, 302)
self.assertIn('Location', h)
code = get_query_credentials(h['Location'])['code'][0]
self.validator.validate_code.side_effect = self.set_scopes(scopes)
_, body, _ = self.web.create_token_response(token_uri,
body='grant_type=authorization_code&code=%s' % code)
self.assertEqual(json.loads(body)['scope'], decoded_scope)
# implicit grant
self.validator.validate_scopes.side_effect = self.set_scopes(scopes)
h, _, s = self.mobile.create_authorization_response(
auth_uri + 'token', scopes=scopes)
self.assertEqual(s, 302)
self.assertIn('Location', h)
self.assertEqual(get_fragment_credentials(h['Location'])['scope'][0], decoded_scope)
# resource owner password credentials grant
self.validator.validate_scopes.side_effect = self.set_scopes(scopes)
body = 'grant_type=password&username=abc&password=secret&scope=%s'
_, body, _ = self.legacy.create_token_response(token_uri,
body=body % scope)
self.assertEqual(json.loads(body)['scope'], decoded_scope)
# client credentials grant
self.validator.validate_scopes.side_effect = self.set_scopes(scopes)
self.validator.authenticate_client.side_effect = self.set_user
body = 'grant_type=client_credentials&scope=%s'
_, body, _ = self.backend.create_token_response(token_uri,
body=body % scope)
self.assertEqual(json.loads(body)['scope'], decoded_scope)
def test_invalid_scope(self):
scope = 'pics+http%3A%2f%2fa.b%2fvideos'
auth_uri = 'http://example.com/path?client_id=abc&response_type='
token_uri = 'http://example.com/path'
self.validator.validate_scopes.return_value = False
# authorization grant
h, _, s = self.web.create_authorization_response(
auth_uri + 'code', scopes=['invalid'])
self.assertEqual(s, 302)
self.assertIn('Location', h)
error = get_query_credentials(h['Location'])['error'][0]
self.assertEqual(error, 'invalid_scope')
# implicit grant
h, _, s = self.mobile.create_authorization_response(
auth_uri + 'token', scopes=['invalid'])
self.assertEqual(s, 302)
self.assertIn('Location', h)
error = get_fragment_credentials(h['Location'])['error'][0]
self.assertEqual(error, 'invalid_scope')
# resource owner password credentials grant
body = 'grant_type=password&username=abc&password=secret&scope=%s'
_, body, _ = self.legacy.create_token_response(token_uri,
body=body % scope)
self.assertEqual(json.loads(body)['error'], 'invalid_scope')
# client credentials grant
self.validator.authenticate_client.side_effect = self.set_user
body = 'grant_type=client_credentials&scope=%s'
_, body, _ = self.backend.create_token_response(token_uri,
body=body % scope)
self.assertEqual(json.loads(body)['error'], 'invalid_scope')
| TestScopeHandling |
python | apache__airflow | shared/configuration/src/airflow_shared/configuration/parser.py | {
"start": 3377,
"end": 73195
} | class ____(ConfigParser):
"""
Base configuration parser with pure parsing logic.
This class provides the core parsing methods that work with:
- configuration_description: dict describing config options (required in __init__)
- _default_values: ConfigParser with default values (required in __init__)
- deprecated_options: class attribute mapping new -> old options
- deprecated_sections: class attribute mapping new -> old sections
"""
# A mapping of section -> setting -> { old, replace } for deprecated default values.
# Subclasses can override this to define deprecated values that should be upgraded.
deprecated_values: dict[str, dict[str, tuple[Pattern, str]]] = {}
# A mapping of (new section, new option) -> (old section, old option, since_version).
# When reading new option, the old option will be checked to see if it exists. If it does a
# DeprecationWarning will be issued and the old option will be used instead
deprecated_options: dict[tuple[str, str], tuple[str, str, str]] = {
("dag_processor", "refresh_interval"): ("scheduler", "dag_dir_list_interval", "3.0"),
("api", "host"): ("webserver", "web_server_host", "3.0"),
("api", "port"): ("webserver", "web_server_port", "3.0"),
("api", "workers"): ("webserver", "workers", "3.0"),
("api", "worker_timeout"): ("webserver", "web_server_worker_timeout", "3.0"),
("api", "ssl_cert"): ("webserver", "web_server_ssl_cert", "3.0"),
("api", "ssl_key"): ("webserver", "web_server_ssl_key", "3.0"),
("api", "access_logfile"): ("webserver", "access_logfile", "3.0"),
("triggerer", "capacity"): ("triggerer", "default_capacity", "3.0"),
("api", "expose_config"): ("webserver", "expose_config", "3.0.1"),
("fab", "access_denied_message"): ("webserver", "access_denied_message", "3.0.2"),
("fab", "expose_hostname"): ("webserver", "expose_hostname", "3.0.2"),
("fab", "navbar_color"): ("webserver", "navbar_color", "3.0.2"),
("fab", "navbar_text_color"): ("webserver", "navbar_text_color", "3.0.2"),
("fab", "navbar_hover_color"): ("webserver", "navbar_hover_color", "3.0.2"),
("fab", "navbar_text_hover_color"): ("webserver", "navbar_text_hover_color", "3.0.2"),
("api", "secret_key"): ("webserver", "secret_key", "3.0.2"),
("api", "enable_swagger_ui"): ("webserver", "enable_swagger_ui", "3.0.2"),
("dag_processor", "parsing_pre_import_modules"): ("scheduler", "parsing_pre_import_modules", "3.0.4"),
("api", "grid_view_sorting_order"): ("webserver", "grid_view_sorting_order", "3.1.0"),
("api", "log_fetch_timeout_sec"): ("webserver", "log_fetch_timeout_sec", "3.1.0"),
("api", "hide_paused_dags_by_default"): ("webserver", "hide_paused_dags_by_default", "3.1.0"),
("api", "page_size"): ("webserver", "page_size", "3.1.0"),
("api", "default_wrap"): ("webserver", "default_wrap", "3.1.0"),
("api", "auto_refresh_interval"): ("webserver", "auto_refresh_interval", "3.1.0"),
("api", "require_confirmation_dag_change"): ("webserver", "require_confirmation_dag_change", "3.1.0"),
("api", "instance_name"): ("webserver", "instance_name", "3.1.0"),
("api", "log_config"): ("api", "access_logfile", "3.1.0"),
}
# A mapping of new section -> (old section, since_version).
deprecated_sections: dict[str, tuple[str, str]] = {}
@property
def _lookup_sequence(self) -> list[Callable]:
"""
Define the sequence of lookup methods for get(). The definition here does not have provider lookup.
Subclasses can override this to customise lookup order.
"""
return [
self._get_environment_variables,
self._get_option_from_config_file,
self._get_option_from_commands,
self._get_option_from_secrets,
self._get_option_from_defaults,
]
@property
def _validators(self) -> list[Callable[[], None]]:
"""
Return list of validators defined on a config parser class. Base class will return an empty list.
Subclasses can override this to customize the validators that are run during validation on the
config parser instance.
"""
return []
def validate(self) -> None:
"""Run all registered validators."""
for validator in self._validators:
validator()
self.is_validated = True
    def _validate_deprecated_values(self) -> None:
        """Validate and upgrade deprecated default values.

        For each registered (section, name) whose current value still matches
        the old regex, rewrite it to the new form, record the original in
        ``upgraded_values``, push the rewritten value into the environment so
        subprocesses agree, and emit a FutureWarning.
        """
        for section, replacement in self.deprecated_values.items():
            for name, info in replacement.items():
                # info is (old_pattern: Pattern, new_replacement: str)
                old, new = info
                current_value = self.get(section, name, fallback="")
                if self._using_old_value(old, current_value):
                    # Remember what was replaced so callers can report it.
                    self.upgraded_values[(section, name)] = current_value
                    new_value = old.sub(new, current_value)
                    self._update_env_var(section=section, name=name, new_value=new_value)
                    self._create_future_warning(
                        name=name,
                        section=section,
                        current_value=current_value,
                        new_value=new_value,
                    )
def _using_old_value(self, old: Pattern, current_value: str) -> bool:
"""Check if current_value matches the old pattern."""
return old.search(current_value) is not None
def _update_env_var(self, section: str, name: str, new_value: str) -> None:
"""Update environment variable with new value."""
env_var = self._env_var_name(section, name)
# Set it as an env var so that any subprocesses keep the same override!
os.environ[env_var] = new_value
@staticmethod
def _create_future_warning(name: str, section: str, current_value: Any, new_value: Any) -> None:
"""Create a FutureWarning for deprecated default values."""
warnings.warn(
f"The {name!r} setting in [{section}] has the old default value of {current_value!r}. "
f"This value has been changed to {new_value!r} in the running config, but please update your config.",
FutureWarning,
stacklevel=3,
)
    def __init__(
        self,
        configuration_description: dict[str, dict[str, Any]],
        _default_values: ConfigParser,
        *args,
        **kwargs,
    ):
        """
        Initialize the parser.

        :param configuration_description: Description of configuration options
        :param _default_values: ConfigParser with default values
        """
        super().__init__(*args, **kwargs)
        self.configuration_description = configuration_description
        self._default_values = _default_values
        # Toggled by suppress_future_warnings() while probing deprecated keys.
        self._suppress_future_warnings = False
        # (section, key) -> original value, populated when a deprecated default
        # is auto-upgraded by _validate_deprecated_values().
        self.upgraded_values = {}
@functools.cached_property
def inversed_deprecated_options(self):
"""Build inverse mapping from old options to new options."""
return {(sec, name): key for key, (sec, name, ver) in self.deprecated_options.items()}
@functools.cached_property
def inversed_deprecated_sections(self):
"""Build inverse mapping from old sections to new sections."""
return {
old_section: new_section for new_section, (old_section, ver) in self.deprecated_sections.items()
}
    @functools.cached_property
    def sensitive_config_values(self) -> set[tuple[str, str]]:
        """Get set of sensitive config values that should be masked."""
        # Flatten the nested description into {(section, key): option-spec}.
        flattened = {
            (s, k): item
            for s, s_c in self.configuration_description.items()
            for k, item in s_c.get("options", {}).items()
        }
        sensitive = {
            (section.lower(), key.lower())
            for (section, key), v in flattened.items()
            if v.get("sensitive") is True
        }
        # Also mask the deprecated (old) locations of sensitive options/sections,
        # so values set under the old names stay hidden. [:-1] drops the version.
        depr_option = {self.deprecated_options[x][:-1] for x in sensitive if x in self.deprecated_options}
        depr_section = {
            (self.deprecated_sections[s][0], k) for s, k in sensitive if s in self.deprecated_sections
        }
        sensitive.update(depr_section, depr_option)
        return sensitive
@overload # type: ignore[override]
def get(self, section: str, key: str, fallback: str = ..., **kwargs) -> str: ...
@overload
def get(self, section: str, key: str, **kwargs) -> str | None: ...
    def _update_defaults_from_string(self, config_string: str) -> None:
        """
        Update the defaults in _default_values based on values in config_string ("ini" format).

        Override shared parser's method to add validation for template variables.
        Note that those values are not validated and cannot contain variables because we are using
        regular config parser to load them. This method is used to test the config parser in unit tests.

        :param config_string: ini-formatted config string
        :raises AirflowConfigException: if a non-template option contains a ``{`` variable marker
        """
        parser = ConfigParser()
        parser.read_string(config_string)
        for section in parser.sections():
            if section not in self._default_values.sections():
                self._default_values.add_section(section)
            errors = False
            for key, value in parser.items(section):
                # Only options declared as templates may contain {placeholders};
                # anything else with a "{" is a mistake in the supplied defaults.
                if not self.is_template(section, key) and "{" in value:
                    errors = True
                    log.error(
                        "The %s.%s value %s read from string contains variable. This is not supported",
                        section,
                        key,
                        value,
                    )
                self._default_values.set(section, key, value)
            if errors:
                raise AirflowConfigException(
                    f"The string config passed as default contains variables. "
                    f"This is not supported. String config: {config_string}"
                )
def get_default_value(self, section: str, key: str, fallback: Any = None, raw=False, **kwargs) -> Any:
"""
Retrieve default value from default config parser.
This will retrieve the default value from the default config parser. Optionally a raw, stored
value can be retrieved by setting skip_interpolation to True. This is useful for example when
we want to write the default value to a file, and we don't want the interpolation to happen
as it is going to be done later when the config is read.
:param section: section of the config
:param key: key to use
:param fallback: fallback value to use
:param raw: if raw, then interpolation will be reversed
:param kwargs: other args
:return:
"""
value = self._default_values.get(section, key, fallback=fallback, **kwargs)
if raw and value is not None:
return value.replace("%", "%%")
return value
    def _get_custom_secret_backend(self, worker_mode: bool = False) -> Any | None:
        """
        Get Secret Backend if defined in airflow.cfg.

        Conditionally selects the section, key and kwargs key based on whether it is called from worker or not.

        :param worker_mode: when True, prefer [workers] secrets_backend / secrets_backend_kwargs,
            falling back to the regular [secrets] backend if none is configured for workers
        :return: instantiated backend object, or None when no backend is configured
        """
        section = "workers" if worker_mode else "secrets"
        key = "secrets_backend" if worker_mode else "backend"
        kwargs_key = "secrets_backend_kwargs" if worker_mode else "backend_kwargs"
        secrets_backend_cls = self.getimport(section=section, key=key)
        if not secrets_backend_cls:
            if worker_mode:
                # if we find no secrets backend for worker, return that of secrets backend
                secrets_backend_cls = self.getimport(section="secrets", key="backend")
                if not secrets_backend_cls:
                    return None
                # When falling back to secrets backend, use its kwargs
                kwargs_key = "backend_kwargs"
                section = "secrets"
            else:
                return None
        try:
            backend_kwargs = self.getjson(section=section, key=kwargs_key)
            if not backend_kwargs:
                backend_kwargs = {}
            elif not isinstance(backend_kwargs, dict):
                # Normalised to the ValueError handler below so both bad-JSON and
                # wrong-type kwargs degrade to "no kwargs" with a warning.
                raise ValueError("not a dict")
        except AirflowConfigException:
            log.warning("Failed to parse [%s] %s as JSON, defaulting to no kwargs.", section, kwargs_key)
            backend_kwargs = {}
        except ValueError:
            log.warning("Failed to parse [%s] %s into a dict, defaulting to no kwargs.", section, kwargs_key)
            backend_kwargs = {}
        return secrets_backend_cls(**backend_kwargs)
    def _get_config_value_from_secret_backend(self, config_key: str) -> str | None:
        """
        Get Config option values from Secret Backend.

        Called by the shared parser's _get_secret_option() method as part of the lookup chain.
        Uses _get_custom_secret_backend() to get the backend instance.

        :param config_key: the config key to retrieve
        :return: config value, or None when no secrets backend is configured
        :raises AirflowConfigException: wrapping any backend failure
        """
        try:
            secrets_client = self._get_custom_secret_backend()
            if not secrets_client:
                return None
            return secrets_client.get_config(config_key)
        except Exception as e:
            # Any backend error (import, auth, network) is surfaced uniformly.
            raise AirflowConfigException(
                "Cannot retrieve config from alternative secrets backend. "
                "Make sure it is configured properly and that the Backend "
                "is accessible.\n"
                f"{e}"
            )
def _get_cmd_option_from_config_sources(
self, config_sources: ConfigSourcesType, section: str, key: str
) -> str | None:
fallback_key = key + "_cmd"
if (section, key) in self.sensitive_config_values:
section_dict = config_sources.get(section)
if section_dict is not None:
command_value = section_dict.get(fallback_key)
if command_value is not None:
if isinstance(command_value, str):
command = command_value
else:
command = command_value[0]
return run_command(command)
return None
def _get_secret_option_from_config_sources(
self, config_sources: ConfigSourcesType, section: str, key: str
) -> str | None:
fallback_key = key + "_secret"
if (section, key) in self.sensitive_config_values:
section_dict = config_sources.get(section)
if section_dict is not None:
secrets_path_value = section_dict.get(fallback_key)
if secrets_path_value is not None:
if isinstance(secrets_path_value, str):
secrets_path = secrets_path_value
else:
secrets_path = secrets_path_value[0]
return self._get_config_value_from_secret_backend(secrets_path)
return None
    def _include_secrets(
        self,
        config_sources: ConfigSourcesType,
        display_sensitive: bool,
        display_source: bool,
        raw: bool,
    ):
        """Resolve ``*_secret`` pointers in *config_sources* into their backend values in place."""
        for section, key in self.sensitive_config_values:
            value: str | None = self._get_secret_option_from_config_sources(config_sources, section, key)
            if value:
                if not display_sensitive:
                    value = "< hidden >"
                if display_source:
                    opt: str | tuple[str, str] = (value, "secret")
                elif raw:
                    # Escape % so the value survives ConfigParser interpolation.
                    opt = value.replace("%", "%%")
                else:
                    opt = value
                config_sources.setdefault(section, {}).update({key: opt})
                # The raw *_secret pointer has been resolved; drop it.
                del config_sources[section][key + "_secret"]
    def _include_commands(
        self,
        config_sources: ConfigSourcesType,
        display_sensitive: bool,
        display_source: bool,
        raw: bool,
    ):
        """Resolve ``*_cmd`` entries in *config_sources* by running the commands, in place."""
        for section, key in self.sensitive_config_values:
            opt = self._get_cmd_option_from_config_sources(config_sources, section, key)
            if not opt:
                continue
            opt_to_set: str | tuple[str, str] | None = opt
            if not display_sensitive:
                opt_to_set = "< hidden >"
            if display_source:
                opt_to_set = (str(opt_to_set), "cmd")
            elif raw:
                # Escape % so the value survives ConfigParser interpolation.
                opt_to_set = str(opt_to_set).replace("%", "%%")
            if opt_to_set is not None:
                dict_to_update: dict[str, str | tuple[str, str]] = {key: opt_to_set}
                config_sources.setdefault(section, {}).update(dict_to_update)
                # The raw *_cmd entry has been resolved; drop it.
                del config_sources[section][key + "_cmd"]
    def _include_envs(
        self,
        config_sources: ConfigSourcesType,
        display_sensitive: bool,
        display_source: bool,
        raw: bool,
    ):
        """Overlay ``AIRFLOW__*`` environment variables onto *config_sources* in place."""
        for env_var in [
            os_environment for os_environment in os.environ if os_environment.startswith(ENV_VAR_PREFIX)
        ]:
            try:
                _, section, key = env_var.split("__", 2)
                opt = self._get_env_var_option(section, key)
            except ValueError:
                # Env var does not have the PREFIX__SECTION__KEY shape; skip it.
                continue
            if opt is None:
                log.warning("Ignoring unknown env var '%s'", env_var)
                continue
            if not display_sensitive and env_var != self._env_var_name("core", "unit_test_mode"):
                # Don't hide cmd/secret values here
                if not env_var.lower().endswith(("cmd", "secret")):
                    if (section, key) in self.sensitive_config_values:
                        opt = "< hidden >"
            elif raw:
                opt = opt.replace("%", "%%")
            if display_source:
                opt = (opt, "env var")
            section = section.lower()
            key = key.lower()
            config_sources.setdefault(section, {}).update({key: opt})
    def _filter_by_source(
        self,
        config_sources: ConfigSourcesType,
        display_source: bool,
        getter_func,
    ):
        """
        Delete default configs from current configuration.

        An OrderedDict of OrderedDicts, if it would conflict with special sensitive_config_values.

        This is necessary because bare configs take precedence over the command
        or secret key equivalents so if the current running config is
        materialized with Airflow defaults they in turn override user set
        command or secret key configs.

        :param config_sources: The current configuration to operate on
        :param display_source: If False, configuration options contain raw
            values. If True, options are a tuple of (option_value, source).
            Source is either 'airflow.cfg', 'default', 'env var', or 'cmd'.
        :param getter_func: A callback function that gets the user configured
            override value for a particular sensitive_config_values config.
        :return: None, the given config_sources is filtered if necessary,
            otherwise untouched.
        """
        for section, key in self.sensitive_config_values:
            # Don't bother if we don't have section / key
            if section not in config_sources or key not in config_sources[section]:
                continue
            # Check that there is something to override defaults
            try:
                getter_opt = getter_func(section, key)
            except ValueError:
                continue
            if not getter_opt:
                continue
            # Check to see that there is a default value
            if self.get_default_value(section, key) is None:
                continue
            # Check to see if bare setting is the same as defaults
            if display_source:
                # when display_source = true, we know that the config_sources contains tuple
                opt, source = config_sources[section][key]  # type: ignore
            else:
                opt = config_sources[section][key]
            # Drop the entry only when it merely repeats the default, so the
            # cmd/secret override is allowed to take effect.
            if opt == self.get_default_value(section, key):
                del config_sources[section][key]
@staticmethod
def _deprecated_value_is_set_in_config(
deprecated_section: str,
deprecated_key: str,
configs: Iterable[tuple[str, ConfigParser]],
) -> bool:
for config_type, config in configs:
if config_type != "default":
with contextlib.suppress(NoSectionError):
deprecated_section_array = config.items(section=deprecated_section, raw=True)
if any(key == deprecated_key for key, _ in deprecated_section_array):
return True
return False
@staticmethod
def _deprecated_variable_is_set(deprecated_section: str, deprecated_key: str) -> bool:
return (
os.environ.get(f"{ENV_VAR_PREFIX}{deprecated_section.upper()}__{deprecated_key.upper()}")
is not None
)
@staticmethod
def _deprecated_command_is_set_in_config(
deprecated_section: str,
deprecated_key: str,
configs: Iterable[tuple[str, ConfigParser]],
) -> bool:
return AirflowConfigParser._deprecated_value_is_set_in_config(
deprecated_section=deprecated_section, deprecated_key=deprecated_key + "_cmd", configs=configs
)
@staticmethod
def _deprecated_variable_command_is_set(deprecated_section: str, deprecated_key: str) -> bool:
return (
os.environ.get(f"{ENV_VAR_PREFIX}{deprecated_section.upper()}__{deprecated_key.upper()}_CMD")
is not None
)
@staticmethod
def _deprecated_secret_is_set_in_config(
deprecated_section: str,
deprecated_key: str,
configs: Iterable[tuple[str, ConfigParser]],
) -> bool:
return AirflowConfigParser._deprecated_value_is_set_in_config(
deprecated_section=deprecated_section, deprecated_key=deprecated_key + "_secret", configs=configs
)
@staticmethod
def _deprecated_variable_secret_is_set(deprecated_section: str, deprecated_key: str) -> bool:
return (
os.environ.get(f"{ENV_VAR_PREFIX}{deprecated_section.upper()}__{deprecated_key.upper()}_SECRET")
is not None
)
    @staticmethod
    def _replace_config_with_display_sources(
        config_sources: ConfigSourcesType,
        configs: Iterable[tuple[str, ConfigParser]],
        configuration_description: dict[str, dict[str, Any]],
        display_source: bool,
        raw: bool,
        deprecated_options: dict[tuple[str, str], tuple[str, str, str]],
        include_env: bool,
        include_cmds: bool,
        include_secret: bool,
    ):
        """Merge every (source_name, parser) pair into *config_sources*, section by section.

        Delegates per-section work to _replace_section_config_with_display_sources;
        *configs* is passed along as well so deprecated-entry checks can inspect
        all sources, not just the one currently being merged.
        """
        for source_name, config in configs:
            sections = config.sections()
            for section in sections:
                AirflowConfigParser._replace_section_config_with_display_sources(
                    config,
                    config_sources,
                    configuration_description,
                    display_source,
                    raw,
                    section,
                    source_name,
                    deprecated_options,
                    configs,
                    include_env=include_env,
                    include_cmds=include_cmds,
                    include_secret=include_secret,
                )
    @staticmethod
    def _replace_section_config_with_display_sources(
        config: ConfigParser,
        config_sources: ConfigSourcesType,
        configuration_description: dict[str, dict[str, Any]],
        display_source: bool,
        raw: bool,
        section: str,
        source_name: str,
        deprecated_options: dict[tuple[str, str], tuple[str, str, str]],
        configs: Iterable[tuple[str, ConfigParser]],
        include_env: bool,
        include_cmds: bool,
        include_secret: bool,
    ):
        """Merge one section of *config* into *config_sources*.

        When *display_source* is True, values are stored as (value, source_name)
        tuples. Default values for options that are superseded by a deprecated
        entry set elsewhere are skipped, so the deprecated value can win.
        """
        sect = config_sources.setdefault(section, {})
        if isinstance(config, AirflowConfigParser):
            # Reading deprecated keys through our own parser would emit
            # FutureWarnings; suppress them while enumerating.
            with config.suppress_future_warnings():
                items: Iterable[tuple[str, Any]] = config.items(section=section, raw=raw)
        else:
            items = config.items(section=section, raw=raw)
        for k, val in items:
            deprecated_section, deprecated_key, _ = deprecated_options.get((section, k), (None, None, None))
            if deprecated_section and deprecated_key:
                if source_name == "default":
                    # If deprecated entry has some non-default value set for any of the sources requested,
                    # We should NOT set default for the new entry (because it will override anything
                    # coming from the deprecated ones)
                    if AirflowConfigParser._deprecated_value_is_set_in_config(
                        deprecated_section, deprecated_key, configs
                    ):
                        continue
                    if include_env and AirflowConfigParser._deprecated_variable_is_set(
                        deprecated_section, deprecated_key
                    ):
                        continue
                    if include_cmds and (
                        AirflowConfigParser._deprecated_variable_command_is_set(
                            deprecated_section, deprecated_key
                        )
                        or AirflowConfigParser._deprecated_command_is_set_in_config(
                            deprecated_section, deprecated_key, configs
                        )
                    ):
                        continue
                    if include_secret and (
                        AirflowConfigParser._deprecated_variable_secret_is_set(
                            deprecated_section, deprecated_key
                        )
                        or AirflowConfigParser._deprecated_secret_is_set_in_config(
                            deprecated_section, deprecated_key, configs
                        )
                    ):
                        continue
            if display_source:
                updated_source_name = source_name
                if source_name == "default":
                    # defaults can come from other sources (default-<PROVIDER>) that should be used here
                    source_description_section = configuration_description.get(section, {})
                    source_description_key = source_description_section.get("options", {}).get(k, {})
                    if source_description_key is not None:
                        updated_source_name = source_description_key.get("source", source_name)
                sect[k] = (val, updated_source_name)
            else:
                sect[k] = val
def _warn_deprecate(
self, section: str, key: str, deprecated_section: str, deprecated_name: str, extra_stacklevel: int
):
"""Warn about deprecated config option usage."""
if section == deprecated_section:
warnings.warn(
f"The {deprecated_name} option in [{section}] has been renamed to {key} - "
f"the old setting has been used, but please update your config.",
DeprecationWarning,
stacklevel=4 + extra_stacklevel,
)
else:
warnings.warn(
f"The {deprecated_name} option in [{deprecated_section}] has been moved to the {key} option "
f"in [{section}] - the old setting has been used, but please update your config.",
DeprecationWarning,
stacklevel=4 + extra_stacklevel,
)
@contextmanager
def suppress_future_warnings(self):
"""
Context manager to temporarily suppress future warnings.
This is a stub used by the shared parser's lookup methods when checking deprecated options.
Subclasses can override this to customize warning suppression behavior.
:return: context manager that suppresses future warnings
"""
suppress_future_warnings = self._suppress_future_warnings
self._suppress_future_warnings = True
yield self
self._suppress_future_warnings = suppress_future_warnings
def _env_var_name(self, section: str, key: str, team_name: str | None = None) -> str:
"""Generate environment variable name for a config option."""
team_component: str = f"{team_name.upper()}___" if team_name else ""
return f"{ENV_VAR_PREFIX}{team_component}{section.replace('.', '_').upper()}__{key.upper()}"
    def _get_env_var_option(self, section: str, key: str, team_name: str | None = None):
        """Get config option from environment variable.

        Checks, in order: the plain env var, its ``_CMD`` companion (run as a
        command) and its ``_SECRET`` companion (fetched from the secrets
        backend). The ``_CMD``/``_SECRET`` forms are honoured only for options
        registered as sensitive. Returns None when nothing applies.
        """
        env_var: str = self._env_var_name(section, key, team_name=team_name)
        if env_var in os.environ:
            return expand_env_var(os.environ[env_var])
        # alternatively AIRFLOW__{SECTION}__{KEY}_CMD (for a command)
        env_var_cmd = env_var + "_CMD"
        if env_var_cmd in os.environ:
            # if this is a valid command key...
            if (section, key) in self.sensitive_config_values:
                return run_command(os.environ[env_var_cmd])
        # alternatively AIRFLOW__{SECTION}__{KEY}_SECRET (to get from Secrets Backend)
        env_var_secret_path = env_var + "_SECRET"
        if env_var_secret_path in os.environ:
            # if this is a valid secret path...
            if (section, key) in self.sensitive_config_values:
                return self._get_config_value_from_secret_backend(os.environ[env_var_secret_path])
        return None
def _get_cmd_option(self, section: str, key: str):
"""Get config option from command execution."""
fallback_key = key + "_cmd"
if (section, key) in self.sensitive_config_values:
if super().has_option(section, fallback_key):
command = super().get(section, fallback_key)
try:
cmd_output = run_command(command)
except AirflowConfigException as e:
raise e
except Exception as e:
raise AirflowConfigException(
f"Cannot run the command for the config section [{section}]{fallback_key}_cmd."
f" Please check the {fallback_key} value."
) from e
return cmd_output
return None
def _get_secret_option(self, section: str, key: str) -> str | None:
"""Get Config option values from Secret Backend."""
fallback_key = key + "_secret"
if (section, key) in self.sensitive_config_values:
if super().has_option(section, fallback_key):
secrets_path = super().get(section, fallback_key)
return self._get_config_value_from_secret_backend(secrets_path)
return None
def _get_environment_variables(
self,
deprecated_key: str | None,
deprecated_section: str | None,
key: str,
section: str,
issue_warning: bool = True,
extra_stacklevel: int = 0,
**kwargs,
) -> str | ValueNotFound:
"""Get config option from environment variables."""
team_name = kwargs.get("team_name", None)
option = self._get_env_var_option(section, key, team_name=team_name)
if option is not None:
return option
if deprecated_section and deprecated_key:
with self.suppress_future_warnings():
option = self._get_env_var_option(deprecated_section, deprecated_key, team_name=team_name)
if option is not None:
if issue_warning:
self._warn_deprecate(section, key, deprecated_section, deprecated_key, extra_stacklevel)
return option
return VALUE_NOT_FOUND_SENTINEL
    def _get_option_from_config_file(
        self,
        deprecated_key: str | None,
        deprecated_section: str | None,
        key: str,
        section: str,
        issue_warning: bool = True,
        extra_stacklevel: int = 0,
        **kwargs,
    ) -> str | ValueNotFound:
        """Lookup step: resolve *section*/*key* from the parsed config file.

        Checks the (possibly team-scoped) section first, then the deprecated
        section/key, returning the not-found sentinel otherwise.
        """
        if team_name := kwargs.get("team_name", None):
            # Team-scoped options live under a "<team>=<section>" section name.
            section = f"{team_name}={section}"
            # since this is the last lookup that supports team_name, pop it
            # NOTE(review): team_name is only popped when truthy; a falsy
            # team_name would leak into the parser kwargs below — presumably
            # callers never pass one, but verify.
            kwargs.pop("team_name")
        if super().has_option(section, key):
            return expand_env_var(super().get(section, key, **kwargs))
        if deprecated_section and deprecated_key:
            if super().has_option(deprecated_section, deprecated_key):
                if issue_warning:
                    self._warn_deprecate(section, key, deprecated_section, deprecated_key, extra_stacklevel)
                with self.suppress_future_warnings():
                    return expand_env_var(super().get(deprecated_section, deprecated_key, **kwargs))
        return VALUE_NOT_FOUND_SENTINEL
def _get_option_from_commands(
self,
deprecated_key: str | None,
deprecated_section: str | None,
key: str,
section: str,
issue_warning: bool = True,
extra_stacklevel: int = 0,
**kwargs,
) -> str | ValueNotFound:
"""Get config option from command execution."""
option = self._get_cmd_option(section, key)
if option:
return option
if deprecated_section and deprecated_key:
with self.suppress_future_warnings():
option = self._get_cmd_option(deprecated_section, deprecated_key)
if option:
if issue_warning:
self._warn_deprecate(section, key, deprecated_section, deprecated_key, extra_stacklevel)
return option
return VALUE_NOT_FOUND_SENTINEL
def _get_option_from_secrets(
self,
deprecated_key: str | None,
deprecated_section: str | None,
key: str,
section: str,
issue_warning: bool = True,
extra_stacklevel: int = 0,
**kwargs,
) -> str | ValueNotFound:
"""Get config option from secrets backend."""
option = self._get_secret_option(section, key)
if option:
return option
if deprecated_section and deprecated_key:
with self.suppress_future_warnings():
option = self._get_secret_option(deprecated_section, deprecated_key)
if option:
if issue_warning:
self._warn_deprecate(section, key, deprecated_section, deprecated_key, extra_stacklevel)
return option
return VALUE_NOT_FOUND_SENTINEL
def _get_option_from_defaults(
self,
deprecated_key: str | None,
deprecated_section: str | None,
key: str,
section: str,
issue_warning: bool = True,
extra_stacklevel: int = 0,
**kwargs,
) -> str | ValueNotFound:
"""Get config option from default values."""
if self.get_default_value(section, key) is not None or "fallback" in kwargs:
return expand_env_var(self.get_default_value(section, key, **kwargs))
return VALUE_NOT_FOUND_SENTINEL
def _resolve_deprecated_lookup(
self,
section: str,
key: str,
lookup_from_deprecated: bool,
extra_stacklevel: int = 0,
) -> tuple[str, str, str | None, str | None, bool]:
"""
Resolve deprecated section/key mappings and determine deprecated values.
:param section: Section name (will be lowercased)
:param key: Key name (will be lowercased)
:param lookup_from_deprecated: Whether to lookup from deprecated options
:param extra_stacklevel: Extra stack level for warnings
:return: Tuple of (resolved_section, resolved_key, deprecated_section, deprecated_key, warning_emitted)
"""
section = section.lower()
key = key.lower()
warning_emitted = False
deprecated_section: str | None = None
deprecated_key: str | None = None
if not lookup_from_deprecated:
return section, key, deprecated_section, deprecated_key, warning_emitted
option_description = self.configuration_description.get(section, {}).get("options", {}).get(key, {})
if option_description.get("deprecated"):
deprecation_reason = option_description.get("deprecation_reason", "")
warnings.warn(
f"The '{key}' option in section {section} is deprecated. {deprecation_reason}",
DeprecationWarning,
stacklevel=2 + extra_stacklevel,
)
# For the cases in which we rename whole sections
if section in self.inversed_deprecated_sections:
deprecated_section, deprecated_key = (section, key)
section = self.inversed_deprecated_sections[section]
if not self._suppress_future_warnings:
warnings.warn(
f"The config section [{deprecated_section}] has been renamed to "
f"[{section}]. Please update your `conf.get*` call to use the new name",
FutureWarning,
stacklevel=2 + extra_stacklevel,
)
# Don't warn about individual rename if the whole section is renamed
warning_emitted = True
elif (section, key) in self.inversed_deprecated_options:
# Handle using deprecated section/key instead of the new section/key
new_section, new_key = self.inversed_deprecated_options[(section, key)]
if not self._suppress_future_warnings and not warning_emitted:
warnings.warn(
f"section/key [{section}/{key}] has been deprecated, you should use"
f"[{new_section}/{new_key}] instead. Please update your `conf.get*` call to use the "
"new name",
FutureWarning,
stacklevel=2 + extra_stacklevel,
)
warning_emitted = True
deprecated_section, deprecated_key = section, key
section, key = (new_section, new_key)
elif section in self.deprecated_sections:
# When accessing the new section name, make sure we check under the old config name
deprecated_key = key
deprecated_section = self.deprecated_sections[section][0]
else:
deprecated_section, deprecated_key, _ = self.deprecated_options.get(
(section, key), (None, None, None)
)
return section, key, deprecated_section, deprecated_key, warning_emitted
    @overload  # type: ignore[override]
    def get(self, section: str, key: str, fallback: str = ..., **kwargs) -> str: ...
    @overload  # type: ignore[override]
    def get(self, section: str, key: str, **kwargs) -> str | None: ...
    def get(  # type: ignore[misc, override]
        self,
        section: str,
        key: str,
        suppress_warnings: bool = False,
        lookup_from_deprecated: bool = True,
        _extra_stacklevel: int = 0,
        team_name: str | None = None,
        **kwargs,
    ) -> str | None:
        """
        Get config value by iterating through lookup sequence.

        Priority order is defined by _lookup_sequence property.

        :param suppress_warnings: skip the "not found" log message (the exception is still raised)
        :param lookup_from_deprecated: also consult deprecated section/key names
        :param _extra_stacklevel: added to warning stacklevels so warnings point at the
            caller of typed wrappers such as getint()/getboolean()
        :param team_name: scope the lookup to a team-specific config, where supported
        :raises AirflowConfigException: when no source provides the option and no ``fallback`` was given
        """
        section, key, deprecated_section, deprecated_key, warning_emitted = self._resolve_deprecated_lookup(
            section=section,
            key=key,
            lookup_from_deprecated=lookup_from_deprecated,
            extra_stacklevel=_extra_stacklevel,
        )
        if team_name is not None:
            kwargs["team_name"] = team_name
        for lookup_method in self._lookup_sequence:
            value = lookup_method(
                deprecated_key=deprecated_key,
                deprecated_section=deprecated_section,
                key=key,
                section=section,
                issue_warning=not warning_emitted,
                extra_stacklevel=_extra_stacklevel,
                **kwargs,
            )
            # Sentinel (not None) marks "not found", so None can be a real value.
            if value is not VALUE_NOT_FOUND_SENTINEL:
                return value
        # Check if fallback was explicitly provided (even if None)
        if "fallback" in kwargs:
            return kwargs["fallback"]
        if not suppress_warnings:
            log.warning("section/key [%s/%s] not found in config", section, key)
        raise AirflowConfigException(f"section/key [{section}/{key}] not found in config")
def getboolean(self, section: str, key: str, **kwargs) -> bool: # type: ignore[override]
"""Get config value as boolean."""
val = str(self.get(section, key, _extra_stacklevel=1, **kwargs)).lower().strip()
if "#" in val:
val = val.split("#")[0].strip()
if val in ("t", "true", "1"):
return True
if val in ("f", "false", "0"):
return False
raise AirflowConfigException(
f'Failed to convert value to bool. Please check "{key}" key in "{section}" section. '
f'Current value: "{val}".'
)
def getint(self, section: str, key: str, **kwargs) -> int: # type: ignore[override]
"""Get config value as integer."""
val = self.get(section, key, _extra_stacklevel=1, **kwargs)
if val is None:
raise AirflowConfigException(
f"Failed to convert value None to int. "
f'Please check "{key}" key in "{section}" section is set.'
)
try:
return int(val)
except ValueError:
raise AirflowConfigException(
f'Failed to convert value to int. Please check "{key}" key in "{section}" section. '
f'Current value: "{val}".'
)
def getfloat(self, section: str, key: str, **kwargs) -> float: # type: ignore[override]
"""Get config value as float."""
val = self.get(section, key, _extra_stacklevel=1, **kwargs)
if val is None:
raise AirflowConfigException(
f"Failed to convert value None to float. "
f'Please check "{key}" key in "{section}" section is set.'
)
try:
return float(val)
except ValueError:
raise AirflowConfigException(
f'Failed to convert value to float. Please check "{key}" key in "{section}" section. '
f'Current value: "{val}".'
)
def getlist(self, section: str, key: str, delimiter=",", **kwargs):
"""Get config value as list."""
val = self.get(section, key, **kwargs)
if val is None:
if "fallback" in kwargs:
return kwargs["fallback"]
raise AirflowConfigException(
f"Failed to convert value None to list. "
f'Please check "{key}" key in "{section}" section is set.'
)
try:
return [item.strip() for item in val.split(delimiter)]
except Exception:
raise AirflowConfigException(
f'Failed to parse value to a list. Please check "{key}" key in "{section}" section. '
f'Current value: "{val}".'
)
E = TypeVar("E", bound=Enum)
def getenum(self, section: str, key: str, enum_class: type[E], **kwargs) -> E:
"""Get config value as enum."""
val = self.get(section, key, **kwargs)
enum_names = [enum_item.name for enum_item in enum_class]
if val is None:
raise AirflowConfigException(
f'Failed to convert value. Please check "{key}" key in "{section}" section. '
f'Current value: "{val}" and it must be one of {", ".join(enum_names)}'
)
try:
return enum_class[val]
except KeyError:
if "fallback" in kwargs and kwargs["fallback"] in enum_names:
return enum_class[kwargs["fallback"]]
raise AirflowConfigException(
f'Failed to convert value. Please check "{key}" key in "{section}" section. '
f"the value must be one of {', '.join(enum_names)}"
)
def getenumlist(self, section: str, key: str, enum_class: type[E], delimiter=",", **kwargs) -> list[E]:
"""Get config value as list of enums."""
string_list = self.getlist(section, key, delimiter, **kwargs)
enum_names = [enum_item.name for enum_item in enum_class]
enum_list = []
for val in string_list:
try:
enum_list.append(enum_class[val])
except KeyError:
log.warning(
"Failed to convert value. Please check %s key in %s section. "
"it must be one of %s, if not the value is ignored",
key,
section,
", ".join(enum_names),
)
return enum_list
def getimport(self, section: str, key: str, **kwargs) -> Any:
    """
    Read options, import the full qualified name, and return the object.

    In case of failure, it throws an exception with the key and section names

    :return: The object or None, if the option is empty
    """
    dotted_path = self.get(section=section, key=key, **kwargs)
    if not dotted_path:
        return None
    try:
        # Import here to avoid circular dependency
        from airflow.utils.module_loading import import_string

        return import_string(dotted_path)
    except ImportError as e:
        log.warning(e)
        raise AirflowConfigException(
            f'The object could not be loaded. Please check "{key}" key in "{section}" section. '
            f'Current value: "{dotted_path}".'
        )
def getjson(
    self, section: str, key: str, fallback=None, **kwargs
) -> dict | list | str | int | float | None:
    """
    Return a config value parsed from a JSON string.

    ``fallback`` is *not* JSON parsed but used verbatim when no config value is given.
    """
    try:
        raw = self.get(section=section, key=key, fallback=None, _extra_stacklevel=1, **kwargs)
    except (NoSectionError, NoOptionError):
        raw = None
    if raw is None or raw == "":
        # Missing/empty config value: hand back the fallback untouched.
        return fallback
    try:
        return json.loads(raw)
    except JSONDecodeError as e:
        raise AirflowConfigException(f"Unable to parse [{section}] {key!r} as valid json") from e
def gettimedelta(
    self, section: str, key: str, fallback: Any = None, **kwargs
) -> datetime.timedelta | None:
    """
    Get the config value for the given section and key, and convert it into datetime.timedelta object.

    If the key is missing, then it is considered as `None`.

    :param section: the section from the config
    :param key: the key defined in the given section
    :param fallback: fallback value when no config value is given, defaults to None
    :raises AirflowConfigException: raised because ValueError or OverflowError
    :return: datetime.timedelta(seconds=<config_value>) or None
    """
    val = self.get(section, key, fallback=fallback, _extra_stacklevel=1, **kwargs)

    if val:
        # the given value must be convertible to integer
        try:
            int_val = int(val)
        except ValueError as e:
            # Chain the original ValueError so the cause is preserved.
            raise AirflowConfigException(
                f'Failed to convert value to int. Please check "{key}" key in "{section}" section. '
                f'Current value: "{val}".'
            ) from e

        try:
            return datetime.timedelta(seconds=int_val)
        except OverflowError as err:
            # Chain the OverflowError for the same reason.
            raise AirflowConfigException(
                f"Failed to convert value to timedelta in `seconds`. "
                f"{err}. "
                f'Please check "{key}" key in "{section}" section. Current value: "{val}".'
            ) from err

    return fallback
def get_mandatory_value(self, section: str, key: str, **kwargs) -> str:
    """Return the config value for *section*/*key*, raising ``ValueError`` when it is unset."""
    value = self.get(section, key, _extra_stacklevel=1, **kwargs)
    if value is not None:
        return value
    raise ValueError(f"The value {section}/{key} should be set!")
def get_mandatory_list_value(self, section: str, key: str, **kwargs) -> list[str]:
    """Return the config value for *section*/*key* as a list, raising ``ValueError`` when unset."""
    items = self.getlist(section, key, **kwargs)
    if items is not None:
        return items
    raise ValueError(f"The value {section}/{key} should be set!")
def read(
    self,
    filenames: str | bytes | os.PathLike | Iterable[str | bytes | os.PathLike],
    encoding: str | None = None,
) -> list[str]:
    # Thin override of ConfigParser.read: only narrows the type hints;
    # parsing behavior is delegated entirely to the superclass.
    return super().read(filenames=filenames, encoding=encoding)
def read_dict(  # type: ignore[override]
    self, dictionary: dict[str, dict[str, Any]], source: str = "<dict>"
) -> None:
    """
    We define a different signature here to add better type hints and checking.

    :param dictionary: dictionary to read from
    :param source: source to be used to store the configuration
    :return:
    """
    # Behavior is unchanged; only the signature is narrowed for type checking.
    super().read_dict(dictionary=dictionary, source=source)
def get_sections_including_defaults(self) -> list[str]:
    """
    Retrieve all sections from the configuration parser, including sections defined by built-in defaults.

    :return: list of section names (defaults first, duplicates removed, order preserved)
    """
    ordered: dict[str, None] = {}
    for name in self.configuration_description:
        ordered[name] = None
    for name in self.sections():
        ordered[name] = None
    return list(ordered)
def get_options_including_defaults(self, section: str) -> list[str]:
    """
    Retrieve all possible options from the configuration parser for the section given.

    Includes options defined by built-in defaults.

    :param section: section name
    :return: list of option names for the section given (defaults first, deduplicated)
    """
    ordered: dict[str, None] = dict.fromkeys(
        self.configuration_description.get(section, {}).get("options", {})
    )
    if self.has_section(section):
        for opt in self.options(section):
            ordered.setdefault(opt, None)
    return list(ordered)
def has_option(self, section: str, option: str, lookup_from_deprecated: bool = True) -> bool:
    """
    Check if option is defined.

    Uses self.get() to avoid reimplementing the priority order of config variables
    (env, config, cmd, defaults).

    :param section: section to get option from
    :param option: option to get
    :param lookup_from_deprecated: If True, check if the option is defined in deprecated sections
    :return:
    """
    try:
        found = self.get(
            section,
            option,
            fallback=None,
            _extra_stacklevel=1,
            suppress_warnings=True,
            lookup_from_deprecated=lookup_from_deprecated,
        )
    except (NoOptionError, NoSectionError, AirflowConfigException):
        # Any lookup failure simply means the option is not defined.
        return False
    return found is not None
def set(self, section: str, option: str, value: str | None = None) -> None:
    """
    Set an option to the given value.

    This override just makes sure the section and option are lower case, to match what we do in `get`.
    """
    section, option = section.lower(), option.lower()
    known_defaults = self.configuration_description or {}
    if section in known_defaults and not self.has_section(section):
        # Trying to set a key in a section that exists in default, but not in the user config;
        # automatically create it
        self.add_section(section)
    super().set(section, option, value)
def remove_option(self, section: str, option: str, remove_default: bool = True):
    """
    Remove an option if it exists in config from a file or default config.

    If both of config have the same option, this removes the option
    in both configs unless remove_default=False.
    """
    section = section.lower()
    option = option.lower()
    # Drop the user-supplied value (if any) from this parser first ...
    if super().has_option(section, option):
        super().remove_option(section, option)

    # ... then optionally drop the built-in default as well.
    if self.get_default_value(section, option) is not None and remove_default:
        self._default_values.remove_option(section, option)
def optionxform(self, optionstr: str) -> str:
    """
    Transform option names on every read, get, or set operation.

    Unlike the stock ``ConfigParser`` (which lower-cases option names), this
    implementation is deliberately case-preserving and returns the name as-is.

    :param optionstr: the option name
    :return: the unmodified option name
    """
    return optionstr
def _get_config_sources_for_as_dict(self) -> list[tuple[str, ConfigParser]]:
    """
    Get list of config sources to use in as_dict().

    Subclasses can override to add additional sources (e.g., provider configs).
    """
    # Order matters: as_dict() lets the last source that defines a key win,
    # so defaults come first and the user config (self) overrides them.
    sources: list = [("default", self._default_values)]
    sources.append(("airflow.cfg", self))
    return sources
def as_dict(
    self,
    display_source: bool = False,
    display_sensitive: bool = False,
    raw: bool = False,
    include_env: bool = True,
    include_cmds: bool = True,
    include_secret: bool = True,
) -> ConfigSourcesType:
    """
    Return the current configuration as an OrderedDict of OrderedDicts.

    When materializing current configuration Airflow defaults are
    materialized along with user set configs. If any of the `include_*`
    options are False then the result of calling command or secret key
    configs do not override Airflow defaults and instead are passed through.
    In order to then avoid Airflow defaults from overwriting user set
    command or secret key configs we filter out bare sensitive_config_values
    that are set to Airflow defaults when command or secret key configs
    produce different values.

    :param display_source: If False, the option value is returned. If True,
        a tuple of (option_value, source) is returned. Source is either
        'airflow.cfg', 'default', 'env var', or 'cmd'.
    :param display_sensitive: If True, the values of options set by env
        vars and bash commands will be displayed. If False, those options
        are shown as '< hidden >'
    :param raw: Should the values be output as interpolated values, or the
        "raw" form that can be fed back in to ConfigParser
    :param include_env: Should the value of configuration from AIRFLOW__
        environment variables be included or not
    :param include_cmds: Should the result of calling any ``*_cmd`` config be
        set (True, default), or should the _cmd options be left as the
        command to run (False)
    :param include_secret: Should the result of calling any ``*_secret`` config be
        set (True, default), or should the _secret options be left as the
        path to get the secret from (False)
    :return: Dictionary, where the key is the name of the section and the content is
        the dictionary with the name of the parameter and its value.
    """
    if not display_sensitive:
        # We want to hide the sensitive values at the appropriate methods
        # since envs from cmds, secrets can be read at _include_envs method
        if not all([include_env, include_cmds, include_secret]):
            raise ValueError(
                "If display_sensitive is false, then include_env, "
                "include_cmds, include_secret must all be set as True"
            )

    config_sources: ConfigSourcesType = {}

    # We check sequentially all those sources and the last one we saw it in will "win"
    configs = self._get_config_sources_for_as_dict()
    self._replace_config_with_display_sources(
        config_sources,
        configs,
        self.configuration_description,
        display_source,
        raw,
        self.deprecated_options,
        include_cmds=include_cmds,
        include_env=include_env,
        include_secret=include_secret,
    )

    # add env vars and overwrite because they have priority
    if include_env:
        self._include_envs(config_sources, display_sensitive, display_source, raw)
    else:
        # When env values are excluded, strip entries that originate from env vars.
        self._filter_by_source(config_sources, display_source, self._get_env_var_option)

    # add bash commands
    if include_cmds:
        self._include_commands(config_sources, display_sensitive, display_source, raw)
    else:
        self._filter_by_source(config_sources, display_source, self._get_cmd_option)

    # add config from secret backends
    if include_secret:
        self._include_secrets(config_sources, display_sensitive, display_source, raw)
    else:
        self._filter_by_source(config_sources, display_source, self._get_secret_option)

    if not display_sensitive:
        # This ensures the ones from config file is hidden too
        # if they are not provided through env, cmd and secret
        hidden = "< hidden >"
        # NOTE: masking only applies to truthy values — empty/None entries
        # are left as-is by the .get(key, None) truthiness check below.
        for section, key in self.sensitive_config_values:
            if config_sources.get(section):
                if config_sources[section].get(key, None):
                    if display_source:
                        source = config_sources[section][key][1]
                        config_sources[section][key] = (hidden, source)
                    else:
                        config_sources[section][key] = hidden

    return config_sources
def _write_option_header(
    self,
    file: IO[str],
    option: str,
    extra_spacing: bool,
    include_descriptions: bool,
    include_env_vars: bool,
    include_examples: bool,
    include_sources: bool,
    section_config_description: dict[str, dict[str, Any]],
    section_to_write: str,
    sources_dict: ConfigSourcesType,
) -> tuple[bool, bool]:
    """
    Write header for configuration option.

    Returns tuple of (should_continue, needs_separation) where needs_separation should be
    set if the option needs additional separation to visually separate it from the next option.
    """
    if section_config_description:
        option_meta = section_config_description.get("options", {}).get(option, {})
    else:
        option_meta = {}
    wrote_anything = False

    description = option_meta.get("description")
    if include_descriptions and description:
        file.write("".join(f"# {line}\n" for line in description.splitlines()))
        wrote_anything = True

    example = option_meta.get("example")
    if include_examples and example is not None:
        if extra_spacing:
            file.write("#\n")
        file.write(f"# Example: {option} = " + "\n# ".join(example.splitlines()) + "\n")
        wrote_anything = True

    if include_sources and sources_dict:
        section_sources = sources_dict.get(section_to_write)
        value_with_source = section_sources.get(option) if section_sources else None
        if value_with_source is None:
            file.write("#\n# Source: not defined\n")
        else:
            file.write(f"#\n# Source: {value_with_source[1]}\n")
        wrote_anything = True

    if include_env_vars:
        file.write(f"#\n# Variable: AIRFLOW__{section_to_write.upper()}__{option.upper()}\n")
        if extra_spacing:
            file.write("#\n")
        wrote_anything = True

    return True, wrote_anything
def is_template(self, section: str, key) -> bool:
    """
    Return whether the value is templated.

    :param section: section of the config
    :param key: key in the section
    :return: True if the value is templated
    """
    # Delegates to the module-level _is_template helper, driven by the
    # built-in configuration description.
    return _is_template(self.configuration_description, section, key)
def getsection(self, section: str) -> ConfigOptionsDictType | None:
    """
    Return the section as a dict.

    Values are converted to int, float, bool as required.

    :param section: section from the config
    """
    if not self.has_section(section) and not self._default_values.has_section(section):
        return None
    if self._default_values.has_section(section):
        _section: ConfigOptionsDictType = dict(self._default_values.items(section))
    else:
        _section = {}
    if self.has_section(section):
        # User-set values override the defaults collected above.
        _section.update(self.items(section))

    section_prefix = self._env_var_name(section, "")
    for env_var in sorted(os.environ.keys()):
        if env_var.startswith(section_prefix):
            key = env_var.replace(section_prefix, "")
            if key.endswith("_CMD"):
                # AIRFLOW__<SECTION>__<KEY>_CMD env vars are surfaced under the
                # plain key name; _get_env_var_option presumably resolves the
                # command form — TODO confirm.
                key = key[:-4]
            key = key.lower()
            _section[key] = self._get_env_var_option(section, key)

    # Best-effort coercion: try int, then float, then a small set of
    # boolean spellings; anything else stays a string.
    for key, val in _section.items():
        if val is None:
            raise AirflowConfigException(
                f"Failed to convert value automatically. "
                f'Please check "{key}" key in "{section}" section is set.'
            )
        try:
            _section[key] = int(val)
        except ValueError:
            try:
                _section[key] = float(val)
            except ValueError:
                if isinstance(val, str) and val.lower() in ("t", "true"):
                    _section[key] = True
                elif isinstance(val, str) and val.lower() in ("f", "false"):
                    _section[key] = False
    return _section
@staticmethod
def _write_section_header(
    file: IO[str],
    include_descriptions: bool,
    section_config_description: dict[str, str],
    section_to_write: str,
) -> None:
    """Emit the ``[section]`` line, optionally followed by its commented description."""
    file.write(f"[{section_to_write}]\n")
    section_description = section_config_description.get("description")
    if include_descriptions and section_description:
        file.write("".join(f"# {line}\n" for line in section_description.splitlines()))
    file.write("\n")
def _write_value(
    self,
    file: IO[str],
    option: str,
    comment_out_everything: bool,
    needs_separation: bool,
    only_defaults: bool,
    section_to_write: str,
):
    """Write a single ``option = value`` line (possibly commented out) to *file*."""
    default_value = self.get_default_value(section_to_write, option, raw=True)
    if only_defaults:
        value = default_value
    else:
        # Effective value, falling back to the built-in default.
        value = self.get(section_to_write, option, fallback=default_value, raw=True)
    if value is None:
        # No value at all: emit a commented-out empty assignment.
        file.write(f"# {option} = \n")
    else:
        if comment_out_everything:
            value_lines = value.splitlines()
            value = "\n# ".join(value_lines)
            file.write(f"# {option} = {value}\n")
        else:
            if "\n" in value:
                # Multi-line values: pretty-print as JSON when possible so the
                # continuation lines stay valid configparser syntax.
                try:
                    value = json.dumps(json.loads(value), indent=4)
                    value = value.replace(
                        "\n", "\n "
                    )  # indent multi-line JSON to satisfy configparser format
                except JSONDecodeError:
                    pass
            file.write(f"{option} = {value}\n")
    if needs_separation:
        file.write("\n")
def write(  # type: ignore[override]
    self,
    file: IO[str],
    section: str | None = None,
    include_examples: bool = True,
    include_descriptions: bool = True,
    include_sources: bool = True,
    include_env_vars: bool = True,
    include_providers: bool = True,
    comment_out_everything: bool = False,
    hide_sensitive_values: bool = False,
    extra_spacing: bool = True,
    only_defaults: bool = False,
    **kwargs: Any,
) -> None:
    """
    Write configuration with comments and examples to a file.

    :param file: file to write to
    :param section: section of the config to write, defaults to all sections
    :param include_examples: Include examples in the output
    :param include_descriptions: Include descriptions in the output
    :param include_sources: Include the source of each config option
    :param include_env_vars: Include environment variables corresponding to each config option
    :param include_providers: Include providers configuration
    :param comment_out_everything: Comment out all values
    :param hide_sensitive_values: Include sensitive values in the output
    :param extra_spacing: Add extra spacing before examples and after variables
    :param only_defaults: Only include default values when writing the config, not the actual values
    """
    # NOTE(review): hide_sensitive_values is accepted but never read in this
    # body — confirm whether masking is expected to happen here.
    sources_dict = {}
    if include_sources:
        sources_dict = self.as_dict(display_source=True)
    with self.make_sure_configuration_loaded(with_providers=include_providers):
        for section_to_write in self.get_sections_including_defaults():
            section_config_description = self.configuration_description.get(section_to_write, {})
            if section_to_write != section and section is not None:
                # A specific section was requested; skip all others.
                continue
            if self._default_values.has_section(section_to_write) or self.has_section(section_to_write):
                self._write_section_header(
                    file, include_descriptions, section_config_description, section_to_write
                )
                for option in self.get_options_including_defaults(section_to_write):
                    should_continue, needs_separation = self._write_option_header(
                        file=file,
                        option=option,
                        extra_spacing=extra_spacing,
                        include_descriptions=include_descriptions,
                        include_env_vars=include_env_vars,
                        include_examples=include_examples,
                        include_sources=include_sources,
                        section_config_description=section_config_description,
                        section_to_write=section_to_write,
                        sources_dict=sources_dict,
                    )
                    self._write_value(
                        file=file,
                        option=option,
                        comment_out_everything=comment_out_everything,
                        needs_separation=needs_separation,
                        only_defaults=only_defaults,
                        section_to_write=section_to_write,
                    )
                # NOTE(review): needs_separation would be unbound if a section
                # had zero options — confirm that cannot happen here.
                if include_descriptions and not needs_separation:
                    # extra separation between sections in case last option did not need it
                    file.write("\n")
@contextmanager
def make_sure_configuration_loaded(self, with_providers: bool) -> Generator[None, None, None]:
    """
    Make sure configuration is loaded with or without providers.

    This happens regardless if the provider configuration has been loaded before or not.
    Restores configuration to the state before entering the context.

    :param with_providers: whether providers should be loaded
    """
    needs_reload = False
    if with_providers:
        self._ensure_providers_config_loaded()
    else:
        needs_reload = self._ensure_providers_config_unloaded()
    try:
        yield
    finally:
        # Restore provider configs even if the guarded block raised; without
        # the try/finally an exception would leave the parser permanently
        # missing provider configuration.
        if needs_reload:
            self._reload_provider_configs()
def _ensure_providers_config_loaded(self) -> None:
    """Ensure providers configurations are loaded."""
    # Abstract hook — concrete subclasses wire in provider config loading.
    raise NotImplementedError("Subclasses must implement _ensure_providers_config_loaded method")
def _ensure_providers_config_unloaded(self) -> bool:
    """Ensure providers configurations are unloaded temporarily to load core configs. Returns True if providers get unloaded."""
    # Abstract hook — the boolean return drives the reload step in
    # make_sure_configuration_loaded.
    raise NotImplementedError("Subclasses must implement _ensure_providers_config_unloaded method")
def _reload_provider_configs(self) -> None:
    """Reload providers configuration."""
    # Abstract hook — counterpart to _ensure_providers_config_unloaded.
    raise NotImplementedError("Subclasses must implement _reload_provider_configs method")
| AirflowConfigParser |
python | walkccc__LeetCode | solutions/3270. Find the Key of the Numbers/3270.py | {
"start": 0,
"end": 305
} | class ____:
def generateKey(self, num1: int, num2: int, num3: int) -> int:
    """Build the key: per digit position, take the smallest digit of the three zero-padded numbers."""
    padded = [str(value).zfill(4) for value in (num1, num2, num3)]
    key_digits = [min(column) for column in zip(*padded)]
    return int("".join(key_digits))
| Solution |
python | geekcomputers__Python | Reverse_list_in_groups.py | {
"start": 94,
"end": 1452
} | class ____:
def __init__(self):
    # Empty list: head stays None until the first insert.
    self.head = None
def Insert_At_End(self, new_data):
    """Append a node holding *new_data* at the tail of the list."""
    new_node = Node(new_data)
    if self.head is None:
        self.head = new_node
        return
    tail = self.head
    while tail.next is not None:
        tail = tail.next
    tail.next = new_node
def Reverse_list_Groups(self, head, k):
    """Reverse the list in groups of *k* nodes and return the new head (recursive)."""
    previous, current = None, head
    steps = 0
    # Reverse up to k nodes of this group in place.
    while current is not None and steps < k:
        nxt = current.next
        current.next = previous
        previous, current = current, nxt
        steps += 1
    # The old group head is now the group tail; link it to the next
    # (recursively reversed) group.
    if current is not None:
        head.next = self.Reverse_list_Groups(current, k)
    return previous
def Display(self):
    """Print the list as ``a -> b -> ... -> None``."""
    temp = self.head
    while temp:
        print(temp.data, "->", end=" ")
        temp = temp.next
    print("None")
if __name__ == "__main__":
    # Demo: build the list 1..7, print it, reverse in groups of 2, print again.
    L_list = Reverse_Linked_List()
    L_list.Insert_At_End(1)
    L_list.Insert_At_End(2)
    L_list.Insert_At_End(3)
    L_list.Insert_At_End(4)
    L_list.Insert_At_End(5)
    L_list.Insert_At_End(6)
    L_list.Insert_At_End(7)
    L_list.Display()
    L_list.head = L_list.Reverse_list_Groups(L_list.head, 2)
    print("\nReverse Linked List: ")
    L_list.Display()
| Reverse_Linked_List |
python | scipy__scipy | scipy/integrate/_cubature.py | {
"start": 20956,
"end": 25783
} | class ____(_VariableTransform):
r"""
Transformation for handling infinite limits.
Assuming ``a = [a_1, ..., a_n]`` and ``b = [b_1, ..., b_n]``:
If :math:`a_i = -\infty` and :math:`b_i = \infty`, the i-th integration variable
will use the transformation :math:`x = \frac{1-|t|}{t}` and :math:`t \in (-1, 1)`.
If :math:`a_i \ne \pm\infty` and :math:`b_i = \infty`, the i-th integration variable
will use the transformation :math:`x = a_i + \frac{1-t}{t}` and
:math:`t \in (0, 1)`.
If :math:`a_i = -\infty` and :math:`b_i \ne \pm\infty`, the i-th integration
variable will use the transformation :math:`x = b_i - \frac{1-t}{t}` and
:math:`t \in (0, 1)`.
"""
def __init__(self, f, a, b, xp):
    # Array-API namespace used for all array operations.
    self._xp = xp
    self._f = f
    self._orig_a = a
    self._orig_b = b

    # (-oo, oo) will be mapped to (-1, 1).
    self._double_inf_pos = (a == -math.inf) & (b == math.inf)

    # (start, oo) will be mapped to (0, 1).
    start_inf_mask = (a != -math.inf) & (b == math.inf)

    # (-oo, end) will be mapped to (0, 1).
    inf_end_mask = (a == -math.inf) & (b != math.inf)

    # This is handled by making the transformation t = -x and reducing it to
    # the other semi-infinite case.
    self._semi_inf_pos = start_inf_mask | inf_end_mask

    # Since we flip the limits, we don't need to separately multiply the
    # integrand by -1.
    # NOTE: this writes through to the caller-supplied a/b arrays in place.
    self._orig_a[inf_end_mask] = -b[inf_end_mask]
    self._orig_b[inf_end_mask] = -a[inf_end_mask]

    # Number of axes with at least one infinite limit, as a Python int.
    self._num_inf = self._xp.sum(
        self._xp.astype(self._double_inf_pos | self._semi_inf_pos, self._xp.int64),
    ).__int__()
@property
def transformed_limits(self):
    # Limits after substitution: doubly-infinite axes become (-1, 1),
    # semi-infinite axes become (0, 1); finite axes are left untouched.
    a = xp_copy(self._orig_a)
    b = xp_copy(self._orig_b)

    a[self._double_inf_pos] = -1
    b[self._double_inf_pos] = 1

    a[self._semi_inf_pos] = 0
    b[self._semi_inf_pos] = 1

    return a, b
@property
def points(self):
    """Singular points of the substitution (the origin) for region splitting."""
    # If there are infinite limits, then the origin becomes a problematic point
    # due to a division by zero there.
    # If the function using this class only wraps f when a and b contain infinite
    # limits, this condition will always be met (as is the case with cubature).
    #
    # If a and b do not contain infinite limits but f is still wrapped with this
    # class, then without this condition the initial region of integration will
    # be split around the origin unnecessarily.
    if self._num_inf != 0:
        return [self._xp.zeros(self._orig_a.shape)]
    else:
        return []
def inv(self, x):
    """Inverse substitution: map original coordinates *x* back to transformed *t*."""
    t = xp_copy(x)
    npoints = x.shape[0]

    double_inf_mask = self._xp.tile(
        self._double_inf_pos[self._xp.newaxis, :],
        (npoints, 1),
    )
    semi_inf_mask = self._xp.tile(
        self._semi_inf_pos[self._xp.newaxis, :],
        (npoints, 1),
    )

    # If any components of x are 0, then this component will be mapped to infinity
    # under the transformation used for doubly-infinite limits.
    #
    # Handle the zero values and non-zero values separately to avoid division by
    # zero.
    # NOTE(review): zero_mask has the 1-D shape of x[double_inf_mask] while
    # double_inf_mask is 2-D — confirm the broadcasting in non_zero_mask.
    zero_mask = x[double_inf_mask] == 0
    non_zero_mask = double_inf_mask & ~zero_mask
    t[zero_mask] = math.inf
    t[non_zero_mask] = 1/(x[non_zero_mask] + self._xp.sign(x[non_zero_mask]))

    start = self._xp.tile(self._orig_a[self._semi_inf_pos], (npoints,))
    t[semi_inf_mask] = 1/(x[semi_inf_mask] - start + 1)

    return t
def __call__(self, t, *args, **kwargs):
    """Evaluate f at the transformed points and scale by the Jacobian determinant."""
    x = xp_copy(t)
    npoints = t.shape[0]

    double_inf_mask = self._xp.tile(
        self._double_inf_pos[self._xp.newaxis, :],
        (npoints, 1),
    )
    semi_inf_mask = self._xp.tile(
        self._semi_inf_pos[self._xp.newaxis, :],
        (npoints, 1),
    )

    # For (-oo, oo) -> (-1, 1), use the transformation x = (1-|t|)/t.
    x[double_inf_mask] = (
        (1 - self._xp.abs(t[double_inf_mask])) / t[double_inf_mask]
    )

    start = self._xp.tile(self._orig_a[self._semi_inf_pos], (npoints,))
    # For (start, oo) -> (0, 1), use the transformation x = start + (1-t)/t.
    x[semi_inf_mask] = start + (1 - t[semi_inf_mask]) / t[semi_inf_mask]

    # Both substitutions contribute |dx/dt| = 1/t^2 per transformed axis,
    # hence the reciprocal of the product of t^2 over those axes.
    jacobian_det = 1/self._xp.prod(
        self._xp.reshape(
            t[semi_inf_mask | double_inf_mask]**2,
            (-1, self._num_inf),
        ),
        axis=-1,
    )

    f_x = self._f(x, *args, **kwargs)
    # Broadcast the per-point Jacobian over any trailing output dimensions of f.
    jacobian_det = self._xp.reshape(jacobian_det, (-1, *([1]*(len(f_x.shape) - 1))))

    return f_x * jacobian_det
| _InfiniteLimitsTransform |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/slots.py | {
"start": 59,
"end": 338
} | class ____:
"""docstring"""
__slots__ = {
'attr1': 'docstring of attr1',
'attr2': 'docstring of attr2',
'attr3': None,
}
__annotations__ = {'attr1': int}
def __init__(self):
self.attr2 = None #: docstring of instance attr2
| Bar |
python | huggingface__transformers | src/transformers/models/data2vec/modeling_data2vec_vision.py | {
"start": 22434,
"end": 26891
} | class ____(nn.Module):
def __init__(self, config: Data2VecVisionConfig, window_size: tuple) -> None:
    super().__init__()
    self.window_size = window_size
    # (2*Wh-1)*(2*Ww-1) spatial offsets, plus 3 extra slots for the
    # cls-to-token / token-to-cls / cls-to-cls biases.
    self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
    self.relative_position_bias_table = nn.Parameter(
        torch.zeros(self.num_relative_distance, config.num_attention_heads)
    )  # 2*Wh-1 * 2*Ww-1, nH
    # cls to token & token 2 cls & cls to cls
@compile_compatible_method_lru_cache(maxsize=10)
def generate_relative_position_index(self, window_size: tuple[int, int]) -> torch.Tensor:
    """
    This method creates the relative position index, modified to support arbitrary window sizes,
    as introduced in [MiDaS v3.1](https://huggingface.co/papers/2307.14460).
    """
    num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
    # cls to token & token 2 cls & cls to cls
    # get pair-wise relative position index for each token inside the window
    window_area = window_size[0] * window_size[1]
    grid = torch.meshgrid(torch.arange(window_size[0]), torch.arange(window_size[1]), indexing="ij")
    coords = torch.stack(grid)  # 2, Wh, Ww
    coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
    # Pairwise coordinate differences for every pair of window positions.
    relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
    relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
    relative_coords[:, :, 0] += window_size[0] - 1  # shift to start from 0
    relative_coords[:, :, 1] += window_size[1] - 1
    relative_coords[:, :, 0] *= 2 * window_size[1] - 1
    relative_position_index = torch.zeros(size=(window_area + 1,) * 2, dtype=relative_coords.dtype)
    relative_position_index[1:, 1:] = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
    # The last three table rows are reserved for the cls-related biases.
    relative_position_index[0, 0:] = num_relative_distance - 3
    relative_position_index[0:, 0] = num_relative_distance - 2
    relative_position_index[0, 0] = num_relative_distance - 1
    return relative_position_index
def forward(self, window_size, interpolate_pos_encoding: bool = False, dim_size=None) -> torch.Tensor:
    """
    Modification of timm.models.beit.py: Attention._get_rel_pos_bias to support arbitrary window sizes.
    """
    old_height = 2 * self.window_size[0] - 1
    old_width = 2 * self.window_size[1] - 1
    new_height = 2 * window_size[0] - 1
    new_width = 2 * window_size[1] - 1

    old_relative_position_bias_table = self.relative_position_bias_table

    old_num_relative_distance = self.num_relative_distance
    new_num_relative_distance = new_height * new_width + 3
    # Only the spatial part of the table is resized; the last 3 rows
    # (cls-related biases) are carried over unchanged below.
    old_sub_table = old_relative_position_bias_table[: old_num_relative_distance - 3]

    old_sub_table = old_sub_table.reshape(1, old_width, old_height, -1).permute(0, 3, 1, 2)
    new_sub_table = nn.functional.interpolate(
        old_sub_table, size=(torch_int(new_height), torch_int(new_width)), mode="bilinear"
    )
    new_sub_table = new_sub_table.permute(0, 2, 3, 1).reshape(new_num_relative_distance - 3, -1)

    new_relative_position_bias_table = torch.cat(
        [new_sub_table, old_relative_position_bias_table[old_num_relative_distance - 3 :]]
    )

    relative_position_index = self.generate_relative_position_index(window_size)
    relative_position_bias = new_relative_position_bias_table[relative_position_index.view(-1)]
    # patch_size*num_patches_height, patch_size*num_patches_width, num_attention_heads
    relative_position_bias = relative_position_bias.view(
        window_size[0] * window_size[1] + 1, window_size[0] * window_size[1] + 1, -1
    )
    # num_attention_heads, patch_size*num_patches_width, patch_size*num_patches_height
    relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
    if interpolate_pos_encoding:
        # Resample the bias map to (dim_size, dim_size) token positions.
        relative_position_bias = nn.functional.interpolate(
            relative_position_bias.unsqueeze(1),
            size=(dim_size, dim_size),
            mode="bilinear",
            align_corners=False,
        ).squeeze(1)
    return relative_position_bias.unsqueeze(0)
# Copied from transformers.models.beit.modeling_beit.BeitEncoder with Beit->Data2VecVision
| Data2VecVisionRelativePositionBias |
python | bokeh__bokeh | src/bokeh/core/property/include.py | {
"start": 1439,
"end": 2904
} | class ____(PropertyDescriptorFactory[T]):
""" Include "mix-in" property collection in a Bokeh model.
See :ref:`bokeh.core.property_mixins` for more details.
"""
def __init__(self, delegate: type[HasProps], *, help: str = "", prefix: str | None = None) -> None:
    # Validate eagerly so a bad delegate fails at definition time.
    if not (isinstance(delegate, type) and issubclass(delegate, HasProps)):
        raise ValueError(f"expected a subclass of HasProps, got {delegate!r}")
    self.delegate = delegate
    self.help = help
    # Optional prefix applied to every generated property name ("<prefix>_<name>").
    self.prefix = prefix + "_" if prefix else ""
def make_descriptors(self, _base_name: str) -> list[PropertyDescriptor[T]]:
    # Clone each delegate property so the mixed-in copies can carry their
    # own formatted docstrings without mutating the delegate class.
    descriptors = []
    for descriptor in self.delegate.descriptors():
        prop = copy(descriptor.property)
        # ``help`` may reference the humanized property name via {prop}.
        prop.__doc__ = self.help.format(prop=descriptor.name.replace("_", " "))
        descriptors += prop.make_descriptors(self.prefix + descriptor.name)
    return descriptors
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| Include |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/lib/metadata_service/models/generated/ConnectorRegistryV0.py | {
"start": 11656,
"end": 14033
} | class ____(BaseModel):
# NOTE: generated pydantic model (path contains models/generated) — manual
# edits here are presumably overwritten by codegen; change the schema instead.
class Config:
    # Allow unknown fields so newer registry entries don't fail validation.
    extra = Extra.allow

sourceDefinitionId: UUID
name: str
dockerRepository: str
dockerImageTag: str
documentationUrl: str
icon: Optional[str] = None
iconUrl: Optional[str] = None
sourceType: Optional[Literal["api", "file", "database", "custom"]] = None
spec: Dict[str, Any]
tombstone: Optional[bool] = Field(
    False,
    description="if false, the configuration is active. if true, then this configuration is permanently off.",
)
public: Optional[bool] = Field(
    False,
    description="true if this connector definition is available to all workspaces",
)
custom: Optional[bool] = Field(
    False, description="whether this is a custom connector definition"
)
releaseStage: Optional[ReleaseStage] = None
supportLevel: Optional[SupportLevel] = None
releaseDate: Optional[date] = Field(
    None,
    description="The date when this connector was first released, in yyyy-mm-dd format.",
)
resourceRequirements: Optional[ActorDefinitionResourceRequirements] = None
protocolVersion: Optional[str] = Field(
    None, description="the Airbyte Protocol version supported by the connector"
)
allowedHosts: Optional[AllowedHosts] = None
suggestedStreams: Optional[SuggestedStreams] = None
maxSecondsBetweenMessages: Optional[int] = Field(
    None,
    description="Number of seconds allowed between 2 airbyte protocol messages. The source will timeout if this delay is reach",
)
erdUrl: Optional[str] = Field(
    None, description="The URL where you can visualize the ERD"
)
releases: Optional[ConnectorRegistryReleases] = None
ab_internal: Optional[AirbyteInternal] = None
generated: Optional[GeneratedFields] = None
packageInfo: Optional[ConnectorPackageInfo] = None
language: Optional[str] = Field(
    None, description="The language the connector is written in"
)
supportsFileTransfer: Optional[bool] = False
supportsDataActivation: Optional[bool] = False
# Resolve pydantic forward references now that all models are defined.
ConnectorRegistryV0.update_forward_refs()
ConnectorRegistryDestinationDefinition.update_forward_refs()
ConnectorRegistryReleases.update_forward_refs()
ConnectorReleaseCandidates.update_forward_refs()
VersionReleaseCandidate.update_forward_refs()
| ConnectorRegistrySourceDefinition |
python | pypa__setuptools | setuptools/_vendor/more_itertools/more.py | {
"start": 27019,
"end": 63182
} | class ____:
"""Wrap *iterable* and return an object that buckets the iterable into
child iterables based on a *key* function.
>>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3']
>>> s = bucket(iterable, key=lambda x: x[0]) # Bucket by 1st character
>>> sorted(list(s)) # Get the keys
['a', 'b', 'c']
>>> a_iterable = s['a']
>>> next(a_iterable)
'a1'
>>> next(a_iterable)
'a2'
>>> list(s['b'])
['b1', 'b2', 'b3']
The original iterable will be advanced and its items will be cached until
they are used by the child iterables. This may require significant storage.
By default, attempting to select a bucket to which no items belong will
exhaust the iterable and cache all values.
If you specify a *validator* function, selected buckets will instead be
checked against it.
>>> from itertools import count
>>> it = count(1, 2) # Infinite sequence of odd numbers
>>> key = lambda x: x % 10 # Bucket by last digit
>>> validator = lambda x: x in {1, 3, 5, 7, 9} # Odd digits only
>>> s = bucket(it, key=key, validator=validator)
>>> 2 in s
False
>>> list(s[2])
[]
"""
    def __init__(self, iterable, key, validator=None):
        # Single shared iterator over the source; items are pulled lazily.
        self._it = iter(iterable)
        self._key = key
        # One FIFO of pending items per bucket key.
        self._cache = defaultdict(deque)
        # With no validator, every key is accepted.
        self._validator = validator or (lambda x: True)
    def __contains__(self, value):
        """Return True if at least one item belongs to the *value* bucket."""
        if not self._validator(value):
            return False
        try:
            # Probe the bucket by pulling one item from it...
            item = next(self[value])
        except StopIteration:
            return False
        else:
            # ...then push it back so membership testing is non-destructive.
            self._cache[value].appendleft(item)
            return True
    def _get_values(self, value):
        """
        Helper to yield items from the parent iterator that match *value*.
        Items that don't match are stored in the local cache as they
        are encountered.
        """
        while True:
            # If we've cached some items that match the target value, emit
            # the first one and evict it from the cache.
            if self._cache[value]:
                yield self._cache[value].popleft()
            # Otherwise we need to advance the parent iterator to search for
            # a matching item, caching the rest.
            else:
                while True:
                    try:
                        item = next(self._it)
                    except StopIteration:
                        return
                    item_value = self._key(item)
                    if item_value == value:
                        yield item
                        break
                    elif self._validator(item_value):
                        self._cache[item_value].append(item)
                    # Items whose key fails validation are dropped silently.
    def __iter__(self):
        # Exhaust the source so every key is known, caching all valid items.
        for item in self._it:
            item_value = self._key(item)
            if self._validator(item_value):
                self._cache[item_value].append(item)
        # Yields bucket keys, not items.
        yield from self._cache.keys()
    def __getitem__(self, value):
        # Invalid keys yield an empty iterator rather than scanning the source.
        if not self._validator(value):
            return iter(())
        return self._get_values(value)
def spy(iterable, n=1):
    """Peek at the first *n* items of *iterable* without losing them.

    Returns a 2-tuple: a list holding up to *n* leading elements, and an
    iterator equivalent to the original *iterable* (the peeked elements are
    re-attached to its front).

    >>> head, it = spy('abcdefg')
    >>> head
    ['a']
    >>> list(it)
    ['a', 'b', 'c', 'd', 'e', 'f', 'g']

    Requesting more items than exist is safe; the list is simply shorter:

    >>> head, it = spy([1, 2, 3], 10)
    >>> head
    [1, 2, 3]
    """
    iterator = iter(iterable)
    prefix = list(islice(iterator, n))
    # Hand back a copy of the prefix so callers may mutate it freely, and a
    # stream that replays the prefix before continuing with the remainder.
    return list(prefix), chain(prefix, iterator)
def interleave(*iterables):
    """Yield one element from each iterable in turn, stopping as soon as
    any of them is exhausted.

    >>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8]))
    [1, 4, 6, 2, 5, 7]

    See :func:`interleave_longest` for a variant that keeps going until
    every iterable is exhausted.
    """
    # zip truncates at the shortest input; flattening its tuples gives the
    # round-robin order.
    return (element for group in zip(*iterables) for element in group)
def interleave_longest(*iterables):
    """Yield one element from each iterable in turn, skipping iterables
    that have run out, until all are exhausted.

    >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8]))
    [1, 4, 6, 2, 5, 7, 3, 8]
    """
    # zip_longest pads exhausted inputs with a private sentinel; filtering
    # the sentinel back out leaves exactly the surviving elements in
    # round-robin order.
    missing = object()
    padded_groups = chain.from_iterable(
        zip_longest(*iterables, fillvalue=missing)
    )
    return (element for element in padded_groups if element is not missing)
def interleave_evenly(iterables, lengths=None):
    """
    Interleave multiple iterables so that their elements are evenly distributed
    throughout the output sequence.
    >>> iterables = [1, 2, 3, 4, 5], ['a', 'b']
    >>> list(interleave_evenly(iterables))
    [1, 2, 'a', 3, 4, 'b', 5]
    >>> iterables = [[1, 2, 3], [4, 5], [6, 7, 8]]
    >>> list(interleave_evenly(iterables))
    [1, 6, 4, 2, 7, 3, 8, 5]
    This function requires iterables of known length. Iterables without
    ``__len__()`` can be used by manually specifying lengths with *lengths*:
    >>> from itertools import combinations, repeat
    >>> iterables = [combinations(range(4), 2), ['a', 'b', 'c']]
    >>> lengths = [4 * (4 - 1) // 2, 3]
    >>> list(interleave_evenly(iterables, lengths=lengths))
    [(0, 1), (0, 2), 'a', (0, 3), (1, 2), 'b', (1, 3), (2, 3), 'c']
    Based on Bresenham's algorithm.
    """
    if lengths is None:
        try:
            lengths = [len(it) for it in iterables]
        except TypeError:
            raise ValueError(
                'Iterable lengths could not be determined automatically. '
                'Specify them with the lengths keyword.'
            )
    elif len(iterables) != len(lengths):
        raise ValueError('Mismatching number of iterables and lengths.')
    dims = len(lengths)
    # sort iterables by length, descending
    lengths_permute = sorted(
        range(dims), key=lambda i: lengths[i], reverse=True
    )
    lengths_desc = [lengths[i] for i in lengths_permute]
    iters_desc = [iter(iterables[i]) for i in lengths_permute]
    # the longest iterable is the primary one (Bresenham: the longest
    # distance along an axis)
    delta_primary, deltas_secondary = lengths_desc[0], lengths_desc[1:]
    iter_primary, iters_secondary = iters_desc[0], iters_desc[1:]
    # Per-secondary error accumulators; a negative error means that
    # secondary iterable is "behind" and must emit on this step.
    errors = [delta_primary // dims] * len(deltas_secondary)
    # Total number of elements still to emit across all iterables.
    to_yield = sum(lengths)
    while to_yield:
        yield next(iter_primary)
        to_yield -= 1
        # update errors for each secondary iterable
        errors = [e - delta for e, delta in zip(errors, deltas_secondary)]
        # those iterables for which the error is negative are yielded
        # ("diagonal step" in Bresenham)
        for i, e_ in enumerate(errors):
            if e_ < 0:
                yield next(iters_secondary[i])
                to_yield -= 1
                errors[i] += delta_primary
def collapse(iterable, base_type=None, levels=None):
    """Flatten an iterable with multiple levels of nesting (e.g., a list of
    lists of tuples) into non-iterable types.
    >>> iterable = [(1, 2), ([3, 4], [[5], [6]])]
    >>> list(collapse(iterable))
    [1, 2, 3, 4, 5, 6]
    Binary and text strings are not considered iterable and
    will not be collapsed.
    To avoid collapsing other types, specify *base_type*:
    >>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']]
    >>> list(collapse(iterable, base_type=tuple))
    ['ab', ('cd', 'ef'), 'gh', 'ij']
    Specify *levels* to stop flattening after a certain level:
    >>> iterable = [('a', ['b']), ('c', ['d'])]
    >>> list(collapse(iterable)) # Fully flattened
    ['a', 'b', 'c', 'd']
    >>> list(collapse(iterable, levels=1)) # Only one level flattened
    ['a', ['b'], 'c', ['d']]
    """
    # Iterative depth-first traversal: the stack holds (depth, nodes) pairs
    # so arbitrarily deep nesting cannot overflow the call stack.
    stack = deque()
    # Add our first node group, treat the iterable as a single node
    stack.appendleft((0, repeat(iterable, 1)))
    while stack:
        node_group = stack.popleft()
        level, nodes = node_group
        # Check if beyond max level
        if levels is not None and level > levels:
            yield from nodes
            continue
        for node in nodes:
            # Check if done iterating
            if isinstance(node, (str, bytes)) or (
                (base_type is not None) and isinstance(node, base_type)
            ):
                yield node
            # Otherwise try to create child nodes
            else:
                try:
                    tree = iter(node)
                except TypeError:
                    yield node
                else:
                    # Save our current location
                    stack.appendleft(node_group)
                    # Append the new child node
                    stack.appendleft((level + 1, tree))
                    # Break to process child node
                    break
def side_effect(func, iterable, chunk_size=None, before=None, after=None):
    """Invoke *func* on each item in *iterable* (or on each *chunk_size* group
    of items) before yielding the item.
    `func` must be a function that takes a single argument. Its return value
    will be discarded.
    *before* and *after* are optional functions that take no arguments. They
    will be executed before iteration starts and after it ends, respectively.
    `side_effect` can be used for logging, updating progress bars, or anything
    that is not functionally "pure."
    Emitting a status message:
    >>> from more_itertools import consume
    >>> func = lambda item: print('Received {}'.format(item))
    >>> consume(side_effect(func, range(2)))
    Received 0
    Received 1
    Operating on chunks of items:
    >>> pair_sums = []
    >>> func = lambda chunk: pair_sums.append(sum(chunk))
    >>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2))
    [0, 1, 2, 3, 4, 5]
    >>> list(pair_sums)
    [1, 5, 9]
    Writing to a file-like object:
    >>> from io import StringIO
    >>> from more_itertools import consume
    >>> f = StringIO()
    >>> func = lambda x: print(x, file=f)
    >>> before = lambda: print(u'HEADER', file=f)
    >>> after = f.close
    >>> it = [u'a', u'b', u'c']
    >>> consume(side_effect(func, it, before=before, after=after))
    >>> f.closed
    True
    """
    # try/finally guarantees *after* runs even if the consumer abandons the
    # generator early (close/GC triggers the finally clause).
    try:
        if before is not None:
            before()
        if chunk_size is None:
            for item in iterable:
                func(item)
                yield item
        else:
            # In chunked mode, func sees each chunk (a list from `chunked`)
            # but the items are still yielded one at a time.
            for chunk in chunked(iterable, chunk_size):
                func(chunk)
                yield from chunk
    finally:
        if after is not None:
            after()
def sliced(seq, n, strict=False):
    """Yield consecutive slices of length *n* taken from the sequence *seq*.

    >>> list(sliced((1, 2, 3, 4, 5, 6), 3))
    [(1, 2, 3), (4, 5, 6)]

    When ``len(seq)`` is not a multiple of *n*, the final slice is shorter:

    >>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3))
    [(1, 2, 3), (4, 5, 6), (7, 8)]

    With *strict* set, a short final slice raises ``ValueError`` instead.

    Only works on objects that support slicing; use :func:`chunked` for
    arbitrary iterables.
    """
    # Slicing past the end returns an empty sequence, which takewhile(len)
    # treats as the stop signal.
    slices = takewhile(len, (seq[start : start + n] for start in count(0, n)))
    if not strict:
        return slices

    def checked():
        # Re-yield each slice, but fail fast on a ragged final slice.
        for piece in slices:
            if len(piece) != n:
                raise ValueError("seq is not divisible by n.")
            yield piece

    return iter(checked())
def split_at(iterable, pred, maxsplit=-1, keep_separator=False):
    """Yield lists of items from *iterable*, splitting on each item for
    which *pred* returns true.

    >>> list(split_at('abcdcba', lambda x: x == 'b'))
    [['a'], ['c', 'd', 'c'], ['a']]

    Separators are dropped unless *keep_separator* is true, in which case
    each one is yielded as its own single-element list.  *maxsplit* caps
    the number of splits; ``-1`` (the default) means unlimited.

    >>> list(split_at(range(10), lambda n: n % 2 == 1, maxsplit=2))
    [[0], [2], [4, 5, 6, 7, 8, 9]]
    """
    if maxsplit == 0:
        yield list(iterable)
        return
    stream = iter(iterable)
    chunk = []
    splits_left = maxsplit
    for element in stream:
        if not pred(element):
            chunk.append(element)
            continue
        # Hit a separator: emit the accumulated chunk (possibly empty).
        yield chunk
        if keep_separator:
            yield [element]
        if splits_left == 1:
            # Split budget exhausted; the rest is one final chunk.
            yield list(stream)
            return
        splits_left -= 1
        chunk = []
    yield chunk
def split_before(iterable, pred, maxsplit=-1):
    """Yield lists of items from *iterable*; a new list starts at every
    item for which *pred* returns true (that item begins the new list).

    >>> list(split_before('OneTwo', lambda s: s.isupper()))
    [['O', 'n', 'e'], ['T', 'w', 'o']]

    *maxsplit* caps the number of splits; ``-1`` (the default) means
    unlimited.

    >>> list(split_before(range(10), lambda n: n % 3 == 0, maxsplit=2))
    [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
    """
    if maxsplit == 0:
        yield list(iterable)
        return
    stream = iter(iterable)
    group = []
    splits_left = maxsplit
    for element in stream:
        # The empty-group check stops a leading separator from producing
        # an empty first group.
        if pred(element) and group:
            yield group
            if splits_left == 1:
                yield [element] + list(stream)
                return
            group = []
            splits_left -= 1
        group.append(element)
    if group:
        yield group
def split_after(iterable, pred, maxsplit=-1):
    """Yield lists of items from *iterable*; each list ends with an item
    for which *pred* returns true (that item closes the list).

    >>> list(split_after('one1two2', lambda s: s.isdigit()))
    [['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']]

    *maxsplit* caps the number of splits; ``-1`` (the default) means
    unlimited.

    >>> list(split_after(range(10), lambda n: n % 3 == 0, maxsplit=2))
    [[0], [1, 2, 3], [4, 5, 6, 7, 8, 9]]
    """
    if maxsplit == 0:
        yield list(iterable)
        return
    stream = iter(iterable)
    group = []
    splits_left = maxsplit
    for element in stream:
        group.append(element)
        if pred(element):
            yield group
            if splits_left == 1:
                # Split budget exhausted; emit the remainder if non-empty.
                tail = list(stream)
                if tail:
                    yield tail
                return
            group = []
            splits_left -= 1
    if group:
        yield group
def split_when(iterable, pred, maxsplit=-1):
    """Split *iterable* between consecutive items ``x, y`` whenever
    ``pred(x, y)`` is true.

    >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y))
    [[1, 2, 3, 3], [2, 5], [2, 4], [2]]

    *maxsplit* caps the number of splits; ``-1`` (the default) means
    unlimited.
    """
    if maxsplit == 0:
        yield list(iterable)
        return
    stream = iter(iterable)
    try:
        previous = next(stream)
    except StopIteration:
        # Empty input: yield nothing at all.
        return
    group = [previous]
    splits_left = maxsplit
    for current in stream:
        if pred(previous, current):
            yield group
            if splits_left == 1:
                yield [current] + list(stream)
                return
            group = []
            splits_left -= 1
        group.append(current)
        previous = current
    yield group
def split_into(iterable, sizes):
    """Yield successive lists from *iterable* whose lengths are given by
    the integers in *sizes*.

    >>> list(split_into([1, 2, 3, 4, 5, 6], [1, 2, 3]))
    [[1], [2, 3], [4, 5, 6]]

    Leftover items (when ``sum(sizes)`` is smaller than the input) are not
    returned; if the input runs out first, the overrunning list is short
    and later lists are empty.  A ``None`` in *sizes* means "everything
    remaining", after which iteration stops.

    >>> list(split_into([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], [2, 3, None]))
    [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]
    """
    # A single shared iterator lets islice consume the input progressively.
    stream = iter(iterable)
    for size in sizes:
        if size is None:
            yield list(stream)
            return
        yield list(islice(stream, size))
def padded(iterable, fillvalue=None, n=None, next_multiple=False):
    """Yield the elements from *iterable*, followed by *fillvalue*, such that
    at least *n* items are emitted.
    >>> list(padded([1, 2, 3], '?', 5))
    [1, 2, 3, '?', '?']
    If *next_multiple* is ``True``, *fillvalue* will be emitted until the
    number of items emitted is a multiple of *n*:
    >>> list(padded([1, 2, 3, 4], n=3, next_multiple=True))
    [1, 2, 3, 4, None, None]
    If *n* is ``None``, *fillvalue* will be emitted indefinitely.
    To create an *iterable* of exactly size *n*, you can truncate with
    :func:`islice`.
    >>> list(islice(padded([1, 2, 3], '?'), 5))
    [1, 2, 3, '?', '?']
    >>> list(islice(padded([1, 2, 3, 4, 5, 6, 7, 8], '?'), 5))
    [1, 2, 3, 4, 5]
    """
    iterable = iter(iterable)
    # `iterable_with_repeat` wraps the SAME iterator as `iterable`, so
    # consuming one advances the other — the branches below rely on this.
    iterable_with_repeat = chain(iterable, repeat(fillvalue))
    if n is None:
        return iterable_with_repeat
    elif n < 1:
        raise ValueError('n must be at least 1')
    elif next_multiple:
        def slice_generator():
            # Each pass pulls 1 real element, then n - 1 more from the
            # shared stream; once the source runs dry, the remainder of the
            # final slice comes from the repeat() tail, padding to a
            # multiple of n.
            for first in iterable:
                yield (first,)
                yield islice(iterable_with_repeat, n - 1)
        # While elements exist produce slices of size n
        return chain.from_iterable(slice_generator())
    else:
        # Ensure the first batch is at least size n then iterate
        return chain(islice(iterable_with_repeat, n), iterable)
def repeat_each(iterable, n=2):
    """Yield each element of *iterable* *n* times in a row.

    >>> list(repeat_each('ABC', 3))
    ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C']
    """
    return chain.from_iterable(repeat(element, n) for element in iterable)
def repeat_last(iterable, default=None):
    """Yield every element of *iterable*, then repeat its last element
    forever.  If *iterable* is empty, repeat *default* forever instead.

    >>> list(islice(repeat_last(range(3)), 5))
    [0, 1, 2, 2, 2]
    """
    # Private sentinel distinguishes "no elements seen" from a last element
    # that happens to equal *default*.
    unset = object()
    last = unset
    for last in iterable:
        yield last
    tail = default if last is unset else last
    yield from repeat(tail)
def distribute(n, iterable):
    """Deal the items of *iterable* round-robin into *n* child iterables.

    >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
    >>> list(group_1)
    [1, 3, 5]
    >>> list(group_2)
    [2, 4, 6]

    Children may end up with unequal lengths (or be empty) when the input
    size is not a multiple of *n*.  Uses :func:`itertools.tee`, so storage
    proportional to the input may be required.  See :func:`divide` for a
    variant that preserves contiguous runs.

    Raises ``ValueError`` if *n* is less than 1.
    """
    if n < 1:
        raise ValueError('n must be at least 1')
    # Child k sees items k, k + n, k + 2n, ... of its tee'd copy.
    copies = tee(iterable, n)
    return [
        islice(copy, start, None, n) for start, copy in enumerate(copies)
    ]
def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None):
    """Yield tuples whose elements are offset from *iterable*.
    The amount by which the `i`-th item in each tuple is offset is given by
    the `i`-th item in *offsets*.
    >>> list(stagger([0, 1, 2, 3]))
    [(None, 0, 1), (0, 1, 2), (1, 2, 3)]
    >>> list(stagger(range(8), offsets=(0, 2, 4)))
    [(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)]
    By default, the sequence will end when the final element of a tuple is the
    last item in the iterable. To continue until the first element of a tuple
    is the last item in the iterable, set *longest* to ``True``::
    >>> list(stagger([0, 1, 2, 3], longest=True))
    [(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)]
    By default, ``None`` will be used to replace offsets beyond the end of the
    sequence. Specify *fillvalue* to use some other value.
    """
    # One tee'd copy per offset; zip_offset does the actual shifting.
    children = tee(iterable, len(offsets))
    return zip_offset(
        *children, offsets=offsets, longest=longest, fillvalue=fillvalue
    )
def zip_equal(*iterables):
    """``zip`` the input *iterables* together, but raise
    ``UnequalIterablesError`` if they aren't all the same length.
    >>> it_1 = range(3)
    >>> it_2 = iter('abc')
    >>> list(zip_equal(it_1, it_2))
    [(0, 'a'), (1, 'b'), (2, 'c')]
    >>> it_1 = range(3)
    >>> it_2 = iter('abcd')
    >>> list(zip_equal(it_1, it_2)) # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    more_itertools.more.UnequalIterablesError: Iterables have different
    lengths
    """
    # 0x30A00A6 is the hexversion of CPython 3.10.0a6, the first release
    # with the builtin zip(..., strict=True); on such interpreters this
    # helper is redundant, hence the deprecation warning.
    if hexversion >= 0x30A00A6:
        warnings.warn(
            (
                'zip_equal will be removed in a future version of '
                'more-itertools. Use the builtin zip function with '
                'strict=True instead.'
            ),
            DeprecationWarning,
        )
    return _zip_equal(*iterables)
def zip_offset(*iterables, offsets, longest=False, fillvalue=None):
    """``zip`` the *iterables*, shifting the `i`-th one by ``offsets[i]``.

    A negative offset pads the front of its iterable with *fillvalue*; a
    positive offset skips that many leading items.

    >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1)))
    [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')]

    By default iteration stops with the shortest shifted iterable; set
    *longest* to ``True`` to continue until the longest one is exhausted,
    filling gaps with *fillvalue*.

    Raises ``ValueError`` when the counts of iterables and offsets differ.
    """
    if len(iterables) != len(offsets):
        raise ValueError("Number of iterables and offsets didn't match")
    shifted = []
    for source, offset in zip(iterables, offsets):
        if offset < 0:
            # Lag: pad the front with fill values.
            shifted.append(chain(repeat(fillvalue, -offset), source))
        elif offset > 0:
            # Lead: drop the first `offset` items.
            shifted.append(islice(source, offset, None))
        else:
            shifted.append(source)
    if longest:
        return zip_longest(*shifted, fillvalue=fillvalue)
    return zip(*shifted)
def sort_together(iterables, key_list=(0,), key=None, reverse=False):
    """Sort several parallel *iterables* as a unit, like sorting the rows
    of a spreadsheet by selected columns.

    *key_list* names the iterables (columns) that drive the sort, in
    priority order; all iterables are trimmed to the shortest length.

    >>> sort_together([(4, 3, 2, 1), ('a', 'b', 'c', 'd')])
    [(1, 2, 3, 4), ('d', 'c', 'b', 'a')]

    An optional *key* function receives the values from the *key_list*
    columns (one positional argument per column) and returns the sort key.
    Set *reverse* to ``True`` for descending order.

    >>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True)
    [(3, 2, 1), ('a', 'b', 'c')]
    """
    if key is None:
        # Plain column sort: compare the selected column values directly.
        sort_key = itemgetter(*key_list)
    else:
        key_list = list(key_list)
        if len(key_list) == 1:
            # Single column: pass that one value to the key function.
            offset = key_list[0]

            def sort_key(row):
                return key(row[offset])

        else:
            # Multiple columns: spread the selected values as *args.
            pick = itemgetter(*key_list)

            def sort_key(row):
                return key(*pick(row))

    # Transpose to rows, sort them, then transpose back to columns.
    sorted_rows = sorted(zip(*iterables), key=sort_key, reverse=reverse)
    return list(zip(*sorted_rows))
def unzip(iterable):
    """The inverse of :func:`zip`, this function disaggregates the elements
    of the zipped *iterable*.
    The ``i``-th iterable contains the ``i``-th element from each element
    of the zipped iterable. The first element is used to determine the
    length of the remaining elements.
    >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
    >>> letters, numbers = unzip(iterable)
    >>> list(letters)
    ['a', 'b', 'c', 'd']
    >>> list(numbers)
    [1, 2, 3, 4]
    This is similar to using ``zip(*iterable)``, but it avoids reading
    *iterable* into memory. Note, however, that this function uses
    :func:`itertools.tee` and thus may require significant storage.
    """
    head, iterable = spy(iter(iterable))
    if not head:
        # empty iterable, e.g. zip([], [], [])
        return ()
    # spy returns a one-length iterable as head
    head = head[0]
    # One tee'd copy of the stream per position in the first tuple.
    iterables = tee(iterable, len(head))
    # NOTE: this local deliberately shadows operator.itemgetter within this
    # function; it builds a position getter that stops on short tuples.
    def itemgetter(i):
        def getter(obj):
            try:
                return obj[i]
            except IndexError:
                # basically if we have an iterable like
                # iter([(1, 2, 3), (4, 5), (6,)])
                # the second unzipped iterable would fail at the third tuple
                # since it would try to access tup[1]
                # same with the third unzipped iterable and the second tuple
                # to support these "improperly zipped" iterables,
                # we create a custom itemgetter
                # which just stops the unzipped iterables
                # at first length mismatch
                raise StopIteration
        return getter
    return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables))
def divide(n, iterable):
    """Split *iterable* into *n* contiguous parts, preserving order.

    >>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6])
    >>> list(group_1)
    [1, 2, 3]
    >>> list(group_2)
    [4, 5, 6]

    When the length is not a multiple of *n*, earlier parts get one extra
    item; with fewer items than *n*, trailing parts are empty.  The whole
    input is materialized up front — see :func:`distribute` for a lazier
    alternative that does not preserve contiguity.

    Raises ``ValueError`` if *n* is less than 1.
    """
    if n < 1:
        raise ValueError('n must be at least 1')
    # Materialize only when the input does not already support slicing.
    try:
        iterable[:0]
    except TypeError:
        seq = tuple(iterable)
    else:
        seq = iterable
    q, r = divmod(len(seq), n)
    # The first r parts receive q + 1 items; the rest receive q.
    parts = []
    stop = 0
    for index in range(n):
        start = stop
        stop += q + (1 if index < r else 0)
        parts.append(iter(seq[start:stop]))
    return parts
def always_iterable(obj, base_type=(str, bytes)):
    """Return an iterator over *obj*'s items, wrapping non-iterables (and
    instances of *base_type*) as single-item iterators.

    >>> list(always_iterable((1, 2, 3)))
    [1, 2, 3]
    >>> list(always_iterable(1))
    [1]
    >>> list(always_iterable(None))
    []

    ``None`` yields an empty iterator.  By default ``str`` and ``bytes``
    are treated as atoms rather than iterated character by character; pass
    a different *base_type* to change which types are treated atomically,
    or ``base_type=None`` to disable that special-casing entirely.

    >>> list(always_iterable('foo'))
    ['foo']
    >>> list(always_iterable({'a': 1}, base_type=dict))
    [{'a': 1}]
    """
    if obj is None:
        return iter(())
    treat_as_atom = base_type is not None and isinstance(obj, base_type)
    if not treat_as_atom:
        try:
            return iter(obj)
        except TypeError:
            pass  # Not iterable: fall through and wrap it.
    return iter((obj,))
def adjacent(predicate, iterable, distance=1):
    """Return an iterable over `(bool, item)` tuples where the `item` is
    drawn from *iterable* and the `bool` indicates whether
    that item satisfies the *predicate* or is adjacent to an item that does.
    For example, to find whether items are adjacent to a ``3``::
    >>> list(adjacent(lambda x: x == 3, range(6)))
    [(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)]
    Set *distance* to change what counts as adjacent. For example, to find
    whether items are two places away from a ``3``:
    >>> list(adjacent(lambda x: x == 3, range(6), distance=2))
    [(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)]
    This is useful for contextualizing the results of a search function.
    For example, a code comparison tool might want to identify lines that
    have changed, but also surrounding lines to give the viewer of the diff
    context.
    The predicate function will only be called once for each item in the
    iterable.
    See also :func:`groupby_transform`, which can be used with this function
    to group ranges of items with the same `bool` value.
    """
    # Allow distance=0 mainly for testing that it reproduces results with map()
    if distance < 0:
        raise ValueError('distance must be at least 0')
    i1, i2 = tee(iterable)
    # Pad both ends so the sliding window is full even at the boundaries.
    padding = [False] * distance
    selected = chain(padding, map(predicate, i1), padding)
    # An item is marked True if any position within `distance` of it
    # (inclusive of itself) satisfied the predicate.
    adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1))
    return zip(adjacent_to_selected, i2)
def groupby_transform(iterable, keyfunc=None, valuefunc=None, reducefunc=None):
    """A transforming wrapper around :func:`itertools.groupby`.

    * *keyfunc* computes the grouping key for each item
    * *valuefunc* transforms each grouped item
    * *reducefunc* transforms each whole group

    >>> list(groupby_transform('aAAbBBcCC', lambda k: k.upper(),
    ...                        lambda v: v.lower(), lambda g: ''.join(g)))
    [('A', 'aaa'), ('B', 'bbb'), ('C', 'ccc')]

    Any argument left unset behaves as the identity.  As with plain
    ``groupby``, only adjacent items with equal keys are grouped — sort by
    the key function first if global grouping is wanted.
    """
    result = groupby(iterable, keyfunc)
    if valuefunc:
        # Transform items lazily within each group.
        result = ((grp_key, map(valuefunc, grp)) for grp_key, grp in result)
    if reducefunc:
        # Collapse each (possibly transformed) group to a single value.
        result = ((grp_key, reducefunc(grp)) for grp_key, grp in result)
    return result
| bucket |
python | getsentry__sentry | tests/sentry/dashboards/endpoints/test_organization_dashboard_details.py | {
"start": 170963,
"end": 175815
} | class ____(OrganizationDashboardDetailsTestCase):
"""
These tests are intended to cover and eventually replace the existing
OrganizationDashboardFavoriteTest cases.
They are updated as necessary to match the new functionality and
constraints regarding the position maintenance of the dashboard favorites.
"""
features = ["organizations:dashboards-starred-reordering"]
    def do_request(self, *args, **kwargs):
        """Issue the request with this class's feature flags enabled."""
        with self.feature(self.features):
            return super().do_request(*args, **kwargs)
    def setUp(self) -> None:
        """Add two org members who both start with the dashboard favorited."""
        super().setUp()
        # Create two additional users
        self.user_1 = self.create_user(email="user1@example.com")
        self.user_2 = self.create_user(email="user2@example.com")
        self.create_member(user=self.user_1, organization=self.organization)
        self.create_member(user=self.user_2, organization=self.organization)
        # Both users have favorited the dashboard
        DashboardFavoriteUser.objects.insert_favorite_dashboard(
            organization=self.organization,
            user_id=self.user_1.id,
            dashboard=self.dashboard,
        )
        DashboardFavoriteUser.objects.insert_favorite_dashboard(
            organization=self.organization,
            user_id=self.user_2.id,
            dashboard=self.dashboard,
        )
    def url(self, dashboard_id):
        """Build the favorite endpoint URL for *dashboard_id* in this org."""
        return reverse(
            "sentry-api-0-organization-dashboard-favorite",
            kwargs={
                "organization_id_or_slug": self.organization.slug,
                "dashboard_id": dashboard_id,
            },
        )
# PUT tests
    def test_favorite_dashboard(self) -> None:
        """Favoriting appends the dashboard after existing starred ones."""
        assert self.user.id not in self.dashboard.favorited_by
        self.login_as(user=self.user)
        # Insert an initial starred dashboard for this user
        initial_dashboard = Dashboard.objects.create(
            title="Other Dashboard",
            created_by_id=self.user.id,
            organization=self.organization,
        )
        DashboardFavoriteUser.objects.insert_favorite_dashboard(
            organization=self.organization,
            user_id=self.user.id,
            dashboard=initial_dashboard,
        )
        response = self.do_request("put", self.url(self.dashboard.id), data={"isFavorited": "true"})
        assert response.status_code == 204
        # Assert that the dashboard is added to the end of the list by its position
        assert list(
            DashboardFavoriteUser.objects.filter(
                organization=self.organization,
                user_id=self.user.id,
            )
            .order_by("position")
            .values_list("dashboard_id", flat=True)
        ) == [
            initial_dashboard.id,
            self.dashboard.id,
        ]
    def test_unfavorite_dashboard(self) -> None:
        """Unfavoriting deletes the user's favorite row entirely."""
        assert self.user_1.id in self.dashboard.favorited_by
        self.login_as(user=self.user_1)
        response = self.do_request("put", self.url(self.dashboard.id), data={"isFavorited": False})
        assert response.status_code == 204
        # No row remains, rather than a row with a cleared flag.
        assert (
            DashboardFavoriteUser.objects.get_favorite_dashboard(
                organization=self.organization,
                user_id=self.user_1.id,
                dashboard=self.dashboard,
            )
            is None
        )
def test_favorite_dashboard_no_dashboard_edit_access(self) -> None:
DashboardPermissions.objects.create(is_editable_by_everyone=False, dashboard=self.dashboard)
self.login_as(user=self.user_2)
dashboard_detail_url = reverse(
"sentry-api-0-organization-dashboard-details",
kwargs={
"organization_id_or_slug": self.organization.slug,
"dashboard_id": self.dashboard.id,
},
)
response = self.do_request("put", dashboard_detail_url, data={"title": "New Dashboard 9"})
# assert user cannot edit dashboard
assert response.status_code == 403
# assert if user can edit the favorite status of the dashboard
assert (
DashboardFavoriteUser.objects.get_favorite_dashboard(
organization=self.organization,
user_id=self.user_2.id,
dashboard=self.dashboard,
)
is not None
)
response = self.do_request("put", self.url(self.dashboard.id), data={"isFavorited": False})
# The dashboard was successfully unfavorited
assert response.status_code == 204
assert (
DashboardFavoriteUser.objects.get_favorite_dashboard(
organization=self.organization,
user_id=self.user_2.id,
dashboard=self.dashboard,
)
is None
)
| OrganizationDashboardFavoriteReorderingTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 85082,
"end": 85790
} | class ____(sgqlc.types.Enum):
"""A repository interaction limit.
Enumeration Choices:
* `COLLABORATORS_ONLY`: Users that are not collaborators will not
be able to interact with the repository.
* `CONTRIBUTORS_ONLY`: Users that have not previously committed to
a repository’s default branch will be unable to interact with
the repository.
* `EXISTING_USERS`: Users that have recently created their account
will be unable to interact with the repository.
* `NO_LIMIT`: No interaction limits are enabled.
"""
__schema__ = github_schema
__choices__ = ("COLLABORATORS_ONLY", "CONTRIBUTORS_ONLY", "EXISTING_USERS", "NO_LIMIT")
| RepositoryInteractionLimit |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 56559,
"end": 57927
} | class ____(BaseModel):
"""
Plugin serializer.
"""
name: Annotated[str, Field(title="Name")]
macros: Annotated[list[str], Field(title="Macros")]
flask_blueprints: Annotated[list[str], Field(title="Flask Blueprints")]
fastapi_apps: Annotated[list[FastAPIAppResponse], Field(title="Fastapi Apps")]
fastapi_root_middlewares: Annotated[
list[FastAPIRootMiddlewareResponse], Field(title="Fastapi Root Middlewares")
]
external_views: Annotated[
list[ExternalViewResponse],
Field(
description="Aggregate all external views. Both 'external_views' and 'appbuilder_menu_items' are included here.",
title="External Views",
),
]
react_apps: Annotated[list[ReactAppResponse], Field(title="React Apps")]
appbuilder_views: Annotated[list[AppBuilderViewResponse], Field(title="Appbuilder Views")]
appbuilder_menu_items: Annotated[list[AppBuilderMenuItemResponse], Field(title="Appbuilder Menu Items")]
global_operator_extra_links: Annotated[list[str], Field(title="Global Operator Extra Links")]
operator_extra_links: Annotated[list[str], Field(title="Operator Extra Links")]
source: Annotated[str, Field(title="Source")]
listeners: Annotated[list[str], Field(title="Listeners")]
timetables: Annotated[list[str], Field(title="Timetables")]
| PluginResponse |
python | nryoung__algorithms | tests/test_searching.py | {
"start": 243,
"end": 1229
} | class ____(unittest.TestCase):
"""
Tests Binary Search on a small range from 0-9
"""
def test_binarysearch(self):
self.seq = range(10)
rv1 = binary_search.search(self.seq, 0)
rv2 = binary_search.search(self.seq, 9)
rv3 = binary_search.search(self.seq, -1)
rv4 = binary_search.search(self.seq, 10)
rv5 = binary_search.search(self.seq, 4)
self.assertIs(rv1, 0)
self.assertIs(rv2, 9)
self.assertFalse(rv3)
self.assertFalse(rv4)
self.assertIs(rv5, 4)
self.seq = range(9)
rv1 = binary_search.search(self.seq, 0)
rv2 = binary_search.search(self.seq, 8)
rv3 = binary_search.search(self.seq, -1)
rv4 = binary_search.search(self.seq, 10)
rv5 = binary_search.search(self.seq, 4)
self.assertIs(rv1, 0)
self.assertIs(rv2, 8)
self.assertFalse(rv3)
self.assertFalse(rv4)
self.assertIs(rv5, 4)
| TestBinarySearch |
python | walkccc__LeetCode | solutions/268. Missing Number/268.py | {
"start": 0,
"end": 160
} | class ____:
def missingNumber(self, nums: list[int]) -> int:
ans = len(nums)
for i, num in enumerate(nums):
ans ^= i ^ num
return ans
| Solution |
python | readthedocs__readthedocs.org | readthedocs/projects/admin.py | {
"start": 13313,
"end": 13407
} | class ____(admin.TabularInline):
model = HTTPHeader
@admin.register(Domain)
| HTTPHeaderInline |
python | python-openxml__python-docx | tests/styles/test_style.py | {
"start": 14029,
"end": 16990
} | class ____:
def it_knows_which_style_it_is_based_on(self, base_get_fixture):
style, StyleFactory_, StyleFactory_calls, base_style_ = base_get_fixture
base_style = style.base_style
assert StyleFactory_.call_args_list == StyleFactory_calls
assert base_style == base_style_
def it_can_change_its_base_style(self, base_set_fixture):
style, value, expected_xml = base_set_fixture
style.base_style = value
assert style._element.xml == expected_xml
def it_provides_access_to_its_font(self, font_fixture):
style, Font_, font_ = font_fixture
font = style.font
Font_.assert_called_once_with(style._element)
assert font is font_
# fixture --------------------------------------------------------
@pytest.fixture(
params=[
("w:styles/(w:style{w:styleId=Foo},w:style/w:basedOn{w:val=Foo})", 1, 0),
("w:styles/(w:style{w:styleId=Foo},w:style/w:basedOn{w:val=Bar})", 1, -1),
("w:styles/w:style", 0, -1),
]
)
def base_get_fixture(self, request, StyleFactory_):
styles_cxml, style_idx, base_style_idx = request.param
styles = element(styles_cxml)
style = CharacterStyle(styles[style_idx])
if base_style_idx >= 0:
base_style = styles[base_style_idx]
StyleFactory_calls = [call(base_style)]
expected_value = StyleFactory_.return_value
else:
StyleFactory_calls = []
expected_value = None
return style, StyleFactory_, StyleFactory_calls, expected_value
@pytest.fixture(
params=[
("w:style", "Foo", "w:style/w:basedOn{w:val=Foo}"),
("w:style/w:basedOn{w:val=Foo}", "Bar", "w:style/w:basedOn{w:val=Bar}"),
("w:style/w:basedOn{w:val=Bar}", None, "w:style"),
]
)
def base_set_fixture(self, request, style_):
style_cxml, base_style_id, expected_style_cxml = request.param
style = CharacterStyle(element(style_cxml))
style_.style_id = base_style_id
base_style = style_ if base_style_id is not None else None
expected_xml = xml(expected_style_cxml)
return style, base_style, expected_xml
@pytest.fixture
def font_fixture(self, Font_, font_):
style = CharacterStyle(element("w:style"))
return style, Font_, font_
# fixture components ---------------------------------------------
@pytest.fixture
def Font_(self, request, font_):
return class_mock(request, "docx.styles.style.Font", return_value=font_)
@pytest.fixture
def font_(self, request):
return instance_mock(request, Font)
@pytest.fixture
def style_(self, request):
return instance_mock(request, BaseStyle)
@pytest.fixture
def StyleFactory_(self, request):
return function_mock(request, "docx.styles.style.StyleFactory")
| DescribeCharacterStyle |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/env_vars.py | {
"start": 250,
"end": 380
} | class ____(graphene.Enum):
RESOURCE = "RESOURCE"
class Meta:
name = "EnvVarConsumerType"
| GrapheneEnvVarConsumerType |
python | pytorch__pytorch | torch/testing/_internal/common_nn.py | {
"start": 145556,
"end": 155649
} | class ____(TestBase):
@abstractmethod
def _do_test(self, test_case: Any, module: nn.Module, input: Any) -> Any:
raise NotImplementedError
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.jacobian_input = kwargs.get('jacobian_input', True)
self.should_test_cuda = kwargs.get('test_cuda', True)
self.should_test_pickle = kwargs.get('pickle', True)
self.check_gradgrad = kwargs.get('check_gradgrad', True)
self.FIXME_no_cuda_gradgrad_comparison = \
kwargs.get('FIXME_no_cuda_gradgrad_comparison', False)
self.precision = kwargs.get('precision', 2e-4)
self.check_forward_only = kwargs.get('check_forward_only', False)
self.default_dtype = kwargs.get('default_dtype')
if self.default_dtype is None:
self.default_dtype = torch.get_default_dtype()
def __call__(self, test_case):
with set_default_dtype(self.default_dtype):
module = self.constructor(*self.constructor_args)
input = self._get_input()
if self.reference_fn is not None:
out = test_case._forward(module, input)
ref_input = deepcopy(input)
ref_module = deepcopy(module)
expected_out = self.reference_fn(ref_input, test_case._get_parameters(module)[0], ref_module)
test_case.assertEqual(out, expected_out, exact_dtype=False)
if self.check_forward_only:
return
self.test_noncontig(test_case, module, input)
if self.should_test_pickle:
# TODO: do this with in-memory files as soon as torch.save will support it
with tempfile.TemporaryFile() as f:
test_case._forward(module, input)
torch.save(module, f)
f.seek(0)
# weights_only=False as this is legacy code that saves the model
module_copy = torch.load(f, weights_only=False)
test_case.assertEqual(test_case._forward(module, input), test_case._forward(module_copy, input))
self._do_test(test_case, module, input)
def noncontiguize(self, obj):
if isinstance(obj, list):
return [self.noncontiguize(o) for o in obj]
elif isinstance(obj, tuple):
return tuple(self.noncontiguize(o) for o in obj)
tensor = obj
ndim = tensor.dim()
# Always making only the last dimension noncontiguous is easy to hide
# bugs because .view(-1) will still work. So try to find a dim with size
# > 1 and make that non-contiguous, i.e., stack + select on the
# dimension directly after that.
dim = ndim
for d in range(ndim):
if tensor.size(d) > 1:
dim = d + 1
break
noncontig = torch.stack([torch.empty_like(tensor), tensor], dim).select(dim, 1).detach()
assert noncontig.numel() == 1 or noncontig.numel() == 0 or not noncontig.is_contiguous()
noncontig.requires_grad = tensor.requires_grad
return noncontig
def test_noncontig(self, test_case, module, input):
# check no scalars, can't make non-contig
if isinstance(input, torch.Tensor) and input.dim() == 0:
return
if any(i.dim() == 0 for i in input if isinstance(i, torch.Tensor)):
return
test_case._zero_grad_parameters(module)
test_case._zero_grad_input(input)
with freeze_rng_state():
output = test_case._forward(module, input)
if getattr(module, "return_indices", False):
output = output[0]
grad_output = output.new(output.shape).normal_()
output = output.clone()
d_input = deepcopy(test_case._backward(module, input, output, grad_output))
d_param = deepcopy(test_case._get_parameters(module)[1])
nc_input = self.noncontiguize(input)
nc_grad_output = self.noncontiguize(grad_output)
for contig_i, contig_g in product((True, False), repeat=2):
i = input if contig_i else nc_input
# Some ops, e.g., nn.Flatten, return gradient that shares
# storage with the grad_output. Hence we copy here.
go = deepcopy(grad_output if contig_g else nc_grad_output)
test_case._zero_grad_parameters(module)
test_case._zero_grad_input(i)
with freeze_rng_state():
out = test_case._forward(module, i)
if getattr(module, "return_indices", False):
out = out[0]
grad = test_case._backward(module, i, out, go)
test_case.assertEqual(out, output)
test_case.assertEqual(grad, d_input, atol=1e-4, rtol=0)
test_case.assertEqual(test_case._get_parameters(module)[1], d_param)
def test_cuda(self, test_case):
if not TEST_CUDA or not self.should_test_cuda:
raise unittest.SkipTest('Excluded from CUDA tests')
with set_default_dtype(self.default_dtype):
cpu_input = self._get_input()
type_map = {torch.double: torch.float}
cpu_input_tuple = cpu_input if isinstance(cpu_input, tuple) else (cpu_input,)
is_any_input_complex = any(isinstance(t, torch.Tensor) and t.dtype.is_complex for t in cpu_input_tuple)
gpu_input_tuple = to_gpu(cpu_input_tuple, type_map=type_map)
cpu_module = self.constructor(*self.constructor_args)
gpu_module = self.constructor(*self.constructor_args).float().cuda()
cpu_param = test_case._get_parameters(cpu_module)
gpu_param = test_case._get_parameters(gpu_module)
for cpu_p, gpu_p in zip(cpu_param[0], gpu_param[0], strict=True):
gpu_p.data.copy_(cpu_p)
test_case._zero_grad_input(cpu_input_tuple)
test_case._zero_grad_input(gpu_input_tuple)
test_case._zero_grad_parameters(cpu_module)
test_case._zero_grad_parameters(gpu_module)
cpu_output = test_case._forward(cpu_module, cpu_input_tuple)
gpu_output = test_case._forward(gpu_module, gpu_input_tuple)
if getattr(cpu_module, "return_indices", False):
cpu_output = cpu_output[0]
gpu_output = gpu_output[0]
test_case.assertEqual(cpu_output, gpu_output, atol=self.precision, rtol=0, exact_dtype=False)
# Run backwards on CPU and GPU and compare results
for _ in range(5):
cpu_gradOutput = cpu_output.clone().normal_()
gpu_gradOutput = cpu_gradOutput.type_as(gpu_output)
cpu_gradInput = test_case._backward(cpu_module, cpu_input_tuple, cpu_output, cpu_gradOutput)
gpu_gradInput = test_case._backward(gpu_module, gpu_input_tuple, gpu_output, gpu_gradOutput)
test_case.assertEqual(cpu_gradInput, gpu_gradInput, atol=self.precision, rtol=0, exact_dtype=False)
for cpu_d_p, gpu_d_p in zip(cpu_param[1], gpu_param[1], strict=True):
test_case.assertEqual(cpu_d_p, gpu_d_p, atol=self.precision, rtol=0)
# Run double-backwards on CPU and GPU and compare results
if self.check_gradgrad and not self.FIXME_no_cuda_gradgrad_comparison:
cpu_output = cpu_module(*cpu_input_tuple)
gpu_output = gpu_module(*gpu_input_tuple)
if getattr(cpu_module, "return_indices", False):
cpu_output = cpu_output[0]
gpu_output = gpu_output[0]
cpu_gradOutput = torch.randn_like(cpu_output, requires_grad=True)
gpu_gradOutput = cpu_gradOutput.type_as(gpu_output).detach()
gpu_gradOutput.requires_grad = True
cpu_gradInputs = torch.autograd.grad(
cpu_output,
cpu_input_tuple + tuple(cpu_module.parameters()),
cpu_gradOutput,
create_graph=True)
gpu_gradInputs = torch.autograd.grad(
gpu_output,
gpu_input_tuple + tuple(gpu_module.parameters()),
gpu_gradOutput,
create_graph=True)
for cpu_d_i, gpu_d_i in zip(cpu_gradInputs, gpu_gradInputs, strict=True):
test_case.assertEqual(cpu_d_i, gpu_d_i, atol=self.precision, rtol=0, exact_dtype=False)
# We mix output into the second backwards computation so that
# torch.autograd.grad doesn't complain that some inputs
# are unreachable (which can happen if you differentiate
# only on the gradient.
if is_any_input_complex:
outputs_cpu = cpu_output.sum().abs() + sum(x.sum().abs() for x in cpu_gradInputs)
outputs_gpu = gpu_output.sum().abs() + sum(x.sum().abs() for x in gpu_gradInputs)
else:
outputs_cpu = cpu_output.sum() + sum(x.sum() for x in cpu_gradInputs)
outputs_gpu = gpu_output.sum() + sum(x.sum() for x in gpu_gradInputs)
cpu_gg = torch.autograd.grad(
outputs_cpu,
cpu_input_tuple + (cpu_gradOutput,) + tuple(cpu_module.parameters()),
retain_graph=True)
gpu_gg = torch.autograd.grad(
outputs_gpu,
gpu_input_tuple + (gpu_gradOutput,) + tuple(gpu_module.parameters()),
retain_graph=True)
test_case.assertEqual(cpu_gradInput, gpu_gradInput, atol=self.precision, rtol=0, exact_dtype=False)
for cpu_d_p, gpu_d_p in zip(cpu_gg, gpu_gg, strict=True):
test_case.assertEqual(cpu_d_p, gpu_d_p, atol=self.precision, rtol=0, exact_dtype=False)
self.test_noncontig(test_case, gpu_module, gpu_input_tuple)
| ModuleTest |
python | pypa__hatch | tests/env/plugin/test_interface.py | {
"start": 10255,
"end": 13142
} | class ____:
def test_default(self, isolation, isolated_data_dir, platform, global_application):
config = {"project": {"name": "my_app", "version": "0.0.1"}}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
assert environment.platforms == environment.platforms == []
def test_not_array(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {"hatch": {"envs": {"default": {"platforms": 9000}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
with pytest.raises(TypeError, match="Field `tool.hatch.envs.default.platforms` must be an array"):
_ = environment.platforms
def test_entry_not_string(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {"hatch": {"envs": {"default": {"platforms": [9000]}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
with pytest.raises(
TypeError, match="Platform #1 of field `tool.hatch.envs.default.platforms` must be a string"
):
_ = environment.platforms
def test_correct(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {"hatch": {"envs": {"default": {"platforms": ["macOS"]}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
assert environment.platforms == ["macos"]
| TestPlatforms |
python | sphinx-doc__sphinx | tests/roots/test-inheritance/dummy/test.py | {
"start": 254,
"end": 275
} | class ____(C):
pass
| F |
python | allegroai__clearml | examples/frameworks/fire/fire_object_cmd.py | {
"start": 127,
"end": 408
} | class ____(object):
def add(self, x, y):
return x + y
def multiply(self, x, y):
return x * y
if __name__ == "__main__":
Task.init(project_name="examples", task_name="Fire object command")
calculator = Calculator()
fire.Fire(calculator)
| Calculator |
python | PyCQA__pylint | tests/functional/s/statement_without_effect.py | {
"start": 904,
"end": 1976
} | class ____:
""" test attribute docstrings. """
class ClassLevelException(Exception):
"""Exception defined for access as a class attribute."""
good_attribute_docstring = 24
""" class level attribute docstring is fine either. """
second_good_attribute_docstring = 42
# Comments are good.
# empty lines are good, too.
""" Still a valid class level attribute docstring. """
def __init__(self):
self.attr = 42
""" Good attribute docstring """
attr = 24
""" Still a good __init__ level attribute docstring. """
val = 0
for val in range(42):
val += attr
# +1:[pointless-string-statement]
""" Invalid attribute docstring """
self.val = val
def test(self):
""" invalid attribute docstrings here. """
self.val = 42
# +1:[pointless-string-statement]
""" this is an invalid attribute docstring. """
def ellipsis():
"""Test that an Ellipsis as a body does not trigger the error"""
...
| ClassLevelAttributeTest |
python | gevent__gevent | src/greentest/3.13/test_queue.py | {
"start": 22294,
"end": 22429
} | class ____(BaseQueueTestMixin):
def setUp(self):
self.type2test = self.queue.LifoQueue
super().setUp()
| LifoQueueTest |
python | lepture__authlib | authlib/integrations/django_oauth2/requests.py | {
"start": 957,
"end": 1422
} | class ____(OAuth2Request):
def __init__(self, request: HttpRequest):
super().__init__(
method=request.method,
uri=request.build_absolute_uri(),
headers=request.headers,
)
self.payload = DjangoOAuth2Payload(request)
self._request = request
@property
def args(self):
return self._request.GET
@property
def form(self):
return self._request.POST
| DjangoOAuth2Request |
python | scipy__scipy | scipy/interpolate/tests/test_bsplines.py | {
"start": 110077,
"end": 117092
} | class ____:
# https://github.com/scipy/scipy/blob/main/scipy/interpolate/fitpack/fpchec.f
def test_1D_x_t(self):
k = 1
t = np.arange(12).reshape(2, 6)
x = np.arange(12)
with pytest.raises(ValueError, match="1D sequence"):
_b.fpcheck(x, t, k)
with pytest.raises(ValueError, match="1D sequence"):
_b.fpcheck(t, x, k)
def test_condition_1(self):
# c 1) k+1 <= n-k-1 <= m
k = 3
n = 2*(k + 1) - 1 # not OK
m = n + 11 # OK
t = np.arange(n)
x = np.arange(m)
assert dfitpack.fpchec(x, t, k) == 10
with pytest.raises(ValueError, match="Need k+1*"):
_b.fpcheck(x, t, k)
n = 2*(k+1) + 1 # OK
m = n - k - 2 # not OK
t = np.arange(n)
x = np.arange(m)
assert dfitpack.fpchec(x, t, k) == 10
with pytest.raises(ValueError, match="Need k+1*"):
_b.fpcheck(x, t, k)
def test_condition_2(self):
# c 2) t(1) <= t(2) <= ... <= t(k+1)
# c t(n-k) <= t(n-k+1) <= ... <= t(n)
k = 3
t = [0]*(k+1) + [2] + [5]*(k+1) # this is OK
x = [1, 2, 3, 4, 4.5]
assert dfitpack.fpchec(x, t, k) == 0
assert _b.fpcheck(x, t, k) is None # does not raise
tt = t.copy()
tt[-1] = tt[0] # not OK
assert dfitpack.fpchec(x, tt, k) == 20
with pytest.raises(ValueError, match="Last k knots*"):
_b.fpcheck(x, tt, k)
tt = t.copy()
tt[0] = tt[-1] # not OK
assert dfitpack.fpchec(x, tt, k) == 20
with pytest.raises(ValueError, match="First k knots*"):
_b.fpcheck(x, tt, k)
def test_condition_3(self):
# c 3) t(k+1) < t(k+2) < ... < t(n-k)
k = 3
t = [0]*(k+1) + [2, 3] + [5]*(k+1) # this is OK
x = [1, 2, 3, 3.5, 4, 4.5]
assert dfitpack.fpchec(x, t, k) == 0
assert _b.fpcheck(x, t, k) is None
t = [0]*(k+1) + [2, 2] + [5]*(k+1) # this is not OK
assert dfitpack.fpchec(x, t, k) == 30
with pytest.raises(ValueError, match="Internal knots*"):
_b.fpcheck(x, t, k)
def test_condition_4(self):
# c 4) t(k+1) <= x(i) <= t(n-k)
# NB: FITPACK's fpchec only checks x[0] & x[-1], so we follow.
k = 3
t = [0]*(k+1) + [5]*(k+1)
x = [1, 2, 3, 3.5, 4, 4.5] # this is OK
assert dfitpack.fpchec(x, t, k) == 0
assert _b.fpcheck(x, t, k) is None
xx = x.copy()
xx[0] = t[0] # still OK
assert dfitpack.fpchec(xx, t, k) == 0
assert _b.fpcheck(x, t, k) is None
xx = x.copy()
xx[0] = t[0] - 1 # not OK
assert dfitpack.fpchec(xx, t, k) == 40
with pytest.raises(ValueError, match="Out of bounds*"):
_b.fpcheck(xx, t, k)
xx = x.copy()
xx[-1] = t[-1] + 1 # not OK
assert dfitpack.fpchec(xx, t, k) == 40
with pytest.raises(ValueError, match="Out of bounds*"):
_b.fpcheck(xx, t, k)
# ### Test the S-W condition (no 5)
# c 5) the conditions specified by schoenberg and whitney must hold
# c for at least one subset of data points, i.e. there must be a
# c subset of data points y(j) such that
# c t(j) < y(j) < t(j+k+1), j=1,2,...,n-k-1
def test_condition_5_x1xm(self):
# x(1).ge.t(k2) .or. x(m).le.t(nk1)
k = 1
t = [0, 0, 1, 2, 2]
x = [1.1, 1.1, 1.1]
assert dfitpack.fpchec(x, t, k) == 50
with pytest.raises(ValueError, match="Schoenberg-Whitney*"):
_b.fpcheck(x, t, k)
x = [0.5, 0.5, 0.5]
assert dfitpack.fpchec(x, t, k) == 50
with pytest.raises(ValueError, match="Schoenberg-Whitney*"):
_b.fpcheck(x, t, k)
def test_condition_5_k1(self):
# special case nk3 (== n - k - 2) < 2
k = 1
t = [0, 0, 1, 1]
x = [0.5, 0.6]
assert dfitpack.fpchec(x, t, k) == 0
assert _b.fpcheck(x, t, k) is None
def test_condition_5_1(self):
# basically, there can't be an interval of t[j]..t[j+k+1] with no x
k = 3
t = [0]*(k+1) + [2] + [5]*(k+1)
x = [3]*5
assert dfitpack.fpchec(x, t, k) == 50
with pytest.raises(ValueError, match="Schoenberg-Whitney*"):
_b.fpcheck(x, t, k)
t = [0]*(k+1) + [2] + [5]*(k+1)
x = [1]*5
assert dfitpack.fpchec(x, t, k) == 50
with pytest.raises(ValueError, match="Schoenberg-Whitney*"):
_b.fpcheck(x, t, k)
def test_condition_5_2(self):
# same as _5_1, only the empty interval is in the middle
k = 3
t = [0]*(k+1) + [2, 3] + [5]*(k+1)
x = [1.1]*5 + [4]
assert dfitpack.fpchec(x, t, k) == 50
with pytest.raises(ValueError, match="Schoenberg-Whitney*"):
_b.fpcheck(x, t, k)
# and this one is OK
x = [1.1]*4 + [4, 4]
assert dfitpack.fpchec(x, t, k) == 0
assert _b.fpcheck(x, t, k) is None
def test_condition_5_3(self):
# similar to _5_2, covers a different failure branch
k = 1
t = [0, 0, 2, 3, 4, 5, 6, 7, 7]
x = [1, 1, 1, 5.2, 5.2, 5.2, 6.5]
assert dfitpack.fpchec(x, t, k) == 50
with pytest.raises(ValueError, match="Schoenberg-Whitney*"):
_b.fpcheck(x, t, k)
# ### python replicas of generate_knots(...) implementation details, for testing.
# ### see TestGenerateKnots::test_split_and_add_knot
def _split(x, t, k, residuals):
"""Split the knot interval into "runs".
"""
ix = np.searchsorted(x, t[k:-k])
# sum half-open intervals
fparts = [residuals[ix[i]:ix[i+1]].sum() for i in range(len(ix)-1)]
carries = residuals[ix[1:-1]]
for i in range(len(carries)): # split residuals at internal knots
carry = carries[i] / 2
fparts[i] += carry
fparts[i+1] -= carry
fparts[-1] += residuals[-1] # add the contribution of the last knot
xp_assert_close(sum(fparts), sum(residuals), atol=1e-15)
return fparts, ix
def _add_knot(x, t, k, residuals):
"""Insert a new knot given reduals."""
fparts, ix = _split(x, t, k, residuals)
# find the interval with max fparts and non-zero number of x values inside
idx_max = -101
fpart_max = -1e100
for i in range(len(fparts)):
if ix[i+1] - ix[i] > 1 and fparts[i] > fpart_max:
idx_max = i
fpart_max = fparts[i]
if idx_max == -101:
raise ValueError("Internal error, please report it to SciPy developers.")
# round up, like Dierckx does? This is really arbitrary though.
idx_newknot = (ix[idx_max] + ix[idx_max+1] + 1) // 2
new_knot = x[idx_newknot]
idx_t = np.searchsorted(t, new_knot)
t_new = np.r_[t[:idx_t], new_knot, t[idx_t:]]
return t_new
@make_xp_test_case(generate_knots)
| TestFpchec |
python | python-attrs__attrs | tests/test_slots.py | {
"start": 25583,
"end": 27231
} | class ____:
x = attr.ib()
b = attr.ib()
c = attr.ib()
def test_slots_unpickle_after_attr_removed():
"""
We don't assign attributes we don't have anymore if the class has
removed it.
"""
a = A(1, 2, 3)
a_pickled = pickle.dumps(a)
a_unpickled = pickle.loads(a_pickled)
assert a_unpickled == a
@attr.s(slots=True)
class NEW_A:
x = attr.ib()
c = attr.ib()
with mock.patch(f"{__name__}.A", NEW_A):
new_a = pickle.loads(a_pickled)
assert new_a.x == 1
assert new_a.c == 3
assert not hasattr(new_a, "b")
def test_slots_unpickle_after_attr_added(frozen):
"""
We don't assign attribute we haven't had before if the class has one added.
"""
a = A(1, 2, 3)
a_pickled = pickle.dumps(a)
a_unpickled = pickle.loads(a_pickled)
assert a_unpickled == a
@attr.s(slots=True, frozen=frozen)
class NEW_A:
x = attr.ib()
b = attr.ib()
d = attr.ib()
c = attr.ib()
with mock.patch(f"{__name__}.A", NEW_A):
new_a = pickle.loads(a_pickled)
assert new_a.x == 1
assert new_a.b == 2
assert new_a.c == 3
assert not hasattr(new_a, "d")
def test_slots_unpickle_is_backward_compatible(frozen):
"""
Ensure object pickled before v22.2.0 can still be unpickled.
"""
a = A(1, 2, 3)
a_pickled = (
b"\x80\x04\x95&\x00\x00\x00\x00\x00\x00\x00\x8c\x10"
+ a.__module__.encode()
+ b"\x94\x8c\x01A\x94\x93\x94)\x81\x94K\x01K\x02K\x03\x87\x94b."
)
a_unpickled = pickle.loads(a_pickled)
assert a_unpickled == a
| A |
python | skorch-dev__skorch | skorch/callbacks/scoring.py | {
"start": 16966,
"end": 19363
} | class ____(Callback):
"""Creates scores on epoch level based on batch level scores
This callback doesn't calculate any new scores but instead passes
through a score that was created on the batch level. Based on that
score, an average across the batch is created (honoring the batch
size) and recorded in the history for the given epoch.
Use this callback when there already is a score calculated on the
batch level. If that score has yet to be calculated, use
:class:`.BatchScoring` instead.
Parameters
----------
name : str
Name of the score recorded on a batch level in the history.
lower_is_better : bool (default=True)
Whether lower (e.g. log loss) or higher (e.g. accuracy) scores
are better.
on_train : bool (default=False)
Whether this should be called during train or validation.
"""
def __init__(
self,
name,
lower_is_better=True,
on_train=False,
):
self.name = name
self.lower_is_better = lower_is_better
self.on_train = on_train
def initialize(self):
self.best_score_ = np.inf if self.lower_is_better else -np.inf
return self
def _is_best_score(self, current_score):
if self.lower_is_better is None:
return None
if self.lower_is_better:
return current_score < self.best_score_
return current_score > self.best_score_
def get_avg_score(self, history):
if self.on_train:
bs_key = 'train_batch_size'
else:
bs_key = 'valid_batch_size'
weights, scores = list(zip(
*history[-1, 'batches', :, [bs_key, self.name]]))
score_avg = np.average(scores, weights=weights)
return score_avg
# pylint: disable=unused-argument,arguments-differ
def on_epoch_end(self, net, **kwargs):
history = net.history
try: # don't raise if there is no valid data
history[-1, 'batches', :, self.name]
except KeyError:
return
score_avg = self.get_avg_score(history)
is_best = self._is_best_score(score_avg)
if is_best:
self.best_score_ = score_avg
history.record(self.name, score_avg)
if is_best is not None:
history.record(self.name + '_best', bool(is_best))
| PassthroughScoring |
python | crytic__slither | slither/detectors/variables/unused_state_variables.py | {
"start": 2392,
"end": 3621
} | class ____(AbstractDetector):
"""
Unused state variables detector
"""
ARGUMENT = "unused-state"
HELP = "Unused state variables"
IMPACT = DetectorClassification.INFORMATIONAL
CONFIDENCE = DetectorClassification.HIGH
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#unused-state-variable"
WIKI_TITLE = "Unused state variable"
WIKI_DESCRIPTION = "Unused state variable."
WIKI_EXPLOIT_SCENARIO = ""
WIKI_RECOMMENDATION = "Remove unused state variables."
def _detect(self) -> List[Output]:
"""Detect unused state variables"""
results = []
for c in self.compilation_unit.contracts_derived:
if c.is_signature_only() or c.is_library:
continue
unusedVars = detect_unused(c)
if unusedVars:
for var in unusedVars:
info: DETECTOR_INFO = [var, " is never used in ", c, "\n"]
json = self.generate_result(info)
results.append(json)
return results
@staticmethod
def _format(compilation_unit: SlitherCompilationUnit, result: Dict) -> None:
custom_format(compilation_unit, result)
| UnusedStateVars |
python | mlflow__mlflow | dev/clint/tests/rules/test_no_class_based_tests.py | {
"start": 1551,
"end": 1789
} | class ____:
def test_feature_a(self):
assert True
"""
config = Config(select={NoClassBasedTests.name})
violations = lint_file(Path("regular_file.py"), code, config, index_path)
assert len(violations) == 0
| TestSomething |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_textbox16.py | {
"start": 315,
"end": 897
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("textbox16.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_textbox(
"E9", "This is some text", {"align": {"vertical": "middle"}}
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | coleifer__peewee | tests/regressions.py | {
"start": 62195,
"end": 62271
} | class ____(TestModel):
ps = ForeignKeyField(PS)
s = ForeignKeyField(S)
| O |
python | kamyu104__LeetCode-Solutions | Python/fancy-sequence.py | {
"start": 1049,
"end": 1892
} | class ____(object):
def __init__(self):
self.__arr = []
self.__op = [1, 0]
def append(self, val):
"""
:type val: int
:rtype: None
"""
self.__arr.append((val-self.__op[1])*pow(self.__op[0], MOD-2, MOD)%MOD) # O(logMOD), we treat it as O(1) here
def addAll(self, inc):
"""
:type inc: int
:rtype: None
"""
self.__op[1] = (self.__op[1]+inc) % MOD
def multAll(self, m):
"""
:type m: int
:rtype: None
"""
self.__op = [(self.__op[0]*m) % MOD, (self.__op[1]*m) % MOD]
def getIndex(self, idx):
"""
:type idx: int
:rtype: int
"""
if idx >= len(self.__arr):
return -1
a, b = self.__op
return (self.__arr[idx]*a + b) % MOD
| Fancy2 |
python | scikit-image__scikit-image | tests/skimage/filters/rank/test_rank.py | {
"start": 2739,
"end": 38992
} | class ____:
def setup_method(self):
np.random.seed(0)
# This image is used along with @run_in_parallel
# to ensure that the same seed is used for each thread.
self.image = np.random.rand(25, 25)
np.random.seed(0)
self.volume = np.random.rand(10, 10, 10)
# Set again the seed for the other tests.
np.random.seed(0)
self.footprint = morphology.disk(1)
self.footprint_3d = morphology.ball(1)
self.refs = ref_data
self.refs_3d = ref_data_3d
@pytest.mark.parametrize('outdt', [None, np.float32, np.float64])
@pytest.mark.parametrize('filter', all_rank_filters)
def test_rank_filter(self, filter, outdt):
@run_in_parallel(warnings_matching=['Possible precision loss'])
def check():
expected = self.refs[filter]
if outdt is not None:
out = np.zeros_like(expected, dtype=outdt)
else:
out = None
result = getattr(rank, filter)(self.image, self.footprint, out=out)
if filter == "entropy":
# There may be some arch dependent rounding errors
# See the discussions in
# https://github.com/scikit-image/scikit-image/issues/3091
# https://github.com/scikit-image/scikit-image/issues/2528
if outdt is not None:
# Adjust expected precision
expected = expected.astype(outdt)
assert_allclose(expected, result, atol=0, rtol=1e-15)
elif filter == "otsu":
# OTSU May also have some optimization dependent failures
# See the discussions in
# https://github.com/scikit-image/scikit-image/issues/3091
# Pixel 3, 5 was found to be problematic. It can take either
# a value of 41 or 81 depending on the specific optimizations
# used.
assert result[3, 5] in [41, 81]
result[3, 5] = 81
# Pixel [19, 18] is also found to be problematic for the same
# reason.
assert result[19, 18] in [141, 172]
result[19, 18] = 172
assert_array_almost_equal(expected, result)
else:
if outdt is not None:
# Avoid rounding issues comparing to expected result.
# Take modulus first to avoid undefined behavior for
# float->uint8 conversions.
result = np.mod(result, 256.0).astype(expected.dtype)
assert_array_almost_equal(expected, result)
check()
@pytest.mark.parametrize('filter', all_rank_filters)
def test_rank_filter_footprint_sequence_unsupported(self, filter):
footprint_sequence = morphology.diamond(3, decomposition="sequence")
with pytest.raises(ValueError):
getattr(rank, filter)(self.image.astype(np.uint8), footprint_sequence)
@pytest.mark.parametrize('outdt', [None, np.float32, np.float64])
@pytest.mark.parametrize('filter', _3d_rank_filters)
def test_rank_filters_3D(self, filter, outdt):
@run_in_parallel(warnings_matching=['Possible precision loss'])
def check():
expected = self.refs_3d[filter]
if outdt is not None:
out = np.zeros_like(expected, dtype=outdt)
else:
out = None
result = getattr(rank, filter)(self.volume, self.footprint_3d, out=out)
if outdt is not None:
# Avoid rounding issues comparing to expected result
if filter == 'sum':
# sum test data seems to be 8-bit disguised as 16-bit
datadt = np.uint8
else:
datadt = expected.dtype
# Take modulus first to avoid undefined behavior for
# float->uint8 conversions.
result = np.mod(result, 256.0).astype(datadt)
assert_array_almost_equal(expected, result)
check()
def test_random_sizes(self):
# make sure the size is not a problem
elem = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=np.uint8)
for m, n in np.random.randint(1, 101, size=(10, 2)):
mask = np.ones((m, n), dtype=np.uint8)
image8 = np.ones((m, n), dtype=np.uint8)
out8 = np.empty_like(image8)
rank.mean(
image=image8, footprint=elem, mask=mask, out=out8, shift_x=0, shift_y=0
)
assert_equal(image8.shape, out8.shape)
rank.mean(
image=image8,
footprint=elem,
mask=mask,
out=out8,
shift_x=+1,
shift_y=+1,
)
assert_equal(image8.shape, out8.shape)
rank.geometric_mean(
image=image8, footprint=elem, mask=mask, out=out8, shift_x=0, shift_y=0
)
assert_equal(image8.shape, out8.shape)
rank.geometric_mean(
image=image8,
footprint=elem,
mask=mask,
out=out8,
shift_x=+1,
shift_y=+1,
)
assert_equal(image8.shape, out8.shape)
image16 = np.ones((m, n), dtype=np.uint16)
out16 = np.empty_like(image8, dtype=np.uint16)
rank.mean(
image=image16,
footprint=elem,
mask=mask,
out=out16,
shift_x=0,
shift_y=0,
)
assert_equal(image16.shape, out16.shape)
rank.mean(
image=image16,
footprint=elem,
mask=mask,
out=out16,
shift_x=+1,
shift_y=+1,
)
assert_equal(image16.shape, out16.shape)
rank.geometric_mean(
image=image16,
footprint=elem,
mask=mask,
out=out16,
shift_x=0,
shift_y=0,
)
assert_equal(image16.shape, out16.shape)
rank.geometric_mean(
image=image16,
footprint=elem,
mask=mask,
out=out16,
shift_x=+1,
shift_y=+1,
)
assert_equal(image16.shape, out16.shape)
rank.mean_percentile(
image=image16,
mask=mask,
out=out16,
footprint=elem,
shift_x=0,
shift_y=0,
p0=0.1,
p1=0.9,
)
assert_equal(image16.shape, out16.shape)
rank.mean_percentile(
image=image16,
mask=mask,
out=out16,
footprint=elem,
shift_x=+1,
shift_y=+1,
p0=0.1,
p1=0.9,
)
assert_equal(image16.shape, out16.shape)
def test_compare_with_gray_dilation(self):
# compare the result of maximum filter with dilate
image = (np.random.rand(100, 100) * 256).astype(np.uint8)
out = np.empty_like(image)
mask = np.ones(image.shape, dtype=np.uint8)
for r in range(3, 20, 2):
elem = np.ones((r, r), dtype=np.uint8)
rank.maximum(image=image, footprint=elem, out=out, mask=mask)
cm = gray.dilation(image, elem)
assert_equal(out, cm)
def test_compare_with_gray_erosion(self):
# compare the result of maximum filter with erode
image = (np.random.rand(100, 100) * 256).astype(np.uint8)
out = np.empty_like(image)
mask = np.ones(image.shape, dtype=np.uint8)
for r in range(3, 20, 2):
elem = np.ones((r, r), dtype=np.uint8)
rank.minimum(image=image, footprint=elem, out=out, mask=mask)
cm = gray.erosion(image, elem)
assert_equal(out, cm)
def test_bitdepth(self):
# test the different bit depth for rank16
elem = np.ones((3, 3), dtype=np.uint8)
out = np.empty((100, 100), dtype=np.uint16)
mask = np.ones((100, 100), dtype=np.uint8)
for i in range(8, 13):
max_val = 2**i - 1
image = np.full((100, 100), max_val, dtype=np.uint16)
if i > 10:
expected = ["Bad rank filter performance"]
else:
expected = []
with expected_warnings(expected):
rank.mean_percentile(
image=image,
footprint=elem,
mask=mask,
out=out,
shift_x=0,
shift_y=0,
p0=0.1,
p1=0.9,
)
def test_population(self):
# check the number of valid pixels in the neighborhood
image = np.zeros((5, 5), dtype=np.uint8)
elem = np.ones((3, 3), dtype=np.uint8)
out = np.empty_like(image)
mask = np.ones(image.shape, dtype=np.uint8)
rank.pop(image=image, footprint=elem, out=out, mask=mask)
r = np.array(
[
[4, 6, 6, 6, 4],
[6, 9, 9, 9, 6],
[6, 9, 9, 9, 6],
[6, 9, 9, 9, 6],
[4, 6, 6, 6, 4],
]
)
assert_equal(r, out)
def test_structuring_element8(self):
# check the output for a custom footprint
r = np.array(
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 255, 0, 0, 0],
[0, 0, 255, 255, 255, 0],
[0, 0, 0, 255, 255, 0],
[0, 0, 0, 0, 0, 0],
]
)
# 8-bit
image = np.zeros((6, 6), dtype=np.uint8)
image[2, 2] = 255
elem = np.asarray([[1, 1, 0], [1, 1, 1], [0, 0, 1]], dtype=np.uint8)
out = np.empty_like(image)
mask = np.ones(image.shape, dtype=np.uint8)
rank.maximum(
image=image, footprint=elem, out=out, mask=mask, shift_x=1, shift_y=1
)
assert_equal(r, out)
# 16-bit
image = np.zeros((6, 6), dtype=np.uint16)
image[2, 2] = 255
out = np.empty_like(image)
rank.maximum(
image=image, footprint=elem, out=out, mask=mask, shift_x=1, shift_y=1
)
assert_equal(r, out)
def test_pass_on_bitdepth(self):
# should pass because data bitdepth is not too high for the function
image = np.full((100, 100), 2**11, dtype=np.uint16)
elem = np.ones((3, 3), dtype=np.uint8)
out = np.empty_like(image)
mask = np.ones(image.shape, dtype=np.uint8)
with expected_warnings(["Bad rank filter performance"]):
rank.maximum(image=image, footprint=elem, out=out, mask=mask)
def test_inplace_output(self):
# rank filters are not supposed to filter inplace
footprint = disk(20)
image = (np.random.rand(500, 500) * 256).astype(np.uint8)
out = image
with pytest.raises(NotImplementedError):
rank.mean(image, footprint, out=out)
def test_compare_autolevels(self):
# compare autolevel and percentile autolevel with p0=0.0 and p1=1.0
# should returns the same arrays
image = util.img_as_ubyte(data.camera())
footprint = disk(20)
loc_autolevel = rank.autolevel(image, footprint=footprint)
loc_perc_autolevel = rank.autolevel_percentile(
image, footprint=footprint, p0=0.0, p1=1.0
)
assert_equal(loc_autolevel, loc_perc_autolevel)
def test_compare_autolevels_16bit(self):
# compare autolevel(16-bit) and percentile autolevel(16-bit) with
# p0=0.0 and p1=1.0 should returns the same arrays
image = data.camera().astype(np.uint16) * 4
footprint = disk(20)
loc_autolevel = rank.autolevel(image, footprint=footprint)
loc_perc_autolevel = rank.autolevel_percentile(
image, footprint=footprint, p0=0.0, p1=1.0
)
assert_equal(loc_autolevel, loc_perc_autolevel)
def test_compare_ubyte_vs_float(self):
# Create signed int8 image that and convert it to uint8
image_uint = img_as_ubyte(data.camera()[:50, :50])
image_float = img_as_float(image_uint)
methods = [
'autolevel',
'equalize',
'gradient',
'threshold',
'subtract_mean',
'enhance_contrast',
'pop',
]
for method in methods:
func = getattr(rank, method)
out_u = func(image_uint, disk(3))
with expected_warnings(["Possible precision loss"]):
out_f = func(image_float, disk(3))
assert_equal(out_u, out_f)
def test_compare_ubyte_vs_float_3d(self):
# Create signed int8 volume that and convert it to uint8
np.random.seed(0)
volume_uint = np.random.randint(0, high=256, size=(10, 20, 30), dtype=np.uint8)
volume_float = img_as_float(volume_uint)
methods_3d = [
'equalize',
'otsu',
'autolevel',
'gradient',
'majority',
'maximum',
'mean',
'geometric_mean',
'subtract_mean',
'median',
'minimum',
'modal',
'enhance_contrast',
'pop',
'sum',
'threshold',
'noise_filter',
'entropy',
]
for method in methods_3d:
func = getattr(rank, method)
out_u = func(volume_uint, ball(3))
with expected_warnings(["Possible precision loss"]):
out_f = func(volume_float, ball(3))
assert_equal(out_u, out_f)
def test_compare_8bit_unsigned_vs_signed(self):
# filters applied on 8-bit image or 16-bit image (having only real 8-bit
# of dynamic) should be identical
# Create signed int8 image that and convert it to uint8
image = img_as_ubyte(data.camera())[::2, ::2]
image[image > 127] = 0
image_s = image.astype(np.int8)
image_u = img_as_ubyte(image_s)
assert_equal(image_u, img_as_ubyte(image_s))
methods = [
'autolevel',
'equalize',
'gradient',
'maximum',
'mean',
'geometric_mean',
'subtract_mean',
'median',
'minimum',
'modal',
'enhance_contrast',
'pop',
'threshold',
]
for method in methods:
func = getattr(rank, method)
out_u = func(image_u, disk(3))
with expected_warnings(["Possible precision loss"]):
out_s = func(image_s, disk(3))
assert_equal(out_u, out_s)
def test_compare_8bit_unsigned_vs_signed_3d(self):
# filters applied on 8-bit volume or 16-bit volume (having only real 8-bit
# of dynamic) should be identical
# Create signed int8 volume that and convert it to uint8
np.random.seed(0)
volume_s = np.random.randint(0, high=127, size=(10, 20, 30), dtype=np.int8)
volume_u = img_as_ubyte(volume_s)
assert_equal(volume_u, img_as_ubyte(volume_s))
methods_3d = [
'equalize',
'otsu',
'autolevel',
'gradient',
'majority',
'maximum',
'mean',
'geometric_mean',
'subtract_mean',
'median',
'minimum',
'modal',
'enhance_contrast',
'pop',
'sum',
'threshold',
'noise_filter',
'entropy',
]
for method in methods_3d:
func = getattr(rank, method)
out_u = func(volume_u, ball(3))
with expected_warnings(["Possible precision loss"]):
out_s = func(volume_s, ball(3))
assert_equal(out_u, out_s)
@pytest.mark.parametrize(
'method',
[
'autolevel',
'equalize',
'gradient',
'maximum',
'mean',
'subtract_mean',
'median',
'minimum',
'modal',
'enhance_contrast',
'pop',
'threshold',
],
)
def test_compare_8bit_vs_16bit(self, method):
# filters applied on 8-bit image or 16-bit image (having only real 8-bit
# of dynamic) should be identical
image8 = util.img_as_ubyte(data.camera())[::2, ::2]
image16 = image8.astype(np.uint16)
assert_equal(image8, image16)
np.random.seed(0)
volume8 = np.random.randint(128, high=256, size=(10, 10, 10), dtype=np.uint8)
volume16 = volume8.astype(np.uint16)
methods_3d = [
'equalize',
'otsu',
'autolevel',
'gradient',
'majority',
'maximum',
'mean',
'geometric_mean',
'subtract_mean',
'median',
'minimum',
'modal',
'enhance_contrast',
'pop',
'sum',
'threshold',
'noise_filter',
'entropy',
]
func = getattr(rank, method)
f8 = func(image8, disk(3))
f16 = func(image16, disk(3))
assert_equal(f8, f16)
if method in methods_3d:
f8 = func(volume8, ball(3))
f16 = func(volume16, ball(3))
assert_equal(f8, f16)
def test_trivial_footprint8(self):
# check that min, max and mean returns identity if footprint
# contains only central pixel
image = np.zeros((5, 5), dtype=np.uint8)
out = np.zeros_like(image)
mask = np.ones_like(image, dtype=np.uint8)
image[2, 2] = 255
image[2, 3] = 128
image[1, 2] = 16
elem = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=np.uint8)
rank.mean(image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0)
assert_equal(image, out)
rank.geometric_mean(
image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0
)
assert_equal(image, out)
rank.minimum(
image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0
)
assert_equal(image, out)
rank.maximum(
image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0
)
assert_equal(image, out)
def test_trivial_footprint16(self):
# check that min, max and mean returns identity if footprint
# contains only central pixel
image = np.zeros((5, 5), dtype=np.uint16)
out = np.zeros_like(image)
mask = np.ones_like(image, dtype=np.uint8)
image[2, 2] = 255
image[2, 3] = 128
image[1, 2] = 16
elem = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=np.uint8)
rank.mean(image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0)
assert_equal(image, out)
rank.geometric_mean(
image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0
)
assert_equal(image, out)
rank.minimum(
image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0
)
assert_equal(image, out)
rank.maximum(
image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0
)
assert_equal(image, out)
def test_smallest_footprint8(self):
# check that min, max and mean returns identity if footprint
# contains only central pixel
image = np.zeros((5, 5), dtype=np.uint8)
out = np.zeros_like(image)
mask = np.ones_like(image, dtype=np.uint8)
image[2, 2] = 255
image[2, 3] = 128
image[1, 2] = 16
elem = np.array([[1]], dtype=np.uint8)
rank.mean(image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0)
assert_equal(image, out)
rank.minimum(
image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0
)
assert_equal(image, out)
rank.maximum(
image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0
)
assert_equal(image, out)
def test_smallest_footprint16(self):
# check that min, max and mean returns identity if footprint
# contains only central pixel
image = np.zeros((5, 5), dtype=np.uint16)
out = np.zeros_like(image)
mask = np.ones_like(image, dtype=np.uint8)
image[2, 2] = 255
image[2, 3] = 128
image[1, 2] = 16
elem = np.array([[1]], dtype=np.uint8)
rank.mean(image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0)
assert_equal(image, out)
rank.geometric_mean(
image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0
)
assert_equal(image, out)
rank.minimum(
image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0
)
assert_equal(image, out)
rank.maximum(
image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0
)
assert_equal(image, out)
def test_empty_footprint(self):
# check that min, max and mean returns zeros if footprint is empty
image = np.zeros((5, 5), dtype=np.uint16)
out = np.zeros_like(image)
mask = np.ones_like(image, dtype=np.uint8)
res = np.zeros_like(image)
image[2, 2] = 255
image[2, 3] = 128
image[1, 2] = 16
elem = np.array([[0, 0, 0], [0, 0, 0]], dtype=np.uint8)
rank.mean(image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0)
assert_equal(res, out)
rank.geometric_mean(
image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0
)
assert_equal(res, out)
rank.minimum(
image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0
)
assert_equal(res, out)
rank.maximum(
image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0
)
assert_equal(res, out)
def test_otsu(self):
# test the local Otsu segmentation on a synthetic image
# (left to right ramp * sinus)
test = np.tile(
[
128,
145,
103,
127,
165,
83,
127,
185,
63,
127,
205,
43,
127,
225,
23,
127,
],
(16, 1),
)
test = test.astype(np.uint8)
res = np.tile([1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1], (16, 1))
footprint = np.ones((6, 6), dtype=np.uint8)
th = 1 * (test >= rank.otsu(test, footprint))
assert_equal(th, res)
def test_entropy(self):
# verify that entropy is coherent with bitdepth of the input data
footprint = np.ones((16, 16), dtype=np.uint8)
# 1 bit per pixel
data = np.tile(np.asarray([0, 1]), (100, 100)).astype(np.uint8)
assert np.max(rank.entropy(data, footprint)) == 1
# 2 bit per pixel
data = np.tile(np.asarray([[0, 1], [2, 3]]), (10, 10)).astype(np.uint8)
assert np.max(rank.entropy(data, footprint)) == 2
# 3 bit per pixel
data = np.tile(np.asarray([[0, 1, 2, 3], [4, 5, 6, 7]]), (10, 10)).astype(
np.uint8
)
assert np.max(rank.entropy(data, footprint)) == 3
# 4 bit per pixel
data = np.tile(np.reshape(np.arange(16), (4, 4)), (10, 10)).astype(np.uint8)
assert np.max(rank.entropy(data, footprint)) == 4
# 6 bit per pixel
data = np.tile(np.reshape(np.arange(64), (8, 8)), (10, 10)).astype(np.uint8)
assert np.max(rank.entropy(data, footprint)) == 6
# 8-bit per pixel
data = np.tile(np.reshape(np.arange(256), (16, 16)), (10, 10)).astype(np.uint8)
assert np.max(rank.entropy(data, footprint)) == 8
# 12 bit per pixel
footprint = np.ones((64, 64), dtype=np.uint8)
data = np.zeros((65, 65), dtype=np.uint16)
data[:64, :64] = np.reshape(np.arange(4096), (64, 64))
with expected_warnings(['Bad rank filter performance']):
assert np.max(rank.entropy(data, footprint)) == 12
# make sure output is of dtype double
with expected_warnings(['Bad rank filter performance']):
out = rank.entropy(data, np.ones((16, 16), dtype=np.uint8))
assert out.dtype == np.float64
def test_footprint_dtypes(self):
image = np.zeros((5, 5), dtype=np.uint8)
out = np.zeros_like(image)
mask = np.ones_like(image, dtype=np.uint8)
image[2, 2] = 255
image[2, 3] = 128
image[1, 2] = 16
for dtype in (
bool,
np.uint8,
np.uint16,
np.int32,
np.int64,
np.float32,
np.float64,
):
elem = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=dtype)
rank.mean(
image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0
)
assert_equal(image, out)
rank.geometric_mean(
image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0
)
assert_equal(image, out)
rank.mean_percentile(
image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0
)
assert_equal(image, out)
def test_16bit(self):
image = np.zeros((21, 21), dtype=np.uint16)
footprint = np.ones((3, 3), dtype=np.uint8)
for bitdepth in range(17):
value = 2**bitdepth - 1
image[10, 10] = value
if bitdepth >= 11:
expected = ['Bad rank filter performance']
else:
expected = []
with expected_warnings(expected):
assert rank.minimum(image, footprint)[10, 10] == 0
assert rank.maximum(image, footprint)[10, 10] == value
mean_val = rank.mean(image, footprint)[10, 10]
assert mean_val == int(value / footprint.size)
def test_bilateral(self):
image = np.zeros((21, 21), dtype=np.uint16)
footprint = np.ones((3, 3), dtype=np.uint8)
image[10, 10] = 1000
image[10, 11] = 1010
image[10, 9] = 900
kwargs = dict(s0=1, s1=1)
assert rank.mean_bilateral(image, footprint, **kwargs)[10, 10] == 1000
assert rank.pop_bilateral(image, footprint, **kwargs)[10, 10] == 1
kwargs = dict(s0=11, s1=11)
assert rank.mean_bilateral(image, footprint, **kwargs)[10, 10] == 1005
assert rank.pop_bilateral(image, footprint, **kwargs)[10, 10] == 2
def test_percentile_min(self):
# check that percentile p0 = 0 is identical to local min
img = data.camera()
img16 = img.astype(np.uint16)
footprint = disk(15)
# check for 8bit
img_p0 = rank.percentile(img, footprint=footprint, p0=0)
img_min = rank.minimum(img, footprint=footprint)
assert_equal(img_p0, img_min)
# check for 16bit
img_p0 = rank.percentile(img16, footprint=footprint, p0=0)
img_min = rank.minimum(img16, footprint=footprint)
assert_equal(img_p0, img_min)
def test_percentile_max(self):
# check that percentile p0 = 1 is identical to local max
img = data.camera()
img16 = img.astype(np.uint16)
footprint = disk(15)
# check for 8bit
img_p0 = rank.percentile(img, footprint=footprint, p0=1.0)
img_max = rank.maximum(img, footprint=footprint)
assert_equal(img_p0, img_max)
# check for 16bit
img_p0 = rank.percentile(img16, footprint=footprint, p0=1.0)
img_max = rank.maximum(img16, footprint=footprint)
assert_equal(img_p0, img_max)
def test_percentile_median(self):
# check that percentile p0 = 0.5 is identical to local median
img = data.camera()
img16 = img.astype(np.uint16)
footprint = disk(15)
# check for 8bit
img_p0 = rank.percentile(img, footprint=footprint, p0=0.5)
img_max = rank.median(img, footprint=footprint)
assert_equal(img_p0, img_max)
# check for 16bit
img_p0 = rank.percentile(img16, footprint=footprint, p0=0.5)
img_max = rank.median(img16, footprint=footprint)
assert_equal(img_p0, img_max)
def test_sum(self):
# check the number of valid pixels in the neighborhood
image8 = np.array(
[
[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0],
],
dtype=np.uint8,
)
image16 = 400 * np.array(
[
[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0],
],
dtype=np.uint16,
)
elem = np.ones((3, 3), dtype=np.uint8)
out8 = np.empty_like(image8)
out16 = np.empty_like(image16)
mask = np.ones(image8.shape, dtype=np.uint8)
r = np.array(
[
[1, 2, 3, 2, 1],
[2, 4, 6, 4, 2],
[3, 6, 9, 6, 3],
[2, 4, 6, 4, 2],
[1, 2, 3, 2, 1],
],
dtype=np.uint8,
)
rank.sum(image=image8, footprint=elem, out=out8, mask=mask)
assert_equal(r, out8)
rank.sum_percentile(
image=image8, footprint=elem, out=out8, mask=mask, p0=0.0, p1=1.0
)
assert_equal(r, out8)
rank.sum_bilateral(
image=image8, footprint=elem, out=out8, mask=mask, s0=255, s1=255
)
assert_equal(r, out8)
r = 400 * np.array(
[
[1, 2, 3, 2, 1],
[2, 4, 6, 4, 2],
[3, 6, 9, 6, 3],
[2, 4, 6, 4, 2],
[1, 2, 3, 2, 1],
],
dtype=np.uint16,
)
rank.sum(image=image16, footprint=elem, out=out16, mask=mask)
assert_equal(r, out16)
rank.sum_percentile(
image=image16, footprint=elem, out=out16, mask=mask, p0=0.0, p1=1.0
)
assert_equal(r, out16)
rank.sum_bilateral(
image=image16, footprint=elem, out=out16, mask=mask, s0=1000, s1=1000
)
assert_equal(r, out16)
def test_windowed_histogram(self):
# check the number of valid pixels in the neighborhood
image8 = np.array(
[
[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0],
],
dtype=np.uint8,
)
elem = np.ones((3, 3), dtype=np.uint8)
outf = np.empty(image8.shape + (2,), dtype=float)
mask = np.ones(image8.shape, dtype=np.uint8)
# Population so we can normalize the expected output while maintaining
# code readability
pop = np.array(
[
[4, 6, 6, 6, 4],
[6, 9, 9, 9, 6],
[6, 9, 9, 9, 6],
[6, 9, 9, 9, 6],
[4, 6, 6, 6, 4],
],
dtype=float,
)
r0 = (
np.array(
[
[3, 4, 3, 4, 3],
[4, 5, 3, 5, 4],
[3, 3, 0, 3, 3],
[4, 5, 3, 5, 4],
[3, 4, 3, 4, 3],
],
dtype=float,
)
/ pop
)
r1 = (
np.array(
[
[1, 2, 3, 2, 1],
[2, 4, 6, 4, 2],
[3, 6, 9, 6, 3],
[2, 4, 6, 4, 2],
[1, 2, 3, 2, 1],
],
dtype=float,
)
/ pop
)
rank.windowed_histogram(image=image8, footprint=elem, out=outf, mask=mask)
assert_equal(r0, outf[:, :, 0])
assert_equal(r1, outf[:, :, 1])
# Test n_bins parameter
larger_output = rank.windowed_histogram(
image=image8, footprint=elem, mask=mask, n_bins=5
)
assert larger_output.shape[2] == 5
def test_median_default_value(self):
a = np.zeros((3, 3), dtype=np.uint8)
a[1] = 1
full_footprint = np.ones((3, 3), dtype=np.uint8)
assert_equal(rank.median(a), rank.median(a, full_footprint))
assert rank.median(a)[1, 1] == 0
assert rank.median(a, disk(1))[1, 1] == 1
def test_majority(self):
img = data.camera()
elem = np.ones((3, 3), dtype=np.uint8)
expected = rank.windowed_histogram(img, elem).argmax(-1).astype(np.uint8)
assert_equal(expected, rank.majority(img, elem))
def test_output_same_dtype(self):
image = (np.random.rand(100, 100) * 256).astype(np.uint8)
out = np.empty_like(image)
mask = np.ones(image.shape, dtype=np.uint8)
elem = np.ones((3, 3), dtype=np.uint8)
rank.maximum(image=image, footprint=elem, out=out, mask=mask)
assert_equal(image.dtype, out.dtype)
def test_input_boolean_dtype(self):
image = (np.random.rand(100, 100) * 256).astype(bool)
elem = np.ones((3, 3), dtype=bool)
with pytest.raises(ValueError):
rank.maximum(image=image, footprint=elem)
@pytest.mark.parametrize("filter", all_rank_filters)
@pytest.mark.parametrize("shift_name", ["shift_x", "shift_y"])
@pytest.mark.parametrize("shift_value", [False, True])
def test_rank_filters_boolean_shift(self, filter, shift_name, shift_value):
"""Test warning if shift is provided as a boolean."""
filter_func = getattr(rank, filter)
image = img_as_ubyte(self.image)
kwargs = {"footprint": self.footprint, shift_name: shift_value}
with pytest.warns() as record:
filter_func(image, **kwargs)
expected_lineno = inspect.currentframe().f_lineno - 1
assert len(record) == 1
assert "will be interpreted as int" in record[0].message.args[0]
assert record[0].filename == __file__
assert record[0].lineno == expected_lineno
@pytest.mark.parametrize("filter", _3d_rank_filters)
@pytest.mark.parametrize("shift_name", ["shift_x", "shift_y", "shift_z"])
@pytest.mark.parametrize("shift_value", [False, True])
def test_rank_filters_3D_boolean_shift(self, filter, shift_name, shift_value):
"""Test warning if shift is provided as a boolean."""
filter_func = getattr(rank, filter)
image = img_as_ubyte(self.volume)
kwargs = {"footprint": self.footprint_3d, shift_name: shift_value}
with pytest.warns() as record:
filter_func(image, **kwargs)
expected_lineno = inspect.currentframe().f_lineno - 1
assert len(record) == 1
assert "will be interpreted as int" in record[0].message.args[0]
assert record[0].filename == __file__
assert record[0].lineno == expected_lineno
| TestRank |
python | django__django | tests/logging_tests/logconfig.py | {
"start": 298,
"end": 398
} | class ____(BaseEmailBackend):
def send_messages(self, email_messages):
pass
| MyEmailBackend |
python | numba__numba | numba/core/postproc.py | {
"start": 98,
"end": 366
} | class ____(object):
def __init__(self, block, inst):
assert isinstance(block, ir.Block)
assert isinstance(inst, ir.Yield)
self.block = block
self.inst = inst
self.live_vars = None
self.weak_live_vars = None
| YieldPoint |
python | langchain-ai__langchain | libs/core/langchain_core/example_selectors/semantic_similarity.py | {
"start": 3243,
"end": 8276
} | class ____(_VectorStoreExampleSelector):
"""Select examples based on semantic similarity."""
def select_examples(self, input_variables: dict[str, str]) -> list[dict]:
"""Select examples based on semantic similarity.
Args:
input_variables: The input variables to use for search.
Returns:
The selected examples.
"""
# Get the docs with the highest similarity.
vectorstore_kwargs = self.vectorstore_kwargs or {}
example_docs = self.vectorstore.similarity_search(
self._example_to_text(input_variables, self.input_keys),
k=self.k,
**vectorstore_kwargs,
)
return self._documents_to_examples(example_docs)
async def aselect_examples(self, input_variables: dict[str, str]) -> list[dict]:
"""Asynchronously select examples based on semantic similarity.
Args:
input_variables: The input variables to use for search.
Returns:
The selected examples.
"""
# Get the docs with the highest similarity.
vectorstore_kwargs = self.vectorstore_kwargs or {}
example_docs = await self.vectorstore.asimilarity_search(
self._example_to_text(input_variables, self.input_keys),
k=self.k,
**vectorstore_kwargs,
)
return self._documents_to_examples(example_docs)
@classmethod
def from_examples(
cls,
examples: list[dict],
embeddings: Embeddings,
vectorstore_cls: type[VectorStore],
k: int = 4,
input_keys: list[str] | None = None,
*,
example_keys: list[str] | None = None,
vectorstore_kwargs: dict | None = None,
**vectorstore_cls_kwargs: Any,
) -> SemanticSimilarityExampleSelector:
"""Create k-shot example selector using example list and embeddings.
Reshuffles examples dynamically based on query similarity.
Args:
examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select.
input_keys: If provided, the search is based on the input variables
instead of all variables.
example_keys: If provided, keys to filter examples to.
vectorstore_kwargs: Extra arguments passed to similarity_search function
of the `VectorStore`.
vectorstore_cls_kwargs: optional kwargs containing url for vector store
Returns:
The ExampleSelector instantiated, backed by a vector store.
"""
string_examples = [cls._example_to_text(eg, input_keys) for eg in examples]
vectorstore = vectorstore_cls.from_texts(
string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs
)
return cls(
vectorstore=vectorstore,
k=k,
input_keys=input_keys,
example_keys=example_keys,
vectorstore_kwargs=vectorstore_kwargs,
)
@classmethod
async def afrom_examples(
cls,
examples: list[dict],
embeddings: Embeddings,
vectorstore_cls: type[VectorStore],
k: int = 4,
input_keys: list[str] | None = None,
*,
example_keys: list[str] | None = None,
vectorstore_kwargs: dict | None = None,
**vectorstore_cls_kwargs: Any,
) -> SemanticSimilarityExampleSelector:
"""Async create k-shot example selector using example list and embeddings.
Reshuffles examples dynamically based on query similarity.
Args:
examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select.
input_keys: If provided, the search is based on the input variables
instead of all variables.
example_keys: If provided, keys to filter examples to.
vectorstore_kwargs: Extra arguments passed to similarity_search function
of the `VectorStore`.
vectorstore_cls_kwargs: optional kwargs containing url for vector store
Returns:
The ExampleSelector instantiated, backed by a vector store.
"""
string_examples = [cls._example_to_text(eg, input_keys) for eg in examples]
vectorstore = await vectorstore_cls.afrom_texts(
string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs
)
return cls(
vectorstore=vectorstore,
k=k,
input_keys=input_keys,
example_keys=example_keys,
vectorstore_kwargs=vectorstore_kwargs,
)
| SemanticSimilarityExampleSelector |
python | pytorch__pytorch | torch/_inductor/compile_worker/subproc_pool.py | {
"start": 2371,
"end": 2710
} | class ____:
"""
Carries exception info from subprocesses across the wire. traceback
objects are not pickleable, so we store the trace as a string and
use it for the message in the exception thrown in the main process.
"""
def __init__(self, details: str) -> None:
self.details = details
| _SubprocExceptionInfo |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/assertions.py | {
"start": 30654,
"end": 31854
} | class ____:
def compare_table_index_with_expected(
self, table: schema.Table, expected: list, dialect_name: str
):
eq_(len(table.indexes), len(expected))
idx_dict = {idx.name: idx for idx in table.indexes}
for exp in expected:
idx = idx_dict[exp["name"]]
eq_(idx.unique, exp["unique"])
cols = [c for c in exp["column_names"] if c is not None]
eq_(len(idx.columns), len(cols))
for c in cols:
is_true(c in idx.columns)
exprs = exp.get("expressions")
if exprs:
eq_(len(idx.expressions), len(exprs))
for idx_exp, expr, col in zip(
idx.expressions, exprs, exp["column_names"]
):
if col is None:
eq_(idx_exp.text, expr)
if (
exp.get("dialect_options")
and f"{dialect_name}_include" in exp["dialect_options"]
):
eq_(
idx.dialect_options[dialect_name]["include"],
exp["dialect_options"][f"{dialect_name}_include"],
)
| ComparesIndexes |
python | huggingface__transformers | tests/models/auto/test_modeling_auto.py | {
"start": 2613,
"end": 25967
} | class ____(unittest.TestCase):
def setUp(self):
transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
@slow
def test_model_from_pretrained(self):
model_name = "google-bert/bert-base-uncased"
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModel.from_pretrained(model_name)
model, loading_info = AutoModel.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertModel)
self.assertEqual(len(loading_info["missing_keys"]), 0)
# When using PyTorch checkpoint, the expected value is `8`. With `safetensors` checkpoint (if it is
# installed), the expected value becomes `7`.
EXPECTED_NUM_OF_UNEXPECTED_KEYS = 7
self.assertEqual(len(loading_info["unexpected_keys"]), EXPECTED_NUM_OF_UNEXPECTED_KEYS)
self.assertEqual(len(loading_info["mismatched_keys"]), 0)
self.assertEqual(len(loading_info["error_msgs"]), 0)
@slow
def test_model_for_pretraining_from_pretrained(self):
model_name = "google-bert/bert-base-uncased"
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForPreTraining.from_pretrained(model_name)
model, loading_info = AutoModelForPreTraining.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForPreTraining)
# Only one value should not be initialized and in the missing keys.
for value in loading_info.values():
self.assertEqual(len(value), 0)
@slow
def test_lmhead_model_from_pretrained(self):
model_name = "google-bert/bert-base-uncased"
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelWithLMHead.from_pretrained(model_name)
model, loading_info = AutoModelWithLMHead.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForMaskedLM)
@slow
def test_model_for_causal_lm(self):
model_name = "openai-community/gpt2"
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, GPT2Config)
model = AutoModelForCausalLM.from_pretrained(model_name)
model, loading_info = AutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, GPT2LMHeadModel)
@slow
def test_model_for_masked_lm(self):
model_name = "google-bert/bert-base-uncased"
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForMaskedLM.from_pretrained(model_name)
model, loading_info = AutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForMaskedLM)
@slow
def test_model_for_encoder_decoder_lm(self):
model_name = "google-t5/t5-base"
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, T5Config)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, T5ForConditionalGeneration)
@slow
def test_sequence_classification_model_from_pretrained(self):
model_name = "google-bert/bert-base-uncased"
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model, loading_info = AutoModelForSequenceClassification.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForSequenceClassification)
@slow
def test_question_answering_model_from_pretrained(self):
model_name = "google-bert/bert-base-uncased"
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)
model, loading_info = AutoModelForQuestionAnswering.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForQuestionAnswering)
@slow
def test_table_question_answering_model_from_pretrained(self):
model_name = "google/tapas-base"
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, TapasConfig)
model = AutoModelForTableQuestionAnswering.from_pretrained(model_name)
model, loading_info = AutoModelForTableQuestionAnswering.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, TapasForQuestionAnswering)
@slow
def test_token_classification_model_from_pretrained(self):
model_name = "google-bert/bert-base-uncased"
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForTokenClassification.from_pretrained(model_name)
model, loading_info = AutoModelForTokenClassification.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForTokenClassification)
@slow
def test_auto_backbone_timm_model_from_pretrained(self):
# Configs can't be loaded for timm models
model = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True)
with pytest.raises(ValueError):
# We can't pass output_loading_info=True as we're loading from timm
AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, TimmBackbone)
# Check kwargs are correctly passed to the backbone
model = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=(-2, -1))
self.assertEqual(model.out_indices, [-2, -1])
# Check out_features cannot be passed to Timm backbones
with self.assertRaises(ValueError):
_ = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_features=["stage1"])
@slow
def test_auto_backbone_from_pretrained(self):
model = AutoBackbone.from_pretrained("microsoft/resnet-18")
model, loading_info = AutoBackbone.from_pretrained("microsoft/resnet-18", output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, ResNetBackbone)
# Check kwargs are correctly passed to the backbone
model = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[-2, -1])
self.assertEqual(model.out_indices, [-2, -1])
self.assertEqual(model.out_features, ["stage3", "stage4"])
model = AutoBackbone.from_pretrained("microsoft/resnet-18", out_features=["stage2", "stage4"])
self.assertEqual(model.out_indices, [2, 4])
self.assertEqual(model.out_features, ["stage2", "stage4"])
def test_from_pretrained_identifier(self):
model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
self.assertIsInstance(model, BertForMaskedLM)
self.assertEqual(model.num_parameters(), 14410)
self.assertEqual(model.num_parameters(only_trainable=True), 14410)
def test_from_identifier_from_model_type(self):
model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
self.assertIsInstance(model, RobertaForMaskedLM)
self.assertEqual(model.num_parameters(), 14410)
self.assertEqual(model.num_parameters(only_trainable=True), 14410)
def test_from_pretrained_with_tuple_values(self):
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
model = AutoModel.from_pretrained("sgugger/funnel-random-tiny")
self.assertIsInstance(model, FunnelModel)
config = copy.deepcopy(model.config)
config.architectures = ["FunnelBaseModel"]
model = AutoModel.from_config(config)
self.assertIsInstance(model, FunnelBaseModel)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
model = AutoModel.from_pretrained(tmp_dir)
self.assertIsInstance(model, FunnelBaseModel)
def test_from_pretrained_dynamic_model_local(self):
try:
AutoConfig.register("custom", CustomConfig)
AutoModel.register(CustomConfig, CustomModel)
config = CustomConfig(hidden_size=32)
model = CustomModel(config)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
new_model = AutoModel.from_pretrained(tmp_dir, trust_remote_code=True)
for p1, p2 in zip(model.parameters(), new_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in MODEL_MAPPING._extra_content:
del MODEL_MAPPING._extra_content[CustomConfig]
def test_from_pretrained_dynamic_model_distant(self):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(ValueError):
model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model")
# If remote code is disabled, we can't load this config.
with self.assertRaises(ValueError):
model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
self.assertEqual(model.__class__.__name__, "NewModel")
# Test the dynamic module is loaded only once.
reloaded_model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
self.assertIs(model.__class__, reloaded_model.__class__)
# Test model can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
reloaded_model = AutoModel.from_pretrained(tmp_dir, trust_remote_code=True)
self.assertEqual(reloaded_model.__class__.__name__, "NewModel")
for p1, p2 in zip(model.parameters(), reloaded_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
# Test the dynamic module is reloaded if we force it.
reloaded_model = AutoModel.from_pretrained(
"hf-internal-testing/test_dynamic_model", trust_remote_code=True, force_download=True
)
self.assertIsNot(model.__class__, reloaded_model.__class__)
# This one uses a relative import to a util file, this checks it is downloaded and used properly.
model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model_with_util", trust_remote_code=True)
self.assertEqual(model.__class__.__name__, "NewModel")
# Test the dynamic module is loaded only once.
reloaded_model = AutoModel.from_pretrained(
"hf-internal-testing/test_dynamic_model_with_util", trust_remote_code=True
)
self.assertIs(model.__class__, reloaded_model.__class__)
# Test model can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
reloaded_model = AutoModel.from_pretrained(tmp_dir, trust_remote_code=True)
self.assertEqual(reloaded_model.__class__.__name__, "NewModel")
for p1, p2 in zip(model.parameters(), reloaded_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
# Test the dynamic module is reloaded if we force it.
reloaded_model = AutoModel.from_pretrained(
"hf-internal-testing/test_dynamic_model_with_util", trust_remote_code=True, force_download=True
)
self.assertIsNot(model.__class__, reloaded_model.__class__)
def test_from_pretrained_dynamic_model_distant_with_ref(self):
model = AutoModel.from_pretrained("hf-internal-testing/ref_to_test_dynamic_model", trust_remote_code=True)
self.assertEqual(model.__class__.__name__, "NewModel")
# Test model can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
reloaded_model = AutoModel.from_pretrained(tmp_dir, trust_remote_code=True)
self.assertEqual(reloaded_model.__class__.__name__, "NewModel")
for p1, p2 in zip(model.parameters(), reloaded_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
# This one uses a relative import to a util file, this checks it is downloaded and used properly.
model = AutoModel.from_pretrained(
"hf-internal-testing/ref_to_test_dynamic_model_with_util", trust_remote_code=True
)
self.assertEqual(model.__class__.__name__, "NewModel")
# Test model can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
reloaded_model = AutoModel.from_pretrained(tmp_dir, trust_remote_code=True)
self.assertEqual(reloaded_model.__class__.__name__, "NewModel")
for p1, p2 in zip(model.parameters(), reloaded_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
def test_from_pretrained_dynamic_model_with_period(self):
# We used to have issues where repos with "." in the name would cause issues because the Python
# import machinery would treat that as a directory separator, so we test that case
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(ValueError):
model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model_v1.0")
# If remote code is disabled, we can't load this config.
with self.assertRaises(ValueError):
model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model_v1.0", trust_remote_code=False)
model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model_v1.0", trust_remote_code=True)
self.assertEqual(model.__class__.__name__, "NewModel")
# Test that it works with a custom cache dir too
with tempfile.TemporaryDirectory() as tmp_dir:
model = AutoModel.from_pretrained(
"hf-internal-testing/test_dynamic_model_v1.0", trust_remote_code=True, cache_dir=tmp_dir
)
self.assertEqual(model.__class__.__name__, "NewModel")
def test_new_model_registration(self):
AutoConfig.register("custom", CustomConfig)
auto_classes = [
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
]
try:
for auto_class in auto_classes:
with self.subTest(auto_class.__name__):
# Wrong config class will raise an error
with self.assertRaises(ValueError):
auto_class.register(BertConfig, CustomModel)
auto_class.register(CustomConfig, CustomModel)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(ValueError):
auto_class.register(BertConfig, BertModel)
# Now that the config is registered, it can be used as any other config with the auto-API
tiny_config = BertModelTester(self).get_config()
config = CustomConfig(**tiny_config.to_dict())
model = auto_class.from_config(config)
self.assertIsInstance(model, CustomModel)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
new_model = auto_class.from_pretrained(tmp_dir)
# The model is a CustomModel but from the new dynamically imported class.
self.assertIsInstance(new_model, CustomModel)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
for mapping in (
MODEL_MAPPING,
MODEL_FOR_PRETRAINING_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
MODEL_FOR_CAUSAL_LM_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
):
if CustomConfig in mapping._extra_content:
del mapping._extra_content[CustomConfig]
def test_from_pretrained_dynamic_model_conflict(self):
class NewModelConfigLocal(BertConfig):
model_type = "new-model"
class NewModel(BertModel):
config_class = NewModelConfigLocal
try:
AutoConfig.register("new-model", NewModelConfigLocal)
AutoModel.register(NewModelConfigLocal, NewModel)
# If remote code is not set, the default is to use local
model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model")
self.assertEqual(model.config.__class__.__name__, "NewModelConfigLocal")
# If remote code is disabled, we load the local one.
model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
self.assertEqual(model.config.__class__.__name__, "NewModelConfigLocal")
# If remote is enabled, we load from the Hub
model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
self.assertEqual(model.config.__class__.__name__, "NewModelConfig")
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
if NewModelConfigLocal in MODEL_MAPPING._extra_content:
del MODEL_MAPPING._extra_content[NewModelConfigLocal]
def test_repo_not_found(self):
with self.assertRaisesRegex(
EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
):
_ = AutoModel.from_pretrained("bert-base")
def test_revision_not_found(self):
with self.assertRaisesRegex(
EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
):
_ = AutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
@unittest.skip("Failing on main")
def test_cached_model_has_minimum_calls_to_head(self):
# Make sure we have cached the model.
_ = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
with RequestCounter() as counter:
_ = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
self.assertEqual(counter["GET"], 0)
self.assertEqual(counter["HEAD"], 1)
self.assertEqual(counter.total_calls, 1)
# With a sharded checkpoint
_ = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded")
with RequestCounter() as counter:
_ = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded")
self.assertEqual(counter["GET"], 0)
self.assertEqual(counter["HEAD"], 1)
self.assertEqual(counter.total_calls, 1)
def test_attr_not_existing(self):
from transformers.models.auto.auto_factory import _LazyAutoMapping
_CONFIG_MAPPING_NAMES = OrderedDict([("bert", "BertConfig")])
_MODEL_MAPPING_NAMES = OrderedDict([("bert", "GhostModel")])
_MODEL_MAPPING = _LazyAutoMapping(_CONFIG_MAPPING_NAMES, _MODEL_MAPPING_NAMES)
with pytest.raises(ValueError, match=r"Could not find GhostModel neither in .* nor in .*!"):
_MODEL_MAPPING[BertConfig]
_MODEL_MAPPING_NAMES = OrderedDict([("bert", "BertModel")])
_MODEL_MAPPING = _LazyAutoMapping(_CONFIG_MAPPING_NAMES, _MODEL_MAPPING_NAMES)
self.assertEqual(_MODEL_MAPPING[BertConfig], BertModel)
_MODEL_MAPPING_NAMES = OrderedDict([("bert", "GPT2Model")])
_MODEL_MAPPING = _LazyAutoMapping(_CONFIG_MAPPING_NAMES, _MODEL_MAPPING_NAMES)
self.assertEqual(_MODEL_MAPPING[BertConfig], GPT2Model)
def test_custom_model_patched_generation_inheritance(self):
"""
Tests that our inheritance patching for generate-compatible models works as expected. Without this feature,
old Hub models lose the ability to call `generate`.
"""
model = AutoModelForCausalLM.from_pretrained(
"hf-internal-testing/test_dynamic_model_generation", trust_remote_code=True
)
self.assertTrue(model.__class__.__name__ == "NewModelForCausalLM")
# It inherits from GenerationMixin. This means it can `generate`. Because `PreTrainedModel` is scheduled to
# stop inheriting from `GenerationMixin` in v4.50, this check will fail if patching is not present.
self.assertTrue(isinstance(model, GenerationMixin))
# More precisely, it directly inherits from GenerationMixin. This check would fail prior to v4.45 (inheritance
# patching was added in v4.45)
self.assertTrue("GenerationMixin" in str(model.__class__.__bases__))
@unittest.skip("@Cyril: add the post_init() on the hub repo")
def test_model_with_dotted_name_and_relative_imports(self):
"""
Test for issue #40496: AutoModel.from_pretrained() doesn't work for models with '.' in their name
when there's a relative import.
Without the fix, this raises: ModuleNotFoundError:
No module named 'transformers_modules.hf-internal-testing.remote_code_model_with_dots_v1'
"""
model_id = "hf-internal-testing/remote_code_model_with_dots_v1.0"
model = AutoModel.from_pretrained(model_id, trust_remote_code=True)
self.assertIsNotNone(model)
| AutoModelTest |
python | huggingface__transformers | src/transformers/models/fnet/modeling_fnet.py | {
"start": 9394,
"end": 10374
} | class ____(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1 # The dimension which has the sequence length
self.fourier = FNetFourierTransform(config)
self.intermediate = FNetIntermediate(config)
self.output = FNetOutput(config)
def forward(self, hidden_states):
self_fourier_outputs = self.fourier(hidden_states)
fourier_output = self_fourier_outputs[0]
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, fourier_output
)
outputs = (layer_output,)
return outputs
def feed_forward_chunk(self, fourier_output):
intermediate_output = self.intermediate(fourier_output)
layer_output = self.output(intermediate_output, fourier_output)
return layer_output
| FNetLayer |
python | skorch-dev__skorch | skorch/tests/test_net.py | {
"start": 155813,
"end": 160350
} | class ____:
@pytest.fixture
def net_untrained(self, classifier_module):
"""A net with a custom 'module2_' criterion and a progress bar callback"""
from skorch import NeuralNetClassifier
from skorch.callbacks import ProgressBar
net = NeuralNetClassifier(
classifier_module,
max_epochs=2,
callbacks=[ProgressBar()],
)
return net
@pytest.fixture
def net(self, net_untrained, classifier_data):
X, y = classifier_data
return net_untrained.fit(X[:100], y[:100])
@pytest.fixture
def net_2_criteria(self, classifier_module, classifier_data):
"""A net with a custom 'module2_' criterion and disabled callbacks"""
# Check that not only the standard components are trimmed and that
# callbacks don't need to be lists.
from skorch import NeuralNetClassifier
class MyNet(NeuralNetClassifier):
def initialize_criterion(self):
super().initialize_criterion()
# pylint: disable=attribute-defined-outside-init
self.criterion2_ = classifier_module()
return self
X, y = classifier_data
net = MyNet(classifier_module, max_epochs=2, callbacks='disable')
net.fit(X, y)
return net
def test_trimmed_net_less_memory(self, net):
# very rough way of checking for smaller memory footprint
size_before = len(pickle.dumps(net))
net.trim_for_prediction()
size_after = len(pickle.dumps(net))
# check if there is at least 10% size gain
assert 0.9 * size_before > size_after
def test_trim_untrained_net_raises(self, net_untrained):
from skorch.exceptions import NotInitializedError
with pytest.raises(NotInitializedError):
net_untrained.trim_for_prediction()
def test_try_fitting_trimmed_net_raises(self, net, classifier_data):
from skorch.exceptions import SkorchTrainingImpossibleError
X, y = classifier_data
msg = (
"The net's attributes were trimmed for prediction, thus it cannot "
"be used for training anymore")
net.trim_for_prediction()
with pytest.raises(SkorchTrainingImpossibleError, match=msg):
net.fit(X, y)
def test_try_trimmed_net_partial_fit_raises(
self, net, classifier_data
):
from skorch.exceptions import SkorchTrainingImpossibleError
X, y = classifier_data
msg = (
"The net's attributes were trimmed for prediction, thus it cannot "
"be used for training anymore"
)
net.trim_for_prediction()
with pytest.raises(SkorchTrainingImpossibleError, match=msg):
net.partial_fit(X, y)
def test_inference_works(self, net, classifier_data):
# does not raise
net.trim_for_prediction()
X, _ = classifier_data
net.predict(X)
net.predict_proba(X)
net.forward(X)
def test_trim_twice_works(self, net):
# does not raise
net.trim_for_prediction()
net.trim_for_prediction()
def test_callbacks_trimmed(self, net):
net.trim_for_prediction()
assert not net.callbacks
assert not net.callbacks_
def test_optimizer_trimmed(self, net):
net.trim_for_prediction()
assert net.optimizer is None
assert net.optimizer_ is None
def test_criteria_trimmed(self, net_2_criteria):
net_2_criteria.trim_for_prediction()
assert net_2_criteria.criterion is None
assert net_2_criteria.criterion_ is None
assert net_2_criteria.criterion2_ is None
def test_history_trimmed(self, net):
net.trim_for_prediction()
assert not net.history
def test_train_iterator_trimmed(self, net):
net.trim_for_prediction()
assert net.iterator_train is None
def test_module_training(self, net):
# pylint: disable=protected-access
net._set_training(True)
net.trim_for_prediction()
assert net.module_.training is False
def test_can_be_pickled(self, net):
pickle.dumps(net)
net.trim_for_prediction()
pickle.dumps(net)
def test_can_be_copied(self, net):
copy.deepcopy(net)
net.trim_for_prediction()
copy.deepcopy(net)
def test_can_be_cloned(self, net):
clone(net)
net.trim_for_prediction()
clone(net)
| TestTrimForPrediction |
python | simplejson__simplejson | simplejson/tests/test_tool.py | {
"start": 801,
"end": 3304
} | class ____(unittest.TestCase):
data = """
[["blorpie"],[ "whoops" ] , [
],\t"d-shtaeou",\r"d-nthiouh",
"i-vhbjkhnth", {"nifty":87}, {"morefield" :\tfalse,"field"
:"yes"} ]
"""
expect = textwrap.dedent("""\
[
[
"blorpie"
],
[
"whoops"
],
[],
"d-shtaeou",
"d-nthiouh",
"i-vhbjkhnth",
{
"nifty": 87
},
{
"field": "yes",
"morefield": false
}
]
""")
def runTool(self, args=None, data=None):
argv = [sys.executable, '-m', 'simplejson.tool']
if args:
argv.extend(args)
proc = subprocess.Popen(argv,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
out, err = proc.communicate(data)
self.assertEqual(strip_python_stderr(err), ''.encode())
self.assertEqual(proc.returncode, 0)
return out.decode('utf8').splitlines()
def test_stdin_stdout(self):
self.assertEqual(
self.runTool(data=self.data.encode()),
self.expect.splitlines())
def test_infile_stdout(self):
infile, infile_name = open_temp_file()
try:
infile.write(self.data.encode())
infile.close()
self.assertEqual(
self.runTool(args=[infile_name]),
self.expect.splitlines())
finally:
os.unlink(infile_name)
def test_infile_outfile(self):
infile, infile_name = open_temp_file()
try:
infile.write(self.data.encode())
infile.close()
# outfile will get overwritten by tool, so the delete
# may not work on some platforms. Do it manually.
outfile, outfile_name = open_temp_file()
try:
outfile.close()
self.assertEqual(
self.runTool(args=[infile_name, outfile_name]),
[])
with open(outfile_name, 'rb') as f:
self.assertEqual(
f.read().decode('utf8').splitlines(),
self.expect.splitlines()
)
finally:
os.unlink(outfile_name)
finally:
os.unlink(infile_name)
| TestTool |
python | ray-project__ray | rllib/core/models/configs.py | {
"start": 10980,
"end": 13019
} | class ____(_MLPConfig):
"""Configuration for an MLP head.
See _MLPConfig for usage details.
Example:
.. testcode::
# Configuration:
config = MLPHeadConfig(
input_dims=[4], # must be 1D tensor
hidden_layer_dims=[8, 8],
hidden_layer_activation="relu",
hidden_layer_use_layernorm=False,
# final output layer with no activation (linear)
output_layer_dim=2,
output_layer_activation="linear",
)
model = config.build(framework="tf2")
# Resulting stack in pseudocode:
# Linear(4, 8, bias=True)
# ReLU()
# Linear(8, 8, bias=True)
# ReLU()
# Linear(8, 2, bias=True)
Example:
.. testcode::
# Configuration:
config = MLPHeadConfig(
input_dims=[2],
hidden_layer_dims=[10, 4],
hidden_layer_activation="silu",
hidden_layer_use_layernorm=True,
hidden_layer_use_bias=False,
# Initializer for `framework="torch"`.
hidden_layer_weights_initializer="xavier_normal_",
hidden_layer_weights_initializer_config={"gain": 0.8},
# No final output layer (use last dim in `hidden_layer_dims`
# as the size of the last layer in the stack).
output_layer_dim=None,
)
model = config.build(framework="torch")
# Resulting stack in pseudocode:
# Linear(2, 10, bias=False)
# LayerNorm((10,)) # layer norm always before activation
# SiLU()
# Linear(10, 4, bias=False)
# LayerNorm((4,)) # layer norm always before activation
# SiLU()
"""
@_framework_implemented()
def build(self, framework: str = "torch") -> "Model":
self._validate(framework=framework)
if framework == "torch":
from ray.rllib.core.models.torch.heads import TorchMLPHead
return TorchMLPHead(self)
@ExperimentalAPI
@dataclass
| MLPHeadConfig |
python | keon__algorithms | algorithms/tree/bst/delete_node.py | {
"start": 706,
"end": 1715
} | class ____(object):
def delete_node(self, root, key):
"""
:type root: TreeNode
:type key: int
:rtype: TreeNode
"""
if not root: return None
if root.val == key:
if root.left:
# Find the right most leaf of the left sub-tree
left_right_most = root.left
while left_right_most.right:
left_right_most = left_right_most.right
# Attach right child to the right of that leaf
left_right_most.right = root.right
# Return left child instead of root, a.k.a delete root
return root.left
else:
return root.right
# If left or right child got deleted, the returned root is the child of the deleted node.
elif root.val > key:
root.left = self.deleteNode(root.left, key)
else:
root.right = self.deleteNode(root.right, key)
return root
| Solution |
python | pydata__xarray | xarray/tests/test_plugins.py | {
"start": 782,
"end": 937
} | class ____(common.BackendEntrypoint):
def open_dataset(self, filename_or_obj, *, decoder): # type: ignore[override]
pass
| DummyBackendEntrypoint1 |
python | pyqtgraph__pyqtgraph | pyqtgraph/exporters/Exporter.py | {
"start": 172,
"end": 5364
} | class ____(object):
"""
Abstract class used for exporting graphics to file / printer / whatever.
"""
allowCopy = False # subclasses set this to True if they can use the copy buffer
Exporters = []
@classmethod
def register(cls):
"""
Used to register Exporter classes to appear in the export dialog.
"""
Exporter.Exporters.append(cls)
def __init__(self, item):
"""
Initialize with the item to be exported.
Can be an individual graphics item or a scene.
"""
object.__init__(self)
self.item = item
def parameters(self):
"""Return the parameters used to configure this exporter."""
raise Exception("Abstract method must be overridden in subclass.")
def export(self, fileName=None, toBytes=False, copy=False):
"""
If *fileName* is None, pop-up a file dialog.
If *toBytes* is True, return a bytes object rather than writing to file.
If *copy* is True, export to the copy buffer rather than writing to file.
"""
raise Exception("Abstract method must be overridden in subclass.")
def fileSaveDialog(self, filter=None, opts=None):
## Show a file dialog, call self.export(fileName) when finished.
if opts is None:
opts = {}
self.fileDialog = FileDialog()
self.fileDialog.setFileMode(QtWidgets.QFileDialog.FileMode.AnyFile)
self.fileDialog.setAcceptMode(QtWidgets.QFileDialog.AcceptMode.AcceptSave)
if filter is not None:
if isinstance(filter, str):
self.fileDialog.setNameFilter(filter)
elif isinstance(filter, list):
self.fileDialog.setNameFilters(filter)
global LastExportDirectory
exportDir = LastExportDirectory
if exportDir is not None:
self.fileDialog.setDirectory(exportDir)
self.fileDialog.show()
self.fileDialog.opts = opts
self.fileDialog.fileSelected.connect(self.fileSaveFinished)
return
def fileSaveFinished(self, fileName):
global LastExportDirectory
LastExportDirectory = os.path.split(fileName)[0]
## If file name does not match selected extension, append it now
ext = os.path.splitext(fileName)[1].lower().lstrip('.')
selectedExt = re.search(r'\*\.(\w+)\b', self.fileDialog.selectedNameFilter())
if selectedExt is not None:
selectedExt = selectedExt.groups()[0].lower()
if ext != selectedExt:
fileName = fileName + '.' + selectedExt.lstrip('.')
self.export(fileName=fileName, **self.fileDialog.opts)
def getScene(self):
if isinstance(self.item, GraphicsScene):
return self.item
else:
return self.item.scene()
def getSourceRect(self):
if isinstance(self.item, GraphicsScene):
w = self.item.getViewWidget()
return w.viewportTransform().inverted()[0].mapRect(w.rect())
else:
return self.item.sceneBoundingRect()
def getTargetRect(self):
if isinstance(self.item, GraphicsScene):
return self.item.getViewWidget().rect()
else:
return self.item.mapRectToDevice(self.item.boundingRect())
def setExportMode(self, export, opts=None):
"""
Call setExportMode(export, opts) on all items that will
be painted during the export. This informs the item
that it is about to be painted for export, allowing it to
alter its appearance temporarily
*export* - bool; must be True before exporting and False afterward
*opts* - dict; common parameters are 'antialias' and 'background'
"""
if opts is None:
opts = {}
for item in self.getPaintItems():
if hasattr(item, 'setExportMode'):
item.setExportMode(export, opts)
def getPaintItems(self, root=None):
"""Return a list of all items that should be painted in the correct order."""
if root is None:
root = self.item
preItems = []
postItems = []
if isinstance(root, QtWidgets.QGraphicsScene):
childs = [i for i in root.items() if i.parentItem() is None]
rootItem = []
else:
childs = root.childItems()
rootItem = [root]
childs.sort(key=lambda a: a.zValue())
while len(childs) > 0:
ch = childs.pop(0)
tree = self.getPaintItems(ch)
if (ch.flags() & ch.GraphicsItemFlag.ItemStacksBehindParent) or \
(ch.zValue() < 0 and (ch.flags() & ch.GraphicsItemFlag.ItemNegativeZStacksBehindParent)):
preItems.extend(tree)
else:
postItems.extend(tree)
return preItems + rootItem + postItems
def render(self, painter, targetRect, sourceRect, item=None):
self.getScene().render(painter, QtCore.QRectF(targetRect), QtCore.QRectF(sourceRect))
| Exporter |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 19944,
"end": 20545
} | class ____(TaskStatesResponse):
type: Literal["TaskStatesResult"] = "TaskStatesResult"
@classmethod
def from_api_response(cls, task_states_response: TaskStatesResponse) -> TaskStatesResult:
"""
Create result class from API Response.
API Response is autogenerated from the API schema, so we need to convert it to Result
for communication between the Supervisor and the task process since it needs a
discriminator field.
"""
return cls(**task_states_response.model_dump(exclude_defaults=True), type="TaskStatesResult")
| TaskStatesResult |
python | coleifer__peewee | tests/sqlite_udf.py | {
"start": 5786,
"end": 14174
} | class ____(BaseTestUDF):
requires = MODELS
def test_if_then_else(self):
for i in range(4):
User.create(username='u%d' % (i + 1))
with self.assertQueryCount(1):
query = (User
.select(
User.username,
fn.if_then_else(
User.username << ['u1', 'u2'],
'one or two',
'other').alias('name_type'))
.order_by(User.id))
self.assertEqual([row.name_type for row in query], [
'one or two',
'one or two',
'other',
'other'])
def test_strip_tz(self):
dt = datetime.datetime(2015, 1, 1, 12, 0)
# 13 hours, 37 minutes.
dt_tz = dt.replace(tzinfo=FixedOffset(13 * 60 + 37, 'US/LFK'))
api_dt = APIResponse.create(timestamp=dt)
api_dt_tz = APIResponse.create(timestamp=dt_tz)
# Re-fetch from the database.
api_dt_db = APIResponse.get(APIResponse.id == api_dt.id)
api_dt_tz_db = APIResponse.get(APIResponse.id == api_dt_tz.id)
# Assert the timezone is present, first of all, and that they were
# stored in the database.
self.assertEqual(api_dt_db.timestamp, dt)
query = (APIResponse
.select(
APIResponse.id,
fn.strip_tz(APIResponse.timestamp).alias('ts'))
.order_by(APIResponse.id))
ts, ts_tz = query[:]
self.assertEqual(ts.ts, dt)
self.assertEqual(ts_tz.ts, dt)
def test_human_delta(self):
values = [0, 1, 30, 300, 3600, 7530, 300000]
for value in values:
Generic.create(value=value)
delta = fn.human_delta(Generic.value).coerce(False)
query = (Generic
.select(
Generic.value,
delta.alias('delta'))
.order_by(Generic.value))
results = query.tuples()[:]
self.assertEqual(results, [
(0, '0 seconds'),
(1, '1 second'),
(30, '30 seconds'),
(300, '5 minutes'),
(3600, '1 hour'),
(7530, '2 hours, 5 minutes, 30 seconds'),
(300000, '3 days, 11 hours, 20 minutes'),
])
def test_file_ext(self):
data = (
('test.py', '.py'),
('test.x.py', '.py'),
('test', ''),
('test.', '.'),
('/foo.bar/test/nug.py', '.py'),
('/foo.bar/test/nug', ''),
)
for filename, ext in data:
res = self.sql1('SELECT file_ext(?)', filename)
self.assertEqual(res, ext)
def test_gz(self):
random.seed(1)
A = ord('A')
z = ord('z')
with self.database.atomic():
def randstr(l):
return ''.join([
chr(random.randint(A, z))
for _ in range(l)])
data = (
'a',
'a' * 1024,
randstr(1024),
randstr(4096),
randstr(1024 * 64))
for s in data:
compressed = self.sql1('select gzip(?)', s)
decompressed = self.sql1('select gunzip(?)', compressed)
self.assertEqual(decompressed.decode('utf-8'), s)
def test_hostname(self):
r = json.dumps({'success': True})
data = (
('https://charlesleifer.com/api/', r),
('https://a.charlesleifer.com/api/foo', r),
('www.nugget.com', r),
('nugz.com', r),
('http://a.b.c.peewee/foo', r),
('https://charlesleifer.com/xx', r),
('https://charlesleifer.com/xx', r),
)
with self.database.atomic():
for url, response in data:
APIResponse.create(url=url, data=data)
with self.assertQueryCount(1):
query = (APIResponse
.select(
fn.hostname(APIResponse.url).alias('host'),
fn.COUNT(APIResponse.id).alias('count'))
.group_by(fn.hostname(APIResponse.url))
.order_by(
fn.COUNT(APIResponse.id).desc(),
fn.hostname(APIResponse.url)))
results = query.tuples()[:]
self.assertEqual(results, [
('charlesleifer.com', 3),
('', 2),
('a.b.c.peewee', 1),
('a.charlesleifer.com', 1)])
@skip_unless(IS_SQLITE_9, 'requires sqlite >= 3.9')
def test_toggle(self):
self.assertEqual(self.sql1('select toggle(?)', 'foo'), 1)
self.assertEqual(self.sql1('select toggle(?)', 'bar'), 1)
self.assertEqual(self.sql1('select toggle(?)', 'foo'), 0)
self.assertEqual(self.sql1('select toggle(?)', 'foo'), 1)
self.assertEqual(self.sql1('select toggle(?)', 'bar'), 0)
self.assertEqual(self.sql1('select clear_toggles()'), None)
self.assertEqual(self.sql1('select toggle(?)', 'foo'), 1)
def test_setting(self):
self.assertEqual(self.sql1('select setting(?, ?)', 'k1', 'v1'), 'v1')
self.assertEqual(self.sql1('select setting(?, ?)', 'k2', 'v2'), 'v2')
self.assertEqual(self.sql1('select setting(?)', 'k1'), 'v1')
self.assertEqual(self.sql1('select setting(?, ?)', 'k2', 'v2-x'), 'v2-x')
self.assertEqual(self.sql1('select setting(?)', 'k2'), 'v2-x')
self.assertEqual(self.sql1('select setting(?)', 'kx'), None)
self.assertEqual(self.sql1('select clear_settings()'), None)
self.assertEqual(self.sql1('select setting(?)', 'k1'), None)
def test_random_range(self):
vals = ((1, 10), (1, 100), (0, 2), (1, 5, 2))
results = []
for params in vals:
random.seed(1)
results.append(random.randrange(*params))
for params, expected in zip(vals, results):
random.seed(1)
if len(params) == 3:
pstr = '?, ?, ?'
else:
pstr = '?, ?'
self.assertEqual(
self.sql1('select randomrange(%s)' % pstr, *params),
expected)
def test_sqrt(self):
self.assertEqual(self.sql1('select sqrt(?)', 4), 2)
self.assertEqual(round(self.sql1('select sqrt(?)', 2), 2), 1.41)
def test_tonumber(self):
data = (
('123', 123),
('1.23', 1.23),
('1e4', 10000),
('-10', -10),
('x', None),
('13d', None),
)
for inp, outp in data:
self.assertEqual(self.sql1('select tonumber(?)', inp), outp)
@requires_cython
def test_leven(self):
self.assertEqual(
self.sql1('select levenshtein_dist(?, ?)', 'abc', 'ba'),
2)
self.assertEqual(
self.sql1('select levenshtein_dist(?, ?)', 'abcde', 'eba'),
4)
self.assertEqual(
self.sql1('select levenshtein_dist(?, ?)', 'abcde', 'abcde'),
0)
@requires_cython
def test_str_dist(self):
self.assertEqual(
self.sql1('select str_dist(?, ?)', 'abc', 'ba'),
3)
self.assertEqual(
self.sql1('select str_dist(?, ?)', 'abcde', 'eba'),
6)
self.assertEqual(
self.sql1('select str_dist(?, ?)', 'abcde', 'abcde'),
0)
def test_substr_count(self):
self.assertEqual(
self.sql1('select substr_count(?, ?)', 'foo bar baz', 'a'), 2)
self.assertEqual(
self.sql1('select substr_count(?, ?)', 'foo bor baz', 'o'), 3)
self.assertEqual(
self.sql1('select substr_count(?, ?)', 'foodooboope', 'oo'), 3)
self.assertEqual(self.sql1('select substr_count(?, ?)', 'xx', ''), 0)
self.assertEqual(self.sql1('select substr_count(?, ?)', '', ''), 0)
def test_strip_chars(self):
self.assertEqual(
self.sql1('select strip_chars(?, ?)', ' hey foo ', ' '),
'hey foo')
@skip_unless(cython_ext is not None, 'requires sqlite c extension')
@skip_unless(sqlite3.sqlite_version_info >= (3, 9), 'requires sqlite >= 3.9')
| TestScalarFunctions |
python | pytorch__pytorch | torch/utils/data/datapipes/dataframe/dataframes.py | {
"start": 8358,
"end": 9344
} | class ____(Capture):
# TODO(VitalyFedyunin): This should be atomic and thread safe
names_idx = 0
def __init__(self, value, ctx) -> None:
if CaptureControl.disabled:
raise RuntimeError("Attempting to create capture variable with capture off")
self.ctx = ctx
self.value = value
self.name = f"var_{CaptureVariable.names_idx}"
CaptureVariable.names_idx += 1
self.ctx["variables"].append(self)
def __str__(self) -> str:
return self.name
def execute(self):
return self.calculated_value
def apply_ops(self, dataframe):
# TODO(VitalyFedyunin): Make this calculation thread safe (as currently it updates pointer)
# pyrefly: ignore [unsupported-operation]
self.ctx["variables"][0].calculated_value = dataframe
# pyrefly: ignore [not-iterable]
for op in self.ctx["operations"]:
op.execute()
return self.calculated_value
| CaptureVariable |
python | donnemartin__system-design-primer | solutions/object_oriented_design/call_center/call_center.py | {
"start": 1977,
"end": 3263
} | class ____(object):
def __init__(self, operators, supervisors, directors):
self.operators = operators
self.supervisors = supervisors
self.directors = directors
self.queued_calls = deque()
def dispatch_call(self, call):
if call.rank not in (Rank.OPERATOR, Rank.SUPERVISOR, Rank.DIRECTOR):
raise ValueError('Invalid call rank: {}'.format(call.rank))
employee = None
if call.rank == Rank.OPERATOR:
employee = self._dispatch_call(call, self.operators)
if call.rank == Rank.SUPERVISOR or employee is None:
employee = self._dispatch_call(call, self.supervisors)
if call.rank == Rank.DIRECTOR or employee is None:
employee = self._dispatch_call(call, self.directors)
if employee is None:
self.queued_calls.append(call)
def _dispatch_call(self, call, employees):
for employee in employees:
if employee.call is None:
employee.take_call(call)
return employee
return None
def notify_call_escalated(self, call):
pass
def notify_call_completed(self, call):
pass
def dispatch_queued_call_to_newly_freed_employee(self, call, employee):
pass
| CallCenter |
python | PyCQA__pylint | tests/functional/m/modified_iterating.py | {
"start": 2973,
"end": 3299
} | class ____:
"""Regression test for https://github.com/pylint-dev/pylint/issues/7380"""
def __init__(self) -> None:
self.attribute = [1, 2, 3]
def my_method(self):
"""This should raise as we are deleting."""
for var in self.attribute:
del var # [modified-iterating-list]
| MyClass |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 60961,
"end": 61138
} | class ____(_PrintableStructure):
_fields_ = [
('gpu', c_uint),
('memory', c_uint),
]
_fmt_ = {'<default>': "%d %%"}
# Added in 2.285
| c_nvmlUtilization_t |
python | chroma-core__chroma | chromadb/rate_limit/__init__.py | {
"start": 215,
"end": 551
} | class ____(Component):
"""
Rate limit enforcer.
Implemented as a wrapper around server functions to block requests if rate limits are exceeded.
"""
def __init__(self, system: System) -> None:
super().__init__(system)
@abstractmethod
def rate_limit(self, func: T) -> T:
pass
| RateLimitEnforcer |
python | doocs__leetcode | solution/1200-1299/1296.Divide Array in Sets of K Consecutive Numbers/Solution2.py | {
"start": 0,
"end": 475
} | class ____:
def isPossibleDivide(self, nums: List[int], k: int) -> bool:
if len(nums) % k:
return False
cnt = Counter(nums)
sd = SortedDict(cnt)
while sd:
x = next(iter(sd))
for y in range(x, x + k):
if y not in sd:
return False
if sd[y] == 1:
del sd[y]
else:
sd[y] -= 1
return True
| Solution |
python | django__django | tests/admin_scripts/tests.py | {
"start": 33657,
"end": 36840
} | class ____(AdminScriptTestCase):
"""A series of tests for manage.py when using a settings.py file that
contains the test application.
"""
def setUp(self):
super().setUp()
self.write_settings("settings.py")
def test_builtin_command(self):
"""
default: manage.py builtin commands succeed when default settings are
appropriate.
"""
args = ["check", "admin_scripts"]
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_settings(self):
"""
default: manage.py builtin commands succeed if settings are provided as
argument.
"""
args = ["check", "--settings=test_project.settings", "admin_scripts"]
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"""
default: manage.py builtin commands succeed if settings are provided in
the environment.
"""
args = ["check", "admin_scripts"]
out, err = self.run_manage(args, "test_project.settings")
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_bad_settings(self):
"""
default: manage.py builtin commands succeed if settings file (from
argument) doesn't exist.
"""
args = ["check", "--settings=bad_settings", "admin_scripts"]
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"""
default: manage.py builtin commands fail if settings file (from
environment) doesn't exist.
"""
args = ["check", "admin_scripts"]
out, err = self.run_manage(args, "bad_settings")
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"""
default: manage.py can execute user commands when default settings are
appropriate.
"""
args = ["noargs_command"]
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_settings(self):
"""
default: manage.py can execute user commands when settings are provided
as argument.
"""
args = ["noargs_command", "--settings=test_project.settings"]
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_environment(self):
"""
default: manage.py can execute user commands when settings are provided
in environment.
"""
args = ["noargs_command"]
out, err = self.run_manage(args, "test_project.settings")
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
| ManageDefaultSettings |
python | anthropics__anthropic-sdk-python | src/anthropic/lib/streaming/_beta_types.py | {
"start": 975,
"end": 1167
} | class ____(BaseModel):
type: Literal["citation"]
citation: Citation
"""The new citation"""
snapshot: List[Citation]
"""All of the accumulated citations"""
| BetaCitationEvent |
python | pandas-dev__pandas | pandas/tests/indexes/timedeltas/test_indexing.py | {
"start": 9827,
"end": 11637
} | class ____:
@pytest.fixture(params=["increasing", "decreasing", None])
def monotonic(self, request):
return request.param
@pytest.fixture
def tdi(self, monotonic):
tdi = timedelta_range("1 Day", periods=10)
if monotonic == "decreasing":
tdi = tdi[::-1]
elif monotonic is None:
taker = np.arange(10, dtype=np.intp)
np.random.default_rng(2).shuffle(taker)
tdi = tdi.take(taker)
return tdi
def test_maybe_cast_slice_bound_invalid_str(self, tdi):
# test the low-level _maybe_cast_slice_bound and that we get the
# expected exception+message all the way up the stack
msg = (
"cannot do slice indexing on TimedeltaIndex with these "
r"indexers \[foo\] of type str"
)
with pytest.raises(TypeError, match=msg):
tdi._maybe_cast_slice_bound("foo", side="left")
with pytest.raises(TypeError, match=msg):
tdi.get_slice_bound("foo", side="left")
with pytest.raises(TypeError, match=msg):
tdi.slice_locs("foo", None, None)
def test_slice_invalid_str_with_timedeltaindex(
self, tdi, frame_or_series, indexer_sl
):
obj = frame_or_series(range(10), index=tdi)
msg = (
"cannot do slice indexing on TimedeltaIndex with these "
r"indexers \[foo\] of type str"
)
with pytest.raises(TypeError, match=msg):
indexer_sl(obj)["foo":]
with pytest.raises(TypeError, match=msg):
indexer_sl(obj)["foo":-1]
with pytest.raises(TypeError, match=msg):
indexer_sl(obj)[:"foo"]
with pytest.raises(TypeError, match=msg):
indexer_sl(obj)[tdi[0] : "foo"]
| TestMaybeCastSliceBound |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.