language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | xlwings__xlwings | xlwings/constants.py | {
"start": 128218,
"end": 128856
} | class ____:
xlGuess = 0 # from enum XlYesNoGuess
xlNo = 2 # from enum XlYesNoGuess
xlYes = 1 # from enum XlYesNoGuess
shape_types = [
"auto_shape",
"callout",
"canvas",
"chart",
"comment",
"content_app",
"diagram",
"embedded_ole_object",
"form_control",
"free_form",
"group",
"igx_graphic",
"ink",
"ink_comment",
"line",
"linked_ole_object",
"linked_picture",
"media",
"ole_control_object",
"picture",
"placeholder",
"script_anchor",
"shape_type_mixed",
"table",
"text_box",
"text_effect",
"web_video",
]
| YesNoGuess |
python | tensorflow__tensorflow | tensorflow/python/ops/numpy_ops/tests/np_indexing_test.py | {
"start": 16719,
"end": 32939
} | class ____(jtu.TestCase):
"""Tests for Numpy indexing translation rules."""
@parameterized.named_parameters(jtu.cases_from_list({
"testcase_name": "{}_inshape={}_indexer={}".format(
name, jtu.format_shape_dtype_string( shape, dtype), indexer),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory, "indexer": indexer
} for name, index_specs in STATIC_INDEXING_TESTS
for shape, indexer in index_specs
for dtype in all_dtypes
for rng_factory in [jtu.rand_default]))
def testStaticIndexing(self, shape, dtype, rng_factory, indexer):
# TODO(rohanj): Revisit passing in self.rng() to this to customize further.
# This would need updating lax_numpy_test as well.
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype)]
onp_fun = lambda x: x[indexer]
jnp_fun = lambda x: tnp.asarray(x)[indexer]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True,
check_incomplete_shape=True)
def _ReplaceSlicesWithTuples(self, idx):
"""Helper method to replace slices with tuples for dynamic indexing args."""
if isinstance(idx, slice):
triple = idx.start, idx.stop, idx.step
isnone = [i for i, elt in enumerate(triple) if elt is None]
zeros = itertools.repeat(0)
nones = itertools.repeat(None)
out = subvals(triple, zip(isnone, zeros))
return out, lambda out: slice(*subvals(out, zip(isnone, nones)))
elif isinstance(idx, (tuple, list)) and idx:
t = type(idx)
elts, packs = zip(*map(self._ReplaceSlicesWithTuples, idx))
return elts, lambda elts: t((pack(i) for pack, i in zip(packs, elts)))
else:
return idx, lambda x: x
@parameterized.named_parameters(
{"testcase_name": "{}_inshape={}_indexer={}"
.format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory, "indexer": indexer}
for name, index_specs in [
("OneSliceIndex",
[IndexSpec(shape=(5,), indexer=slice(1, 3)),
IndexSpec(shape=(5, 4), indexer=slice(1, 3))]),
("TwoSliceIndices",
[IndexSpec(shape=(5, 4), indexer=(slice(1, 3), slice(0, 2))),
IndexSpec(shape=(5, 4, 3), indexer=(slice(1, 3), slice(0, 2)))]),
("NonUnitStrides", [
IndexSpec(shape=(3,), indexer=slice(None, None, -1)),
IndexSpec(shape=(3, 3), indexer=slice(0, 3, -2)),
IndexSpec(shape=(3, 4, 5), indexer=slice(0, 4, 2))
]),
("OnlyStartOrStopDynamic", [
IndexSpec(shape=(5, 4), indexer=(slice(None, 3), slice(0, 2))),
IndexSpec(shape=(5, 4, 3), indexer=(slice(1, 3), slice(0, None)))
]),
]
for shape, indexer in index_specs
for dtype in all_dtypes
for rng_factory in [jtu.rand_default])
def testDynamicIndexingWithSlices(self, shape, dtype, rng_factory, indexer):
rng = rng_factory()
unpacked_indexer, pack_indexer = self._ReplaceSlicesWithTuples(indexer)
def onp_fun(x, unpacked_indexer):
indexer = pack_indexer(unpacked_indexer)
return x[indexer]
jnp_fun = lambda x, idx: onp_fun(tnp.asarray(x), idx)
args_maker = lambda: [rng(shape, dtype), unpacked_indexer]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
# TODO(wangpeng): check_xla_forced_compile is turned off because some
# compile-time-constant requirements are violated. Investigate and turn it
# on.
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True,
check_eval_on_shapes=False,
check_incomplete_shape=True,
check_xla_forced_compile=False)
@parameterized.named_parameters(
{"testcase_name": "{}_inshape={}_indexer={}"
.format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory, "indexer": indexer}
for name, index_specs in [
("OneIntIndex",
[IndexSpec(shape=(3,), indexer=1),
IndexSpec(shape=(3, 3), indexer=0),
IndexSpec(shape=(3, 4, 5), indexer=2),
IndexSpec(shape=(3,), indexer=-1),
IndexSpec(shape=(3,), indexer=-2)]),
("TwoIntIndices",
[IndexSpec(shape=(3, 3), indexer=(2, 1)),
IndexSpec(shape=(3, 4, 5), indexer=(1, 2)),
IndexSpec(shape=(3, 4, 5), indexer=(-1, 2))]),
("ThreeIntIndices",
[IndexSpec((3, 4, 5), indexer=(1, 2, 3))]),
]
for shape, indexer in index_specs
for dtype in all_dtypes
for rng_factory in [jtu.rand_default])
def testDynamicIndexingWithIntegers(self, shape, dtype, rng_factory, indexer):
# TODO(rohanj): Revisit passing in self.rng() to this to customize further.
# This would need updating lax_numpy_test as well.
rng = rng_factory()
unpacked_indexer, pack_indexer = self._ReplaceSlicesWithTuples(indexer)
def onp_fun(x, unpacked_indexer):
indexer = pack_indexer(unpacked_indexer)
return x[indexer]
jnp_fun = lambda x, idx: onp_fun(tnp.asarray(x), idx)
args_maker = lambda: [rng(shape, dtype), unpacked_indexer]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True,
check_incomplete_shape=True)
@parameterized.named_parameters(
{"testcase_name": "_{}_inshape={}_indexer={}" # pylint: disable=g-complex-comprehension
.format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),
"name": name, "shape": shape, "dtype": dtype, "rng_factory": rng_factory,
"indexer": indexer}
for name, index_specs in ADVANCED_INDEXING_TESTS
for shape, indexer in index_specs
for dtype in all_dtypes
for rng_factory in [jtu.rand_default])
def testAdvancedIntegerIndexing(self, name, shape, dtype, rng_factory,
indexer):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype), indexer]
onp_fun = lambda x, idx: x[idx]
jnp_fun = lambda x, idx: onp_fun(tnp.asarray(x), idx)
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
# TODO(wangpeng): check_xla_forced_compile is turned off for
# ListOfPythonIntsAndIntArrays because it throws "The number of output
# elements has to equal to number of input elements that are sliced when
# input indices are not constant". Investigate and turn it on.
check_xla = (name != "ListOfPythonIntsAndIntArrays")
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True,
check_incomplete_shape=True,
check_xla_forced_compile=check_xla)
@parameterized.named_parameters(
{"testcase_name": "_{}_inshape={}_indexer={}" # pylint: disable=g-complex-comprehension
.format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),
"name": name, "shape": shape, "dtype": dtype, "rng_factory": rng_factory,
"indexer": indexer}
for name, index_specs in MIXED_ADVANCED_INDEXING_TESTS
for shape, indexer in index_specs
for dtype in all_dtypes
for rng_factory in [jtu.rand_default])
def testMixedAdvancedIntegerIndexing(self, name, shape, dtype, rng_factory,
indexer):
rng = rng_factory()
indexer_with_dummies = [e if isinstance(e, onp.ndarray) else ()
for e in indexer]
substitutes = [(i, e) for i, e in enumerate(indexer)
if not isinstance(e, onp.ndarray)]
args_maker = lambda: [rng(shape, dtype), indexer_with_dummies]
def np_fun(x, indexer_with_dummies):
idx = type(indexer)(subvals(indexer_with_dummies, substitutes))
return x[idx]
jnp_fun = lambda x, idx: np_fun(tnp.asarray(x), idx)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
# TODO(wangpeng): check_xla_forced_compile is turned off for
# IntArrayWithInt32Type because it throws "The number of output elements has
# to equal to number of input elements that are sliced when input indices
# are not constant". Investigate and turn it on.
check_xla = (name != "IntArrayWithInt32Type")
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True,
check_incomplete_shape=True,
check_xla_forced_compile=check_xla)
def testAdvancedIndexingManually(self):
x = onp.random.RandomState(0).randn(3, 4, 5)
index_array = onp.array([0, 2, -1, 0])
op = lambda x, index_array: x[..., index_array, :]
cop = nje.jit(op)
a1 = op(x, index_array)
a2 = cop(x, index_array)
self.assertAllClose(a1, a2, check_dtypes=True)
op = lambda x, index_array: x[..., index_array, :, index_array, None]
cop = nje.jit(op)
a1 = op(x, index_array)
a2 = cop(x, index_array)
self.assertAllClose(a1, a2, check_dtypes=True)
op = lambda x, index_array: x[index_array, ..., index_array[:, None], None]
cop = nje.jit(op)
a1 = op(x, index_array)
a2 = cop(x, index_array)
self.assertAllClose(a1, a2, check_dtypes=True)
# Note that we don't currently allow __iter__ in graph mode. So this test only
# iterates over eager tensor.
def testUnpacking(self):
def foo(x):
a, b, c = x
return a + b + c
a1 = foo(onp.arange(3))
a2 = foo(tnp.arange(3))
self.assertAllClose(a1, a2, check_dtypes=True)
def testBooleanIndexingArray1D(self):
idx = onp.array([True, True, False])
x = tnp.asarray(onp.arange(3))
ans = x[idx]
expected = onp.arange(3)[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
def testBooleanIndexingList1D(self):
idx = [True, True, False]
x = tnp.asarray(onp.arange(3))
ans = x[idx]
expected = onp.arange(3)[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
def testBooleanIndexingArray2DBroadcast(self):
idx = onp.array([True, True, False, True])
x = onp.arange(8).reshape(4, 2)
ans = tnp.asarray(x)[idx]
expected = x[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
def testBooleanIndexingList2DBroadcast(self):
idx = [True, True, False, True]
x = onp.arange(8).reshape(4, 2)
ans = tnp.asarray(x)[idx]
expected = x[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
def testBooleanIndexingArray2D(self):
idx = onp.array([[True, False],
[False, True],
[False, False],
[True, True]])
x = onp.arange(8).reshape(4, 2)
ans = tnp.asarray(x)[idx]
expected = x[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
def testBooleanIndexingDynamicShape(self):
x = onp.zeros(3)
i = onp.array([True, True, False])
ans = x[i]
expected = tnp.asarray(x)[i]
self.assertAllClose(ans, expected, check_dtypes=True)
def testIssue187(self):
x = tnp.ones((5, 5))
x[[0, 2, 4], [0, 2, 4]] # doesn't crash
x = onp.arange(25).reshape((5, 5))
ans = nje.jit(lambda x: x[[0, 2, 4], [0, 2, 4]])(x)
expected = x[[0, 2, 4], [0, 2, 4]]
self.assertAllClose(ans, expected, check_dtypes=False)
# TODO(agarwal): Fix this use case.
@jtu.disable
def testIndexingEmptyDimension(self):
# Issue 2671: XLA error when indexing into dimension of size 0
x = tnp.ones((2, 0))
# The following work, even on axis 1 of size 0
_ = x[0, :] + x[0, None] + x[0, 1:] + x[0, 1:3:2]
with self.assertRaisesRegex(IndexError,
"index .* is out of bounds for axis .* with size 0"):
_ = onp.ones((2, 0))[0, 0] # The numpy error
with self.assertRaisesRegex(IndexError,
"index is out of bounds for axis .* with size 0"):
_ = x[0, 0] # JAX indexing
with self.assertRaisesRegex(IndexError,
"index is out of bounds for axis .* with size 0"):
nje.jit(lambda i: x[0, i])(0) # JAX indexing under jit
def testBooleanIndexingWithEmptyResult(self):
# based on a TensorFlow Probability test that started failing after #1623
x = tnp.array([-1])
mask = tnp.array([False])
ans = x[mask] # doesn't crash
expected = onp.array([-1])[onp.array([False])]
self.assertAllClose(ans, expected, check_dtypes=False)
def testFloatIndexingError(self):
error_regex = "only integers, slices.*are valid indices"
# Verify onp behavior
with self.assertRaisesRegex(IndexError, error_regex):
_ = onp.zeros((2, 2))[(0, 0.)]
# Test tnp
with self.assertRaisesRegex(IndexError, error_regex):
tnp.zeros(2)[0.] # pylint: disable=expression-not-assigned
with self.assertRaisesRegex(IndexError, error_regex):
tnp.zeros((2, 2))[(0, 0.)] # pylint: disable=expression-not-assigned
# Test with jit
with self.assertRaisesRegex(IndexError, error_regex):
nje.jit(lambda idx: tnp.zeros((2, 2))[idx])((0, 0.0))
def testIndexOutOfBounds(self): # https://github.com/google/jax/issues/2245
array = tnp.ones(5)
self.assertAllClose(array, array[:10], check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_start_indices={}_size_indices={}".format( # pylint: disable=g-complex-comprehension
jtu.format_shape_dtype_string(shape, dtype),
start_indices, size_indices),
"shape": shape, "dtype": dtype, "start_indices": start_indices,
"size_indices": size_indices, "rng_factory": rng_factory}
for shape, start_indices, size_indices in [
[(3,), onp.array((1,)), (1,)],
[(5, 3), (1, 1), (3, 1)],
[(5, 3), (1, -2), (3, 1)],
[(5, 3), onp.array((1, 1)), (3, 1)],
[(7, 5, 3), onp.array((4, 1, 0)), (2, 0, 1)],
[(), (), ()],
]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testDynamicSlice(self, shape, dtype, start_indices, size_indices,
rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype), onp.array(start_indices)]
op = lambda x, starts: nje.dynamic_slice(x, starts, size_indices)
self._CompileAndCheck(op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_start_indices={}_size_indices={}".format( # pylint: disable=g-complex-comprehension
jtu.format_shape_dtype_string(shape, dtype),
start_indices, size_indices),
"shape": shape, "dtype": dtype, "start_indices": start_indices,
"size_indices": size_indices, "rng_factory": rng_factory}
for shape, start_indices, size_indices in [
[(3,), (1,), (1,)],
[(5, 3), (1, 1), (3, 1)],
[(5, 3), (1, -2), (3, 1)],
[(7, 5, 3), (4, 1, 0), (2, 0, 1)],
[(), (), ()],
]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testDynamicSliceAgainstNumpy(self, shape, dtype, start_indices,
size_indices, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype), onp.array(start_indices)]
op = lambda x, s: nje.dynamic_slice(x, s, size_indices)
numpy_op = lambda x, s: dynamic_slice_reference(x, s, size_indices)
self._CheckAgainstNumpy(numpy_op, op, args_maker)
def testDynamicSliceInDim(self):
rng = jtu.rand_default()
x = rng((6, 7), onp.int32)
self.assertAllClose(
nje.dynamic_slice_in_dim(x, 2, 3), x[2:5], check_dtypes=True
)
def _broadcastable_shapes(shape):
"""Returns all shapes that broadcast to `shape`."""
def f(rshape):
yield []
if rshape:
for s in f(rshape[1:]):
yield rshape[0:1] + s
if rshape[0] != 1:
for s in f(rshape[1:]):
yield [1] + s
for x in f(list(reversed(shape))):
yield list(reversed(x))
def _update_shape(shape, indexer):
return onp.zeros(shape)[indexer].shape
| IndexingTest |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 1054579,
"end": 1054754
} | class ____(sgqlc.types.Union):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__types__ = (EnterpriseUserAccount, User)
| EnterpriseMember |
python | openai__openai-python | src/openai/types/beta/realtime/conversation_item_create_event_param.py | {
"start": 292,
"end": 1101
} | class ____(TypedDict, total=False):
item: Required[ConversationItemParam]
"""The item to add to the conversation."""
type: Required[Literal["conversation.item.create"]]
"""The event type, must be `conversation.item.create`."""
event_id: str
"""Optional client-generated ID used to identify this event."""
previous_item_id: str
"""The ID of the preceding item after which the new item will be inserted.
If not set, the new item will be appended to the end of the conversation. If set
to `root`, the new item will be added to the beginning of the conversation. If
set to an existing ID, it allows an item to be inserted mid-conversation. If the
ID cannot be found, an error will be returned and the item will not be added.
"""
| ConversationItemCreateEventParam |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/scheduler_tests/test_pythonic_resources.py | {
"start": 1136,
"end": 6398
} | class ____(
dg.ConfigurableResource, dg.IAttachDifferentObjectToOpContext
):
a_str: str
def get_object_to_set_on_execution_context(self) -> str:
return self.a_str
@schedule(job_name="the_job", cron_schedule="* * * * *", required_resource_keys={"my_resource"})
def schedule_from_context(context: ScheduleEvaluationContext):
return dg.RunRequest(context.resources.my_resource.a_str, run_config={}, tags={})
@schedule(job_name="the_job", cron_schedule="* * * * *")
def schedule_from_arg(my_resource: MyResource):
return dg.RunRequest(my_resource.a_str, run_config={}, tags={})
@schedule(job_name="the_job", cron_schedule="* * * * *")
def schedule_from_weird_name(
my_resource: MyResource, not_called_context: ScheduleEvaluationContext
):
assert not_called_context.resources.my_resource.a_str == my_resource.a_str
return dg.RunRequest(my_resource.a_str, run_config={}, tags={})
@schedule(job_name="the_job", cron_schedule="* * * * *")
def schedule_with_resource_from_context(
context: ScheduleEvaluationContext, my_resource_attach: MyResourceAttachDifferentObject
):
assert context.resources.my_resource_attach == my_resource_attach.a_str
return dg.RunRequest(my_resource_attach.a_str, run_config={}, tags={})
@dg.resource
def the_inner() -> str:
return "oo"
@dg.resource(required_resource_keys={"the_inner"})
def the_outer(init_context) -> str:
return "f" + init_context.resources.the_inner
@schedule(
job_name="the_job",
required_resource_keys={"the_outer"},
cron_schedule="* * * * *",
)
def schedule_resource_deps(context):
return dg.RunRequest("foo", run_config={}, tags={})
the_repo = dg.Definitions(
jobs=[the_job],
schedules=[
schedule_from_context,
schedule_from_arg,
schedule_from_weird_name,
schedule_with_resource_from_context,
schedule_resource_deps,
],
resources={
"my_resource": MyResource(a_str="foo"),
"my_resource_attach": MyResourceAttachDifferentObject(a_str="foo"),
"the_inner": the_inner,
"the_outer": the_outer,
},
)
def create_workspace_load_target(attribute: Optional[str] = SINGLETON_REPOSITORY_NAME):
return ModuleTarget(
module_name="dagster_tests.scheduler_tests.test_pythonic_resources",
attribute=None,
working_directory=os.path.join(os.path.dirname(__file__), "..", ".."),
location_name="test_location",
)
@pytest.fixture(name="workspace_context_struct_resources", scope="module")
def workspace_fixture(instance_module_scoped):
with create_test_daemon_workspace_context(
workspace_load_target=create_workspace_load_target(),
instance=instance_module_scoped,
) as workspace:
yield workspace
@pytest.fixture(name="remote_repo_struct_resources", scope="module")
def remote_repo_fixture(workspace_context_struct_resources: WorkspaceProcessContext):
repo_loc = next(
iter(
workspace_context_struct_resources.create_request_context()
.get_code_location_entries()
.values()
)
).code_location
assert repo_loc
return repo_loc.get_repository(SINGLETON_REPOSITORY_NAME)
def loadable_target_origin() -> LoadableTargetOrigin:
return LoadableTargetOrigin(
executable_path=sys.executable,
module_name="dagster_tests.daemon_schedule_tests.test_pythonic_resources",
working_directory=os.getcwd(),
attribute=None,
)
@pytest.mark.parametrize(
"schedule_name",
[
"schedule_from_context",
"schedule_from_arg",
"schedule_from_weird_name",
"schedule_with_resource_from_context",
"schedule_resource_deps",
],
)
def test_resources(
caplog,
instance: DagsterInstance,
workspace_context_struct_resources,
remote_repo_struct_resources,
schedule_name,
) -> None:
freeze_datetime = create_datetime(
year=2019,
month=2,
day=27,
hour=23,
minute=59,
second=59,
).astimezone(get_timezone("US/Central"))
with freeze_time(freeze_datetime):
schedule = remote_repo_struct_resources.get_schedule(schedule_name)
instance.start_schedule(schedule)
assert instance.get_runs_count() == 0
ticks = instance.get_ticks(schedule.get_remote_origin_id(), schedule.selector_id)
assert len(ticks) == 0
freeze_datetime = freeze_datetime + relativedelta(seconds=30)
with freeze_time(freeze_datetime):
evaluate_schedules(workspace_context_struct_resources, None, get_current_datetime())
wait_for_all_runs_to_start(instance)
ticks: Sequence[InstigatorTick] = instance.get_ticks(
schedule.get_remote_origin_id(), schedule.selector_id
)
assert len(ticks) == 1
assert instance.get_runs_count() == 1
run = next(iter(instance.get_runs()))
assert ticks[0].run_keys == ["foo"]
expected_datetime = create_datetime(year=2019, month=2, day=28)
validate_tick(
ticks[0],
schedule,
expected_datetime,
TickStatus.SUCCESS,
expected_run_ids=[run.run_id],
)
| MyResourceAttachDifferentObject |
python | psf__black | src/black/mode.py | {
"start": 7982,
"end": 10769
} | class ____:
target_versions: set[TargetVersion] = field(default_factory=set)
line_length: int = DEFAULT_LINE_LENGTH
string_normalization: bool = True
is_pyi: bool = False
is_ipynb: bool = False
skip_source_first_line: bool = False
magic_trailing_comma: bool = True
python_cell_magics: set[str] = field(default_factory=set)
preview: bool = False
unstable: bool = False
enabled_features: set[Preview] = field(default_factory=set)
def __contains__(self, feature: Preview) -> bool:
"""
Provide `Preview.FEATURE in Mode` syntax that mirrors the ``preview`` flag.
In unstable mode, all features are enabled. In preview mode, all features
except those in UNSTABLE_FEATURES are enabled. Any features in
`self.enabled_features` are also enabled.
"""
if self.unstable:
return True
if feature in self.enabled_features:
return True
return self.preview and feature not in UNSTABLE_FEATURES
def get_cache_key(self) -> str:
if self.target_versions:
version_str = ",".join(
str(version.value)
for version in sorted(self.target_versions, key=attrgetter("value"))
)
else:
version_str = "-"
if len(version_str) > _MAX_CACHE_KEY_PART_LENGTH:
version_str = sha256(version_str.encode()).hexdigest()[
:_MAX_CACHE_KEY_PART_LENGTH
]
features_and_magics = (
",".join(sorted(f.name for f in self.enabled_features))
+ "@"
+ ",".join(sorted(self.python_cell_magics))
)
if len(features_and_magics) > _MAX_CACHE_KEY_PART_LENGTH:
features_and_magics = sha256(features_and_magics.encode()).hexdigest()[
:_MAX_CACHE_KEY_PART_LENGTH
]
parts = [
version_str,
str(self.line_length),
str(int(self.string_normalization)),
str(int(self.is_pyi)),
str(int(self.is_ipynb)),
str(int(self.skip_source_first_line)),
str(int(self.magic_trailing_comma)),
str(int(self.preview)),
str(int(self.unstable)),
features_and_magics,
]
return ".".join(parts)
def __hash__(self) -> int:
return hash((
frozenset(self.target_versions),
self.line_length,
self.string_normalization,
self.is_pyi,
self.is_ipynb,
self.skip_source_first_line,
self.magic_trailing_comma,
frozenset(self.python_cell_magics),
self.preview,
self.unstable,
frozenset(self.enabled_features),
))
| Mode |
python | mlflow__mlflow | mlflow/store/jobs/sqlalchemy_store.py | {
"start": 596,
"end": 10829
} | class ____(AbstractJobStore):
"""
SQLAlchemy compliant backend store for storing Job metadata.
This store interacts with SQL store using SQLAlchemy abstractions defined
for MLflow Job entities.
"""
def __init__(self, db_uri):
"""
Create a database backed store.
Args:
db_uri: The SQLAlchemy database URI string to connect to the database.
"""
super().__init__()
self.db_uri = db_uri
self.db_type = extract_db_type_from_uri(db_uri)
self.engine = mlflow.store.db.utils.create_sqlalchemy_engine_with_retry(db_uri)
mlflow.store.db.utils._safe_initialize_tables(self.engine)
SessionMaker = sqlalchemy.orm.sessionmaker(bind=self.engine)
self.ManagedSessionMaker = mlflow.store.db.utils._get_managed_session_maker(
SessionMaker, self.db_type
)
def create_job(self, function_fullname: str, params: str, timeout: float | None = None) -> Job:
"""
Create a new job with the specified function and parameters.
Args:
function_fullname: The full name of the function to execute
params: The job parameters that are serialized as a JSON string
timeout: The job execution timeout in seconds
Returns:
Job entity instance
"""
with self.ManagedSessionMaker() as session:
job_id = str(uuid.uuid4())
creation_time = get_current_time_millis()
job = SqlJob(
id=job_id,
creation_time=creation_time,
function_fullname=function_fullname,
params=params,
timeout=timeout,
status=JobStatus.PENDING.to_int(),
result=None,
last_update_time=creation_time,
)
session.add(job)
session.flush()
return job.to_mlflow_entity()
def _update_job(self, job_id: str, new_status: JobStatus, result: str | None = None) -> None:
with self.ManagedSessionMaker() as session:
job = self._get_sql_job(session, job_id)
job.status = new_status.to_int()
if result is not None:
job.result = result
job.last_update_time = get_current_time_millis()
def start_job(self, job_id: str) -> None:
"""
Start a job by setting its status to RUNNING.
Only succeeds if the job is currently in PENDING state.
Args:
job_id: The ID of the job to start
Raises:
MlflowException: If job is not in PENDING state or doesn't exist
"""
with self.ManagedSessionMaker() as session:
# Atomic update: only transition from PENDING to RUNNING
rows_updated = (
session.query(SqlJob)
.filter(SqlJob.id == job_id, SqlJob.status == JobStatus.PENDING.to_int())
.update(
{
SqlJob.status: JobStatus.RUNNING.to_int(),
SqlJob.last_update_time: get_current_time_millis(),
}
)
)
if rows_updated == 0:
job = session.query(SqlJob).filter(SqlJob.id == job_id).one_or_none()
if job is None:
raise MlflowException(
f"Job with ID {job_id} not found", error_code=RESOURCE_DOES_NOT_EXIST
)
raise MlflowException(
f"Job {job_id} is in {JobStatus.from_int(job.status)} state, "
"cannot start (must be PENDING)"
)
def reset_job(self, job_id: str) -> None:
"""
Reset a job by setting its status to PENDING.
Args:
job_id: The ID of the job to re-enqueue.
"""
self._update_job(job_id, JobStatus.PENDING)
def finish_job(self, job_id: str, result: str) -> None:
"""
Finish a job by setting its status to DONE and setting the result.
Args:
job_id: The ID of the job to finish
result: The job result as a string
"""
self._update_job(job_id, JobStatus.SUCCEEDED, result)
def fail_job(self, job_id: str, error: str) -> None:
"""
Fail a job by setting its status to FAILED and setting the error message.
Args:
job_id: The ID of the job to fail
error: The error message as a string
"""
self._update_job(job_id, JobStatus.FAILED, error)
def mark_job_timed_out(self, job_id: str) -> None:
"""
Set a job status to Timeout.
Args:
job_id: The ID of the job
"""
self._update_job(job_id, JobStatus.TIMEOUT)
def retry_or_fail_job(self, job_id: str, error: str) -> int | None:
"""
If the job retry_count is less than maximum allowed retry count,
increment the retry_count and reset the job to PENDING status,
otherwise set the job to FAILED status and fill the job's error field.
Args:
job_id: The ID of the job to fail
error: The error message as a string
Returns:
If the job is allowed to retry, returns the retry count,
otherwise returns None.
"""
from mlflow.environment_variables import MLFLOW_SERVER_JOB_TRANSIENT_ERROR_MAX_RETRIES
max_retries = MLFLOW_SERVER_JOB_TRANSIENT_ERROR_MAX_RETRIES.get()
with self.ManagedSessionMaker() as session:
job = self._get_sql_job(session, job_id)
if job.retry_count >= max_retries:
job.status = JobStatus.FAILED.to_int()
job.result = error
return None
job.retry_count += 1
job.status = JobStatus.PENDING.to_int()
job.last_update_time = get_current_time_millis()
return job.retry_count
def list_jobs(
self,
function_fullname: str | None = None,
statuses: list[JobStatus] | None = None,
begin_timestamp: int | None = None,
end_timestamp: int | None = None,
params: dict[str, Any] | None = None,
) -> Iterator[Job]:
"""
List jobs based on the provided filters.
Args:
function_fullname: Filter by function full name (exact match)
statuses: Filter by a list of job status (PENDING, RUNNING, DONE, FAILED, TIMEOUT)
begin_timestamp: Filter jobs created after this timestamp (inclusive)
end_timestamp: Filter jobs created before this timestamp (inclusive)
params: Filter jobs by matching job params dict with the provided params dict.
e.g., if `params` is ``{'a': 3, 'b': 4}``, it can match the following job params:
``{'a': 3, 'b': 4}``, ``{'a': 3, 'b': 4, 'c': 5}``, but it does not match the
following job params: ``{'a': 3, 'b': 6}``, ``{'a': 3, 'c': 5}``.
Returns:
Iterator of Job entities that match the filters, ordered by creation time (oldest first)
"""
offset = 0
def filter_by_params(job_params: dict[str, Any]) -> bool:
for key in params:
if key in job_params:
if job_params[key] != params[key]:
return False
else:
return False
return True
while True:
with self.ManagedSessionMaker() as session:
# Select all columns needed for Job entity
query = session.query(SqlJob)
# Apply filters
if function_fullname is not None:
query = query.filter(SqlJob.function_fullname == function_fullname)
if statuses:
query = query.filter(
SqlJob.status.in_([status.to_int() for status in statuses])
)
if begin_timestamp is not None:
query = query.filter(SqlJob.creation_time >= begin_timestamp)
if end_timestamp is not None:
query = query.filter(SqlJob.creation_time <= end_timestamp)
# Order by creation time (oldest first) and apply pagination
jobs = (
query.order_by(SqlJob.creation_time)
.offset(offset)
.limit(_LIST_JOB_PAGE_SIZE)
.all()
)
# If no jobs returned, we've reached the end
if not jobs:
break
# Yield each job
if params:
for job in jobs:
if filter_by_params(json.loads(job.params)):
yield job.to_mlflow_entity()
else:
for job in jobs:
yield job.to_mlflow_entity()
# If we got fewer jobs than page_size, we've reached the end
if len(jobs) < _LIST_JOB_PAGE_SIZE:
break
# Move to next page
offset += _LIST_JOB_PAGE_SIZE
def _get_sql_job(self, session, job_id) -> SqlJob:
job = session.query(SqlJob).filter(SqlJob.id == job_id).one_or_none()
if job is None:
raise MlflowException(
f"Job with ID {job_id} not found", error_code=RESOURCE_DOES_NOT_EXIST
)
return job
def get_job(self, job_id: str) -> Job:
"""
Get a job by its ID.
Args:
job_id: The ID of the job to retrieve
Returns:
Job entity
Raises:
MlflowException: If job with the given ID is not found
"""
with self.ManagedSessionMaker() as session:
job = self._get_sql_job(session, job_id)
if job is None:
raise MlflowException(
f"Job with ID {job_id} not found", error_code=RESOURCE_DOES_NOT_EXIST
)
return job.to_mlflow_entity()
| SqlAlchemyJobStore |
python | kamyu104__LeetCode-Solutions | Python/find-minimum-time-to-finish-all-jobs.py | {
"start": 103,
"end": 1114
} | class ____(object):
def minimumTimeRequired(self, jobs, k):
"""
:type jobs: List[int]
:type k: int
:rtype: int
"""
def backtracking(jobs, i, cap, counts):
if i == len(jobs):
return True
for j in xrange(len(counts)):
if counts[j]+jobs[i] <= cap:
counts[j] += jobs[i]
if backtracking(jobs, i+1, cap, counts):
return True
counts[j] -= jobs[i]
if counts[j] == 0:
break
return False
jobs.sort(reverse=True)
left, right = max(jobs), sum(jobs)
while left <= right:
mid = left + (right-left)//2
if backtracking(jobs, 0, mid, [0]*k):
right = mid-1
else:
left = mid+1
return left
# Time: O(k * k^n), the real complexity shoud be less, but hard to analyze
# Space: O(n + k)
| Solution |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py | {
"start": 33793,
"end": 33871
} | class ____(Enum):
None_ = "None"
Default = "Default"
| SchemaNormalization |
python | docker__docker-py | tests/integration/api_container_test.py | {
"start": 38634,
"end": 41613
} | class ____(BaseAPIIntegrationTest):
def test_kill(self):
container = self.client.create_container(TEST_IMG, ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(id)
container_info = self.client.inspect_container(id)
assert 'State' in container_info
state = container_info['State']
assert 'ExitCode' in state
assert state['ExitCode'] != 0
assert 'Running' in state
assert state['Running'] is False
def test_kill_with_dict_instead_of_id(self):
container = self.client.create_container(TEST_IMG, ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(container)
container_info = self.client.inspect_container(id)
assert 'State' in container_info
state = container_info['State']
assert 'ExitCode' in state
assert state['ExitCode'] != 0
assert 'Running' in state
assert state['Running'] is False
def test_kill_with_signal(self):
id = self.client.create_container(TEST_IMG, ['sleep', '60'])
self.tmp_containers.append(id)
self.client.start(id)
self.client.kill(
id, signal=signal.SIGKILL if not IS_WINDOWS_PLATFORM else 9
)
exitcode = self.client.wait(id)['StatusCode']
assert exitcode != 0
container_info = self.client.inspect_container(id)
assert 'State' in container_info
state = container_info['State']
assert 'ExitCode' in state
assert state['ExitCode'] != 0
assert 'Running' in state
assert state['Running'] is False, state
def test_kill_with_signal_name(self):
id = self.client.create_container(TEST_IMG, ['sleep', '60'])
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(id, signal='SIGKILL')
exitcode = self.client.wait(id)['StatusCode']
assert exitcode != 0
container_info = self.client.inspect_container(id)
assert 'State' in container_info
state = container_info['State']
assert 'ExitCode' in state
assert state['ExitCode'] != 0
assert 'Running' in state
assert state['Running'] is False, state
def test_kill_with_signal_integer(self):
id = self.client.create_container(TEST_IMG, ['sleep', '60'])
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(id, signal=9)
exitcode = self.client.wait(id)['StatusCode']
assert exitcode != 0
container_info = self.client.inspect_container(id)
assert 'State' in container_info
state = container_info['State']
assert 'ExitCode' in state
assert state['ExitCode'] != 0
assert 'Running' in state
assert state['Running'] is False, state
| KillTest |
python | ethereum__web3.py | ens/_normalization.py | {
"start": 1355,
"end": 2035
} | class ____:
type: Literal[TokenType.TEXT, TokenType.EMOJI]
_original_text: str
_original_codepoints: list[int]
_normalized_codepoints: list[int] | None = None
restricted: bool = False
def __init__(self, codepoints: list[int]) -> None:
self._original_codepoints = codepoints
self._original_text = "".join(chr(cp) for cp in codepoints)
@property
def codepoints(self) -> list[int]:
return (
self._normalized_codepoints
if self._normalized_codepoints
else self._original_codepoints
)
@property
def text(self) -> str:
return _codepoints_to_text(self.codepoints)
| Token |
python | tensorflow__tensorflow | third_party/xla/xla/hlo/tools/generate_hlo_test_checks.py | {
"start": 14533,
"end": 21950
} | class ____:
"""Generates FileCheck comments from HLO IR."""
_MODULE_REGEX: re.Pattern[str] = re.compile(
r"^HloModule\b",
)
_CHECK_LINE_REGEX: re.Pattern[str] = re.compile(
r"^// (CHECK(?:-\w+)?): .*?%[\w.\-]+ *(=|$)",
)
_SYMBOL_NAME_REGEX: re.Pattern[str] = re.compile(
r"(?<=%)[\w\-]+(?:\.[\w\-]+)*(?:\.\d+)?",
)
_NON_SYMBOL_NAME_CHARS_REGEX: re.Pattern[str] = re.compile(
r"\W",
)
_END_OF_FUNCTION_SCOPE_SENTINEL_VALUE: str = DirectiveComment.COM.format_line(
"(End of function scope.)"
)
def __init__(self, input_stream: Iterator[str]):
"""HloFileCheckLines constructor."""
self._input_stream: Iterator[str] = input_stream
self._on_first_line: bool = True
self._at_section_break: bool = True
self._num_symbols_with_normalized_name: dict[str, int] = dict()
self._global_symbol_replacement_cache: dict[str, str] = dict()
self._local_symbol_replacement_cache: dict[str, str] = dict()
def __iter__(self) -> Iterator[str]:
"""Converts HLO instructions to FileCheck directives where applicable."""
return self._replace_symbol_names_with_regex_captures(
self._prefix_lines_with_check_directives(self._input_stream)
)
def _prefix_lines_with_check_directives(
self, input_stream: Iterator[str]
) -> Iterator[str]:
"""Prepends "// CHECK-XXX:" directives to HLO instructions."""
for line in input_stream:
stripped_line = line.strip()
# Keep track of section breaks, i.e. empty lines. If we see multiple empty
# lines in a row, collapse them down into a single one. Also prune any
# empty lines at the very start of the file.
if not stripped_line:
self._at_section_break = True
continue
# Leave out closing-brace lines, but replace them with "// COM:" (comment)
# directives to tell the symbol replacer to clear its cache of local-scope
# symbols. (The symbol replacer will also remove these added lines.)
#
# NOTE: We could just clear the local-symbol cache here instead of telling
# the next stage of the pipeline to do it, but that would blur the API
# boundaries and could introduce bugs if the iteration behavior changed.
if stripped_line == "}":
yield self._END_OF_FUNCTION_SCOPE_SENTINEL_VALUE
continue
first_line_of_new_section = self._at_section_break
if self._at_section_break:
if self._on_first_line:
self._on_first_line = False
else:
yield "\n"
self._at_section_break = False
if self._MODULE_REGEX.match(stripped_line):
yield DirectiveComment.CHECK_LABEL.format_line(stripped_line)
elif first_line_of_new_section:
yield _format_function_declaration_file_check(stripped_line)
else:
yield DirectiveComment.CHECK_NEXT.format_line(stripped_line)
def _replace_symbol_names_with_regex_captures(
self,
input_stream: Iterator[str],
) -> Iterator[str]:
"""Replaces HLO instruction & function names with FileCheck regex captures.
Replaces explicit symbol names in FileCheck directives with regex
captures. Lines that don't start with FileCheck directives are unchanged.
Args:
input_stream: An iterator to the lines of an HLO test.
Yields:
The transformed lines of the HLO test.
"""
for line in input_stream:
match: Optional[re.Match[str]] = self._CHECK_LINE_REGEX.match(line)
if match is None:
if line == self._END_OF_FUNCTION_SCOPE_SENTINEL_VALUE:
self._local_symbol_replacement_cache.clear()
else:
yield line
continue
# "CHECK-LABEL" doesn't support regex captures; it's intended for symbols
# with explicit names that should be checked verbatim.
is_verbatim: bool = match.group(1) == "CHECK-LABEL"
# `match.group(2)` captures "=" when matching an assignment and "" when
# matching a function declaration. Functions should be treated as having
# global scope, whereas assignments should go out of scope at the end of
# a function.
assert match.group(2) == "=" or not match.group(2)
is_global: bool = not match.group(2)
yield re.sub(self._SYMBOL_NAME_REGEX,
functools.partial(self._replacer,
is_verbatim=is_verbatim,
is_global=is_global),
line)
def _replacer(
self,
match: re.Match[str],
is_verbatim: bool,
is_global: bool,
) -> str:
"""A symbol-name replacement function for use in `re.sub`.
Args:
match: The match object produced by `self._SYMBOL_NAME_REGEX`.
is_verbatim: Whether the newly matched symbol appears in a "CHECK-LABEL"
directive, in which case it should be checked verbatim (not replaced
with a regex capture).
is_global: Whether the newly matched symbol appears in a declaration at
global scope, i.e. whether it's a function name. If so, it should be
remembered across function boundaries.
Returns:
The replacement string for the symbol name.
"""
symbol_name = match.group(0)
if symbol_name in self._local_symbol_replacement_cache:
return self._local_symbol_replacement_cache[symbol_name]
if symbol_name in self._global_symbol_replacement_cache:
return self._global_symbol_replacement_cache[symbol_name]
if is_verbatim:
declaration_replacement = symbol_name
reference_replacement = symbol_name
else:
capture_name = self._generate_unique_name(symbol_name)
capture_pattern = r"[^ ]+"
maybe_global_flag = "$" if is_global else ""
declaration_replacement = (
f"[[{maybe_global_flag}{capture_name}:{capture_pattern}]]"
)
reference_replacement = f"[[{maybe_global_flag}{capture_name}]]"
if is_global:
self._global_symbol_replacement_cache[symbol_name] = reference_replacement
else:
self._local_symbol_replacement_cache[symbol_name] = reference_replacement
return declaration_replacement
def _generate_unique_name(self, symbol_name: str) -> str:
"""Translates a symbol name to a unique FileCheck capture name.
Replaces all characters other than letters, numbers, and underscores with
underscores. If the resulting name has already been used, appends a counter
to disambiguate it. For example, this could result in the following sequence
of replacements:
1.) "foo.bar.baz" -> "foo_bar_baz"
2.) "foo.bar_baz" -> "foo_bar_baz_1"
3.) "foo_bar.baz" -> "foo_bar_baz_2"
4.) "foo_bar_baz" -> "foo_bar_baz_3"
Args:
symbol_name: The original symbol name.
Returns:
The generated FileCheck capture name.
"""
normalized_symbol_name = self._NON_SYMBOL_NAME_CHARS_REGEX.sub(
"_", symbol_name
)
normalized_name_conflict_count = self._num_symbols_with_normalized_name.get(
normalized_symbol_name, 0
)
self._num_symbols_with_normalized_name[normalized_symbol_name] = (
normalized_name_conflict_count + 1
)
optional_disambiguation_suffix = ("" if normalized_name_conflict_count == 0
else f"_{normalized_name_conflict_count}")
return f"{normalized_symbol_name}{optional_disambiguation_suffix}"
| HloFileCheckLines |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/layout/processors.py | {
"start": 17330,
"end": 18055
} | class ____(BeforeInput):
"""
Display the 'arg' in front of the input.
This was used by the `PromptSession`, but now it uses the
`Window.get_line_prefix` function instead.
"""
def __init__(self) -> None:
super().__init__(self._get_text_fragments)
def _get_text_fragments(self) -> StyleAndTextTuples:
app = get_app()
if app.key_processor.arg is None:
return []
else:
arg = app.key_processor.arg
return [
("class:prompt.arg", "(arg: "),
("class:prompt.arg.text", str(arg)),
("class:prompt.arg", ") "),
]
def __repr__(self) -> str:
return "ShowArg()"
| ShowArg |
python | great-expectations__great_expectations | tests/datasource/fluent/_fake_cloud_api.py | {
"start": 1744,
"end": 1925
} | class ____(pydantic.BaseModel, extra="allow"):
id: Optional[str] = None
type: str
name: str
assets: List[dict] = pydantic.Field(default_factory=list)
| _DatasourceSchema |
python | jazzband__django-polymorphic | src/polymorphic/tests/test_orm.py | {
"start": 2101,
"end": 48925
} | class ____(TransactionTestCase):
"""
The test suite
"""
def test_annotate_aggregate_order(self):
# create a blog of type BlogA
# create two blog entries in BlogA
# create some blogs of type BlogB to make the BlogBase table data really polymorphic
blog = BlogA.objects.create(name="B1", info="i1")
blog.blogentry_set.create(text="bla")
BlogEntry.objects.create(blog=blog, text="bla2")
BlogB.objects.create(name="Bb1")
BlogB.objects.create(name="Bb2")
BlogB.objects.create(name="Bb3")
qs = BlogBase.objects.annotate(entrycount=Count("BlogA___blogentry"))
assert len(qs) == 4
for o in qs:
if o.name == "B1":
assert o.entrycount == 2
else:
assert o.entrycount == 0
x = BlogBase.objects.aggregate(entrycount=Count("BlogA___blogentry"))
assert x["entrycount"] == 2
# create some more blogs for next test
BlogA.objects.create(name="B2", info="i2")
BlogA.objects.create(name="B3", info="i3")
BlogA.objects.create(name="B4", info="i4")
BlogA.objects.create(name="B5", info="i5")
# test ordering for field in all entries
expected = """
[ <BlogB: id 4, name (CharField) "Bb3">,
<BlogB: id 3, name (CharField) "Bb2">,
<BlogB: id 2, name (CharField) "Bb1">,
<BlogA: id 8, name (CharField) "B5", info (CharField) "i5">,
<BlogA: id 7, name (CharField) "B4", info (CharField) "i4">,
<BlogA: id 6, name (CharField) "B3", info (CharField) "i3">,
<BlogA: id 5, name (CharField) "B2", info (CharField) "i2">,
<BlogA: id 1, name (CharField) "B1", info (CharField) "i1"> ]"""
assert repr(BlogBase.objects.order_by("-name")).strip() == expected.strip()
# different RDBMS return different orders for the nulls, and we can't use F
# and nulls_first or nulls_last here to standardize it, so our test is
# conditional
blog_names = [blg.name for blg in BlogBase.objects.order_by("-BlogA___info")]
ordered = blog_names[:3]
if all([name.startswith("Bb") for name in ordered]):
ordered = blog_names[3:]
else:
assert all([name.startswith("Bb") for name in blog_names[-3:]])
ordered = blog_names[:-3]
assert ordered == ["B5", "B4", "B3", "B2", "B1"]
def test_limit_choices_to(self):
"""
this is not really a testcase, as limit_choices_to only affects the Django admin
"""
# create a blog of type BlogA
blog_a = BlogA.objects.create(name="aa", info="aa")
blog_b = BlogB.objects.create(name="bb")
# create two blog entries
entry1 = BlogEntry_limit_choices_to.objects.create(blog=blog_b, text="bla2")
entry2 = BlogEntry_limit_choices_to.objects.create(blog=blog_b, text="bla2")
def test_primary_key_custom_field_problem(self):
"""
object retrieval problem occuring with some custom primary key fields (UUIDField as test case)
"""
UUIDProject.objects.create(topic="John's gathering")
UUIDArtProject.objects.create(topic="Sculpting with Tim", artist="T. Turner")
UUIDResearchProject.objects.create(topic="Swallow Aerodynamics", supervisor="Dr. Winter")
qs = UUIDProject.objects.all()
ol = list(qs)
a = qs[0]
b = qs[1]
c = qs[2]
assert len(qs) == 3
assert isinstance(a.uuid_primary_key, uuid.UUID)
assert isinstance(a.pk, uuid.UUID)
res = re.sub(' "(.*?)..", topic', ", topic", repr(qs))
res_exp = """[ <UUIDProject: uuid_primary_key (UUIDField/pk), topic (CharField) "John's gathering">,
<UUIDArtProject: uuid_primary_key (UUIDField/pk), topic (CharField) "Sculpting with Tim", artist (CharField) "T. Turner">,
<UUIDResearchProject: uuid_primary_key (UUIDField/pk), topic (CharField) "Swallow Aerodynamics", supervisor (CharField) "Dr. Winter"> ]"""
assert res == res_exp
a = UUIDPlainA.objects.create(field1="A1")
b = UUIDPlainB.objects.create(field1="B1", field2="B2")
c = UUIDPlainC.objects.create(field1="C1", field2="C2", field3="C3")
qs = UUIDPlainA.objects.all()
# Test that primary key values are valid UUIDs
assert uuid.UUID(f"urn:uuid:{a.pk}", version=1) == a.pk
assert uuid.UUID(f"urn:uuid:{c.pk}", version=1) == c.pk
def create_model2abcd(self):
"""
Create the chain of objects of Model2,
this is reused in various tests.
"""
a = Model2A.objects.create(field1="A1")
b = Model2B.objects.create(field1="B1", field2="B2")
c = Model2C.objects.create(field1="C1", field2="C2", field3="C3")
d = Model2D.objects.create(field1="D1", field2="D2", field3="D3", field4="D4")
return a, b, c, d
def test_simple_inheritance(self):
self.create_model2abcd()
objects = Model2A.objects.all()
self.assertQuerySetEqual(
objects,
[Model2A, Model2B, Model2C, Model2D],
transform=lambda o: o.__class__,
ordered=False,
)
def test_defer_fields(self):
self.create_model2abcd()
objects_deferred = Model2A.objects.defer("field1").order_by("id")
assert "field1" not in objects_deferred[0].__dict__, (
"field1 was not deferred (using defer())"
)
# Check that we have exactly one deferred field ('field1') per resulting object.
for obj in objects_deferred:
deferred_fields = obj.get_deferred_fields()
assert len(deferred_fields) == 1
assert "field1" in deferred_fields
objects_only = Model2A.objects.only("pk", "polymorphic_ctype", "field1")
assert "field1" in objects_only[0].__dict__, (
'qs.only("field1") was used, but field1 was incorrectly deferred'
)
assert "field1" in objects_only[3].__dict__, (
'qs.only("field1") was used, but field1 was incorrectly deferred on a child model'
)
assert "field4" not in objects_only[3].__dict__, "field4 was not deferred (using only())"
assert "field1" not in objects_only[0].get_deferred_fields()
assert "field2" in objects_only[1].get_deferred_fields()
# objects_only[2] has several deferred fields, ensure they are all set as such.
model2c_deferred = objects_only[2].get_deferred_fields()
assert "field2" in model2c_deferred
assert "field3" in model2c_deferred
assert "model2a_ptr_id" in model2c_deferred
# objects_only[3] has a few more fields that should be set as deferred.
model2d_deferred = objects_only[3].get_deferred_fields()
assert "field2" in model2d_deferred
assert "field3" in model2d_deferred
assert "field4" in model2d_deferred
assert "model2a_ptr_id" in model2d_deferred
assert "model2b_ptr_id" in model2d_deferred
ModelX.objects.create(field_b="A1", field_x="A2")
ModelY.objects.create(field_b="B1", field_y="B2")
# If we defer a field on a descendent, the parent's field is not deferred.
objects_deferred = Base.objects.defer("ModelY___field_y")
assert "field_y" not in objects_deferred[0].get_deferred_fields()
assert "field_y" in objects_deferred[1].get_deferred_fields()
objects_only = Base.objects.only(
"polymorphic_ctype", "ModelY___field_y", "ModelX___field_x"
)
assert "field_b" in objects_only[0].get_deferred_fields()
assert "field_b" in objects_only[1].get_deferred_fields()
def test_defer_related_fields(self):
self.create_model2abcd()
objects_deferred_field4 = Model2A.objects.defer("Model2D___field4")
assert "field4" not in objects_deferred_field4[3].__dict__, (
"field4 was not deferred (using defer(), traversing inheritance)"
)
assert objects_deferred_field4[0].__class__ == Model2A
assert objects_deferred_field4[1].__class__ == Model2B
assert objects_deferred_field4[2].__class__ == Model2C
assert objects_deferred_field4[3].__class__ == Model2D
objects_only_field4 = Model2A.objects.only(
"polymorphic_ctype",
"field1",
"Model2B___id",
"Model2B___field2",
"Model2B___model2a_ptr",
"Model2C___id",
"Model2C___field3",
"Model2C___model2b_ptr",
"Model2D___id",
"Model2D___model2c_ptr",
)
assert objects_only_field4[0].__class__ == Model2A
assert objects_only_field4[1].__class__ == Model2B
assert objects_only_field4[2].__class__ == Model2C
assert objects_only_field4[3].__class__ == Model2D
def test_manual_get_real_instance(self):
self.create_model2abcd()
o = Model2A.objects.non_polymorphic().get(field1="C1")
assert o.get_real_instance().__class__ == Model2C
def test_get_real_instance_with_stale_content_type(self):
ctype = ContentType.objects.create(app_label="tests", model="stale")
o = Model2A.objects.create(field1="A1", polymorphic_ctype=ctype)
assert o.get_real_instance_class() is None
match = "does not have a corresponding model"
with pytest.raises(PolymorphicTypeInvalid, match=match):
o.get_real_instance()
def test_non_polymorphic(self):
self.create_model2abcd()
objects = list(Model2A.objects.all().non_polymorphic())
self.assertQuerySetEqual(
objects,
[Model2A, Model2A, Model2A, Model2A],
transform=lambda o: o.__class__,
)
def test_get_real_instances(self):
self.create_model2abcd()
qs = Model2A.objects.all().non_polymorphic()
# from queryset
objects = qs.get_real_instances()
self.assertQuerySetEqual(
objects,
[Model2A, Model2B, Model2C, Model2D],
transform=lambda o: o.__class__,
)
# from a manual list
objects = Model2A.objects.get_real_instances(list(qs))
self.assertQuerySetEqual(
objects,
[Model2A, Model2B, Model2C, Model2D],
transform=lambda o: o.__class__,
)
# from empty list
objects = Model2A.objects.get_real_instances([])
self.assertQuerySetEqual(objects, [], transform=lambda o: o.__class__)
def test_queryset_missing_derived(self):
a = Model2A.objects.create(field1="A1")
b = Model2B.objects.create(field1="B1", field2="B2")
c = Model2C.objects.create(field1="C1", field2="C2", field3="C3")
b_base = Model2A.objects.non_polymorphic().get(pk=b.pk)
c_base = Model2A.objects.non_polymorphic().get(pk=c.pk)
b.delete(keep_parents=True) # e.g. table was truncated
qs_base = Model2A.objects.order_by("field1").non_polymorphic()
qs_polymorphic = Model2A.objects.order_by("field1").all()
assert list(qs_base) == [a, b_base, c_base]
assert list(qs_polymorphic) == [a, c]
def test_queryset_missing_contenttype(self):
stale_ct = ContentType.objects.create(app_label="tests", model="nonexisting")
a1 = Model2A.objects.create(field1="A1")
a2 = Model2A.objects.create(field1="A2")
c = Model2C.objects.create(field1="C1", field2="C2", field3="C3")
c_base = Model2A.objects.non_polymorphic().get(pk=c.pk)
Model2B.objects.filter(pk=a2.pk).update(polymorphic_ctype=stale_ct)
qs_base = Model2A.objects.order_by("field1").non_polymorphic()
qs_polymorphic = Model2A.objects.order_by("field1").all()
assert list(qs_base) == [a1, a2, c_base]
assert list(qs_polymorphic) == [a1, a2, c]
def test_translate_polymorphic_q_object(self):
self.create_model2abcd()
q = Model2A.translate_polymorphic_Q_object(Q(instance_of=Model2C))
objects = Model2A.objects.filter(q)
self.assertQuerySetEqual(
objects, [Model2C, Model2D], transform=lambda o: o.__class__, ordered=False
)
def test_create_instanceof_q(self):
q = query_translate.create_instanceof_q([Model2B])
expected = sorted(
ContentType.objects.get_for_model(m).pk for m in [Model2B, Model2C, Model2D]
)
assert dict(q.children) == dict(polymorphic_ctype__in=expected)
def test_base_manager(self):
def base_manager(model):
return (type(model._base_manager), model._base_manager.model)
assert base_manager(PlainA) == (models.Manager, PlainA)
assert base_manager(PlainB) == (models.Manager, PlainB)
assert base_manager(PlainC) == (models.Manager, PlainC)
assert base_manager(Model2A) == (PolymorphicManager, Model2A)
assert base_manager(Model2B) == (PolymorphicManager, Model2B)
assert base_manager(Model2C) == (PolymorphicManager, Model2C)
assert base_manager(One2OneRelatingModel) == (PolymorphicManager, One2OneRelatingModel)
assert base_manager(One2OneRelatingModelDerived) == (
PolymorphicManager,
One2OneRelatingModelDerived,
)
def test_instance_default_manager(self):
def default_manager(instance):
return (
type(instance.__class__._default_manager),
instance.__class__._default_manager.model,
)
plain_a = PlainA(field1="C1")
plain_b = PlainB(field2="C1")
plain_c = PlainC(field3="C1")
model_2a = Model2A(field1="C1")
model_2b = Model2B(field2="C1")
model_2c = Model2C(field3="C1")
assert default_manager(plain_a) == (models.Manager, PlainA)
assert default_manager(plain_b) == (models.Manager, PlainB)
assert default_manager(plain_c) == (models.Manager, PlainC)
assert default_manager(model_2a) == (PolymorphicManager, Model2A)
assert default_manager(model_2b) == (PolymorphicManager, Model2B)
assert default_manager(model_2c) == (PolymorphicManager, Model2C)
def test_foreignkey_field(self):
self.create_model2abcd()
object2a = Model2A.objects.get(field1="C1")
assert object2a.model2b.__class__ == Model2B
object2b = Model2B.objects.get(field1="C1")
assert object2b.model2c.__class__ == Model2C
def test_onetoone_field(self):
self.create_model2abcd()
# FIXME: We should not use base_objects here.
a = Model2A.base_objects.get(field1="C1")
b = One2OneRelatingModelDerived.objects.create(one2one=a, field1="f1", field2="f2")
# FIXME: this result is basically wrong, probably due to Django cacheing (we used base_objects), but should not be a problem
assert b.one2one.__class__ == Model2A
assert b.one2one_id == b.one2one.id
c = One2OneRelatingModelDerived.objects.get(field1="f1")
assert c.one2one.__class__ == Model2C
assert a.one2onerelatingmodel.__class__ == One2OneRelatingModelDerived
def test_manytomany_field(self):
# Model 1
o = ModelShow1.objects.create(field1="abc")
o.m2m.add(o)
o.save()
assert (
repr(ModelShow1.objects.all())
== "[ <ModelShow1: id 1, field1 (CharField), m2m (ManyToManyField)> ]"
)
# Model 2
o = ModelShow2.objects.create(field1="abc")
o.m2m.add(o)
o.save()
assert repr(ModelShow2.objects.all()) == '[ <ModelShow2: id 1, field1 "abc", m2m 1> ]'
# Model 3
o = ModelShow3.objects.create(field1="abc")
o.m2m.add(o)
o.save()
assert (
repr(ModelShow3.objects.all())
== '[ <ModelShow3: id 1, field1 (CharField) "abc", m2m (ManyToManyField) 1> ]'
)
assert (
repr(ModelShow1.objects.all().annotate(Count("m2m")))
== "[ <ModelShow1: id 1, field1 (CharField), m2m (ManyToManyField) - Ann: m2m__count (int)> ]"
)
assert (
repr(ModelShow2.objects.all().annotate(Count("m2m")))
== '[ <ModelShow2: id 1, field1 "abc", m2m 1 - Ann: m2m__count 1> ]'
)
assert (
repr(ModelShow3.objects.all().annotate(Count("m2m")))
== '[ <ModelShow3: id 1, field1 (CharField) "abc", m2m (ManyToManyField) 1 - Ann: m2m__count (int) 1> ]'
)
# no pretty printing
ModelShow1_plain.objects.create(field1="abc")
ModelShow2_plain.objects.create(field1="abc", field2="def")
self.assertQuerySetEqual(
ModelShow1_plain.objects.all(),
[ModelShow1_plain, ModelShow2_plain],
transform=lambda o: o.__class__,
ordered=False,
)
def test_extra_method(self):
from django.db import connection
a, b, c, d = self.create_model2abcd()
objects = Model2A.objects.extra(where=[f"id IN ({b.id}, {c.id})"])
self.assertQuerySetEqual(
objects, [Model2B, Model2C], transform=lambda o: o.__class__, ordered=False
)
if connection.vendor == "oracle":
objects = Model2A.objects.extra(
select={"select_test": "CASE WHEN field1 = 'A1' THEN 1 ELSE 0 END"},
where=["field1 = 'A1' OR field1 = 'B1'"],
order_by=["-id"],
)
else:
objects = Model2A.objects.extra(
select={"select_test": "field1 = 'A1'"},
where=["field1 = 'A1' OR field1 = 'B1'"],
order_by=["-id"],
)
self.assertQuerySetEqual(objects, [Model2B, Model2A], transform=lambda o: o.__class__)
ModelExtraA.objects.create(field1="A1")
ModelExtraB.objects.create(field1="B1", field2="B2")
ModelExtraC.objects.create(field1="C1", field2="C2", field3="C3")
ModelExtraExternal.objects.create(topic="extra1")
ModelExtraExternal.objects.create(topic="extra2")
ModelExtraExternal.objects.create(topic="extra3")
objects = ModelExtraA.objects.extra(
tables=["tests_modelextraexternal"],
select={"topic": "tests_modelextraexternal.topic"},
where=["tests_modelextraa.id = tests_modelextraexternal.id"],
)
assert (
repr(objects[0])
== '<ModelExtraA: id 1, field1 (CharField) "A1" - Extra: topic (str) "extra1">'
)
assert (
repr(objects[1])
== '<ModelExtraB: id 2, field1 (CharField) "B1", field2 (CharField) "B2" - Extra: topic (str) "extra2">'
)
assert (
repr(objects[2])
== '<ModelExtraC: id 3, field1 (CharField) "C1", field2 (CharField) "C2", field3 (CharField) "C3" - Extra: topic (str) "extra3">'
)
assert len(objects) == 3
def test_instance_of_filter(self):
self.create_model2abcd()
objects = Model2A.objects.instance_of(Model2B)
self.assertQuerySetEqual(
objects,
[Model2B, Model2C, Model2D],
transform=lambda o: o.__class__,
ordered=False,
)
objects = Model2A.objects.filter(instance_of=Model2B)
self.assertQuerySetEqual(
objects,
[Model2B, Model2C, Model2D],
transform=lambda o: o.__class__,
ordered=False,
)
objects = Model2A.objects.filter(Q(instance_of=Model2B))
self.assertQuerySetEqual(
objects,
[Model2B, Model2C, Model2D],
transform=lambda o: o.__class__,
ordered=False,
)
objects = Model2A.objects.not_instance_of(Model2B)
self.assertQuerySetEqual(
objects, [Model2A], transform=lambda o: o.__class__, ordered=False
)
def test_polymorphic___filter(self):
self.create_model2abcd()
objects = Model2A.objects.filter(Q(Model2B___field2="B2") | Q(Model2C___field3="C3"))
self.assertQuerySetEqual(
objects, [Model2B, Model2C], transform=lambda o: o.__class__, ordered=False
)
def test_polymorphic_applabel___filter(self):
self.create_model2abcd()
assert Model2B._meta.app_label == "tests"
objects = Model2A.objects.filter(
Q(tests__Model2B___field2="B2") | Q(tests__Model2C___field3="C3")
)
self.assertQuerySetEqual(
objects, [Model2B, Model2C], transform=lambda o: o.__class__, ordered=False
)
def test_query_filter_exclude_is_immutable(self):
# given
q_to_reuse = Q(Model2B___field2="something")
untouched_q_object = Q(Model2B___field2="something")
# when
Model2A.objects.filter(q_to_reuse).all()
# then
assert q_to_reuse.children == untouched_q_object.children
# given
q_to_reuse = Q(Model2B___field2="something")
untouched_q_object = Q(Model2B___field2="something")
# when
Model2B.objects.filter(q_to_reuse).all()
# then
assert q_to_reuse.children == untouched_q_object.children
def test_polymorphic___filter_field(self):
p = ModelUnderRelParent.objects.create(_private=True, field1="AA")
ModelUnderRelChild.objects.create(parent=p, _private2=True)
# The "___" filter should also parse to "parent" -> "_private" as fallback.
objects = ModelUnderRelChild.objects.filter(parent___private=True)
assert len(objects) == 1
def test_polymorphic___filter_reverse_field(self):
p = ModelUnderRelParent.objects.create(_private=True, field1="BB")
ModelUnderRelChild.objects.create(parent=p, _private2=True)
# Also test for reverse relations
objects = ModelUnderRelParent.objects.filter(children___private2=True)
assert len(objects) == 1
def test_delete(self):
a, b, c, d = self.create_model2abcd()
oa = Model2A.objects.get(id=b.id)
assert oa.__class__ == Model2B
assert Model2A.objects.count() == 4
oa.delete()
objects = Model2A.objects.all()
self.assertQuerySetEqual(
objects,
[Model2A, Model2C, Model2D],
transform=lambda o: o.__class__,
ordered=False,
)
def test_combine_querysets(self):
ModelX.objects.create(field_x="x", field_b="1")
ModelY.objects.create(field_y="y", field_b="2")
qs = Base.objects.instance_of(ModelX) | Base.objects.instance_of(ModelY)
qs = qs.order_by("field_b")
assert repr(qs[0]) == "<ModelX: id 1, field_b (CharField), field_x (CharField)>"
assert repr(qs[1]) == "<ModelY: id 2, field_b (CharField), field_y (CharField)>"
assert len(qs) == 2
def test_multiple_inheritance(self):
# multiple inheritance, subclassing third party models (mix PolymorphicModel with models.Model)
Enhance_Base.objects.create(field_b="b-base")
Enhance_Inherit.objects.create(field_b="b-inherit", field_p="p", field_i="i")
qs = Enhance_Base.objects.all()
assert len(qs) == 2
assert (
repr(qs[0]) == '<Enhance_Base: base_id (AutoField/pk) 1, field_b (CharField) "b-base">'
)
assert (
repr(qs[1])
== '<Enhance_Inherit: base_id (AutoField/pk) 2, field_b (CharField) "b-inherit", id 1, field_p (CharField) "p", field_i (CharField) "i">'
)
def test_relation_base(self):
# ForeignKey, ManyToManyField
obase = RelationBase.objects.create(field_base="base")
oa = RelationA.objects.create(field_base="A1", field_a="A2", fk=obase)
ob = RelationB.objects.create(field_base="B1", field_b="B2", fk=oa)
oc = RelationBC.objects.create(field_base="C1", field_b="C2", field_c="C3", fk=oa)
oa.m2m.add(oa)
oa.m2m.add(ob)
objects = RelationBase.objects.all()
assert (
repr(objects[0])
== '<RelationBase: id 1, field_base (CharField) "base", fk (ForeignKey) None, m2m (ManyToManyField) 0>'
)
assert (
repr(objects[1])
== '<RelationA: id 2, field_base (CharField) "A1", fk (ForeignKey) RelationBase, field_a (CharField) "A2", m2m (ManyToManyField) 2>'
)
assert (
repr(objects[2])
== '<RelationB: id 3, field_base (CharField) "B1", fk (ForeignKey) RelationA, field_b (CharField) "B2", m2m (ManyToManyField) 1>'
)
assert (
repr(objects[3])
== '<RelationBC: id 4, field_base (CharField) "C1", fk (ForeignKey) RelationA, field_b (CharField) "C2", field_c (CharField) "C3", m2m (ManyToManyField) 0>'
)
assert len(objects) == 4
oa = RelationBase.objects.get(id=2)
assert (
repr(oa.fk)
== '<RelationBase: id 1, field_base (CharField) "base", fk (ForeignKey) None, m2m (ManyToManyField) 0>'
)
objects = oa.relationbase_set.order_by("pk").all()
assert (
repr(objects[0])
== '<RelationB: id 3, field_base (CharField) "B1", fk (ForeignKey) RelationA, field_b (CharField) "B2", m2m (ManyToManyField) 1>'
)
assert (
repr(objects[1])
== '<RelationBC: id 4, field_base (CharField) "C1", fk (ForeignKey) RelationA, field_b (CharField) "C2", field_c (CharField) "C3", m2m (ManyToManyField) 0>'
)
assert len(objects) == 2
ob = RelationBase.objects.get(id=3)
assert (
repr(ob.fk)
== '<RelationA: id 2, field_base (CharField) "A1", fk (ForeignKey) RelationBase, field_a (CharField) "A2", m2m (ManyToManyField) 2>'
)
oa = RelationA.objects.get()
objects = oa.m2m.order_by("pk").all()
assert (
repr(objects[0])
== '<RelationA: id 2, field_base (CharField) "A1", fk (ForeignKey) RelationBase, field_a (CharField) "A2", m2m (ManyToManyField) 2>'
)
assert (
repr(objects[1])
== '<RelationB: id 3, field_base (CharField) "B1", fk (ForeignKey) RelationA, field_b (CharField) "B2", m2m (ManyToManyField) 1>'
)
assert len(objects) == 2
def test_user_defined_manager(self):
    """The user-supplied default manager (MyManager) is used and applies its own ordering."""
    self.create_model2abcd()
    ModelWithMyManager.objects.create(field1="D1a", field4="D4a")
    ModelWithMyManager.objects.create(field1="D1b", field4="D4b")
    # MyManager should reverse the sorting of field1
    objects = ModelWithMyManager.objects.all()
    self.assertQuerySetEqual(
        objects,
        [(ModelWithMyManager, "D1b", "D4b"), (ModelWithMyManager, "D1a", "D4a")],
        transform=lambda o: (o.__class__, o.field1, o.field4),
    )
    assert type(ModelWithMyManager.objects) is MyManager
    assert type(ModelWithMyManager._default_manager) is MyManager

def test_user_defined_manager_as_secondary(self):
    """A secondary custom manager works while the default stays PolymorphicManager."""
    self.create_model2abcd()
    ModelWithMyManagerNoDefault.objects.create(field1="D1a", field4="D4a")
    ModelWithMyManagerNoDefault.objects.create(field1="D1b", field4="D4b")
    # MyManager should reverse the sorting of field1
    objects = ModelWithMyManagerNoDefault.my_objects.all()
    self.assertQuerySetEqual(
        objects,
        [
            (ModelWithMyManagerNoDefault, "D1b", "D4b"),
            (ModelWithMyManagerNoDefault, "D1a", "D4a"),
        ],
        transform=lambda o: (o.__class__, o.field1, o.field4),
    )
    assert type(ModelWithMyManagerNoDefault.my_objects) is MyManager
    assert type(ModelWithMyManagerNoDefault.objects) is PolymorphicManager
    assert type(ModelWithMyManagerNoDefault._default_manager) is PolymorphicManager
def test_user_objects_manager_as_secondary(self):
    """When the custom manager is declared first, it becomes _default_manager."""
    self.create_model2abcd()
    ModelWithMyManagerDefault.objects.create(field1="D1a", field4="D4a")
    ModelWithMyManagerDefault.objects.create(field1="D1b", field4="D4b")
    assert type(ModelWithMyManagerDefault.my_objects) is MyManager
    assert type(ModelWithMyManagerDefault.objects) is PolymorphicManager
    assert type(ModelWithMyManagerDefault._default_manager) is MyManager

def test_user_defined_queryset_as_manager(self):
    """A manager built via QuerySet.as_manager() keeps polymorphic behavior."""
    self.create_model2abcd()
    ModelWithMyManager2.objects.create(field1="D1a", field4="D4a")
    ModelWithMyManager2.objects.create(field1="D1b", field4="D4b")
    objects = ModelWithMyManager2.objects.all()
    self.assertQuerySetEqual(
        objects,
        [(ModelWithMyManager2, "D1a", "D4a"), (ModelWithMyManager2, "D1b", "D4b")],
        transform=lambda o: (o.__class__, o.field1, o.field4),
        ordered=False,
    )
    # The generated manager class name encodes the source queryset class.
    assert (
        type(ModelWithMyManager2.objects).__name__ == "PolymorphicManagerFromMyManagerQuerySet"
    )
    assert (
        type(ModelWithMyManager2._default_manager).__name__
        == "PolymorphicManagerFromMyManagerQuerySet"
    )
def test_manager_inheritance(self):
    """Manager resolution follows the MRO of the manager base classes."""
    # by choice of MRO, should be MyManager from MROBase1.
    assert type(MRODerived.objects) is MyManager

def test_queryset_assignment(self):
    """Related sets are built from _default_manager for plain and polymorphic models alike."""
    # This is just a consistency check for now, testing standard Django behavior.
    parent = PlainParentModelWithManager.objects.create()
    child = PlainChildModelWithManager.objects.create(fk=parent)
    assert type(PlainParentModelWithManager._default_manager) is models.Manager
    assert type(PlainChildModelWithManager._default_manager) is PlainMyManager
    assert type(PlainChildModelWithManager.objects) is PlainMyManager
    assert type(PlainChildModelWithManager.objects.all()) is PlainMyManagerQuerySet
    # A related set is created using the model's _default_manager, so does gain extra methods.
    assert type(parent.childmodel_set.my_queryset_foo()) is PlainMyManagerQuerySet
    # For polymorphic models, the same should happen.
    parent = ParentModelWithManager.objects.create()
    child = ChildModelWithManager.objects.create(fk=parent)
    assert type(ParentModelWithManager._default_manager) is PolymorphicManager
    assert type(ChildModelWithManager._default_manager) is MyManager
    assert type(ChildModelWithManager.objects) is MyManager
    assert type(ChildModelWithManager.objects.my_queryset_foo()) is MyManagerQuerySet
    # A related set is created using the model's _default_manager, so does gain extra methods.
    assert type(parent.childmodel_set.my_queryset_foo()) is MyManagerQuerySet
def test_proxy_models(self):
    """Proxy subclasses are returned polymorphically without per-row queries."""
    # prepare some data
    for data in ("bleep bloop", "I am a", "computer"):
        ProxyChild.objects.create(some_data=data)
    # this caches ContentType queries so they don't interfere with our query counts later
    list(ProxyBase.objects.all())
    # one query per concrete class
    with self.assertNumQueries(1):
        items = list(ProxyBase.objects.all())
    assert isinstance(items[0], ProxyChild)

def test_queryset_on_proxy_model_does_not_return_superclasses(self):
    """Querying the proxy child must not include plain base instances."""
    ProxyBase.objects.create(some_data="Base1")
    ProxyBase.objects.create(some_data="Base2")
    ProxyChild.objects.create(some_data="Child1")
    ProxyChild.objects.create(some_data="Child2")
    ProxyChild.objects.create(some_data="Child3")
    assert ProxyBase.objects.count() == 5
    assert ProxyChild.objects.count() == 3

def test_proxy_get_real_instance_class(self):
    """
    The call to ``get_real_instance()`` also checks whether the returned model is of the correct type.
    This unit test guards that this check is working properly. For instance,
    proxy child models need to be handled separately.
    """
    name = "Item1"
    nonproxychild = NonProxyChild.objects.create(name=name)
    pb = ProxyBase.objects.get(id=1)
    assert pb.get_real_instance_class() == NonProxyChild
    assert pb.get_real_instance() == nonproxychild
    assert pb.name == name
    pbm = NonProxyChild.objects.get(id=1)
    assert pbm.get_real_instance_class() == NonProxyChild
    assert pbm.get_real_instance() == nonproxychild
    assert pbm.name == name

def test_content_types_for_proxy_models(self):
    """Checks if ContentType is capable of returning proxy models."""
    from django.contrib.contenttypes.models import ContentType
    ct = ContentType.objects.get_for_model(ProxyChild, for_concrete_model=False)
    assert ProxyChild == ct.model_class()
def test_proxy_model_inheritance(self):
    """
    Polymorphic abilities should also work when the base model is a proxy object.
    """
    # The managers should point to the proper objects.
    # otherwise, the whole exercise is pointless.
    assert ProxiedBase.objects.model == ProxiedBase
    assert ProxyModelBase.objects.model == ProxyModelBase
    assert ProxyModelA.objects.model == ProxyModelA
    assert ProxyModelB.objects.model == ProxyModelB
    # Create objects
    object1_pk = ProxyModelA.objects.create(name="object1").pk
    object2_pk = ProxyModelB.objects.create(name="object2", field2="bb").pk
    # Getting single objects
    object1 = ProxyModelBase.objects.get(name="object1")
    object2 = ProxyModelBase.objects.get(name="object2")
    assert repr(object1) == (
        f'<ProxyModelA: id {object1_pk}, name (CharField) "object1", field1 (CharField) "">'
    )
    assert repr(object2) == (
        '<ProxyModelB: id %i, name (CharField) "object2", field2 (CharField) "bb">'
        % object2_pk
    )
    assert isinstance(object1, ProxyModelA)
    assert isinstance(object2, ProxyModelB)
    # Same for lists
    objects = list(ProxyModelBase.objects.all().order_by("name"))
    assert repr(objects[0]) == (
        f'<ProxyModelA: id {object1_pk}, name (CharField) "object1", field1 (CharField) "">'
    )
    assert repr(objects[1]) == (
        '<ProxyModelB: id %i, name (CharField) "object2", field2 (CharField) "bb">'
        % object2_pk
    )
    assert isinstance(objects[0], ProxyModelA)
    assert isinstance(objects[1], ProxyModelB)
def test_custom_pk(self):
    """Child models with a custom primary-key field are retrieved polymorphically."""
    CustomPkBase.objects.create(b="b")
    CustomPkInherit.objects.create(b="b", i="i")
    qs = CustomPkBase.objects.all()
    assert len(qs) == 2
    assert repr(qs[0]) == '<CustomPkBase: id 1, b (CharField) "b">'
    assert (
        repr(qs[1])
        == '<CustomPkInherit: id 2, b (CharField) "b", custom_id (AutoField/pk) 1, i (CharField) "i">'
    )

def test_fix_getattribute(self):
    """Regression tests for PolymorphicModel.__getattribute__ edge cases."""
    # fixed issue in PolymorphicModel.__getattribute__: field name same as model name
    o = ModelFieldNameTest.objects.create(modelfieldnametest="1")
    assert repr(o) == "<ModelFieldNameTest: id 1, modelfieldnametest (CharField)>"
    # if subclass defined __init__ and accessed class members,
    # __getattribute__ had a problem: "...has no attribute 'sub_and_superclass_dict'"
    o = InitTestModelSubclass.objects.create()
    assert o.bar == "XYZ"

def test_parent_link_and_related_name(self):
    """parent_link OneToOne with a related_name resolves in both directions."""
    t = TestParentLinkAndRelatedName(field1="TestParentLinkAndRelatedName")
    t.save()
    p = ModelShow1_plain.objects.get(field1="TestParentLinkAndRelatedName")
    # check that p is equal to the
    assert isinstance(p, TestParentLinkAndRelatedName)
    assert p == t
    # check that the accessors to parent and sublass work correctly and return the right object
    p = ModelShow1_plain.objects.non_polymorphic().get(field1="TestParentLinkAndRelatedName")
    # p should be Plain1 and t TestParentLinkAndRelatedName, so not equal
    assert p != t
    assert p == t.superclass
    assert p.related_name_subclass == t
    # test that we can delete the object
    t.delete()
def test_polymorphic__aggregate(self):
    """test ModelX___field syntax on aggregate (should work for annotate either)"""
    Model2A.objects.create(field1="A1")
    Model2B.objects.create(field1="A1", field2="B2")
    Model2B.objects.create(field1="A1", field2="B2")
    # aggregate using **kwargs
    result = Model2A.objects.aggregate(cnt=Count("Model2B___field2"))
    assert result == {"cnt": 2}
    # aggregate using **args
    with pytest.raises(
        AssertionError,
        match="model lookup supported for keyword arguments only",
    ):
        Model2A.objects.aggregate(Count("Model2B___field2"))

def test_polymorphic__aggregate_empty_queryset(self):
    """test the fix for test___lookup in Django 5.1+"""
    line = ModelOrderLine.objects.create()
    result = line.articles.aggregate(Sum("sales_points"))
    assert result == {"sales_points__sum": None}

def test_polymorphic__complex_aggregate(self):
    """test (complex expression on) aggregate (should work for annotate either)"""
    Model2A.objects.create(field1="A1")
    Model2B.objects.create(field1="A1", field2="B2")
    Model2B.objects.create(field1="A1", field2="B2")
    # aggregate using **kwargs
    result = Model2A.objects.aggregate(
        cnt_a1=Count(Case(When(field1="A1", then=1))),
        cnt_b2=Count(Case(When(Model2B___field2="B2", then=1))),
    )
    assert result == {"cnt_b2": 2, "cnt_a1": 3}
    # aggregate using **args
    # we have to set the default alias or django won't accept a complex expression
    # on aggregate/annotate
    def ComplexAgg(expression):
        # Count(...) * 10 needs an explicit default_alias to be usable as a
        # positional aggregate argument.
        complexagg = Count(expression) * 10
        complexagg.default_alias = "complexagg"
        return complexagg
    with pytest.raises(
        AssertionError,
        match="model lookup supported for keyword arguments only",
    ):
        Model2A.objects.aggregate(ComplexAgg("Model2B___field2"))
def test_polymorphic__filtered_relation(self):
    """test annotation using FilteredRelation"""
    blog = BlogA.objects.create(name="Ba1", info="i1 joined")
    blog.blogentry_set.create(text="bla1 joined")
    blog.blogentry_set.create(text="bla2 joined")
    blog.blogentry_set.create(text="bla3 joined")
    blog.blogentry_set.create(text="bla4")
    blog.blogentry_set.create(text="bla5")
    BlogA.objects.create(name="Ba2", info="i2 joined")
    BlogA.objects.create(name="Ba3", info="i3")
    BlogB.objects.create(name="Bb3")
    # FilteredRelation on a plain related name, implicit aggregate alias.
    result = BlogA.objects.annotate(
        text_joined=FilteredRelation(
            "blogentry", condition=Q(blogentry__text__contains="joined")
        ),
    ).aggregate(Count("text_joined"))
    assert result == {"text_joined__count": 3}
    # Same, with an explicit aggregate alias.
    result = BlogA.objects.annotate(
        text_joined=FilteredRelation(
            "blogentry", condition=Q(blogentry__text__contains="joined")
        ),
    ).aggregate(count=Count("text_joined"))
    assert result == {"count": 3}
    # FilteredRelation whose condition uses the polymorphic ___ lookup.
    result = BlogBase.objects.annotate(
        info_joined=FilteredRelation("bloga", condition=Q(BlogA___info__contains="joined")),
    ).aggregate(Count("info_joined"))
    assert result == {"info_joined__count": 2}
    result = BlogBase.objects.annotate(
        info_joined=FilteredRelation("bloga", condition=Q(BlogA___info__contains="joined")),
    ).aggregate(count=Count("info_joined"))
    assert result == {"count": 2}
    # We should get a BlogA and a BlogB
    result = BlogBase.objects.annotate(
        info_joined=FilteredRelation("bloga", condition=Q(BlogA___info__contains="joined")),
    ).filter(info_joined__isnull=True)
    assert result.count() == 2
    assert isinstance(result.first(), BlogA)
    assert isinstance(result.last(), BlogB)

def test_polymorphic__expressions(self):
    """Annotating with an expression over child fields must not raise."""
    from django.db.models.functions import Concat
    # no exception raised
    result = Model2B.objects.annotate(val=Concat("field1", "field2"))
    assert list(result) == []
def test_null_polymorphic_id(self):
    """Test that a proper error message is displayed when the database lacks the ``polymorphic_ctype_id``"""
    Model2A.objects.create(field1="A1")
    Model2B.objects.create(field1="A1", field2="B2")
    Model2B.objects.create(field1="A1", field2="B2")
    # Simulate legacy/corrupt rows with no content type recorded.
    Model2A.objects.all().update(polymorphic_ctype_id=None)
    with pytest.raises(PolymorphicTypeUndefined):
        list(Model2A.objects.all())

def test_invalid_polymorphic_id(self):
    """Test that a proper error message is displayed when the database ``polymorphic_ctype_id`` is invalid"""
    Model2A.objects.create(field1="A1")
    Model2B.objects.create(field1="A1", field2="B2")
    Model2B.objects.create(field1="A1", field2="B2")
    # Point the rows at a content type outside the Model2A hierarchy.
    invalid = ContentType.objects.get_for_model(PlainA).pk
    Model2A.objects.all().update(polymorphic_ctype_id=invalid)
    with pytest.raises(PolymorphicTypeInvalid):
        list(Model2A.objects.all())
def test_bulk_create_abstract_inheritance(self):
    """bulk_create works for models inheriting from an abstract polymorphic base."""
    ArtProject.objects.bulk_create(
        [
            ArtProject(topic="Painting with Tim", artist="T. Turner"),
            ArtProject(topic="Sculpture with Tim", artist="T. Turner"),
        ]
    )
    assert sorted(ArtProject.objects.values_list("topic", "artist")) == [
        ("Painting with Tim", "T. Turner"),
        ("Sculpture with Tim", "T. Turner"),
    ]

def test_bulk_create_proxy_inheritance(self):
    """bulk_create records the proxy class of each instance, regardless of the manager used."""
    RedheadDuck.objects.bulk_create(
        [
            RedheadDuck(name="redheadduck1"),
            Duck(name="duck1"),
            RubberDuck(name="rubberduck1"),
        ]
    )
    RubberDuck.objects.bulk_create(
        [
            RedheadDuck(name="redheadduck2"),
            RubberDuck(name="rubberduck2"),
            Duck(name="duck2"),
        ]
    )
    assert sorted(RedheadDuck.objects.values_list("name", flat=True)) == [
        "redheadduck1",
        "redheadduck2",
    ]
    assert sorted(RubberDuck.objects.values_list("name", flat=True)) == [
        "rubberduck1",
        "rubberduck2",
    ]
    # The base queryset sees every row, whichever proxy created it.
    assert sorted(Duck.objects.values_list("name", flat=True)) == [
        "duck1",
        "duck2",
        "redheadduck1",
        "redheadduck2",
        "rubberduck1",
        "rubberduck2",
    ]

def test_bulk_create_unsupported_multi_table_inheritance(self):
    """bulk_create is rejected for multi-table-inheritance children."""
    with pytest.raises(ValueError):
        MultiTableDerived.objects.bulk_create(
            [MultiTableDerived(field1="field1", field2="field2")]
        )
def test_bulk_create_ignore_conflicts(self):
    """``bulk_create(ignore_conflicts=True)`` silently skips the conflicting row.

    The second list element is already saved (``objects.create``), so
    bulk-inserting it again conflicts on the primary key unless conflicts
    are ignored.  Backends that cannot ignore conflicts raise
    ``NotSupportedError`` instead.
    """
    try:
        ArtProject.objects.bulk_create(
            [
                ArtProject(topic="Painting with Tim", artist="T. Turner"),
                ArtProject.objects.create(topic="Sculpture with Tim", artist="T. Turner"),
            ],
            ignore_conflicts=True,
        )
        assert ArtProject.objects.count() == 2
    except NotSupportedError:
        from django.db import connection
        # Bug fix: ``in ("oracle")`` tested membership in the *string*
        # "oracle" (a substring check, e.g. "acl" would pass).  A
        # one-element tuple is required for the intended vendor check.
        assert connection.vendor in ("oracle",), (
            f"{connection.vendor} should support ignore_conflicts"
        )
def test_bulk_create_no_ignore_conflicts(self):
    """Without ignore_conflicts, the duplicate row must raise IntegrityError."""
    with pytest.raises(IntegrityError):
        ArtProject.objects.bulk_create(
            [
                ArtProject(topic="Painting with Tim", artist="T. Turner"),
                # Already saved by .create(), so re-inserting conflicts on pk.
                ArtProject.objects.create(topic="Sculpture with Tim", artist="T. Turner"),
            ],
            ignore_conflicts=False,
        )
    # Only the row saved by .create() survives the rolled-back bulk insert.
    assert ArtProject.objects.count() == 1
def test_can_query_using_subclass_selector_on_abstract_model(self):
    """The ClassName___field lookup works when the base model is abstract."""
    obj = SubclassSelectorAbstractConcreteModel.objects.create(concrete_field="abc")
    queried_obj = SubclassSelectorAbstractBaseModel.objects.filter(
        SubclassSelectorAbstractConcreteModel___concrete_field="abc"
    ).get()
    assert obj.pk == queried_obj.pk

def test_can_query_using_subclass_selector_on_proxy_model(self):
    """The ClassName___field lookup works when the subclass is a proxy model."""
    obj = SubclassSelectorProxyConcreteModel.objects.create(concrete_field="abc")
    queried_obj = SubclassSelectorProxyBaseModel.objects.filter(
        SubclassSelectorProxyConcreteModel___concrete_field="abc"
    ).get()
    assert obj.pk == queried_obj.pk

def test_prefetch_related_behaves_normally_with_polymorphic_model(self):
    """prefetch_related over an M2M to a polymorphic model returns per-object rows."""
    b1 = RelatingModel.objects.create()
    b2 = RelatingModel.objects.create()
    a = b1.many2many.create()  # create Model2A
    b2.many2many.add(a)  # add same to second relating model
    qs = RelatingModel.objects.prefetch_related("many2many")
    for obj in qs:
        assert len(obj.many2many.all()) == 1
def test_prefetch_related_with_missing(self):
    """Prefetch skips rows whose derived table row was deleted (keep_parents)."""
    b1 = RelatingModel.objects.create()
    b2 = RelatingModel.objects.create()
    rel1 = Model2A.objects.create(field1="A1")
    rel2 = Model2B.objects.create(field1="A2", field2="B2")
    b1.many2many.add(rel1)
    b2.many2many.add(rel2)
    # Delete only the Model2B row; the Model2A parent row remains.
    rel2.delete(keep_parents=True)
    qs = RelatingModel.objects.order_by("pk").prefetch_related("many2many")
    objects = list(qs)
    assert len(objects[0].many2many.all()) == 1
    # derived object was not fetched
    assert len(objects[1].many2many.all()) == 0
    # base object does exist
    assert len(objects[1].many2many.non_polymorphic()) == 1

def test_refresh_from_db_fields(self):
    """Test whether refresh_from_db(fields=..) works as it performs .only() queries"""
    obj = Model2B.objects.create(field1="aa", field2="bb")
    Model2B.objects.filter(pk=obj.pk).update(field1="aa1", field2="bb2")
    obj.refresh_from_db(fields=["field2"])
    # Only field2 was refreshed; field1 keeps its stale in-memory value.
    assert obj.field1 == "aa"
    assert obj.field2 == "bb2"
    obj.refresh_from_db(fields=["field1"])
    assert obj.field1 == "aa1"

def test_non_polymorphic_parent(self):
    """A polymorphic child of a non-polymorphic parent can still be deleted."""
    obj = NonPolymorphicParent.objects.create()
    assert obj.delete()
| PolymorphicTests |
python | PyCQA__pylint | tests/functional/i/inherit_non_class.py | {
"start": 819,
"end": 904
} | class ____(Empty()): # [inherit-non-class]
""" Can't inherit from instance. """
| Bad4 |
python | ray-project__ray | python/ray/data/tests/test_download_expression.py | {
"start": 9658,
"end": 13702
} | class ____:
"""Test error conditions and edge cases for download expressions."""
def test_download_expression_invalid_uri_column(self):
    """Test download expression with non-existent URI column."""
    table = pa.Table.from_arrays(
        [
            pa.array(["local://test.txt"]),
        ],
        names=["existing_column"],
    )
    ds = ray.data.from_arrow(table)
    ds_with_downloads = ds.with_column("bytes", download("non_existent_column"))
    # Should raise error when trying to execute
    with pytest.raises(ValueError):
        ds_with_downloads.take_all()

def test_download_expression_with_null_uris(self):
    """Test download expression handling of null/empty URIs."""
    table = pa.Table.from_arrays(
        [
            pa.array(["local://test.txt", None, ""]),
        ],
        names=["uri"],
    )
    ds = ray.data.from_arrow(table)
    ds_with_downloads = ds.with_column("bytes", download("uri"))
    # Should handle nulls gracefully (exact behavior may vary)
    # This test mainly ensures no crash occurs
    try:
        results = ds_with_downloads.take_all()
        # If it succeeds, verify structure is reasonable
        assert len(results) == 3
        for result in results:
            assert "bytes" in result
    except Exception as e:
        # If it fails, should be a reasonable error (not a crash)
        assert isinstance(e, (ValueError, KeyError, RuntimeError))
def test_download_expression_with_malformed_uris(self, tmp_path):
    """Test download expression with malformed URIs.

    This tests that various malformed URIs are caught and return None
    instead of crashing.
    All of the URIs should be malformed in order to test the ZeroDivisionError
    described in https://github.com/ray-project/ray/issues/58462.
    """
    malformed_uris = [
        f"local://{tmp_path}/nonexistent.txt",  # File doesn't exist
        "local:///this/path/does/not/exist/file.txt",  # Invalid path
        "",  # Empty URI
        "foobar",  # Random string
        # TODO(xyuzh): Currently, using the below URIs raises an exception
        # in _resolve_paths_and_filesystem. We need to fix that issue and
        # add the tests in.
        # "file:///\x00/null/byte",  # Null byte
        # "http://host/path\n\r",  # Line breaks
        # "foo://bar",  # Invalid scheme
        # "://no-scheme",  # Missing scheme
        # "http://host/path?query=<script>",  # Injection attempts
    ]
    ds = ray.data.from_items([{"uri": uri} for uri in malformed_uris])
    ds_with_downloads = ds.with_column("bytes", download("uri"))
    results = ds_with_downloads.take_all()
    # All malformed URIs should return None
    assert len(results) == len(malformed_uris)
    for result in results:
        assert result["bytes"] is None

def test_download_expression_mixed_valid_and_invalid_uris(self, tmp_path):
    """Test download expression when some but not all of the URIs are invalid."""
    # Create one valid file
    valid_file = tmp_path / "valid.txt"
    valid_file.write_bytes(b"valid content")
    # Create URIs: one valid and one non-existent file.
    ds = ray.data.from_items(
        [
            {"uri": str(valid_file), "id": 0},
            {"uri": str(tmp_path / "nonexistent.txt"), "id": 1},
        ]
    )
    ds_with_downloads = ds.with_column("bytes", download("uri"))
    # Should not crash - failed downloads return None
    results = sorted(ds_with_downloads.take_all(), key=lambda row: row["id"])
    assert len(results) == 2
    # First URI should succeed
    assert results[0]["bytes"] == b"valid content"
    # Second URI should fail gracefully (return None)
    assert results[1]["bytes"] is None
| TestDownloadExpressionErrors |
python | getsentry__sentry | src/sentry/replays/usecases/query/conditions/selector.py | {
"start": 5717,
"end": 6498
} | class ____(ComputedBase):
"""Click selector composite condition class."""
@staticmethod
def visit_eq(value: list[QueryType]) -> Condition:
    """Build a condition that matches rows where the selector search hits."""
    if not value:
        # TODO: raise in the field or return the default condition in the field?
        # No selectors were given: 1 == 2 never holds, so nothing matches.
        never_matches = Condition(Function("identity", parameters=[1]), Op.EQ, 2)
        return never_matches
    # search_selector(...) yields 1 on a hit; require a hit.
    return Condition(search_selector(value), Op.EQ, 1)
@staticmethod
def visit_neq(value: list[QueryType]) -> Condition:
    """Build a condition that matches rows where the selector search misses."""
    if not value:
        # TODO: raise in the field or return the default condition in the field?
        # No selectors were given: 1 == 2 never holds, so nothing matches.
        never_matches = Condition(Function("identity", parameters=[1]), Op.EQ, 2)
        return never_matches
    # search_selector(...) yields 0 on a miss; require a miss.
    return Condition(search_selector(value), Op.EQ, 0)
| ClickSelectorComposite |
python | realpython__materials | python-async-iterators/async_range_v2.py | {
"start": 17,
"end": 322
} | class ____:
def __init__(self, start, end):
    # Underlying synchronous range of values to emit; [start, end).
    self.data = range(start, end)

async def __aiter__(self):
    """Asynchronously yield each value, sleeping 0.5s before every item."""
    for i in self.data:
        await asyncio.sleep(0.5)
        yield i
async def main():
    """Consume the async iterator; prints 0..4, one value every 0.5 seconds."""
    async for i in AsyncRange(0, 5):
        print(i)

# Run the demo coroutine on a fresh event loop.
asyncio.run(main())
| AsyncRange |
python | pallets__click | src/click/exceptions.py | {
"start": 2868,
"end": 4484
} | class ____(UsageError):
"""An exception that formats out a standardized error message for a
bad parameter. This is useful when thrown from a callback or type as
Click will attach contextual information to it (for instance, which
parameter it is).
.. versionadded:: 2.0
:param param: the parameter object that caused this error. This can
be left out, and Click will attach this info itself
if possible.
:param param_hint: a string that shows up as parameter name. This
can be used as alternative to `param` in cases
where custom validation should happen. If it is
a string it's used as such, if it's a list then
each item is quoted and separated.
"""
def __init__(
    self,
    message: str,
    ctx: Context | None = None,
    param: Parameter | None = None,
    param_hint: cabc.Sequence[str] | str | None = None,
) -> None:
    """Store the failing parameter (or an explicit hint) alongside the message."""
    super().__init__(message, ctx)
    # Parameter that caused the error; Click may fill this in contextually.
    self.param = param
    # Explicit display name(s); takes precedence over `param` when formatting.
    self.param_hint = param_hint
def format_message(self) -> str:
    """Render the error message, prefixed with the parameter hint if known."""
    # Without a hint or a parameter there is nothing to point at.
    if self.param_hint is None and self.param is None:
        return _("Invalid value: {message}").format(message=self.message)
    # An explicit hint wins over the parameter's own error hint.
    if self.param_hint is not None:
        hint = self.param_hint
    else:
        hint = self.param.get_error_hint(self.ctx)  # type: ignore
    return _("Invalid value for {param_hint}: {message}").format(
        param_hint=_join_param_hints(hint), message=self.message
    )
| BadParameter |
python | ray-project__ray | python/ray/autoscaler/_private/resource_demand_scheduler.py | {
"start": 3032,
"end": 41312
} | class ____:
def __init__(
    self,
    provider: NodeProvider,
    node_types: Dict[NodeType, NodeTypeConfigDict],
    max_workers: int,
    head_node_type: NodeType,
    upscaling_speed: float,
) -> None:
    """Plans node launches to satisfy resource demands.

    Args:
        provider: Cloud node provider used to inspect cluster nodes.
        node_types: Node type configs (deep-copied; later patched with
            runtime-observed resources).
        max_workers: Global cap on the number of worker nodes.
        head_node_type: Node type name of the head node.
        upscaling_speed: Fraction of running nodes allowed as concurrent
            pending launches.
    """
    self.provider = provider
    self.node_types = copy.deepcopy(node_types)
    # Node types whose static resources were already refreshed from runtime.
    self.node_resource_updated = set()
    self.max_workers = max_workers
    self.head_node_type = head_node_type
    self.upscaling_speed = upscaling_speed

    # The utilization scorer is pluggable via an environment variable;
    # defaults to the module's own scorer.
    utilization_scorer_str = os.environ.get(
        AUTOSCALER_UTILIZATION_SCORER_KEY,
        "ray.autoscaler._private.resource_demand_scheduler"
        "._default_utilization_scorer",
    )
    self.utilization_scorer: UtilizationScorer = load_function_or_class(
        utilization_scorer_str
    )
def _get_head_and_workers(self, nodes: List[NodeID]) -> Tuple[NodeID, List[NodeID]]:
    """Split the cluster node ids into (head node id, worker node ids).

    The head id is None when no node is tagged as the head.
    """
    head_id = None
    worker_ids: List[NodeID] = []
    for node_id in nodes:
        kind = self.provider.node_tags(node_id)[TAG_RAY_NODE_KIND]
        if kind == NODE_KIND_HEAD:
            head_id = node_id
        elif kind == NODE_KIND_WORKER:
            worker_ids.append(node_id)
    return head_id, worker_ids
def reset_config(
    self,
    provider: NodeProvider,
    node_types: Dict[NodeType, NodeTypeConfigDict],
    max_workers: int,
    head_node_type: NodeType,
    upscaling_speed: float = 1,
) -> None:
    """Updates the class state variables.

    For legacy yamls, it merges previous state and new state to make sure
    inferered resources are not lost.
    """
    self.provider = provider
    self.node_types = copy.deepcopy(node_types)
    # Forget which node types were refreshed from runtime: the new config
    # may define different types or resources.
    self.node_resource_updated = set()
    self.max_workers = max_workers
    self.head_node_type = head_node_type
    self.upscaling_speed = upscaling_speed
def is_feasible(self, bundle: ResourceDict) -> bool:
    """Return True if some launchable node type could fit *bundle*."""
    for candidate_type, candidate_cfg in self.node_types.items():
        worker_cap = candidate_cfg.get("max_workers", 0)
        resources = candidate_cfg["resources"]
        # The head node type is always "launchable"; workers need a
        # positive max_workers cap.
        launchable = candidate_type == self.head_node_type or worker_cap > 0
        if launchable and _fits(resources, bundle):
            return True
    return False
def get_nodes_to_launch(
    self,
    nodes: List[NodeID],
    launching_nodes: Dict[NodeType, int],
    resource_demands: List[ResourceDict],
    unused_resources_by_ip: Dict[NodeIP, ResourceDict],
    pending_placement_groups: List[PlacementGroupTableData],
    max_resources_by_ip: Dict[NodeIP, ResourceDict],
    ensure_min_cluster_size: List[ResourceDict],
    node_availability_summary: NodeAvailabilitySummary,
) -> (Dict[NodeType, int], List[ResourceDict]):
    """Given resource demands, return node types to add to the cluster.

    This method:
        (1) calculates the resources present in the cluster by:
            - computing available resources for each existing node
            - counting the number of nodes per node type
            - including both running and launching nodes
        (2) calculates the remaining nodes to add to respect min_workers
            constraint per node type.
        (3) for each strict spread placement group, reserve space on
            available nodes and launch new nodes if necessary.
        (4) calculates the unfulfilled resource bundles.
        (5) calculates which nodes need to be launched to fulfill all
            the bundle requests, subject to max_worker constraints.

    Args:
        nodes: List of existing nodes in the cluster.
        launching_nodes: Summary of node types currently being launched.
        resource_demands: Vector of resource demands from the scheduler.
        unused_resources_by_ip: Mapping from ip to available resources.
        pending_placement_groups: Placement group demands.
        max_resources_by_ip: Mapping from ip to static node resources.
        ensure_min_cluster_size: Try to ensure the cluster can fit at least
            this set of resources. This differs from resources_demands in
            that we don't take into account existing usage.
        node_availability_summary: A snapshot of the current
            NodeAvailabilitySummary.

    Returns:
        Dict of count to add for each node type, and residual of resources
        that still cannot be fulfilled.
    """
    # Bind the availability snapshot so downstream helpers only pass
    # (node_type, resources) to the scorer.
    utilization_scorer = partial(
        self.utilization_scorer, node_availability_summary=node_availability_summary
    )
    self._update_node_resources_from_runtime(nodes, max_resources_by_ip)

    # Step 1: Calculate current cluster resources and node type counts
    node_resources: List[ResourceDict]
    node_type_counts: Dict[NodeType, int]
    node_resources, node_type_counts = self.calculate_node_resources(
        nodes, launching_nodes, unused_resources_by_ip
    )

    logger.debug("Cluster resources: {}".format(node_resources))
    logger.debug("Node counts: {}".format(node_type_counts))
    # Step 2: add nodes to add to satisfy min_workers for each type
    (
        node_resources,
        node_type_counts,
        adjusted_min_workers,
    ) = _add_min_workers_nodes(
        node_resources,
        node_type_counts,
        self.node_types,
        self.max_workers,
        self.head_node_type,
        ensure_min_cluster_size,
        utilization_scorer=utilization_scorer,
    )

    # Step 3: get resource demands of placement groups and return the
    # groups that should be strictly spread.
    logger.debug(f"Placement group demands: {pending_placement_groups}")
    # TODO(Clark): Refactor placement group bundle demands such that their placement
    # group provenance is mantained, since we need to keep an accounting of the
    # cumulative CPU cores allocated as fulfilled during bin packing in order to
    # ensure that a placement group's cumulative allocation is under the placement
    # group's max CPU fraction per node. Without this, and placement group with many
    # bundles might not be schedulable, but will fail to trigger scale-up since the
    # max CPU fraction is properly applied to the cumulative bundle requests for a
    # single node.
    #
    # placement_group_demand_vector: List[Tuple[List[ResourceDict], double]]
    #
    # bin_pack_residual() can keep it's packing priority; we just need to account
    # for (1) the running CPU allocation for the bundle's placement group for that
    # particular node, and (2) the max CPU cores allocatable for a single placement
    # group for that particular node.
    (
        placement_group_demand_vector,
        strict_spreads,
    ) = placement_groups_to_resource_demands(pending_placement_groups)
    # Place placement groups demand vector at the beginning of the resource
    # demands vector to make it consistent (results in the same types of
    # nodes to add) with pg_demands_nodes_max_launch_limit calculated later
    resource_demands = placement_group_demand_vector + resource_demands
    (
        spread_pg_nodes_to_add,
        node_resources,
        node_type_counts,
    ) = self.reserve_and_allocate_spread(
        strict_spreads,
        node_resources,
        node_type_counts,
        utilization_scorer,
    )

    # Calculate the nodes to add for bypassing max launch limit for
    # placement groups and spreads.
    unfulfilled_placement_groups_demands, _ = get_bin_pack_residual(
        node_resources,
        placement_group_demand_vector,
    )
    # Add 1 to account for the head node.
    max_to_add = self.max_workers + 1 - sum(node_type_counts.values())
    pg_demands_nodes_max_launch_limit, _ = get_nodes_for(
        self.node_types,
        node_type_counts,
        self.head_node_type,
        max_to_add,
        unfulfilled_placement_groups_demands,
        utilization_scorer=utilization_scorer,
    )
    placement_groups_nodes_max_limit = {
        node_type: spread_pg_nodes_to_add.get(node_type, 0)
        + pg_demands_nodes_max_launch_limit.get(node_type, 0)
        for node_type in self.node_types
    }

    # Step 4/5: add nodes for pending tasks, actors, and non-strict spread
    # groups
    unfulfilled, _ = get_bin_pack_residual(node_resources, resource_demands)
    logger.debug("Resource demands: {}".format(resource_demands))
    logger.debug("Unfulfilled demands: {}".format(unfulfilled))
    nodes_to_add_based_on_demand, final_unfulfilled = get_nodes_for(
        self.node_types,
        node_type_counts,
        self.head_node_type,
        max_to_add,
        unfulfilled,
        utilization_scorer=utilization_scorer,
    )
    logger.debug("Final unfulfilled: {}".format(final_unfulfilled))

    # Merge nodes to add based on demand and nodes to add based on
    # min_workers constraint. We add them because nodes to add based on
    # demand was calculated after the min_workers constraint was respected.
    total_nodes_to_add = {}
    for node_type in self.node_types:
        nodes_to_add = (
            adjusted_min_workers.get(node_type, 0)
            + spread_pg_nodes_to_add.get(node_type, 0)
            + nodes_to_add_based_on_demand.get(node_type, 0)
        )
        if nodes_to_add > 0:
            total_nodes_to_add[node_type] = nodes_to_add

    # Limit the number of concurrent launches
    total_nodes_to_add = self._get_concurrent_resource_demand_to_launch(
        total_nodes_to_add,
        unused_resources_by_ip.keys(),
        nodes,
        launching_nodes,
        adjusted_min_workers,
        placement_groups_nodes_max_limit,
    )

    logger.debug("Node requests: {}".format(total_nodes_to_add))
    return total_nodes_to_add, final_unfulfilled
def _update_node_resources_from_runtime(
    self, nodes: List[NodeID], max_resources_by_ip: Dict[NodeIP, ResourceDict]
):
    """Update static node type resources with runtime resources

    This will update the cached static node type resources with the runtime
    resources. Because we can not know the exact autofilled memory or
    object_store_memory from config file.

    Args:
        nodes: Non-terminated node ids to inspect.
        max_resources_by_ip: Mapping from node ip to the total (static)
            resources observed at runtime.
    """
    # Fast path: every known node type has already been refreshed.
    need_update = len(self.node_types) != len(self.node_resource_updated)

    if not need_update:
        return
    for node_id in nodes:
        tags = self.provider.node_tags(node_id)

        if TAG_RAY_USER_NODE_TYPE not in tags:
            continue

        node_type = tags[TAG_RAY_USER_NODE_TYPE]
        if (
            node_type in self.node_resource_updated
            or node_type not in self.node_types
        ):
            # continue if the node type has been updated or is not an known
            # node type
            continue
        ip = self.provider.internal_ip(node_id)
        runtime_resources = max_resources_by_ip.get(ip)
        if runtime_resources:
            runtime_resources = copy.deepcopy(runtime_resources)
            resources = self.node_types[node_type].get("resources", {})
            # Only these keys are trusted from runtime observation.
            for key in ["CPU", "GPU", "memory"]:
                if key in runtime_resources:
                    resources[key] = runtime_resources[key]
            self.node_types[node_type]["resources"] = resources

            node_kind = tags[TAG_RAY_NODE_KIND]
            if node_kind == NODE_KIND_WORKER:
                # Here, we do not record the resources have been updated
                # if it is the head node kind. Because it need be updated
                # by worker kind runtime resource. The most difference
                # between head and worker is the memory resources. The head
                # node needs to configure redis memory which is not needed
                # for worker nodes.
                self.node_resource_updated.add(node_type)
def _get_concurrent_resource_demand_to_launch(
    self,
    to_launch: Dict[NodeType, int],
    connected_nodes: List[NodeIP],
    non_terminated_nodes: List[NodeID],
    pending_launches_nodes: Dict[NodeType, int],
    adjusted_min_workers: Dict[NodeType, int],
    placement_group_nodes: Dict[NodeType, int],
) -> Dict[NodeType, int]:
    """Updates the max concurrent resources to launch for each node type.

    Given the current nodes that should be launched, the non terminated
    nodes (running and pending) and the pending to be launched nodes. This
    method calculates the maximum number of nodes to launch concurrently
    for each node type as follows:
        1) Calculates the running nodes.
        2) Calculates the pending nodes and gets the launching nodes.
        3) Limits the total number of pending + currently-launching +
           to-be-launched nodes to:
               max(
                   5,
                   self.upscaling_speed * max(running_nodes[node_type], 1)
               ).

    Args:
        to_launch: List of number of nodes to launch based on resource
            demand for every node type.
        connected_nodes: Running nodes (from LoadMetrics).
        non_terminated_nodes: Non terminated nodes (pending/running).
        pending_launches_nodes: Nodes that are in the launch queue.
        adjusted_min_workers: Nodes to launch to satisfy
            min_workers and request_resources(). This overrides the launch
            limits since the user is hinting to immediately scale up to
            this size.
        placement_group_nodes: Nodes to launch for placement groups.
            This overrides the launch concurrency limits.

    Returns:
        Dict[NodeType, int]: Maximum number of nodes to launch for each
            node type.
    """
    updated_nodes_to_launch = {}
    # running_nodes / pending_nodes are defaultdict(int), so missing node
    # types read as 0 below.
    running_nodes, pending_nodes = self._separate_running_and_pending_nodes(
        non_terminated_nodes,
        connected_nodes,
    )
    for node_type in to_launch:
        # Enforce here max allowed pending nodes to be frac of total
        # running nodes.
        max_allowed_pending_nodes = max(
            AUTOSCALER_UPSCALING_INITIAL_NUM_NODES,
            int(self.upscaling_speed * max(running_nodes[node_type], 1)),
        )
        total_pending_nodes = (
            pending_launches_nodes.get(node_type, 0) + pending_nodes[node_type]
        )
        upper_bound = max(
            max_allowed_pending_nodes - total_pending_nodes,
            # Allow more nodes if this is to respect min_workers or
            # request_resources() or placement groups.
            adjusted_min_workers.get(node_type, 0)
            + placement_group_nodes.get(node_type, 0),
        )
        # Node types whose bound is exhausted are simply omitted from the
        # result (i.e. launch nothing for them this round).
        if upper_bound > 0:
            updated_nodes_to_launch[node_type] = min(
                upper_bound, to_launch[node_type]
            )
    return updated_nodes_to_launch
def _separate_running_and_pending_nodes(
    self,
    non_terminated_nodes: List[NodeID],
    connected_nodes: List[NodeIP],
) -> (Dict[NodeType, int], Dict[NodeType, int]):
    """Splits connected and non terminated nodes to pending & running."""
    running_nodes = collections.defaultdict(int)
    pending_nodes = collections.defaultdict(int)
    for node_id in non_terminated_nodes:
        tags = self.provider.node_tags(node_id)
        # Nodes without a user node type tag (e.g. unmanaged nodes) are
        # not counted at all.
        if TAG_RAY_USER_NODE_TYPE in tags:
            node_type = tags[TAG_RAY_USER_NODE_TYPE]
            node_ip = self.provider.internal_ip(node_id)
            # A node counts as "running" once its IP has reported in
            # (appears in connected_nodes); otherwise it is still pending.
            if node_ip in connected_nodes:
                running_nodes[node_type] += 1
            else:
                pending_nodes[node_type] += 1
    return running_nodes, pending_nodes
def calculate_node_resources(
    self,
    nodes: List[NodeID],
    pending_nodes: Dict[NodeID, int],
    unused_resources_by_ip: Dict[str, ResourceDict],
) -> (List[ResourceDict], Dict[NodeType, int]):
    """Returns node resource list and node type counts.

    Counts the running nodes, pending nodes.

    Args:
        nodes: Existing nodes.
        pending_nodes: Pending nodes.

    Returns:
        node_resources: a list of running + pending resources.
            E.g., [{"CPU": 4}, {"GPU": 2}].
        node_type_counts: running + pending workers per node type.
    """
    node_resources = []
    node_type_counts = collections.defaultdict(int)

    def add_node(node_type, available_resources=None):
        # Appends one resource dict for this node (and bumps its type
        # count), or silently skips unknown node types after logging.
        if node_type not in self.node_types:
            # We should not get here, but if for some reason we do, log an
            # error and skip the errant node_type.
            logger.error(
                f"Missing entry for node_type {node_type} in "
                f"cluster config: {self.node_types} under entry "
                "available_node_types. This node's resources will be "
                "ignored. If you are using an unmanaged node, manually "
                f"set the {TAG_RAY_NODE_KIND} tag to "
                f'"{NODE_KIND_UNMANAGED}" in your cloud provider\'s '
                "management console."
            )
            return None
        # Careful not to include the same dict object multiple times.
        available = copy.deepcopy(self.node_types[node_type]["resources"])
        # If available_resources is None this might be because the node is
        # no longer pending, but the raylet hasn't sent a heartbeat to gcs
        # yet.
        if available_resources is not None:
            available = copy.deepcopy(available_resources)

        node_resources.append(available)
        node_type_counts[node_type] += 1

    for node_id in nodes:
        tags = self.provider.node_tags(node_id)
        if TAG_RAY_USER_NODE_TYPE in tags:
            node_type = tags[TAG_RAY_USER_NODE_TYPE]
            ip = self.provider.internal_ip(node_id)
            available_resources = unused_resources_by_ip.get(ip)
            add_node(node_type, available_resources)

    # Pending nodes have no runtime data yet, so they are counted at
    # their full configured capacity.
    for node_type, count in pending_nodes.items():
        for _ in range(count):
            add_node(node_type)

    return node_resources, node_type_counts
def reserve_and_allocate_spread(
    self,
    strict_spreads: List[List[ResourceDict]],
    node_resources: List[ResourceDict],
    node_type_counts: Dict[NodeType, int],
    utilization_scorer: Callable[
        [NodeResources, ResourceDemands], Optional[UtilizationScore]
    ],
):
    """For each strict spread, attempt to reserve as much space as possible
    on the node, then allocate new nodes for the unfulfilled portion.

    Args:
        strict_spreads (List[List[ResourceDict]]): A list of placement
            groups which must be spread out.
        node_resources (List[ResourceDict]): Available node resources in
            the cluster.
        node_type_counts (Dict[NodeType, int]): The amount of each type of
            node pending or in the cluster.
        utilization_scorer: A function that, given a node
            type, its resources, and resource demands, returns what its
            utilization would be.

    Returns:
        Dict[NodeType, int]: Nodes to add.
        List[ResourceDict]: The updated node_resources after the method.
        Dict[NodeType, int]: The updated node_type_counts.
    """
    to_add = collections.defaultdict(int)
    for bundles in strict_spreads:
        # Try to pack as many bundles of this group as possible on existing
        # nodes. The remaining will be allocated on new nodes.
        unfulfilled, updated_node_resources = get_bin_pack_residual(
            node_resources, bundles, strict_spread=True
        )
        max_to_add = self.max_workers + 1 - sum(node_type_counts.values())
        # Allocate new nodes for the remaining bundles that don't fit.
        to_launch, _ = get_nodes_for(
            self.node_types,
            node_type_counts,
            self.head_node_type,
            max_to_add,
            unfulfilled,
            utilization_scorer=utilization_scorer,
            strict_spread=True,
        )
        new_node_resources = _node_type_counts_to_node_resources(
            self.node_types, to_launch
        )
        # Update node resources to include newly launched nodes and their
        # bundles.
        unfulfilled, including_reserved = get_bin_pack_residual(
            new_node_resources, unfulfilled, strict_spread=True
        )
        if unfulfilled:
            # The whole group cannot be placed: skip it without committing
            # to_launch; node_resources is left untouched, which rolls back
            # the tentative reservation made above.
            logger.debug(
                "Unfulfilled strict spread placement group: {}".format(bundles)
            )
            continue
        _inplace_add(node_type_counts, to_launch)
        _inplace_add(to_add, to_launch)
        node_resources = updated_node_resources + including_reserved
    return to_add, node_resources, node_type_counts
def debug_string(
    self,
    nodes: List[NodeID],
    pending_nodes: Dict[NodeID, int],
    unused_resources_by_ip: Dict[str, ResourceDict],
) -> str:
    """Render a human-readable summary of worker counts per node type."""
    _, node_type_counts = self.calculate_node_resources(
        nodes, pending_nodes, unused_resources_by_ip
    )
    lines = ["Worker node types:"]
    for node_type, count in node_type_counts.items():
        entry = " - {}: {}".format(node_type, count)
        if pending_nodes.get(node_type):
            entry += " ({} pending)".format(pending_nodes[node_type])
        lines.append(entry)
    return "\n".join(lines)
def _node_type_counts_to_node_resources(
    node_types: Dict[NodeType, NodeTypeConfigDict],
    node_type_counts: Dict[NodeType, int],
) -> List[ResourceDict]:
    """Expand per-type node counts into one resource dict per node."""
    expanded: List[ResourceDict] = []
    for node_type, count in node_type_counts.items():
        template = node_types[node_type]["resources"]
        for _ in range(count):
            # Each node must get its own dict object so that later
            # in-place bin packing does not alias entries.
            expanded.append(template.copy())
    return expanded
def _add_min_workers_nodes(
    node_resources: List[ResourceDict],
    node_type_counts: Dict[NodeType, int],
    node_types: Dict[NodeType, NodeTypeConfigDict],
    max_workers: int,
    head_node_type: NodeType,
    ensure_min_cluster_size: List[ResourceDict],
    utilization_scorer: Callable[
        [NodeResources, ResourceDemands, str], Optional[UtilizationScore]
    ],
) -> (List[ResourceDict], Dict[NodeType, int], Dict[NodeType, int]):
    """Updates resource demands to respect the min_workers and
    request_resources() constraints.

    Args:
        node_resources: Resources of existing nodes already launched/pending.
        node_type_counts: Counts of existing nodes already launched/pending.
        node_types: Node types config.
        max_workers: global max_workers constaint.
        ensure_min_cluster_size: resource demands from request_resources().
        utilization_scorer: A function that, given a node
            type, its resources, and resource demands, returns what its
            utilization would be.

    Returns:
        node_resources: The updated node resources after adding min_workers
            and request_resources() constraints per node type.
        node_type_counts: The updated node counts after adding min_workers
            and request_resources() constraints per node type.
        total_nodes_to_add_dict: The nodes to add to respect min_workers and
            request_resources() constraints.
    """
    total_nodes_to_add_dict = {}
    for node_type, config in node_types.items():
        existing = node_type_counts.get(node_type, 0)
        # min_workers is capped by the per-type max_workers.
        target = min(config.get("min_workers", 0), config.get("max_workers", 0))
        if node_type == head_node_type:
            # Add 1 to account for head node.
            target = target + 1
        if existing < target:
            total_nodes_to_add_dict[node_type] = target - existing
            node_type_counts[node_type] = target
            node_resources.extend(
                [
                    copy.deepcopy(node_types[node_type]["resources"])
                    for _ in range(total_nodes_to_add_dict[node_type])
                ]
            )

    if ensure_min_cluster_size:
        max_to_add = max_workers + 1 - sum(node_type_counts.values())
        max_node_resources = []
        # Fit request_resources() on all the resources as if they are idle.
        for node_type in node_type_counts:
            max_node_resources.extend(
                [
                    copy.deepcopy(node_types[node_type]["resources"])
                    for _ in range(node_type_counts[node_type])
                ]
            )
        # Get the unfulfilled to ensure min cluster size.
        resource_requests_unfulfilled, _ = get_bin_pack_residual(
            max_node_resources, ensure_min_cluster_size
        )
        # Get the nodes to meet the unfulfilled.
        nodes_to_add_request_resources, _ = get_nodes_for(
            node_types,
            node_type_counts,
            head_node_type,
            max_to_add,
            resource_requests_unfulfilled,
            utilization_scorer=utilization_scorer,
        )
        # Update the resources, counts and total nodes to add.
        for node_type in nodes_to_add_request_resources:
            nodes_to_add = nodes_to_add_request_resources.get(node_type, 0)
            if nodes_to_add > 0:
                node_type_counts[node_type] = nodes_to_add + node_type_counts.get(
                    node_type, 0
                )
                node_resources.extend(
                    [
                        copy.deepcopy(node_types[node_type]["resources"])
                        for _ in range(nodes_to_add)
                    ]
                )
                total_nodes_to_add_dict[
                    node_type
                ] = nodes_to_add + total_nodes_to_add_dict.get(node_type, 0)
    return node_resources, node_type_counts, total_nodes_to_add_dict
def get_nodes_for(
    node_types: Dict[NodeType, NodeTypeConfigDict],
    existing_nodes: Dict[NodeType, int],
    head_node_type: NodeType,
    max_to_add: int,
    resources: List[ResourceDict],
    utilization_scorer: Callable[
        [NodeResources, ResourceDemands, str], Optional[UtilizationScore]
    ],
    strict_spread: bool = False,
) -> (Dict[NodeType, int], List[ResourceDict]):
    """Determine nodes to add given resource demands and constraints.

    Args:
        node_types: node types config.
        existing_nodes: counts of existing nodes already launched.
            This sets constraints on the number of new nodes to add.
        max_to_add: global constraint on nodes to add.
        resources: resource demands to fulfill.
        strict_spread: If true, each element in `resources` must be placed on a
            different node.
        utilization_scorer: A function that, given a node
            type, its resources, and resource demands, returns what its
            utilization would be.

    Returns:
        Dict of count to add for each node type, and residual of resources
        that still cannot be fulfilled.
    """
    nodes_to_add: Dict[NodeType, int] = collections.defaultdict(int)

    # Greedy loop: add one node at a time (the best-scoring type) until all
    # demand is met or the global budget is exhausted.
    while resources and sum(nodes_to_add.values()) < max_to_add:
        utilization_scores = []
        for node_type in node_types:
            max_workers_of_node_type = node_types[node_type].get("max_workers", 0)
            if head_node_type == node_type:
                # Add 1 to account for head node.
                max_workers_of_node_type = max_workers_of_node_type + 1
            if (
                existing_nodes.get(node_type, 0) + nodes_to_add.get(node_type, 0)
                >= max_workers_of_node_type
            ):
                continue
            node_resources = node_types[node_type]["resources"]
            if strict_spread:
                # If handling strict spread, only one bundle can be placed on
                # the node.
                score = utilization_scorer(node_resources, [resources[0]], node_type)
            else:
                score = utilization_scorer(node_resources, resources, node_type)
            if score is not None:
                utilization_scores.append((score, node_type))

        # Give up, no feasible node.
        if not utilization_scores:
            # Suppress the warning for placement-group internal resources,
            # which legitimately do not match any configured node type.
            if not any(
                is_placement_group_resource(resource)
                for resources_dict in resources
                for resource in resources_dict
            ):
                logger.warning(
                    f"The autoscaler could not find a node type to satisfy the "
                    f"request: {resources}. Please specify a node type with the "
                    f"necessary resources."
                )
            break

        # Highest (score, node_type) tuple wins; score compares first.
        utilization_scores = sorted(utilization_scores, reverse=True)
        best_node_type = utilization_scores[0][1]
        nodes_to_add[best_node_type] += 1
        if strict_spread:
            resources = resources[1:]
        else:
            allocated_resource = node_types[best_node_type]["resources"]
            residual, _ = get_bin_pack_residual([allocated_resource], resources)
            # The chosen node must absorb at least one demand, otherwise the
            # loop would never terminate.
            assert len(residual) < len(resources), (resources, residual)
            resources = residual

    return nodes_to_add, resources
def _resource_based_utilization_scorer(
    node_resources: ResourceDict,
    resources: List[ResourceDict],
    *,
    node_availability_summary: NodeAvailabilitySummary,
) -> Optional[Tuple[bool, int, float, float]]:
    """Score how well ``resources`` would utilize a node with ``node_resources``.

    Returns None when no demand fits on the node (or the node declares only
    zero-valued resources). Otherwise returns a tuple compared
    lexicographically by the caller:
        (gpu_ok, num_matching_resource_types,
         min cubed-weighted utilization, mean cubed-weighted utilization)
    """
    remaining = copy.deepcopy(node_resources)
    fittable = []
    resource_types = set()
    # Greedily reserve each demand that still fits, tracking which resource
    # types are actually requested (zero-quantity requests don't count).
    for r in resources:
        for k, v in r.items():
            if v > 0:
                resource_types.add(k)
        if _fits(remaining, r):
            fittable.append(r)
            _inplace_subtract(remaining, r)
    if not fittable:
        return None

    util_by_resources = []
    num_matching_resource_types = 0
    for k, v in node_resources.items():
        # Don't divide by zero.
        if v < 1:
            # Could test v == 0 on the nose, but v < 1 feels safer.
            # (Note that node resources are integers.)
            continue
        if k in resource_types:
            num_matching_resource_types += 1
        util = (v - remaining[k]) / v
        # Cubing the utilization rewards nodes that would be filled close
        # to capacity, weighted by the resource quantity.
        util_by_resources.append(v * (util**3))

    # Could happen if node_resources has only zero values.
    if not util_by_resources:
        return None

    # Prefer not to launch a GPU node if there aren't any GPU requirements in the
    # resource bundle.
    gpu_ok = True
    if AUTOSCALER_CONSERVE_GPU_NODES:
        is_gpu_node = "GPU" in node_resources and node_resources["GPU"] > 0
        any_gpu_task = any("GPU" in r for r in resources)
        if is_gpu_node and not any_gpu_task:
            gpu_ok = False

    # Prioritize avoiding gpu nodes for non-gpu workloads first,
    # then prioritize matching multiple resource types,
    # then prioritize using all resources,
    # then prioritize overall balance of multiple resources.
    return (
        gpu_ok,
        num_matching_resource_types,
        min(util_by_resources),
        # util_by_resources should be non empty
        float(sum(util_by_resources)) / len(util_by_resources),
    )
def _default_utilization_scorer(
    node_resources: ResourceDict,
    resources: List[ResourceDict],
    node_type: str,
    *,
    node_availability_summary: NodeAvailabilitySummary,
):
    """Default scorer: delegate to the resource-based utilization scorer.

    ``node_type`` is accepted for interface compatibility with the scorer
    callable signature but is not used by the default implementation.
    """
    score = _resource_based_utilization_scorer(
        node_resources,
        resources,
        node_availability_summary=node_availability_summary,
    )
    return score
def get_bin_pack_residual(
    node_resources: List[ResourceDict],
    resource_demands: List[ResourceDict],
    strict_spread: bool = False,
) -> (List[ResourceDict], List[ResourceDict]):
    """Return a subset of resource_demands that cannot fit in the cluster.

    TODO(ekl): this currently does not guarantee the resources will be packed
    correctly by the Ray scheduler. This is only possible once the Ray backend
    supports a placement groups API.

    Args:
        node_resources (List[ResourceDict]): List of resources per node.
        resource_demands (List[ResourceDict]): List of resource bundles that
            need to be bin packed onto the nodes.
        strict_spread: If true, each element in resource_demands must be
            placed on a different entry in `node_resources`.

    Returns:
        List[ResourceDict]: the residual list resources that do not fit.
        List[ResourceDict]: The updated node_resources after the method.
            The order of the list elements remains unchanged.
    """
    unfulfilled = []

    # A most naive bin packing algorithm.
    # The input is deep-copied so the caller's node_resources is never
    # mutated; only the returned copy has demands subtracted.
    nodes = copy.deepcopy(node_resources)
    # List of nodes that cannot be used again due to strict spread.
    used = set()
    # We order the resource demands in the following way:
    # More complex demands first.
    # Break ties: heavier demands first.
    # Break ties: lexicographically (to ensure stable ordering).
    for demand in sorted(
        resource_demands,
        key=lambda demand: (
            len(demand.values()),
            sum(demand.values()),
            sorted(demand.items()),
        ),
        reverse=True,
    ):
        found = False
        node = None
        # First-fit: scan nodes in order and take the first that fits.
        for i in range(len(nodes)):
            if i in used:
                continue
            node = nodes[i]
            if _fits(node, demand):
                found = True
                # In the strict_spread case, we can't reuse nodes.
                if strict_spread:
                    used.add(i)
                break
        if found and node:
            _inplace_subtract(node, demand)
        else:
            unfulfilled.append(demand)

    return unfulfilled, nodes
def _fits(node: ResourceDict, resources: ResourceDict) -> bool:
    """Return True if every demanded quantity in ``resources`` fits in ``node``.

    Resources absent from ``node`` default to 0.0, except Ray implicit
    resources (keys with the implicit-resource prefix) which default to 1.0.
    """
    for k, v in resources.items():
        # TODO(jjyao): Change ResourceDict to a class so we can
        # hide the implicit resource handling.
        if v > node.get(
            k, 1.0 if k.startswith(ray._raylet.IMPLICIT_RESOURCE_PREFIX) else 0.0
        ):
            return False
    return True
def _inplace_subtract(node: ResourceDict, resources: ResourceDict) -> None:
    """Subtract ``resources`` from ``node`` in place.

    Implicit resources missing from ``node`` are materialized at their
    default capacity of 1 before subtracting. Asserts (only with
    assertions enabled) that no resource goes negative — callers are
    expected to check ``_fits`` first.
    """
    for k, v in resources.items():
        if v == 0:
            # This is an edge case since some reasonable programs/computers can
            # do `ray.autoscaler.sdk.request_resources({"GPU": 0}"})`.
            continue
        if k not in node:
            # Only implicit resources may be absent; their full capacity is 1.
            assert k.startswith(ray._raylet.IMPLICIT_RESOURCE_PREFIX), (k, node)
            node[k] = 1
        assert k in node, (k, node)
        node[k] -= v
        assert node[k] >= 0.0, (node, k, v)
def _inplace_add(a: collections.defaultdict, b: Dict) -> None:
    """Accumulate every value of ``b`` into ``a`` in place.

    ``a[k]`` should be defined (or default-constructible, since ``a`` is a
    defaultdict) for all k in b.keys(). Returns None like other in-place
    mutators.
    """
    for key in b:
        a[key] += b[key]
def placement_groups_to_resource_demands(
    pending_placement_groups: List[PlacementGroupTableData],
) -> Tuple[List[ResourceDict], List[List[ResourceDict]]]:
    """Preprocess placement group requests into regular resource demand vectors
    when possible. The policy is:
        * STRICT_PACK - Convert to a single bundle.
        * PACK - Flatten into a resource demand vector.
        * STRICT_SPREAD - Cannot be converted.
        * SPREAD - Flatten into a resource demand vector.

    Args:
        pending_placement_groups (List[PlacementGroupData]): List of
            PlacementGroupLoad's.

    Returns:
        List[ResourceDict]: The placement groups which were converted to a
            resource demand vector.
        List[List[ResourceDict]]: The placement groups which should be strictly
            spread.
    """
    resource_demand_vector = []
    unconverted = []
    for placement_group in pending_placement_groups:
        # Skip **placed** bundle (which has node id associated with it).
        shapes = []
        for bundle in placement_group.bundles:
            if bundle.node_id != b"":
                continue
            shapes.append(dict(bundle.unit_resources))

        if (
            placement_group.strategy == PlacementStrategy.PACK
            or placement_group.strategy == PlacementStrategy.SPREAD
        ):
            resource_demand_vector.extend(shapes)
        elif placement_group.strategy == PlacementStrategy.STRICT_PACK:
            # STRICT_PACK bundles all land on one node, so they are summed
            # into a single combined demand.
            combined = collections.defaultdict(float)
            for shape in shapes:
                for label, quantity in shape.items():
                    combined[label] += quantity
            resource_demand_vector.append(combined)
        elif placement_group.strategy == PlacementStrategy.STRICT_SPREAD:
            unconverted.append(shapes)
        else:
            logger.error(
                f"Unknown placement group request type: {placement_group}. "
                f"Please file a bug report "
                f"https://github.com/ray-project/ray/issues/new."
            )
    return resource_demand_vector, unconverted
| ResourceDemandScheduler |
python | wandb__wandb | wandb/vendor/pygments/lexers/supercollider.py | {
"start": 457,
"end": 3516
class ____(RegexLexer):
    """
    For `SuperCollider <http://supercollider.github.io/>`_ source code.

    .. versionadded:: 2.1
    """

    name = 'SuperCollider'
    aliases = ['sc', 'supercollider']
    filenames = ['*.sc', '*.scd']
    mimetypes = ['application/supercollider', 'text/supercollider', ]

    flags = re.DOTALL | re.MULTILINE
    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'<!--', Comment),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline)
        ],
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            (r'(?=/)', Text, ('#pop', 'badregex')),
            default('#pop'),
        ],
        'badregex': [
            (r'\n', Text, '#pop')
        ],
        'root': [
            (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
             r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            (words((
                'for', 'in', 'while', 'do', 'break', 'return', 'continue',
                'switch', 'case', 'default', 'if', 'else', 'throw', 'try',
                'catch', 'finally', 'new', 'delete', 'typeof', 'instanceof',
                'void'), suffix=r'\b'),
             Keyword, 'slashstartsregex'),
            (words(('var', 'let', 'with', 'function', 'arg'), suffix=r'\b'),
             Keyword.Declaration, 'slashstartsregex'),
            # Fixed: the first reserved word was '(abstract' (stray paren),
            # which could never match since '(' is consumed as Punctuation.
            (words((
                'abstract', 'boolean', 'byte', 'char', 'class', 'const',
                'debugger', 'double', 'enum', 'export', 'extends', 'final',
                'float', 'goto', 'implements', 'import', 'int', 'interface',
                'long', 'native', 'package', 'private', 'protected', 'public',
                'short', 'static', 'super', 'synchronized', 'throws',
                'transient', 'volatile'), suffix=r'\b'),
             Keyword.Reserved),
            (words(('true', 'false', 'nil', 'inf'), suffix=r'\b'), Keyword.Constant),
            (words((
                'Array', 'Boolean', 'Date', 'Error', 'Function', 'Number',
                'Object', 'Packages', 'RegExp', 'String',
                'isFinite', 'isNaN', 'parseFloat', 'parseInt', 'super',
                'thisFunctionDef', 'thisFunction', 'thisMethod', 'thisProcess',
                'thisThread', 'this'), suffix=r'\b'),
             Name.Builtin),
            (r'[$a-zA-Z_]\w*', Name.Other),
            (r'\\?[$a-zA-Z_]\w*', String.Symbol),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
        ]
    }
| SuperColliderLexer |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-operations-with-the-same-score-i.py | {
"start": 37,
"end": 378
class ____(object):
    def maxOperations(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Counts how many leading disjoint pairs of `nums` all share the
        score of the first pair. O(len(nums)) time, O(1) space.
        """
        # The first pair fixes the required score.
        result = 1
        target = nums[0] + nums[1]
        # Use `range` (not the Python-2-only `xrange`, which raises
        # NameError on Python 3); semantics are identical here.
        for i in range(2, len(nums) - 1, 2):
            if nums[i] + nums[i + 1] != target:
                break
            result += 1
        return result
| Solution |
python | pytorch__pytorch | test/dynamo/test_backends.py | {
"start": 648,
"end": 982
class ____(torch.nn.Module):
    """Small MLP: Linear(10->10) -> ReLU -> Linear(10->10) -> Sigmoid."""

    def __init__(self) -> None:
        super().__init__()
        stack = [
            torch.nn.Linear(10, 10),
            torch.nn.ReLU(),
            torch.nn.Linear(10, 10),
            torch.nn.Sigmoid(),
        ]
        # Unpacking into Sequential keeps the same submodule indices
        # (layers.0 .. layers.3), so state_dict keys are unchanged.
        self.layers = torch.nn.Sequential(*stack)

    def forward(self, x):
        """Apply the layer stack to ``x`` and return the activations."""
        return self.layers(x)
| Seq |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/gcs.py | {
"start": 1744,
"end": 6212
class ____(GoogleCloudBaseOperator):
    """
    Creates a new bucket.

    Google Cloud Storage uses a flat namespace, so you
    can't create a bucket with a name that is already in use.

    .. seealso::
        For more information, see Bucket Naming Guidelines:
        https://cloud.google.com/storage/docs/bucketnaming.html#requirements

    :param bucket_name: The name of the bucket. (templated)
    :param resource: An optional dict with parameters for creating the bucket.
        For information on available parameters, see Cloud Storage API doc:
        https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
    :param storage_class: This defines how objects in the bucket are stored
        and determines the SLA and the cost of storage (templated). Values include

        - ``MULTI_REGIONAL``
        - ``REGIONAL``
        - ``STANDARD``
        - ``NEARLINE``
        - ``COLDLINE``.

        If this value is not specified when the bucket is
        created, it will default to STANDARD.
    :param location: The location of the bucket. (templated)
        Object data for objects in the bucket resides in physical storage
        within this region. Defaults to US.

        .. seealso:: https://developers.google.com/storage/docs/bucket-locations

    :param project_id: The ID of the Google Cloud Project. (templated)
    :param labels: User-provided labels, in key/value pairs.
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).

    The following Operator would create a new bucket ``test-bucket``
    with ``MULTI_REGIONAL`` storage class in ``EU`` region

    .. code-block:: python

        CreateBucket = GCSCreateBucketOperator(
            task_id="CreateNewBucket",
            bucket_name="test-bucket",
            storage_class="MULTI_REGIONAL",
            location="EU",
            labels={"env": "dev", "team": "airflow"},
            gcp_conn_id="airflow-conn-id",
        )
    """

    # Fields rendered by Airflow's Jinja templating before execute().
    template_fields: Sequence[str] = (
        "bucket_name",
        "storage_class",
        "location",
        "project_id",
        "gcp_conn_id",
        "impersonation_chain",
    )
    ui_color = "#f0eee4"
    operator_extra_links = (StorageLink(),)

    def __init__(
        self,
        *,
        bucket_name: str,
        resource: dict | None = None,
        storage_class: str = "MULTI_REGIONAL",
        location: str = "US",
        project_id: str = PROVIDE_PROJECT_ID,
        labels: dict | None = None,
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: str | Sequence[str] | None = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.bucket_name = bucket_name
        self.resource = resource
        self.storage_class = storage_class
        self.location = location
        self.project_id = project_id
        self.labels = labels
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: Context) -> None:
        """Create the bucket; an already-existing bucket is not an error."""
        hook = GCSHook(
            gcp_conn_id=self.gcp_conn_id,
            impersonation_chain=self.impersonation_chain,
        )
        # Record the console link even if creation short-circuits below.
        StorageLink.persist(
            context=context,
            uri=self.bucket_name,
            project_id=self.project_id or hook.project_id,
        )
        try:
            hook.create_bucket(
                bucket_name=self.bucket_name,
                resource=self.resource,
                storage_class=self.storage_class,
                location=self.location,
                project_id=self.project_id,
                labels=self.labels,
            )
        except Conflict:  # HTTP 409
            # Idempotent behavior: creating an existing bucket only warns.
            self.log.warning("Bucket %s already exists", self.bucket_name)
| GCSCreateBucketOperator |
python | keras-team__keras | keras/src/layers/preprocessing/image_preprocessing/random_color_degeneration.py | {
"start": 290,
"end": 4896
class ____(BaseImagePreprocessingLayer):
    """Randomly performs the color degeneration operation on given images.

    The sharpness operation first converts an image to gray scale, then back to
    color. It then takes a weighted average between original image and the
    degenerated image. This makes colors appear more dull.

    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).

    Args:
        factor: A tuple of two floats or a single float.
            `factor` controls the extent to which the
            image sharpness is impacted. `factor=0.0` makes this layer perform a
            no-op operation, while a value of 1.0 uses the degenerated result
            entirely. Values between 0 and 1 result in linear interpolation
            between the original image and the sharpened image.
            Values should be between `0.0` and `1.0`. If a tuple is used, a
            `factor` is sampled between the two values for every image
            augmented. If a single float is used, a value between `0.0` and the
            passed float is sampled. In order to ensure the value is always the
            same, please pass a tuple with two identical floats: `(0.5, 0.5)`.
        seed: Integer. Used to create a random seed.
    """

    _VALUE_RANGE_VALIDATION_ERROR = (
        "The `value_range` argument should be a list of two numbers. "
    )

    def __init__(
        self,
        factor,
        value_range=(0, 255),
        data_format=None,
        seed=None,
        **kwargs,
    ):
        super().__init__(data_format=data_format, **kwargs)
        self._set_factor(factor)
        self._set_value_range(value_range)
        self.seed = seed
        self.generator = SeedGenerator(seed)

    def _set_value_range(self, value_range):
        """Validate `value_range` and store it as a sorted [low, high] pair."""
        if not isinstance(value_range, (tuple, list)):
            raise ValueError(
                self._VALUE_RANGE_VALIDATION_ERROR
                + f"Received: value_range={value_range}"
            )
        if len(value_range) != 2:
            raise ValueError(
                self._VALUE_RANGE_VALIDATION_ERROR
                + f"Received: value_range={value_range}"
            )
        self.value_range = sorted(value_range)

    def get_random_transformation(self, data, training=True, seed=None):
        """Sample one blend factor per image, shaped for broadcasting."""
        if isinstance(data, dict):
            images = data["images"]
        else:
            images = data
        images_shape = self.backend.shape(images)
        rank = len(images_shape)
        if rank == 3:
            # Single unbatched image.
            batch_size = 1
        elif rank == 4:
            batch_size = images_shape[0]
        else:
            raise ValueError(
                "Expected the input image to be rank 3 or 4. Received: "
                f"inputs.shape={images_shape}"
            )

        if seed is None:
            seed = self._get_seed_generator(self.backend._backend)

        # Shape (batch, 1, 1, 1) broadcasts over height/width/channels.
        # (A redundant `factor = factor` self-assignment was removed here.)
        factor = self.backend.random.uniform(
            (batch_size, 1, 1, 1),
            minval=self.factor[0],
            maxval=self.factor[1],
            seed=seed,
        )
        return {"factor": factor}

    def transform_images(self, images, transformation=None, training=True):
        """Blend each image toward its grayscale version by the sampled factor."""
        if training:
            images = self.backend.cast(images, self.compute_dtype)
            factor = self.backend.cast(
                transformation["factor"], self.compute_dtype
            )
            degenerates = self.backend.image.rgb_to_grayscale(
                images, data_format=self.data_format
            )
            # Linear interpolation: factor=0 keeps the input, factor=1
            # yields the grayscale ("degenerated") image.
            images = images + factor * (degenerates - images)
            images = self.backend.numpy.clip(
                images, self.value_range[0], self.value_range[1]
            )
            images = self.backend.cast(images, self.compute_dtype)
        return images

    def transform_labels(self, labels, transformation, training=True):
        # Color degeneration never alters labels.
        return labels

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        # Masks are label maps, not colors; pass through unchanged.
        return segmentation_masks

    def transform_bounding_boxes(
        self, bounding_boxes, transformation, training=True
    ):
        # Geometry is unaffected by a color-only transform.
        return bounding_boxes

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "factor": self.factor,
                "value_range": self.value_range,
                "seed": self.seed,
            }
        )
        return config

    def compute_output_shape(self, input_shape):
        return input_shape
| RandomColorDegeneration |
python | fastapi__sqlmodel | docs_src/tutorial/delete/tutorial001.py | {
"start": 100,
"end": 2796
class ____(SQLModel, table=True):
    """Hero table model backing the tutorial's CRUD examples."""

    # Auto-incrementing primary key; None until the row is persisted.
    id: Optional[int] = Field(default=None, primary_key=True)
    name: str = Field(index=True)
    secret_name: str
    # Optional and indexed to support age-based queries.
    age: Optional[int] = Field(default=None, index=True)
# SQLite connection settings; echo=True logs every emitted SQL statement.
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"

engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
    """Create all tables registered on SQLModel's metadata (no-op if present)."""
    SQLModel.metadata.create_all(engine)
def create_heroes():
    """Insert the tutorial's sample hero rows in a single transaction."""
    heroes = [
        Hero(name="Deadpond", secret_name="Dive Wilson"),
        Hero(name="Spider-Boy", secret_name="Pedro Parqueador"),
        Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48),
        Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32),
        Hero(name="Black Lion", secret_name="Trevor Challa", age=35),
        Hero(name="Dr. Weird", secret_name="Steve Weird", age=36),
        Hero(name="Captain North America", secret_name="Esteban Rogelios", age=93),
    ]

    with Session(engine) as session:
        for hero in heroes:
            session.add(hero)
        session.commit()
def update_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.name == "Spider-Boy")
results = session.exec(statement)
hero_1 = results.one()
print("Hero 1:", hero_1)
statement = select(Hero).where(Hero.name == "Captain North America")
results = session.exec(statement)
hero_2 = results.one()
print("Hero 2:", hero_2)
hero_1.age = 16
hero_1.name = "Spider-Youngster"
session.add(hero_1)
hero_2.name = "Captain North America Except Canada"
hero_2.age = 110
session.add(hero_2)
session.commit()
session.refresh(hero_1)
session.refresh(hero_2)
print("Updated hero 1:", hero_1)
print("Updated hero 2:", hero_2)
def delete_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.name == "Spider-Youngster")
results = session.exec(statement)
hero = results.one()
print("Hero: ", hero)
session.delete(hero)
session.commit()
print("Deleted hero:", hero)
statement = select(Hero).where(Hero.name == "Spider-Youngster")
results = session.exec(statement)
hero = results.first()
if hero is None:
print("There's no hero named Spider-Youngster")
def main():
create_db_and_tables()
create_heroes()
update_heroes()
delete_heroes()
if __name__ == "__main__":
main()
| Hero |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/dms.py | {
"start": 30976,
"end": 34658
} | class ____(AwsBaseOperator[DmsHook]):
"""
Stops an AWS DMS Serverless replication.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DmsStopReplicationOperator`
:param replication_config_arn: ARN of the replication config
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
"""
STOPPED_STATES = ["stopped"]
NON_STOPPABLE_STATES = ["stopped"]
aws_hook_class = DmsHook
template_fields: Sequence[str] = aws_template_fields("replication_config_arn")
def __init__(
self,
*,
replication_config_arn: str,
wait_for_completion: bool = True,
waiter_delay: int = 30,
waiter_max_attempts: int = 60,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
aws_conn_id: str | None = "aws_default",
**kwargs,
):
super().__init__(
aws_conn_id=aws_conn_id,
**kwargs,
)
self.replication_config_arn = replication_config_arn
self.wait_for_completion = wait_for_completion
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.deferrable = deferrable
def execute(self, context: Context) -> None:
results = self.hook.describe_replications(
filters=[{"Name": "replication-config-arn", "Values": [self.replication_config_arn]}]
)
current_state = results[0].get("Status", "")
self.log.info(
"Current state of replication config(%s) is %s.", self.replication_config_arn, current_state
)
if current_state.lower() in self.STOPPED_STATES:
self.log.info("DMS replication config(%s) is already stopped.", self.replication_config_arn)
else:
resp = self.hook.stop_replication(self.replication_config_arn)
status = resp.get("Replication", {}).get("Status", "Unknown")
self.log.info(
"Stopping DMS replication config(%s). Current status: %s", self.replication_config_arn, status
)
if self.wait_for_completion:
self.log.info("Waiting for %s replication to stop.", self.replication_config_arn)
if self.deferrable:
self.log.info("Deferring until %s replication stops.", self.replication_config_arn)
self.defer(
trigger=DmsReplicationStoppedTrigger(
replication_config_arn=self.replication_config_arn,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
)
self.hook.get_waiter("replication_stopped").wait(
Filters=[{"Name": "replication-config-arn", "Values": [self.replication_config_arn]}],
WaiterConfig={"Delay": self.waiter_delay, "MaxAttempts": self.waiter_max_attempts},
)
def execute_complete(self, context, event=None):
self.replication_config_arn = event.get("replication_config_arn")
self.log.info("Replication(%s) has stopped.", self.replication_config_arn)
| DmsStopReplicationOperator |
python | joke2k__faker | faker/providers/company/fr_CH/__init__.py | {
"start": 75,
"end": 1312
} | class ____(CompanyProvider):
company_suffixes = ("SA", "Sàrl.")
def ide(self) -> str:
"""
Generates a IDE number (9 digits).
http://www.bfs.admin.ch/bfs/portal/fr/index/themen/00/05/blank/03/02.html
"""
def _checksum(digits: List[int]) -> int:
factors = (5, 4, 3, 2, 7, 6, 5, 4)
sum_ = 0
for i in range(len(digits)):
sum_ += digits[i] * factors[i]
return sum_ % 11
while True:
# create an array of first 8 elements initialized randomly
digits = self.generator.random.sample(range(10), 8)
# sum those 8 digits according to (part of) the "modulo 11"
sum_ = _checksum(digits)
# determine the last digit to make it qualify the test
control_number = 11 - sum_
if control_number != 10:
digits.append(control_number)
break
digits = "".join([str(digit) for digit in digits])
# finally return our random but valid BSN
return "CHE-" + digits[0:3] + "." + digits[3:6] + "." + digits[6:9]
uid = ide
# uid: german name for ide
idi = ide
# idi: italian name for ide
| Provider |
python | kamyu104__LeetCode-Solutions | Python/rearrange-array-elements-by-sign.py | {
"start": 1031,
"end": 1565
} | class ____(object):
def rearrangeArray(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
pos, neg = [], []
for i in reversed(xrange(len(nums))):
if nums[i] > 0:
pos.append(nums[i])
else:
neg.append(nums[i])
result = []
for i in xrange(len(nums)):
if i%2 == 0:
result.append(pos.pop())
else:
result.append(neg.pop())
return result
| Solution3 |
python | django__django | tests/i18n/test_extraction.py | {
"start": 32972,
"end": 34470
} | class ____(ExtractorTests):
def test_ignore_directory(self):
out, po_contents = self._run_makemessages(
ignore_patterns=[
os.path.join("ignore_dir", "*"),
]
)
self.assertIn("ignoring directory ignore_dir", out)
self.assertMsgId("This literal should be included.", po_contents)
self.assertNotMsgId("This should be ignored.", po_contents)
def test_ignore_subdirectory(self):
out, po_contents = self._run_makemessages(
ignore_patterns=[
"templates/*/ignore.html",
"templates/subdir/*",
]
)
self.assertIn("ignoring directory subdir", out)
self.assertNotMsgId("This subdir should be ignored too.", po_contents)
def test_ignore_file_patterns(self):
out, po_contents = self._run_makemessages(
ignore_patterns=[
"xxx_*",
]
)
self.assertIn("ignoring file xxx_ignored.html", out)
self.assertNotMsgId("This should be ignored too.", po_contents)
def test_media_static_dirs_ignored(self):
with override_settings(
STATIC_ROOT=os.path.join(self.test_dir, "static/"),
MEDIA_ROOT=os.path.join(self.test_dir, "media_root/"),
):
out, _ = self._run_makemessages()
self.assertIn("ignoring directory static", out)
self.assertIn("ignoring directory media_root", out)
| IgnoredExtractorTests |
python | ZoranPandovski__al-go-rithms | puzzles/CollectRubicCube/Python/solve_rubic_cube.py | {
"start": 10235,
"end": 10629
} | class ____:
def __init__(self):
self.items = []
def empty(self):
return len(self.items) == 0
def pushBack(self, item):
self.items.append(item)
def popFront(self):
if self.empty():
raise Exception("Queue: 'popFront' applied to empty container")
return self.items.pop(0)
def size(self):
return len(self.items)
| Queue |
python | dask__dask | dask/dataframe/dask_expr/_cumulative.py | {
"start": 1644,
"end": 1970
} | class ____(Blockwise):
_parameters = ["frame", "skipna"]
_projection_passthrough = True
@staticmethod
def operation(a, skipna=True):
if skipna:
if a.ndim == 1 and (a.empty or a.isna().all()):
return None
a = a.ffill()
return a.tail(n=1).squeeze()
| TakeLast |
python | pypa__pip | docs/pip_sphinxext.py | {
"start": 6297,
"end": 6571
} | class ____(PipOptions):
required_arguments = 1
def process_options(self) -> None:
cmd_name = self.arguments[0]
self._format_options(
[o() for o in cmdoptions.index_group["options"]],
cmd_name=cmd_name,
)
| PipIndexOptions |
python | wandb__wandb | tools/perf/scripts/bench_run_log.py | {
"start": 11258,
"end": 24301
} | class ____:
"""A class to run the performance test.
Args:
num_steps: The number of logging steps per run.
num_metrics: The number of metrics to log per step.
metric_key_size: The length of metric names.
output_file: The output file to store the performance test results.
data_type: The wandb data type to log.
is_unique_payload: Whether to use a new set of metrics or reuse the same set for each step.
time_delay_second: Sleep time between step.
run_id: ID of the existing run to resume from.
resume_mode: The mode of resuming. Used when run_id is passed in.
fraction: The % (in fraction) of metrics to log in each step.
dense_metric_count: Number of dense metrics to be logged every step.
The dense metrics is a separate set of metrics from the sparse metrics.
fork_from: The fork from string (formatted) e.g. f"{original_run.id}?_step=200"
project: The W&B project name to log to
sparse_stride_size: The number of steps to skip before logging the sparse metrics
starting_global_step: The starting global step for this run
mode: The mode to run the experiment. Defaults to "online".
When to set "is_unique_payload" to True?
Performance benchmarks are usually done on the basic use case to form the baseline, then on top
of it, scale tests of various dimensions are run (# of steps, # of metrics, metric size, etc) to
characterize its scalability.
For benchmarks or regression detection testings, set is_unique_payload to False (default). For stress
testings or simulating huge workload w/ million+ metrics, set is_unique_payload to True.
"""
def __init__(
self,
*,
num_steps: int = 10,
num_metrics: int = 100,
metric_key_size: int = 10,
output_file: str = "results.json",
data_type: Literal[
"scalar", "audio", "video", "image", "table", "prefixed_scalar"
] = "scalar",
is_unique_payload: bool = False,
time_delay_second: float = 0.0,
run_id: str | None = None,
resume_mode: str | None = None,
fraction: float = 1.0,
dense_metric_count: int = 0,
fork_from: str | None = None,
project: str = "perf-test",
sparse_stride_size: int = 0,
starting_global_step: int = 0,
mode: Literal["shared", "online"] = "online",
):
self.num_steps = num_steps
self.num_metrics = num_metrics
self.metric_key_size = metric_key_size
self.output_file = output_file
self.data_type = data_type
self.is_unique_payload = is_unique_payload
self.time_delay_second = time_delay_second
self.run_id = run_id
self.resume_mode = resume_mode
self.fraction = fraction
self.dense_metric_count = dense_metric_count
self.fork_from = fork_from
self.project = project
self.sparse_stride_size = sparse_stride_size
self.starting_global_step = starting_global_step
self.mode = mode
def run(self, repeat: int = 1):
for _ in range(repeat):
self.single_run()
def parallel_runs(self, num_of_parallel_runs: int = 1):
"""Runs multiple instances of single_run() in parallel processes.
Args:
num_of_parallel_runs (int): Number of parallel runs to execute.
"""
wandb.setup()
processes = []
for i in range(num_of_parallel_runs):
p = mp.Process(target=self.run)
p.start()
logger.info(f"The {i}-th process (pid: {p.pid}) has started.")
processes.append(p)
for p in processes:
p.join()
def single_run(self):
"""Run a simple experiment to log metrics to W&B.
Measuring the time for init(), log(), and finish() operations.
"""
start_time = datetime.now()
start_time_str = start_time.strftime("%m%d%YT%H%M%S")
logger.info(f"Test start time: {start_time_str}")
result_data = {
"num_steps": self.num_steps,
"num_metrics": self.num_metrics,
"metric_key_size": self.metric_key_size,
"data_type": self.data_type,
}
# Initialize W&B
with Timer() as timer:
name = (
f"perf_run={start_time_str}_steps={self.num_steps}_metrics={self.num_metrics}"
if self.run_id is None
else None
)
init_timeout = 600 if self.fork_from else 90
run = wandb.init(
project=self.project,
name=name,
id=self.run_id,
mode=self.mode,
resume=self.resume_mode,
fork_from=self.fork_from,
config=result_data if self.run_id is None else None,
settings=wandb.Settings(
init_timeout=init_timeout,
),
)
if self.run_id is None:
logger.info(f"New run {run.id} initialized.")
elif self.resume_mode:
logger.info(f"Resuming run {self.run_id} with {self.resume_mode}.")
if self.mode == "shared":
logger.info(f"Shared mode enabled, logging to run {self.run_id}.")
result_data["init_time"] = timer.stop()
# pre-generate all the payloads
logger.info("Generating test payloads ...")
generator = PayloadGenerator(
data_type=self.data_type,
sparse_metric_count=self.num_metrics,
metric_key_size=self.metric_key_size,
num_steps=self.num_steps,
fraction=self.fraction,
is_unique_payload=self.is_unique_payload,
dense_metric_count=self.dense_metric_count,
sparse_stride_size=self.sparse_stride_size,
)
payloads = generator.generate()
logger.info(f"Start logging {self.num_steps} steps ...")
with Timer() as timer:
for s in range(self.num_steps):
global_values = {}
global_values["global_step"] = self.starting_global_step + s
if self.is_unique_payload or self.fraction < 1.0:
run.log({**global_values, **(payloads[s % len(payloads)])})
else:
if self.sparse_stride_size > 0 and s % self.sparse_stride_size == 0:
# log the sparse + dense metrics
run.log(
{
**global_values,
**(generator.sparse_metrics),
**(payloads[0]),
}
)
else:
# log only the dense metric
run.log({**global_values, **(payloads[0])})
if self.time_delay_second > 0:
time.sleep(self.time_delay_second)
result_data["log_time"] = timer.stop()
result_data["run_id"] = run.id
# compute the log() throughput rps (request per sec)
if result_data["log_time"] == 0:
logger.warning("the measured time for log() is 0.")
# Setting it to 0.1ms to avoid failing the math.
result_data["log_time"] = 0.01
# adjust for the sleep time injected
if self.time_delay_second > 0:
result_data["log_time"] -= self.time_delay_second * self.num_steps
result_data["log_rps"] = round(self.num_steps / result_data["log_time"], 2)
# Finish W&B run
with Timer() as timer:
run.finish()
result_data["finish_time"] = timer.stop()
# Display experiment timing
run_time = (
result_data["init_time"]
+ result_data["log_time"]
+ result_data["finish_time"]
)
result_data["sdk_run_time"] = round(run_time, 2)
# write the result data to a json file
with open(self.output_file, "w") as file:
json.dump(result_data, file, indent=4)
logger.info(json.dumps(result_data, indent=4))
total_time = (datetime.now() - start_time).total_seconds()
logger.info(f"\nTotal run duration: {total_time:.2f} seconds")
if __name__ == "__main__":
setup_package_logger()
parser = argparse.ArgumentParser()
parser.add_argument(
"-r",
"--repeat",
type=int,
default=1,
help="The number of times to repeat the experiment.",
)
parser.add_argument(
"-s",
"--steps",
type=int,
default=10,
help="The number of logging steps per run.",
)
parser.add_argument(
"-n",
"--num-metrics",
type=int,
default=100,
help="The number of sparse metrics to log per step (optional: "
"use together with -f to control %).",
)
parser.add_argument(
"-m",
"--metric-key-size",
type=int,
default=10,
help='The length of metric names. If the --data-type is "video", '
"then this represents the video length in second.",
)
parser.add_argument(
"-o",
"--outfile",
type=str,
default="results.json",
help="The output file to store the performance test results.",
)
parser.add_argument(
"-d",
"--data-type",
type=str,
choices=["scalar", "audio", "video", "image", "table", "prefixed_scalar"],
default="scalar",
help="The wandb data type to log. Defaults to scalar.",
)
parser.add_argument(
"-u",
"--unique-payload",
type=bool,
default=False,
help="If false, it logs the same payload at each step. "
"If true, each step has different payload.",
)
parser.add_argument(
"-t",
"--time-delay-second",
type=float,
default=0,
help="The sleep time between step in seconds e.g. -t 1.0",
)
parser.add_argument(
"-i",
"--run-id",
type=str,
help="The run id. e.g. -i 123abc to resume this run id.",
)
parser.add_argument(
"-j",
"--resume-mode",
type=str,
choices=["must", "allow", "never"],
default=None,
help="Use with --run-id. The resume mode.",
)
parser.add_argument(
"-g",
"--global-step",
type=int,
default=0,
help="Set the global_step",
)
parser.add_argument(
"-f",
"--fraction",
type=float,
default=1.0,
help="The fraction (i.e. percentage) of sparse metrics to log in each step.",
)
parser.add_argument(
"-c",
"--dense_metric_count",
type=int,
default=0,
help="The number of dense metrics that are logged at every step. "
"This is a separate set from the sparse metrics.",
)
parser.add_argument(
"-x",
"--fork-run-id",
type=str,
help="The source run's id to fork from.",
)
parser.add_argument(
"-y",
"--fork-step",
type=str,
default="1",
help="The step to fork from.",
)
parser.add_argument(
"-p",
"--project",
type=str,
default="perf-test",
help="The W&B project to log to.",
)
parser.add_argument(
"-z",
"--parallel",
type=int,
default=1,
help="The number of wandb instances to launch",
)
parser.add_argument(
"-w",
"--sparse-stride-size",
type=int,
default=0,
help="The number of steps to skip for logging the sparse payload",
)
parser.add_argument(
"-a",
"--mode",
type=str,
choices=["shared", "online"],
default="online",
help="The mode to run the experiment.",
)
args = parser.parse_args()
fork_from: str | None = None
if args.fork_run_id:
fork_from = f"{args.fork_run_id}?_step={args.fork_step}"
logger.info(f"Setting fork_from = {fork_from}")
experiment = Experiment(
num_steps=args.steps,
num_metrics=args.num_metrics,
metric_key_size=args.metric_key_size,
output_file=args.outfile,
data_type=args.data_type,
is_unique_payload=args.unique_payload,
time_delay_second=args.time_delay_second,
run_id=args.run_id,
resume_mode=args.resume_mode,
fraction=args.fraction,
dense_metric_count=args.dense_metric_count,
fork_from=fork_from,
project=args.project,
sparse_stride_size=args.sparse_stride_size,
starting_global_step=args.global_step,
mode=args.mode,
)
experiment.parallel_runs(args.parallel)
| Experiment |
python | django__django | tests/admin_views/admin.py | {
"start": 7333,
"end": 8074
} | class ____(admin.ModelAdmin):
"""
Tests various hooks for using custom templates and contexts.
"""
change_list_template = "custom_admin/change_list.html"
change_form_template = "custom_admin/change_form.html"
add_form_template = "custom_admin/add_form.html"
object_history_template = "custom_admin/object_history.html"
delete_confirmation_template = "custom_admin/delete_confirmation.html"
delete_selected_confirmation_template = (
"custom_admin/delete_selected_confirmation.html"
)
popup_response_template = "custom_admin/popup_response.html"
def changelist_view(self, request):
return super().changelist_view(request, extra_context={"extra_var": "Hello!"})
| CustomArticleAdmin |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/assignment2.py | {
"start": 830,
"end": 1056
} | class ____:
def __setitem__(self, i: int, value: object) -> None: ...
def __getitem__(self, i: int) -> int: ...
v5 = Asymmetric()
v5[0] = 3
reveal_type(v5[0], expected_text="int")
v6 = [1, 2, 3]
v6[1:] = []
| Asymmetric |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 571953,
"end": 572711
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for DeploymentReview."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("DeploymentReviewEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("DeploymentReview"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| DeploymentReviewConnection |
python | kamyu104__LeetCode-Solutions | Python/number-of-ways-of-cutting-a-pizza.py | {
"start": 55,
"end": 1439
} | class ____(object):
def ways(self, pizza, k):
"""
:type pizza: List[str]
:type k: int
:rtype: int
"""
MOD = 10**9+7
prefix = [[0]*len(pizza[0]) for _ in xrange(len(pizza))]
for j in reversed(xrange(len(pizza[0]))):
accu = 0
for i in reversed(xrange(len(pizza))):
accu += int(pizza[i][j] == 'A')
prefix[i][j] = (prefix[i][j+1] if (j+1 < len(pizza[0])) else 0) + accu
dp = [[[0]*k for _ in xrange(len(pizza[0]))] for _ in xrange(len(pizza))]
for i in reversed(xrange(len(pizza))):
for j in reversed(xrange(len(pizza[0]))):
dp[i][j][0] = 1
for m in xrange(1, k):
for n in xrange(i+1, len(pizza)):
if prefix[i][j] == prefix[n][j]:
continue
if prefix[n][j] == 0:
break
dp[i][j][m] = (dp[i][j][m] + dp[n][j][m-1]) % MOD
for n in xrange(j+1, len(pizza[0])):
if prefix[i][j] == prefix[i][n]:
continue
if prefix[i][n] == 0:
break
dp[i][j][m] = (dp[i][j][m] + dp[i][n][m-1]) % MOD
return dp[0][0][k-1]
| Solution |
python | doocs__leetcode | solution/0700-0799/0708.Insert into a Sorted Circular Linked List/Solution.py | {
"start": 140,
"end": 693
} | class ____:
def insert(self, head: 'Optional[Node]', insertVal: int) -> 'Node':
node = Node(insertVal)
if head is None:
node.next = node
return node
prev, curr = head, head.next
while curr != head:
if prev.val <= insertVal <= curr.val or (
prev.val > curr.val and (insertVal >= prev.val or insertVal <= curr.val)
):
break
prev, curr = curr, curr.next
prev.next = node
node.next = curr
return head
| Solution |
python | docker__docker-py | docker/models/secrets.py | {
"start": 537,
"end": 1845
} | class ____(Collection):
"""Secrets on the Docker server."""
model = Secret
def create(self, **kwargs):
obj = self.client.api.create_secret(**kwargs)
obj.setdefault("Spec", {})["Name"] = kwargs.get("name")
return self.prepare_model(obj)
create.__doc__ = APIClient.create_secret.__doc__
def get(self, secret_id):
"""
Get a secret.
Args:
secret_id (str): Secret ID.
Returns:
(:py:class:`Secret`): The secret.
Raises:
:py:class:`docker.errors.NotFound`
If the secret does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_secret(secret_id))
def list(self, **kwargs):
"""
List secrets. Similar to the ``docker secret ls`` command.
Args:
filters (dict): Server-side list filtering options.
Returns:
(list of :py:class:`Secret`): The secrets.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.secrets(**kwargs)
return [self.prepare_model(obj) for obj in resp]
| SecretCollection |
python | doocs__leetcode | solution/1500-1599/1564.Put Boxes Into the Warehouse I/Solution.py | {
"start": 0,
"end": 482
} | class ____:
def maxBoxesInWarehouse(self, boxes: List[int], warehouse: List[int]) -> int:
n = len(warehouse)
left = [warehouse[0]] * n
for i in range(1, n):
left[i] = min(left[i - 1], warehouse[i])
boxes.sort()
i, j = 0, n - 1
while i < len(boxes):
while j >= 0 and left[j] < boxes[i]:
j -= 1
if j < 0:
break
i, j = i + 1, j - 1
return i
| Solution |
python | pytorch__pytorch | test/autograd/test_functional.py | {
"start": 2833,
"end": 60627
} | class ____(TestCase):
def _assert_same_struct(self, res, base):
# base and res should be Tensors or tuple of Tensors with the same size
if isinstance(base, torch.Tensor):
self.assertTrue(isinstance(res, torch.Tensor))
self.assertEqual(base.size(), res.size())
elif isinstance(base, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(base), len(res))
for el_base, el_res in zip(base, res):
self.assertTrue(isinstance(el_base, torch.Tensor))
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertEqual(el_base.size(), el_res.size())
else:
# Wrong base
raise RuntimeError(
"The base given to `_assert_same_struct` doesn't have"
" the right structure."
)
def _assert_interleaved_struct(self, res, base1, base2):
# base1 and base2 can be Tensors or tuples of Tensors.
# If they are tuples, res should be a tuple as well.
# The indexing works as follows for base1, base2 being
# - tuple, tuple: res[i][j][k][l] = (base1[i][k], base2[j][l])
# - tuple, Tensor: res[i][k][l] = (base1[i][k], base2[l])
# - Tensor, tuple: res[i][j][l] = (base1[i], base2[j][l])
# - Tensor, Tensor: res[k][l] = (base1[k], base2[l])
if isinstance(base1, torch.Tensor) and isinstance(base2, torch.Tensor):
self.assertTrue(isinstance(res, torch.Tensor))
self.assertEqual(res.size(), base1.size() + base2.size())
elif isinstance(base1, tuple) and isinstance(base2, torch.Tensor):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base1))
for el_res, el_base1 in zip(res, base1):
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertTrue(isinstance(el_base1, torch.Tensor))
self.assertEqual(el_res.size(), el_base1.size() + base2.size())
elif isinstance(base1, torch.Tensor) and isinstance(base2, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base2))
for el_res, el_base2 in zip(res, base2):
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertTrue(isinstance(el_base2, torch.Tensor))
self.assertEqual(el_res.size(), base1.size() + el_base2.size())
elif isinstance(base1, tuple) and isinstance(base2, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base1))
for el_res, el_base1 in zip(res, base1):
self.assertTrue(isinstance(el_res, tuple))
self.assertEqual(len(res), len(base2))
for el_el_res, el_base2 in zip(el_res, base2):
self.assertTrue(isinstance(el_el_res, torch.Tensor))
self.assertTrue(isinstance(el_base2, torch.Tensor))
self.assertEqual(
el_el_res.size(), el_base1.size() + el_base2.size()
)
else:
# Wrong bases
raise RuntimeError(
"The bases given to `_assert_interleaved_struct` don't have"
" the right structure."
)
@base_and_logging_tensor
def test_vjp_err_check(self, ctors):
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = ctors.rand(4)
v = ctors.ones(3)
with self.assertRaisesRegex(
TypeError, "The inputs given to vjp must be either a Tensor"
):
res = autogradF.vjp(foo, (inp, 2), v)
with self.assertRaisesRegex(
TypeError, "The outputs of the user-provided function given to vjp must"
):
res = autogradF.vjp(bar, inp, v)
with self.assertRaisesRegex(
RuntimeError,
"The vector v can only be None if the user-provided function returns",
):
res = autogradF.vjp(foo, inp)
with self.assertRaisesRegex(
RuntimeError, "The given v should contain a single Tensor."
):
res = autogradF.vjp(foo, inp, (torch.ones_like(inp), torch.ones_like(inp)))
with self.assertRaisesRegex(
RuntimeError, "v has invalid size: should be torch.Size"
):
res = autogradF.vjp(foo, inp, v[:2])
res = autogradF.vjp(foo, inp, v)[1]
self._assert_same_struct(res, inp)
@base_and_logging_tensor
def test_vjp_err_check_strict(self, ctors):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = ctors.rand(4)
v = ctors.rand(4)
with self.assertRaisesRegex(
RuntimeError,
"Output 0 of the user-provided function does not require gradients.",
):
res = autogradF.vjp(foo, inp, v, strict=True)
res = autogradF.vjp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.0)
with self.assertRaisesRegex(
RuntimeError,
"The output of the user-provided function is independent of input 0",
):
res = autogradF.vjp(bar, inp, v, strict=True)
res = autogradF.vjp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.0)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(
RuntimeError,
"jacobian of the user-provided function is independent of input 0.",
):
res = autogradF.vjp(foo, inp, v, create_graph=True, strict=True)
res = autogradF.vjp(foo, inp, v, create_graph=True, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1], v)
@base_and_logging_tensor
def test_vjp_no_grad(self, ctors):
def reducer(x):
return x.sum(dim=1)
inputs = ctors.rand(4, 4)
v = ctors.ones(4)
with torch.no_grad():
res = autogradF.vjp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], ctors.zeros(4, 4))
inputs.requires_grad_()
v.requires_grad_()
with torch.no_grad():
res = autogradF.vjp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], ctors.zeros(4, 4))
@base_and_logging_tensor
def test_vjp_output(self, ctors):
def reducer(x):
return x.sum(dim=1)
inputs = ctors.rand(4, 4)
v = ctors.ones(4)
res = autogradF.vjp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y
inputs = (ctors.rand(2), ctors.rand(2))
v = ctors.ones(2)
out, vjp_val = autogradF.vjp(adder, inputs, v)
self._assert_same_struct(vjp_val, inputs)
self.assertIsNone(out.grad_fn)
self.assertIsNone(vjp_val[0].grad_fn)
self.assertIsNone(vjp_val[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y, x + y
inputs = (ctors.rand(2), ctors.rand(2))
v = (ctors.tensor([1.0, 0.0]), ctors.tensor([1.0, 0.0]))
out, vjp_val = autogradF.vjp(adder, inputs, v)
self._assert_same_struct(vjp_val, inputs)
self.assertIsNone(out[0].grad_fn)
self.assertIsNone(out[1].grad_fn)
self.assertIsNone(vjp_val[0].grad_fn)
self.assertIsNone(vjp_val[1].grad_fn)
@base_and_logging_tensor
def test_vjp_scalar(self, ctors):
def reducer(x):
return x.sum()
inputs = ctors.rand(4, 4)
v = ctors.ones([])
res = autogradF.vjp(reducer, inputs, v)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
res = autogradF.vjp(reducer, inputs)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = ctors.rand([])
v = ctors.ones(4)
res = autogradF.vjp(expander, inputs, v)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
@base_and_logging_tensor
def test_vjp_create_graph(self, ctors):
def reducer(x):
return x.sum(dim=1)
inputs = ctors.rand(2, 2, dtype=torch.double)
v = ctors.ones(2, dtype=torch.double)
inputs.requires_grad_()
v.requires_grad_()
res = autogradF.vjp(reducer, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(
lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True),
(inputs, v),
)
gradgradcheck(
lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True),
(inputs, v),
)
def adder(x, y):
return 2 * x + 3 * y, x * y
inputs = (
ctors.rand(2, dtype=torch.double, requires_grad=True),
ctors.rand(2, dtype=torch.double, requires_grad=True),
)
v = (
ctors.tensor([1.0, 0.0], dtype=torch.double, requires_grad=True),
ctors.tensor([1.0, 0.0], dtype=torch.double, requires_grad=True),
)
gradcheck(
lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[
1
],
inputs + v,
)
gradgradcheck(
lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[
1
],
inputs + v,
)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.vjp(adder, (x, y), v, create_graph=True)
return (
val[0].exp()
+ val[1].exp()
+ grad[0].exp()
+ grad[1].exp()
+ x.exp()
+ y.exp()
)
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
    @base_and_logging_tensor
    def test_jvp_err_check(self, ctors):
        """jvp rejects non-Tensor inputs/outputs and ill-shaped v, then succeeds on valid args."""

        def foo(a):
            return 3 * a.narrow(0, 0, 3)

        def bar(a):
            # Returns a non-Tensor alongside a Tensor to trigger the output type check.
            return 3 * a.narrow(0, 0, 3), "bar"

        inp = ctors.rand(4)
        v = ctors.rand(4)
        with self.assertRaisesRegex(
            TypeError, "The inputs given to jvp must be either a Tensor"
        ):
            res = autogradF.jvp(foo, (inp, 2), v)
        with self.assertRaisesRegex(
            TypeError, "The outputs of the user-provided function given to jvp must"
        ):
            res = autogradF.jvp(bar, inp, v)
        with self.assertRaisesRegex(
            RuntimeError,
            "The vector v can only be None if the input to the user-provided function",
        ):
            res = autogradF.jvp(foo, inp)
        with self.assertRaisesRegex(
            RuntimeError, "The given v should contain a single Tensor."
        ):
            res = autogradF.jvp(foo, inp, (v, v))
        with self.assertRaisesRegex(
            RuntimeError, "v has invalid size: should be torch.Size"
        ):
            res = autogradF.jvp(foo, inp, v[:2])
        # Valid call: the jvp result must mirror the function output's structure.
        res = autogradF.jvp(foo, inp, v)[1]
        self._assert_same_struct(res, foo(inp))
    @base_and_logging_tensor
    def test_jvp_err_check_strict(self, ctors):
        """strict=True raises on gradient-disconnected functions; strict=False returns zeros instead."""

        def foo(a):
            return a.detach()

        def bar(a):
            # Make a non-leaf Tensor that requires_grad but that is not connected to the input
            return a.long().float().requires_grad_().clone()

        inp = ctors.rand(4)
        v = ctors.rand(4)
        with self.assertRaisesRegex(
            RuntimeError,
            "Output 0 of the user-provided function does not require gradients.",
        ):
            res = autogradF.jvp(foo, inp, v, strict=True)
        res = autogradF.jvp(foo, inp, v, strict=False)
        self._assert_same_struct(res[1], res[0])
        self.assertEqual(res[1].abs().sum(), 0.0)
        with self.assertRaisesRegex(
            RuntimeError,
            "The output of the user-provided function is independent of input 0",
        ):
            res = autogradF.jvp(bar, inp, v, strict=True)
        res = autogradF.jvp(bar, inp, v, strict=False)
        self._assert_same_struct(res[1], res[0])
        self.assertEqual(res[1].abs().sum(), 0.0)

        # The Jacobian does not depend on the input
        def foo(a):
            return a.clone()

        inp.requires_grad_()
        with self.assertRaisesRegex(
            RuntimeError,
            "jacobian of the user-provided function is independent of input 0.",
        ):
            res = autogradF.jvp(foo, inp, v, create_graph=True, strict=True)
        res = autogradF.jvp(foo, inp, v, create_graph=True, strict=False)
        self._assert_same_struct(res[1], inp)
        # For the identity function the jvp equals v itself.
        self.assertEqual(res[1], v)
    @base_and_logging_tensor
    def test_jvp_no_grad(self, ctors):
        """jvp works under torch.no_grad(); create_graph=True still records a graph there."""

        def reducer(x):
            return x.sum(dim=1)

        inputs = ctors.rand(4, 4)
        v = ctors.ones(4, 4)
        with torch.no_grad():
            res = autogradF.jvp(reducer, inputs, v)
        self.assertIsNone(res[0].grad_fn)
        self.assertIsNone(res[1].grad_fn)
        self.assertNotEqual(res[1], ctors.zeros(4, 4))
        inputs.requires_grad_()
        v.requires_grad_()
        with torch.no_grad():
            # create_graph=True overrides the surrounding no_grad context.
            res = autogradF.jvp(reducer, inputs, v, create_graph=True)
        self.assertIsNotNone(res[0].grad_fn)
        self.assertIsNotNone(res[1].grad_fn)
        self.assertNotEqual(res[1], ctors.zeros(4, 4))
    @base_and_logging_tensor
    def test_jvp_output(self, ctors):
        """jvp returns (output, jvp) with matching structure and no graph by default."""

        def reducer(x):
            return x.sum(dim=1)

        inputs = ctors.rand(4, 4)
        v = ctors.ones(4, 4)
        res = autogradF.jvp(reducer, inputs, v)
        self._assert_same_struct(res[1], res[0])
        self.assertIsNone(res[0].grad_fn)
        self.assertIsNone(res[1].grad_fn)

        # Multiple inputs, single output.
        def adder(x, y):
            return 2 * x + 3 * y

        inputs = (ctors.rand(2), ctors.rand(2))
        v = (ctors.ones(2), ctors.ones(2))
        out, jvp_val = autogradF.jvp(adder, inputs, v)
        self._assert_same_struct(jvp_val, out)
        self.assertIsNone(out.grad_fn)
        self.assertIsNone(jvp_val[0].grad_fn)
        self.assertIsNone(jvp_val[1].grad_fn)

        # Multiple inputs, multiple outputs.
        def adder(x, y):
            return 2 * x + 3 * y, x + y

        inputs = (ctors.rand(2), ctors.rand(2))
        v = (ctors.tensor([1.0, 0.0]), ctors.tensor([1.0, 0.0]))
        out, jvp_val = autogradF.jvp(adder, inputs, v)
        self._assert_same_struct(jvp_val, out)
        self.assertIsNone(out[0].grad_fn)
        self.assertIsNone(out[1].grad_fn)
        self.assertIsNone(jvp_val[0].grad_fn)
        self.assertIsNone(jvp_val[1].grad_fn)
    @base_and_logging_tensor
    def test_jvp_scalar(self, ctors):
        """jvp handles scalar (0-dim) outputs and scalar inputs, including v=None for scalar input."""

        def reducer(x):
            return x.sum()

        inputs = ctors.rand(4, 4)
        v = ctors.ones(4, 4)
        res = autogradF.jvp(reducer, inputs, v)
        self._assert_same_struct(res[0], ctors.zeros([]))
        self._assert_same_struct(res[1], res[0])

        def expander(x):
            return x.unsqueeze(0).repeat(4)

        inputs = ctors.rand([])
        v = ctors.ones([])
        res = autogradF.jvp(expander, inputs, v)
        self._assert_same_struct(res[0], ctors.zeros(4))
        self._assert_same_struct(res[1], res[0])
        # v may be omitted when the input is a scalar.
        res = autogradF.jvp(expander, inputs)
        self._assert_same_struct(res[0], ctors.zeros(4))
        self._assert_same_struct(res[1], res[0])
    @base_and_logging_tensor
    def test_jvp_create_graph(self, ctors):
        """jvp(create_graph=True) yields differentiable outputs; verified with gradcheck/gradgradcheck."""

        def reducer(x):
            return x.sum(dim=1)

        inputs = ctors.rand(2, 2, dtype=torch.double)
        v = ctors.ones(2, 2, dtype=torch.double)
        inputs.requires_grad_()
        v.requires_grad_()
        res = autogradF.jvp(reducer, inputs, v, create_graph=True)
        self._assert_same_struct(res[1], res[0])
        self.assertIsNotNone(res[0].grad_fn)
        self.assertIsNotNone(res[1].grad_fn)
        gradcheck(
            lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True),
            (inputs, v),
        )
        gradgradcheck(
            lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True),
            (inputs, v),
        )

        def adder(x, y):
            return 2 * x + 3 * y, x * y

        inputs = (
            ctors.rand(2, dtype=torch.double, requires_grad=True),
            ctors.rand(2, dtype=torch.double, requires_grad=True),
        )
        v = (
            ctors.tensor([1.0, 0.0], dtype=torch.double, requires_grad=True),
            ctors.tensor([1.0, 0.0], dtype=torch.double, requires_grad=True),
        )
        # Differentiate through the jvp value itself (index 1 of the result).
        gradcheck(
            lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[
                1
            ],
            inputs + v,
        )
        gradgradcheck(
            lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[
                1
            ],
            inputs + v,
        )

        def foo(*args):
            # Compose jvp inside a larger differentiable expression.
            x, y = args[:2]
            v = args[2:]
            x = x.cos()
            val, grad = autogradF.jvp(adder, (x, y), v, create_graph=True)
            return (
                val[0].exp()
                + val[1].exp()
                + grad[0].exp()
                + grad[1].exp()
                + x.exp()
                + y.exp()
            )

        gradcheck(foo, inputs + v)
        gradgradcheck(foo, inputs + v)
    def _test_construct_standard_basis_for(self, inputs):
        """Check _construct_standard_basis_for: per-input chunks that concatenate to an identity matrix."""
        numels = tuple(tensor.numel() for tensor in inputs)
        results = autogradF._construct_standard_basis_for(inputs, numels)
        # Each basis chunk must keep its corresponding input's dtype/device.
        for result, inp in zip(results, inputs):
            self.assertEqual(result.dtype, inp.dtype)
            self.assertEqual(result.device, inp.device)
        # Normalize device/dtype so chunks from heterogeneous inputs can be
        # concatenated and compared against a single identity matrix.
        results = torch.cat(
            [result.to(device="cpu", dtype=torch.float) for result in results], dim=1
        )
        expected = torch.eye(results[0].shape[0], dtype=torch.float)
        self.assertEqual(results, expected)
@base_and_logging_tensor
def test_construct_standard_basis_for(self, ctors):
test_cases = [
(ctors.randn(2, 3),),
(ctors.randn(1),),
(ctors.randn([]),),
(ctors.randn(1), ctors.randn([]), ctors.randn([])),
(ctors.randn(2), ctors.randn(3), ctors.randn([])),
(ctors.randn(2), ctors.randn([]), ctors.randn(3)),
(ctors.randn(2, 3), ctors.randn(3), ctors.randn(3, 4, 2)),
(ctors.randn(2, dtype=torch.float64), ctors.randn(3, dtype=torch.float32)),
]
for inputs in test_cases:
self._test_construct_standard_basis_for(inputs)
    @unittest.skipIf(not TEST_CUDA, "test requires CUDA")
    @base_and_logging_tensor
    def test_construct_standard_basis_for_cuda(self, ctors):
        """Same as test_construct_standard_basis_for but with inputs split across CPU and CUDA."""
        test_cases = [
            (ctors.randn(2), ctors.randn(3, device="cuda")),
            (ctors.randn(3, device="cuda"), ctors.randn(2)),
        ]
        for inputs in test_cases:
            self._test_construct_standard_basis_for(inputs)
    def _test_vectorize_raises_no_warnings(self, api, ctors):
        """Assert that `api(..., vectorize=True)` emits no warnings at all."""
        # vmap is an experimental prototype. When someone calls torch.vmap,
        # it raises a python warning. This test checks that
        # autogradF.{jacobian, hessian} don't raise that experimental prototype
        # warning; it is not nice for a public-facing API to raise a warning
        # no matter how it is called.
        def foo(a):
            return (a**2).sum()

        x = ctors.randn(3)
        with warnings.catch_warnings(record=True) as wa:
            api(foo, x, vectorize=True)
        self.assertEqual(len(wa), 0)
    @base_and_logging_tensor
    def test_jacobian_vectorize_raises_no_warnings(self, ctors):
        """jacobian(vectorize=True) must not emit the experimental vmap warning."""
        return self._test_vectorize_raises_no_warnings(autogradF.jacobian, ctors)
    @base_and_logging_tensor
    def test_hessian_vectorize_raises_no_warnings(self, ctors):
        """hessian(vectorize=True) must not emit the experimental vmap warning."""
        return self._test_vectorize_raises_no_warnings(autogradF.hessian, ctors)
    @parametrize("vectorize", [True, False])
    @base_and_logging_tensor
    def test_jacobian_err_check(self, vectorize, ctors):
        """jacobian rejects non-Tensor inputs/outputs, then works for single and multi-input functions."""

        def foo(a):
            return 3 * a.narrow(0, 0, 3)

        def bar(a):
            # Returns a non-Tensor alongside a Tensor to trigger the output type check.
            return 3 * a.narrow(0, 0, 3), "bar"

        inp = ctors.rand(4)
        with self.assertRaisesRegex(
            TypeError, "The inputs given to jacobian must be either a Tensor"
        ):
            res = autogradF.jacobian(foo, (inp, 2), vectorize=vectorize)
        with self.assertRaisesRegex(
            TypeError,
            "The outputs of the user-provided function given to jacobian must",
        ):
            res = autogradF.jacobian(bar, inp, vectorize=vectorize)
        res = autogradF.jacobian(foo, inp, vectorize=vectorize)
        self._assert_interleaved_struct(res, foo(inp), inp)

        def foo(a, b):
            return b, 3 * a.narrow(0, 0, 3)

        inp = (ctors.rand(4), ctors.rand(5))
        res = autogradF.jacobian(foo, inp, vectorize=vectorize)
        self._assert_interleaved_struct(res, foo(*inp), inp)
@base_and_logging_tensor
def test_jacobian_err_check_strict(self, ctors):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = ctors.rand(4)
with self.assertRaisesRegex(
RuntimeError,
"Output 0 of the user-provided function does not require gradients.",
):
res = autogradF.jacobian(foo, inp, strict=True)
res = autogradF.jacobian(foo, inp, strict=False)
self._assert_interleaved_struct(res, foo(inp), inp)
self.assertEqual(res.abs().sum(), 0.0)
with self.assertRaisesRegex(
RuntimeError,
"Output 0 of the user-provided function is independent of input 0.",
):
res = autogradF.jacobian(bar, inp, strict=True)
res = autogradF.jacobian(bar, inp, strict=False)
self._assert_interleaved_struct(res, foo(inp), inp)
self.assertEqual(res.abs().sum(), 0.0)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(
RuntimeError,
"jacobian of the user-provided function is independent of input 0.",
):
res = autogradF.jacobian(foo, inp, create_graph=True, strict=True)
res = autogradF.jacobian(foo, inp, create_graph=True, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res, torch.eye(4))
@base_and_logging_tensor
def test_jacobian_err_check_strict_vectorize(self, ctors):
def foo(x):
return x
inp = ctors.rand(4)
with self.assertRaisesRegex(RuntimeError, "not supported together"):
autogradF.jacobian(foo, inp, strict=True, vectorize=True)
    @base_and_logging_tensor
    def test_jacobian_no_grad(self, ctors):
        """jacobian works under torch.no_grad(); create_graph=True still records a graph there."""

        def exp_reducer(x):
            return x.exp().sum(dim=1)

        inputs = ctors.rand(4, 4)
        with torch.no_grad():
            res = autogradF.jacobian(exp_reducer, inputs)
        self.assertIsNone(res.grad_fn)
        self.assertNotEqual(res, ctors.zeros(4, 4))
        with torch.no_grad():
            # create_graph=True overrides the surrounding no_grad context.
            res = autogradF.jacobian(exp_reducer, inputs, create_graph=True)
        self.assertIsNotNone(res.grad_fn)
        self.assertNotEqual(res, ctors.zeros(4, 4))
    @vectorized_logging_tensor
    def test_jacobian_output(self, vectorize, ctors):
        """jacobian output has the interleaved (output-dims, input-dims) structure and no graph."""

        def exp_reducer(x):
            return x.exp().sum(dim=1)

        inputs = ctors.rand(4, 4)
        res = autogradF.jacobian(exp_reducer, inputs, vectorize=vectorize)
        self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
        self.assertIsNone(res.grad_fn)

        def identity(x):
            return x.clone()

        inputs = ctors.rand(4)
        res = autogradF.jacobian(identity, inputs, vectorize=vectorize)
        self._assert_interleaved_struct(res, identity(inputs), inputs)
        self.assertIsNone(res.grad_fn)
        # Identity function: Jacobian is the identity matrix.
        self.assertEqual(res, torch.eye(4))

        def add_exp_reducer(x, y):
            return (x + y.exp()).sum(dim=1)

        inputs = (ctors.rand(4, 4), ctors.rand(4, 4))
        res = autogradF.jacobian(add_exp_reducer, inputs, vectorize=vectorize)
        self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
        self.assertIsNone(res[0].grad_fn)
        self.assertIsNone(res[1].grad_fn)
    @vectorized_logging_tensor
    def test_jacobian_scalar(self, vectorize, ctors):
        """jacobian handles scalar (0-dim) outputs and scalar inputs."""

        def reducer(x):
            return x.sum()

        inputs = ctors.rand(4, 4)
        res = autogradF.jacobian(reducer, inputs, vectorize=vectorize)
        self._assert_same_struct(res, inputs)

        def expander(x):
            return x.unsqueeze(0).repeat(4)

        inputs = ctors.rand([])
        res = autogradF.jacobian(expander, inputs, vectorize=vectorize)
        self._assert_same_struct(res, ctors.zeros(4))
    @parametrize("vectorize", [True, False])
    @base_and_logging_tensor
    def test_jacobian_create_graph(self, vectorize, ctors):
        """jacobian(create_graph=True) yields a differentiable result; verified via gradcheck/gradgradcheck."""

        def exp_reducer(x):
            return x.exp().sum(dim=1)

        inputs = ctors.rand(4, 4, dtype=torch.double, requires_grad=True)
        res = autogradF.jacobian(
            exp_reducer, inputs, create_graph=True, vectorize=vectorize
        )
        self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
        self.assertIsNotNone(res.grad_fn)
        gradcheck(
            lambda inp: autogradF.jacobian(
                exp_reducer, inp, create_graph=True, vectorize=vectorize
            ),
            inputs,
        )
        gradgradcheck(
            lambda inp: autogradF.jacobian(
                exp_reducer, inp, create_graph=True, vectorize=vectorize
            ),
            inputs,
        )

        def add_exp_reducer(x, y):
            return (x + y).exp().sum(dim=1)

        inputs = (
            ctors.rand(4, 4, dtype=torch.double, requires_grad=True),
            ctors.rand(4, 4, dtype=torch.double, requires_grad=True),
        )
        res = autogradF.jacobian(
            add_exp_reducer, inputs, create_graph=True, vectorize=vectorize
        )
        self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
        self.assertIsNotNone(res[0].grad_fn)
        self.assertIsNotNone(res[1].grad_fn)
        gradcheck(
            lambda *inp: autogradF.jacobian(
                add_exp_reducer, inp, create_graph=True, vectorize=vectorize
            ),
            inputs,
        )
        gradgradcheck(
            lambda *inp: autogradF.jacobian(
                add_exp_reducer, inp, create_graph=True, vectorize=vectorize
            ),
            inputs,
        )

        def foo(x, y):
            # Compose jacobian inside a larger differentiable expression.
            x = x.cos()
            val, jac = autogradF.jacobian(
                add_exp_reducer, (x, y), create_graph=True, vectorize=vectorize
            )
            res = val[0].exp().sum() + val[1].exp().sum() + jac[0].exp().sum()
            res = res + jac[1].exp().sum() + x.exp().sum() + y.exp().sum()
            return res

        gradcheck(foo, inputs)
        gradgradcheck(foo, inputs)
    def _check_jacobian_vectorize_correctness(self, f, inputs, test_forward_ad=True):
        """Compare jacobian(vectorize=True) — and optionally forward-mode — against the vectorize=False reference."""
        expected = autogradF.jacobian(f, inputs, vectorize=False)
        result_backward_mode = autogradF.jacobian(f, inputs, vectorize=True)
        self.assertEqual(result_backward_mode, expected)
        if test_forward_ad:
            result_forward_mode = autogradF.jacobian(
                f, inputs, strategy="forward-mode", vectorize=True
            )
            self.assertEqual(result_forward_mode, expected)
@base_and_logging_tensor
def test_jacobian_vectorize_correctness_simple(self, ctors):
def f(x):
return 3 * x**2
x = ctors.randn(2, 3, 5)
self._check_jacobian_vectorize_correctness(f, x)
@base_and_logging_tensor
def test_jacobian_vectorize_correctness_multi_input(self, ctors):
def f(x, y):
return (x.cos() * x) @ y.sin()
x = ctors.randn(2, 3)
y = ctors.randn(3, 5)
self._check_jacobian_vectorize_correctness(f, (x, y))
@base_and_logging_tensor
def test_jacobian_vectorize_correctness_multi_input_multi_output(self, ctors):
def f(x, y):
return (x * x) @ y, x @ (x.sum(1) * y), y.sum()
x = ctors.randn(5, 3)
y = ctors.randn(3, 5)
self._check_jacobian_vectorize_correctness(f, (x, y))
@base_and_logging_tensor
def test_jacobian_vectorize_correctness_unrelated_outputs(self, ctors):
def f(x, y):
return x, y, x, y
x = ctors.randn(2)
y = ctors.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
    @base_and_logging_tensor
    def test_jacobian_vectorize_correctness_zero_dim(self, ctors):
        """Vectorized jacobian matches the reference for 0-dim inputs and/or outputs."""

        # zero-dim output
        def f(x, y):
            return x.sum(), y.sum(), x * y

        x = ctors.randn(3)
        y = ctors.randn(3)
        self._check_jacobian_vectorize_correctness(f, (x, y))

        # zero-dim input
        def g(x):
            return torch.stack([x, x, x])

        x = ctors.randn([])
        self._check_jacobian_vectorize_correctness(g, x)

        # Mixed zero-dim input / zero-dim output
        def h(x, y):
            return y.sum(), x * y

        x = ctors.randn([])
        y = ctors.randn(1)
        self._check_jacobian_vectorize_correctness(h, (x, y))
    @unittest.skipIf(not TEST_CUDA, "test requires CUDA")
    @base_and_logging_tensor
    def test_jacobian_vectorize_correctness_different_devices(self, ctors):
        """Vectorized jacobian matches the reference when outputs live on different devices."""

        def f(x, y):
            return x * y, (x * y).cuda()

        x = ctors.randn(3)
        y = ctors.randn(3)
        self._check_jacobian_vectorize_correctness(f, (x, y))
    @base_and_logging_tensor
    def test_jacobian_vectorize_correctness_different_dtype(self, ctors):
        """Vectorized jacobian matches the reference when outputs have different dtypes (reverse mode only)."""

        def f(x, y):
            return (x * y).float(), (x * y).double()

        x = ctors.randn(3)
        y = ctors.randn(3)
        # The Jacobian computed using forward AD has the dtype of the output
        # but the Jacobian computed with reverse AD has dtype of input
        self._check_jacobian_vectorize_correctness(f, (x, y), test_forward_ad=False)
def _check_hessian_vectorize_correctness(self, f, inputs):
expected = autogradF.hessian(f, inputs, vectorize=False)
result = autogradF.hessian(f, inputs, vectorize=True)
self.assertEqual(result, expected)
result_forward_mode = autogradF.hessian(
f, inputs, outer_jacobian_strategy="forward-mode", vectorize=True
)
self.assertEqual(result_forward_mode, expected)
@base_and_logging_tensor
def test_hessian_vectorize_correctness_simple(self, ctors):
def f(x):
return (3 * x**2).sum()
x = ctors.randn(2, 3, 5)
self._check_hessian_vectorize_correctness(f, x)
@base_and_logging_tensor
def test_hessian_vectorize_correctness_multi_input(self, ctors):
def f(x, y, z):
return ((x.relu() * x) @ y.sin() @ z).sum()
x = ctors.randn(2, 3)
y = ctors.randn(3, 5)
z = ctors.randn(5, 5)
self._check_hessian_vectorize_correctness(f, (x, y, z))
    @base_and_logging_tensor
    def test_hessian_vectorize_correctness_unrelated_outputs(self, ctors):
        """Vectorized hessian matches the reference when the output ignores some or all inputs."""

        # output unrelated to one input
        def f(x, y):
            return (x**2).sum()

        x = ctors.randn(2)
        y = ctors.randn(3)
        self._check_hessian_vectorize_correctness(f, (x, y))

        # output unrelated to all inputs
        def f(x, y):
            return ctors.ones([])

        x = ctors.randn(2)
        y = ctors.randn(3)
        self._check_hessian_vectorize_correctness(f, (x, y))
    @parametrize("vectorize", [True, False])
    @base_and_logging_tensor
    def test_hessian_err_check(self, vectorize, ctors):
        """hessian rejects non-Tensor inputs/outputs and non-scalar outputs, then works on valid args."""

        def foo(a):
            return 3 * a.narrow(0, 0, 3).exp().sum()

        def bar(a):
            # Non-Tensor in the output triggers the output type check.
            return 3 * a.narrow(0, 0, 3), "bar"

        def bar2(a):
            # Non-scalar Tensor output triggers the single-element check.
            return 3 * a.narrow(0, 0, 3)

        def bar3(a):
            # Multiple Tensor outputs trigger the single-Tensor check.
            return 3 * a.narrow(0, 0, 3), 3 * a.narrow(0, 0, 3)

        inp = ctors.rand(4)
        with self.assertRaisesRegex(
            TypeError, "The inputs given to hessian must be either a Tensor"
        ):
            res = autogradF.hessian(foo, (inp, 2), vectorize=vectorize)
        with self.assertRaisesRegex(
            TypeError, "The outputs of the user-provided function given to hessian must"
        ):
            res = autogradF.hessian(bar, inp, vectorize=vectorize)
        err_msg_out = "The Tensor returned by the function given to hessian should contain a single element"
        with self.assertRaisesRegex(RuntimeError, err_msg_out):
            res = autogradF.hessian(bar2, inp, vectorize=vectorize)
        with self.assertRaisesRegex(
            RuntimeError, "The function given to hessian should return a single Tensor"
        ):
            res = autogradF.hessian(bar3, inp, vectorize=vectorize)
        res = autogradF.hessian(foo, inp, vectorize=vectorize)
        self._assert_interleaved_struct(res, inp, inp)

        def foo(a, b):
            return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()

        inp = (ctors.rand(4), ctors.rand(5))
        res = autogradF.hessian(foo, inp, vectorize=vectorize)
        self._assert_interleaved_struct(res, inp, inp)
    @base_and_logging_tensor
    def test_hessian_err_check_strict(self, ctors):
        """strict=True raises on gradient-disconnected functions; strict=False returns zeros."""

        def foo(a):
            return a.detach().sum()

        def bar(a):
            # Make a non-leaf Tensor that requires_grad but that is not connected to the input
            return a.long().float().requires_grad_().clone().sum()

        def bar2(a):
            # A Linear function for which the jacobian is independent of the input
            return (3 * a).sum()

        inp = ctors.rand(4)
        with self.assertRaisesRegex(
            RuntimeError,
            "Output 0 of the user-provided function does not require gradients.",
        ):
            res = autogradF.hessian(foo, inp, strict=True)
        res = autogradF.hessian(foo, inp, strict=False)
        self._assert_interleaved_struct(res, inp, inp)
        self.assertEqual(res.abs().sum(), 0.0)
        with self.assertRaisesRegex(
            RuntimeError,
            "jacobian of the user-provided function with respect to input 0",
        ):
            res = autogradF.hessian(bar, inp, strict=True)
        res = autogradF.hessian(bar, inp, strict=False)
        self._assert_interleaved_struct(res, inp, inp)
        self.assertEqual(res.abs().sum(), 0.0)
        with self.assertRaisesRegex(
            RuntimeError,
            "jacobian of the user-provided function with respect to input 0 is",
        ):
            res = autogradF.hessian(bar2, inp, strict=True)
        res = autogradF.hessian(bar2, inp, strict=False)
        self._assert_interleaved_struct(res, inp, inp)
        self.assertEqual(res.abs().sum(), 0.0)
@base_and_logging_tensor
def test_hessian_err_check_strict_vectorize(self, ctors):
def foo(x):
return (x**3).sum()
inp = ctors.rand(4)
with self.assertRaisesRegex(RuntimeError, "not supported together"):
autogradF.hessian(foo, inp, strict=True, vectorize=True)
    @base_and_logging_tensor
    def test_hessian_no_grad(self, ctors):
        """hessian works under torch.no_grad(); create_graph=True still records a graph there."""

        def pow_reducer(x):
            return x.pow(3).sum()

        inputs = ctors.rand(2, 2)
        with torch.no_grad():
            res = autogradF.hessian(pow_reducer, inputs)
        self.assertIsNone(res[0][0].grad_fn)
        self.assertIsNone(res[0][1].grad_fn)
        self.assertIsNone(res[1][0].grad_fn)
        self.assertIsNone(res[1][1].grad_fn)
        self.assertNotEqual(res, ctors.zeros(2, 2, 2))
        with torch.no_grad():
            # create_graph=True overrides the surrounding no_grad context.
            res = autogradF.hessian(pow_reducer, inputs, create_graph=True)
        self.assertIsNotNone(res[0][0].grad_fn)
        self.assertIsNotNone(res[0][1].grad_fn)
        self.assertIsNotNone(res[1][0].grad_fn)
        self.assertIsNotNone(res[1][1].grad_fn)
        self.assertNotEqual(res, ctors.zeros(2, 2, 2))
    @vectorized_logging_tensor
    def test_hessian_output(self, vectorize, ctors):
        """hessian output has the interleaved (input-dims, input-dims) structure and no graph."""

        def pow_reducer(x):
            return x.pow(3).sum()

        inputs = ctors.rand(2, 2)
        res = autogradF.hessian(pow_reducer, inputs, vectorize=vectorize)
        self._assert_interleaved_struct(res, inputs, inputs)
        self.assertIsNone(res.grad_fn)

        def add_pow_reducer(x, y):
            return (x + y).pow(3).sum()

        inputs = (ctors.rand(2, 2), ctors.rand(2, 2))
        res = autogradF.hessian(add_pow_reducer, inputs, vectorize=vectorize)
        self._assert_interleaved_struct(res, inputs, inputs)
        # Two inputs give a 2x2 tuple-of-tuples of Hessian blocks.
        self.assertIsNone(res[0][0].grad_fn)
        self.assertIsNone(res[0][1].grad_fn)
        self.assertIsNone(res[1][0].grad_fn)
        self.assertIsNone(res[1][1].grad_fn)
    @parametrize("vectorize", [True, False])
    @base_and_logging_tensor
    def test_hessian_scalar(self, vectorize, ctors):
        """hessian handles scalar inputs and outputs reshaped away from 0-dim."""

        def reducer(x):
            return x.sum()

        inputs = ctors.rand(4, 4)
        res = autogradF.hessian(reducer, inputs, vectorize=vectorize)
        self._assert_interleaved_struct(res, inputs, inputs)
        inputs = ctors.rand([])
        res = autogradF.hessian(reducer, inputs, vectorize=vectorize)
        self._assert_same_struct(res, inputs)

        def bad_reducer(x):
            # Scalar value but with extra singleton dims; hessian should still work.
            return x.sum().view(1, 1, 1)

        inputs = ctors.rand(4, 4)
        res = autogradF.hessian(bad_reducer, inputs, vectorize=vectorize)
        self._assert_interleaved_struct(res, inputs, inputs)
    @parametrize("vectorize", [True, False])
    @base_and_logging_tensor
    def test_hessian_create_graph(self, vectorize, ctors):
        """hessian(create_graph=True) yields a differentiable result; verified via gradcheck/gradgradcheck."""

        def pow_reducer(x):
            return x.pow(3).sum()

        inputs = ctors.rand(2, 2, dtype=torch.double, requires_grad=True)
        res = autogradF.hessian(
            pow_reducer, inputs, create_graph=True, vectorize=vectorize
        )
        self._assert_interleaved_struct(res, inputs, inputs)
        self.assertIsNotNone(res.grad_fn)
        gradcheck(
            lambda inp: autogradF.hessian(
                pow_reducer, inp, create_graph=True, vectorize=vectorize
            ),
            inputs,
        )
        gradgradcheck(
            lambda inp: autogradF.hessian(
                pow_reducer, inp, create_graph=True, vectorize=vectorize
            ),
            inputs,
        )

        def add_pow_reducer(x, y):
            return (x + y).pow(3).sum()

        inputs = (
            ctors.rand(2, 2, dtype=torch.double, requires_grad=True),
            ctors.rand(2, 2, dtype=torch.double, requires_grad=True),
        )
        res = autogradF.hessian(
            add_pow_reducer, inputs, create_graph=True, vectorize=vectorize
        )
        self._assert_interleaved_struct(res, inputs, inputs)
        self.assertIsNotNone(res[0][0].grad_fn)
        self.assertIsNotNone(res[0][1].grad_fn)
        self.assertIsNotNone(res[1][0].grad_fn)
        self.assertIsNotNone(res[1][1].grad_fn)

        def flatten(inp):
            # gradcheck needs a flat tuple of Tensors, not a tuple of tuples.
            return tuple(el_lvl2 for el_lvl1 in inp for el_lvl2 in el_lvl1)

        gradcheck(
            lambda *inp: flatten(
                autogradF.hessian(
                    add_pow_reducer, inp, create_graph=True, vectorize=vectorize
                )
            ),
            inputs,
        )
        gradgradcheck(
            lambda *inp: flatten(
                autogradF.hessian(
                    add_pow_reducer, inp, create_graph=True, vectorize=vectorize
                )
            ),
            inputs,
        )

        def foo(x, y):
            # Compose hessian inside a larger differentiable expression.
            x = x.cos()
            val, hess = autogradF.hessian(
                add_pow_reducer, (x, y), create_graph=True, vectorize=vectorize
            )
            res = val[0].cos().sum() + val[1].cos().sum() + hess[0].cos().sum()
            res = res + hess[1].cos().sum() + x.cos().sum() + y.cos().sum()
            return res

        gradcheck(foo, inputs)
        gradgradcheck(foo, inputs)
    @base_and_logging_tensor
    def test_vhp_err_check(self, ctors):
        """vhp rejects invalid inputs/outputs/v, then succeeds on valid single and multi-input calls."""

        def foo(a):
            return 3 * a.narrow(0, 0, 3).exp().sum()

        def bar(a):
            # Non-Tensor in the output triggers the output type check.
            return 3 * a.narrow(0, 0, 3), "bar"

        def bar2(a):
            # Non-scalar output triggers the single-element check.
            return 3 * a.narrow(0, 0, 3)

        inp = ctors.rand(4)
        v = ctors.rand(4)
        with self.assertRaisesRegex(
            TypeError, "The inputs given to vhp must be either a Tensor"
        ):
            res = autogradF.vhp(foo, (inp, 2), v)
        with self.assertRaisesRegex(
            TypeError, "The outputs of the user-provided function given to vhp must"
        ):
            res = autogradF.vhp(bar, inp, v)
        err_msg_out = "The Tensor returned by the function given to vhp should contain a single element"
        with self.assertRaisesRegex(RuntimeError, err_msg_out):
            res = autogradF.vhp(bar2, inp, v)
        with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
            res = autogradF.vhp(foo, inp, ctors.rand(5))
        with self.assertRaisesRegex(
            TypeError,
            "The v given to vhp must be either a Tensor or a tuple of Tensors",
        ):
            res = autogradF.vhp(foo, inp, (v, 2))
        res = autogradF.vhp(foo, inp, v)
        self._assert_same_struct(res[1], inp)

        def foo(a, b):
            return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()

        inp = (ctors.rand(4), ctors.rand(5))
        v = (ctors.rand(4), ctors.rand(5))
        res = autogradF.vhp(foo, inp, v)
        self._assert_same_struct(res[1], inp)
    @base_and_logging_tensor
    def test_vhp_err_check_strict(self, ctors):
        """strict=True raises on gradient-disconnected functions; strict=False returns zeros."""

        def foo(a):
            return a.detach().sum()

        def bar(a):
            # Make a non-leaf Tensor that requires_grad but that is not connected to the input
            return a.long().float().requires_grad_().clone().sum()

        def bar2(a):
            # A Linear function for which the jacobian is independent of the input
            return (3 * a).sum()

        inp = ctors.rand(4)
        v = ctors.rand(4)
        with self.assertRaisesRegex(
            RuntimeError,
            "Output 0 of the user-provided function does not require gradients.",
        ):
            res = autogradF.vhp(foo, inp, v, strict=True)
        res = autogradF.vhp(foo, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.0)
        with self.assertRaisesRegex(
            RuntimeError,
            "The output of the user-provided function is independent of input 0",
        ):
            res = autogradF.vhp(bar, inp, v, strict=True)
        res = autogradF.vhp(bar, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.0)
        with self.assertRaisesRegex(
            RuntimeError,
            "jacobian of the user-provided function with respect to input 0 is",
        ):
            res = autogradF.vhp(bar2, inp, v, strict=True)
        res = autogradF.vhp(bar2, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.0)
    @base_and_logging_tensor
    def test_vhp_no_grad(self, ctors):
        """vhp works under torch.no_grad(); create_graph=True still records a graph there."""

        def reducer(x):
            return x.exp().sum()

        inputs = ctors.rand(4, 4)
        v = ctors.ones(4, 4)
        with torch.no_grad():
            res = autogradF.vhp(reducer, inputs, v)
        self.assertIsNone(res[0].grad_fn)
        self.assertIsNone(res[1].grad_fn)
        self.assertNotEqual(res[1], ctors.zeros(4, 4))
        with torch.no_grad():
            # create_graph=True overrides the surrounding no_grad context.
            res = autogradF.vhp(reducer, inputs, v, create_graph=True)
        self.assertIsNotNone(res[0].grad_fn)
        self.assertIsNotNone(res[1].grad_fn)
        self.assertNotEqual(res[1], ctors.zeros(4, 4))
    @base_and_logging_tensor
    def test_vhp_output(self, ctors):
        """vhp returns (output, vhp) mirroring the input structure, with no graph by default."""

        def foo(a):
            return 3 * a.narrow(0, 0, 3).exp().sum()

        inputs = ctors.rand(4, 4)
        v = ctors.ones(4, 4)
        res = autogradF.vhp(foo, inputs, v)
        self._assert_same_struct(res[1], inputs)
        self.assertIsNone(res[0].grad_fn)
        self.assertIsNone(res[1].grad_fn)

        def bar(a, b):
            return (a + 3 * b.narrow(0, 0, 3)).exp().sum()

        inputs = (ctors.rand(3), ctors.rand(4))
        v = (ctors.ones(3), ctors.ones(4))
        out, vhp_val = autogradF.vhp(bar, inputs, v)
        self._assert_same_struct(vhp_val, inputs)
        self.assertIsNone(out.grad_fn)
        self.assertIsNone(vhp_val[0].grad_fn)
        self.assertIsNone(vhp_val[1].grad_fn)
    @base_and_logging_tensor
    def test_vhp_scalar(self, ctors):
        """vhp handles scalar inputs (v optional) and scalar outputs reshaped away from 0-dim."""

        def reducer(x):
            return x.sum()

        inputs = ctors.rand(4, 4)
        v = ctors.ones(4, 4)
        res = autogradF.vhp(reducer, inputs, v)
        self._assert_same_struct(res[1], inputs)
        inputs = ctors.rand([])
        v = ctors.rand([])
        res = autogradF.vhp(reducer, inputs, v)
        self._assert_same_struct(res[1], inputs)
        # v may be omitted when the input is a scalar.
        res = autogradF.vhp(reducer, inputs)
        self._assert_same_struct(res[1], inputs)

        def bad_reducer(x):
            # Scalar value but with extra singleton dims; vhp should still work.
            return x.sum().view(1, 1, 1)

        inputs = ctors.rand(4, 4)
        v = ctors.rand(4, 4)
        res = autogradF.vhp(bad_reducer, inputs, v)
        self._assert_same_struct(res[1], inputs)
    @base_and_logging_tensor
    def test_vhp_create_graph(self, ctors):
        """vhp(create_graph=True) yields differentiable outputs; verified with gradcheck/gradgradcheck."""

        def foo(a):
            return 3 * a.narrow(0, 0, 3).exp().sum()

        inputs = ctors.rand(4, 4, dtype=torch.double, requires_grad=True)
        v = ctors.ones(4, 4, dtype=torch.double, requires_grad=True)
        res = autogradF.vhp(foo, inputs, v, create_graph=True)
        self._assert_same_struct(res[1], inputs)
        self.assertIsNotNone(res[0].grad_fn)
        self.assertIsNotNone(res[1].grad_fn)
        gradcheck(
            lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v)
        )
        gradgradcheck(
            lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v)
        )

        def bar(a, b):
            return (a + 3 * b.narrow(0, 0, 3)).exp().sum()

        inputs = (
            ctors.rand(3, dtype=torch.double, requires_grad=True),
            ctors.rand(4, dtype=torch.double, requires_grad=True),
        )
        v = (
            ctors.ones(3, dtype=torch.double, requires_grad=True),
            ctors.ones(4, dtype=torch.double, requires_grad=True),
        )
        out, vhp_val = autogradF.vhp(bar, inputs, v, create_graph=True)
        self._assert_same_struct(vhp_val, inputs)
        self.assertIsNotNone(out.grad_fn)
        self.assertIsNotNone(vhp_val[0].grad_fn)
        self.assertIsNotNone(vhp_val[1].grad_fn)
        gradcheck(
            lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1],
            inputs + v,
        )
        gradgradcheck(
            lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1],
            inputs + v,
        )

        def foo(*args):
            # Compose vhp inside a larger differentiable expression.
            x, y = args[:2]
            v = args[2:]
            x = x.cos()
            val, grad = autogradF.vhp(bar, (x, y), v, create_graph=True)
            return (
                val.cos()
                + grad[0].cos().sum()
                + grad[1].cos()
                + x.cos().sum()
                + y.cos()
            )

        gradcheck(foo, inputs + v)
        gradgradcheck(foo, inputs + v)
    @base_and_logging_tensor
    def test_hvp_err_check(self, ctors):
        """hvp rejects invalid inputs/outputs/v, then succeeds on valid single and multi-input calls."""

        def foo(a):
            return 3 * a.narrow(0, 0, 3).exp().sum()

        def bar(a):
            # Non-Tensor in the output triggers the output type check.
            return 3 * a.narrow(0, 0, 3), "bar"

        def bar2(a):
            # Non-scalar output triggers the single-element check.
            return 3 * a.narrow(0, 0, 3)

        inp = ctors.rand(4)
        v = ctors.rand(4)
        # Sanity call with valid args before exercising the error paths.
        res = autogradF.hvp(foo, inp, v)
        with self.assertRaisesRegex(
            TypeError, "The inputs given to hvp must be either a Tensor"
        ):
            res = autogradF.hvp(foo, (inp, 2), v)
        with self.assertRaisesRegex(
            TypeError, "The outputs of the user-provided function given to hvp must"
        ):
            res = autogradF.hvp(bar, inp, v)
        err_msg_out = "The Tensor returned by the function given to hvp should contain a single element"
        with self.assertRaisesRegex(RuntimeError, err_msg_out):
            res = autogradF.hvp(bar2, inp, v)
        with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
            res = autogradF.hvp(foo, inp, ctors.rand(5))
        with self.assertRaisesRegex(
            TypeError,
            "The v given to hvp must be either a Tensor or a tuple of Tensors",
        ):
            res = autogradF.hvp(foo, inp, (v, 2))
        res = autogradF.hvp(foo, inp, v)
        self._assert_same_struct(res[1], inp)

        def foo(a, b):
            return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()

        inp = (ctors.rand(4), ctors.rand(5))
        v = (ctors.rand(4), ctors.rand(5))
        res = autogradF.hvp(foo, inp, v)
        self._assert_same_struct(res[1], inp)
    @base_and_logging_tensor
    def test_hvp_err_check_strict(self, ctors):
        """strict=True raises on gradient-disconnected functions; strict=False returns zeros."""

        def foo(a):
            return a.detach().sum()

        def bar(a):
            # Make a non-leaf Tensor that requires_grad but that is not connected to the input
            return a.long().float().requires_grad_().clone().sum()

        def bar2(a):
            # A Linear function for which the jacobian is independent of the input
            return (3 * a).sum()

        inp = ctors.rand(4)
        v = ctors.rand(4)
        with self.assertRaisesRegex(
            RuntimeError,
            "Output 0 of the user-provided function does not require gradients.",
        ):
            res = autogradF.hvp(foo, inp, v, strict=True)
        res = autogradF.hvp(foo, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.0)
        with self.assertRaisesRegex(
            RuntimeError,
            "The output of the user-provided function is independent of input 0",
        ):
            res = autogradF.hvp(bar, inp, v, strict=True)
        res = autogradF.hvp(bar, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.0)
        with self.assertRaisesRegex(
            RuntimeError,
            "jacobian of the user-provided function with respect to input 0 is",
        ):
            res = autogradF.hvp(bar2, inp, v, strict=True)
        res = autogradF.hvp(bar2, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.0)
    @base_and_logging_tensor
    def test_hvp_no_grad(self, ctors):
        """hvp must work under torch.no_grad(); create_graph=True must still
        produce grad_fn-carrying outputs even inside the no_grad block."""

        def reducer(x):
            return x.exp().sum()

        inputs = ctors.rand(4, 4)
        v = ctors.ones(4, 4)
        with torch.no_grad():
            res = autogradF.hvp(reducer, inputs, v)
        # Without create_graph, neither output is attached to a graph.
        self.assertIsNone(res[0].grad_fn)
        self.assertIsNone(res[1].grad_fn)
        self.assertNotEqual(res[1], ctors.zeros(4, 4))
        with torch.no_grad():
            res = autogradF.hvp(reducer, inputs, v, create_graph=True)
        # create_graph overrides the surrounding no_grad context.
        self.assertIsNotNone(res[0].grad_fn)
        self.assertIsNotNone(res[1].grad_fn)
        self.assertNotEqual(res[1], ctors.zeros(4, 4))

    @base_and_logging_tensor
    def test_hvp_output(self, ctors):
        """hvp output must mirror the input structure (single Tensor or tuple)
        and be detached from the graph by default."""

        def foo(a):
            return 3 * a.narrow(0, 0, 3).exp().sum()

        inputs = ctors.rand(4, 4)
        v = ctors.ones(4, 4)
        res = autogradF.hvp(foo, inputs, v)
        self._assert_same_struct(res[1], inputs)
        self.assertIsNone(res[0].grad_fn)
        self.assertIsNone(res[1].grad_fn)

        def bar(a, b):
            return (a + 3 * b.narrow(0, 0, 3)).exp().sum()

        inputs = (ctors.rand(3), ctors.rand(4))
        v = (ctors.ones(3), ctors.ones(4))
        out, hvp_val = autogradF.hvp(bar, inputs, v)
        self._assert_same_struct(hvp_val, inputs)
        self.assertIsNone(out.grad_fn)
        self.assertIsNone(hvp_val[0].grad_fn)
        self.assertIsNone(hvp_val[1].grad_fn)
    @base_and_logging_tensor
    def test_hvp_scalar(self, ctors):
        """hvp must handle scalar (0-dim) inputs, an omitted v for scalar
        inputs, and functions whose output is a reshaped scalar."""

        def reducer(x):
            return x.exp().sum()

        inputs = ctors.rand(4, 4)
        v = ctors.ones(4, 4)
        res = autogradF.hvp(reducer, inputs, v)
        self._assert_same_struct(res[1], inputs)
        # 0-dim input: v may be given ...
        inputs = ctors.rand([])
        v = ctors.rand([])
        res = autogradF.hvp(reducer, inputs, v)
        self._assert_same_struct(res[1], inputs)
        # ... or omitted entirely.
        res = autogradF.hvp(reducer, inputs)
        self._assert_same_struct(res[1], inputs)

        def bad_reducer(x):
            # Output is a single element but with extra dims; hvp must accept it.
            return x.exp().sum().view(1, 1, 1)

        inputs = ctors.rand(4, 4)
        v = ctors.rand(4, 4)
        res = autogradF.hvp(bad_reducer, inputs, v)
        self._assert_same_struct(res[1], inputs)

    @base_and_logging_tensor
    def test_hvp_create_graph(self, ctors):
        """create_graph=True results must be differentiable; verified with
        gradcheck/gradgradcheck on single-input, tuple-input, and nested calls."""

        def foo(a):
            return 3 * a.narrow(0, 0, 3).exp().sum()

        inputs = ctors.rand(4, 4, dtype=torch.double, requires_grad=True)
        v = ctors.ones(4, 4, dtype=torch.double, requires_grad=True)
        res = autogradF.hvp(foo, inputs, v, create_graph=True)
        self._assert_same_struct(res[1], inputs)
        self.assertIsNotNone(res[0].grad_fn)
        self.assertIsNotNone(res[1].grad_fn)
        gradcheck(
            lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v)
        )
        gradgradcheck(
            lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v)
        )

        def bar(a, b):
            return (a + 3 * b.narrow(0, 0, 3)).exp().sum()

        inputs = (
            ctors.rand(3, dtype=torch.double, requires_grad=True),
            ctors.rand(4, dtype=torch.double, requires_grad=True),
        )
        v = (
            ctors.ones(3, dtype=torch.double, requires_grad=True),
            ctors.ones(4, dtype=torch.double, requires_grad=True),
        )
        out, hvp_val = autogradF.hvp(bar, inputs, v, create_graph=True)
        self._assert_same_struct(hvp_val, inputs)
        self.assertIsNotNone(out.grad_fn)
        self.assertIsNotNone(hvp_val[0].grad_fn)
        self.assertIsNotNone(hvp_val[1].grad_fn)
        gradcheck(
            lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1],
            inputs + v,
        )
        gradgradcheck(
            lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1],
            inputs + v,
        )

        # Nested use: hvp called from inside a function that is itself gradchecked.
        def foo(*args):
            x, y = args[:2]
            v = args[2:]
            x = x.cos()
            val, grad = autogradF.hvp(bar, (x, y), v, create_graph=True)
            return (
                val.cos()
                + grad[0].cos().sum()
                + grad[1].cos()
                + x.cos().sum()
                + y.cos()
            )

        gradcheck(foo, inputs + v)
        gradgradcheck(foo, inputs + v)
    @base_and_logging_tensor
    def test_jacobian_match_vjp_jvp(self, ctors):
        """Cross-check: jvp == J @ v and vjp == v^T @ J for the same function."""

        def foo(x):
            return x**3 + x.sum()

        inputs = ctors.rand(4)
        v = ctors.rand(4)
        jac = autogradF.jacobian(foo, inputs)
        jvp = autogradF.jvp(foo, inputs, v)[1]
        vjp = autogradF.vjp(foo, inputs, v)[1]
        self.assertEqual(jvp, torch.mm(jac, v.unsqueeze(1)).squeeze(1))
        self.assertEqual(vjp, torch.mm(v.unsqueeze(0), jac).squeeze(0))

    @base_and_logging_tensor
    def test_hessian_match_vhp_hvp(self, ctors):
        """Cross-check: hvp == H @ v and vhp == v^T @ H for the same function."""

        def foo(a):
            return 3 * a.narrow(0, 0, 3).exp().sum()

        inputs = ctors.rand(4)
        v = ctors.rand(4)
        hes = autogradF.hessian(foo, inputs)
        hvp = autogradF.hvp(foo, inputs, v)[1]
        vhp = autogradF.vhp(foo, inputs, v)[1]
        self.assertEqual(hvp, torch.mm(hes, v.unsqueeze(1)).squeeze(1))
        self.assertEqual(vhp, torch.mm(v.unsqueeze(0), hes).squeeze(0))
# Materialize the @parametrize-d variants of the test methods on the class.
instantiate_parametrized_tests(TestAutogradFunctional)

if __name__ == "__main__":
    run_tests()
| TestAutogradFunctional |
python | huggingface__transformers | tests/models/clipseg/test_modeling_clipseg.py | {
"start": 1569,
"end": 4535
} | class ____:
    def __init__(
        self,
        parent,
        batch_size=12,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        initializer_range=0.02,
        scope=None,
    ):
        # `parent` is the unittest.TestCase this tester reports assertions to.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Build a random pixel_values batch plus a matching tiny config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        """Return a CLIPSegVisionConfig mirroring this tester's hyperparameters."""
        return CLIPSegVisionConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values):
        """Run a forward pass in eval mode and check output shapes."""
        model = CLIPSegVisionModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the shared ModelTesterMixin: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
| CLIPSegVisionModelTester |
python | huggingface__transformers | src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | {
"start": 20694,
"end": 23521
} | class ____(nn.Module):
"""Linear head for Classification and Regression.
Args:
config (`PatchTSMixerConfig`):
Configuration.
"""
    def __init__(self, config: PatchTSMixerConfig, distribution_output=None):
        super().__init__()
        self.head_aggregation = config.head_aggregation
        self.output_range = config.output_range
        # With no aggregation, all num_patches positions are flattened into the
        # projection input, so its in-features scale by num_patches.
        if config.head_aggregation is None:
            mul_factor = config.num_patches
        else:
            mul_factor = 1
        self.distribution_output = distribution_output
        if distribution_output is None:
            self.projection = nn.Linear(
                config.d_model * config.num_input_channels * mul_factor,
                config.num_targets,
            )
        else:
            # Distribution heads project to their own parameter set instead.
            self.projection = distribution_output.get_parameter_projection(
                config.d_model * config.num_input_channels * mul_factor
            )
        if config.head_aggregation is None:
            self.flatten = nn.Flatten(start_dim=-3)
        else:
            self.flatten = nn.Flatten(start_dim=-2)
        self.dropout = nn.Dropout(config.head_dropout)

    def forward(self, hidden_features):
        """
        Args:
            hidden_features (`torch.Tensor` of shape `(batch_size x num_patch x d_model)` in `flatten` mode
            or `(batch_size x n_vars x num_patch x d_model)` in `common_channel`/`mix_channel` mode.): Input hidden
            features.

        Returns:
            `torch.Tensor` of shape `(batch_size x num_targets)`.
        """
        # batch_size x d_model x num_patch or batch_size x n_vars x d_model x num_patch
        hidden_features = hidden_features.transpose(-1, -2)
        if self.head_aggregation == "use_last":
            # batch_size x d_model (flatten) or # batch_size x n_vars x d_model (common_channel)
            hidden_features = hidden_features[..., -1]
        elif self.head_aggregation == "max_pool":
            # batch_size x n_vars x d_model or batch_size x d_model
            hidden_features = hidden_features.max(dim=-1).values
        elif self.head_aggregation == "avg_pool":
            # batch_size x n_vars x d_model or batch_size x d_model
            hidden_features = hidden_features.mean(dim=-1)
        # NOTE(review): self.flatten is always assigned an nn.Flatten in __init__,
        # so this truthiness check always passes; presumably intended as a
        # None-guard — confirm before relying on it.
        if self.flatten:
            hidden_features = self.flatten(hidden_features)
        hidden_features = self.dropout(hidden_features)
        hidden_features = self.projection(hidden_features)  # batch_size x num_targets
        # Optionally squash plain (non-distribution) outputs into output_range.
        if (self.distribution_output is None) and (self.output_range is not None):
            hidden_features = (
                torch.sigmoid(hidden_features) * (self.output_range[1] - self.output_range[0]) + self.output_range[0]
            )
        return hidden_features
@auto_docstring
| PatchTSMixerLinearHead |
python | doocs__leetcode | solution/0000-0099/0031.Next Permutation/Solution.py | {
"start": 0,
"end": 358
} | class ____:
def nextPermutation(self, nums: List[int]) -> None:
n = len(nums)
i = next((i for i in range(n - 2, -1, -1) if nums[i] < nums[i + 1]), -1)
if ~i:
j = next((j for j in range(n - 1, i, -1) if nums[j] > nums[i]))
nums[i], nums[j] = nums[j], nums[i]
nums[i + 1 :] = nums[i + 1 :][::-1]
| Solution |
python | PrefectHQ__prefect | tests/test_transactions.py | {
"start": 32875,
"end": 38614
} | class ____:
    class TestTransaction:
        """get/set data semantics for the synchronous Transaction."""

        def test_get_and_set_data(self):
            with transaction(key="test") as txn:
                txn.set("x", 42)
                assert txn.get("x") == 42

        def test_get_and_set_data_in_nested_context(self):
            with transaction(key="test") as top:
                top.set("key", 42)
                with transaction(key="nested") as inner:
                    assert (
                        inner.get("key") == 42
                    )  # children inherit from their parents first
                    inner.set("key", "string")  # and can override
                    assert inner.get("key") == "string"
                    # parent value is untouched by the child's override
                    assert top.get("key") == 42
                assert top.get("key") == 42

        def test_get_and_set_data_doesnt_mutate_parent(self):
            with transaction(key="test") as top:
                top.set("key", {"x": [42]})
                with transaction(key="nested") as inner:
                    inner_value = inner.get("key")
                    inner_value["x"].append(43)
                    inner.set("key", inner_value)
                    # mutating the inherited value must not leak to the parent
                    assert inner.get("key") == {"x": [42, 43]}
                    assert top.get("key") == {"x": [42]}
                assert top.get("key") == {"x": [42]}

        def test_get_raises_on_unknown_but_allows_default(self):
            with transaction(key="test") as txn:
                with pytest.raises(ValueError, match="foobar"):
                    txn.get("foobar")
                assert txn.get("foobar", None) is None
                assert txn.get("foobar", "string") == "string"

        def test_parent_values_set_after_child_open_are_available(self):
            parent_transaction = Transaction()
            child_transaction = Transaction()
            parent_transaction.__enter__()
            child_transaction.__enter__()
            try:
                parent_transaction.set("key", "value")
                # child can access parent's values
                assert child_transaction.get("key") == "value"
                parent_transaction.set("list", [1, 2, 3])
                assert child_transaction.get("list") == [1, 2, 3]
                # Mutating the value doesn't update the stored value
                child_transaction.get("list").append(4)
                assert child_transaction.get("list") == [1, 2, 3]
                child_transaction.set("list", [1, 2, 3, 4])
                assert child_transaction.get("list") == [1, 2, 3, 4]
                # parent transaction isn't affected by child's modifications
                assert parent_transaction.get("list") == [1, 2, 3]
            finally:
                child_transaction.__exit__(None, None, None)
                parent_transaction.__exit__(None, None, None)

    class TestAsyncTransaction:
        """Async mirror of TestTransaction, using AsyncTransaction/atransaction."""

        async def test_get_and_set_data(self):
            async with atransaction(key="test") as txn:
                txn.set("x", 42)
                assert txn.get("x") == 42

        async def test_get_and_set_data_in_nested_context(self):
            async with atransaction(key="test") as top:
                top.set("key", 42)
                async with atransaction(key="nested") as inner:
                    assert (
                        inner.get("key") == 42
                    )  # children inherit from their parents first
                    inner.set("key", "string")  # and can override
                    assert inner.get("key") == "string"
                    assert top.get("key") == 42
                assert top.get("key") == 42

        async def test_get_and_set_data_doesnt_mutate_parent(self):
            async with atransaction(key="test") as top:
                top.set("key", {"x": [42]})
                async with atransaction(key="nested") as inner:
                    inner_value = inner.get("key")
                    inner_value["x"].append(43)
                    inner.set("key", inner_value)
                    assert inner.get("key") == {"x": [42, 43]}
                    assert top.get("key") == {"x": [42]}
                assert top.get("key") == {"x": [42]}

        async def test_get_raises_on_unknown_but_allows_default(self):
            async with atransaction(key="test") as txn:
                with pytest.raises(ValueError, match="foobar"):
                    txn.get("foobar")
                assert txn.get("foobar", None) is None
                assert txn.get("foobar", "string") == "string"

        async def test_parent_values_set_after_child_open_are_available(self):
            parent_transaction = AsyncTransaction()
            child_transaction = AsyncTransaction()
            await parent_transaction.__aenter__()
            await child_transaction.__aenter__()
            try:
                parent_transaction.set("key", "value")
                # child can access parent's values
                assert child_transaction.get("key") == "value"
                parent_transaction.set("list", [1, 2, 3])
                assert child_transaction.get("list") == [1, 2, 3]
                # Mutating the value doesn't update the stored value
                child_transaction.get("list").append(4)
                assert child_transaction.get("list") == [1, 2, 3]
                child_transaction.set("list", [1, 2, 3, 4])
                assert child_transaction.get("list") == [1, 2, 3, 4]
                # parent transaction isn't affected by child's modifications
                assert parent_transaction.get("list") == [1, 2, 3]
            finally:
                await child_transaction.__aexit__(None, None, None)
                await parent_transaction.__aexit__(None, None, None)
| TestGetAndSetData |
python | huggingface__transformers | src/transformers/models/whisper/english_normalizer.py | {
"start": 19372,
"end": 22815
} | class ____:
    def __init__(self, english_spelling_mapping):
        """Build the regex tables used by __call__.

        Args:
            english_spelling_mapping: mapping passed straight through to
                EnglishSpellingNormalizer (US/UK spelling canonicalization).
        """
        # Filler words dropped entirely from transcripts.
        self.ignore_patterns = r"\b(hmm|mm|mhm|mmm|uh|um)\b"
        # Applied in insertion order; more specific patterns come first.
        self.replacers = {
            # common contractions
            r"\bwon't\b": "will not",
            r"\bcan't\b": "can not",
            r"\blet's\b": "let us",
            r"\bain't\b": "aint",
            r"\by'all\b": "you all",
            r"\bwanna\b": "want to",
            r"\bgotta\b": "got to",
            r"\bgonna\b": "going to",
            r"\bi'ma\b": "i am going to",
            r"\bimma\b": "i am going to",
            r"\bwoulda\b": "would have",
            r"\bcoulda\b": "could have",
            r"\bshoulda\b": "should have",
            r"\bma'am\b": "madam",
            # contractions in titles/prefixes
            r"\bmr\b": "mister ",
            r"\bmrs\b": "missus ",
            r"\bst\b": "saint ",
            r"\bdr\b": "doctor ",
            r"\bprof\b": "professor ",
            r"\bcapt\b": "captain ",
            r"\bgov\b": "governor ",
            r"\bald\b": "alderman ",
            r"\bgen\b": "general ",
            r"\bsen\b": "senator ",
            r"\brep\b": "representative ",
            r"\bpres\b": "president ",
            r"\brev\b": "reverend ",
            r"\bhon\b": "honorable ",
            r"\basst\b": "assistant ",
            r"\bassoc\b": "associate ",
            r"\blt\b": "lieutenant ",
            r"\bcol\b": "colonel ",
            r"\bjr\b": "junior ",
            r"\bsr\b": "senior ",
            r"\besq\b": "esquire ",
            # prefect tenses, ideally it should be any past participles, but it's harder..
            r"'d been\b": " had been",
            r"'s been\b": " has been",
            r"'d gone\b": " had gone",
            r"'s gone\b": " has gone",
            r"'d done\b": " had done",  # "'s done" is ambiguous
            r"'s got\b": " has got",
            # general contractions
            r"n't\b": " not",
            r"'re\b": " are",
            r"'s\b": " is",
            r"'d\b": " would",
            r"'ll\b": " will",
            r"'t\b": " not",
            r"'ve\b": " have",
            r"'m\b": " am",
        }
        self.standardize_numbers = EnglishNumberNormalizer()
        self.standardize_spellings = EnglishSpellingNormalizer(english_spelling_mapping)

    def __call__(self, s: str):
        """Normalize an English transcript: lowercase, strip fillers/markup,
        expand contractions, standardize numbers and spellings, collapse
        whitespace. Returns the normalized string."""
        s = s.lower()
        s = re.sub(r"[<\[][^>\]]*[>\]]", "", s)  # remove words between brackets
        s = re.sub(r"\(([^)]+?)\)", "", s)  # remove words between parenthesis
        s = re.sub(self.ignore_patterns, "", s)
        s = re.sub(r"\s+'", "'", s)  # standardize when there's a space before an apostrophe
        for pattern, replacement in self.replacers.items():
            s = re.sub(pattern, replacement, s)
        s = re.sub(r"(\d),(\d)", r"\1\2", s)  # remove commas between digits
        s = re.sub(r"\.([^0-9]|$)", r" \1", s)  # remove periods not followed by numbers
        s = remove_symbols_and_diacritics(s, keep=".%$¢€£")  # keep some symbols for numerics
        s = self.standardize_numbers(s)
        s = self.standardize_spellings(s)
        # now remove prefix/suffix symbols that are not preceded/followed by numbers
        s = re.sub(r"[.$¢€£]([^0-9])", r" \1", s)
        s = re.sub(r"([^0-9])%", r"\1 ", s)
        s = re.sub(r"\s+", " ", s)  # replace any successive whitespace characters with a space
        return s
| EnglishTextNormalizer |
python | kamyu104__LeetCode-Solutions | Python/find-the-duplicate-number.py | {
"start": 802,
"end": 1359
} | class ____(object):
def findDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
left, right = 1, len(nums) - 1
while left <= right:
mid = left + (right - left) / 2
# Get count of num <= mid.
count = 0
for num in nums:
if num <= mid:
count += 1
if count > mid:
right = mid - 1
else:
left = mid + 1
return left
# Time: O(n)
# Space: O(n)
| Solution2 |
python | apache__airflow | airflow-core/tests/unit/core/test_stats.py | {
"start": 1690,
"end": 6433
} | class ____:
    def setup_method(self):
        # Wrap a mocked statsd client so assertions can inspect emitted metrics.
        self.statsd_client = Mock(spec=statsd.StatsClient)
        self.stats = SafeStatsdLogger(self.statsd_client)

    def test_increment_counter_with_valid_name(self):
        self.stats.incr("test_stats_run")
        self.statsd_client.incr.assert_called_once_with("test_stats_run", 1, 1)

    def test_stat_name_must_be_a_string(self):
        # Invalid stat names are silently dropped, not forwarded.
        self.stats.incr([])
        self.statsd_client.assert_not_called()

    def test_stat_name_must_not_exceed_max_length(self):
        self.stats.incr("X" * 300)
        self.statsd_client.assert_not_called()

    def test_stat_name_must_only_include_allowed_characters(self):
        self.stats.incr("test/$tats")
        self.statsd_client.assert_not_called()

    def test_timer(self):
        with self.stats.timer("empty_timer") as t:
            pass
        self.statsd_client.timer.assert_called_once_with("empty_timer")
        assert isinstance(t.duration, float)

    def test_empty_timer(self):
        # A timer without a name must not emit anything.
        with self.stats.timer():
            pass
        self.statsd_client.timer.assert_not_called()

    def test_timing(self):
        self.stats.timing("empty_timer", 123)
        self.statsd_client.timing.assert_called_once_with("empty_timer", 123)

    def test_gauge(self):
        self.stats.gauge("empty", 123)
        self.statsd_client.gauge.assert_called_once_with("empty", 123, 1, False)

    def test_decr(self):
        self.stats.decr("empty")
        self.statsd_client.decr.assert_called_once_with("empty", 1, 1)

    def test_enabled_by_config(self):
        """Test that enabling this sets the right instance properties"""
        with conf_vars({("metrics", "statsd_on"): "True"}):
            importlib.reload(airflow.stats)
            assert isinstance(airflow.stats.Stats.statsd, statsd.StatsClient)
            assert not hasattr(airflow.stats.Stats, "dogstatsd")
        # Avoid side-effects
        importlib.reload(airflow.stats)

    def test_load_custom_statsd_client(self):
        with conf_vars(
            {
                ("metrics", "statsd_on"): "True",
                ("metrics", "statsd_custom_client_path"): f"{__name__}.CustomStatsd",
            }
        ):
            importlib.reload(airflow.stats)
            assert isinstance(airflow.stats.Stats.statsd, CustomStatsd)
        # Avoid side-effects
        importlib.reload(airflow.stats)

    def test_load_invalid_custom_stats_client(self):
        # A custom client that doesn't subclass statsd.StatsClient must be rejected.
        with conf_vars(
            {
                ("metrics", "statsd_on"): "True",
                ("metrics", "statsd_custom_client_path"): f"{__name__}.InvalidCustomStatsd",
            }
        ):
            importlib.reload(airflow.stats)
            error_message = re.escape(
                "Your custom StatsD client must extend the statsd."
                "StatsClient in order to ensure backwards compatibility."
            )
            with pytest.raises(AirflowConfigException, match=error_message):
                airflow.stats.Stats.incr("empty_key")
        importlib.reload(airflow.stats)

    def test_load_allow_list_validator(self):
        with conf_vars(
            {
                ("metrics", "statsd_on"): "True",
                ("metrics", "metrics_allow_list"): "name1,name2",
            }
        ):
            importlib.reload(airflow.stats)
            assert type(airflow.stats.Stats.metrics_validator) is PatternAllowListValidator
            assert airflow.stats.Stats.metrics_validator.validate_list == ("name1", "name2")
        # Avoid side-effects
        importlib.reload(airflow.stats)

    def test_load_block_list_validator(self):
        with conf_vars(
            {
                ("metrics", "statsd_on"): "True",
                ("metrics", "metrics_block_list"): "name1,name2",
            }
        ):
            importlib.reload(airflow.stats)
            assert type(airflow.stats.Stats.metrics_validator) is PatternBlockListValidator
            assert airflow.stats.Stats.metrics_validator.validate_list == ("name1", "name2")
        # Avoid side-effects
        importlib.reload(airflow.stats)

    def test_load_allow_and_block_list_validator_loads_only_allow_list_validator(self):
        # When both lists are configured, the allow-list wins.
        with conf_vars(
            {
                ("metrics", "statsd_on"): "True",
                ("metrics", "metrics_allow_list"): "name1,name2",
                ("metrics", "metrics_block_list"): "name1,name2",
            }
        ):
            importlib.reload(airflow.stats)
            assert type(airflow.stats.Stats.metrics_validator) is PatternAllowListValidator
            assert airflow.stats.Stats.metrics_validator.validate_list == ("name1", "name2")
        # Avoid side-effects
        importlib.reload(airflow.stats)
| TestStats |
python | numpy__numpy | numpy/random/tests/test_random.py | {
"start": 1879,
"end": 2403
} | class ____:
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
| TestBinomial |
python | google__jax | jax/experimental/mosaic/gpu/layout_inference.py | {
"start": 2833,
"end": 4891
} | class ____:
"""A unique identifier for a variable.
This class describes a particular role of a Value, either as a result of an
operation, an operand of an operation, or a block argument.
"""
# A MLIR operation. If the type is `ARGUMENT`, this is the owner of the block
# and region_index is the region that contains the block with the argument.
# The block is always the first block of the region.
operation: ir.OpView
# Whether this represents an operand, a result, or an argument.
type: VariableType
# The index of the operand/result/argument within the op's
# operands/results/arguments.
index: int
# The index of the region that contains the block with the argument.
region_index: int | None = None
def __post_init__(self):
assert (self.type != VariableType.ARGUMENT) == (self.region_index is None)
@property
def value(self) -> ir.Value:
"""Returns the IR value corresponding to this value site."""
if self.type == VariableType.OPERAND:
return self.operation.operands[self.index]
elif self.type == VariableType.RESULT:
return self.operation.results[self.index]
else:
return self.operation.regions[self.region_index].blocks[0].arguments[self.index]
@property
def memory_space(self) -> MemorySpace:
"""Returns the memory space associated with this value."""
type = self.value.type
if ir.VectorType.isinstance(type):
return MemorySpace.REG
assert ir.MemRefType.isinstance(type)
if utils.is_tmem_ref(type):
return MemorySpace.TMEM
elif utils.is_smem_ref(type):
return MemorySpace.SMEM
raise ValueError(f"Unsupported memory space for: {type}")
def __str__(self):
match = _op_name_regex.match(str(self.operation))
assert match is not None
if self.type == VariableType.OPERAND:
return f"{match.group(0)}:o-{self.index}"
elif self.type == VariableType.RESULT:
return f"{match.group(0)}:r-{self.index}"
else:
return f"{match.group(0)}:a-{self.index}"
@dataclasses.dataclass(frozen=True)
| ValueSite |
python | huggingface__transformers | src/transformers/models/kosmos2/modeling_kosmos2.py | {
"start": 24731,
"end": 29933
} | class ____(nn.Module):
"""This module produces sinusoidal positional embeddings of any length."""
# Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.__init__
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
super().__init__()
self.offset = 2
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)
# Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.make_weights
def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
if hasattr(self, "weights"):
# in forward put the weights on the correct dtype and device of the param
emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
self.register_buffer("weights", emb_weights, persistent=False)
@staticmethod
# Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.get_embedding
def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
"""
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
"Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb.to(torch.get_default_dtype())
@torch.no_grad()
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
past_key_values_length: int = 0,
position_ids: Optional[torch.Tensor] = None,
):
if input_ids is not None:
bsz, seq_len = input_ids.size()
if position_ids is None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = self.create_position_ids_from_input_ids(
input_ids, self.padding_idx, past_key_values_length
).to(input_ids.device)
else:
bsz, seq_len = inputs_embeds.size()[:-1]
if position_ids is None:
position_ids = self.create_position_ids_from_inputs_embeds(
inputs_embeds, past_key_values_length, self.padding_idx
)
# expand embeddings if needed
max_pos = self.padding_idx + 1 + seq_len + past_key_values_length
if max_pos > self.weights.size(0):
self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach()
@staticmethod
# Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.create_position_ids_from_inputs_embeds
def create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length, padding_idx):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
padding_idx + 1, sequence_length + padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length
@staticmethod
# Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings.create_position_ids_from_input_ids
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
x: torch.Tensor x:
Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
| Kosmos2TextSinusoidalPositionalEmbedding |
python | dagster-io__dagster | python_modules/libraries/dagster-airflow/dagster_airflow/hooks/dagster_hook.py | {
"start": 728,
"end": 9290
} | class ____(BaseHook):
conn_name_attr = "dagster_conn_id"
default_conn_name = "dagster_default"
conn_type = "dagster"
hook_name = "Dagster"
@staticmethod
def get_ui_field_behaviour() -> Mapping[str, Any]:
"""Returns custom field behaviour."""
return {
"hidden_fields": ["port", "schema", "extra"],
"relabeling": {
"description": "Dagster Cloud Organization ID",
"host": "Dagster Cloud Deployment Name",
"login": "Dagster URL",
"password": "Dagster Cloud User Token",
},
"placeholders": {
"password": "",
"login": "https://dagster.cloud/",
"description": "",
"host": "prod",
},
}
def get_conn(self) -> None:
pass
def get_pandas_df(self, _sql) -> None:
pass
def get_records(self, _sql) -> None:
pass
def run(self, _sql) -> None:
pass
def __init__(
self,
dagster_conn_id: Optional[str] = "dagster_default",
organization_id: Optional[str] = None,
deployment_name: Optional[str] = None,
url: str = "",
user_token: Optional[str] = None,
) -> None:
if is_airflow_2_loaded_in_environment():
super().__init__()
else:
super().__init__(source=None)
self.url = url
self.user_token = user_token
self.organization_id = organization_id
self.deployment_name = deployment_name
if (deployment_name or organization_id) and dagster_conn_id:
raise AirflowException(
"Cannot set both dagster_conn_id and organization_id/deployment_name"
)
if dagster_conn_id is not None and is_airflow_2_loaded_in_environment():
conn = self.get_connection(dagster_conn_id)
base_url = conn.login if conn.login else "https://dagster.cloud/"
if base_url == "https://dagster.cloud/":
self.set_hook_for_cloud(conn)
else:
self.set_hook_for_oss(conn)
if self.user_token is None:
raise AirflowException(
"Cannot get user_token: No valid user_token or dagster_conn_id supplied."
)
if self.url == "":
raise AirflowException(
"Cannot get dagster url: No valid url or dagster_conn_id supplied."
)
def set_hook_for_cloud(self, conn: Connection):
self.organization_id = conn.description
self.deployment_name = conn.host
self.user_token = conn.get_password()
base_url = conn.login if conn.login else "https://dagster.cloud/"
if self.organization_id is None or self.deployment_name is None:
raise AirflowException(
"Dagster Cloud connection requires organization_id and deployment_name to be set"
)
self.url = f"{base_url}{self.organization_id}/{self.deployment_name}/graphql"
def set_hook_for_oss(self, conn: Connection):
self.url = cast("str", conn.login)
    def launch_run(
        self,
        repository_name: str = "my_dagster_project",
        repostitory_location_name: str = "example_location",
        job_name: str = "all_assets_job",
        run_config: Optional[Mapping[str, Any]] = None,
    ) -> str:
        """Launch a Dagster job run via the GraphQL API and return its run id.

        Raises AirflowException when the mutation does not return
        LaunchRunSuccess.
        """
        query = """
mutation LaunchJobExecution($executionParams: ExecutionParams!) {
  launchPipelineExecution(executionParams: $executionParams) {
    __typename
    ... on LaunchRunSuccess {
      run {
        id
        __typename
      }
      __typename
    }
    ... on PipelineNotFoundError {
      message
      __typename
    }
    ... on InvalidSubsetError {
      message
      __typename
    }
    ... on RunConfigValidationInvalid {
      errors {
        message
        __typename
      }
      __typename
    }
    ...PythonErrorFragment
  }
}
fragment PythonErrorFragment on PythonError {
  __typename
  message
  stack
  causes {
    message
    stack
    __typename
  }
}
"""
        variables = {
            "executionParams": {
                "runConfigData": json.dumps({} if run_config is None else run_config),
                "selector": {
                    "repositoryName": repository_name,
                    "repositoryLocationName": repostitory_location_name,
                    "jobName": job_name,
                },
                "mode": "default",
                "executionMetadata": {"tags": [{"key": "dagster/solid_selection", "value": "*"}]},
            }
        }
        headers = {"Dagster-Cloud-Api-Token": self.user_token if self.user_token else ""}
        response = requests.post(
            url=self.url, json={"query": query, "variables": variables}, headers=headers
        )
        response.raise_for_status()
        response_json = response.json()
        if response_json["data"]["launchPipelineExecution"]["__typename"] == "LaunchRunSuccess":
            run = response_json["data"]["launchPipelineExecution"]["run"]
            logging.info(f"Run {run['id']} launched successfully")
            return run["id"]
        else:
            # NOTE(review): RunConfigValidationInvalid returns `errors`, not
            # `message` — this branch would raise KeyError for that payload;
            # confirm against the Dagster GraphQL schema.
            raise AirflowException(
                "Error launching run:"
                f" {response_json['data']['launchPipelineExecution']['message']}"
            )
def wait_for_run(
self,
run_id: str,
) -> None:
query = """
query RunQuery($runId: ID!) {
runOrError(runId: $runId) {
__typename
...PythonErrorFragment
...NotFoundFragment
... on Run {
id
status
__typename
}
}
}
fragment NotFoundFragment on RunNotFoundError {
__typename
message
}
fragment PythonErrorFragment on PythonError {
__typename
message
stack
causes {
message
stack
__typename
}
}
"""
variables = {"runId": run_id}
headers = {"Dagster-Cloud-Api-Token": self.user_token if self.user_token else ""}
status = ""
while status not in [
DagsterRunStatus.SUCCESS.value,
DagsterRunStatus.FAILURE.value,
DagsterRunStatus.CANCELED.value,
]:
response = requests.post(
url=self.url, json={"query": query, "variables": variables}, headers=headers
)
response.raise_for_status()
response_json = response.json()
if response_json["data"]["runOrError"]["__typename"] == "Run":
status = response_json["data"]["runOrError"]["status"]
else:
raise AirflowException(
f"Error fetching run status: {response_json['data']['runOrError']['message']}"
)
if status == DagsterRunStatus.SUCCESS.value:
logging.info(f"Run {run_id} completed successfully")
elif status == DagsterRunStatus.FAILURE.value:
raise AirflowException(f"Run {run_id} failed")
elif status == DagsterRunStatus.CANCELED.value:
raise AirflowException(f"Run {run_id} was cancelled")
time.sleep(5)
def terminate_run(
self,
run_id: str,
):
query = """
mutation Terminate($runId: String!, $terminatePolicy: TerminateRunPolicy) {
terminatePipelineExecution(runId: $runId, terminatePolicy: $terminatePolicy) {
__typename
... on TerminateRunFailure {
message
__typename
}
... on RunNotFoundError {
message
__typename
}
... on TerminateRunSuccess {
run {
id
runId
canTerminate
__typename
}
__typename
}
...PythonErrorFragment
}
}
fragment PythonErrorFragment on PythonError {
__typename
message
stack
causes {
message
stack
__typename
}
}
"""
variables = {"runId": run_id, "terminatePolicy": "MARK_AS_CANCELED_IMMEDIATELY"}
headers = {"Dagster-Cloud-Api-Token": self.user_token if self.user_token else ""}
response = requests.post(
url=self.url, json={"query": query, "variables": variables}, headers=headers
)
response.raise_for_status()
response_json = response.json()
if (
response_json["data"]["terminatePipelineExecution"]["__typename"]
!= "TerminateRunSuccess"
):
raise AirflowException(
"Error terminating run:"
f" {response_json['data']['terminatePipelineExecution']['message']}"
)
| DagsterHook |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_itertools.py | {
"start": 101106,
"end": 101444
} | class ____:
'Test propagation of exceptions after two iterations'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i == 2:
raise ZeroDivisionError
v = self.seqn[self.i]
self.i += 1
return v
| E2 |
python | pytorch__pytorch | torch/testing/_internal/common_utils.py | {
"start": 13987,
"end": 16693
} | class ____:
def __init__(
self,
child_iter,
input_type_desc,
item_callback=None,
track_callback=None,
set_seed=True,
restrict_to_index=None
):
self.child_iter = enumerate(child_iter)
# Input type describes the things we're tracking (e.g. "sample input", "error input").
self.input_type_desc = input_type_desc
# NB: The two types of callbacks below exist because the thing we want to track isn't
# always the same as the thing we want returned from the iterator. An example of this
# is ErrorInput, which we want returned from the iterator, but which contains a
# SampleInput that we want to track.
# Item callback is run on each (iterated thing, index) to get the thing to return.
self.item_callback = item_callback
if self.item_callback is None:
self.item_callback = lambda x, i: x
# Track callback is run on each iterated thing to get the thing to track.
self.track_callback = track_callback
if self.track_callback is None:
self.track_callback = lambda x: x
self.test_fn = extract_test_fn()
# Indicates whether the random seed should be set before each call to the iterator
self.set_seed = set_seed
# Indicates that iteration should be restricted to only the provided index.
# If None, no restriction is done
self.restrict_to_index = restrict_to_index
def __iter__(self):
return self
def __next__(self):
while True:
if self.set_seed:
# use a test-name-specific hash for the seed if possible
seed = (
int.from_bytes(hashlib.sha256(
self.test_fn.__qualname__.encode("utf-8")).digest()[:4], 'little')
if self.test_fn is not None else SEED
)
set_rng_seed(seed)
# allow StopIteration to bubble up
input_idx, input_val = next(self.child_iter)
if (self.restrict_to_index is None) or (input_idx == self.restrict_to_index):
break
self._set_tracked_input(
TrackedInput(
index=input_idx, val=self.track_callback(input_val), type_desc=self.input_type_desc
)
)
return self.item_callback(input_val, input_idx)
def _set_tracked_input(self, tracked_input: TrackedInput):
if self.test_fn is None:
return
if not hasattr(self.test_fn, "tracked_input"):
return
self.test_fn.tracked_input = tracked_input # type: ignore[attr-defined]
| TrackedInputIter |
python | ray-project__ray | python/ray/serve/tests/test_config_files/test_dag/dir/subdir/a/add_and_sub.py | {
"start": 558,
"end": 889
} | class ____:
# Requires the test_module repo as a py_module:
# https://github.com/ray-project/test_module
def subtract(self, input: int) -> int:
from test_module.test import one
return input - one() # Returns input - 2
@serve.deployment(
ray_actor_options={
"num_cpus": 0.1,
}
)
| Subtract |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/generator4.py | {
"start": 755,
"end": 1401
} | class ____:
def __init__(self):
self.x = 1
def __iter__(self) -> Iterator[int]:
yield self.x
async def func1() -> SomeIterable:
return SomeIterable()
def func2() -> Iterator[int]:
yield 2
def g5() -> None:
val = (y for y in func2())
reveal_type(val, expected_text="Generator[int, None, None]")
async def g6() -> None:
val = (x + y for y in func2() for x in await func1())
reveal_type(val, expected_text="AsyncGenerator[int, None]")
async def g7() -> None:
val = (x + y for y in await func1() for x in func2())
reveal_type(val, expected_text="Generator[int, None, None]")
| SomeIterable |
python | pyinstaller__pyinstaller | PyInstaller/fake-modules/_pyi_rth_utils/_win32.py | {
"start": 1187,
"end": 1488
} | class ____(ctypes.Structure):
_fields_ = [
("TokenAppContainer", PSID),
]
PTOKEN_APPCONTAINER_INFORMATION = ctypes.POINTER(TOKEN_APPCONTAINER_INFORMATION)
# SECURITY_ATTRIBUTES structure for CreateDirectoryW
PSECURITY_DESCRIPTOR = ctypes.wintypes.LPVOID
| TOKEN_APPCONTAINER_INFORMATION |
python | scrapy__scrapy | tests/test_http2_client_protocol.py | {
"start": 1796,
"end": 1926
} | class ____(Spider):
name = "dummy"
start_urls: list = []
def parse(self, response):
print(response)
| DummySpider |
python | viewflow__viewflow | tests/fsm/test_fsm__advanced.py | {
"start": 250,
"end": 1328
} | class ____(object):
stage = State(ReviewState, default=ReviewState.NEW)
def __init__(self, text):
self.text = text
@stage.transition(source=ReviewState.NEW)
def notify(self):
pass
@stage.transition(
source={ReviewState.NEW, ReviewState.HIDDEN}, target=ReviewState.PUBLISHED
)
def publish(self):
pass
@stage.transition(source=ReviewState.NEW, target=ReviewState.PUBLISHED)
@stage.transition(
source=ReviewState.PUBLISHED, target=ReviewState.NEW, label=_("Return to new")
)
def toggle(self):
pass
toggle.label = _("Toggle publication state")
@stage.transition(source=ReviewState.PUBLISHED, target=ReviewState.REJECTED)
def trash(self):
if len(self.text) > 1000:
self.hide()
else:
self.remove()
@stage.transition(source=ReviewState.REJECTED, target=ReviewState.REMOVED)
def remove(self):
pass
@stage.transition(source=ReviewState.REJECTED, target=ReviewState.HIDDEN)
def hide(self):
pass
| Publication |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 153540,
"end": 156955
} | class ____(
AssertsCompiledSQL, _RangeTests, fixtures.TestBase
):
__dialect__ = "postgresql"
@property
def _col_str_arr(self):
return self._col_str
# operator tests
@classmethod
def setup_test_class(cls):
table = Table(
"data_table",
MetaData(),
Column("range", cls._col_type, primary_key=True),
)
cls.col = table.c.range
def _test_clause(self, colclause, expected, type_):
self.assert_compile(colclause, expected)
is_(colclause.type._type_affinity, type_._type_affinity)
_comparisons = [
(lambda col, other: col == other, "="),
(lambda col, other: col != other, "!="),
(lambda col, other: col > other, ">"),
(lambda col, other: col < other, "<"),
(lambda col, other: col >= other, ">="),
(lambda col, other: col <= other, "<="),
(lambda col, other: col.contains(other), "@>"),
(lambda col, other: col.contained_by(other), "<@"),
(lambda col, other: col.overlaps(other), "&&"),
(lambda col, other: col << other, "<<"),
(lambda col, other: col.strictly_left_of(other), "<<"),
(lambda col, other: col >> other, ">>"),
(lambda col, other: col.strictly_right_of(other), ">>"),
(lambda col, other: col.not_extend_left_of(other), "&>"),
(lambda col, other: col.not_extend_right_of(other), "&<"),
(lambda col, other: col.adjacent_to(other), "-|-"),
]
_operations = [
(lambda col, other: col + other, "+"),
(lambda col, other: col.union(other), "+"),
(lambda col, other: col - other, "-"),
(lambda col, other: col.difference(other), "-"),
(lambda col, other: col * other, "*"),
(lambda col, other: col.intersection(other), "*"),
]
_all_fns = _comparisons + _operations
_not_compare_op = ("+", "-", "*")
@testing.combinations(*_all_fns, id_="as")
def test_data_str(self, fn, op):
self._test_clause(
fn(self.col, self._data_str()),
f"data_table.range {op} %(range_1)s",
(
self.col.type
if op in self._not_compare_op
else sqltypes.BOOLEANTYPE
),
)
@testing.combinations(*_all_fns, id_="as")
def test_data_obj(self, fn, op):
self._test_clause(
fn(self.col, self._data_obj()),
f"data_table.range {op} %(range_1)s::{self._col_str}",
(
self.col.type
if op in self._not_compare_op
else sqltypes.BOOLEANTYPE
),
)
@testing.combinations(*_comparisons, id_="as")
def test_data_str_any(self, fn, op):
self._test_clause(
fn(self.col, any_(array([self._data_str()]))),
f"data_table.range {op} ANY (ARRAY[%(param_1)s])",
(
self.col.type
if op in self._not_compare_op
else sqltypes.BOOLEANTYPE
),
)
def test_where_is_null(self):
self._test_clause(
self.col == None, "data_table.range IS NULL", sqltypes.BOOLEANTYPE
)
def test_where_is_not_null(self):
self._test_clause(
self.col != None,
"data_table.range IS NOT NULL",
sqltypes.BOOLEANTYPE,
)
| _RangeTypeCompilation |
python | falconry__falcon | examples/asgilook/asgilook/config.py | {
"start": 61,
"end": 732
} | class ____:
DEFAULT_CONFIG_PATH = '/tmp/asgilook'
DEFAULT_MIN_THUMB_SIZE = 64
DEFAULT_REDIS_FROM_URL = redis.asyncio.from_url
DEFAULT_REDIS_HOST = 'redis://localhost'
DEFAULT_UUID_GENERATOR = uuid.uuid4
def __init__(self):
self.storage_path = pathlib.Path(
os.environ.get('ASGI_LOOK_STORAGE_PATH', self.DEFAULT_CONFIG_PATH)
)
self.storage_path.mkdir(parents=True, exist_ok=True)
self.min_thumb_size = self.DEFAULT_MIN_THUMB_SIZE
self.redis_from_url = Config.DEFAULT_REDIS_FROM_URL
self.redis_host = self.DEFAULT_REDIS_HOST
self.uuid_generator = Config.DEFAULT_UUID_GENERATOR
| Config |
python | huggingface__transformers | src/transformers/models/gemma2/modular_gemma2.py | {
"start": 25131,
"end": 25389
} | class ____(GemmaForTokenClassification):
pass
__all__ = [
"Gemma2Config",
"Gemma2ForCausalLM",
"Gemma2Model",
"Gemma2PreTrainedModel",
"Gemma2ForSequenceClassification",
"Gemma2ForTokenClassification",
]
| Gemma2ForTokenClassification |
python | readthedocs__readthedocs.org | readthedocs/doc_builder/environments.py | {
"start": 22971,
"end": 36667
} | class ____(BaseBuildEnvironment):
"""
Docker build environment, uses docker to contain builds.
If :py:data:`settings.DOCKER_ENABLE` is true, build documentation inside a
docker container, instead of the host system, using this build environment
class. The build command creates a docker container from a pre-built image,
defined by :py:data:`settings.DOCKER_IMAGE`. This container is started with
a mount to the project's build path under ``user_builds`` on the host
machine, walling off project builds from reading/writing other projects'
data.
"""
command_class = DockerBuildCommand
container_image = DOCKER_IMAGE
@staticmethod
def _get_docker_exception_message(exc):
"""Return a human readable message from a Docker exception."""
# ``docker.errors.DockerException`` usually exposes ``explanation`` but
# some subclasses created when wrapping other libraries (``requests``,
# ``urllib3``) do not. Accessing it blindly raises ``AttributeError``.
# Fallback to ``str(exc)`` so we always have a useful message.
message = getattr(exc, "explanation", None)
if not message:
message = str(exc)
if not message:
message = repr(exc)
return message
def __init__(self, *args, **kwargs):
container_image = kwargs.pop("container_image", None)
super().__init__(*args, **kwargs)
self.client = None
self.container = None
self.container_name = self.get_container_name()
# Decide what Docker image to use, based on priorities:
# The image set by user or,
if self.config and self.config.docker_image:
self.container_image = self.config.docker_image
# the image overridden by the project (manually set by an admin).
if self.project.container_image:
self.container_image = self.project.container_image
# Override the ``container_image`` if we pass it via argument.
#
# FIXME: This is a temporal fix while we explore how to make
# ``ubuntu-20.04`` the default build image without breaking lot of
# builds. For now, we are passing
# ``container_image='readthedocs/build:ubuntu-20.04'`` for the setup
# VCS step.
if container_image:
self.container_image = container_image
self.container_mem_limit = self.project.container_mem_limit or settings.BUILD_MEMORY_LIMIT
self.container_time_limit = self.project.container_time_limit or settings.BUILD_TIME_LIMIT
structlog.contextvars.bind_contextvars(
project_slug=self.project.slug,
version_slug=self.version.slug,
)
# NOTE: as this environment is used for `sync_repository_task` it may
# not have a build associated
if self.build:
structlog.contextvars.bind_contextvars(
build_id=self.build.get("id"),
)
def __enter__(self):
"""Start of environment context."""
try:
# Test for existing container. We remove any stale containers that
# are no longer running here if there is a collision. We throw an
# exception
state = self.container_state()
if state is not None:
if state.get("Running") is True:
raise BuildAppError(
BuildAppError.GENERIC_WITH_BUILD_ID,
exception_message=_(
"A build environment is currently running for this version",
),
)
log.warning(
"Removing stale container.",
container_id=self.container_id,
)
client = self.get_client()
client.remove_container(self.container_id)
except (DockerAPIError, ConnectionError) as exc:
raise BuildAppError(
BuildAppError.GENERIC_WITH_BUILD_ID,
exception_message=self._get_docker_exception_message(exc),
) from exc
# Create the checkout path if it doesn't exist to avoid Docker creation
if not os.path.exists(self.project.doc_path):
os.makedirs(self.project.doc_path)
try:
self.create_container()
except: # noqa
self.__exit__(*sys.exc_info())
raise
return self
def __exit__(self, exc_type, exc_value, tb):
"""End of environment context."""
client = self.get_client()
try:
client.kill(self.container_id)
except DockerNotFoundError:
log.info(
"Container does not exists, nothing to kill.",
container_id=self.container_id,
)
except DockerAPIError:
# Logging this as warning because it usually happens due memory
# limit or build timeout. In those cases, the container is not
# running and can't be killed
log.warning(
"Unable to kill container.",
container_id=self.container_id,
)
# Save the container's state before removing it to know what exception
# to raise in the next step (`update_build_from_container_state`)
state = self.container_state()
try:
log.info("Removing container.", container_id=self.container_id)
client.remove_container(self.container_id)
except DockerNotFoundError:
log.info(
"Container does not exists, nothing to remove.",
container_id=self.container_id,
)
# Catch direct failures from Docker API or with an HTTP request.
# These errors should not surface to the user.
except (DockerAPIError, ConnectionError, ReadTimeout):
log.exception("Couldn't remove container")
self.raise_container_error(state)
def get_container_name(self):
if self.build:
name = "build-{build}-project-{project_id}-{project_name}".format(
build=self.build.get("id"),
project_id=self.project.pk,
project_name=self.project.slug,
)
else:
# An uuid is added, so the container name is unique per sync.
uuid_ = uuid.uuid4().hex[:8]
name = f"sync-{uuid_}-project-{self.project.pk}-{self.project.slug}"
return slugify(name[:DOCKER_HOSTNAME_MAX_LEN])
def get_client(self):
"""Create Docker client connection."""
try:
if self.client is None:
self.client = APIClient(
base_url=DOCKER_SOCKET,
version=DOCKER_VERSION,
)
return self.client
except DockerException as exc:
raise BuildAppError(
BuildAppError.GENERIC_WITH_BUILD_ID,
exception_message=self._get_docker_exception_message(exc),
) from exc
def _get_binds(self):
"""
Return proper Docker Binds depending on settings.
It uses Docker Volume if running on a docker-compose. Otherwise, it
returns just a regular mountpoint path.
"""
if getattr(settings, "RTD_DOCKER_COMPOSE", False):
from pathlib import Path
binds = {
settings.RTD_DOCKER_COMPOSE_VOLUME: {
"bind": str(Path(settings.DOCROOT).parent),
"mode": "rw",
},
}
else:
binds = {
self.project.doc_path: {
"bind": self.project.doc_path,
"mode": "rw",
},
}
binds.update(settings.RTD_DOCKER_ADDITIONAL_BINDS)
return binds
def get_container_host_config(self):
"""
Create the ``host_config`` settings for the container.
It mainly generates the proper path bindings between the Docker
container and the Host by mounting them with the proper permissions.
The object returned is passed to Docker function
``client.create_container``.
"""
return self.get_client().create_host_config(
binds=self._get_binds(),
mem_limit=self.container_mem_limit,
)
@property
def container_id(self):
"""Return id of container if it is valid."""
if self.container_name:
return self.container_name
if self.container:
return self.container.get("Id")
def container_state(self):
"""Get container state."""
client = self.get_client()
try:
info = client.inspect_container(self.container_id)
return info.get("State", {})
except DockerAPIError:
return None
def raise_container_error(self, state):
"""
Raise an exception based on the container's state.
In the case of the parent command exiting before the exec commands
finish, or in the case of OOM on the container, raise a
`BuildUserError` with an error message explaining the failure.
Otherwise, raise a `BuildAppError`.
"""
if state is not None and state.get("Running") is False:
if state.get("ExitCode") == DOCKER_TIMEOUT_EXIT_CODE:
raise BuildUserError(message_id=BuildUserError.BUILD_TIME_OUT)
if state.get("OOMKilled", False):
raise BuildUserError(message_id=BuildUserError.BUILD_EXCESSIVE_MEMORY)
if state.get("Error"):
raise BuildAppError(
message_id=BuildAppError.BUILD_DOCKER_UNKNOWN_ERROR,
format_values={
"message": state.get("Error"),
},
)
def create_container(self):
"""Create docker container."""
client = self.get_client()
try:
log.info(
"Creating Docker container.",
container_image=self.container_image,
container_id=self.container_id,
container_time_limit=self.container_time_limit,
container_mem_limit=self.container_mem_limit,
)
networking_config = None
if settings.RTD_DOCKER_COMPOSE:
# Create the container in the same network the web container is
# running, so we can hit its healthcheck API.
networking_config = client.create_networking_config(
{
settings.RTD_DOCKER_COMPOSE_NETWORK: client.create_endpoint_config(),
}
)
self.container = client.create_container(
image=self.container_image,
command=(
'/bin/sh -c "sleep {time}; exit {exit}"'.format(
time=self.container_time_limit,
exit=DOCKER_TIMEOUT_EXIT_CODE,
)
),
name=self.container_id,
hostname=self.container_id,
host_config=self.get_container_host_config(),
detach=True,
user=settings.RTD_DOCKER_USER,
runtime="runsc", # gVisor runtime
networking_config=networking_config,
)
client.start(container=self.container_id)
# NOTE: as this environment is used for `sync_repository_task` it may
# not have a build associated. We skip running a healthcheck on those cases.
if self.project.has_feature(Feature.BUILD_HEALTHCHECK) and self.build:
self._run_background_healthcheck()
except (DockerAPIError, ConnectionError) as exc:
raise BuildAppError(
BuildAppError.GENERIC_WITH_BUILD_ID,
exception_message=self._get_docker_exception_message(exc),
) from exc
def _run_background_healthcheck(self):
"""
Run a cURL command in the background to ping the healthcheck API.
The API saves the last ping timestamp on each call. Then a periodic Celery task
checks this value for all the running builds and decide if the build is stalled or not.
If it's stalled, it terminates those builds and mark them as fail.
"""
log.debug("Running build with healthcheck.")
build_id = self.build.get("id")
build_builder = self.build.get("builder")
healthcheck_url = reverse("build-healthcheck", kwargs={"pk": build_id})
url = f"{settings.SLUMBER_API_HOST}{healthcheck_url}?builder={build_builder}"
# We use --insecure because we are hitting the internal load balancer here that doesn't have a SSL certificate
# The -H "Host: " header is required because of internal load balancer URL
cmd = f"/bin/bash -c 'while true; do curl --insecure --max-time 2 -H \"Host: {settings.PRODUCTION_DOMAIN}\" -X POST {url}; sleep {settings.RTD_BUILD_HEALTHCHECK_DELAY}; done;'"
log.info("Healthcheck command to run.", command=cmd)
client = self.get_client()
exec_cmd = client.exec_create(
container=self.container_id,
cmd=cmd,
user=settings.RTD_DOCKER_USER,
stdout=True,
stderr=True,
)
# `detach=True` allows us to run this command in the background
client.exec_start(exec_id=exec_cmd["Id"], stream=False, detach=True)
| DockerBuildEnvironment |
python | Lightning-AI__lightning | src/lightning/fabric/plugins/precision/double.py | {
"start": 997,
"end": 1963
} | class ____(Precision):
"""Plugin for training with double (``torch.float64``) precision."""
precision: Literal["64-true"] = "64-true"
@override
def convert_module(self, module: Module) -> Module:
return module.double()
@override
def tensor_init_context(self) -> AbstractContextManager:
return _DtypeContextManager(torch.double)
@override
def module_init_context(self) -> AbstractContextManager:
return self.tensor_init_context()
@override
def forward_context(self) -> AbstractContextManager:
return self.tensor_init_context()
@override
def convert_input(self, data: Any) -> Any:
return apply_to_collection(data, function=_convert_fp_tensor, dtype=Tensor, dst_type=torch.double)
@override
def convert_output(self, data: Any) -> Any:
return apply_to_collection(data, function=_convert_fp_tensor, dtype=Tensor, dst_type=torch.get_default_dtype())
| DoublePrecision |
python | MongoEngine__mongoengine | tests/fields/test_geo_fields.py | {
"start": 85,
"end": 16440
} | class ____(MongoDBTestCase):
def _test_for_expected_error(self, Cls, loc, expected):
try:
Cls(loc=loc).validate()
self.fail(f"Should not validate the location {loc}")
except ValidationError as e:
assert expected == e.to_dict()["loc"]
def test_geopoint_validation(self):
class Location(Document):
loc = GeoPointField()
invalid_coords = [{"x": 1, "y": 2}, 5, "a"]
expected = "GeoPointField can only accept tuples or lists of (x, y)"
for coord in invalid_coords:
self._test_for_expected_error(Location, coord, expected)
invalid_coords = [[], [1], [1, 2, 3]]
for coord in invalid_coords:
expected = "Value (%s) must be a two-dimensional point" % repr(coord)
self._test_for_expected_error(Location, coord, expected)
invalid_coords = [[{}, {}], ("a", "b")]
for coord in invalid_coords:
expected = "Both values (%s) in point must be float or int" % repr(coord)
self._test_for_expected_error(Location, coord, expected)
invalid_coords = [21, 4, "a"]
for coord in invalid_coords:
expected = "GeoPointField can only accept tuples or lists of (x, y)"
self._test_for_expected_error(Location, coord, expected)
def test_point_validation(self):
class Location(Document):
loc = PointField()
invalid_coords = {"x": 1, "y": 2}
expected = (
"PointField can only accept a valid GeoJson dictionary or lists of (x, y)"
)
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "MadeUp", "coordinates": []}
expected = 'PointField type must be "Point"'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "Point", "coordinates": [1, 2, 3]}
expected = "Value ([1, 2, 3]) must be a two-dimensional point"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [5, "a"]
expected = "PointField can only accept lists of [x, y]"
for coord in invalid_coords:
self._test_for_expected_error(Location, coord, expected)
invalid_coords = [[], [1], [1, 2, 3]]
for coord in invalid_coords:
expected = "Value (%s) must be a two-dimensional point" % repr(coord)
self._test_for_expected_error(Location, coord, expected)
invalid_coords = [[{}, {}], ("a", "b")]
for coord in invalid_coords:
expected = "Both values (%s) in point must be float or int" % repr(coord)
self._test_for_expected_error(Location, coord, expected)
Location(loc=[1, 2]).validate()
Location(
loc={"type": "Point", "coordinates": [81.4471435546875, 23.61432859499169]}
).validate()
def test_linestring_validation(self):
class Location(Document):
loc = LineStringField()
invalid_coords = {"x": 1, "y": 2}
expected = "LineStringField can only accept a valid GeoJson dictionary or lists of (x, y)"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "MadeUp", "coordinates": [[]]}
expected = 'LineStringField type must be "LineString"'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "LineString", "coordinates": [[1, 2, 3]]}
expected = (
"Invalid LineString:\nValue ([1, 2, 3]) must be a two-dimensional point"
)
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [5, "a"]
expected = "Invalid LineString must contain at least one valid point"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[1]]
expected = (
"Invalid LineString:\nValue (%s) must be a two-dimensional point"
% repr(invalid_coords[0])
)
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[1, 2, 3]]
expected = (
"Invalid LineString:\nValue (%s) must be a two-dimensional point"
% repr(invalid_coords[0])
)
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[{}, {}]], [("a", "b")]]
for coord in invalid_coords:
expected = (
"Invalid LineString:\nBoth values (%s) in point must be float or int"
% repr(coord[0])
)
self._test_for_expected_error(Location, coord, expected)
Location(loc=[[1, 2], [3, 4], [5, 6], [1, 2]]).validate()
def test_polygon_validation(self):
class Location(Document):
loc = PolygonField()
invalid_coords = {"x": 1, "y": 2}
expected = (
"PolygonField can only accept a valid GeoJson dictionary or lists of (x, y)"
)
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "MadeUp", "coordinates": [[]]}
expected = 'PolygonField type must be "Polygon"'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "Polygon", "coordinates": [[[1, 2, 3]]]}
expected = "Invalid Polygon:\nValue ([1, 2, 3]) must be a two-dimensional point"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[5, "a"]]]
expected = (
"Invalid Polygon:\nBoth values ([5, 'a']) in point must be float or int"
)
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[]]]
expected = "Invalid Polygon must contain at least one valid linestring"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[1, 2, 3]]]
expected = "Invalid Polygon:\nValue ([1, 2, 3]) must be a two-dimensional point"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[{}, {}]], [("a", "b")]]
expected = "Invalid Polygon:\nBoth values ([{}, {}]) in point must be float or int, Both values (('a', 'b')) in point must be float or int"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[1, 2], [3, 4]]]
expected = "Invalid Polygon:\nLineStrings must start and end at the same point"
self._test_for_expected_error(Location, invalid_coords, expected)
Location(loc=[[[1, 2], [3, 4], [5, 6], [1, 2]]]).validate()
def test_multipoint_validation(self):
class Location(Document):
loc = MultiPointField()
invalid_coords = {"x": 1, "y": 2}
expected = "MultiPointField can only accept a valid GeoJson dictionary or lists of (x, y)"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "MadeUp", "coordinates": [[]]}
expected = 'MultiPointField type must be "MultiPoint"'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "MultiPoint", "coordinates": [[1, 2, 3]]}
expected = "Value ([1, 2, 3]) must be a two-dimensional point"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[]]
expected = "Invalid MultiPoint must contain at least one valid point"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[1]], [[1, 2, 3]]]
for coord in invalid_coords:
expected = "Value (%s) must be a two-dimensional point" % repr(coord[0])
self._test_for_expected_error(Location, coord, expected)
invalid_coords = [[[{}, {}]], [("a", "b")]]
for coord in invalid_coords:
expected = "Both values (%s) in point must be float or int" % repr(coord[0])
self._test_for_expected_error(Location, coord, expected)
Location(loc=[[1, 2]]).validate()
Location(
loc={
"type": "MultiPoint",
"coordinates": [[1, 2], [81.4471435546875, 23.61432859499169]],
}
).validate()
def test_multilinestring_validation(self):
class Location(Document):
loc = MultiLineStringField()
invalid_coords = {"x": 1, "y": 2}
expected = "MultiLineStringField can only accept a valid GeoJson dictionary or lists of (x, y)"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "MadeUp", "coordinates": [[]]}
expected = 'MultiLineStringField type must be "MultiLineString"'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "MultiLineString", "coordinates": [[[1, 2, 3]]]}
expected = "Invalid MultiLineString:\nValue ([1, 2, 3]) must be a two-dimensional point"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [5, "a"]
expected = "Invalid MultiLineString must contain at least one valid linestring"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[1]]]
expected = (
"Invalid MultiLineString:\nValue (%s) must be a two-dimensional point"
% repr(invalid_coords[0][0])
)
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[1, 2, 3]]]
expected = (
"Invalid MultiLineString:\nValue (%s) must be a two-dimensional point"
% repr(invalid_coords[0][0])
)
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[[{}, {}]]], [[("a", "b")]]]
for coord in invalid_coords:
expected = (
"Invalid MultiLineString:\nBoth values (%s) in point must be float or int"
% repr(coord[0][0])
)
self._test_for_expected_error(Location, coord, expected)
Location(loc=[[[1, 2], [3, 4], [5, 6], [1, 2]]]).validate()
def test_multipolygon_validation(self):
class Location(Document):
loc = MultiPolygonField()
invalid_coords = {"x": 1, "y": 2}
expected = "MultiPolygonField can only accept a valid GeoJson dictionary or lists of (x, y)"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "MadeUp", "coordinates": [[]]}
expected = 'MultiPolygonField type must be "MultiPolygon"'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "MultiPolygon", "coordinates": [[[[1, 2, 3]]]]}
expected = (
"Invalid MultiPolygon:\nValue ([1, 2, 3]) must be a two-dimensional point"
)
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[[5, "a"]]]]
expected = "Invalid MultiPolygon:\nBoth values ([5, 'a']) in point must be float or int"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[[]]]]
expected = "Invalid MultiPolygon must contain at least one valid Polygon"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[[1, 2, 3]]]]
expected = (
"Invalid MultiPolygon:\nValue ([1, 2, 3]) must be a two-dimensional point"
)
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[[{}, {}]]], [[("a", "b")]]]
expected = "Invalid MultiPolygon:\nBoth values ([{}, {}]) in point must be float or int, Both values (('a', 'b')) in point must be float or int"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[[1, 2], [3, 4]]]]
expected = (
"Invalid MultiPolygon:\nLineStrings must start and end at the same point"
)
self._test_for_expected_error(Location, invalid_coords, expected)
Location(loc=[[[[1, 2], [3, 4], [5, 6], [1, 2]]]]).validate()
def test_indexes_geopoint(self):
"""Ensure that indexes are created automatically for GeoPointFields."""
class Event(Document):
title = StringField()
location = GeoPointField()
geo_indicies = Event._geo_indices()
assert geo_indicies == [{"fields": [("location", "2d")]}]
def test_geopoint_embedded_indexes(self):
"""Ensure that indexes are created automatically for GeoPointFields on
embedded documents.
"""
class Venue(EmbeddedDocument):
location = GeoPointField()
name = StringField()
class Event(Document):
title = StringField()
venue = EmbeddedDocumentField(Venue)
geo_indicies = Event._geo_indices()
assert geo_indicies == [{"fields": [("venue.location", "2d")]}]
def test_indexes_2dsphere(self):
"""Ensure that indexes are created automatically for GeoPointFields."""
class Event(Document):
title = StringField()
point = PointField()
line = LineStringField()
polygon = PolygonField()
geo_indicies = Event._geo_indices()
assert {"fields": [("line", "2dsphere")]} in geo_indicies
assert {"fields": [("polygon", "2dsphere")]} in geo_indicies
assert {"fields": [("point", "2dsphere")]} in geo_indicies
def test_indexes_2dsphere_embedded(self):
"""Ensure that indexes are created automatically for GeoPointFields."""
class Venue(EmbeddedDocument):
name = StringField()
point = PointField()
line = LineStringField()
polygon = PolygonField()
class Event(Document):
title = StringField()
venue = EmbeddedDocumentField(Venue)
geo_indicies = Event._geo_indices()
assert {"fields": [("venue.line", "2dsphere")]} in geo_indicies
assert {"fields": [("venue.polygon", "2dsphere")]} in geo_indicies
assert {"fields": [("venue.point", "2dsphere")]} in geo_indicies
def test_geo_indexes_recursion(self):
class Location(Document):
name = StringField()
location = GeoPointField()
class Parent(Document):
name = StringField()
location = ReferenceField(Location)
Location.drop_collection()
Parent.drop_collection()
Parent(name="Berlin").save()
info = Parent._get_collection().index_information()
assert "location_2d" not in info
info = Location._get_collection().index_information()
assert "location_2d" in info
assert len(Parent._geo_indices()) == 0
assert len(Location._geo_indices()) == 1
def test_geo_indexes_auto_index(self):
# Test just listing the fields
class Log(Document):
location = PointField(auto_index=False)
datetime = DateTimeField()
meta = {"indexes": [[("location", "2dsphere"), ("datetime", 1)]]}
assert Log._geo_indices() == []
Log.drop_collection()
Log.ensure_indexes()
info = Log._get_collection().index_information()
assert info["location_2dsphere_datetime_1"]["key"] == [
("location", "2dsphere"),
("datetime", 1),
]
# Test listing explicitly
class Log(Document):
location = PointField(auto_index=False)
datetime = DateTimeField()
meta = {
"indexes": [{"fields": [("location", "2dsphere"), ("datetime", 1)]}]
}
assert Log._geo_indices() == []
Log.drop_collection()
Log.ensure_indexes()
info = Log._get_collection().index_information()
assert info["location_2dsphere_datetime_1"]["key"] == [
("location", "2dsphere"),
("datetime", 1),
]
if __name__ == "__main__":
unittest.main()
| TestGeoField |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/transformer.py | {
"start": 4193,
"end": 5163
} | class ____(object):
"""Syntactic sugar for accessing an instance of a StateStack context manager.
This structure offers syntactic sugar over a dict of stacks of objects
of known type. These structures are useful to keep state during AST walks.
Multiple different scopes can be tracked in parallel. For example:
s = _State()
s[foo].enter()
s[bar].enter() # this will not affect s[foo]
Element access has special semantics:
* keys are a data type
* element values are _StateStack(type=key) objects
* missing elements are automatically added, similarly to defaultdict
For example, the following block :
_State s
s[Foo]
Is equivalent to:
s = {}
if Foo not in s:
s[Foo] = Foo()
s[Foo]
See Base for how it's used.
"""
def __init__(self):
self._value = {}
def __getitem__(self, key):
if key not in self._value:
self._value[key] = _StateStack(key)
return self._value[key]
| _State |
python | getsentry__sentry | tests/sentry/workflow_engine/processors/test_detector.py | {
"start": 1854,
"end": 2760
} | class ____(BaseDetectorHandlerTest):
def setUp(self) -> None:
super().setUp()
self.detector = self.create_detector(
type=self.handler_type.slug,
workflow_condition_group=self.create_data_condition_group(),
)
cache.clear()
def test_no_caching(self) -> None:
# Refetch without `.select_related` to make sure that the object isn't cached
self.detector = Detector.objects.get(id=self.detector.id)
with self.assertNumQueries(1):
self.detector.detector_handler
def test_caching(self) -> None:
# Refetch with `.select_related` to make sure that the object iscached
self.detector = Detector.objects.select_related("workflow_condition_group").get(
id=self.detector.id
)
with self.assertNumQueries(0):
self.detector.detector_handler
@freeze_time()
| TestInit |
python | gevent__gevent | src/greentest/3.12/test_ssl.py | {
"start": 105011,
"end": 114714
} | class ____(threading.Thread):
# this one's based on asyncore.dispatcher
class EchoServer (asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = test_wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = socket_helper.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
# make sure that ConnectionHandler is removed from socket_map
asyncore.close_all(ignore_all=True)
def start (self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
with warnings_helper.check_warnings():
# ignore Deprecation warnings
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
min_version = PROTOCOL_TO_TLS_VERSION.get(client_protocol, None)
if (min_version is not None
# SSLContext.minimum_version is only available on recent OpenSSL
# (setter added in OpenSSL 1.1.0, getter added in OpenSSL 1.1.1)
and hasattr(server_context, 'minimum_version')
and server_protocol == ssl.PROTOCOL_TLS
and server_context.minimum_version > min_version
):
# If OpenSSL configuration is strict and requires more recent TLS
# version, we have to change the minimum to test old TLS versions.
with warnings_helper.check_warnings():
server_context.minimum_version = min_version
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_TLS:
client_context.set_ciphers("ALL")
seclevel_workaround(server_context, client_context)
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(SIGNED_CERTFILE)
ctx.load_verify_locations(SIGNING_CA)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
def supports_kx_alias(ctx, aliases):
for cipher in ctx.get_ciphers():
for alias in aliases:
if f"Kx={alias}" in cipher['description']:
return True
return False
| AsyncoreEchoServer |
python | apache__airflow | airflow-core/src/airflow/task/trigger_rule.py | {
"start": 847,
"end": 1749
} | class ____(str, Enum):
"""Class with task's trigger rules."""
ALL_SUCCESS = "all_success"
ALL_FAILED = "all_failed"
ALL_DONE = "all_done"
ALL_DONE_MIN_ONE_SUCCESS = "all_done_min_one_success"
ALL_DONE_SETUP_SUCCESS = "all_done_setup_success"
ONE_SUCCESS = "one_success"
ONE_FAILED = "one_failed"
ONE_DONE = "one_done"
NONE_FAILED = "none_failed"
NONE_SKIPPED = "none_skipped"
ALWAYS = "always"
NONE_FAILED_MIN_ONE_SUCCESS = "none_failed_min_one_success"
ALL_SKIPPED = "all_skipped"
@classmethod
def is_valid(cls, trigger_rule: str) -> bool:
"""Validate a trigger rule."""
return trigger_rule in cls.all_triggers()
@classmethod
def all_triggers(cls) -> set[str]:
"""Return all trigger rules."""
return set(cls.__members__.values())
def __str__(self) -> str:
return self.value
| TriggerRule |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_legacy_slugs.py | {
"start": 3080,
"end": 3743
} | class ____(util.MdCase):
"""Test encoded GitHub Flavored Markdown style slugs."""
extension = ['markdown.extensions.toc']
extension_configs = {
'markdown.extensions.toc': {
"slugify": slugs.gfm_encoded
}
}
def test_slug(self):
"""Test the slug output."""
with pytest.warns(DeprecationWarning):
self.check_markdown(
r'# Testing GFM unicode-slugs_headers ±♠Ωℑ with encoding',
r'<h1 id="testing-gfm-unicode-slugs_headers-%CE%A9%E2%84%91-with-encoding">'
r'Testing GFM unicode-slugs_headers ±♠Ωℑ with encoding</h1>'
)
| TestGFMEncoded |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 472478,
"end": 472966
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of ApproveVerifiableDomain"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "domain")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
domain = sgqlc.types.Field("VerifiableDomain", graphql_name="domain")
"""The verifiable domain that was approved."""
| ApproveVerifiableDomainPayload |
python | vyperlang__vyper | vyper/utils.py | {
"start": 357,
"end": 3414
} | class ____(Generic[_T]):
"""
a minimal "ordered set" class. this is needed in some places
because, while dict guarantees you can recover insertion order
vanilla sets do not.
no attempt is made to fully implement the set API, will add
functionality as needed.
"""
def __init__(self, iterable=None):
if iterable is None:
self._data = dict()
else:
self._data = dict.fromkeys(iterable)
def __repr__(self):
keys = ", ".join(repr(k) for k in self)
return f"{{{keys}}}"
def __iter__(self):
return iter(self._data)
def __reversed__(self):
return reversed(self._data)
def __contains__(self, item):
return self._data.__contains__(item)
def __len__(self):
return len(self._data)
def first(self):
return next(iter(self))
def last(self):
return next(reversed(self))
def pop(self):
return self._data.popitem()[0]
def add(self, item: _T) -> None:
self._data[item] = None
# NOTE to refactor: duplicate of self.update()
def addmany(self, iterable):
for item in iterable:
self._data[item] = None
def remove(self, item: _T) -> None:
del self._data[item]
def discard(self, item: _T):
# friendly version of remove
self._data.pop(item, None)
# consider renaming to "discardmany"
def dropmany(self, iterable):
for item in iterable:
self._data.pop(item, None)
def clear(self):
self._data.clear()
def difference(self, other):
ret = self.copy()
ret.dropmany(other)
return ret
def update(self, other):
# CMC 2024-03-22 for some reason, this is faster than dict.update?
# (maybe size dependent)
for item in other:
self._data[item] = None
def union(self, other):
return self | other
# set dunders
def __ior__(self, other):
self.update(other)
return self
def __or__(self, other):
ret = self.copy()
ret.update(other)
return ret
def __eq__(self, other):
return self._data == other._data
def __isub__(self, other):
self.dropmany(other)
return self
def __sub__(self, other):
ret = self.copy()
ret.dropmany(other)
return ret
def copy(self):
cls = self.__class__
ret = cls.__new__(cls)
ret._data = self._data.copy()
return ret
@classmethod
def intersection(cls, *sets):
if len(sets) == 0:
raise ValueError("undefined: intersection of no sets")
tmp = sets[0]._data.keys()
for s in sets[1:]:
tmp &= s._data.keys()
return cls(tmp)
def uniq(seq: Iterable[_T]) -> Iterator[_T]:
"""
Yield unique items in ``seq`` in original sequence order.
"""
seen: Set[_T] = set()
for x in seq:
if x in seen:
continue
seen.add(x)
yield x
| OrderedSet |
python | gevent__gevent | src/gevent/_config.py | {
"start": 8554,
"end": 8738
} | class ____(object):
def validate(self, value):
if value is not None and value <= 0:
raise ValueError("Must be positive")
return value
| _PositiveValueMixin |
python | kamyu104__LeetCode-Solutions | Python/make-array-non-decreasing.py | {
"start": 38,
"end": 327
} | class ____(object):
def maximumPossibleSize(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = prev = 0
for x in nums:
if prev <= x:
prev = x
result += 1
return result
| Solution |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 209060,
"end": 209267
} | class ____(AnyMark):
"""CompositeMark schema wrapper."""
_schema = {"$ref": "#/definitions/CompositeMark"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| CompositeMark |
python | great-expectations__great_expectations | tests/datasource/fluent/test_sql_datasources.py | {
"start": 14115,
"end": 18569
} | class ____:
@pytest.mark.parametrize("schema_name", ["my_schema", "MY_SCHEMA", "My_Schema"])
def test_unquoted_schema_names_are_added_as_lowercase(
self,
sql_datasource_table_asset_test_connection_noop: SQLDatasource,
schema_name: str,
):
my_datasource: SQLDatasource = sql_datasource_table_asset_test_connection_noop
table_asset = my_datasource.add_table_asset(
name="my_table_asset",
table_name="my_table",
schema_name=schema_name,
)
assert table_asset.schema_name == schema_name.lower()
@pytest.mark.parametrize("table_name", ["my_table", "MY_TABLE", "My_Table"])
def test_unquoted_table_names_are_unquoted(
self,
sql_datasource_table_asset_test_connection_noop: SQLDatasource,
table_name: str,
):
my_datasource: SQLDatasource = sql_datasource_table_asset_test_connection_noop
table_asset = my_datasource.add_table_asset(
name="my_table_asset",
table_name=table_name,
schema_name="my_schema",
)
assert isinstance(table_asset.table_name, sqlalchemy.quoted_name)
assert table_asset.table_name == table_name
assert not table_asset.table_name.quote
@pytest.mark.parametrize(
"schema_name",
[
'"my_schema"',
'"MY_SCHEMA"',
'"My_Schema"',
"'my_schema'",
"'MY_SCHEMA'",
"'My_Schema'",
"`My_Schema`",
"[My_Schema]",
],
)
def test_quoted_schema_names_are_not_modified(
self,
sql_datasource_table_asset_test_connection_noop: SQLDatasource,
schema_name: str,
):
my_datasource: SQLDatasource = sql_datasource_table_asset_test_connection_noop
table_asset = my_datasource.add_table_asset(
name="my_table_asset",
table_name="my_table",
schema_name=schema_name,
)
assert table_asset.schema_name == schema_name
@pytest.mark.parametrize(
"table_name",
[
'"my_table"',
'"MY_TABLE"',
'"My_Table"',
"'my_table'",
"'MY_TABLE'",
"'My_Table'",
"`my_table`",
"[my_table]",
],
)
def test_quoted_table_names_are_quoted(
self,
sql_datasource_table_asset_test_connection_noop: SQLDatasource,
table_name: str,
):
my_datasource: SQLDatasource = sql_datasource_table_asset_test_connection_noop
table_asset = my_datasource.add_table_asset(
name="my_table_asset",
table_name=table_name,
schema_name="my_schema",
)
assert isinstance(table_asset.table_name, sqlalchemy.quoted_name)
assert table_asset.table_name == table_name[1:-1]
assert table_asset.table_name.quote
@pytest.mark.parametrize(
"table_name,serialized_name",
[
pytest.param('"my_table"', '"my_table"'),
pytest.param('"MY_TABLE"', '"MY_TABLE"'),
pytest.param('"My_Table"', '"My_Table"'),
pytest.param("'my_table'", "'my_table'"),
pytest.param("'MY_TABLE'", "'MY_TABLE'"),
pytest.param("'My_Table'", "'My_Table'"),
pytest.param("[My_Table]", "[My_Table]"),
pytest.param("`My_Table`", "`My_Table`"),
pytest.param("my_table", "my_table"),
pytest.param("MY_TABLE", "MY_TABLE"),
pytest.param("My_Table", "My_Table"),
],
)
def test_table_name_serialization_preserves_quotes(
self,
table_name: str,
serialized_name: str,
):
table_asset = TableAsset(name="my_table_asset", table_name=table_name)
with mock.patch(
"great_expectations.datasource.fluent.sql_datasource.TableAsset.datasource",
new_callable=mock.PropertyMock,
return_value=SQLDatasource(
name="my_snowflake_datasource",
connection_string="snowflake://<user_login_name>:<password>@<account_identifier>/<database_name>/<schema_name>?warehouse=<warehouse_name>&role=<role_name>",
),
):
serialized = table_asset.dict()
assert serialized["table_name"] == serialized_name
if __name__ == "__main__":
pytest.main([__file__, "-vv"])
| TestTableAsset |
python | getsentry__sentry | src/sentry/integrations/models/integration.py | {
"start": 1310,
"end": 6407
} | class ____(DefaultFieldsModelExisting):
"""
An integration tied to a particular instance of a third-party provider (a single Slack
workspace, a single GH org, etc.), which can be shared by multiple Sentry orgs.
"""
__relocation_scope__ = RelocationScope.Global
provider = models.CharField(max_length=64)
external_id = models.CharField(max_length=64)
name = models.CharField(max_length=200)
# metadata might be used to store things like credentials, but it should NOT
# be used to store organization-specific information, as an Integration
# instance can be shared by multiple organizations
metadata = models.JSONField(default=dict)
status = BoundedPositiveIntegerField(
default=ObjectStatus.ACTIVE, choices=ObjectStatus.as_choices(), null=True
)
class Meta:
app_label = "sentry"
db_table = "sentry_integration"
unique_together = (("provider", "external_id"),)
def get_provider(self) -> IntegrationProvider:
from .utils import get_provider
return get_provider(instance=self)
def get_installation(self, organization_id: int, **kwargs: Any) -> IntegrationInstallation:
from .utils import get_installation
return get_installation(instance=self, organization_id=organization_id, **kwargs)
def has_feature(self, feature: IntegrationFeatures) -> bool:
from .utils import has_feature
return has_feature(instance=self, feature=feature)
def delete(self, *args, **kwds):
with outbox_context(
transaction.atomic(using=router.db_for_write(OrganizationIntegration)), flush=False
):
for outbox in Integration.outboxes_for_update(self.id):
outbox.save()
for organization_integration in self.organizationintegration_set.all():
organization_integration.delete()
return super().delete(*args, **kwds)
@staticmethod
def outboxes_for_update(identifier: int) -> list[ControlOutbox]:
org_ids = OrganizationIntegration.objects.filter(integration_id=identifier).values_list(
"organization_id", flat=True
)
return [
ControlOutbox(
shard_scope=OutboxScope.INTEGRATION_SCOPE,
shard_identifier=identifier,
object_identifier=identifier,
category=OutboxCategory.INTEGRATION_UPDATE,
region_name=region_name,
)
for region_name in find_regions_for_orgs(org_ids)
]
def add_organization(
self,
organization_id: int | Organization | RpcOrganization,
user: User | RpcUser | None = None,
default_auth_id: int | None = None,
) -> OrganizationIntegration | None:
"""
Add an organization to this integration.
Returns None if the OrganizationIntegration was not created
"""
from sentry.integrations.models.organization_integration import OrganizationIntegration
if not isinstance(organization_id, int):
organization_id = organization_id.id
try:
with transaction.atomic(using=router.db_for_write(OrganizationIntegration)):
org_integration, created = OrganizationIntegration.objects.get_or_create(
organization_id=organization_id,
integration_id=self.id,
defaults={"default_auth_id": default_auth_id, "config": {}},
)
# TODO(Steve): add audit log if created
if not created and default_auth_id:
org_integration.update(default_auth_id=default_auth_id)
if created:
organization_service.schedule_signal(
integration_added,
organization_id=organization_id,
args=dict(integration_id=self.id, user_id=user.id if user else None),
)
return org_integration
except IntegrityError:
logger.info(
"add-organization-integrity-error",
extra={
"organization_id": organization_id,
"integration_id": self.id,
"default_auth_id": default_auth_id,
},
)
return None
def disable(self):
"""
Disable this integration
"""
self.update(status=ObjectStatus.DISABLED)
self.save()
@classmethod
def sanitize_relocation_json(
cls, json: Any, sanitizer: Sanitizer, model_name: NormalizedModelName | None = None
) -> None:
model_name = get_model_name(cls) if model_name is None else model_name
super().sanitize_relocation_json(json, sanitizer, model_name)
sanitizer.set_string(json, SanitizableField(model_name, "external_id"))
sanitizer.set_json(json, SanitizableField(model_name, "metadata"), {})
sanitizer.set_string(json, SanitizableField(model_name, "provider"))
| Integration |
python | run-llama__llama_index | llama-index-core/llama_index/core/query_engine/router_query_engine.py | {
"start": 12408,
"end": 15552
} | class ____(BaseQueryEngine):
"""
Tool Retriever router query engine.
Selects a set of candidate query engines to execute a query.
Args:
retriever (ObjectRetriever): A retriever that retrieves a set of
query engine tools.
summarizer (Optional[TreeSummarize]): Tree summarizer to summarize sub-results.
"""
def __init__(
self,
retriever: ObjectRetriever[QueryEngineTool],
llm: Optional[LLM] = None,
summarizer: Optional[TreeSummarize] = None,
) -> None:
llm = llm or Settings.llm
self._summarizer = summarizer or TreeSummarize(
llm=llm,
summary_template=DEFAULT_TREE_SUMMARIZE_PROMPT_SEL,
)
self._retriever = retriever
super().__init__(Settings.callback_manager)
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
# NOTE: don't include tools for now
return {"summarizer": self._summarizer}
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
query_engine_tools = self._retriever.retrieve(query_bundle)
responses = []
for query_engine_tool in query_engine_tools:
query_engine = query_engine_tool.query_engine
responses.append(query_engine.query(query_bundle))
if len(responses) > 1:
final_response = combine_responses(
self._summarizer, responses, query_bundle
)
else:
final_response = responses[0]
# add selected result
final_response.metadata = final_response.metadata or {}
final_response.metadata["retrieved_tools"] = query_engine_tools
query_event.on_end(payload={EventPayload.RESPONSE: final_response})
return final_response
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
query_engine_tools = self._retriever.retrieve(query_bundle)
tasks = []
for query_engine_tool in query_engine_tools:
query_engine = query_engine_tool.query_engine
tasks.append(query_engine.aquery(query_bundle))
responses = run_async_tasks(tasks)
if len(responses) > 1:
final_response = await acombine_responses(
self._summarizer, responses, query_bundle
)
else:
final_response = responses[0]
# add selected result
final_response.metadata = final_response.metadata or {}
final_response.metadata["retrieved_tools"] = query_engine_tools
query_event.on_end(payload={EventPayload.RESPONSE: final_response})
return final_response
| ToolRetrieverRouterQueryEngine |
python | getsentry__sentry | tests/sentry/utils/test_function_cache.py | {
"start": 331,
"end": 736
} | class ____(models.Model):
__relocation_scope__ = RelocationScope.Excluded
some_field = models.TextField()
class Meta:
app_label = "fixtures"
def count_func(text_search: str):
return CacheModel.objects.filter(some_field=text_search).count()
def simple_func(val: str):
return val + "_yay"
def arg_extractor(instance: CacheModel):
return (instance.some_field,)
| CacheModel |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 89797,
"end": 90111
} | class ____:
xlColumnField = 2 # from enum XlPivotFieldOrientation
xlDataField = 4 # from enum XlPivotFieldOrientation
xlHidden = 0 # from enum XlPivotFieldOrientation
xlPageField = 3 # from enum XlPivotFieldOrientation
xlRowField = 1 # from enum XlPivotFieldOrientation
| PivotFieldOrientation |
python | pytorch__pytorch | test/inductor/test_perf.py | {
"start": 9796,
"end": 18252
} | class ____(TestCase):
"""
Tests that things can be fused into a single kernel
"""
def test_horizontal_reduction_pointwise(self):
def f(a):
b = a.sum(dim=1)
c = a.cos()
return b, c
inp = (T(10, 10),)
self.assertExpectedInline(count_numel(f, *inp), """210""")
def test_horizontal_reduction_reduction(self):
def f(a):
b = a.sum(dim=1)
c = a.amax(dim=1)
return b, c
inp = (T(10, 10),)
self.assertExpectedInline(count_numel(f, *inp), """120""")
def test_horizontal_reduction_pointwise2(self):
def f(a, b):
c = a.sum(dim=1)
b = b.cos()
return b + c
inp = (T(10, 10), T(10))
self.assertExpectedInline(count_numel(f, *inp), """120""")
def test_horizontal_reduction_outer_pointwise(self):
def f(a, b):
c = a.sum(dim=0)
b = b.cos()
return b + c
inp = (T(10, 10), T(10))
self.assertExpectedInline(count_numel(f, *inp), """120""")
def test_horizontal_sum_pw_broadcast(self):
def f(a, b):
a = a.sum(dim=1, keepdim=True)
b = b.cos()
return a * b
inp = (T(10, 10), T(10))
self.assertExpectedInline(count_numel(f, *inp), """210""")
def test_vertical_sum_pw(self):
def f(a):
a = a.cos()
a = a.sum(dim=1)
return a.cos()
inp = (T(10, 10),)
self.assertExpectedInline(count_numel(f, *inp), """110""")
def test_norm_chain(self):
def f(a):
b = a.sum(dim=1, keepdim=True)
a = a * b
b = a.sum(dim=1, keepdim=True)
a = a * b
b = a.sum(dim=1, keepdim=True)
a = a * b
return a
inp = (T(10, 10),)
self.assertExpectedInline(count_numel(f, *inp), """200""")
def test_softmax_inner(self):
def f(a):
return torch.softmax(a, dim=1)
inp = (T(10, 10),)
self.assertExpectedInline(count_numel(f, *inp), """200""")
def test_layer_norm(self):
# TODO: Suboptimal! We shouldn't need to save normalization stats.
mod = torch.nn.LayerNorm(10, device=self.device)
def f(x):
return mod(x)
inp = (T(10, 10),)
with torch.no_grad():
self.assertExpectedInline(count_numel(f, *inp), """220""")
def test_double_softmax(self):
def f(x):
x = torch.softmax(x, dim=1)
x = torch.softmax(x, dim=1)
return x
inp = (T(10, 10),)
self.assertExpectedInline(count_numel(f, *inp), """200""")
def test_softmax_backward(self):
def f(grad_out, out):
return aten._softmax_backward_data(grad_out, out, 1, torch.float32)
inp = (T(10, 10), T(10, 10))
self.assertExpectedInline(count_numel(f, *inp), """300""")
def test_neighbor(self):
def f(a, b):
return ((a - b) ** 2).sum(dim=-1).amax(dim=1)
inp = (T(10, 1, 4), T(1, 10, 4))
self.assertExpectedInline(count_numel(f, *inp), """90""")
def test_factory_reduction(self):
def f():
a = torch.ones(10, device=self.device)
b = torch.ones(10, 10, device=self.device)
return (a + b).sum(dim=-1)
inp = ()
self.assertExpectedInline(count_numel(f, *inp), """10""")
def test_index_pointwise(self):
def f(a, b):
return a[b].cos()
inp = (T(10, 10), TI(20, mx=10))
self.assertExpectedInline(count_numel(f, *inp), """320""")
def test_index_reduction(self):
def f(a, b):
return a[b].cos().sum(dim=1)
inp = (T(10, 10), TI(20, mx=10))
self.assertExpectedInline(count_numel(f, *inp), """140""")
def test_mutation_fusion(self):
def f(a, b, c):
a0 = a.add(c)
b0 = b.add(a0)
b.copy_(b0)
a.copy_(a0)
inp = (T(10, 10), T(10, 10), T(10, 10))
self.assertExpectedInline(count_numel(f, *inp), """500""")
def test_reduction_pointwise_multi_level_reduction(self):
hidden_size = 4096
layer_norm = torch.nn.LayerNorm(hidden_size).to(GPU_TYPE).float()
@torch.inference_mode()
def f(x, scale, amax_keep_dim):
x = layer_norm(x.to(dtype=torch.float))
amax = torch.amax(torch.abs(x), keepdim=amax_keep_dim)
x_scaled = x * scale
y = torch.nn.functional.sigmoid(x_scaled)
return (y, amax)
inp = (T(4, 2048, hidden_size, dtype=torch.float), T(1, dtype=torch.float))
# 2 kernels:
# kernel 1: (input = X, scale, LN scale, LN bias, output = LN_pointwise(X), first-level amax (split-reduction))
# kernel 2: (input = first-level amax, output = final amax)
# scale (1) + X (4*2048*hidden_size) * 2 + LN scale (hidden_size) + LN bias (hidden_size) + amax (4 * 2048 * 2 + 1)
expected_numel = (
1 + hidden_size * 2 + 4 * 2048 * hidden_size * 2 + 4 * 2048 * 2 + 1
)
if config.triton.cooperative_reductions:
expected_numel = 134225922
self.assertExpectedInline(count_numel(f, *inp, True), str(expected_numel))
self.assertExpectedInline(count_numel(f, *inp, False), str(expected_numel))
def test_pointwise_multi_level_reduction(self):
# TODO: this can be optimized by having the first pointwise kernel leveraging block sizes
# of the first-level reduction kernel.
hidden_size = 4096
def f(x, scale, amax_keep_dim):
x = x * 1.1
amax = torch.amax(torch.abs(x), keepdim=amax_keep_dim)
x_scaled = x * scale
y = torch.nn.functional.sigmoid(x_scaled)
return (y, amax)
inp = (T(4, 2048, hidden_size, dtype=torch.float), T(1, dtype=torch.float))
compiled_f = torch.compile(f)
compiled_f(*inp, True)
# 3 kernels:
# kernel 1: (input = X, scale, output = pointwise(X))
# kernel 2: (input = X, output = first-level amax)
# kernel 3: (input = first-level amax, output = final amax)
# scale (1) + X (4*2048*hidden_size) * 3 + amax (num_splits * 2 + 1)
# num_splits depends on SM architectures.
expected_numel = 1 + 4 * 2048 * hidden_size * 3 + 1
actual_numel_amax_keep_dim = count_numel(f, *inp, True)
actual_numel_amax_no_keep_dim = count_numel(f, *inp, False)
self.assertEqual(actual_numel_amax_keep_dim, actual_numel_amax_no_keep_dim)
self.assertGreaterAlmostEqual(actual_numel_amax_keep_dim, str(expected_numel))
def test_create_block_mask(self):
def mk_3d_flex_natten_mask(dims, kernel_size):
T, H, W = dims
K_T, K_H, K_W = kernel_size
spatial = H * W
def get_x_y_t(idx: int) -> tuple[int, int, int]:
t = idx // spatial
s = idx % spatial
x = s // W
y = s % W
return x, y, t
def get_mask(b, h, q_idx, kv_idx):
q_x, q_y, q_t = get_x_y_t(q_idx)
kv_x, kv_y, kv_t = get_x_y_t(kv_idx)
kernel_x = q_x.clamp(K_W // 2, (W - 1) - K_W // 2)
kernel_y = q_y.clamp(K_H // 2, (H - 1) - K_H // 2)
kernel_t = q_t.clamp(K_T // 2, (T - 1) - K_T // 2)
hori_mask = (kernel_x - kv_x).abs() <= K_W // 2
vert_mask = (kernel_y - kv_y).abs() <= K_H // 2
temp_mask = (kernel_t - kv_t).abs() <= K_T // 2
return hori_mask & vert_mask & temp_mask
return get_mask
T = 4
H = 16
W = 16
t = 5
h = 5
w = 5
data_size = (T, H, W)
kernel_size = (t, h, w)
S = T * H * W
from torch.nn.attention.flex_attention import create_block_mask
mask_mod = mk_3d_flex_natten_mask(data_size, kernel_size)
torch.compile(create_block_mask)(mask_mod, None, None, S, S)
numel = int(count_numel(create_block_mask, mask_mod, None, None, S, S))
# We should be writing way less than a quadratic amount of bytes here
# With fusion, we should only be writing a linear number of bytes
self.assertLess(numel * 5, S * S)
| FusionTests |
python | donnemartin__interactive-coding-challenges | graphs_trees/graph_build_order/test_build_order.py | {
"start": 18,
"end": 1633
} | class ____(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestBuildOrder, self).__init__()
self.dependencies = [
Dependency('d', 'g'),
Dependency('f', 'c'),
Dependency('f', 'b'),
Dependency('f', 'a'),
Dependency('c', 'a'),
Dependency('b', 'a'),
Dependency('a', 'e'),
Dependency('b', 'e'),
]
def test_build_order(self):
build_order = BuildOrder(self.dependencies)
processed_nodes = build_order.find_build_order()
expected_result0 = ('d', 'f')
expected_result1 = ('c', 'b', 'g')
self.assertTrue(processed_nodes[0].key in expected_result0)
self.assertTrue(processed_nodes[1].key in expected_result0)
self.assertTrue(processed_nodes[2].key in expected_result1)
self.assertTrue(processed_nodes[3].key in expected_result1)
self.assertTrue(processed_nodes[4].key in expected_result1)
self.assertTrue(processed_nodes[5].key is 'a')
self.assertTrue(processed_nodes[6].key is 'e')
print('Success: test_build_order')
def test_build_order_circular(self):
self.dependencies.append(Dependency('e', 'f'))
build_order = BuildOrder(self.dependencies)
processed_nodes = build_order.find_build_order()
self.assertTrue(processed_nodes is None)
print('Success: test_build_order_circular')
def main():
test = TestBuildOrder()
test.test_build_order()
test.test_build_order_circular()
if __name__ == '__main__':
main()
| TestBuildOrder |
python | openai__openai-python | src/openai/types/responses/response_code_interpreter_tool_call.py | {
"start": 519,
"end": 807
} | class ____(BaseModel):
type: Literal["image"]
"""The type of the output. Always `image`."""
url: str
"""The URL of the image output from the code interpreter."""
Output: TypeAlias = Annotated[Union[OutputLogs, OutputImage], PropertyInfo(discriminator="type")]
| OutputImage |
python | huggingface__transformers | tests/models/zoedepth/test_image_processing_zoedepth.py | {
"start": 1223,
"end": 3847
} | class ____:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
ensure_multiple_of=32,
keep_aspect_ratio=False,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
do_pad=True,
):
size = size if size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.ensure_multiple_of = ensure_multiple_of
self.keep_aspect_ratio = keep_aspect_ratio
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_pad = do_pad
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"ensure_multiple_of": self.ensure_multiple_of,
"keep_aspect_ratio": self.keep_aspect_ratio,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.ensure_multiple_of, self.ensure_multiple_of
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
def prepare_depth_outputs(self):
depth_tensors = prepare_image_inputs(
batch_size=self.batch_size,
num_channels=1,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=True,
torchify=True,
)
depth_tensors = [depth_tensor.squeeze(0) for depth_tensor in depth_tensors]
stacked_depth_tensors = torch.stack(depth_tensors, dim=0)
return ZoeDepthDepthOutputProxy(predicted_depth=stacked_depth_tensors)
@require_torch
@require_vision
| ZoeDepthImageProcessingTester |
python | getsentry__sentry | src/sentry/seer/endpoints/trace_explorer_ai_translate_agentic.py | {
"start": 2928,
"end": 5107
} | class ____(OrganizationEndpoint):
"""
Endpoint to call Seer's agentic search API for translating natural language queries.
"""
publish_status = {
"POST": ApiPublishStatus.EXPERIMENTAL,
}
owner = ApiOwner.ML_AI
permission_classes = (OrganizationTraceExplorerAIPermission,)
def post(self, request: Request, organization: Organization) -> Response:
"""
Request to translate a natural language query using the agentic search API.
"""
serializer = SearchAgentTranslateSerializer(data=request.data)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
validated_data = serializer.validated_data
natural_language_query = validated_data["natural_language_query"]
strategy = validated_data.get("strategy", "Traces")
options = validated_data.get("options") or {}
model_name = options.get("model_name")
projects = self.get_projects(
request, organization, project_ids=set(validated_data["project_ids"])
)
project_ids = [project.id for project in projects]
if not features.has("organizations:seer-explorer", organization, actor=request.user):
return Response(
{"detail": "Feature flag not enabled"},
status=status.HTTP_403_FORBIDDEN,
)
has_seer_access, detail = has_seer_access_with_detail(organization, actor=request.user)
if not has_seer_access:
return Response(
{"detail": detail},
status=status.HTTP_403_FORBIDDEN,
)
if not settings.SEER_AUTOFIX_URL:
return Response(
{"detail": "Seer is not properly configured."},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
data = send_translate_agentic_request(
organization.id,
organization.slug,
project_ids,
natural_language_query,
strategy=strategy,
model_name=model_name,
)
return Response(data)
| SearchAgentTranslateEndpoint |
python | apache__airflow | task-sdk/src/airflow/sdk/bases/operator.py | {
"start": 4251,
"end": 6984
} | class ____(str, Enum):
"""
Reasons for trigger failures.
Internal use only.
:meta private:
"""
TRIGGER_TIMEOUT = "Trigger timeout"
TRIGGER_FAILURE = "Trigger failure"
TRIGGER_FAIL_REPR = "__fail__"
"""String value to represent trigger failure.
Internal use only.
:meta private:
"""
def _get_parent_defaults(dag: DAG | None, task_group: TaskGroup | None) -> tuple[dict, ParamsDict]:
if not dag:
return {}, ParamsDict()
dag_args = copy.copy(dag.default_args)
dag_params = copy.deepcopy(dag.params)
dag_params._fill_missing_param_source("dag")
if task_group:
if task_group.default_args and not isinstance(task_group.default_args, collections.abc.Mapping):
raise TypeError("default_args must be a mapping")
dag_args.update(task_group.default_args)
return dag_args, dag_params
def get_merged_defaults(
dag: DAG | None,
task_group: TaskGroup | None,
task_params: collections.abc.MutableMapping | None,
task_default_args: dict | None,
) -> tuple[dict, ParamsDict]:
args, params = _get_parent_defaults(dag, task_group)
if task_params:
if not isinstance(task_params, collections.abc.Mapping):
raise TypeError(f"params must be a mapping, got {type(task_params)}")
task_params = ParamsDict(task_params)
task_params._fill_missing_param_source("task")
params.update(task_params)
if task_default_args:
if not isinstance(task_default_args, collections.abc.Mapping):
raise TypeError(f"default_args must be a mapping, got {type(task_params)}")
args.update(task_default_args)
with contextlib.suppress(KeyError):
if params_from_default_args := ParamsDict(task_default_args["params"] or {}):
params_from_default_args._fill_missing_param_source("task")
params.update(params_from_default_args)
return args, params
def parse_retries(retries: Any) -> int | None:
if retries is None:
return 0
if type(retries) == int: # noqa: E721
return retries
try:
parsed_retries = int(retries)
except (TypeError, ValueError):
raise RuntimeError(f"'retries' type must be int, not {type(retries).__name__}")
return parsed_retries
def coerce_timedelta(value: float | timedelta, *, key: str | None = None) -> timedelta:
if isinstance(value, timedelta):
return value
return timedelta(seconds=value)
def coerce_resources(resources: dict[str, Any] | None) -> Resources | None:
if resources is None:
return None
from airflow.sdk.definitions.operator_resources import Resources
return Resources(**resources)
| TriggerFailureReason |
python | kamyu104__LeetCode-Solutions | Python/average-salary-excluding-the-minimum-and-maximum-salary.py | {
"start": 49,
"end": 429
} | class ____(object):
def average(self, salary):
"""
:type salary: List[int]
:rtype: float
"""
total, mi, ma = 0, float("inf"), float("-inf")
for s in salary:
total += s
mi, ma = min(mi, s), max(ma, s)
return 1.0*(total-mi-ma)/(len(salary)-2)
# Time: O(n)
# Space: O(1)
# one-liner solution
| Solution |
python | facelessuser__soupsieve | tests/test_level4/test_indeterminate.py | {
"start": 58,
"end": 2407
} | class ____(util.TestCase):
"""Test indeterminate selectors."""
def test_indeterminate(self):
"""Test indeterminate."""
markup = """
<input type="radio" name="" id="radio-no-name1">
<label>No name 1</label>
<input type="radio" name="" id="radio-no-name2" checked>
<label>no name 2</label>
<div>
<input type="checkbox" id="checkbox" indeterminate>
<label for="checkbox">This label starts out lime.</label>
</div>
<div>
<input type="radio" name="test" id="radio1">
<label for="radio1">This label starts out lime.</label>
<form>
<input type="radio" name="test" id="radio2">
<label for="radio2">This label starts out lime.</label>
<input type="radio" name="test" id="radio3" checked>
<label for="radio3">This label starts out lime.</label>
<input type="radio" name="other" id="radio4">
<label for="radio4">This label starts out lime.</label>
<input type="radio" name="other" id="radio5">
<label for="radio5">This label starts out lime.</label>
</form>
<input type="radio" name="test" id="radio6">
<label for="radio6">This label starts out lime.</label>
</div>
"""
self.assert_selector(
markup,
":indeterminate",
['checkbox', 'radio1', 'radio6', 'radio4', 'radio5', 'radio-no-name1'],
flags=util.HTML
)
def test_iframe(self):
"""Test indeterminate when `iframe` is involved."""
markup = """
<form>
<input type="radio" name="test" id="radio1">
<label for="radio1">This label starts out lime.</label>
<iframe>
<html>
<body>
<input type="radio" name="test" id="radio2" checked>
<label for="radio2">This label starts out lime.</label>
<input type="radio" name="other" id="radio3">
<label for="radio3">This label starts out lime.</label>
</body>
</html>
</iframe></form>"""
self.assert_selector(
markup,
":indeterminate",
['radio1', 'radio3'],
flags=util.PYHTML
)
| TestIndeterminate |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 134374,
"end": 134642
} | class ____(Layout):
"""A Tensor layout we cannot change"""
def make_indexer(self) -> Callable[[Sequence[Expr]], Expr]:
"""A closure containing math to read a given element"""
return _fixed_indexer(self.size, self.stride, self.offset)
| FixedLayout |
python | spyder-ide__spyder | external-deps/spyder-remote-services/spyder_remote_services/services/files/handlers.py | {
"start": 6611,
"end": 6910
} | class ____(BaseFSHandler):
@web.authenticated
@authorized
def delete(self):
result = self.fs_rmdir(
self.get_path_argument("path"),
non_empty=(self.get_argument("non_empty", "false").lower() == "true"),
)
self.write_json(result)
| RmdirHandler |
python | Netflix__metaflow | metaflow/plugins/cards/card_modules/test_cards.py | {
"start": 5363,
"end": 5876
} | class ____(MetaflowCard):
"""Card that renders a tiny PNG using ``TaskToDict.parse_image``."""
type = "test_image_card"
def render(self, task):
from .convert_to_native_type import TaskToDict
import base64
png_bytes = base64.b64decode(
"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR4nGNgYGBgAAAABQABRDE8UwAAAABJRU5ErkJggg=="
)
img_src = TaskToDict().parse_image(png_bytes)
return f"<html><img src='{img_src}' /></html>"
| TestImageCard |
python | apache__airflow | airflow-core/src/airflow/cli/commands/task_command.py | {
"start": 8915,
"end": 11217
} | class ____:
"""Marker for listener hooks, to properly detect from which component they are called."""
@cli_utils.action_cli(check_db=False)
@providers_configuration_loaded
def task_failed_deps(args) -> None:
"""
Get task instance dependencies that were not met.
Returns the unmet dependencies for a task instance from the perspective of the
scheduler (i.e. why a task instance doesn't get scheduled and then queued by the
scheduler, and then run by an executor).
>>> airflow tasks failed-deps tutorial sleep 2015-01-01
Task instance dependencies not met:
Dagrun Running: Task instance's dagrun did not exist: Unknown reason
Trigger Rule: Task's trigger rule 'all_success' requires all upstream tasks
to have succeeded, but found 1 non-success(es).
"""
task = get_db_dag(args.bundle_name, args.dag_id).get_task(task_id=args.task_id)
ti, _ = _get_ti(task, args.map_index, logical_date_or_run_id=args.logical_date_or_run_id)
dep_context = DepContext(deps=SCHEDULER_QUEUED_DEPS)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
# TODO, Do we want to print or log this
if failed_deps:
print("Task instance dependencies not met:")
for dep in failed_deps:
print(f"{dep.dep_name}: {dep.reason}")
else:
print("Task instance dependencies are all met.")
@cli_utils.action_cli(check_db=False)
@suppress_logs_and_warning
@providers_configuration_loaded
def task_state(args) -> None:
"""
Return the state of a TaskInstance at the command line.
>>> airflow tasks state tutorial sleep 2015-01-01
success
"""
if not (dag := SerializedDagModel.get_dag(args.dag_id)):
raise SystemExit(f"Can not find dag {args.dag_id!r}")
task = dag.get_task(task_id=args.task_id)
ti, _ = _get_ti(task, args.map_index, logical_date_or_run_id=args.logical_date_or_run_id)
print(ti.state)
@cli_utils.action_cli(check_db=False)
@suppress_logs_and_warning
@providers_configuration_loaded
def task_list(args, dag: DAG | None = None) -> None:
"""List the tasks within a DAG at the command line."""
dag = dag or get_bagged_dag(args.bundle_name, args.dag_id)
tasks = sorted(t.task_id for t in dag.tasks)
print("\n".join(tasks))
| TaskCommandMarker |
python | numpy__numpy | numpy/_array_api_info.py | {
"start": 429,
"end": 10354
} | class ____:
"""
Get the array API inspection namespace for NumPy.
The array API inspection namespace defines the following functions:
- capabilities()
- default_device()
- default_dtypes()
- dtypes()
- devices()
See
https://data-apis.org/array-api/latest/API_specification/inspection.html
for more details.
Returns
-------
info : ModuleType
The array API inspection namespace for NumPy.
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': numpy.float64,
'complex floating': numpy.complex128,
'integral': numpy.int64,
'indexing': numpy.int64}
"""
__module__ = 'numpy'
def capabilities(self):
"""
Return a dictionary of array API library capabilities.
The resulting dictionary has the following keys:
- **"boolean indexing"**: boolean indicating whether an array library
supports boolean indexing. Always ``True`` for NumPy.
- **"data-dependent shapes"**: boolean indicating whether an array
library supports data-dependent output shapes. Always ``True`` for
NumPy.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html
for more details.
See Also
--------
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
capabilities : dict
A dictionary of array API library capabilities.
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.capabilities()
{'boolean indexing': True,
'data-dependent shapes': True,
'max dimensions': 64}
"""
return {
"boolean indexing": True,
"data-dependent shapes": True,
"max dimensions": 64,
}
def default_device(self):
"""
The default device used for new NumPy arrays.
For NumPy, this always returns ``'cpu'``.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
device : str
The default device used for new NumPy arrays.
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.default_device()
'cpu'
"""
return "cpu"
def default_dtypes(self, *, device=None):
"""
The default data types used for new NumPy arrays.
For NumPy, this always returns the following dictionary:
- **"real floating"**: ``numpy.float64``
- **"complex floating"**: ``numpy.complex128``
- **"integral"**: ``numpy.intp``
- **"indexing"**: ``numpy.intp``
Parameters
----------
device : str, optional
The device to get the default data types for. For NumPy, only
``'cpu'`` is allowed.
Returns
-------
dtypes : dict
A dictionary describing the default data types used for new NumPy
arrays.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': numpy.float64,
'complex floating': numpy.complex128,
'integral': numpy.int64,
'indexing': numpy.int64}
"""
if device not in ["cpu", None]:
raise ValueError(
'Device not understood. Only "cpu" is allowed, but received:'
f' {device}'
)
return {
"real floating": dtype(float64),
"complex floating": dtype(complex128),
"integral": dtype(intp),
"indexing": dtype(intp),
}
def dtypes(self, *, device=None, kind=None):
"""
The array API data types supported by NumPy.
Note that this function only returns data types that are defined by
the array API.
Parameters
----------
device : str, optional
The device to get the data types for. For NumPy, only ``'cpu'`` is
allowed.
kind : str or tuple of str, optional
The kind of data types to return. If ``None``, all data types are
returned. If a string, only data types of that kind are returned.
If a tuple, a dictionary containing the union of the given kinds
is returned. The following kinds are supported:
- ``'bool'``: boolean data types (i.e., ``bool``).
- ``'signed integer'``: signed integer data types (i.e., ``int8``,
``int16``, ``int32``, ``int64``).
- ``'unsigned integer'``: unsigned integer data types (i.e.,
``uint8``, ``uint16``, ``uint32``, ``uint64``).
- ``'integral'``: integer data types. Shorthand for ``('signed
integer', 'unsigned integer')``.
- ``'real floating'``: real-valued floating-point data types
(i.e., ``float32``, ``float64``).
- ``'complex floating'``: complex floating-point data types (i.e.,
``complex64``, ``complex128``).
- ``'numeric'``: numeric data types. Shorthand for ``('integral',
'real floating', 'complex floating')``.
Returns
-------
dtypes : dict
A dictionary mapping the names of data types to the corresponding
NumPy data types.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.dtypes(kind='signed integer')
{'int8': numpy.int8,
'int16': numpy.int16,
'int32': numpy.int32,
'int64': numpy.int64}
"""
if device not in ["cpu", None]:
raise ValueError(
'Device not understood. Only "cpu" is allowed, but received:'
f' {device}'
)
if kind is None:
return {
"bool": dtype(bool),
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
"float32": dtype(float32),
"float64": dtype(float64),
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if kind == "bool":
return {"bool": bool}
if kind == "signed integer":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
}
if kind == "unsigned integer":
return {
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
}
if kind == "integral":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
}
if kind == "real floating":
return {
"float32": dtype(float32),
"float64": dtype(float64),
}
if kind == "complex floating":
return {
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if kind == "numeric":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
"float32": dtype(float32),
"float64": dtype(float64),
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if isinstance(kind, tuple):
res = {}
for k in kind:
res.update(self.dtypes(kind=k))
return res
raise ValueError(f"unsupported kind: {kind!r}")
def devices(self):
"""
The devices supported by NumPy.
For NumPy, this always returns ``['cpu']``.
Returns
-------
devices : list of str
The devices supported by NumPy.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.devices()
['cpu']
"""
return ["cpu"]
| __array_namespace_info__ |
python | milvus-io__pymilvus | pymilvus/client/types.py | {
"start": 23869,
"end": 24467
} | class ____:
"""
RoleInfo groups:
- UserItem: <role_name:admin>, <users:('root',)>
"""
def __init__(self, results: List[milvus_types.RoleResult]) -> None:
groups = []
for result in results:
if isinstance(result, milvus_types.RoleResult):
groups.append(RoleItem(result.role.name, result.users))
self._groups = groups
def __repr__(self) -> str:
s = "RoleInfo groups:"
for g in self.groups:
s += f"\n- {g}"
return s
@property
def groups(self):
return self._groups
| RoleInfo |
python | spyder-ide__spyder | spyder/plugins/completion/providers/snippets/widgets/snippetsconfig.py | {
"start": 16804,
"end": 19160
} | class ____(QAbstractTableModel):
TRIGGER = 0
DESCRIPTION = 1
def __init__(self, parent):
QAbstractTableModel.__init__(self)
self.parent = parent
self.snippets = []
self.delete_queue = []
self.snippet_map = {}
self.rich_text = []
self.normal_text = []
self.letters = ''
self.label = QLabel()
self.widths = []
def sortByName(self):
self.snippets = sorted(self.snippets, key=lambda x: x.trigger_text)
self.reset()
def flags(self, index):
if not index.isValid():
return Qt.ItemIsEnabled
return Qt.ItemFlags(QAbstractTableModel.flags(self, index))
def data(self, index, role=Qt.DisplayRole):
row = index.row()
if not index.isValid() or not (0 <= row < len(self.snippets)):
return to_qvariant()
snippet = self.snippets[row]
column = index.column()
if role == Qt.DisplayRole:
if column == self.TRIGGER:
return to_qvariant(snippet.trigger_text)
elif column == self.DESCRIPTION:
return to_qvariant(snippet.description)
elif role == Qt.TextAlignmentRole:
return to_qvariant(int(Qt.AlignHCenter | Qt.AlignVCenter))
elif role == Qt.ToolTipRole:
return to_qvariant(_("Double-click to view or edit"))
return to_qvariant()
def headerData(self, section, orientation, role=Qt.DisplayRole):
if role == Qt.TextAlignmentRole:
if orientation == Qt.Horizontal:
return to_qvariant(int(Qt.AlignHCenter | Qt.AlignVCenter))
return to_qvariant(int(Qt.AlignRight | Qt.AlignVCenter))
if role != Qt.DisplayRole:
return to_qvariant()
if orientation == Qt.Horizontal:
if section == self.TRIGGER:
return to_qvariant(_('Trigger text'))
elif section == self.DESCRIPTION:
return to_qvariant(_('Description'))
return to_qvariant()
def rowCount(self, index=QModelIndex()):
return len(self.snippets)
def columnCount(self, index=QModelIndex()):
return 2
def row(self, row_num):
return self.snippets[row_num]
def reset(self):
self.beginResetModel()
self.endResetModel()
| SnippetsModel |
python | readthedocs__readthedocs.org | readthedocs/integrations/models.py | {
"start": 11511,
"end": 11859
} | class ____(Integration):
integration_type_id = Integration.GITHUB_WEBHOOK
has_sync = True
class Meta:
proxy = True
@property
def can_sync(self):
try:
return all((k in self.provider_data) for k in ["id", "url"])
except (ValueError, TypeError):
return False
@dataclass
| GitHubWebhook |
python | scikit-image__scikit-image | benchmarks/benchmark_filters.py | {
"start": 2002,
"end": 2668
} | class ____:
"""Benchmark for transform routines in scikit-image."""
def setup(self):
self.image = np.zeros((2000, 2000), dtype=np.uint8)
self.image3D = np.zeros((30, 300, 300), dtype=np.uint8)
idx = np.arange(500, 700)
idx3D = np.arange(10, 200)
self.image[idx[::-1], idx] = 255
self.image[idx, idx] = 255
self.image3D[:, idx3D[::-1], idx3D] = 255
self.image3D[:, idx3D, idx3D] = 255
def time_sauvola(self):
filters.threshold_sauvola(self.image, window_size=51)
def time_sauvola_3d(self):
filters.threshold_sauvola(self.image3D, window_size=51)
| ThresholdSauvolaSuite |
python | h5py__h5py | h5py/tests/test_group.py | {
"start": 26371,
"end": 26638
} | class ____:
""" Class for exercise 'visit' and 'visititems' methods """
def __init__(self):
self._names = []
def __call__(self, name, obj=None):
self._names.append(name)
@property
def names(self):
return self._names
| Visitor |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.