language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | gevent__gevent | src/gevent/_interfaces.py | {
"start": 1363,
"end": 8231
} | class ____(Interface):
"""
The common interface expected for all event loops.
.. caution::
This is an internal, low-level interface. It may change
between minor versions of gevent.
.. rubric:: Watchers
The methods that create event loop watchers are `io`, `timer`,
`signal`, `idle`, `prepare`, `check`, `fork`, `async_`, `child`,
`stat`. These all return various types of :class:`IWatcher`.
All of those methods have one or two common arguments. *ref* is a
boolean saying whether the event loop is allowed to exit even if
this watcher is still started. *priority* is event loop specific.
"""
default = schema.Bool(
description=u"Boolean indicating whether this is the default loop",
required=True,
readonly=True,
)
approx_timer_resolution = schema.Float(
description=u"Floating point number of seconds giving (approximately) the minimum "
"resolution of a timer (and hence the minimun value the sleep can sleep for). "
"On libuv, this is fixed by the library, but on libev it is just a guess "
"and the actual value is system dependent.",
required=True,
min=0.0,
readonly=True,
)
def run(nowait=False, once=False):
"""
Run the event loop.
This is usually called automatically by the hub greenlet, but
in special cases (when the hub is *not* running) you can use
this to control how the event loop runs (for example, to integrate
it with another event loop).
"""
def now():
"""
now() -> float
Return the loop's notion of the current time.
This may not necessarily be related to :func:`time.time` (it
may have a different starting point), but it must be expressed
in fractional seconds (the same *units* used by :func:`time.time`).
"""
def update_now():
"""
Update the loop's notion of the current time.
.. versionadded:: 1.3
In the past, this available as ``update``. This is still available as
an alias but will be removed in the future.
"""
def destroy():
"""
Clean up resources used by this loop.
If you create loops
(especially loops that are not the default) you *should* call
this method when you are done with the loop.
.. caution::
As an implementation note, the libev C loop implementation has a
finalizer (``__del__``) that destroys the object, but the libuv
and libev CFFI implementations do not. The C implementation may change.
"""
def io(fd, events, ref=True, priority=None):
"""
Create and return a new IO watcher for the given *fd*.
*events* is a bitmask specifying which events to watch
for. 1 means read, and 2 means write.
*fd* should be valid. If it is not, this method _should_
throw an OSError EBADF.
"""
def closing_fd(fd):
"""
Inform the loop that the file descriptor *fd* is about to be closed.
The loop may choose to schedule events to be delivered to any active
IO watchers for the fd. libev does this so that the active watchers
can be closed.
:return: A boolean value that's true if active IO watchers were
queued to run. Closing the FD should be deferred until the next
run of the eventloop with a check watcher (callbacks may be
run immediately if we were already running callbacks when this was
added, and it needs to come after the loop).
"""
def timer(after, repeat=0.0, ref=True, priority=None):
"""
Create and return a timer watcher that will fire after *after* seconds.
If *repeat* is given, the timer will continue to fire every *repeat* seconds.
"""
def signal(signum, ref=True, priority=None):
"""
Create and return a signal watcher for the signal *signum*,
one of the constants defined in :mod:`signal`.
This is platform and event loop specific.
"""
def idle(ref=True, priority=None):
"""
Create and return a watcher that fires when the event loop is idle.
"""
def prepare(ref=True, priority=None):
"""
Create and return a watcher that fires before the event loop
polls for IO.
.. caution:: This method is not supported by libuv.
"""
def check(ref=True, priority=None):
"""
Create and return a watcher that fires after the event loop
polls for IO.
"""
def fork(ref=True, priority=None):
"""
Create a watcher that fires when the process forks.
Availability: Unix.
"""
def async_(ref=True, priority=None):
"""
Create a watcher that fires when triggered, possibly
from another thread.
.. versionchanged:: 1.3
This was previously just named ``async``; for compatibility
with Python 3.7 where ``async`` is a keyword it was renamed.
On older versions of Python the old name is still around, but
it will be removed in the future.
"""
if sys.platform != "win32":
def child(pid, trace=0, ref=True):
"""
Create a watcher that fires for events on the child with process ID *pid*.
This is platform specific and not available on Windows.
Availability: Unix.
"""
def stat(path, interval=0.0, ref=True, priority=None):
"""
Create a watcher that monitors the filesystem item at *path*.
If the operating system doesn't support event notifications
from the filesystem, poll for changes every *interval* seconds.
"""
def run_callback(func, *args):
"""
Run the *func* passing it *args* at the next opportune moment.
The next opportune moment may be the next iteration of the event loop,
the current iteration, or some other time in the future.
Returns a :class:`ICallback` object. See that documentation for
important caveats.
.. seealso:: :meth:`asyncio.loop.call_soon`
The :mod:`asyncio` equivalent.
"""
def run_callback_threadsafe(func, *args):
"""
Like :meth:`run_callback`, but for use from *outside* the
thread that is running this loop.
This not only schedules the *func* to run, it also causes the
loop to notice that the *func* has been scheduled (e.g., it causes
the loop to wake up).
.. versionadded:: 21.1.0
.. seealso:: :meth:`asyncio.loop.call_soon_threadsafe`
The :mod:`asyncio` equivalent.
"""
| ILoop |
python | PrefectHQ__prefect | tests/server/orchestration/test_core_policy.py | {
"start": 104155,
"end": 110158
} | class ____:
async def test_cannot_leave_nonblocking_pausing_state_without_a_deployment(
self,
session,
initialize_orchestration,
):
initial_state_type = states.StateType.PAUSED
proposed_state_type = states.StateType.RUNNING
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
"flow",
*intended_transition,
)
five_minutes_from_now = now("UTC") + timedelta(minutes=5)
ctx.initial_state.state_details = states.StateDetails(
pause_timeout=five_minutes_from_now, pause_reschedule=True
)
state_protection = HandleResumingPausedFlows(ctx, *intended_transition)
async with state_protection as ctx:
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.REJECT
assert ctx.validated_state_type == states.StateType.PAUSED
async def test_can_leave_blocking_pausing_state_without_a_deployment(
self,
session,
initialize_orchestration,
):
initial_state_type = states.StateType.PAUSED
proposed_state_type = states.StateType.RUNNING
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
"flow",
*intended_transition,
)
five_minutes_from_now = now("UTC") + timedelta(minutes=5)
ctx.initial_state.state_details = states.StateDetails(
pause_timeout=five_minutes_from_now
)
state_protection = HandleResumingPausedFlows(ctx, *intended_transition)
async with state_protection as ctx:
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.ACCEPT
@pytest.mark.parametrize("proposed_state_type", list(states.StateType))
async def test_transitions_out_of_pausing_states_are_restricted(
self,
session,
proposed_state_type,
deployment,
initialize_orchestration,
):
initial_state_type = states.StateType.PAUSED
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
"flow",
*intended_transition,
)
ctx.run.deployment_id = deployment.id
state_protection = HandleResumingPausedFlows(ctx, *intended_transition)
async with state_protection as ctx:
await ctx.validate_proposed_state()
permitted_resuming_states = [
states.StateType.RUNNING,
states.StateType.COMPLETED,
states.StateType.SCHEDULED,
states.StateType.FAILED,
states.StateType.CRASHED,
states.StateType.CANCELLED,
]
if proposed_state_type in permitted_resuming_states:
assert ctx.response_status == SetStateStatus.ACCEPT
else:
assert ctx.response_status == SetStateStatus.REJECT
assert ctx.validated_state_type == initial_state_type
async def test_cannot_leave_pausing_state_if_pause_has_timed_out(
self,
session,
deployment,
initialize_orchestration,
):
initial_state_type = states.StateType.PAUSED
proposed_state_type = states.StateType.RUNNING
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
"flow",
*intended_transition,
)
ctx.run.deployment_id = deployment.id
five_minutes_ago = now("UTC") - timedelta(minutes=5)
ctx.initial_state.state_details = states.StateDetails(
pause_timeout=five_minutes_ago
)
state_protection = HandleResumingPausedFlows(ctx, *intended_transition)
async with state_protection as ctx:
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.REJECT
assert ctx.validated_state.type == states.StateType.FAILED
async def test_allows_leaving_pausing_state(
self,
session,
deployment,
initialize_orchestration,
):
initial_state_type = states.StateType.PAUSED
proposed_state_type = states.StateType.RUNNING
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
"flow",
*intended_transition,
)
ctx.run.deployment_id = deployment.id
the_future = now("UTC") + timedelta(minutes=5)
ctx.initial_state.state_details = states.StateDetails(pause_timeout=the_future)
state_protection = HandleResumingPausedFlows(ctx, *intended_transition)
async with state_protection as ctx:
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.ACCEPT
async def test_marks_flow_run_as_resuming_upon_leaving_paused_state(
self,
session,
deployment,
initialize_orchestration,
):
initial_state_type = states.StateType.PAUSED
proposed_state_type = states.StateType.RUNNING
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
"flow",
*intended_transition,
)
ctx.run.deployment_id = deployment.id
the_future = now("UTC") + timedelta(minutes=5)
ctx.initial_state.state_details = states.StateDetails(pause_timeout=the_future)
state_protection = HandleResumingPausedFlows(ctx, *intended_transition)
async with state_protection as ctx:
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.ACCEPT
assert ctx.run.empirical_policy.resuming
| TestResumingFlows |
python | langchain-ai__langchain | libs/langchain_v1/langchain/agents/middleware/human_in_the_loop.py | {
"start": 1781,
"end": 1965
} | class ____(TypedDict):
"""Response when a human approves the action."""
type: Literal["approve"]
"""The type of response when a human approves the action."""
| ApproveDecision |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-iterable/source_iterable/streams.py | {
"start": 17027,
"end": 17124
} | class ____(IterableExportEventsStreamAdjustableRange):
data_field = "webPushClick"
| WebPushClick |
python | apache__airflow | providers/keycloak/src/airflow/providers/keycloak/auth_manager/user.py | {
"start": 895,
"end": 1356
} | class ____(BaseUser):
"""User model for users managed by Keycloak auth manager."""
def __init__(self, *, user_id: str, name: str, access_token: str, refresh_token: str) -> None:
self.user_id = user_id
self.name = name
self.access_token = access_token
self.refresh_token = refresh_token
def get_id(self) -> str:
return self.user_id
def get_name(self) -> str:
return self.name
| KeycloakAuthManagerUser |
python | Pylons__pyramid | tests/test_integration.py | {
"start": 21926,
"end": 24203
} | class ____(unittest.TestCase):
package = 'tests.pkgs.conflictapp'
def _makeConfig(self):
from pyramid.config import Configurator
config = Configurator()
return config
def test_autoresolved_view(self):
config = self._makeConfig()
config.include(self.package)
app = config.make_wsgi_app()
self.testapp = TestApp(app)
res = self.testapp.get('/')
self.assertTrue(b'a view' in res.body)
res = self.testapp.get('/route')
self.assertTrue(b'route view' in res.body)
def test_overridden_autoresolved_view(self):
from pyramid.response import Response
config = self._makeConfig()
config.include(self.package)
def thisview(request):
return Response('this view')
config.add_view(thisview)
app = config.make_wsgi_app()
self.testapp = TestApp(app)
res = self.testapp.get('/')
self.assertTrue(b'this view' in res.body)
def test_overridden_route_view(self):
from pyramid.response import Response
config = self._makeConfig()
config.include(self.package)
def thisview(request):
return Response('this view')
config.add_view(thisview, route_name='aroute')
app = config.make_wsgi_app()
self.testapp = TestApp(app)
res = self.testapp.get('/route')
self.assertTrue(b'this view' in res.body)
def test_nonoverridden_authorization_policy(self):
config = self._makeConfig()
config.include(self.package)
app = config.make_wsgi_app()
self.testapp = TestApp(app)
res = self.testapp.get('/protected', status=403)
self.assertTrue(b'403 Forbidden' in res.body)
def test_overridden_authorization_policy(self):
config = self._makeConfig()
config.include(self.package)
class DummySecurityPolicy:
def permits(self, context, principals, permission):
return True
config.set_authorization_policy(DummySecurityPolicy())
app = config.make_wsgi_app()
self.testapp = TestApp(app)
res = self.testapp.get('/protected', status=200)
self.assertTrue('protected view' in res)
| TestConflictApp |
python | numba__numba | numba/core/types/containers.py | {
"start": 25368,
"end": 25731
} | class ____(SimpleIteratorType):
def __init__(self, iterable):
self.parent = iterable.parent
self.iterable = iterable
yield_type = iterable.yield_type
name = "iter[{}->{}],{}".format(
iterable.parent, yield_type, iterable.name
)
super(DictIteratorType, self).__init__(name, yield_type)
| DictIteratorType |
python | django__django | django/core/exceptions.py | {
"start": 1076,
"end": 1184
} | class ____(SuspiciousOperation):
"""Redirect to scheme not in allowed list"""
pass
| DisallowedRedirect |
python | pytorch__pytorch | torch/testing/_comparison.py | {
"start": 23483,
"end": 66531
} | class ____(Pair):
"""Pair for :class:`torch.Tensor`-like inputs.
Kwargs:
allow_subclasses (bool):
rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default
values based on the type are selected. See :func:assert_close: for details.
atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default
values based on the type are selected. See :func:assert_close: for details.
equal_nan (bool): If ``True``, two ``NaN`` values are considered equal. Defaults to ``False``.
check_device (bool): If ``True`` (default), asserts that corresponding tensors are on the same
:attr:`~torch.Tensor.device`. If this check is disabled, tensors on different
:attr:`~torch.Tensor.device`'s are moved to the CPU before being compared.
check_dtype (bool): If ``True`` (default), asserts that corresponding tensors have the same ``dtype``. If this
check is disabled, tensors with different ``dtype``'s are promoted to a common ``dtype`` (according to
:func:`torch.promote_types`) before being compared.
check_layout (bool): If ``True`` (default), asserts that corresponding tensors have the same ``layout``. If this
check is disabled, tensors with different ``layout``'s are converted to strided tensors before being
compared.
check_stride (bool): If ``True`` and corresponding tensors are strided, asserts that they have the same stride.
"""
def __init__(
self,
actual: Any,
expected: Any,
*,
id: tuple[Any, ...] = (),
allow_subclasses: bool = True,
rtol: Optional[float] = None,
atol: Optional[float] = None,
equal_nan: bool = False,
check_device: bool = True,
check_dtype: bool = True,
check_layout: bool = True,
check_stride: bool = False,
**other_parameters: Any,
):
actual, expected = self._process_inputs(
actual, expected, id=id, allow_subclasses=allow_subclasses
)
super().__init__(actual, expected, id=id, **other_parameters)
self.rtol, self.atol = get_tolerances(
actual, expected, rtol=rtol, atol=atol, id=self.id
)
self.equal_nan = equal_nan
self.check_device = check_device
self.check_dtype = check_dtype
self.check_layout = check_layout
self.check_stride = check_stride
def _process_inputs(
self, actual: Any, expected: Any, *, id: tuple[Any, ...], allow_subclasses: bool
) -> tuple[torch.Tensor, torch.Tensor]:
directly_related = isinstance(actual, type(expected)) or isinstance(
expected, type(actual)
)
if not directly_related:
self._inputs_not_supported()
if not allow_subclasses and type(actual) is not type(expected):
self._inputs_not_supported()
actual, expected = (self._to_tensor(input) for input in (actual, expected))
for tensor in (actual, expected):
self._check_supported(tensor, id=id)
return actual, expected
def _to_tensor(self, tensor_like: Any) -> torch.Tensor:
if isinstance(tensor_like, torch.Tensor):
return tensor_like
try:
return torch.as_tensor(tensor_like)
except Exception:
self._inputs_not_supported()
def _check_supported(self, tensor: torch.Tensor, *, id: tuple[Any, ...]) -> None:
if tensor.layout not in {
torch.strided,
torch.jagged,
torch.sparse_coo,
torch.sparse_csr,
torch.sparse_csc,
torch.sparse_bsr,
torch.sparse_bsc,
}:
raise ErrorMeta(
ValueError, f"Unsupported tensor layout {tensor.layout}", id=id
)
def compare(self) -> None:
actual, expected = self.actual, self.expected
self._compare_attributes(actual, expected)
if any(input.device.type == "meta" for input in (actual, expected)):
return
actual, expected = self._equalize_attributes(actual, expected)
self._compare_values(actual, expected)
def _compare_attributes(
self,
actual: torch.Tensor,
expected: torch.Tensor,
) -> None:
"""Checks if the attributes of two tensors match.
Always checks
- the :attr:`~torch.Tensor.shape`,
- whether both inputs are quantized or not,
- and if they use the same quantization scheme.
Checks for
- :attr:`~torch.Tensor.layout`,
- :meth:`~torch.Tensor.stride`,
- :attr:`~torch.Tensor.device`, and
- :attr:`~torch.Tensor.dtype`
are optional and can be disabled through the corresponding ``check_*`` flag during construction of the pair.
"""
def raise_mismatch_error(
attribute_name: str, actual_value: Any, expected_value: Any
) -> NoReturn:
self._fail(
AssertionError,
f"The values for attribute '{attribute_name}' do not match: {actual_value} != {expected_value}.",
)
if actual.shape != expected.shape:
raise_mismatch_error("shape", actual.shape, expected.shape)
if actual.is_quantized != expected.is_quantized:
raise_mismatch_error(
"is_quantized", actual.is_quantized, expected.is_quantized
)
elif actual.is_quantized and actual.qscheme() != expected.qscheme():
raise_mismatch_error("qscheme()", actual.qscheme(), expected.qscheme())
if actual.layout != expected.layout:
if self.check_layout:
raise_mismatch_error("layout", actual.layout, expected.layout)
elif (
actual.layout == torch.strided
and self.check_stride
and actual.stride() != expected.stride()
):
raise_mismatch_error("stride()", actual.stride(), expected.stride())
if self.check_device and actual.device != expected.device:
raise_mismatch_error("device", actual.device, expected.device)
if self.check_dtype and actual.dtype != expected.dtype:
raise_mismatch_error("dtype", actual.dtype, expected.dtype)
def _equalize_attributes(
self, actual: torch.Tensor, expected: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor]:
"""Equalizes some attributes of two tensors for value comparison.
If ``actual`` and ``expected`` are ...
- ... not on the same :attr:`~torch.Tensor.device`, they are moved CPU memory.
- ... not of the same ``dtype``, they are promoted to a common ``dtype`` (according to
:func:`torch.promote_types`).
- ... not of the same ``layout``, they are converted to strided tensors.
Args:
actual (Tensor): Actual tensor.
expected (Tensor): Expected tensor.
Returns:
(Tuple[Tensor, Tensor]): Equalized tensors.
"""
# The comparison logic uses operators currently not supported by the MPS backends.
# See https://github.com/pytorch/pytorch/issues/77144 for details.
# TODO: Remove this conversion as soon as all operations are supported natively by the MPS backend
if actual.is_mps or expected.is_mps: # type: ignore[attr-defined]
actual = actual.cpu()
expected = expected.cpu()
if actual.device != expected.device:
actual = actual.cpu()
expected = expected.cpu()
if actual.dtype != expected.dtype:
actual_dtype = actual.dtype
expected_dtype = expected.dtype
# For uint64, this is not sound in general, which is why promote_types doesn't
# allow it, but for easy testing, we're unlikely to get confused
# by large uint64 overflowing into negative int64
if actual_dtype in [torch.uint64, torch.uint32, torch.uint16]:
actual_dtype = torch.int64
if expected_dtype in [torch.uint64, torch.uint32, torch.uint16]:
expected_dtype = torch.int64
dtype = torch.promote_types(actual_dtype, expected_dtype)
actual = actual.to(dtype)
expected = expected.to(dtype)
if actual.layout != expected.layout:
# These checks are needed, since Tensor.to_dense() fails on tensors that are already strided
actual = actual.to_dense() if actual.layout != torch.strided else actual
expected = (
expected.to_dense() if expected.layout != torch.strided else expected
)
return actual, expected
def _compare_values(self, actual: torch.Tensor, expected: torch.Tensor) -> None:
if actual.is_quantized:
compare_fn = self._compare_quantized_values
elif actual.is_sparse:
compare_fn = self._compare_sparse_coo_values
elif actual.layout in {
torch.sparse_csr,
torch.sparse_csc,
torch.sparse_bsr,
torch.sparse_bsc,
}:
compare_fn = self._compare_sparse_compressed_values
elif actual.layout == torch.jagged:
actual, expected = actual.values(), expected.values()
compare_fn = self._compare_regular_values_close
elif actual.dtype.is_floating_point and actual.dtype.itemsize == 1:
def bitwise_comp(
actual: torch.Tensor,
expected: torch.Tensor,
*,
rtol: float,
atol: float,
equal_nan: bool,
identifier: Optional[Union[str, Callable[[str], str]]] = None,
) -> None:
if rtol != 0.0 or atol != 0.0:
raise ErrorMeta(
AssertionError,
f"Rtol={rtol} and atol={atol} are not supported for bitwise comparison of low"
" dimensional floats. Please use rtol=0.0 and atol=0.0.",
)
return self._compare_regular_values_close(
actual,
expected,
rtol=rtol,
atol=atol,
equal_nan=equal_nan,
identifier=identifier,
)
compare_fn = bitwise_comp
else:
compare_fn = self._compare_regular_values_close
compare_fn(
actual, expected, rtol=self.rtol, atol=self.atol, equal_nan=self.equal_nan
)
def _compare_quantized_values(
self,
actual: torch.Tensor,
expected: torch.Tensor,
*,
rtol: float,
atol: float,
equal_nan: bool,
) -> None:
"""Compares quantized tensors by comparing the :meth:`~torch.Tensor.dequantize`'d variants for closeness.
.. note::
A detailed discussion about why only the dequantized variant is checked for closeness rather than checking
the individual quantization parameters for closeness and the integer representation for equality can be
found in https://github.com/pytorch/pytorch/issues/68548.
"""
return self._compare_regular_values_close(
actual.dequantize(),
expected.dequantize(),
rtol=rtol,
atol=atol,
equal_nan=equal_nan,
identifier=lambda default_identifier: f"Quantized {default_identifier.lower()}",
)
def _compare_sparse_coo_values(
self,
actual: torch.Tensor,
expected: torch.Tensor,
*,
rtol: float,
atol: float,
equal_nan: bool,
) -> None:
"""Compares sparse COO tensors by comparing
- the number of sparse dimensions,
- the number of non-zero elements (nnz) for equality,
- the indices for equality, and
- the values for closeness.
"""
if actual.sparse_dim() != expected.sparse_dim():
self._fail(
AssertionError,
(
f"The number of sparse dimensions in sparse COO tensors does not match: "
f"{actual.sparse_dim()} != {expected.sparse_dim()}"
),
)
if actual._nnz() != expected._nnz():
self._fail(
AssertionError,
(
f"The number of specified values in sparse COO tensors does not match: "
f"{actual._nnz()} != {expected._nnz()}"
),
)
self._compare_regular_values_equal(
actual._indices(),
expected._indices(),
identifier="Sparse COO indices",
)
self._compare_regular_values_close(
actual._values(),
expected._values(),
rtol=rtol,
atol=atol,
equal_nan=equal_nan,
identifier="Sparse COO values",
)
def _compare_sparse_compressed_values(
self,
actual: torch.Tensor,
expected: torch.Tensor,
*,
rtol: float,
atol: float,
equal_nan: bool,
) -> None:
"""Compares sparse compressed tensors by comparing
- the number of non-zero elements (nnz) for equality,
- the plain indices for equality,
- the compressed indices for equality, and
- the values for closeness.
"""
format_name, compressed_indices_method, plain_indices_method = {
torch.sparse_csr: (
"CSR",
torch.Tensor.crow_indices,
torch.Tensor.col_indices,
),
torch.sparse_csc: (
"CSC",
torch.Tensor.ccol_indices,
torch.Tensor.row_indices,
),
torch.sparse_bsr: (
"BSR",
torch.Tensor.crow_indices,
torch.Tensor.col_indices,
),
torch.sparse_bsc: (
"BSC",
torch.Tensor.ccol_indices,
torch.Tensor.row_indices,
),
}[actual.layout]
if actual._nnz() != expected._nnz():
self._fail(
AssertionError,
(
f"The number of specified values in sparse {format_name} tensors does not match: "
f"{actual._nnz()} != {expected._nnz()}"
),
)
# Compressed and plain indices in the CSR / CSC / BSR / BSC sparse formats can be `torch.int32` _or_
# `torch.int64`. While the same dtype is enforced for the compressed and plain indices of a single tensor, it
# can be different between two tensors. Thus, we need to convert them to the same dtype, or the comparison will
# fail.
actual_compressed_indices = compressed_indices_method(actual)
expected_compressed_indices = compressed_indices_method(expected)
indices_dtype = torch.promote_types(
actual_compressed_indices.dtype, expected_compressed_indices.dtype
)
self._compare_regular_values_equal(
actual_compressed_indices.to(indices_dtype),
expected_compressed_indices.to(indices_dtype),
identifier=f"Sparse {format_name} {compressed_indices_method.__name__}",
)
self._compare_regular_values_equal(
plain_indices_method(actual).to(indices_dtype),
plain_indices_method(expected).to(indices_dtype),
identifier=f"Sparse {format_name} {plain_indices_method.__name__}",
)
self._compare_regular_values_close(
actual.values(),
expected.values(),
rtol=rtol,
atol=atol,
equal_nan=equal_nan,
identifier=f"Sparse {format_name} values",
)
def _compare_regular_values_equal(
self,
actual: torch.Tensor,
expected: torch.Tensor,
*,
equal_nan: bool = False,
identifier: Optional[Union[str, Callable[[str], str]]] = None,
) -> None:
"""Checks if the values of two tensors are equal."""
self._compare_regular_values_close(
actual, expected, rtol=0, atol=0, equal_nan=equal_nan, identifier=identifier
)
def _compare_regular_values_close(
self,
actual: torch.Tensor,
expected: torch.Tensor,
*,
rtol: float,
atol: float,
equal_nan: bool,
identifier: Optional[Union[str, Callable[[str], str]]] = None,
) -> None:
"""Checks if the values of two tensors are close up to a desired tolerance."""
matches = torch.isclose(
actual, expected, rtol=rtol, atol=atol, equal_nan=equal_nan
)
if torch.all(matches):
return
if actual.shape == torch.Size([]):
msg = make_scalar_mismatch_msg(
actual.item(),
expected.item(),
rtol=rtol,
atol=atol,
identifier=identifier,
)
else:
msg = make_tensor_mismatch_msg(
actual, expected, matches, rtol=rtol, atol=atol, identifier=identifier
)
self._fail(AssertionError, msg)
def extra_repr(self) -> Sequence[str]:
return (
"rtol",
"atol",
"equal_nan",
"check_device",
"check_dtype",
"check_layout",
"check_stride",
)
def originate_pairs(
actual: Any,
expected: Any,
*,
pair_types: Sequence[type[Pair]],
sequence_types: tuple[type, ...] = (collections.abc.Sequence,),
mapping_types: tuple[type, ...] = (collections.abc.Mapping,),
id: tuple[Any, ...] = (),
**options: Any,
# pyrefly: ignore [bad-return]
) -> list[Pair]:
"""Originates pairs from the individual inputs.
``actual`` and ``expected`` can be possibly nested :class:`~collections.abc.Sequence`'s or
:class:`~collections.abc.Mapping`'s. In this case the pairs are originated by recursing through them.
Args:
actual (Any): Actual input.
expected (Any): Expected input.
pair_types (Sequence[Type[Pair]]): Sequence of pair types that will be tried to construct with the inputs.
First successful pair will be used.
sequence_types (Tuple[Type, ...]): Optional types treated as sequences that will be checked elementwise.
mapping_types (Tuple[Type, ...]): Optional types treated as mappings that will be checked elementwise.
id (Tuple[Any, ...]): Optional id of a pair that will be included in an error message.
**options (Any): Options passed to each pair during construction.
Raises:
ErrorMeta: With :class`AssertionError`, if the inputs are :class:`~collections.abc.Sequence`'s, but their
length does not match.
ErrorMeta: With :class`AssertionError`, if the inputs are :class:`~collections.abc.Mapping`'s, but their set of
keys do not match.
ErrorMeta: With :class`TypeError`, if no pair is able to handle the inputs.
ErrorMeta: With any expected exception that happens during the construction of a pair.
Returns:
(List[Pair]): Originated pairs.
"""
# We explicitly exclude str's here since they are self-referential and would cause an infinite recursion loop:
# "a" == "a"[0][0]...
if (
isinstance(actual, sequence_types)
and not isinstance(actual, str)
and isinstance(expected, sequence_types)
and not isinstance(expected, str)
):
actual_len = len(actual) # type: ignore[arg-type]
expected_len = len(expected) # type: ignore[arg-type]
if actual_len != expected_len:
raise ErrorMeta(
AssertionError,
f"The length of the sequences mismatch: {actual_len} != {expected_len}",
id=id,
)
pairs = []
for idx in range(actual_len):
pairs.extend(
originate_pairs(
actual[idx], # type: ignore[index]
expected[idx], # type: ignore[index]
pair_types=pair_types,
sequence_types=sequence_types,
mapping_types=mapping_types,
id=(*id, idx),
**options,
)
)
return pairs
elif isinstance(actual, mapping_types) and isinstance(expected, mapping_types):
actual_keys = set(actual.keys()) # type: ignore[attr-defined]
expected_keys = set(expected.keys()) # type: ignore[attr-defined]
if actual_keys != expected_keys:
missing_keys = expected_keys - actual_keys
additional_keys = actual_keys - expected_keys
raise ErrorMeta(
AssertionError,
(
f"The keys of the mappings do not match:\n"
f"Missing keys in the actual mapping: {sorted(missing_keys)}\n"
f"Additional keys in the actual mapping: {sorted(additional_keys)}"
),
id=id,
)
keys: Collection = actual_keys
# Since the origination aborts after the first failure, we try to be deterministic
with contextlib.suppress(Exception):
keys = sorted(keys)
pairs = []
for key in keys:
pairs.extend(
originate_pairs(
actual[key], # type: ignore[index]
expected[key], # type: ignore[index]
pair_types=pair_types,
sequence_types=sequence_types,
mapping_types=mapping_types,
id=(*id, key),
**options,
)
)
return pairs
else:
for pair_type in pair_types:
try:
# pyrefly: ignore [bad-instantiation]
return [pair_type(actual, expected, id=id, **options)]
# Raising an `UnsupportedInputs` during origination indicates that the pair type is not able to handle the
# inputs. Thus, we try the next pair type.
except UnsupportedInputs:
continue
# Raising an `ErrorMeta` during origination is the orderly way to abort and so we simply re-raise it. This
# is only in a separate branch, because the one below would also except it.
except ErrorMeta:
raise
# Raising any other exception during origination is unexpected and will give some extra information about
# what happened. If applicable, the exception should be expected in the future.
except Exception as error:
raise RuntimeError(
f"Originating a {pair_type.__name__}() at item {''.join(str([item]) for item in id)} with\n\n"
f"{type(actual).__name__}(): {actual}\n\n"
f"and\n\n"
f"{type(expected).__name__}(): {expected}\n\n"
f"resulted in the unexpected exception above. "
f"If you are a user and see this message during normal operation "
"please file an issue at https://github.com/pytorch/pytorch/issues. "
"If you are a developer and working on the comparison functions, "
"please except the previous error and raise an expressive `ErrorMeta` instead."
) from error
else:
raise ErrorMeta(
TypeError,
f"No comparison pair was able to handle inputs of type {type(actual)} and {type(expected)}.",
id=id,
)
def not_close_error_metas(
    actual: Any,
    expected: Any,
    *,
    pair_types: Sequence[type[Pair]] = (ObjectPair,),
    sequence_types: tuple[type, ...] = (collections.abc.Sequence,),
    mapping_types: tuple[type, ...] = (collections.abc.Mapping,),
    **options: Any,
) -> list[ErrorMeta]:
    """Compares inputs and returns the :class:`ErrorMeta`'s of all failed comparisons.

    ``actual`` and ``expected`` can be possibly nested :class:`~collections.abc.Sequence`'s or
    :class:`~collections.abc.Mapping`'s. In this case the comparison happens elementwise by recursing through them.

    Args:
        actual (Any): Actual input.
        expected (Any): Expected input.
        pair_types (Sequence[Type[Pair]]): Sequence of :class:`Pair` types that will be tried to construct with the
            inputs. First successful pair will be used. Defaults to only using :class:`ObjectPair`.
        sequence_types (Tuple[Type, ...]): Optional types treated as sequences that will be checked elementwise.
        mapping_types (Tuple[Type, ...]): Optional types treated as mappings that will be checked elementwise.
        **options (Any): Options passed to each pair during construction.

    Returns:
        One :class:`ErrorMeta` per elementwise comparison that failed; an empty list means all
        comparisons passed.
    """
    # Hide this function from `pytest`'s traceback
    __tracebackhide__ = True

    try:
        pairs = originate_pairs(
            actual,
            expected,
            pair_types=pair_types,
            sequence_types=sequence_types,
            mapping_types=mapping_types,
            **options,
        )
    except ErrorMeta as error_meta:
        # Explicitly raising from None to hide the internal traceback
        raise error_meta.to_error() from None  # noqa: RSE102

    error_metas: list[ErrorMeta] = []
    for pair in pairs:
        try:
            pair.compare()
        except ErrorMeta as error_meta:
            error_metas.append(error_meta)
        # Raising any exception besides `ErrorMeta` while comparing is unexpected and will give some extra information
        # about what happened. If applicable, the exception should be expected in the future.
        except Exception as error:
            raise RuntimeError(
                f"Comparing\n\n"
                f"{pair}\n\n"
                f"resulted in the unexpected exception above. "
                f"If you are a user and see this message during normal operation "
                "please file an issue at https://github.com/pytorch/pytorch/issues. "
                "If you are a developer and working on the comparison functions, "
                "please except the previous error and raise an expressive `ErrorMeta` instead."
            ) from error

    # [ErrorMeta Cycles]
    # The ErrorMeta objects collected above capture tracebacks that reference
    # this function's frame, while the frame's local `error_metas` references
    # those objects -- a reference cycle. Frames in the tracebacks would not
    # get freed until cycle collection, leaking cuda memory in tests. Dropping
    # the local binding in `finally` -- after the return value has been
    # evaluated -- breaks the cycle without the wrap-and-pop indirection (and
    # its type-checker suppressions) previously used here.
    try:
        return error_metas
    finally:
        del error_metas
def assert_close(
    actual: Any,
    expected: Any,
    *,
    allow_subclasses: bool = True,
    rtol: Optional[float] = None,
    atol: Optional[float] = None,
    equal_nan: bool = False,
    check_device: bool = True,
    check_dtype: bool = True,
    check_layout: bool = True,
    check_stride: bool = False,
    msg: Optional[Union[str, Callable[[str], str]]] = None,
):
    r"""Asserts that ``actual`` and ``expected`` are close.

    If ``actual`` and ``expected`` are strided, non-quantized, real-valued, and finite, they are considered close if

    .. math::

        \lvert \text{actual} - \text{expected} \rvert \le \texttt{atol} + \texttt{rtol} \cdot \lvert \text{expected} \rvert

    Non-finite values (``-inf`` and ``inf``) are only considered close if and only if they are equal. ``NaN``'s are
    only considered equal to each other if ``equal_nan`` is ``True``.

    In addition, they are only considered close if they have the same

    - :attr:`~torch.Tensor.device` (if ``check_device`` is ``True``),
    - ``dtype`` (if ``check_dtype`` is ``True``),
    - ``layout`` (if ``check_layout`` is ``True``), and
    - stride (if ``check_stride`` is ``True``).

    If either ``actual`` or ``expected`` is a meta tensor, only the attribute checks will be performed.

    If ``actual`` and ``expected`` are sparse (either having COO, CSR, CSC, BSR, or BSC layout), their strided members are
    checked individually. Indices, namely ``indices`` for COO, ``crow_indices`` and ``col_indices`` for CSR and BSR,
    or ``ccol_indices`` and ``row_indices`` for CSC and BSC layouts, respectively,
    are always checked for equality whereas the values are checked for closeness according to the definition above.

    If ``actual`` and ``expected`` are quantized, they are considered close if they have the same
    :meth:`~torch.Tensor.qscheme` and the result of :meth:`~torch.Tensor.dequantize` is close according to the
    definition above.

    ``actual`` and ``expected`` can be :class:`~torch.Tensor`'s or any tensor-or-scalar-likes from which
    :class:`torch.Tensor`'s can be constructed with :func:`torch.as_tensor`. Except for Python scalars the input types
    have to be directly related. In addition, ``actual`` and ``expected`` can be :class:`~collections.abc.Sequence`'s
    or :class:`~collections.abc.Mapping`'s in which case they are considered close if their structure matches and all
    their elements are considered close according to the above definition.

    .. note::

        Python scalars are an exception to the type relation requirement, because their :func:`type`, i.e.
        :class:`int`, :class:`float`, and :class:`complex`, is equivalent to the ``dtype`` of a tensor-like. Thus,
        Python scalars of different types can be checked, but require ``check_dtype=False``.

    Args:
        actual (Any): Actual input.
        expected (Any): Expected input.
        allow_subclasses (bool): If ``True`` (default) and except for Python scalars, inputs of directly related types
            are allowed. Otherwise type equality is required.
        rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default
            values based on the :attr:`~torch.Tensor.dtype` are selected with the below table.
        atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default
            values based on the :attr:`~torch.Tensor.dtype` are selected with the below table.
        equal_nan (Union[bool, str]): If ``True``, two ``NaN`` values will be considered equal.
        check_device (bool): If ``True`` (default), asserts that corresponding tensors are on the same
            :attr:`~torch.Tensor.device`. If this check is disabled, tensors on different
            :attr:`~torch.Tensor.device`'s are moved to the CPU before being compared.
        check_dtype (bool): If ``True`` (default), asserts that corresponding tensors have the same ``dtype``. If this
            check is disabled, tensors with different ``dtype``'s are promoted to a common ``dtype`` (according to
            :func:`torch.promote_types`) before being compared.
        check_layout (bool): If ``True`` (default), asserts that corresponding tensors have the same ``layout``. If this
            check is disabled, tensors with different ``layout``'s are converted to strided tensors before being
            compared.
        check_stride (bool): If ``True`` and corresponding tensors are strided, asserts that they have the same stride.
        msg (Optional[Union[str, Callable[[str], str]]]): Optional error message to use in case a failure occurs during
            the comparison. Can also passed as callable in which case it will be called with the generated message and
            should return the new message.

    Raises:
        ValueError: If no :class:`torch.Tensor` can be constructed from an input.
        ValueError: If only ``rtol`` or ``atol`` is specified.
        AssertionError: If corresponding inputs are not Python scalars and are not directly related.
        AssertionError: If ``allow_subclasses`` is ``False``, but corresponding inputs are not Python scalars and have
            different types.
        AssertionError: If the inputs are :class:`~collections.abc.Sequence`'s, but their length does not match.
        AssertionError: If the inputs are :class:`~collections.abc.Mapping`'s, but their set of keys do not match.
        AssertionError: If corresponding tensors do not have the same :attr:`~torch.Tensor.shape`.
        AssertionError: If ``check_layout`` is ``True``, but corresponding tensors do not have the same
            :attr:`~torch.Tensor.layout`.
        AssertionError: If only one of corresponding tensors is quantized.
        AssertionError: If corresponding tensors are quantized, but have different :meth:`~torch.Tensor.qscheme`'s.
        AssertionError: If ``check_device`` is ``True``, but corresponding tensors are not on the same
            :attr:`~torch.Tensor.device`.
        AssertionError: If ``check_dtype`` is ``True``, but corresponding tensors do not have the same ``dtype``.
        AssertionError: If ``check_stride`` is ``True``, but corresponding strided tensors do not have the same stride.
        AssertionError: If the values of corresponding tensors are not close according to the definition above.

    The following table displays the default ``rtol`` and ``atol`` for different ``dtype``'s. In case of mismatching
    ``dtype``'s, the maximum of both tolerances is used.

    +---------------------------+------------+----------+
    | ``dtype``                 | ``rtol``   | ``atol`` |
    +===========================+============+==========+
    | :attr:`~torch.float16`    | ``1e-3``   | ``1e-5`` |
    +---------------------------+------------+----------+
    | :attr:`~torch.bfloat16`   | ``1.6e-2`` | ``1e-5`` |
    +---------------------------+------------+----------+
    | :attr:`~torch.float32`    | ``1.3e-6`` | ``1e-5`` |
    +---------------------------+------------+----------+
    | :attr:`~torch.float64`    | ``1e-7``   | ``1e-7`` |
    +---------------------------+------------+----------+
    | :attr:`~torch.complex32`  | ``1e-3``   | ``1e-5`` |
    +---------------------------+------------+----------+
    | :attr:`~torch.complex64`  | ``1.3e-6`` | ``1e-5`` |
    +---------------------------+------------+----------+
    | :attr:`~torch.complex128` | ``1e-7``   | ``1e-7`` |
    +---------------------------+------------+----------+
    | :attr:`~torch.quint8`     | ``1.3e-6`` | ``1e-5`` |
    +---------------------------+------------+----------+
    | :attr:`~torch.quint2x4`   | ``1.3e-6`` | ``1e-5`` |
    +---------------------------+------------+----------+
    | :attr:`~torch.quint4x2`   | ``1.3e-6`` | ``1e-5`` |
    +---------------------------+------------+----------+
    | :attr:`~torch.qint8`      | ``1.3e-6`` | ``1e-5`` |
    +---------------------------+------------+----------+
    | :attr:`~torch.qint32`     | ``1.3e-6`` | ``1e-5`` |
    +---------------------------+------------+----------+
    | other                     | ``0.0``    | ``0.0``  |
    +---------------------------+------------+----------+

    .. note::

        :func:`~torch.testing.assert_close` is highly configurable with strict default settings. Users are encouraged
        to :func:`~functools.partial` it to fit their use case. For example, if an equality check is needed, one might
        define an ``assert_equal`` that uses zero tolerances for every ``dtype`` by default:

        >>> import functools
        >>> assert_equal = functools.partial(torch.testing.assert_close, rtol=0, atol=0)
        >>> assert_equal(1e-9, 1e-10)
        Traceback (most recent call last):
        ...
        AssertionError: Scalars are not equal!
        <BLANKLINE>
        Expected 1e-10 but got 1e-09.
        Absolute difference: 9.000000000000001e-10
        Relative difference: 9.0

    Examples:
        >>> # tensor to tensor comparison
        >>> expected = torch.tensor([1e0, 1e-1, 1e-2])
        >>> actual = torch.acos(torch.cos(expected))
        >>> torch.testing.assert_close(actual, expected)

        >>> # scalar to scalar comparison
        >>> import math
        >>> expected = math.sqrt(2.0)
        >>> actual = 2.0 / math.sqrt(2.0)
        >>> torch.testing.assert_close(actual, expected)

        >>> # numpy array to numpy array comparison
        >>> import numpy as np
        >>> expected = np.array([1e0, 1e-1, 1e-2])
        >>> actual = np.arccos(np.cos(expected))
        >>> torch.testing.assert_close(actual, expected)

        >>> # sequence to sequence comparison
        >>> import numpy as np
        >>> # The types of the sequences do not have to match. They only have to have the same
        >>> # length and their elements have to match.
        >>> expected = [torch.tensor([1.0]), 2.0, np.array(3.0)]
        >>> actual = tuple(expected)
        >>> torch.testing.assert_close(actual, expected)

        >>> # mapping to mapping comparison
        >>> from collections import OrderedDict
        >>> import numpy as np
        >>> foo = torch.tensor(1.0)
        >>> bar = 2.0
        >>> baz = np.array(3.0)
        >>> # The types and a possible ordering of mappings do not have to match. They only
        >>> # have to have the same set of keys and their elements have to match.
        >>> expected = OrderedDict([("foo", foo), ("bar", bar), ("baz", baz)])
        >>> actual = {"baz": baz, "bar": bar, "foo": foo}
        >>> torch.testing.assert_close(actual, expected)

        >>> expected = torch.tensor([1.0, 2.0, 3.0])
        >>> actual = expected.clone()
        >>> # By default, directly related instances can be compared
        >>> torch.testing.assert_close(torch.nn.Parameter(actual), expected)
        >>> # This check can be made more strict with allow_subclasses=False
        >>> torch.testing.assert_close(
        ...     torch.nn.Parameter(actual), expected, allow_subclasses=False
        ... )
        Traceback (most recent call last):
        ...
        TypeError: No comparison pair was able to handle inputs of type
        <class 'torch.nn.parameter.Parameter'> and <class 'torch.Tensor'>.
        >>> # If the inputs are not directly related, they are never considered close
        >>> torch.testing.assert_close(actual.numpy(), expected)
        Traceback (most recent call last):
        ...
        TypeError: No comparison pair was able to handle inputs of type <class 'numpy.ndarray'>
        and <class 'torch.Tensor'>.
        >>> # Exceptions to these rules are Python scalars. They can be checked regardless of
        >>> # their type if check_dtype=False.
        >>> torch.testing.assert_close(1.0, 1, check_dtype=False)

        >>> # NaN != NaN by default.
        >>> expected = torch.tensor(float("Nan"))
        >>> actual = expected.clone()
        >>> torch.testing.assert_close(actual, expected)
        Traceback (most recent call last):
        ...
        AssertionError: Scalars are not close!
        <BLANKLINE>
        Expected nan but got nan.
        Absolute difference: nan (up to 1e-05 allowed)
        Relative difference: nan (up to 1.3e-06 allowed)
        >>> torch.testing.assert_close(actual, expected, equal_nan=True)

        >>> expected = torch.tensor([1.0, 2.0, 3.0])
        >>> actual = torch.tensor([1.0, 4.0, 5.0])
        >>> # The default error message can be overwritten.
        >>> torch.testing.assert_close(
        ...     actual, expected, msg="Argh, the tensors are not close!"
        ... )
        Traceback (most recent call last):
        ...
        AssertionError: Argh, the tensors are not close!
        >>> # If msg is a callable, it can be used to augment the generated message with
        >>> # extra information
        >>> torch.testing.assert_close(
        ...     actual, expected, msg=lambda msg: f"Header\n\n{msg}\n\nFooter"
        ... )
        Traceback (most recent call last):
        ...
        AssertionError: Header
        <BLANKLINE>
        Tensor-likes are not close!
        <BLANKLINE>
        Mismatched elements: 2 / 3 (66.7%)
        Greatest absolute difference: 2.0 at index (1,) (up to 1e-05 allowed)
        Greatest relative difference: 1.0 at index (1,) (up to 1.3e-06 allowed)
        <BLANKLINE>
        Footer
    """
    # Hide this function from `pytest`'s traceback
    __tracebackhide__ = True

    # Pair types are tried in order; the first one able to handle the inputs
    # performs the comparison.
    comparison_pair_types = (
        NonePair,
        BooleanPair,
        NumberPair,
        TensorLikePair,
    )
    failures = not_close_error_metas(
        actual,
        expected,
        pair_types=comparison_pair_types,
        allow_subclasses=allow_subclasses,
        rtol=rtol,
        atol=atol,
        equal_nan=equal_nan,
        check_device=check_device,
        check_dtype=check_dtype,
        check_layout=check_layout,
        check_stride=check_stride,
        msg=msg,
    )

    if failures:
        # TODO: compose all metas into one AssertionError
        raise failures[0].to_error(msg)
@deprecated(
    "`torch.testing.assert_allclose()` is deprecated since 1.12 and will be removed in a future release. "
    "Please use `torch.testing.assert_close()` instead. "
    "You can find detailed upgrade instructions in https://github.com/pytorch/pytorch/issues/61844.",
    category=FutureWarning,
)
def assert_allclose(
    actual: Any,
    expected: Any,
    rtol: Optional[float] = None,
    atol: Optional[float] = None,
    equal_nan: bool = True,
    msg: str = "",
) -> None:
    """
    .. warning::

        :func:`torch.testing.assert_allclose` is deprecated since ``1.12`` and will be removed in a future release.
        Please use :func:`torch.testing.assert_close` instead. You can find detailed upgrade instructions
        `here <https://github.com/pytorch/pytorch/issues/61844>`_.
    """
    # Coerce both inputs to tensors. When `expected` is not already a tensor it
    # inherits `actual`'s dtype, matching the legacy `assert_allclose` behavior.
    actual_tensor = (
        actual if isinstance(actual, torch.Tensor) else torch.tensor(actual)
    )
    expected_tensor = (
        expected
        if isinstance(expected, torch.Tensor)
        else torch.tensor(expected, dtype=actual_tensor.dtype)
    )

    # Fall back to the legacy per-dtype tolerances only when neither tolerance
    # was given; `assert_close` itself validates that rtol/atol come in pairs.
    if rtol is None and atol is None:
        legacy_dtype_precisions = {
            torch.float16: (1e-3, 1e-3),
            torch.float32: (1e-4, 1e-5),
            torch.float64: (1e-5, 1e-8),
        }
        rtol, atol = default_tolerances(
            actual_tensor,
            expected_tensor,
            dtype_precisions=legacy_dtype_precisions,
        )

    torch.testing.assert_close(
        actual_tensor,
        expected_tensor,
        rtol=rtol,
        atol=atol,
        equal_nan=equal_nan,
        check_device=True,
        check_dtype=False,
        check_stride=False,
        msg=msg or None,
    )
| TensorLikePair |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/redistribute_x/package.py | {
"start": 217,
"end": 503
} | class ____(Package):
version("1.3")
version("1.2")
version("1.1")
version("1.0")
variant("foo", default=False)
redistribute(binary=False, when="@1.1")
redistribute(binary=False, when="@1.0:1.2+foo")
redistribute(source=False, when="@1.0:1.2")
| RedistributeX |
python | PyCQA__pycodestyle | tests/test_blank_lines.py | {
"start": 1660,
"end": 2066
} | class ____:
def a():
pass
def b():
pass
""")
self.assertEqual([
'E301:6:5', # b() call
], result)
def test_method_less_blank_lines_comment(self):
"""
It will trigger an error when less than 1 blank lin is found
before method definition, ignoring comments.
"""
result = errors_from_src("""# First comment line.
| X |
python | urllib3__urllib3 | dummyserver/testcase.py | {
"start": 5497,
"end": 6236
} | class ____(SocketDummyServerTestCase):
@classmethod
def _start_server(
cls,
socket_handler: typing.Callable[[socket.socket], None],
quit_event: threading.Event | None = None,
) -> None:
ready_event = threading.Event()
cls.server_thread = SocketServerThread(
socket_handler=socket_handler,
ready_event=ready_event,
host=cls.host,
quit_event=quit_event,
)
cls.server_thread.USE_IPV6 = False
cls.server_thread.start()
ready_event.wait(5)
if not ready_event.is_set():
raise Exception("most likely failed to start server")
cls.port = cls.server_thread.port
| IPV4SocketDummyServerTestCase |
python | tensorflow__tensorflow | tensorflow/python/util/tf_decorator_test.py | {
"start": 2294,
"end": 2637
} | class ____(object):
"""Test Decorated Class."""
def __init__(self, two_attr=2):
self.two_attr = two_attr
@property
def two_prop(self):
return 2
def two_func(self):
return 2
@test_decorator_increment_first_int_arg
def return_params(self, a, b, c):
"""Return parameters."""
return [a, b, c]
| TestDecoratedClass |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 874685,
"end": 877133
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"content",
"created_at",
"creator",
"database_id",
"field_value_by_name",
"field_values",
"is_archived",
"project",
"type",
"updated_at",
)
content = sgqlc.types.Field("ProjectV2ItemContent", graphql_name="content")
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
creator = sgqlc.types.Field(Actor, graphql_name="creator")
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
field_value_by_name = sgqlc.types.Field(
"ProjectV2ItemFieldValue",
graphql_name="fieldValueByName",
args=sgqlc.types.ArgDict(
(
(
"name",
sgqlc.types.Arg(
sgqlc.types.non_null(String), graphql_name="name", default=None
),
),
)
),
)
field_values = sgqlc.types.Field(
sgqlc.types.non_null(ProjectV2ItemFieldValueConnection),
graphql_name="fieldValues",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"order_by",
sgqlc.types.Arg(
ProjectV2ItemFieldValueOrder,
graphql_name="orderBy",
default={"field": "POSITION", "direction": "ASC"},
),
),
)
),
)
is_archived = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isArchived"
)
project = sgqlc.types.Field(sgqlc.types.non_null(ProjectV2), graphql_name="project")
type = sgqlc.types.Field(
sgqlc.types.non_null(ProjectV2ItemType), graphql_name="type"
)
updated_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="updatedAt"
)
| ProjectV2Item |
python | scrapy__scrapy | scrapy/pipelines/files.py | {
"start": 2621,
"end": 4350
} | class ____:
def __init__(self, basedir: str | PathLike[str]):
basedir = _to_string(basedir)
if "://" in basedir:
basedir = basedir.split("://", 1)[1]
self.basedir: str = basedir
self._mkdir(Path(self.basedir))
self.created_directories: defaultdict[MediaPipeline.SpiderInfo, set[str]] = (
defaultdict(set)
)
def persist_file(
self,
path: str | PathLike[str],
buf: BytesIO,
info: MediaPipeline.SpiderInfo,
meta: dict[str, Any] | None = None,
headers: dict[str, str] | None = None,
) -> None:
absolute_path = self._get_filesystem_path(path)
self._mkdir(absolute_path.parent, info)
absolute_path.write_bytes(buf.getvalue())
def stat_file(
self, path: str | PathLike[str], info: MediaPipeline.SpiderInfo
) -> StatInfo:
absolute_path = self._get_filesystem_path(path)
try:
last_modified = absolute_path.stat().st_mtime
except OSError:
return {}
with absolute_path.open("rb") as f:
checksum = _md5sum(f)
return {"last_modified": last_modified, "checksum": checksum}
def _get_filesystem_path(self, path: str | PathLike[str]) -> Path:
path_comps = _to_string(path).split("/")
return Path(self.basedir, *path_comps)
def _mkdir(
self, dirname: Path, domain: MediaPipeline.SpiderInfo | None = None
) -> None:
seen: set[str] = self.created_directories[domain] if domain else set()
if str(dirname) not in seen:
if not dirname.exists():
dirname.mkdir(parents=True)
seen.add(str(dirname))
| FSFilesStore |
python | tensorflow__tensorflow | tensorflow/tools/docs/fenced_doctest_lib.py | {
"start": 2168,
"end": 2580
} | class ____(tf_doctest_lib.TfDoctestOutputChecker):
"""TfDoctestChecker with a different warning message."""
MESSAGE = textwrap.dedent("""\n
##############################################################
# Check the documentation (go/g3doctest) on how to write
# testable g3docs.
##############################################################
""")
| FencedCellOutputChecker |
python | django__django | tests/prefetch_related/models.py | {
"start": 6607,
"end": 7199
} | class ____(models.Model):
name = models.CharField(max_length=50)
houses = models.ManyToManyField(House, related_name="occupants")
@property
def primary_house(self):
# Assume business logic forces every person to have at least one house.
return sorted(self.houses.all(), key=lambda house: -house.rooms.count())[0]
@property
def all_houses(self):
return list(self.houses.all())
@cached_property
def cached_all_houses(self):
return self.all_houses
class Meta:
ordering = ["id"]
# Models for nullable FK tests
| Person |
python | protocolbuffers__protobuf | python/google/protobuf/internal/descriptor_test.py | {
"start": 36092,
"end": 44086
} | class ____(unittest.TestCase):
"""Tests for CopyTo functions of Descriptor."""
def _AssertProtoEqual(self, actual_proto, expected_class, expected_ascii):
expected_proto = expected_class()
text_format.Merge(expected_ascii, expected_proto)
self.assertEqual(
actual_proto, expected_proto,
'Not equal,\nActual:\n%s\nExpected:\n%s\n'
% (str(actual_proto), str(expected_proto)))
def _InternalTestCopyToProto(self, desc, expected_proto_class,
expected_proto_ascii):
actual = expected_proto_class()
desc.CopyToProto(actual)
self._AssertProtoEqual(
actual, expected_proto_class, expected_proto_ascii)
def testCopyToProto_EmptyMessage(self):
self._InternalTestCopyToProto(
unittest_pb2.TestEmptyMessage.DESCRIPTOR,
descriptor_pb2.DescriptorProto,
TEST_EMPTY_MESSAGE_DESCRIPTOR_ASCII)
def testCopyToProto_NestedMessage(self):
TEST_NESTED_MESSAGE_ASCII = """
name: 'NestedMessage'
field: <
name: 'bb'
number: 1
label: 1 # Optional
type: 5 # TYPE_INT32
>
"""
self._InternalTestCopyToProto(
unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR,
descriptor_pb2.DescriptorProto,
TEST_NESTED_MESSAGE_ASCII)
def testCopyToProto_ForeignNestedMessage(self):
TEST_FOREIGN_NESTED_ASCII = """
name: 'TestForeignNested'
field: <
name: 'foreign_nested'
number: 1
label: 1 # Optional
type: 11 # TYPE_MESSAGE
type_name: '.proto2_unittest.TestAllTypes.NestedMessage'
>
"""
self._InternalTestCopyToProto(
unittest_pb2.TestForeignNested.DESCRIPTOR,
descriptor_pb2.DescriptorProto,
TEST_FOREIGN_NESTED_ASCII)
def testCopyToProto_ForeignEnum(self):
TEST_FOREIGN_ENUM_ASCII = """
name: 'ForeignEnum'
value: <
name: 'FOREIGN_FOO'
number: 4
>
value: <
name: 'FOREIGN_BAR'
number: 5
>
value: <
name: 'FOREIGN_BAZ'
number: 6
>
value: <
name: 'FOREIGN_BAX'
number: 32
>
value: <
name: 'FOREIGN_LARGE'
number: 123456
>
"""
self._InternalTestCopyToProto(
unittest_pb2.ForeignEnum.DESCRIPTOR,
descriptor_pb2.EnumDescriptorProto,
TEST_FOREIGN_ENUM_ASCII)
def testCopyToProto_Options(self):
TEST_DEPRECATED_FIELDS_ASCII = """
name: 'TestDeprecatedFields'
field: <
name: 'deprecated_int32'
number: 1
label: 1 # Optional
type: 5 # TYPE_INT32
options: <
deprecated: true
>
>
field: {
name: 'deprecated_repeated_string'
number: 4
label: LABEL_REPEATED
type: TYPE_STRING
options: {
deprecated: true
}
}
field {
name: "deprecated_message"
number: 3
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".proto2_unittest.TestAllTypes.NestedMessage"
options {
deprecated: true
}
}
field {
name: "deprecated_int32_in_oneof"
number: 2
label: LABEL_OPTIONAL
type: TYPE_INT32
options {
deprecated: true
}
oneof_index: 0
}
field {
name: "nested"
number: 5
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".proto2_unittest.TestDeprecatedFields"
}
oneof_decl {
name: "oneof_fields"
}
"""
self._InternalTestCopyToProto(
unittest_pb2.TestDeprecatedFields.DESCRIPTOR,
descriptor_pb2.DescriptorProto,
TEST_DEPRECATED_FIELDS_ASCII)
def testCopyToProto_AllExtensions(self):
TEST_EMPTY_MESSAGE_WITH_EXTENSIONS_ASCII = """
name: 'TestEmptyMessageWithExtensions'
extension_range: <
start: 1
end: 536870912
>
"""
self._InternalTestCopyToProto(
unittest_pb2.TestEmptyMessageWithExtensions.DESCRIPTOR,
descriptor_pb2.DescriptorProto,
TEST_EMPTY_MESSAGE_WITH_EXTENSIONS_ASCII)
def testCopyToProto_SeveralExtensions(self):
TEST_MESSAGE_WITH_SEVERAL_EXTENSIONS_ASCII = """
name: 'TestMultipleExtensionRanges'
extension_range: <
start: 42
end: 43
>
extension_range: <
start: 4143
end: 4244
>
extension_range: <
start: 65536
end: 536870912
>
"""
self._InternalTestCopyToProto(
unittest_pb2.TestMultipleExtensionRanges.DESCRIPTOR,
descriptor_pb2.DescriptorProto,
TEST_MESSAGE_WITH_SEVERAL_EXTENSIONS_ASCII)
def testCopyToProto_FileDescriptor(self):
UNITTEST_IMPORT_FILE_DESCRIPTOR_ASCII = ("""
name: 'google/protobuf/unittest_import.proto'
package: 'proto2_unittest_import'
dependency: 'google/protobuf/unittest_import_public.proto'
message_type: <
name: 'ImportMessage'
field: <
name: 'd'
number: 1
label: 1 # Optional
type: 5 # TYPE_INT32
>
>
""" +
"""enum_type: <
name: 'ImportEnum'
value: <
name: 'IMPORT_FOO'
number: 7
>
value: <
name: 'IMPORT_BAR'
number: 8
>
value: <
name: 'IMPORT_BAZ'
number: 9
>
>
enum_type: <
name: 'ImportEnumForMap'
value: <
name: 'UNKNOWN'
number: 0
>
value: <
name: 'FOO'
number: 1
>
value: <
name: 'BAR'
number: 2
>
>
options: <
java_package: 'com.google.protobuf.test'
optimize_for: 1 # SPEED
""" +
"""
cc_enable_arenas: true
>
public_dependency: 0
""")
self._InternalTestCopyToProto(
unittest_import_pb2.DESCRIPTOR,
descriptor_pb2.FileDescriptorProto,
UNITTEST_IMPORT_FILE_DESCRIPTOR_ASCII)
def testCopyToProto_ServiceDescriptor(self):
TEST_SERVICE_ASCII = """
name: 'TestService'
method: <
name: 'Foo'
input_type: '.proto2_unittest.FooRequest'
output_type: '.proto2_unittest.FooResponse'
>
method: <
name: 'Bar'
input_type: '.proto2_unittest.BarRequest'
output_type: '.proto2_unittest.BarResponse'
>
"""
self._InternalTestCopyToProto(
unittest_pb2.TestService.DESCRIPTOR,
descriptor_pb2.ServiceDescriptorProto,
TEST_SERVICE_ASCII)
def testCopyToProto_MethodDescriptor(self):
expected_ascii = """
name: 'Foo'
input_type: '.proto2_unittest.FooRequest'
output_type: '.proto2_unittest.FooResponse'
"""
method_descriptor = unittest_pb2.TestService.DESCRIPTOR.FindMethodByName(
'Foo')
self._InternalTestCopyToProto(
method_descriptor,
descriptor_pb2.MethodDescriptorProto,
expected_ascii)
@unittest.skipIf(
api_implementation.Type() == 'python',
'Pure python does not raise error.')
# TODO: Fix pure python to check with the proto type.
def testCopyToProto_TypeError(self):
file_proto = descriptor_pb2.FileDescriptorProto()
self.assertRaises(TypeError,
unittest_pb2.TestEmptyMessage.DESCRIPTOR.CopyToProto,
file_proto)
self.assertRaises(TypeError,
unittest_pb2.ForeignEnum.DESCRIPTOR.CopyToProto,
file_proto)
self.assertRaises(TypeError,
unittest_pb2.TestService.DESCRIPTOR.CopyToProto,
file_proto)
proto = descriptor_pb2.DescriptorProto()
self.assertRaises(TypeError,
unittest_import_pb2.DESCRIPTOR.CopyToProto,
proto)
| DescriptorCopyToProtoTest |
python | walkccc__LeetCode | solutions/1739. Building Boxes/1739.py | {
"start": 0,
"end": 756
} | class ____:
def minimumBoxes(self, n: int) -> int:
nBoxes = 0
nextTouchings = 0 # j
currLevelBoxes = 0 # 1 + 2 + ... + j
# Find the minimum j s.t. `nBoxes` = 1 + (1 + 2) + ... + (1 + 2 + ... + j)
# >= n
while nBoxes < n:
nextTouchings += 1
currLevelBoxes += nextTouchings
nBoxes += currLevelBoxes
# If nBoxes = n, the answer is `currLevelBoxes` = 1 + 2 + ... + j.
if nBoxes == n:
return currLevelBoxes
# Otherwise, need to remove the boxes in the current level and rebuild it.
nBoxes -= currLevelBoxes
currLevelBoxes -= nextTouchings
nextTouchings = 0
while nBoxes < n:
nextTouchings += 1
nBoxes += nextTouchings
return currLevelBoxes + nextTouchings
| Solution |
python | gevent__gevent | src/gevent/tests/test__server_pywsgi.py | {
"start": 2480,
"end": 2544
} | class ____(test__server.TestCase):
Settings = Settings
| TestCase |
python | graphql-python__graphene | graphene/types/tests/test_definition.py | {
"start": 912,
"end": 977
} | class ____(ObjectType):
write_article = Field(Article)
| Mutation |
python | Netflix__metaflow | test/unit/configs/test_config_naming.py | {
"start": 285,
"end": 1638
} | class ____:
"""Test Config parameter names with underscores and dashes."""
def test_flow_completes(self, config_naming_run):
"""Test that the flow completes successfully."""
assert config_naming_run.successful
assert config_naming_run.finished
def test_config_with_underscore(self, config_naming_run):
"""Test Config with underscore in name."""
end_task = config_naming_run["end"].task
assert end_task["underscore_test"].data == "underscore"
assert end_task["underscore_value"].data == 42
assert end_task["underscore_dict"].data == {"test": "underscore", "value": 42}
def test_config_with_dash(self, config_naming_run):
"""Test Config with dash in name."""
end_task = config_naming_run["end"].task
assert end_task["dash_test"].data == "dash"
assert end_task["dash_value"].data == 99
assert end_task["dash_dict"].data == {"test": "dash", "value": 99}
def test_config_with_mixed_naming(self, config_naming_run):
"""Test Config with both underscores and dashes in name."""
end_task = config_naming_run["end"].task
assert end_task["mixed_test"].data == "mixed"
assert end_task["mixed_value"].data == 123
assert end_task["mixed_dict"].data == {"test": "mixed", "value": 123}
| TestConfigNaming |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 92119,
"end": 92192
} | class ____(Binop):
operation = operator.or_
_operator_repr = "|"
| Or |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-books-you-can-take.py | {
"start": 42,
"end": 674
} | class ____(object):
def maximumBooks(self, books):
"""
:type books: List[int]
:rtype: int
"""
def count(right, l):
left = max(right-l+1, 0)
return (left+right)*(right-left+1)//2
result = curr = 0
stk = [-1]
for i in xrange(len(books)):
while stk[-1] != -1 and books[stk[-1]] >= books[i]-(i-stk[-1]):
j = stk.pop()
curr -= count(books[j], j-stk[-1])
curr += count(books[i], i-stk[-1])
stk.append(i)
result = max(result, curr)
return result
| Solution |
python | protocolbuffers__protobuf | python/google/protobuf/internal/proto_json_test.py | {
"start": 455,
"end": 1046
} | class ____(unittest.TestCase):
def test_simple_serialize(self):
message = json_format_proto3_pb2.TestMessage()
message.int32_value = 12345
expected = {'int32Value': 12345}
self.assertEqual(expected, proto_json.serialize(message))
def test_simple_parse(self):
expected = 12345
js_dict = {'int32Value': expected}
message = proto_json.parse(json_format_proto3_pb2.TestMessage,
js_dict)
self.assertEqual(expected, message.int32_value) # pytype: disable=attribute-error
if __name__ == "__main__":
unittest.main()
| ProtoJsonTest |
python | great-expectations__great_expectations | tests/integration/test_utils/data_source_config/base.py | {
"start": 842,
"end": 3593
} | class ____(ABC, Generic[_ColumnTypes]):
name: Optional[str] = None
table_name: Optional[str] = None # Overrides random table name generation
column_types: Optional[Mapping[str, _ColumnTypes]] = None
extra_column_types: Mapping[str, Mapping[str, _ColumnTypes]] = field(default_factory=dict)
@property
@abstractmethod
def label(self) -> str:
"""Label that will show up in test name."""
...
@property
@abstractmethod
def pytest_mark(self) -> pytest.MarkDecorator:
"""Mark for pytest"""
...
@abstractmethod
def create_batch_setup(
self,
request: FixtureRequest,
data: pd.DataFrame,
extra_data: Mapping[str, pd.DataFrame],
context: AbstractDataContext,
# This violates the interface segration principle (the I in SOLID) since we now make
# non-SQL datasources rely on an argument that only SQL datasources are need.
# However, this is simpler than adding an additional layer to decouple this interface.
# If the SQL and non-SQL test interfaces diverge more significantly we should consider
# refactoring these tests.
# One possible fix is to remove this method from this class and create a sql and
# non-sql subclass. We'd like need to update _ConfigT to be bounded by a union of
# these subclasses and update callers of create_batch_setup.
engine_manager: Optional[SessionSQLEngineManager] = None,
) -> BatchTestSetup:
"""Create a batch setup object for this data source."""
@property
def test_id(self) -> str:
parts: list[Optional[str]] = [self.label, self.name]
non_null_parts = [p for p in parts if p is not None]
return "-".join(non_null_parts)
@override
def __eq__(self, value: object) -> bool:
if not isinstance(value, DataSourceTestConfig):
return False
return all(
[
super().__eq__(value),
self.label == value.label,
self.pytest_mark == value.pytest_mark,
]
)
@override
def __hash__(self) -> int:
hashable_col_types = dict_to_tuple(self.column_types) if self.column_types else None
hashable_extra_col_types = dict_to_tuple(
{k: dict_to_tuple(self.extra_column_types[k]) for k in sorted(self.extra_column_types)}
)
return hash(
(
self.__class__.name,
self.test_id,
hashable_col_types,
hashable_extra_col_types,
)
)
_ConfigT = TypeVar("_ConfigT", bound=DataSourceTestConfig)
_AssetT = TypeVar("_AssetT", bound=DataAsset)
| DataSourceTestConfig |
python | getsentry__sentry | tests/sentry/testutils/helpers/test_features.py | {
"start": 7805,
"end": 11437
} | class ____(TestCase):
"""Test nested overrides when the class itself has a feature decorator."""
def setUp(self) -> None:
self.org = self.create_organization()
def test_class_decorator_baseline(self) -> None:
"""Verify the class decorator is working."""
assert features.has("organizations:session-replay", self.org)
def test_context_manager_override_class_decorator(self) -> None:
"""Test that context managers can override class-level decorators."""
# Class decorator enables the feature
assert features.has("organizations:session-replay", self.org)
# Context manager overrides to disable
with self.feature({"organizations:session-replay": False}):
assert not features.has("organizations:session-replay", self.org)
# Nested context enables it again
with self.feature("organizations:session-replay"):
assert features.has("organizations:session-replay", self.org)
# Back to first override
assert not features.has("organizations:session-replay", self.org)
# Back to class decorator state
assert features.has("organizations:session-replay", self.org)
@with_feature("organizations:codecov-integration")
def test_method_and_class_decorators_with_context_override(self) -> None:
"""Test interaction of class decorator + method decorator + context manager."""
# Both class and method decorators should be active
assert features.has("organizations:session-replay", self.org) # From class
assert features.has("organizations:codecov-integration", self.org) # From method
# Override both with context manager
with self.feature(
{"organizations:session-replay": False, "organizations:codecov-integration": False}
):
assert not features.has("organizations:session-replay", self.org)
assert not features.has("organizations:codecov-integration", self.org)
# Back to decorator states
assert features.has("organizations:session-replay", self.org)
assert features.has("organizations:codecov-integration", self.org)
def test_deeply_nested_context_managers(self) -> None:
"""Test deeply nested context managers with alternating states."""
# Start with class decorator enabled
assert features.has("organizations:session-replay", self.org)
with self.feature({"organizations:session-replay": False}): # Level 1: Disabled
assert not features.has("organizations:session-replay", self.org)
with self.feature("organizations:session-replay"): # Level 2: Enabled
assert features.has("organizations:session-replay", self.org)
with self.feature({"organizations:session-replay": False}): # Level 3: Disabled
assert not features.has("organizations:session-replay", self.org)
with self.feature("organizations:session-replay"): # Level 4: Enabled
assert features.has("organizations:session-replay", self.org)
# Back to level 3
assert not features.has("organizations:session-replay", self.org)
# Back to level 2
assert features.has("organizations:session-replay", self.org)
# Back to level 1
assert not features.has("organizations:session-replay", self.org)
# Back to class decorator
assert features.has("organizations:session-replay", self.org)
| TestClassDecoratorWithNestedOverrides |
python | pytorch__pytorch | torch/utils/data/datapipes/dataframe/dataframes.py | {
"start": 8054,
"end": 8358
} | class ____(CaptureF):
def __str__(self) -> str:
variable = self.kwargs["variable"]
value = self.kwargs["value"]
return f"{variable} = {value}"
def execute(self) -> None:
self.kwargs["variable"].calculated_value = self.kwargs["value"].execute()
| CaptureVariableAssign |
python | apache__airflow | providers/standard/tests/unit/standard/operators/test_python.py | {
"start": 85683,
"end": 86351
} | class ____(BaseOperator):
def execute(self, context: Context):
assert context == get_current_context()
def get_all_the_context(**context):
current_context = get_current_context()
with warnings.catch_warnings():
if AIRFLOW_V_3_0_PLUS:
assert context == current_context
else:
from airflow.utils.context import AirflowContextDeprecationWarning
warnings.simplefilter("ignore", AirflowContextDeprecationWarning)
assert current_context._context
@pytest.fixture
def clear_db():
clear_db_runs()
yield
clear_db_runs()
@pytest.mark.usefixtures("clear_db")
| MyContextAssertOperator |
python | numba__numba | numba/core/errors.py | {
"start": 2434,
"end": 2569
} | class ____(NumbaPedanticWarning):
"""
Warning category for reporting an IR assumption violation.
"""
| NumbaIRAssumptionWarning |
python | mlflow__mlflow | examples/mlflow-3/langchain_example.py | {
"start": 3443,
"end": 7567
} | class ____(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(4, 10)
self.fc2 = nn.Linear(10, 10)
self.fc3 = nn.Linear(10, 3)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.dropout(x, 0.2)
x = self.fc3(x)
return x
model = IrisClassifier()
model = model.to(device)
scripted_model = torch.jit.script(model) # scripting the model
# Start a run to represent the training job
with mlflow.start_run():
# Load the training dataset with MLflow. We will link training metrics to this dataset.
train_dataset: mlflow.data.pandas_dataset.PandasDataset = mlflow.data.from_pandas(
train, name="train_dataset"
)
X_train, y_train = prepare_data(train_dataset.df)
# Log training job parameters
mlflow.log_param("num_gpus", 1)
mlflow.log_param("optimizer", "adam")
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(scripted_model.parameters(), lr=0.01)
for epoch in range(100):
out = scripted_model(X_train)
loss = criterion(out, y_train).to(device)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch % 10 == 0:
# Log a checkpoint with metrics every 10 epochs
mlflow.log_metric(
"accuracy",
compute_accuracy(scripted_model, X_train, y_train),
step=epoch,
dataset=train_dataset,
)
mlflow.pytorch.log_model(
pytorch_model=scripted_model,
name="torch-iris",
# "hyperparams=?"
# Feedback: No need for this, just inherit from the run params!
params={
# Log model parameters
"n_layers": 3,
},
# Specify the epoch at which the model was logged
step=epoch,
# Specify the training dataset with which the metric is associated
dataset=train_dataset,
# Feedback: Should support checkpoint TTL, automatically purge checkpoints with lower performance
# Feedback: Checkpointing for stability (checkpoint every Y mins) vs performance (checkpoint per X epochs + evals)
)
ranked_checkpoints = mlflow.search_logged_models(
filter_string="params.n_layers = '3' AND metrics.accuracy > 0",
order_by=["metrics.accuracy DESC"],
output_format="list",
)
worst_checkpoint = ranked_checkpoints[-1]
print("WORST CHECKPOINT", worst_checkpoint)
print("\n")
best_checkpoint = ranked_checkpoints[0]
print("BEST CHECKPOINT", best_checkpoint)
# Feedback: Consider renaming `Model` to `Checkpoint`
# perhaps some field on the Model indicating whether its a checkpoint so that we can limit the # of checkpoints
# displayed in the UI by default (e.g. only show the best or most recent ones), automatically TTL the checkpoints,
# would be quite nice
# Start a run to represent the test dataset evaluation job
with mlflow.start_run() as evaluation_run:
# Load the test dataset with MLflow. We will link test metrics to this dataset.
test_dataset: mlflow.data.pandas_dataset.PandasDataset = mlflow.data.from_pandas(
test, name="test_dataset"
)
X_test, y_test = prepare_data(test_dataset.df)
# Load the best checkpoint
model = mlflow.pytorch.load_model(f"models:/{best_checkpoint.model_id}")
model = model.to(device)
scripted_model = torch.jit.script(model)
# Evaluate the model on the test dataset and log metrics to MLflow
mlflow.log_metric(
"accuracy",
compute_accuracy(scripted_model, X_test, y_test),
# Specify the ID of the checkpoint to which to link the metrics
model_id=best_checkpoint.model_id,
# Specify the test dataset with which the metric is associated
dataset=test_dataset,
)
mlflow.get_logged_model(best_checkpoint.model_id)
print([m.to_dictionary() for m in mlflow.get_logged_model(best_checkpoint.model_id).metrics])
| IrisClassifier |
python | walkccc__LeetCode | solutions/269. Alien Dictionary/269.py | {
"start": 0,
"end": 1451
} | class ____:
def alienOrder(self, words: list[str]) -> str:
graph = {}
inDegrees = [0] * 26
self._buildGraph(graph, words, inDegrees)
return self._topology(graph, inDegrees)
def _buildGraph(
self,
graph: dict[str, set[str]],
words: list[str],
inDegrees: list[int],
) -> None:
# Create a node for each character in each word.
for word in words:
for c in word:
if c not in graph:
graph[c] = set()
for first, second in zip(words, words[1:]):
length = min(len(first), len(second))
for j in range(length):
u = first[j]
v = second[j]
if u != v:
if v not in graph[u]:
graph[u].add(v)
inDegrees[ord(v) - ord('a')] += 1
break # The order of characters after this are meaningless.
# First = 'ab', second = 'a' . invalid
if j == length - 1 and len(first) > len(second):
graph.clear()
return
def _topology(self, graph: dict[str, set[str]], inDegrees: list[int]) -> str:
s = ''
q = collections.deque()
for c in graph:
if inDegrees[ord(c) - ord('a')] == 0:
q.append(c)
while q:
u = q.pop()
s += u
for v in graph[u]:
inDegrees[ord(v) - ord('a')] -= 1
if inDegrees[ord(v) - ord('a')] == 0:
q.append(v)
# Words = ['z', 'x', 'y', 'x']
return s if len(s) == len(graph) else ''
| Solution |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1420913,
"end": 1421102
} | class ____(VegaLiteSchema):
"""TitleFrame schema wrapper."""
_schema = {"$ref": "#/definitions/TitleFrame"}
def __init__(self, *args):
super().__init__(*args)
| TitleFrame |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 32202,
"end": 32431
} | class ____(PrefectBaseModel):
"""Filter by `ArtifactCollection.latest_id`."""
any_: Optional[List[UUID]] = Field(
default=None, description="A list of artifact ids to include"
)
| ArtifactCollectionFilterLatestId |
python | PyCQA__pylint | doc/data/messages/i/invalid-metaclass/bad.py | {
"start": 0,
"end": 60
} | class ____(metaclass=int): # [invalid-metaclass]
pass
| Apple |
python | openai__openai-python | src/openai/types/responses/response_computer_tool_call_output_screenshot_param.py | {
"start": 245,
"end": 656
} | class ____(TypedDict, total=False):
type: Required[Literal["computer_screenshot"]]
"""Specifies the event type.
For a computer screenshot, this property is always set to `computer_screenshot`.
"""
file_id: str
"""The identifier of an uploaded file that contains the screenshot."""
image_url: str
"""The URL of the screenshot image."""
| ResponseComputerToolCallOutputScreenshotParam |
python | scikit-learn__scikit-learn | sklearn/linear_model/_linear_loss.py | {
"start": 1312,
"end": 34236
} | class ____:
"""General class for loss functions with raw_prediction = X @ coef + intercept.
Note that raw_prediction is also known as linear predictor.
The loss is the average of per sample losses and includes a term for L2
regularization::
loss = 1 / s_sum * sum_i s_i loss(y_i, X_i @ coef + intercept)
+ 1/2 * l2_reg_strength * ||coef||_2^2
with sample weights s_i=1 if sample_weight=None and s_sum=sum_i s_i.
Gradient and hessian, for simplicity without intercept, are::
gradient = 1 / s_sum * X.T @ loss.gradient + l2_reg_strength * coef
hessian = 1 / s_sum * X.T @ diag(loss.hessian) @ X
+ l2_reg_strength * identity
Conventions:
if fit_intercept:
n_dof = n_features + 1
else:
n_dof = n_features
if base_loss.is_multiclass:
coef.shape = (n_classes, n_dof) or ravelled (n_classes * n_dof,)
else:
coef.shape = (n_dof,)
The intercept term is at the end of the coef array:
if base_loss.is_multiclass:
if coef.shape (n_classes, n_dof):
intercept = coef[:, -1]
if coef.shape (n_classes * n_dof,)
intercept = coef[n_classes * n_features:] = coef[(n_dof-1):]
intercept.shape = (n_classes,)
else:
intercept = coef[-1]
Shape of gradient follows shape of coef.
gradient.shape = coef.shape
But hessian (to make our lives simpler) are always 2-d:
if base_loss.is_multiclass:
hessian.shape = (n_classes * n_dof, n_classes * n_dof)
else:
hessian.shape = (n_dof, n_dof)
Note: if coef has shape (n_classes * n_dof,), the classes are expected to be
contiguous, i.e. the 2d-array can be reconstructed as
coef.reshape((n_classes, -1), order="F")
The option order="F" makes coef[:, i] contiguous. This, in turn, makes the
coefficients without intercept, coef[:, :-1], contiguous and speeds up
matrix-vector computations.
Note: If the average loss per sample is wanted instead of the sum of the loss per
sample, one can simply use a rescaled sample_weight such that
sum(sample_weight) = 1.
Parameters
----------
base_loss : instance of class BaseLoss from sklearn._loss.
fit_intercept : bool
"""
def __init__(self, base_loss, fit_intercept):
self.base_loss = base_loss
self.fit_intercept = fit_intercept
def init_zero_coef(self, X, dtype=None):
"""Allocate coef of correct shape with zeros.
Parameters:
-----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
dtype : data-type, default=None
Overrides the data type of coef. With dtype=None, coef will have the same
dtype as X.
Returns
-------
coef : ndarray of shape (n_dof,) or (n_classes, n_dof)
Coefficients of a linear model.
"""
n_features = X.shape[1]
n_classes = self.base_loss.n_classes
if self.fit_intercept:
n_dof = n_features + 1
else:
n_dof = n_features
if self.base_loss.is_multiclass:
coef = np.zeros_like(X, shape=(n_classes, n_dof), dtype=dtype, order="F")
else:
coef = np.zeros_like(X, shape=n_dof, dtype=dtype)
return coef
def weight_intercept(self, coef):
"""Helper function to get coefficients and intercept.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
Returns
-------
weights : ndarray of shape (n_features,) or (n_classes, n_features)
Coefficients without intercept term.
intercept : float or ndarray of shape (n_classes,)
Intercept terms.
"""
if not self.base_loss.is_multiclass:
if self.fit_intercept:
intercept = coef[-1]
weights = coef[:-1]
else:
intercept = 0.0
weights = coef
else:
# reshape to (n_classes, n_dof)
if coef.ndim == 1:
weights = coef.reshape((self.base_loss.n_classes, -1), order="F")
else:
weights = coef
if self.fit_intercept:
intercept = weights[:, -1]
weights = weights[:, :-1]
else:
intercept = 0.0
return weights, intercept
def weight_intercept_raw(self, coef, X):
"""Helper function to get coefficients, intercept and raw_prediction.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
Returns
-------
weights : ndarray of shape (n_features,) or (n_classes, n_features)
Coefficients without intercept term.
intercept : float or ndarray of shape (n_classes,)
Intercept terms.
raw_prediction : ndarray of shape (n_samples,) or \
(n_samples, n_classes)
"""
weights, intercept = self.weight_intercept(coef)
if not self.base_loss.is_multiclass:
raw_prediction = X @ weights + intercept
else:
# weights has shape (n_classes, n_dof)
raw_prediction = X @ weights.T + intercept # ndarray, likely C-contiguous
return weights, intercept, raw_prediction
def l2_penalty(self, weights, l2_reg_strength):
"""Compute L2 penalty term l2_reg_strength/2 *||w||_2^2."""
norm2_w = weights @ weights if weights.ndim == 1 else squared_norm(weights)
return 0.5 * l2_reg_strength * norm2_w
def loss(
self,
coef,
X,
y,
sample_weight=None,
l2_reg_strength=0.0,
n_threads=1,
raw_prediction=None,
):
"""Compute the loss as weighted average over point-wise losses.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
raw_prediction : C-contiguous array of shape (n_samples,) or array of \
shape (n_samples, n_classes)
Raw prediction values (in link space). If provided, these are used. If
None, then raw_prediction = X @ coef + intercept is calculated.
Returns
-------
loss : float
Weighted average of losses per sample, plus penalty.
"""
if raw_prediction is None:
weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X)
else:
weights, intercept = self.weight_intercept(coef)
loss = self.base_loss.loss(
y_true=y,
raw_prediction=raw_prediction,
sample_weight=None,
n_threads=n_threads,
)
loss = np.average(loss, weights=sample_weight)
return loss + self.l2_penalty(weights, l2_reg_strength)
def loss_gradient(
self,
coef,
X,
y,
sample_weight=None,
l2_reg_strength=0.0,
n_threads=1,
raw_prediction=None,
):
"""Computes the sum of loss and gradient w.r.t. coef.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
raw_prediction : C-contiguous array of shape (n_samples,) or array of \
shape (n_samples, n_classes)
Raw prediction values (in link space). If provided, these are used. If
None, then raw_prediction = X @ coef + intercept is calculated.
Returns
-------
loss : float
Weighted average of losses per sample, plus penalty.
gradient : ndarray of shape coef.shape
The gradient of the loss.
"""
(n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes
n_dof = n_features + int(self.fit_intercept)
if raw_prediction is None:
weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X)
else:
weights, intercept = self.weight_intercept(coef)
loss, grad_pointwise = self.base_loss.loss_gradient(
y_true=y,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
n_threads=n_threads,
)
sw_sum = n_samples if sample_weight is None else np.sum(sample_weight)
loss = loss.sum() / sw_sum
loss += self.l2_penalty(weights, l2_reg_strength)
grad_pointwise /= sw_sum
if not self.base_loss.is_multiclass:
grad = np.empty_like(coef, dtype=weights.dtype)
grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights
if self.fit_intercept:
grad[-1] = grad_pointwise.sum()
else:
grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F")
# grad_pointwise.shape = (n_samples, n_classes)
grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights
if self.fit_intercept:
grad[:, -1] = grad_pointwise.sum(axis=0)
if coef.ndim == 1:
grad = grad.ravel(order="F")
return loss, grad
def gradient(
self,
coef,
X,
y,
sample_weight=None,
l2_reg_strength=0.0,
n_threads=1,
raw_prediction=None,
):
"""Computes the gradient w.r.t. coef.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
raw_prediction : C-contiguous array of shape (n_samples,) or array of \
shape (n_samples, n_classes)
Raw prediction values (in link space). If provided, these are used. If
None, then raw_prediction = X @ coef + intercept is calculated.
Returns
-------
gradient : ndarray of shape coef.shape
The gradient of the loss.
"""
(n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes
n_dof = n_features + int(self.fit_intercept)
if raw_prediction is None:
weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X)
else:
weights, intercept = self.weight_intercept(coef)
grad_pointwise = self.base_loss.gradient(
y_true=y,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
n_threads=n_threads,
)
sw_sum = n_samples if sample_weight is None else np.sum(sample_weight)
grad_pointwise /= sw_sum
if not self.base_loss.is_multiclass:
grad = np.empty_like(coef, dtype=weights.dtype)
grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights
if self.fit_intercept:
grad[-1] = grad_pointwise.sum()
return grad
else:
grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F")
# gradient.shape = (n_samples, n_classes)
grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights
if self.fit_intercept:
grad[:, -1] = grad_pointwise.sum(axis=0)
if coef.ndim == 1:
return grad.ravel(order="F")
else:
return grad
def gradient_hessian(
self,
coef,
X,
y,
sample_weight=None,
l2_reg_strength=0.0,
n_threads=1,
gradient_out=None,
hessian_out=None,
raw_prediction=None,
):
"""Computes gradient and hessian w.r.t. coef.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
gradient_out : None or ndarray of shape coef.shape
A location into which the gradient is stored. If None, a new array
might be created.
hessian_out : None or ndarray of shape (n_dof, n_dof) or \
(n_classes * n_dof, n_classes * n_dof)
A location into which the hessian is stored. If None, a new array
might be created.
raw_prediction : C-contiguous array of shape (n_samples,) or array of \
shape (n_samples, n_classes)
Raw prediction values (in link space). If provided, these are used. If
None, then raw_prediction = X @ coef + intercept is calculated.
Returns
-------
gradient : ndarray of shape coef.shape
The gradient of the loss.
hessian : ndarray of shape (n_dof, n_dof) or \
(n_classes, n_dof, n_dof, n_classes)
Hessian matrix.
hessian_warning : bool
True if pointwise hessian has more than 25% of its elements non-positive.
"""
(n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes
n_dof = n_features + int(self.fit_intercept)
if raw_prediction is None:
weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X)
else:
weights, intercept = self.weight_intercept(coef)
sw_sum = n_samples if sample_weight is None else np.sum(sample_weight)
# Allocate gradient.
if gradient_out is None:
grad = np.empty_like(coef, dtype=weights.dtype, order="F")
elif gradient_out.shape != coef.shape:
raise ValueError(
f"gradient_out is required to have shape coef.shape = {coef.shape}; "
f"got {gradient_out.shape}."
)
elif self.base_loss.is_multiclass and not gradient_out.flags.f_contiguous:
raise ValueError("gradient_out must be F-contiguous.")
else:
grad = gradient_out
# Allocate hessian.
n = coef.size # for multinomial this equals n_dof * n_classes
if hessian_out is None:
hess = np.empty((n, n), dtype=weights.dtype)
elif hessian_out.shape != (n, n):
raise ValueError(
f"hessian_out is required to have shape ({n, n}); got "
f"{hessian_out.shape=}."
)
elif self.base_loss.is_multiclass and (
not hessian_out.flags.c_contiguous and not hessian_out.flags.f_contiguous
):
raise ValueError("hessian_out must be contiguous.")
else:
hess = hessian_out
if not self.base_loss.is_multiclass:
grad_pointwise, hess_pointwise = self.base_loss.gradient_hessian(
y_true=y,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
n_threads=n_threads,
)
grad_pointwise /= sw_sum
hess_pointwise /= sw_sum
# For non-canonical link functions and far away from the optimum, the
# pointwise hessian can be negative. We take care that 75% of the hessian
# entries are positive.
hessian_warning = (
np.average(hess_pointwise <= 0, weights=sample_weight) > 0.25
)
hess_pointwise = np.abs(hess_pointwise)
grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights
if self.fit_intercept:
grad[-1] = grad_pointwise.sum()
if hessian_warning:
# Exit early without computing the hessian.
return grad, hess, hessian_warning
hess[:n_features, :n_features] = sandwich_dot(X, hess_pointwise)
if l2_reg_strength > 0:
# The L2 penalty enters the Hessian on the diagonal only. To add those
# terms, we use a flattened view of the array.
order = "C" if hess.flags.c_contiguous else "F"
hess.reshape(-1, order=order)[: (n_features * n_dof) : (n_dof + 1)] += (
l2_reg_strength
)
if self.fit_intercept:
# With intercept included as added column to X, the hessian becomes
# hess = (X, 1)' @ diag(h) @ (X, 1)
# = (X' @ diag(h) @ X, X' @ h)
# ( h @ X, sum(h))
# The left upper part has already been filled, it remains to compute
# the last row and the last column.
Xh = X.T @ hess_pointwise
hess[:-1, -1] = Xh
hess[-1, :-1] = Xh
hess[-1, -1] = hess_pointwise.sum()
else:
# Here we may safely assume HalfMultinomialLoss aka categorical
# cross-entropy.
# HalfMultinomialLoss computes only the diagonal part of the hessian, i.e.
# diagonal in the classes. Here, we want the full hessian. Therefore, we
# call gradient_proba.
grad_pointwise, proba = self.base_loss.gradient_proba(
y_true=y,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
n_threads=n_threads,
)
grad_pointwise /= sw_sum
grad = grad.reshape((n_classes, n_dof), order="F")
grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights
if self.fit_intercept:
grad[:, -1] = grad_pointwise.sum(axis=0)
if coef.ndim == 1:
grad = grad.ravel(order="F")
# The full hessian matrix, i.e. not only the diagonal part, dropping most
# indices, is given by:
#
# hess = X' @ h @ X
#
# Here, h is a priori a 4-dimensional matrix of shape
# (n_samples, n_samples, n_classes, n_classes). It is diagonal its first
# two dimensions (the ones with n_samples), i.e. it is
# effectively a 3-dimensional matrix (n_samples, n_classes, n_classes).
#
# h = diag(p) - p' p
#
# or with indices k and l for classes
#
# h_kl = p_k * delta_kl - p_k * p_l
#
# with p_k the (predicted) probability for class k. Only the dimension in
# n_samples multiplies with X.
# For 3 classes and n_samples = 1, this looks like ("@" is a bit misused
# here):
#
# hess = X' @ (h00 h10 h20) @ X
# (h10 h11 h12)
# (h20 h12 h22)
# = (X' @ diag(h00) @ X, X' @ diag(h10), X' @ diag(h20))
# (X' @ diag(h10) @ X, X' @ diag(h11), X' @ diag(h12))
# (X' @ diag(h20) @ X, X' @ diag(h12), X' @ diag(h22))
#
# Now coef of shape (n_classes * n_dof) is contiguous in n_classes.
# Therefore, we want the hessian to follow this convention, too, i.e.
# hess[:n_classes, :n_classes] = (x0' @ h00 @ x0, x0' @ h10 @ x0, ..)
# (x0' @ h10 @ x0, x0' @ h11 @ x0, ..)
# (x0' @ h20 @ x0, x0' @ h12 @ x0, ..)
# is the first feature, x0, for all classes. In our implementation, we
# still want to take advantage of BLAS "X.T @ X". Therefore, we have some
# index/slicing battle to fight.
if sample_weight is not None:
sw = sample_weight / sw_sum
else:
sw = 1.0 / sw_sum
for k in range(n_classes):
# Diagonal terms (in classes) hess_kk.
# Note that this also writes to some of the lower triangular part.
h = proba[:, k] * (1 - proba[:, k]) * sw
hess[
k : n_classes * n_features : n_classes,
k : n_classes * n_features : n_classes,
] = sandwich_dot(X, h)
if self.fit_intercept:
# See above in the non multiclass case.
Xh = X.T @ h
hess[
k : n_classes * n_features : n_classes,
n_classes * n_features + k,
] = Xh
hess[
n_classes * n_features + k,
k : n_classes * n_features : n_classes,
] = Xh
hess[n_classes * n_features + k, n_classes * n_features + k] = (
h.sum()
)
# Off diagonal terms (in classes) hess_kl.
for l in range(k + 1, n_classes):
# Upper triangle (in classes).
h = -proba[:, k] * proba[:, l] * sw
hess[
k : n_classes * n_features : n_classes,
l : n_classes * n_features : n_classes,
] = sandwich_dot(X, h)
if self.fit_intercept:
Xh = X.T @ h
hess[
k : n_classes * n_features : n_classes,
n_classes * n_features + l,
] = Xh
hess[
n_classes * n_features + k,
l : n_classes * n_features : n_classes,
] = Xh
hess[n_classes * n_features + k, n_classes * n_features + l] = (
h.sum()
)
# Fill lower triangle (in classes).
hess[l::n_classes, k::n_classes] = hess[k::n_classes, l::n_classes]
if l2_reg_strength > 0:
# See above in the non multiclass case.
order = "C" if hess.flags.c_contiguous else "F"
hess.reshape(-1, order=order)[
: (n_classes**2 * n_features * n_dof) : (n_classes * n_dof + 1)
] += l2_reg_strength
# The pointwise hessian is always non-negative for the multinomial loss.
hessian_warning = False
return grad, hess, hessian_warning
def gradient_hessian_product(
self, coef, X, y, sample_weight=None, l2_reg_strength=0.0, n_threads=1
):
"""Computes gradient and hessp (hessian product function) w.r.t. coef.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
Returns
-------
gradient : ndarray of shape coef.shape
The gradient of the loss.
hessp : callable
Function that takes in a vector input of shape of gradient and
and returns matrix-vector product with hessian.
"""
(n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes
n_dof = n_features + int(self.fit_intercept)
weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X)
sw_sum = n_samples if sample_weight is None else np.sum(sample_weight)
if not self.base_loss.is_multiclass:
grad_pointwise, hess_pointwise = self.base_loss.gradient_hessian(
y_true=y,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
n_threads=n_threads,
)
grad_pointwise /= sw_sum
hess_pointwise /= sw_sum
grad = np.empty_like(coef, dtype=weights.dtype)
grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights
if self.fit_intercept:
grad[-1] = grad_pointwise.sum()
# Precompute as much as possible: hX, hX_sum and hessian_sum
hessian_sum = hess_pointwise.sum()
if sparse.issparse(X):
hX = (
sparse.dia_matrix((hess_pointwise, 0), shape=(n_samples, n_samples))
@ X
)
else:
hX = hess_pointwise[:, np.newaxis] * X
if self.fit_intercept:
# Calculate the double derivative with respect to intercept.
# Note: In case hX is sparse, hX.sum is a matrix object.
hX_sum = np.squeeze(np.asarray(hX.sum(axis=0)))
# prevent squeezing to zero-dim array if n_features == 1
hX_sum = np.atleast_1d(hX_sum)
# With intercept included and l2_reg_strength = 0, hessp returns
# res = (X, 1)' @ diag(h) @ (X, 1) @ s
# = (X, 1)' @ (hX @ s[:n_features], sum(h) * s[-1])
# res[:n_features] = X' @ hX @ s[:n_features] + sum(h) * s[-1]
# res[-1] = 1' @ hX @ s[:n_features] + sum(h) * s[-1]
def hessp(s):
ret = np.empty_like(s)
if sparse.issparse(X):
ret[:n_features] = X.T @ (hX @ s[:n_features])
else:
ret[:n_features] = np.linalg.multi_dot([X.T, hX, s[:n_features]])
ret[:n_features] += l2_reg_strength * s[:n_features]
if self.fit_intercept:
ret[:n_features] += s[-1] * hX_sum
ret[-1] = hX_sum @ s[:n_features] + hessian_sum * s[-1]
return ret
else:
# Here we may safely assume HalfMultinomialLoss aka categorical
# cross-entropy.
# HalfMultinomialLoss computes only the diagonal part of the hessian, i.e.
# diagonal in the classes. Here, we want the matrix-vector product of the
# full hessian. Therefore, we call gradient_proba.
grad_pointwise, proba = self.base_loss.gradient_proba(
y_true=y,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
n_threads=n_threads,
)
grad_pointwise /= sw_sum
grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F")
grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights
if self.fit_intercept:
grad[:, -1] = grad_pointwise.sum(axis=0)
# Full hessian-vector product, i.e. not only the diagonal part of the
# hessian. Derivation with some index battle for input vector s:
# - sample index i
# - feature indices j, m
# - class indices k, l
# - 1_{k=l} is one if k=l else 0
# - p_i_k is the (predicted) probability that sample i belongs to class k
# for all i: sum_k p_i_k = 1
# - s_l_m is input vector for class l and feature m
# - X' = X transposed
#
# Note: Hessian with dropping most indices is just:
# X' @ p_k (1(k=l) - p_l) @ X
#
# result_{k j} = sum_{i, l, m} Hessian_{i, k j, m l} * s_l_m
# = sum_{i, l, m} (X')_{ji} * p_i_k * (1_{k=l} - p_i_l)
# * X_{im} s_l_m
# = sum_{i, m} (X')_{ji} * p_i_k
# * (X_{im} * s_k_m - sum_l p_i_l * X_{im} * s_l_m)
#
# See also https://github.com/scikit-learn/scikit-learn/pull/3646#discussion_r17461411
def hessp(s):
s = s.reshape((n_classes, -1), order="F") # shape = (n_classes, n_dof)
if self.fit_intercept:
s_intercept = s[:, -1]
s = s[:, :-1] # shape = (n_classes, n_features)
else:
s_intercept = 0
tmp = X @ s.T + s_intercept # X_{im} * s_k_m
tmp += (-proba * tmp).sum(axis=1)[:, np.newaxis] # - sum_l ..
tmp *= proba # * p_i_k
if sample_weight is not None:
tmp *= sample_weight[:, np.newaxis]
# hess_prod = empty_like(grad), but we ravel grad below and this
# function is run after that.
hess_prod = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F")
hess_prod[:, :n_features] = (tmp.T @ X) / sw_sum + l2_reg_strength * s
if self.fit_intercept:
hess_prod[:, -1] = tmp.sum(axis=0) / sw_sum
if coef.ndim == 1:
return hess_prod.ravel(order="F")
else:
return hess_prod
if coef.ndim == 1:
return grad.ravel(order="F"), hessp
return grad, hessp
| LinearModelLoss |
python | ethereum__web3.py | web3/_utils/module_testing/utils.py | {
"start": 584,
"end": 13548
} | class ____:
"""
Context manager to mock requests made by a web3 instance. This is meant to be used
via a ``request_mocker`` fixture defined within the appropriate context.
************************************************************************************
Important: When mocking results, it's important to keep in mind the types that
clients return. For example, what we commonly translate to integers are returned
as hex strings in the RPC response and should be mocked as such for more
accurate testing.
************************************************************************************
Example:
-------
def test_my_w3(w3, request_mocker):
assert w3.eth.block_number == 0
with request_mocker(w3, mock_results={"eth_blockNumber": "0x1"}):
assert w3.eth.block_number == 1
assert w3.eth.block_number == 0
Example with async and a mocked response object:
-----------------------------------------------
async def test_my_w3(async_w3, request_mocker):
def _iter_responses():
while True:
yield {"error": {"message": "transaction indexing in progress"}}
yield {"error": {"message": "transaction indexing in progress"}}
yield {"result": {"status": "0x1"}}
iter_responses = _iter_responses()
async with request_mocker(
async_w3,
mock_responses={
"eth_getTransactionReceipt": lambda *_: next(iter_responses)
},
):
# assert that the first two error responses are handled and the result
# is eventually returned when present
assert await w3.eth.get_transaction_receipt("0x1") == "0x1"
- ``mock_results`` is a dict mapping method names to the desired "result" object of
the RPC response.
- ``mock_errors`` is a dict mapping method names to the desired
"error" object of the RPC response.
-``mock_responses`` is a dict mapping method names to the entire RPC response
object. This can be useful if you wish to return an iterator which returns
different responses on each call to the method.
If a method name is not present in any of the dicts above, the request is made as
usual.
"""
def __init__(
self,
w3: Union["AsyncWeb3[Any]", "Web3"],
mock_results: dict[Union["RPCEndpoint", str], Any] = None,
mock_errors: dict[Union["RPCEndpoint", str], Any] = None,
mock_responses: dict[Union["RPCEndpoint", str], Any] = None,
):
self.w3 = w3
self.mock_results = mock_results or {}
self.mock_errors = mock_errors or {}
self.mock_responses = mock_responses or {}
if isinstance(w3.provider, PersistentConnectionProvider):
self._send_request = w3.provider.send_request
self._recv_for_request = w3.provider.recv_for_request
else:
self._make_request: Union[
"AsyncMakeRequestFn", "MakeRequestFn"
] = w3.provider.make_request
def _build_request_id(self) -> int:
request_id = (
next(copy.deepcopy(self.w3.provider.request_counter))
if hasattr(self.w3.provider, "request_counter")
else 1
)
return request_id
def __enter__(self) -> "Self":
# mypy error: Cannot assign to a method
self.w3.provider.make_request = self._mock_request_handler # type: ignore[method-assign] # noqa: E501
# reset request func cache to re-build request_func with mocked make_request
self.w3.provider._request_func_cache = (None, None)
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
self.w3.provider.make_request = ( # type: ignore[method-assign]
self._make_request
)
# reset request func cache to re-build request_func with original make_request
self.w3.provider._request_func_cache = (None, None)
def _mock_request_handler(
self, method: "RPCEndpoint", params: Any
) -> "RPCResponse":
self.w3 = cast("Web3", self.w3)
self._make_request = cast("MakeRequestFn", self._make_request)
if all(
method not in mock_dict
for mock_dict in (self.mock_errors, self.mock_results, self.mock_responses)
):
return self._make_request(method, params)
request_id = self._build_request_id()
response_dict = {"jsonrpc": "2.0", "id": request_id}
if method in self.mock_responses:
mock_return = self.mock_responses[method]
if callable(mock_return):
mock_return = mock_return(method, params)
if "result" in mock_return:
mock_return = {"result": mock_return["result"]}
elif "error" in mock_return:
mock_return = self._create_error_object(mock_return["error"])
mocked_response = merge(response_dict, mock_return)
elif method in self.mock_results:
mock_return = self.mock_results[method]
if callable(mock_return):
mock_return = mock_return(method, params)
mocked_response = merge(response_dict, {"result": mock_return})
elif method in self.mock_errors:
error = self.mock_errors[method]
if callable(error):
error = error(method, params)
mocked_response = merge(response_dict, self._create_error_object(error))
else:
raise Exception("Invariant: unreachable code path")
decorator = getattr(self._make_request, "_decorator", None)
if decorator is not None:
# If the original make_request was decorated, we need to re-apply
# the decorator to the mocked make_request. This is necessary for
# the request caching decorator to work properly.
return decorator(lambda *_: mocked_response)(
self.w3.provider, method, params
)
else:
return mocked_response
# -- async -- #
async def __aenter__(self) -> "Self":
if not isinstance(self.w3.provider, PersistentConnectionProvider):
# mypy error: Cannot assign to a method
self.w3.provider.make_request = self._async_mock_request_handler # type: ignore[method-assign] # noqa: E501
# reset request func cache to re-build request_func w/ mocked make_request
self.w3.provider._request_func_cache = (None, None)
else:
self.w3.provider.send_request = self._async_mock_send_handler # type: ignore[method-assign] # noqa: E501
self.w3.provider.recv_for_request = self._async_mock_recv_handler # type: ignore[method-assign] # noqa: E501
self.w3.provider._send_func_cache = (None, None)
self.w3.provider._recv_func_cache = (None, None)
return self
async def __aexit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
if not isinstance(self.w3.provider, PersistentConnectionProvider):
# mypy error: Cannot assign to a method
self.w3.provider.make_request = self._make_request # type: ignore[method-assign] # noqa: E501
# reset request func cache to re-build request_func w/ original make_request
self.w3.provider._request_func_cache = (None, None)
else:
self.w3.provider.send_request = self._send_request # type: ignore[method-assign] # noqa: E501
self.w3.provider.recv_for_request = self._recv_for_request # type: ignore[method-assign] # noqa: E501
self.w3.provider._send_func_cache = (None, None)
self.w3.provider._recv_func_cache = (None, None)
async def _async_build_mock_result(
self, method: "RPCEndpoint", params: Any, request_id: int = None
) -> "RPCResponse":
request_id = request_id if request_id else self._build_request_id()
response_dict = {"jsonrpc": "2.0", "id": request_id}
if method in self.mock_responses:
mock_return = self.mock_responses[method]
if callable(mock_return):
mock_return = mock_return(method, params)
elif iscoroutinefunction(mock_return):
# this is the "correct" way to mock the async make_request
mock_return = await mock_return(method, params)
if "result" in mock_return:
mock_return = {"result": mock_return["result"]}
elif "error" in mock_return:
mock_return = self._create_error_object(mock_return["error"])
mocked_result = merge(response_dict, mock_return)
elif method in self.mock_results:
mock_return = self.mock_results[method]
if callable(mock_return):
# handle callable to make things easier since we're mocking
mock_return = mock_return(method, params)
elif iscoroutinefunction(mock_return):
# this is the "correct" way to mock the async make_request
mock_return = await mock_return(method, params)
mocked_result = merge(response_dict, {"result": mock_return})
elif method in self.mock_errors:
error = self.mock_errors[method]
if callable(error):
error = error(method, params)
elif iscoroutinefunction(error):
error = await error(method, params)
mocked_result = merge(response_dict, self._create_error_object(error))
else:
raise Exception("Invariant: unreachable code path")
return mocked_result
async def _async_mock_request_handler(
self, method: "RPCEndpoint", params: Any
) -> "RPCResponse":
self.w3 = cast("AsyncWeb3[Any]", self.w3)
self._make_request = cast("AsyncMakeRequestFn", self._make_request)
if all(
method not in mock_dict
for mock_dict in (self.mock_errors, self.mock_results, self.mock_responses)
):
return await self._make_request(method, params)
mocked_result = await self._async_build_mock_result(method, params)
decorator = getattr(self._make_request, "_decorator", None)
if decorator is not None:
# If the original make_request was decorated, we need to re-apply
# the decorator to the mocked make_request. This is necessary for
# the request caching decorator to work properly.
async def _coro(
_provider: Any, _method: "RPCEndpoint", _params: Any
) -> "RPCResponse":
return mocked_result
return await decorator(_coro)(self.w3.provider, method, params)
else:
return mocked_result
async def _async_mock_send_handler(
self, method: "RPCEndpoint", params: Any
) -> "RPCRequest":
if all(
method not in mock_dict
for mock_dict in (self.mock_errors, self.mock_results, self.mock_responses)
):
return await self._send_request(method, params)
else:
request_id = self._build_request_id()
return {"id": request_id, "method": method, "params": params}
async def _async_mock_recv_handler(
self, rpc_request: "RPCRequest"
) -> "RPCResponse":
self.w3 = cast("AsyncWeb3[Any]", self.w3)
method = rpc_request["method"]
request_id = rpc_request["id"]
if all(
method not in mock_dict
for mock_dict in (self.mock_errors, self.mock_results, self.mock_responses)
):
return await self._recv_for_request(request_id)
mocked_result = await self._async_build_mock_result(
method, rpc_request["params"], request_id=int(request_id)
)
decorator = getattr(self._recv_for_request, "_decorator", None)
if decorator is not None:
# If the original recv_for_request was decorated, we need to re-apply
# the decorator to the mocked recv_for_request. This is necessary for
# the request caching decorator to work properly.
async def _coro(
_provider: Any, _rpc_request: "RPCRequest"
) -> "RPCResponse":
return mocked_result
return await decorator(_coro)(self.w3.provider, rpc_request)
else:
return mocked_result
@staticmethod
def _create_error_object(error: dict[str, Any]) -> dict[str, Any]:
code = error.get("code", -32000)
message = error.get("message", "Mocked error")
return {"error": merge({"code": code, "message": message}, error)}
| RequestMocker |
python | huggingface__transformers | src/transformers/models/upernet/modeling_upernet.py | {
"start": 10004,
"end": 14238
} | class ____(UperNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.backbone = load_backbone(config)
# Semantic segmentation head(s)
self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
self.auxiliary_head = (
UperNetFCNHead(config, in_channels=self.backbone.channels) if config.use_auxiliary_head else None
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, SemanticSegmenterOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
>>> from PIL import Image
>>> from huggingface_hub import hf_hub_download
>>> image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
>>> model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
>>> filepath = hf_hub_download(
... repo_id="hf-internal-testing/fixtures_ade20k", filename="ADE_val_00000001.jpg", repo_type="dataset"
... )
>>> image = Image.open(filepath).convert("RGB")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits # shape (batch_size, num_labels, height, width)
>>> list(logits.shape)
[1, 150, 512, 512]
```"""
if labels is not None and self.config.num_labels == 1:
raise ValueError("The number of labels should be greater than one")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
outputs = self.backbone.forward_with_filtered_kwargs(
pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
)
features = outputs.feature_maps
logits = self.decode_head(features)
logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)
auxiliary_logits = None
if self.auxiliary_head is not None:
auxiliary_logits = self.auxiliary_head(features)
auxiliary_logits = nn.functional.interpolate(
auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
)
loss = None
if labels is not None:
# compute weighted loss
loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
loss = loss_fct(logits, labels)
if auxiliary_logits is not None:
auxiliary_loss = loss_fct(auxiliary_logits, labels)
loss += self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
output = (logits,) + outputs[1:]
else:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = ["UperNetForSemanticSegmentation", "UperNetPreTrainedModel"]
| UperNetForSemanticSegmentation |
python | pypa__virtualenv | src/virtualenv/config/ini.py | {
"start": 264,
"end": 2706
} | class ____:
VIRTUALENV_CONFIG_FILE_ENV_VAR: ClassVar[str] = "VIRTUALENV_CONFIG_FILE"
STATE: ClassVar[dict[bool | None, str]] = {None: "failed to parse", True: "active", False: "missing"}
section = "virtualenv"
def __init__(self, env=None) -> None:
env = os.environ if env is None else env
config_file = env.get(self.VIRTUALENV_CONFIG_FILE_ENV_VAR, None)
self.is_env_var = config_file is not None
if config_file is None:
config_file = Path(user_config_dir(appname="virtualenv", appauthor="pypa")) / "virtualenv.ini"
else:
config_file = Path(config_file)
self.config_file = config_file
self._cache = {}
exception = None
self.has_config_file = None
try:
self.has_config_file = self.config_file.exists()
except OSError as exc:
exception = exc
else:
if self.has_config_file:
self.config_file = self.config_file.resolve()
self.config_parser = ConfigParser()
try:
self._load()
self.has_virtualenv_section = self.config_parser.has_section(self.section)
except Exception as exc: # noqa: BLE001
exception = exc
if exception is not None:
LOGGER.error("failed to read config file %s because %r", config_file, exception)
def _load(self):
with self.config_file.open("rt", encoding="utf-8") as file_handler:
return self.config_parser.read_file(file_handler)
def get(self, key, as_type):
cache_key = key, as_type
if cache_key in self._cache:
return self._cache[cache_key]
try:
source = "file"
raw_value = self.config_parser.get(self.section, key.lower())
value = convert(raw_value, as_type, source)
result = value, source
except Exception: # noqa: BLE001
result = None
self._cache[cache_key] = result
return result
def __bool__(self) -> bool:
return bool(self.has_config_file) and bool(self.has_virtualenv_section)
@property
def epilog(self):
return (
f"\nconfig file {self.config_file} {self.STATE[self.has_config_file]} "
f"(change{'d' if self.is_env_var else ''} via env var {self.VIRTUALENV_CONFIG_FILE_ENV_VAR})"
)
| IniConfig |
python | readthedocs__readthedocs.org | readthedocs/oauth/services/base.py | {
"start": 982,
"end": 5087
} | class ____:
"""Base class for service that interacts with a VCS provider and a project."""
vcs_provider_slug: str
allauth_provider = type[OAuth2Provider]
url_pattern: re.Pattern | None = None
default_user_avatar_url = settings.OAUTH_AVATAR_USER_DEFAULT_URL
default_org_avatar_url = settings.OAUTH_AVATAR_ORG_DEFAULT_URL
supports_build_status = False
supports_clone_token = False
supports_commenting = False
@classmethod
def for_project(cls, project):
"""Return an iterator of services that can be used for the project."""
raise NotImplementedError
@classmethod
def for_user(cls, user):
"""Return an iterator of services that belong to the user."""
raise NotImplementedError
@classmethod
def sync_user_access(cls, user):
"""Sync the user's access to the provider's repositories and organizations."""
raise NotImplementedError
def sync(self):
"""
Sync remote repositories and organizations.
- Creates a new RemoteRepository/Organization per new repository
- Updates fields for existing RemoteRepository/Organization
- Deletes old RemoteRepository/Organization that are no longer present
in this provider.
"""
raise NotImplementedError
def update_repository(self, remote_repository: RemoteRepository):
"""
Update a repository using the service API.
This also updates the user relationship with the repository,
if user is an admin or not, and in case the user no longer has access
to the repository, the relationship is removed.
In the case of services that aren't linked to a user (GitHub Apps),
this method will update the permissions of all users that have access
to the repository.
"""
raise NotImplementedError
def setup_webhook(self, project, integration=None) -> bool:
"""
Setup webhook for project.
:param project: project to set up webhook for
:type project: Project
:param integration: Integration for the project
:type integration: Integration
:returns: boolean based on webhook set up success
"""
raise NotImplementedError
def update_webhook(self, project, integration) -> bool:
"""
Update webhook integration.
:param project: project to set up webhook for
:type project: Project
:param integration: Webhook integration to update
:type integration: Integration
:returns: boolean based on webhook update success, and requests Response object
"""
raise NotImplementedError
def send_build_status(self, *, build, commit, status):
"""
Create commit status for project.
:param build: Build to set up commit status for
:type build: Build
:param commit: commit sha of the pull/merge request
:type commit: str
:param status: build state failure, pending, or success.
:type status: str
:returns: boolean based on commit status creation was successful or not.
:rtype: Bool
"""
raise NotImplementedError
def get_clone_token(self, project):
"""Get a token used for cloning the repository."""
raise NotImplementedError
def post_comment(self, build, comment: str, create_new: bool = True):
"""
Post a comment on the pull request attached to the build.
:param create_new: Create a new comment if one doesn't exist.
"""
raise NotImplementedError
@classmethod
def is_project_service(cls, project):
"""
Determine if this is the service the project is using.
.. note::
This should be deprecated in favor of attaching the
:py:class:`RemoteRepository` to the project instance. This is a
slight improvement on the legacy check for webhooks
"""
return cls.url_pattern is not None and cls.url_pattern.search(project.repo) is not None
| Service |
python | openai__openai-python | src/openai/resources/realtime/client_secrets.py | {
"start": 7184,
"end": 7435
} | class ____:
def __init__(self, client_secrets: ClientSecrets) -> None:
self._client_secrets = client_secrets
self.create = to_streamed_response_wrapper(
client_secrets.create,
)
| ClientSecretsWithStreamingResponse |
python | django__django | tests/serializers/models/base.py | {
"start": 3959,
"end": 4041
} | class ____(ProxyBaseModel):
class Meta:
proxy = True
| ProxyProxyBaseModel |
python | realpython__materials | inherit-python-string/lower_string.py | {
"start": 0,
"end": 428
} | class ____(str):
# This initializer doesn't work
def __init__(self, string):
super().__init__(string.lower())
# class LowerString(str):
# def __new__(cls, string):
# instance = super().__new__(cls, string.lower())
# return instance
# from collections import UserString
# class LowerString(UserString):
# def __init__(self, string):
# super().__init__(string.lower())
| LowerString |
python | apache__airflow | providers/fab/tests/unit/fab/auth_manager/cli_commands/test_role_command.py | {
"start": 1495,
"end": 8983
} | class ____:
@pytest.fixture(autouse=True)
def _set_attrs(self):
with conf_vars(
{
(
"core",
"auth_manager",
): "airflow.providers.fab.auth_manager.fab_auth_manager.FabAuthManager",
}
):
# Reload the module to use FAB auth manager
reload(cli_parser)
# Clearing the cache before calling it
cli_parser.get_parser.cache_clear()
self.parser = cli_parser.get_parser()
with conf_vars({("fab", "UPDATE_FAB_PERMS"): "False"}):
with get_application_builder() as appbuilder:
self.appbuilder = appbuilder
self.clear_users_and_roles()
yield
self.clear_users_and_roles()
def clear_users_and_roles(self):
session = self.appbuilder.session
for user in self.appbuilder.sm.get_all_users():
session.delete(user)
for role_name in ["FakeTeamA", "FakeTeamB", "FakeTeamC"]:
if self.appbuilder.sm.find_role(role_name):
self.appbuilder.sm.delete_role(role_name)
session.commit()
def test_cli_create_roles(self):
assert self.appbuilder.sm.find_role("FakeTeamA") is None
assert self.appbuilder.sm.find_role("FakeTeamB") is None
args = self.parser.parse_args(["roles", "create", "FakeTeamA", "FakeTeamB"])
role_command.roles_create(args)
assert self.appbuilder.sm.find_role("FakeTeamA") is not None
assert self.appbuilder.sm.find_role("FakeTeamB") is not None
def test_cli_delete_roles(self):
assert self.appbuilder.sm.find_role("FakeTeamA") is None
assert self.appbuilder.sm.find_role("FakeTeamB") is None
assert self.appbuilder.sm.find_role("FakeTeamC") is None
self.appbuilder.sm.add_role("FakeTeamA")
self.appbuilder.sm.add_role("FakeTeamB")
self.appbuilder.sm.add_role("FakeTeamC")
args = self.parser.parse_args(["roles", "delete", "FakeTeamA", "FakeTeamC"])
role_command.roles_delete(args)
assert self.appbuilder.sm.find_role("FakeTeamA") is None
assert self.appbuilder.sm.find_role("FakeTeamB") is not None
assert self.appbuilder.sm.find_role("FakeTeamC") is None
def test_cli_create_roles_is_reentrant(self):
assert self.appbuilder.sm.find_role("FakeTeamA") is None
assert self.appbuilder.sm.find_role("FakeTeamB") is None
args = self.parser.parse_args(["roles", "create", "FakeTeamA", "FakeTeamB"])
role_command.roles_create(args)
assert self.appbuilder.sm.find_role("FakeTeamA") is not None
assert self.appbuilder.sm.find_role("FakeTeamB") is not None
def test_cli_list_roles(self):
self.appbuilder.sm.add_role("FakeTeamA")
self.appbuilder.sm.add_role("FakeTeamB")
with redirect_stdout(StringIO()) as stdout_io:
role_command.roles_list(self.parser.parse_args(["roles", "list"]))
stdout = stdout_io.getvalue()
assert "FakeTeamA" in stdout
assert "FakeTeamB" in stdout
def test_cli_list_roles_with_args(self):
role_command.roles_list(self.parser.parse_args(["roles", "list", "--output", "yaml"]))
role_command.roles_list(self.parser.parse_args(["roles", "list", "-p", "--output", "yaml"]))
def test_cli_roles_add_and_del_perms(self):
assert self.appbuilder.sm.find_role("FakeTeamC") is None
role_command.roles_create(self.parser.parse_args(["roles", "create", "FakeTeamC"]))
assert self.appbuilder.sm.find_role("FakeTeamC") is not None
role: Role = self.appbuilder.sm.find_role("FakeTeamC")
assert len(role.permissions) == 0
role_command.roles_add_perms(
self.parser.parse_args(
[
"roles",
"add-perms",
"FakeTeamC",
"-r",
permissions.RESOURCE_POOL,
"-a",
permissions.ACTION_CAN_EDIT,
]
)
)
role: Role = self.appbuilder.sm.find_role("FakeTeamC")
assert len(role.permissions) == 1
assert role.permissions[0].resource.name == permissions.RESOURCE_POOL
assert role.permissions[0].action.name == permissions.ACTION_CAN_EDIT
role_command.roles_del_perms(
self.parser.parse_args(
[
"roles",
"del-perms",
"FakeTeamC",
"-r",
permissions.RESOURCE_POOL,
"-a",
permissions.ACTION_CAN_EDIT,
]
)
)
role: Role = self.appbuilder.sm.find_role("FakeTeamC")
assert len(role.permissions) == 0
def test_cli_export_roles(self, tmp_path):
fn = tmp_path / "export_roles.json"
fn.touch()
args = self.parser.parse_args(["roles", "create", "FakeTeamA", "FakeTeamB"])
role_command.roles_create(args)
role_command.roles_add_perms(
self.parser.parse_args(
[
"roles",
"add-perms",
"FakeTeamA",
"-r",
permissions.RESOURCE_POOL,
"-a",
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_READ,
]
)
)
role_command.roles_export(self.parser.parse_args(["roles", "export", str(fn)]))
with open(fn) as outfile:
roles_exported = json.load(outfile)
assert {"name": "FakeTeamA", "resource": "Pools", "action": "can_edit,can_read"} in roles_exported
assert {"name": "FakeTeamB", "resource": "", "action": ""} in roles_exported
def test_cli_import_roles(self, tmp_path):
fn = tmp_path / "import_roles.json"
fn.touch()
roles_list = [
{"name": "FakeTeamA", "resource": "Pools", "action": "can_edit,can_read"},
{"name": "FakeTeamA", "resource": "Admin", "action": "menu_access"},
{"name": "FakeTeamB", "resource": "", "action": ""},
]
with open(fn, "w") as outfile:
json.dump(roles_list, outfile)
role_command.roles_import(self.parser.parse_args(["roles", "import", str(fn)]))
fakeTeamA: Role = self.appbuilder.sm.find_role("FakeTeamA")
fakeTeamB: Role = self.appbuilder.sm.find_role("FakeTeamB")
assert fakeTeamA is not None
assert fakeTeamB is not None
assert len(fakeTeamB.permissions) == 0
assert len(fakeTeamA.permissions) == 3
assert any(
permission.resource.name == permissions.RESOURCE_POOL
and permission.action.name == permissions.ACTION_CAN_EDIT
for permission in fakeTeamA.permissions
)
assert any(
permission.resource.name == permissions.RESOURCE_POOL
and permission.action.name == permissions.ACTION_CAN_READ
for permission in fakeTeamA.permissions
)
assert any(
permission.resource.name == permissions.RESOURCE_ADMIN_MENU
and permission.action.name == permissions.ACTION_CAN_ACCESS_MENU
for permission in fakeTeamA.permissions
)
| TestCliRoles |
python | Pylons__pyramid | tests/test_registry.py | {
"start": 10675,
"end": 13009
} | class ____(unittest.TestCase):
def _getTargetClass(slf):
from pyramid.registry import Introspectable
return Introspectable
def _makeOne(self, *arg, **kw):
return self._getTargetClass()(*arg, **kw)
def _makeOnePopulated(self):
return self._makeOne('category', 'discrim', 'title', 'type')
def test_conformance(self):
from zope.interface.verify import verifyClass, verifyObject
from pyramid.interfaces import IIntrospectable
verifyClass(IIntrospectable, self._getTargetClass())
verifyObject(IIntrospectable, self._makeOnePopulated())
def test_relate(self):
inst = self._makeOnePopulated()
inst.relate('a', 'b')
self.assertEqual(inst._relations, [(True, 'a', 'b')])
def test_unrelate(self):
inst = self._makeOnePopulated()
inst.unrelate('a', 'b')
self.assertEqual(inst._relations, [(False, 'a', 'b')])
def test_discriminator_hash(self):
inst = self._makeOnePopulated()
self.assertEqual(inst.discriminator_hash, hash(inst.discriminator))
def test___hash__(self):
inst = self._makeOnePopulated()
self.assertEqual(
hash(inst), hash((inst.category_name,) + (inst.discriminator,))
)
def test___repr__(self):
inst = self._makeOnePopulated()
self.assertEqual(
repr(inst),
"<Introspectable category 'category', discriminator 'discrim'>",
)
def test___bool__(self):
inst = self._makeOnePopulated()
self.assertEqual(inst.__bool__(), True)
def test_register(self):
introspector = DummyIntrospector()
action_info = object()
inst = self._makeOnePopulated()
inst._relations.append((True, 'category1', 'discrim1'))
inst._relations.append((False, 'category2', 'discrim2'))
inst.register(introspector, action_info)
self.assertEqual(inst.action_info, action_info)
self.assertEqual(introspector.intrs, [inst])
self.assertEqual(
introspector.relations,
[(('category', 'discrim'), ('category1', 'discrim1'))],
)
self.assertEqual(
introspector.unrelations,
[(('category', 'discrim'), ('category2', 'discrim2'))],
)
| TestIntrospectable |
python | dask__distributed | distributed/deploy/tests/test_local.py | {
"start": 26629,
"end": 36999
} | class ____(Worker):
pass
def test_worker_class_worker(loop):
with LocalCluster(
n_workers=2,
loop=loop,
worker_class=MyWorker,
processes=False,
dashboard_address=":0",
) as cluster:
assert all(isinstance(w, MyWorker) for w in cluster.workers.values())
def test_worker_class_nanny(loop):
class MyNanny(Nanny):
pass
with LocalCluster(
n_workers=2,
loop=loop,
worker_class=MyNanny,
dashboard_address=":0",
) as cluster:
assert all(isinstance(w, MyNanny) for w in cluster.workers.values())
@gen_test()
async def test_worker_class_nanny_async():
class MyNanny(Nanny):
pass
async with LocalCluster(
n_workers=2,
worker_class=MyNanny,
dashboard_address=":0",
asynchronous=True,
) as cluster:
assert all(isinstance(w, MyNanny) for w in cluster.workers.values())
def test_starts_up_sync(loop):
cluster = LocalCluster(
n_workers=2,
loop=loop,
processes=False,
dashboard_address=":0",
)
try:
assert len(cluster.scheduler.workers) == 2
finally:
cluster.close()
def test_dont_select_closed_worker(loop):
# Make sure distributed does not try to reuse a client from a
# closed cluster (https://github.com/dask/distributed/issues/2840).
cluster = LocalCluster(n_workers=0, dashboard_address=":0", loop=loop)
c = Client(cluster)
cluster.scale(2)
assert c == get_client()
c.close()
cluster.close()
cluster2 = LocalCluster(n_workers=0, dashboard_address=":0", loop=loop)
c2 = Client(cluster2)
cluster2.scale(2)
current_client = get_client()
assert c2 == current_client
cluster2.close()
c2.close()
def test_client_cluster_synchronous(loop):
with Client(loop=loop, processes=False, dashboard_address=":0") as c:
assert not c.asynchronous
assert not c.cluster.asynchronous
@gen_test()
async def test_scale_memory_cores():
async with LocalCluster(
n_workers=0,
processes=False,
threads_per_worker=2,
memory_limit="2GB",
asynchronous=True,
dashboard_address=":0",
) as cluster:
cluster.scale(cores=4)
assert len(cluster.worker_spec) == 2
cluster.scale(memory="6GB")
assert len(cluster.worker_spec) == 3
cluster.scale(cores=1)
assert len(cluster.worker_spec) == 1
cluster.scale(memory="7GB")
assert len(cluster.worker_spec) == 4
@pytest.mark.parametrize("memory_limit", ["2 GiB", None])
@gen_test()
async def test_repr(memory_limit):
async with LocalCluster(
n_workers=2,
processes=False,
threads_per_worker=2,
memory_limit=memory_limit,
asynchronous=True,
dashboard_address=":0",
) as cluster:
# __repr__ uses cluster.scheduler_info, which slightly lags behind
# cluster.scheduler.workers and client.wait_for_workers.
while len(cluster.scheduler_info["workers"]) < 2:
await asyncio.sleep(0.01)
text = repr(cluster)
assert cluster.scheduler_address in text
assert "workers=2, threads=4" in text
if memory_limit:
assert "memory=4.00 GiB" in text
else:
assert "memory" not in text
@gen_test()
async def test_threads_per_worker_set_to_0():
with pytest.warns(
Warning, match="Setting `threads_per_worker` to 0 has been deprecated."
):
async with LocalCluster(
n_workers=2,
processes=False,
threads_per_worker=0,
asynchronous=True,
dashboard_address=":0",
) as cluster:
assert len(cluster.workers) == 2
assert all(w.state.nthreads < CPU_COUNT for w in cluster.workers.values())
@pytest.mark.parametrize("temporary", [True, False])
@gen_test()
async def test_capture_security(temporary):
if temporary:
xfail_ssl_issue5601()
pytest.importorskip("cryptography")
security = True
else:
security = tls_only_security()
async with LocalCluster(
n_workers=0,
silence_logs=False,
security=security,
asynchronous=True,
dashboard_address=":0",
host="tls://0.0.0.0",
) as cluster:
async with Client(cluster, asynchronous=True) as client:
assert client.security == cluster.security
@gen_test()
async def test_no_dangling_asyncio_tasks():
start = asyncio.all_tasks()
async with LocalCluster(asynchronous=True, processes=False, dashboard_address=":0"):
await asyncio.sleep(0.01)
tasks = asyncio.all_tasks()
assert tasks == start
@gen_test()
async def test_async_with():
async with LocalCluster(
processes=False, asynchronous=True, dashboard_address=":0"
) as cluster:
w = cluster.workers
assert w
assert not w
@gen_test()
async def test_no_workers():
async with Client(
n_workers=0, silence_logs=False, dashboard_address=":0", asynchronous=True
) as c:
pass
@gen_test()
async def test_cluster_names():
async with LocalCluster(
processes=False, asynchronous=True, dashboard_address=":0"
) as unnamed_cluster:
async with LocalCluster(
processes=False, asynchronous=True, name="mycluster", dashboard_address=":0"
) as named_cluster:
assert isinstance(unnamed_cluster.name, str)
assert isinstance(named_cluster.name, str)
assert named_cluster.name == "mycluster"
assert unnamed_cluster == unnamed_cluster
assert named_cluster == named_cluster
assert unnamed_cluster != named_cluster
async with LocalCluster(
processes=False, asynchronous=True, dashboard_address=":0"
) as unnamed_cluster2:
assert unnamed_cluster2 != unnamed_cluster
@pytest.mark.parametrize("nanny", [True, False])
@gen_test()
async def test_local_cluster_redundant_kwarg(nanny):
cluster = LocalCluster(
typo_kwarg="foo",
processes=nanny,
n_workers=1,
dashboard_address=":0",
asynchronous=True,
)
if nanny:
ctx = raises_with_cause(
RuntimeError, None, TypeError, "unexpected keyword argument"
)
else:
ctx = pytest.raises(TypeError, match="unexpected keyword argument")
with ctx:
# Extra arguments are forwarded to the worker class. Depending on
# whether we use the nanny or not, the error treatment is quite
# different and we should assert that an exception is raised
async with cluster:
pass
@gen_test()
async def test_cluster_info_sync():
async with LocalCluster(
processes=False,
asynchronous=True,
scheduler_sync_interval="1ms",
dashboard_address=":0",
) as cluster:
assert cluster._cluster_info["name"] == cluster.name
while "name" not in cluster.scheduler.get_metadata(
keys=["cluster-manager-info"]
):
await asyncio.sleep(0.01)
info = await cluster.scheduler_comm.get_metadata(keys=["cluster-manager-info"])
assert info["name"] == cluster.name
info = cluster.scheduler.get_metadata(keys=["cluster-manager-info"])
assert info["name"] == cluster.name
cluster._cluster_info["foo"] = "bar"
while "foo" not in cluster.scheduler.get_metadata(
keys=["cluster-manager-info"]
):
await asyncio.sleep(0.01)
info = cluster.scheduler.get_metadata(keys=["cluster-manager-info"])
assert info["foo"] == "bar"
@gen_test()
async def test_cluster_info_sync_is_robust_to_network_blips(monkeypatch):
async with LocalCluster(
processes=False,
asynchronous=True,
scheduler_sync_interval="1ms",
dashboard_address=":0",
) as cluster:
assert cluster._cluster_info["name"] == cluster.name
error_called = False
async def error(*args, **kwargs):
nonlocal error_called
await asyncio.sleep(0.001)
error_called = True
raise OSError
# Temporarily patch the `set_metadata` RPC to error
with monkeypatch.context() as patch:
patch.setattr(cluster.scheduler_comm, "set_metadata", error)
# Set a new cluster_info value
cluster._cluster_info["foo"] = "bar"
# Wait for the bad method to be called at least once
while not error_called:
await asyncio.sleep(0.01)
# Check that cluster_info is resynced after the error condition is fixed
while "foo" not in cluster.scheduler.get_metadata(
keys=["cluster-manager-info"]
):
await asyncio.sleep(0.01)
info = cluster.scheduler.get_metadata(keys=["cluster-manager-info"])
assert info["foo"] == "bar"
@pytest.mark.parametrize("host", [None, "127.0.0.1"])
@pytest.mark.parametrize("use_nanny", [True, False])
@gen_test()
async def test_cluster_host_used_throughout_cluster(host, use_nanny):
"""Ensure that the `host` kwarg is propagated through scheduler, nanny, and workers"""
async with LocalCluster(
host=host, asynchronous=True, dashboard_address=":0"
) as cluster:
url = urlparse(cluster.scheduler_address)
assert url.hostname == "127.0.0.1"
for worker in cluster.workers.values():
url = urlparse(worker.address)
assert url.hostname == "127.0.0.1"
if use_nanny:
url = urlparse(worker.process.worker_address)
assert url.hostname == "127.0.0.1"
@gen_test()
async def test_connect_to_closed_cluster():
async with LocalCluster(
processes=False, asynchronous=True, dashboard_address=":0"
) as cluster:
async with Client(cluster, asynchronous=True) as c1:
assert await c1.submit(inc, 1) == 2
with pytest.raises(
RuntimeError,
match="Trying to connect to an already closed or closing Cluster",
):
# Raises during init without actually connecting since we're not
# awaiting anything
Client(cluster, asynchronous=True)
| MyWorker |
python | wandb__wandb | wandb/sdk/lib/service/service_connection.py | {
"start": 700,
"end": 780
} | class ____(Exception):
"""Failed to attach to a run."""
| WandbAttachFailedError |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 20755,
"end": 22402
} | class ____:
def setup_method(self):
default = serializers.CreateOnlyDefault('2001-01-01')
class TestSerializer(serializers.Serializer):
published = serializers.HiddenField(default=default)
text = serializers.CharField()
self.Serializer = TestSerializer
def test_create_only_default_is_provided(self):
serializer = self.Serializer(data={'text': 'example'})
assert serializer.is_valid()
assert serializer.validated_data == {
'text': 'example', 'published': '2001-01-01'
}
def test_create_only_default_is_not_provided_on_update(self):
instance = {
'text': 'example', 'published': '2001-01-01'
}
serializer = self.Serializer(instance, data={'text': 'example'})
assert serializer.is_valid()
assert serializer.validated_data == {
'text': 'example',
}
def test_create_only_default_callable_sets_context(self):
"""
CreateOnlyDefault instances with a callable default should set context
on the callable if possible
"""
class TestCallableDefault:
requires_context = True
def __call__(self, field=None):
return "success" if field is not None else "failure"
class TestSerializer(serializers.Serializer):
context_set = serializers.CharField(default=serializers.CreateOnlyDefault(TestCallableDefault()))
serializer = TestSerializer(data={})
assert serializer.is_valid()
assert serializer.validated_data['context_set'] == 'success'
| TestCreateOnlyDefault |
python | ansible__ansible | test/units/module_utils/basic/test_selinux.py | {
"start": 1032,
"end": 10097
} | class ____:
def test_selinux_enabled(self):
# test selinux unavailable
# selinux unavailable, should return false
with patch.object(basic, 'HAVE_SELINUX', False):
assert no_args_module().selinux_enabled() is False
# test selinux present/not-enabled
disabled_mod = no_args_module()
with patch.object(basic, 'selinux', create=True) as selinux:
selinux.is_selinux_enabled.return_value = 0
assert disabled_mod.selinux_enabled() is False
# ensure value is cached (same answer after unpatching)
assert disabled_mod.selinux_enabled() is False
# and present / enabled
with patch.object(basic, 'HAVE_SELINUX', True):
enabled_mod = no_args_module()
with patch.object(basic, 'selinux', create=True) as selinux:
selinux.is_selinux_enabled.return_value = 1
assert enabled_mod.selinux_enabled() is True
# ensure value is cached (same answer after unpatching)
assert enabled_mod.selinux_enabled() is True
def test_selinux_mls_enabled(self):
# selinux unavailable, should return false
with patch.object(basic, 'HAVE_SELINUX', False):
assert no_args_module().selinux_mls_enabled() is False
# selinux disabled, should return false
with patch.object(basic, 'selinux', create=True) as selinux:
selinux.is_selinux_mls_enabled.return_value = 0
assert no_args_module(selinux_enabled=False).selinux_mls_enabled() is False
with patch.object(basic, 'HAVE_SELINUX', True):
# selinux enabled, should pass through the value of is_selinux_mls_enabled
with patch.object(basic, 'selinux', create=True) as selinux:
selinux.is_selinux_mls_enabled.return_value = 1
assert no_args_module(selinux_enabled=True).selinux_mls_enabled() is True
def test_selinux_initial_context(self):
# selinux missing/disabled/enabled sans MLS is 3-element None
assert no_args_module(selinux_enabled=False, selinux_mls_enabled=False).selinux_initial_context() == [None, None, None]
assert no_args_module(selinux_enabled=True, selinux_mls_enabled=False).selinux_initial_context() == [None, None, None]
# selinux enabled with MLS is 4-element None
assert no_args_module(selinux_enabled=True, selinux_mls_enabled=True).selinux_initial_context() == [None, None, None, None]
def test_selinux_default_context(self):
# selinux unavailable
with patch.object(basic, 'HAVE_SELINUX', False):
assert no_args_module().selinux_default_context(path='/foo/bar') == [None, None, None]
am = no_args_module(selinux_enabled=True, selinux_mls_enabled=True)
with patch.object(basic, 'selinux', create=True) as selinux:
# matchpathcon success
selinux.matchpathcon.return_value = [0, 'unconfined_u:object_r:default_t:s0']
assert am.selinux_default_context(path='/foo/bar') == ['unconfined_u', 'object_r', 'default_t', 's0']
with patch.object(basic, 'selinux', create=True) as selinux:
# matchpathcon fail (return initial context value)
selinux.matchpathcon.return_value = [-1, '']
assert am.selinux_default_context(path='/foo/bar') == [None, None, None, None]
with patch.object(basic, 'selinux', create=True) as selinux:
# matchpathcon OSError
selinux.matchpathcon.side_effect = OSError
assert am.selinux_default_context(path='/foo/bar') == [None, None, None, None]
def test_selinux_context(self):
# selinux unavailable
with patch.object(basic, 'HAVE_SELINUX', False):
assert no_args_module().selinux_context(path='/foo/bar') == [None, None, None]
am = no_args_module(selinux_enabled=True, selinux_mls_enabled=True)
# lgetfilecon_raw passthru
with patch.object(basic, 'selinux', create=True) as selinux:
selinux.lgetfilecon_raw.return_value = [0, 'unconfined_u:object_r:default_t:s0']
assert am.selinux_context(path='/foo/bar') == ['unconfined_u', 'object_r', 'default_t', 's0']
# lgetfilecon_raw returned a failure
with patch.object(basic, 'selinux', create=True) as selinux:
selinux.lgetfilecon_raw.return_value = [-1, '']
assert am.selinux_context(path='/foo/bar') == [None, None, None, None]
# lgetfilecon_raw OSError (should bomb the module)
with patch.object(basic, 'selinux', create=True) as selinux:
selinux.lgetfilecon_raw.side_effect = OSError(errno.ENOENT, 'NotFound')
with pytest.raises(SystemExit):
am.selinux_context(path='/foo/bar')
with patch.object(basic, 'selinux', create=True) as selinux:
selinux.lgetfilecon_raw.side_effect = OSError()
with pytest.raises(SystemExit):
am.selinux_context(path='/foo/bar')
def test_is_special_selinux_path(self):
args = dict(
_ansible_selinux_special_fs="nfs,nfsd,foos",
_ansible_remote_tmp="/tmp",
_ansible_keep_remote_files=False,
)
with patch_module_args(args):
am = basic.AnsibleModule(
argument_spec=dict(),
)
def _mock_find_mount_point(path):
if path.startswith('/some/path'):
return '/some/path'
elif path.startswith('/weird/random/fstype'):
return '/weird/random/fstype'
return '/'
am.find_mount_point = _mock_find_mount_point
am.selinux_context = lambda path: ['foo_u', 'foo_r', 'foo_t', 's0']
m = mock_open()
m.side_effect = OSError
with patch.object(builtins, 'open', m, create=True):
assert am.is_special_selinux_path('/some/path/that/should/be/nfs') == (False, None)
mount_data = [
'/dev/disk1 / ext4 rw,seclabel,relatime,data=ordered 0 0\n',
'10.1.1.1:/path/to/nfs /some/path nfs ro 0 0\n',
'whatever /weird/random/fstype foos rw 0 0\n',
]
# mock_open has a broken readlines() implementation apparently...
# this should work by default but doesn't, so we fix it
m = mock_open(read_data=''.join(mount_data))
m.return_value.readlines.return_value = mount_data
with patch.object(builtins, 'open', m, create=True):
assert am.is_special_selinux_path('/some/random/path') == (False, None)
assert am.is_special_selinux_path('/some/path/that/should/be/nfs') == (True, ['foo_u', 'foo_r', 'foo_t', 's0'])
assert am.is_special_selinux_path('/weird/random/fstype/path') == (True, ['foo_u', 'foo_r', 'foo_t', 's0'])
def test_set_context_if_different(self):
am = no_args_module(selinux_enabled=False)
assert am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], True) is True
assert am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False) is False
am = no_args_module(selinux_enabled=True, selinux_mls_enabled=True)
am.selinux_context = lambda path: ['bar_u', 'bar_r', None, None]
am.is_special_selinux_path = lambda path: (False, None)
with patch.object(basic, 'selinux', create=True) as selinux:
selinux.lsetfilecon.return_value = 0
assert am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False) is True
selinux.lsetfilecon.assert_called_with('/path/to/file', 'foo_u:foo_r:foo_t:s0')
selinux.lsetfilecon.reset_mock()
am.check_mode = True
assert am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False) is True
assert not selinux.lsetfilecon.called
am.check_mode = False
with patch.object(basic, 'selinux', create=True) as selinux:
selinux.lsetfilecon.return_value = 1
with pytest.raises(SystemExit):
am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], True)
with patch.object(basic, 'selinux', create=True) as selinux:
selinux.lsetfilecon.side_effect = OSError
with pytest.raises(SystemExit):
am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], True)
am.is_special_selinux_path = lambda path: (True, ['sp_u', 'sp_r', 'sp_t', 's0'])
with patch.object(basic, 'selinux', create=True) as selinux:
selinux.lsetfilecon.return_value = 0
assert am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False) is True
selinux.lsetfilecon.assert_called_with('/path/to/file', 'sp_u:sp_r:sp_t:s0')
| TestSELinuxMU |
python | langchain-ai__langchain | libs/partners/prompty/langchain_prompty/core.py | {
"start": 9073,
"end": 10371
} | class ____:
"""Class for reading frontmatter from a string or file."""
_yaml_delim = r"(?:---|\+\+\+)"
_yaml = r"(.*?)"
_content = r"\s*(.+)$"
_re_pattern = r"^\s*" + _yaml_delim + _yaml + _yaml_delim + _content
_regex = re.compile(_re_pattern, re.S | re.M)
@classmethod
def read_file(cls, path: str) -> dict[str, Any]:
"""Reads file at path and returns dict with separated frontmatter.
See read() for more info on dict return value.
"""
with open(path, encoding="utf-8") as file:
file_contents = file.read()
return cls.read(file_contents)
@classmethod
def read(cls, string: str) -> dict[str, Any]:
"""Returns dict with separated frontmatter from string.
Returned dict keys:
- attributes: extracted YAML attributes in dict form.
- body: string contents below the YAML separators
- frontmatter: string representation of YAML
"""
fmatter = ""
body = ""
result = cls._regex.search(string)
if result:
fmatter = result.group(1)
body = result.group(2)
return {
"attributes": yaml.safe_load(fmatter),
"body": body,
"frontmatter": fmatter,
}
| Frontmatter |
python | kamyu104__LeetCode-Solutions | Python/zero-array-transformation-iv.py | {
"start": 1128,
"end": 1663
} | class ____(object):
def minZeroArray(self, nums, queries):
"""
:type nums: List[int]
:type queries: List[List[int]]
:rtype: int
"""
dp = [{0} for _ in xrange(len(nums))]
for i, (l, r, v) in enumerate(queries):
if all(nums[i] in dp[i] for i in xrange(len(dp))):
return i
for j in xrange(l, r+1):
dp[j] |= set(x+v for x in dp[j])
return len(queries) if all(nums[i] in dp[i] for i in xrange(len(dp))) else -1
| Solution2 |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_shape_base.py | {
"start": 1323,
"end": 2543
} | class ____(TestCase):
def test_0D_array(self):
a = array(1)
b = array(2)
res = [atleast_1d(a), atleast_1d(b)]
desired = [array([1]), array([2])]
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1, 2])
b = array([2, 3])
res = [atleast_1d(a), atleast_1d(b)]
desired = [array([1, 2]), array([2, 3])]
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
res = [atleast_1d(a), atleast_1d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_3D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
a = array([a, a])
b = array([b, b])
res = [atleast_1d(a), atleast_1d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_r1array(self):
"""Test to make sure equivalent Travis O's r1array function"""
assert atleast_1d(3).shape == (1,)
assert atleast_1d(3j).shape == (1,)
assert atleast_1d(3.0).shape == (1,)
assert atleast_1d([[2, 3], [4, 5]]).shape == (2, 2)
| TestAtleast1d |
python | bokeh__bokeh | src/bokeh/models/annotations/geometry.py | {
"start": 2407,
"end": 3348
} | class ____(Model):
""" Allows to style line, fill and hatch visuals. """
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
line_props = Include(ScalarLineProps, help="""
The {prop} values for the box.
""")
fill_props = Include(ScalarFillProps, help="""
The {prop} values for the box.
""")
hatch_props = Include(ScalarHatchProps, help="""
The {prop} values for the box.
""")
hover_line_props = Include(ScalarLineProps, prefix="hover", help="""
The {prop} values for the box when hovering over.
""")
hover_fill_props = Include(ScalarFillProps, prefix="hover", help="""
The {prop} values for the box when hovering over.
""")
hover_hatch_props = Include(ScalarHatchProps, prefix="hover", help="""
The {prop} values for the box when hovering over.
""")
| AreaVisuals |
python | donnemartin__system-design-primer | solutions/object_oriented_design/parking_lot/parking_lot.py | {
"start": 2448,
"end": 3070
} | class ____(object):
def __init__(self, level, row, spot_number, spot_size, vehicle_size):
self.level = level
self.row = row
self.spot_number = spot_number
self.spot_size = spot_size
self.vehicle_size = vehicle_size
self.vehicle = None
def is_available(self):
return True if self.vehicle is None else False
def can_fit_vehicle(self, vehicle):
if self.vehicle is not None:
return False
return vehicle.can_fit_in_spot(self)
def park_vehicle(self, vehicle):
pass
def remove_vehicle(self):
pass
| ParkingSpot |
python | walkccc__LeetCode | solutions/93. Restore IP Addresses/93.py | {
"start": 0,
"end": 663
} | class ____:
def restoreIpAddresses(self, s: str) -> list[str]:
ans = []
def dfs(start: int, path: list[int]) -> None:
if len(path) == 4 and start == len(s):
ans.append(path[0] + '.' + path[1] + '.' + path[2] + '.' + path[3])
return
if len(path) == 4 or start == len(s):
return
for length in range(1, 4):
if start + length > len(s):
return # out-of-bounds
if length > 1 and s[start] == '0':
return # leading '0'
num = s[start: start + length]
if int(num) > 255:
return
dfs(start + length, path + [num])
dfs(0, [])
return ans
| Solution |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/file_metadata.py | {
"start": 247,
"end": 851
} | class ____(BaseModel):
id: str
"""Unique object identifier.
The format and length of IDs may change over time.
"""
created_at: datetime
"""RFC 3339 datetime string representing when the file was created."""
filename: str
"""Original filename of the uploaded file."""
mime_type: str
"""MIME type of the file."""
size_bytes: int
"""Size of the file in bytes."""
type: Literal["file"]
"""Object type.
For files, this is always `"file"`.
"""
downloadable: Optional[bool] = None
"""Whether the file can be downloaded."""
| FileMetadata |
python | jina-ai__jina | tests/integration/gateway_clients/test_executor_timeout_failures.py | {
"start": 95,
"end": 1712
} | class ____(Executor):
@requests
def foo(self, *args, **kwargs):
time.sleep(0.2)
def _test_error(flow_kwargs, add_kwargs, error_port=None):
f = Flow(**flow_kwargs).add(**add_kwargs)
with f:
with pytest.raises(ConnectionError) as err_info:
f.index(inputs=[])
if error_port:
assert str(error_port) in err_info.value.args[0]
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_headless_exec_timeout(port_generator, protocol):
exec_port = port_generator()
flow_kwargs = {'timeout_send': 1, 'protocol': protocol}
add_kwargs = {'uses': SlowExecutor, 'port': exec_port}
# we have to do this in a new process because otherwise grpc will be sad and everything will crash :(
p = multiprocessing.Process(
target=_test_error, args=(flow_kwargs, add_kwargs, exec_port)
)
p.start()
p.join()
assert (
p.exitcode == 0
) # if exitcode != 0 then test in other process did not pass and this should fail
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_headfull_exec_timeout(port_generator, protocol):
flow_kwargs = {'timeout_send': 1, 'protocol': protocol}
add_kwargs = {'uses': SlowExecutor, 'shards': 2}
# we have to do this in a new process because otherwise grpc will be sad and everything will crash :(
p = multiprocessing.Process(target=_test_error, args=(flow_kwargs, add_kwargs))
p.start()
p.join()
assert (
p.exitcode == 0
) # if exitcode != 0 then test in other process did not pass and this should fail
| SlowExecutor |
python | django__django | django/contrib/messages/storage/session.py | {
"start": 214,
"end": 1764
} | class ____(BaseStorage):
"""
Store messages in the session (that is, django.contrib.sessions).
"""
session_key = "_messages"
def __init__(self, request, *args, **kwargs):
if not hasattr(request, "session"):
raise ImproperlyConfigured(
"The session-based temporary message storage requires session "
"middleware to be installed, and come before the message "
"middleware in the MIDDLEWARE list."
)
super().__init__(request, *args, **kwargs)
def _get(self, *args, **kwargs):
"""
Retrieve a list of messages from the request's session. This storage
always stores everything it is given, so return True for the
all_retrieved flag.
"""
return (
self.deserialize_messages(self.request.session.get(self.session_key)),
True,
)
def _store(self, messages, response, *args, **kwargs):
"""
Store a list of messages to the request's session.
"""
if messages:
self.request.session[self.session_key] = self.serialize_messages(messages)
else:
self.request.session.pop(self.session_key, None)
return []
def serialize_messages(self, messages):
encoder = MessageEncoder()
return encoder.encode(messages)
def deserialize_messages(self, data):
if data and isinstance(data, str):
return json.loads(data, cls=MessageDecoder)
return data
| SessionStorage |
python | dagster-io__dagster | examples/airlift-migration-tutorial/tutorial_example/shared/export_duckdb_to_csv.py | {
"start": 115,
"end": 907
} | class ____:
table_name: str
csv_path: Path
duckdb_path: Path
duckdb_database_name: str
duckdb_schema: Optional[str] = None
def export_duckdb_to_csv(args: ExportDuckDbToCsvArgs) -> None:
duckdb_path, table_name = args.duckdb_path, args.table_name
if not duckdb_path.exists():
raise ValueError(f"DuckDB database not found at {duckdb_path}")
# Connect to DuckDB and create a new table
con = duckdb.connect(str(duckdb_path))
qualified_table = (
f"{args.duckdb_schema}.{args.table_name}" if args.duckdb_schema else table_name
)
df = con.execute(f"SELECT * FROM {args.duckdb_database_name}.{qualified_table}").df()
con.close()
# Write the dataframe to a CSV file
df.to_csv(args.csv_path, index=False)
| ExportDuckDbToCsvArgs |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/selector/subset_selector.py | {
"start": 1287,
"end": 2491
} | class ____(
NamedTuple(
"_OpSelectionData",
[
("op_selection", Sequence[str]),
("resolved_op_selection", AbstractSet[str]),
("parent_job_def", "JobDefinition"),
],
)
):
"""The data about op selection.
Args:
op_selection (List[str]): The queries of op selection.
resolved_op_selection (AbstractSet[str]): The names of selected ops.
parent_job_def (JobDefinition): The definition of the full job. This is used for constructing
pipeline snapshot lineage.
"""
def __new__(
cls,
op_selection: Sequence[str],
resolved_op_selection: AbstractSet[str],
parent_job_def: "JobDefinition",
):
from dagster._core.definitions.job_definition import JobDefinition
return super().__new__(
cls,
op_selection=check.sequence_param(op_selection, "op_selection", str),
resolved_op_selection=check.set_param(
resolved_op_selection, "resolved_op_selection", str
),
parent_job_def=check.inst_param(parent_job_def, "parent_job_def", JobDefinition),
)
@record
| OpSelectionData |
python | doocs__leetcode | solution/0000-0099/0077.Combinations/Solution.py | {
"start": 0,
"end": 385
} | class ____:
def combine(self, n: int, k: int) -> List[List[int]]:
def dfs(i: int):
if len(t) == k:
ans.append(t[:])
return
if i > n:
return
t.append(i)
dfs(i + 1)
t.pop()
dfs(i + 1)
ans = []
t = []
dfs(1)
return ans
| Solution |
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/ruff/newlines.py | {
"start": 1842,
"end": 4687
} | class ____:
if sys.version_info >= (3, 11):
def joinpath(self): ...
# The .open method comes from pathlib.pyi and should be kept in sync.
@overload
def open(self): ...
def fakehttp():
class FakeHTTPConnection:
if mock_close:
def close(self):
pass
FakeHTTPConnection.fakedata = fakedata
if True:
if False:
def x():
def y():
pass
#comment
print()
if True:
def a():
return 1
else:
pass
if True:
# fmt: off
def a():
return 1
# fmt: on
else:
pass
match True:
case 1:
def a():
return 1
case 1:
def a():
return 1
try:
def a():
return 1
except RuntimeError:
def a():
return 1
try:
def a():
return 1
finally:
def a():
return 1
try:
def a():
return 1
except RuntimeError:
def a():
return 1
except ZeroDivisionError:
def a():
return 1
else:
def a():
return 1
finally:
def a():
return 1
if raw:
def show_file(lines):
for line in lines:
pass
# Trailing comment not on function or class
else:
pass
# NOTE: Please keep this the last block in this file. This tests that we don't insert
# empty line(s) at the end of the file due to nested function
if True:
def nested_trailing_function():
pass
def overload1(): ... # trailing comment
def overload1(a: int): ...
def overload2(): ... # trailing comment
def overload2(a: int): ...
def overload3():
...
# trailing comment
def overload3(a: int): ...
def overload4():
...
# trailing comment
def overload4(a: int): ...
# In preview, we preserve these newlines at the start of functions:
def preserved1():
return 1
def preserved2():
pass
def preserved3():
def inner(): ...
def preserved4():
def inner():
print("with a body")
return 1
return 2
def preserved5():
...
# trailing comment prevents collapsing the stub
def preserved6():
# Comment
return 1
def preserved7():
# comment
# another line
# and a third
return 0
def preserved8(): # this also prevents collapsing the stub
...
# But we still discard these newlines:
def removed1():
"Docstring"
return 1
def removed2():
...
def removed3():
... # trailing same-line comment does not prevent collapsing the stub
# And we discard empty lines after the first:
def partially_preserved1():
return 1
# We only preserve blank lines, not add new ones
def untouched1():
# comment
return 0
def untouched2():
# comment
return 0
def untouched3():
# comment
# another line
# and a third
return 0
| Path |
python | kamyu104__LeetCode-Solutions | Python/find-eventual-safe-states.py | {
"start": 39,
"end": 662
} | class ____(object):
def eventualSafeNodes(self, graph):
"""
:type graph: List[List[int]]
:rtype: List[int]
"""
WHITE, GRAY, BLACK = range(3)
def dfs(graph, node, lookup):
if lookup[node] != WHITE:
return lookup[node] == BLACK
lookup[node] = GRAY
if any(not dfs(graph, child, lookup) for child in graph[node]):
return False
lookup[node] = BLACK
return True
lookup = [WHITE]*len(graph)
return filter(lambda node: dfs(graph, node, lookup), xrange(len(graph)))
| Solution |
python | sympy__sympy | sympy/functions/elementary/hyperbolic.py | {
"start": 32699,
"end": 34682
} | class ____(ReciprocalHyperbolicFunction):
r"""
``csch(x)`` is the hyperbolic cosecant of ``x``.
The hyperbolic cosecant function is $\frac{2}{e^x - e^{-x}}$
Examples
========
>>> from sympy import csch
>>> from sympy.abc import x
>>> csch(x)
csch(x)
See Also
========
sympy.functions.elementary.hyperbolic.sinh
sympy.functions.elementary.hyperbolic.cosh
sympy.functions.elementary.hyperbolic.tanh
sympy.functions.elementary.hyperbolic.sech
sympy.functions.elementary.hyperbolic.asinh
sympy.functions.elementary.hyperbolic.acosh
"""
_reciprocal_of = sinh
_is_odd = True
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function
"""
if argindex == 1:
return -coth(self.args[0]) * csch(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
"""
Returns the next term in the Taylor series expansion
"""
if n == 0:
return 1/sympify(x)
elif n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
B = bernoulli(n + 1)
F = factorial(n + 1)
return 2 * (1 - 2**n) * B/F * x**n
def _eval_rewrite_as_sin(self, arg, **kwargs):
return I / sin(I * arg, evaluate=False)
def _eval_rewrite_as_csc(self, arg, **kwargs):
return I * csc(I * arg, evaluate=False)
def _eval_rewrite_as_cosh(self, arg, **kwargs):
return I / cosh(arg + I * pi / 2, evaluate=False)
def _eval_rewrite_as_sinh(self, arg, **kwargs):
return 1 / sinh(arg)
def _eval_is_positive(self):
if self.args[0].is_extended_real:
return self.args[0].is_positive
def _eval_is_negative(self):
if self.args[0].is_extended_real:
return self.args[0].is_negative
| csch |
python | django__django | tests/urlpatterns/tests.py | {
"start": 15454,
"end": 16514
} | class ____(SimpleTestCase):
def test_integer_parameter_name_causes_exception(self):
msg = (
"URL route 'hello/<int:1>/' uses parameter name '1' which isn't "
"a valid Python identifier."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
path(r"hello/<int:1>/", lambda r: None)
def test_non_identifier_parameter_name_causes_exception(self):
msg = (
"URL route 'b/<int:book.id>/' uses parameter name 'book.id' which "
"isn't a valid Python identifier."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
path(r"b/<int:book.id>/", lambda r: None)
def test_allows_non_ascii_but_valid_identifiers(self):
# \u0394 is "GREEK CAPITAL LETTER DELTA", a valid identifier.
p = path("hello/<str:\u0394>/", lambda r: None)
match = p.resolve("hello/1/")
self.assertEqual(match.kwargs, {"\u0394": "1"})
@override_settings(ROOT_URLCONF="urlpatterns.path_dynamic_urls")
| ParameterRestrictionTests |
python | huggingface__transformers | src/transformers/models/moshi/modeling_moshi.py | {
"start": 7190,
"end": 9742
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `text_labels` is provided):
Text language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the text language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
depth_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `audio_labels` is provided):
Audio language modeling loss (for next-token prediction).
audio_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the audio language modeling heads.
depth_past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Past key-values of the depth decoder.
depth_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Hidden states of the depth decoder
depth_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Depth decoder's Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
last_hidden_state: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
depth_loss: Optional[torch.FloatTensor] = None
audio_logits: Optional[torch.FloatTensor] = None
depth_past_key_values: Optional[Cache] = None
depth_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
depth_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring
| MoshiConditionalGenerationOutputWithPast |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_cloud_batch.py | {
"start": 5335,
"end": 6583
} | class ____:
@mock.patch(CLOUD_BATCH_HOOK_PATH)
def test_execute(self, hook_mock):
filter = "filter_description"
limit = 2
job_name = "test_job"
operator = CloudBatchListTasksOperator(
task_id=TASK_ID,
project_id=PROJECT_ID,
region=REGION,
job_name=job_name,
filter=filter,
limit=limit,
)
operator.execute(context=mock.MagicMock())
hook_mock.return_value.list_tasks.assert_called_once_with(
region=REGION,
project_id=PROJECT_ID,
filter=filter,
job_name=job_name,
limit=limit,
group_name="group0",
)
@mock.patch(CLOUD_BATCH_HOOK_PATH)
def test_execute_with_invalid_limit(self, hook_mock):
filter = "filter_description"
limit = -1
job_name = "test_job"
with pytest.raises(expected_exception=AirflowException):
CloudBatchListTasksOperator(
task_id=TASK_ID,
project_id=PROJECT_ID,
region=REGION,
job_name=job_name,
filter=filter,
limit=limit,
)
| TestCloudBatchListTasksOperator |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 846034,
"end": 846834
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for ProjectV2FieldConfiguration."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("ProjectV2FieldConfigurationEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("ProjectV2FieldConfiguration"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| ProjectV2FieldConfigurationConnection |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/base.py | {
"start": 1050,
"end": 1238
} | class ____(PydanticBaseModel):
"""
Base pydantic model for REST API.
:meta private:
"""
model_config = ConfigDict(from_attributes=True, populate_by_name=True)
| BaseModel |
python | jina-ai__jina | tests/integration/gateway_clients/test_streaming.py | {
"start": 1044,
"end": 16282
} | class ____:
def send_discover_endpoint(self, *args, **kwargs):
async def task_wrapper():
from jina.constants import __default_endpoint__
from jina.proto import jina_pb2
ep = jina_pb2.EndpointsProto()
ep.endpoints.extend([__default_endpoint__])
return ep, None
return asyncio.create_task(task_wrapper())
def send_requests_once(
self,
requests,
deployment: str,
head: bool,
metadata: dict = None,
shard_id=None,
endpoint: str = None,
timeout: float = 1.0,
retries: int = -1,
) -> asyncio.Task:
assert head
request = requests[0]
if not hasattr(self, '_docs'):
self._docs = DocumentArray()
async def _compute_response():
response_msg = copy.deepcopy(request)
exec_endpoint = request.header.exec_endpoint
new_docs = DocumentArray()
await asyncio.sleep(0.1)
if deployment == 'indexer-executor':
if exec_endpoint == '/index':
time.sleep(0.1)
self._docs.extend(request.docs)
else:
docs = response_msg.docs
docs.clear()
docs.extend(
DocumentArray(Document(tags={'ids': self._docs[:, 'id']}))
)
response_msg.data.docs = docs
return response_msg
else:
if deployment == 'slow-executor':
await asyncio.sleep(SLOW_EXECUTOR_SLEEP_TIME)
for doc in request.docs:
new_doc = Document(doc, copy=True)
new_doc.tags['executor'] = time.time()
print(
f'in {deployment}, {new_doc.id} => time: {readable_time_from(new_doc.tags["executor"])}, {new_doc.tags["executor"]}',
flush=True,
)
new_docs.append(new_doc)
docs = response_msg.docs
docs.clear()
docs.extend(new_docs)
response_msg.data.docs = docs
return response_msg
async def task_wrapper():
response_msg = await _compute_response()
return response_msg, {}
return asyncio.create_task(task_wrapper())
def readable_time_from(t):
return datetime.utcfromtimestamp(t).strftime('%M:%S:%f')
def get_document(i, name):
t = time.time()
print(f'in {name} {i}, time: {readable_time_from(t)}, {t}', flush=True)
return Document(id=f'id-{i}', tags={'input_gen': t})
def blocking_gen():
"""Fast synchronous client generator"""
for i in range(INPUT_LEN):
yield get_document(i, name='blocking_gen')
time.sleep(0.1)
async def async_gen():
"""Fast async client generator"""
for i in range(INPUT_LEN):
yield get_document(i, name='async_gen')
await asyncio.sleep(0.1)
def slow_blocking_gen():
"""Slow synchronous client generator"""
for i in range(INPUT_LEN):
yield get_document(i, name='slow_blocking_gen')
time.sleep(INPUT_GEN_SLEEP_TIME)
async def slow_async_gen():
"""Slow async client generator"""
for i in range(INPUT_LEN):
yield get_document(i, name='slow_async_gen')
await asyncio.sleep(INPUT_GEN_SLEEP_TIME)
def on_done(response, final_da: DocumentArray):
docs = response.docs
for doc in docs:
doc.tags['on_done'] = time.time()
print(
f'in on_done {doc.id}, time: {readable_time_from(doc.tags["on_done"])}',
flush=True,
)
final_da.extend(docs)
def create_runtime(graph_dict: Dict, protocol: str, port: int, prefetch: int = 0):
import json
graph_description = json.dumps(graph_dict)
args = set_gateway_parser().parse_args(
[
'--port',
f'{port}',
'--graph-description',
f'{graph_description}',
'--deployments-addresses',
'{}',
'--prefetch',
f'{prefetch}',
'--protocol',
protocol,
]
)
with AsyncNewLoopRuntime(args, req_handler_cls=GatewayRequestHandler) as runtime:
runtime.run_forever()
@pytest.mark.parametrize(
'protocol, inputs',
[
('grpc', slow_async_gen),
pytest.param(
'grpc',
slow_blocking_gen,
marks=pytest.mark.skip(
reason='grpc client + sync generator with time.sleep is expected to fail'
),
),
('websocket', slow_async_gen),
('websocket', slow_blocking_gen),
('http', slow_async_gen),
('http', slow_blocking_gen),
],
)
def test_disable_prefetch_slow_client_fast_executor(
protocol, inputs, monkeypatch, simple_graph_dict_fast
):
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyMockConnectionPool.send_requests_once,
)
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_discover_endpoint',
DummyMockConnectionPool.send_discover_endpoint,
)
port = random_port()
p = multiprocessing.Process(
target=create_runtime,
kwargs={
'protocol': protocol,
'port': port,
'graph_dict': simple_graph_dict_fast,
},
)
p.start()
time.sleep(1.0)
final_da = DocumentArray()
client = Client(protocol=protocol, port=port)
client.post(
on='/',
inputs=inputs,
request_size=1,
on_done=lambda response: on_done(response, final_da),
)
p.terminate()
p.join()
assert len(final_da) == INPUT_LEN
# Since the input_gen is slow, order will always be gen -> exec -> on_done for every request
assert final_da['id-0'].tags['input_gen'] < final_da['id-0'].tags['executor']
assert final_da['id-0'].tags['executor'] < final_da['id-0'].tags['on_done']
assert final_da['id-0'].tags['on_done'] < final_da['id-1'].tags['input_gen']
assert final_da['id-1'].tags['input_gen'] < final_da['id-1'].tags['executor']
assert final_da['id-1'].tags['executor'] < final_da['id-1'].tags['on_done']
assert final_da['id-1'].tags['on_done'] < final_da['id-2'].tags['input_gen']
assert final_da['id-2'].tags['input_gen'] < final_da['id-2'].tags['executor']
assert final_da['id-2'].tags['executor'] < final_da['id-2'].tags['on_done']
assert final_da['id-2'].tags['on_done'] < final_da['id-3'].tags['input_gen']
assert final_da['id-3'].tags['input_gen'] < final_da['id-3'].tags['executor']
assert final_da['id-3'].tags['executor'] < final_da['id-3'].tags['on_done']
@pytest.mark.parametrize(
'protocol, inputs',
[
('grpc', async_gen),
('grpc', blocking_gen),
('websocket', async_gen),
('websocket', blocking_gen),
('http', async_gen),
('http', blocking_gen),
],
)
def test_disable_prefetch_fast_client_slow_executor(
protocol, inputs, monkeypatch, simple_graph_dict_slow
):
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyMockConnectionPool.send_requests_once,
)
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_discover_endpoint',
DummyMockConnectionPool.send_discover_endpoint,
)
port = random_port()
final_da = DocumentArray()
p = multiprocessing.Process(
target=create_runtime,
kwargs={
'protocol': protocol,
'port': port,
'graph_dict': simple_graph_dict_slow,
},
)
p.start()
time.sleep(1.0)
client = Client(protocol=protocol, port=port)
client.post(
on='/',
inputs=inputs,
request_size=1,
on_done=lambda response: on_done(response, final_da),
)
p.terminate()
p.join()
assert len(final_da) == INPUT_LEN
# since Executor is slow, all client inputs should be read before 1st request exits from Executor.
assert (
final_da['id-0'].id < final_da['id-1'].id
), f'ids are not ordered with times {final_da["id-0"].tags["input_gen"]} and {final_da["id-1"].tags["input_gen"]}'
assert (
final_da['id-1'].id < final_da['id-2'].id
), f'ids are not ordered with times {final_da["id-1"].tags["input_gen"]} and {final_da["id-2"].tags["input_gen"]}'
assert (
final_da['id-2'].id < final_da['id-3'].id
), f'ids are not ordered with times {final_da["id-2"].tags["input_gen"]} and {final_da["id-3"].tags["input_gen"]}'
assert final_da['id-0'].tags['input_gen'] < final_da['id-1'].tags['input_gen']
assert final_da['id-1'].tags['input_gen'] < final_da['id-2'].tags['input_gen']
assert final_da['id-2'].tags['input_gen'] < final_da['id-3'].tags['input_gen']
assert final_da['id-3'].tags['input_gen'] < final_da['id-0'].tags['executor']
# At least 1 request should reache `on_done` before all requests are processed in the Executor.
# Validates that the requests are not pending at the Executor
first_on_done_time = min(i.tags['on_done'] for i in final_da)
last_executor_time = max(i.tags['executor'] for i in final_da)
assert first_on_done_time < last_executor_time
def _search_first_and_last_prefix_occurrence(
search_prefix: str, search_list: List[str]
):
first_idx, last_idx = None, None
for idx, value in enumerate(search_list):
if value.startswith(search_prefix):
if not first_idx:
first_idx = idx
else:
last_idx = idx
return first_idx, last_idx
@pytest.mark.parametrize('prefetch', [0, 5])
@pytest.mark.parametrize('protocol', ['websocket', 'http', 'grpc'])
@pytest.mark.parametrize('use_stream', [True, False])
@pytest.mark.parametrize('prefetch_implementation', ['gateway', 'client'])
def test_multiple_clients(
prefetch,
protocol,
monkeypatch,
simple_graph_dict_indexer,
use_stream,
prefetch_implementation,
):
if not use_stream and protocol != 'grpc':
return
GOOD_CLIENTS = 5
GOOD_CLIENT_NUM_DOCS = 20
MALICIOUS_CLIENT_NUM_DOCS = 50
port = random_port()
gateway_kwargs = {
'protocol': protocol,
'port': port,
'graph_dict': simple_graph_dict_indexer,
}
client_prefetch = None
if prefetch_implementation == 'gateway':
gateway_kwargs['prefetch'] = prefetch
else:
client_prefetch = prefetch
def get_document(i):
return Document(
id=f'{multiprocessing.current_process().name}_{i}',
text=str(bytes(bytearray(os.urandom(512 * 4)))),
)
async def good_client_gen():
for i in range(GOOD_CLIENT_NUM_DOCS):
yield get_document(i)
await asyncio.sleep(0.1)
async def malicious_client_gen():
for i in range(1000, 1000 + MALICIOUS_CLIENT_NUM_DOCS):
yield get_document(i)
def client(gen, port):
Client(protocol=protocol, port=port).post(
on='/index',
inputs=gen,
request_size=1,
stream=use_stream,
prefetch=client_prefetch,
)
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyMockConnectionPool.send_requests_once,
)
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_discover_endpoint',
DummyMockConnectionPool.send_discover_endpoint,
)
pool = []
runtime_process = multiprocessing.Process(
target=create_runtime,
kwargs=gateway_kwargs,
)
runtime_process.start()
time.sleep(1.0)
# We have 5 good clients connecting to the same gateway. They have controlled requests.
# Each client sends `GOOD_CLIENT_NUM_DOCS` (20) requests and sleeps after each request.
for i in range(GOOD_CLIENTS):
cp = multiprocessing.Process(
target=partial(client, good_client_gen, port),
name=f'goodguy_{i}',
)
cp.start()
pool.append(cp)
# and 1 malicious client, sending lot of requests (trying to block others)
cp = multiprocessing.Process(
target=partial(client, malicious_client_gen, port),
name='badguy',
)
cp.start()
pool.append(cp)
for p in pool:
p.join()
order_of_ids = list(
Client(protocol=protocol, port=port)
.post(on='/status', inputs=[Document()], stream=use_stream)[0]
.tags['ids']
)
# There must be total 150 docs indexed.
runtime_process.terminate()
runtime_process.join()
assert (
len(order_of_ids)
== GOOD_CLIENTS * GOOD_CLIENT_NUM_DOCS + MALICIOUS_CLIENT_NUM_DOCS
)
(
malicious_client_first_response_idx,
malicious_client_last_response_idx,
) = _search_first_and_last_prefix_occurrence('badguy', order_of_ids)
"""
If prefetch is disabled, clients can freeflow requests. No client is blocked.
Hence last 20 requests go from `goodguy`.
(Ideally last 30 requests should be validated, to avoid flaky CI, we test last 20)
When there are no rules, badguy wins! With rule, you find balance in the world.
"""
# first response will always be from one of the good clients
assert order_of_ids[0].split('_')[0] == 'goodguy'
# first response from malicilious client will (mostly) appear after the first response from a good client
assert malicious_client_first_response_idx >= 0
if prefetch == 0:
# mailicious client will finish before all the good clients
assert malicious_client_last_response_idx < (len(order_of_ids) - 1)
if prefetch == 5:
if prefetch_implementation == 'gateway' and not use_stream:
# gateway with prefetch will complete the full iteration from the gateway recieves
# the full malicious client request iterator which is processed with prefetch
assert set(map(lambda x: x.split('_')[0], order_of_ids[-10:])) == {
'goodguy'
}
# mailicious client will finish before all the good clients
assert malicious_client_last_response_idx < (len(order_of_ids) - 1)
elif prefetch_implementation == 'gateway' and protocol == 'http':
# gateway with prefetch will complete the full iteration from the gateway recieves
# the full malicious client request iterator which is processed with prefetch
# and due to the request <-> response coupling
assert set(map(lambda x: x.split('_')[0], order_of_ids[-10:])) == {
'goodguy'
}
# mailicious client will finish before all the good clients
assert malicious_client_last_response_idx < (len(order_of_ids) - 1)
else:
assert set(map(lambda x: x.split('_')[0], order_of_ids[-10:])) == {'badguy'}
# mailicious client will finish last
assert malicious_client_last_response_idx == (len(order_of_ids) - 1)
| DummyMockConnectionPool |
python | ray-project__ray | python/ray/train/examples/horovod/horovod_pytorch_example.py | {
"start": 548,
"end": 8268
} | class ____(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x)
def setup(config):
data_dir = config.get("data_dir", None)
seed = config.get("seed", 42)
batch_size = config.get("batch_size", 64)
use_adasum = config.get("use_adasum", False)
lr = config.get("lr", 0.01)
momentum = config.get("momentum", 0.5)
use_cuda = config.get("use_cuda", False)
# Horovod: initialize library.
hvd.init()
torch.manual_seed(seed)
if use_cuda:
# Horovod: pin GPU to local rank.
torch.cuda.set_device(hvd.local_rank())
torch.cuda.manual_seed(seed)
# Horovod: limit # of CPU threads to be used per worker.
torch.set_num_threads(1)
kwargs = {"pin_memory": True} if use_cuda else {}
data_dir = data_dir or "~/data"
with FileLock(os.path.expanduser("~/.horovod_lock")):
train_dataset = datasets.MNIST(
data_dir,
train=True,
download=True,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
)
# Horovod: use DistributedSampler to partition the training data.
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset, num_replicas=hvd.size(), rank=hvd.rank()
)
# Note, don't set `num_workers` in DataLoader (not even 1),
# as that will separately start multiple processes (each corresponding to 1 worker)
# to load the data. This is known to cause issues with Ray.
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, sampler=train_sampler, **kwargs
)
model = Net()
# By default, Adasum doesn't need scaling up learning rate.
lr_scaler = hvd.size() if not use_adasum else 1
if use_cuda:
# Move model to GPU.
model.cuda()
# If using GPU Adasum allreduce, scale learning rate by local_size.
if use_adasum and hvd.nccl_built():
lr_scaler = hvd.local_size()
# Horovod: scale learning rate by lr_scaler.
optimizer = optim.SGD(model.parameters(), lr=lr * lr_scaler, momentum=momentum)
# Horovod: wrap optimizer with DistributedOptimizer.
optimizer = hvd.DistributedOptimizer(
optimizer,
named_parameters=model.named_parameters(),
op=hvd.Adasum if use_adasum else hvd.Average,
)
return model, optimizer, train_loader, train_sampler
def train_epoch(
model, optimizer, train_sampler, train_loader, epoch, log_interval, use_cuda
):
loss = None
model.train()
# Horovod: set epoch to sampler for shuffling.
train_sampler.set_epoch(epoch)
for batch_idx, (data, target) in enumerate(train_loader):
if use_cuda:
data, target = data.cuda(), target.cuda()
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
# Horovod: use train_sampler to determine the number of
# examples in this worker's partition.
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(data),
len(train_sampler),
100.0 * batch_idx / len(train_loader),
loss.item(),
)
)
return loss.item() if loss else None
def train_func(config):
num_epochs = config.get("num_epochs", 10)
log_interval = config.get("log_interval", 10)
use_cuda = config.get("use_cuda", False)
model, optimizer, train_loader, train_sampler = setup(config)
results = []
for epoch in range(num_epochs):
loss = train_epoch(
model, optimizer, train_sampler, train_loader, epoch, log_interval, use_cuda
)
results.append(loss)
with tempfile.TemporaryDirectory() as tmpdir:
torch.save(model.state_dict(), os.path.join(tmpdir, "model.pt"))
train.report({"loss": loss}, checkpoint=Checkpoint.from_directory(tmpdir))
# Only used for testing.
return results
def main(num_workers, use_gpu, kwargs):
trainer = HorovodTrainer(
train_loop_per_worker=train_func,
train_loop_config={
"num_epochs": kwargs["num_epochs"],
"log_interval": kwargs["log_interval"],
"use_cuda": kwargs["use_cuda"],
},
scaling_config=ScalingConfig(num_workers=num_workers, use_gpu=use_gpu),
)
result = trainer.fit()
print(result)
if __name__ == "__main__":
# Training settings
parser = argparse.ArgumentParser(
description="PyTorch MNIST Example",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 64)",
)
parser.add_argument(
"--num-epochs",
type=int,
default=5,
metavar="N",
help="number of epochs to train (default: 10)",
)
parser.add_argument(
"--lr",
type=float,
default=0.01,
metavar="LR",
help="learning rate (default: 0.01)",
)
parser.add_argument(
"--momentum",
type=float,
default=0.5,
metavar="M",
help="SGD momentum (default: 0.5)",
)
parser.add_argument(
"--use-gpu", action="store_true", default=False, help="enables CUDA training"
)
parser.add_argument(
"--seed", type=int, default=42, metavar="S", help="random seed (default: 42)"
)
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument(
"--use-adasum",
action="store_true",
default=False,
help="use adasum algorithm to do reduction",
)
parser.add_argument(
"--num-workers",
type=int,
default=2,
help="Number of Ray workers to use for training.",
)
parser.add_argument(
"--data-dir",
help="location of the training dataset in the local filesystem ("
"will be downloaded if needed)",
)
parser.add_argument(
"--address",
required=False,
type=str,
default=None,
help="Address of Ray cluster.",
)
args = parser.parse_args()
if args.address:
ray.init(args.address)
else:
ray.init()
use_cuda = args.use_gpu if args.use_gpu is not None else False
kwargs = {
"data_dir": args.data_dir,
"seed": args.seed,
"use_cuda": use_cuda,
"batch_size": args.batch_size,
"use_adasum": args.use_adasum if args.use_adasum else False,
"lr": args.lr,
"momentum": args.momentum,
"num_epochs": args.num_epochs,
"log_interval": args.log_interval,
}
main(num_workers=args.num_workers, use_gpu=use_cuda, kwargs=kwargs)
| Net |
python | gawel__pyquery | tests/test_pyquery.py | {
"start": 21867,
"end": 26238
} | class ____(TestCase):
html = '''
<div id="div">
<input form="dispersed" name="order" value="spam">
</div>
<form id="dispersed">
<div><input name="order" value="eggs"></div>
<input form="dispersed" name="order" value="ham">
<input form="other-form" name="order" value="nothing">
<input form="" name="order" value="nothing">
</form>
<form id="other-form">
<input form="dispersed" name="order" value="tomato">
</form>
<form class="no-id">
<input form="dispersed" name="order" value="baked beans">
<input name="spam" value="Spam">
</form>
'''
html2 = '''
<form id="first">
<input name="order" value="spam">
<fieldset>
<input name="fieldset" value="eggs">
<input id="input" name="fieldset" value="ham">
</fieldset>
</form>
<form id="datalist">
<datalist><div><input name="datalist" value="eggs"></div></datalist>
<input type="checkbox" name="checkbox" checked>
<input type="radio" name="radio" checked>
</form>
'''
html3 = '''
<form>
<input name="order" value="spam">
<input id="noname" value="sausage">
<fieldset disabled>
<input name="order" value="sausage">
</fieldset>
<input name="disabled" value="ham" disabled>
<input type="submit" name="submit" value="Submit">
<input type="button" name="button" value="">
<input type="image" name="image" value="">
<input type="reset" name="reset" value="Reset">
<input type="file" name="file" value="">
<button type="submit" name="submit" value="submit"></button>
<input type="checkbox" name="spam">
<input type="radio" name="eggs">
</form>
'''
html4 = '''
<form>
<input name="spam" value="Spam/
spam">
<select name="order" multiple>
<option value="baked
beans" selected>
<option value="tomato" selected>
<option value="spam">
</select>
<textarea name="multiline">multiple
lines
of text</textarea>
</form>
'''
def test_serialize_pairs_form_id(self):
d = pq(self.html)
self.assertEqual(d('#div').serialize_pairs(), [])
self.assertEqual(d('#dispersed').serialize_pairs(), [
('order', 'spam'), ('order', 'eggs'), ('order', 'ham'),
('order', 'tomato'), ('order', 'baked beans'),
])
self.assertEqual(d('.no-id').serialize_pairs(), [
('spam', 'Spam'),
])
def test_serialize_pairs_form_controls(self):
d = pq(self.html2)
self.assertEqual(d('fieldset').serialize_pairs(), [
('fieldset', 'eggs'), ('fieldset', 'ham'),
])
self.assertEqual(d('#input, fieldset, #first').serialize_pairs(), [
('order', 'spam'), ('fieldset', 'eggs'), ('fieldset', 'ham'),
('fieldset', 'eggs'), ('fieldset', 'ham'), ('fieldset', 'ham'),
])
self.assertEqual(d('#datalist').serialize_pairs(), [
('datalist', 'eggs'), ('checkbox', 'on'), ('radio', 'on'),
])
def test_serialize_pairs_filter_controls(self):
d = pq(self.html3)
self.assertEqual(d('form').serialize_pairs(), [
('order', 'spam')
])
def test_serialize_pairs_form_values(self):
d = pq(self.html4)
self.assertEqual(d('form').serialize_pairs(), [
('spam', 'Spam/spam'), ('order', 'baked\r\nbeans'),
('order', 'tomato'), ('multiline', 'multiple\r\nlines\r\nof text'),
])
def test_serialize_array(self):
d = pq(self.html4)
self.assertEqual(d('form').serialize_array(), [
{'name': 'spam', 'value': 'Spam/spam'},
{'name': 'order', 'value': 'baked\r\nbeans'},
{'name': 'order', 'value': 'tomato'},
{'name': 'multiline', 'value': 'multiple\r\nlines\r\nof text'},
])
def test_serialize(self):
d = pq(self.html4)
self.assertEqual(
d('form').serialize(),
'spam=Spam%2Fspam&order=baked%0D%0Abeans&order=tomato&'
'multiline=multiple%0D%0Alines%0D%0Aof%20text'
)
def test_serialize_dict(self):
d = pq(self.html4)
self.assertEqual(d('form').serialize_dict(), {
'spam': 'Spam/spam',
'order': ['baked\r\nbeans', 'tomato'],
'multiline': 'multiple\r\nlines\r\nof text',
})
| TestAjax |
python | ApeWorX__ape | src/ape/plugins/network.py | {
"start": 984,
"end": 1813
} | class ____(PluginType):
"""
A network plugin, such as ``mainnet`` or ``ropsten``. Likely, registering networks
will happen soon after registering the ecosystem, as an ecosystem requires
networks.
"""
@hookspec
def networks(self) -> Iterator[tuple[str, str, type["NetworkAPI"]]]: # type: ignore[empty-body]
"""
A hook that must return an iterator of tuples of:
* the target ecosystem plugin's name
* the network name
* a :class:`ape.api.networks.NetworkAPI` subclass
Usage example::
@plugins.register(plugins.NetworkPlugin)
def networks():
yield "ethereum", "ShibaChain", ShibaNetwork
Returns:
Iterator[tuple[str, str, type[:class:`~ape.api.networks.NetworkAPI`]]]
"""
| NetworkPlugin |
python | django__django | tests/queries/models.py | {
"start": 15028,
"end": 15394
} | class ____(models.Model):
title = models.CharField(max_length=10)
owner = models.ForeignKey(BaseUser, models.CASCADE, related_name="owner")
creator = models.ForeignKey(BaseUser, models.CASCADE, related_name="creator")
note = models.ForeignKey(Note, on_delete=models.CASCADE, null=True, blank=True)
def __str__(self):
return self.title
| Task |
python | django__django | tests/fixtures_regress/models.py | {
"start": 3471,
"end": 3877
} | class ____(models.Model):
name = models.CharField(max_length=255)
author = models.ForeignKey(Person, models.CASCADE)
objects = NaturalKeyWithFKDependencyManager()
class Meta:
unique_together = ["name", "author"]
def natural_key(self):
return (self.name,) + self.author.natural_key()
natural_key.dependencies = ["fixtures_regress.Person"]
| NaturalKeyWithFKDependency |
python | PyCQA__pylint | tests/functional/c/ctor_arguments.py | {
"start": 2310,
"end": 2788
} | class ____:
def __init__(self, first, second):
self.first = first
self.second = second
@classmethod
def from_nothing(cls):
return cls(1, 2, 3, 4) # [too-many-function-args]
@classmethod
def from_nothing1(cls):
return cls() # [no-value-for-parameter,no-value-for-parameter]
@classmethod
def from_nothing2(cls):
# +1: [no-value-for-parameter,unexpected-keyword-arg]
return cls(1, not_argument=2)
| Clsmethod |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/schema.py | {
"start": 138504,
"end": 139715
} | class ____(ColumnDefault):
"""Default generator that's specific to the use of a "sentinel" column
when using the insertmanyvalues feature.
This default is used as part of the :func:`_schema.insert_sentinel`
construct.
"""
is_sentinel = True
for_update = False
arg = None
def __new__(cls) -> _InsertSentinelColumnDefault:
return object.__new__(cls)
def __init__(self) -> None:
pass
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
col = cast("Column[Any]", parent)
if not col._insert_sentinel:
raise exc.ArgumentError(
"The _InsertSentinelColumnDefault may only be applied to a "
"Column marked as insert_sentinel=True"
)
elif not col.nullable:
raise exc.ArgumentError(
"The _InsertSentinelColumnDefault may only be applied to a "
"Column that is nullable"
)
super()._set_parent(parent, **kw)
def _copy(self) -> _InsertSentinelColumnDefault:
return _InsertSentinelColumnDefault()
_SQLExprDefault = Union["ColumnElement[Any]", "TextClause"]
| _InsertSentinelColumnDefault |
python | Lightning-AI__lightning | tests/tests_pytorch/checkpointing/test_model_checkpoint.py | {
"start": 36245,
"end": 36409
} | class ____(BoringModel):
def on_train_epoch_end(self):
if self.current_epoch == 1:
raise RuntimeError("Trouble!")
| TroubledModelOnTrainEpochEnd |
python | pytorch__pytorch | torch/testing/_internal/common_dist_composable.py | {
"start": 1335,
"end": 1878
} | class ____(nn.Module):
def __init__(self, device: torch.device):
super().__init__()
self.l = nn.Linear(100, 100, device=device)
self.u1 = UnitModule(device)
self.u2 = UnitModule(device)
self.p = nn.Parameter(torch.randn((100, 100), device=device))
self.register_buffer(
"buffer", torch.randn((100, 100), device=device), persistent=True
)
def forward(self, x):
a = self.u2(self.u1(self.l(x)))
b = self.p
return torch.mm(a, b)
| CompositeParamModel |
python | mahmoud__glom | glom/matching.py | {
"start": 11477,
"end": 12606
} | class ____(_Bool):
"""
Inverts the *child*. Child spec will be expected to raise
:exc:`GlomError` (or subtype), in which case the target will be returned.
If the child spec does not raise :exc:`GlomError`, :exc:`MatchError`
will be raised.
"""
__slots__ = ('child',)
def __init__(self, child):
self.child = child
def glomit(self, target, scope):
try: # one child must match without exception
scope[glom](target, self.child, scope)
except GlomError:
return target
else:
raise GlomError("child shouldn't have passed", self.child)
def _m_repr(self):
if isinstance(self.child, (_MType, _MExpr)):
return True
if type(self.child) not in (And, Or, Not):
return False
return self.child._m_repr()
def __repr__(self):
if self.child is M:
return '~M'
if self._m_repr(): # is in M repr
return "~(" + bbrepr(self.child) + ")"
return "Not(" + bbrepr(self.child) + ")"
_M_OP_MAP = {'=': '==', '!': '!=', 'g': '>=', 'l': '<='}
| Not |
python | jazzband__pip-tools | piptools/cache.py | {
"start": 1790,
"end": 6112
} | class ____:
"""
Create new persistent dependency cache for the current Python version.
The cache file is written to the appropriate user cache dir for the
current platform, i.e.
~/.cache/pip-tools/depcache-pyX.Y.json
Where py indicates the Python implementation.
Where X.Y indicates the Python version.
"""
def __init__(self, cache_dir: str):
os.makedirs(cache_dir, exist_ok=True)
cache_filename = f"depcache-{_implementation_name()}.json"
self._cache_file = os.path.join(cache_dir, cache_filename)
self._cache: CacheDict | None = None
@property
def cache(self) -> CacheDict:
"""
The dictionary that is the actual in-memory cache. This property
lazily loads the cache from disk.
"""
if self._cache is None:
try:
self._cache = read_cache_file(self._cache_file)
except FileNotFoundError:
self._cache = {}
return self._cache
def as_cache_key(self, ireq: InstallRequirement) -> CacheKey:
"""
Given a requirement, return its cache key.
This behavior is a little weird
in order to allow backwards compatibility with cache files. For a requirement
without extras, this will return, for example:
("ipython", "2.1.0")
For a requirement with extras, the extras will be comma-separated and appended
to the version, inside brackets, like so:
("ipython", "2.1.0[nbconvert,notebook]")
"""
name, version, extras = as_tuple(ireq)
if not extras:
extras_string = ""
else:
extras_string = f"[{','.join(extras)}]"
return name, f"{version}{extras_string}"
def write_cache(self) -> None:
"""Write the cache to disk as JSON."""
doc = {"__format__": 1, "dependencies": self._cache}
with open(self._cache_file, "w", encoding="utf-8") as f:
json.dump(doc, f, sort_keys=True)
def clear(self) -> None:
self._cache = {}
self.write_cache()
def __contains__(self, ireq: InstallRequirement) -> bool:
pkgname, pkgversion_and_extras = self.as_cache_key(ireq)
return pkgversion_and_extras in self.cache.get(pkgname, {})
def __getitem__(self, ireq: InstallRequirement) -> list[str]:
pkgname, pkgversion_and_extras = self.as_cache_key(ireq)
return self.cache[pkgname][pkgversion_and_extras]
def __setitem__(self, ireq: InstallRequirement, values: list[str]) -> None:
pkgname, pkgversion_and_extras = self.as_cache_key(ireq)
self.cache.setdefault(pkgname, {})
self.cache[pkgname][pkgversion_and_extras] = values
self.write_cache()
def reverse_dependencies(
self, ireqs: Iterable[InstallRequirement]
) -> dict[str, set[str]]:
"""
Return a lookup table of reverse dependencies for all the given ireqs.
Since this is all static, it only works if the dependency cache
contains the complete data, otherwise you end up with a partial view.
This is typically no problem if you use this function after the entire
dependency tree is resolved.
"""
ireqs_as_cache_values = [self.as_cache_key(ireq) for ireq in ireqs]
return self._reverse_dependencies(ireqs_as_cache_values)
def _reverse_dependencies(
self, cache_keys: Iterable[tuple[str, str]]
) -> dict[str, set[str]]:
"""
Return a lookup table of reverse dependencies for all the given cache keys.
Example input:
[('pep8', '1.5.7'),
('flake8', '2.4.0'),
('mccabe', '0.3'),
('pyflakes', '0.8.1')]
Example output:
{'pep8': ['flake8'],
'flake8': [],
'mccabe': ['flake8'],
'pyflakes': ['flake8']}
"""
# First, collect all the dependencies into a sequence of (parent, child)
# tuples, like [('flake8', 'pep8'), ('flake8', 'mccabe'), ...]
return lookup_table_from_tuples(
(key_from_req(Requirement(dep_name)), name)
for name, version_and_extras in cache_keys
for dep_name in self.cache[name][version_and_extras]
)
| DependencyCache |
python | celery__celery | t/unit/utils/test_timer2.py | {
"start": 112,
"end": 3215
} | class ____:
def test_enter_after(self):
t = timer2.Timer()
try:
done = [False]
def set_done():
done[0] = True
t.call_after(0.3, set_done)
mss = 0
while not done[0]:
if mss >= 2.0:
raise Exception('test timed out')
time.sleep(0.1)
mss += 0.1
finally:
t.stop()
def test_exit_after(self):
t = timer2.Timer()
t.call_after = Mock()
t.exit_after(0.3, priority=10)
t.call_after.assert_called_with(0.3, sys.exit, 10)
def test_ensure_started_not_started(self):
t = timer2.Timer()
t.running = True
t.start = Mock()
t.ensure_started()
t.start.assert_not_called()
t.running = False
t.on_start = Mock()
t.ensure_started()
t.on_start.assert_called_with(t)
t.start.assert_called_with()
@patch('celery.utils.timer2.sleep')
@patch('os._exit') # To ensure the test fails gracefully
def test_on_tick(self, _exit, sleep):
def next_entry_side_effect():
# side effect simulating following scenario:
# 3.33, 3.33, 3.33, <shutdown event set>
for _ in range(3):
yield 3.33
while True:
yield getattr(t, "_Timer__is_shutdown").set()
on_tick = Mock(name='on_tick')
t = timer2.Timer(on_tick=on_tick)
t._next_entry = Mock(
name='_next_entry', side_effect=next_entry_side_effect()
)
t.run()
sleep.assert_called_with(3.33)
on_tick.assert_has_calls([call(3.33), call(3.33), call(3.33)])
_exit.assert_not_called()
@patch('os._exit')
def test_thread_crash(self, _exit):
t = timer2.Timer()
t._next_entry = Mock()
t._next_entry.side_effect = OSError(131)
t.run()
_exit.assert_called_with(1)
def test_gc_race_lost(self):
t = timer2.Timer()
with patch.object(t, "_Timer__is_stopped") as mock_stop_event:
# Mark the timer as shutting down so we escape the run loop,
# mocking the running state so we don't block!
with patch.object(t, "running", new=False):
t.stop()
# Pretend like the interpreter has shutdown and GCed built-in
# modules, causing an exception
mock_stop_event.set.side_effect = TypeError()
t.run()
mock_stop_event.set.assert_called_with()
def test_test_enter(self):
t = timer2.Timer()
t._do_enter = Mock()
e = Mock()
t.enter(e, 13, 0)
t._do_enter.assert_called_with('enter_at', e, 13, priority=0)
def test_test_enter_after(self):
t = timer2.Timer()
t._do_enter = Mock()
t.enter_after()
t._do_enter.assert_called_with('enter_after')
def test_cancel(self):
t = timer2.Timer()
tref = Mock()
t.cancel(tref)
tref.cancel.assert_called_with()
| test_Timer |
python | ray-project__ray | doc/source/serve/doc_code/multiplexed.py | {
"start": 1393,
"end": 1503
} | class ____:
def __call__(self):
return serve.get_multiplexed_model_id()
@serve.deployment
| Downstream |
python | crytic__slither | slither/utils/upgradeability.py | {
"start": 815,
"end": 24695
} | class ____:
def __init__(self, contract: "Contract") -> None:
self._contract: Contract = contract
self._tainted_functions: List[Function] = []
self._tainted_variables: List[Variable] = []
@property
def contract(self) -> Contract:
return self._contract
@property
def tainted_functions(self) -> List[Function]:
return self._tainted_functions
def add_tainted_function(self, f: Function):
self._tainted_functions.append(f)
@property
def tainted_variables(self) -> List[Variable]:
return self._tainted_variables
def add_tainted_variable(self, v: Variable):
self._tainted_variables.append(v)
# pylint: disable=too-many-locals
def compare(
v1: Contract, v2: Contract, include_external: bool = False
) -> Tuple[
List[Variable],
List[Variable],
List[Variable],
List[Function],
List[Function],
List[Function],
]:
"""
Compares two versions of a contract. Most useful for upgradeable (logic) contracts,
but does not require that Contract.is_upgradeable returns true for either contract.
Args:
v1: Original version of (upgradeable) contract
v2: Updated version of (upgradeable) contract
include_external: Optional flag to enable cross-contract external taint analysis
Returns:
missing-vars-in-v2: list[Variable],
new-variables: list[Variable],
tainted-variables: list[Variable],
new-functions: list[Function],
modified-functions: list[Function],
tainted-functions: list[Function]
tainted-contracts: list[TaintedExternalContract]
"""
order_vars1 = v1.storage_variables_ordered + v1.transient_variables_ordered
order_vars2 = v2.storage_variables_ordered + v2.transient_variables_ordered
func_sigs1 = [function.solidity_signature for function in v1.functions]
func_sigs2 = [function.solidity_signature for function in v2.functions]
missing_vars_in_v2 = []
new_variables = []
tainted_variables = []
new_functions = []
modified_functions = []
tainted_functions = []
# Since this is not a detector, include any missing variables in the v2 contract
if len(order_vars2) < len(order_vars1):
missing_vars_in_v2.extend(get_missing_vars(v1, v2))
# Find all new and modified functions in the v2 contract
new_modified_functions = []
new_modified_function_vars = []
for sig in func_sigs2:
function = v2.get_function_from_signature(sig)
orig_function = v1.get_function_from_signature(sig)
if sig not in func_sigs1:
new_modified_functions.append(function)
new_functions.append(function)
new_modified_function_vars += function.all_state_variables_written()
elif not function.is_constructor_variables and is_function_modified(
orig_function, function
):
new_modified_functions.append(function)
modified_functions.append(function)
new_modified_function_vars += function.all_state_variables_written()
# Find all unmodified functions that call a modified function or read/write the
# same state variable(s) as a new/modified function, i.e., tainted functions
for function in v2.functions:
if (
function in new_modified_functions
or function.is_constructor
or function.name.startswith("slither")
):
continue
modified_calls = [
func
for func in new_modified_functions
if func in [ir.function for ir in function.internal_calls]
]
tainted_vars = [
var
for var in set(new_modified_function_vars)
if var in function.all_state_variables_read() + function.all_state_variables_written()
and not var.is_constant
and not var.is_immutable
]
if len(modified_calls) > 0 or len(tainted_vars) > 0:
tainted_functions.append(function)
# Find all new or tainted variables, i.e., variables that are written by a new/modified/tainted function
for var in order_vars2:
written_by = v2.get_functions_writing_to_variable(var)
if next((v for v in v1.state_variables_ordered if v.name == var.name), None) is None:
new_variables.append(var)
elif any(func in written_by for func in new_modified_functions + tainted_functions):
tainted_variables.append(var)
tainted_contracts = []
if include_external:
# Find all external contracts and functions called by new/modified/tainted functions
tainted_contracts = tainted_external_contracts(
new_functions + modified_functions + tainted_functions
)
return (
missing_vars_in_v2,
new_variables,
tainted_variables,
new_functions,
modified_functions,
tainted_functions,
tainted_contracts,
)
def tainted_external_contracts(funcs: List[Function]) -> List[TaintedExternalContract]:
"""
Takes a list of functions from one contract, finds any calls in these to functions in external contracts,
and determines which variables and functions in the external contracts are tainted by these external calls.
Args:
funcs: a list of Function objects to search for external calls.
Returns:
TaintedExternalContract() (
contract: Contract,
tainted_functions: List[TaintedFunction],
tainted_variables: List[TaintedVariable]
)
"""
tainted_contracts: dict[str, TaintedExternalContract] = {}
tainted_list: list[TaintedExternalContract] = []
for func in funcs:
for contract, ir in func.all_high_level_calls():
target = ir.function
if contract.is_library:
# Not interested in library calls
continue
if contract.name not in tainted_contracts:
# A contract may be tainted by multiple function calls - only make one TaintedExternalContract object
tainted_contracts[contract.name] = TaintedExternalContract(contract)
if (
isinstance(target, Function)
and target not in funcs
and target not in (f for f in tainted_contracts[contract.name].tainted_functions)
and not (target.is_constructor or target.is_fallback or target.is_receive)
):
# Found a high-level call to a new tainted function
tainted_contracts[contract.name].add_tainted_function(target)
for var in target.all_state_variables_written():
# Consider as tainted all variables written by the tainted function
if var not in (v for v in tainted_contracts[contract.name].tainted_variables):
tainted_contracts[contract.name].add_tainted_variable(var)
elif (
isinstance(target, StateVariable)
and target not in (v for v in tainted_contracts[contract.name].tainted_variables)
and target.is_stored
):
# Found a new high-level call to a public state variable getter
tainted_contracts[contract.name].add_tainted_variable(target)
for c in tainted_contracts.values():
tainted_list.append(c)
contract = c.contract
variables = c.tainted_variables
for var in variables:
# For each tainted variable, consider as tainted any function that reads or writes to it
read_write = set(
contract.get_functions_reading_from_variable(var)
+ contract.get_functions_writing_to_variable(var)
)
for f in read_write:
if f not in tainted_contracts[contract.name].tainted_functions and not (
f.is_constructor or f.is_fallback or f.is_receive
):
c.add_tainted_function(f)
return tainted_list
def tainted_inheriting_contracts(
tainted_contracts: List[TaintedExternalContract], contracts: List[Contract] = None
) -> List[TaintedExternalContract]:
"""
Takes a list of TaintedExternalContract obtained from tainted_external_contracts, and finds any contracts which
inherit a tainted contract, as well as any functions that call tainted functions or read tainted variables in
the inherited contract.
Args:
tainted_contracts: the list obtained from `tainted_external_contracts` or `compare`.
contracts: (optional) the list of contracts to check for inheritance. If not provided, defaults to
`contract.compilation_unit.contracts` for each contract in tainted_contracts.
Returns:
An updated list of TaintedExternalContract, including all from the input list.
"""
for tainted in tainted_contracts:
contract = tainted.contract
check_contracts = contracts
if contracts is None:
check_contracts = contract.compilation_unit.contracts
# We are only interested in checking contracts that inherit a tainted contract
check_contracts = [
c
for c in check_contracts
if c.name not in [t.contract.name for t in tainted_contracts]
and contract.name in [i.name for i in c.inheritance]
]
for c in check_contracts:
new_taint = TaintedExternalContract(c)
for f in c.functions_declared:
# Search for functions that call an inherited tainted function or access an inherited tainted variable
internal_calls = [
ir.function
for ir in f.all_internal_calls()
if isinstance(ir.function, Function)
]
if any(
call.canonical_name == t.canonical_name
for t in tainted.tainted_functions
for call in internal_calls
) or any(
var.canonical_name == t.canonical_name
for t in tainted.tainted_variables
for var in f.all_state_variables_read() + f.all_state_variables_written()
):
new_taint.add_tainted_function(f)
for f in new_taint.tainted_functions:
# For each newly found tainted function, consider as tainted any variable it writes to
for var in f.all_state_variables_written():
if var not in (
v for v in tainted.tainted_variables + new_taint.tainted_variables
):
new_taint.add_tainted_variable(var)
for var in new_taint.tainted_variables:
# For each newly found tainted variable, consider as tainted any function that reads or writes to it
read_write = set(
contract.get_functions_reading_from_variable(var)
+ contract.get_functions_writing_to_variable(var)
)
for f in read_write:
if f not in (
t for t in tainted.tainted_functions + new_taint.tainted_functions
) and not (f.is_constructor or f.is_fallback or f.is_receive):
new_taint.add_tainted_function(f)
if len(new_taint.tainted_functions) > 0:
tainted_contracts.append(new_taint)
return tainted_contracts
def get_missing_vars(v1: Contract, v2: Contract) -> List[StateVariable]:
"""
Gets all non-constant/immutable StateVariables that appear in v1 but not v2
Args:
v1: Contract version 1
v2: Contract version 2
Returns:
List of StateVariables from v1 missing in v2
"""
results = []
order_vars1 = v1.storage_variables_ordered + v1.transient_variables_ordered
order_vars2 = v2.storage_variables_ordered + v2.transient_variables_ordered
if len(order_vars2) < len(order_vars1):
for variable in order_vars1:
if variable.name not in [v.name for v in order_vars2]:
results.append(variable)
return results
def is_function_modified(f1: Function, f2: Function) -> bool:
"""
Compares two versions of a function, and returns True if the function has been modified.
First checks whether the functions' content hashes are equal to quickly rule out identical functions.
Walks the CFGs and compares IR operations if hashes differ to rule out false positives, i.e., from changed comments.
Args:
f1: Original version of the function
f2: New version of the function
Returns:
True if the functions differ, otherwise False
"""
# If the function content hashes are the same, no need to investigate the function further
if f1.source_mapping.content_hash == f2.source_mapping.content_hash:
return False
# If the hashes differ, it is possible a change in a name or in a comment could be the only difference
# So we need to resort to walking through the CFG and comparing the IR operations
queue_f1 = [f1.entry_point]
queue_f2 = [f2.entry_point]
visited = []
while len(queue_f1) > 0 and len(queue_f2) > 0:
node_f1 = queue_f1.pop(0)
node_f2 = queue_f2.pop(0)
visited.extend([node_f1, node_f2])
queue_f1.extend(son for son in node_f1.sons if son not in visited)
queue_f2.extend(son for son in node_f2.sons if son not in visited)
if len(node_f1.irs) != len(node_f2.irs):
return True
for i, ir in enumerate(node_f1.irs):
if encode_ir_for_upgradeability_compare(ir) != encode_ir_for_upgradeability_compare(
node_f2.irs[i]
):
return True
return False
def get_proxy_implementation_slot(proxy: Contract) -> Optional[SlotInfo]:
"""
Gets information about the storage slot where a proxy's implementation address is stored.
Args:
proxy: A Contract object (proxy.is_upgradeable_proxy should be true).
Returns:
(`SlotInfo`) | None : A dictionary of the slot information.
"""
delegate = get_proxy_implementation_var(proxy)
if isinstance(delegate, StateVariable):
if delegate.is_stored:
srs = SlitherReadStorage([proxy], 20)
return srs.get_storage_slot(delegate, proxy)
if delegate.is_constant and delegate.type.name == "bytes32":
return SlotInfo(
name=delegate.name,
type_string="address",
slot=int(delegate.expression.value, 16),
size=160,
offset=0,
)
return None
def get_proxy_implementation_var(proxy: Contract) -> Optional[Variable]:
"""
Gets the Variable that stores a proxy's implementation address. Uses data dependency to trace any LocalVariable
that is passed into a delegatecall as the target address back to its data source, ideally a StateVariable.
Can return a newly created StateVariable if an `sload` from a hardcoded storage slot is found in assembly.
Args:
proxy: A Contract object (proxy.is_upgradeable_proxy should be true).
Returns:
(`Variable`) | None : The variable, ideally a StateVariable, which stores the proxy's implementation address.
"""
if not proxy.is_upgradeable_proxy or not proxy.fallback_function:
return None
delegate = find_delegate_in_fallback(proxy)
if isinstance(delegate, LocalVariable):
dependencies = get_dependencies(delegate, proxy)
try:
delegate = next(var for var in dependencies if isinstance(var, StateVariable))
except StopIteration:
# TODO: Handle case where get_dependencies does not return any state variables.
return delegate
return delegate
def find_delegate_in_fallback(proxy: Contract) -> Optional[Variable]:
"""
Searches a proxy's fallback function for a delegatecall, then extracts the Variable being passed in as the target.
Can return a newly created StateVariable if an `sload` from a hardcoded storage slot is found in assembly.
Should typically be called by get_proxy_implementation_var(proxy).
Args:
proxy: A Contract object (should have a fallback function).
Returns:
(`Variable`) | None : The variable being passed as the destination argument in a delegatecall in the fallback.
"""
delegate: Optional[Variable] = None
fallback = proxy.fallback_function
for node in fallback.all_nodes():
for ir in node.irs:
if isinstance(ir, LowLevelCall) and ir.function_name == "delegatecall":
delegate = ir.destination
if delegate is not None:
break
if (
node.type == NodeType.ASSEMBLY
and isinstance(node.inline_asm, str)
and "delegatecall" in node.inline_asm
):
delegate = extract_delegate_from_asm(proxy, node)
elif node.type == NodeType.EXPRESSION:
expression = node.expression
if isinstance(expression, AssignmentOperation):
expression = expression.expression_right
if (
isinstance(expression, CallExpression)
and "delegatecall" in str(expression.called)
and len(expression.arguments) > 1
):
dest = expression.arguments[1]
if isinstance(dest, CallExpression) and "sload" in str(dest.called):
dest = dest.arguments[0]
if isinstance(dest, Identifier):
delegate = dest.value
break
if (
isinstance(dest, Literal) and len(dest.value) == 66
): # 32 bytes = 64 chars + "0x" = 66 chars
# Storage slot is not declared as a constant, but rather is hardcoded in the assembly,
# so create a new StateVariable to represent it.
delegate = create_state_variable_from_slot(dest.value)
break
return delegate
def extract_delegate_from_asm(contract: Contract, node: Node) -> Optional[Variable]:
"""
Finds a Variable with a name matching the argument passed into a delegatecall, when all we have is an Assembly node
with a block of code as one long string. Usually only the case for solc versions < 0.6.0.
Can return a newly created StateVariable if an `sload` from a hardcoded storage slot is found in assembly.
Should typically be called by find_delegate_in_fallback(proxy).
Args:
contract: The parent Contract.
node: The Assembly Node (i.e., node.type == NodeType.ASSEMBLY)
Returns:
(`Variable`) | None : The variable being passed as the destination argument in a delegatecall in the fallback.
"""
asm_split = str(node.inline_asm).split("\n")
asm = next(line for line in asm_split if "delegatecall" in line)
params = asm.split("call(")[1].split(", ")
dest = params[1]
if dest.endswith(")") and not dest.startswith("sload("):
dest = params[2]
if dest.startswith("sload("):
dest = dest.replace(")", "(").split("(")[1]
if dest.startswith("0x"):
return create_state_variable_from_slot(dest)
if dest.isnumeric():
slot_idx = int(dest)
return next(
(
v
for v in contract.state_variables_ordered
if SlitherReadStorage.get_variable_info(contract, v)[0] == slot_idx
),
None,
)
for v in node.function.variables_read_or_written:
if v.name == dest:
if isinstance(v, LocalVariable) and v.expression is not None:
e = v.expression
if isinstance(e, Identifier) and isinstance(e.value, StateVariable):
v = e.value
# Fall through, return constant storage slot
if isinstance(v, StateVariable) and v.is_constant:
return v
if "_fallback_asm" in dest or "_slot" in dest:
dest = dest.split("_")[0]
return find_delegate_from_name(contract, dest, node.function)
def find_delegate_from_name(
contract: Contract, dest: str, parent_func: Function
) -> Optional[Variable]:
"""
Searches for a variable with a given name, starting with StateVariables declared in the contract, followed by
LocalVariables in the parent function, either declared in the function body or as parameters in the signature.
Can return a newly created StateVariable if an `sload` from a hardcoded storage slot is found in assembly.
Args:
contract: The Contract object to search.
dest: The variable name to search for.
parent_func: The Function object to search.
Returns:
(`Variable`) | None : The variable with the matching name, if found
"""
for sv in contract.state_variables:
if sv.name == dest:
return sv
for lv in parent_func.local_variables:
if lv.name == dest:
return lv
for pv in parent_func.parameters + parent_func.returns:
if pv.name == dest:
return pv
if parent_func.contains_assembly:
for node in parent_func.all_nodes():
if node.type == NodeType.ASSEMBLY and isinstance(node.inline_asm, str):
asm = next(
(
s
for s in node.inline_asm.split("\n")
if f"{dest}:=sload(" in s.replace(" ", "")
),
None,
)
if asm:
slot = asm.split("sload(")[1].split(")")[0]
if slot.startswith("0x"):
return create_state_variable_from_slot(slot, name=dest)
try:
slot_idx = int(slot)
return next(
(
v
for v in contract.state_variables_ordered
if SlitherReadStorage.get_variable_info(contract, v)[0] == slot_idx
),
None,
)
except TypeError:
continue
return None
def create_state_variable_from_slot(slot: str, name: str = None) -> Optional[StateVariable]:
"""
Creates a new StateVariable object to wrap a hardcoded storage slot found in assembly.
Args:
slot: The storage slot hex string.
name: Optional name for the variable. The slot string is used if name is not provided.
Returns:
A newly created constant StateVariable of type bytes32, with the slot as the variable's expression and name,
if slot matches the length and prefix of a bytes32. Otherwise, returns None.
"""
if len(slot) == 66 and slot.startswith("0x"): # 32 bytes = 64 chars + "0x" = 66 chars
# Storage slot is not declared as a constant, but rather is hardcoded in the assembly,
# so create a new StateVariable to represent it.
v = StateVariable()
v.is_constant = True
v.expression = Literal(slot, ElementaryType("bytes32"))
if name is not None:
v.name = name
else:
v.name = slot
v.type = ElementaryType("bytes32")
return v
# This should probably also handle hashed strings, but for now return None
return None
| TaintedExternalContract |
python | django__django | tests/admin_scripts/app_waiting_migration/migrations/0001_initial.py | {
"start": 43,
"end": 606
} | class ____(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Bar",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255)),
],
),
]
| Migration |
python | getsentry__sentry | src/sentry/api/serializers/models/dashboard.py | {
"start": 22223,
"end": 24011
} | class ____(Serializer, DashboardFiltersMixin):
def get_attrs(self, item_list, user, **kwargs):
result = {}
widgets = serialize(
list(
DashboardWidget.objects.filter(dashboard_id__in=[i.id for i in item_list]).order_by(
"id"
)
),
user=user,
)
for dashboard in item_list:
dashboard_widgets = [w for w in widgets if w and w["dashboardId"] == str(dashboard.id)]
result[dashboard] = {"widgets": dashboard_widgets}
return result
def serialize(self, obj, attrs, user, **kwargs) -> DashboardDetailsResponse:
page_filters, tag_filters = self.get_filters(obj)
if "globalFilter" in tag_filters and not features.has(
"organizations:dashboards-global-filters",
organization=obj.organization,
actor=user,
):
tag_filters["globalFilter"] = []
data: DashboardDetailsResponse = {
"id": str(obj.id),
"title": obj.title,
"dateCreated": obj.date_added,
"createdBy": (
user_service.serialize_many(filter={"user_ids": [obj.created_by_id]})[0]
if obj.created_by_id
else None
),
"widgets": attrs["widgets"],
"filters": tag_filters,
"permissions": serialize(obj.permissions) if hasattr(obj, "permissions") else None,
"isFavorited": user.id in obj.favorited_by,
"projects": page_filters.get("projects", []),
"environment": page_filters.get("environment", []),
"prebuiltId": obj.prebuilt_id,
**page_filters,
}
return data
| DashboardDetailsModelSerializer |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 7686,
"end": 7799
} | class ____(InitTestModel):
def x(self):
return "XYZ"
# models from github issue
| InitTestModelSubclass |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_simplify/SIM401.py | {
"start": 1958,
"end": 2662
} | class ____:
def __init__(self):
self._dict = {}
def __getitem__(self, key):
return self._dict[key]
def __setitem__(self, key, value):
self._dict[key] = value
def __iter__(self):
return self._dict.__iter__()
not_dict = NotADictionary()
not_dict["key"] = "value"
# OK (type `NotADictionary` is not a known dictionary type)
if "key" in not_dict:
value = not_dict["key"]
else:
value = None
###
# Positive cases (preview)
###
# SIM401
var = a_dict[key] if key in a_dict else "default3"
# SIM401
var = "default-1" if key not in a_dict else a_dict[key]
# OK (default contains effect)
var = a_dict[key] if key in a_dict else val1 + val2
| NotADictionary |
python | PyCQA__pylint | pylint/checkers/symilar.py | {
"start": 24888,
"end": 27262
} | class ____:
"""Holds and indexes all the lines of a single source file.
Allows for correspondence between real lines of the source file and stripped ones, which
are the real ones from which undesired patterns have been removed.
"""
def __init__(
self,
name: str,
lines: list[str],
ignore_comments: bool = False,
ignore_docstrings: bool = False,
ignore_imports: bool = False,
ignore_signatures: bool = False,
line_enabled_callback: Callable[[str, int], bool] | None = None,
) -> None:
self.name = name
self._real_lines = lines
self._stripped_lines = stripped_lines(
lines,
ignore_comments,
ignore_docstrings,
ignore_imports,
ignore_signatures,
line_enabled_callback=line_enabled_callback,
)
def __str__(self) -> str:
return f"<Lineset for {self.name}>"
def __len__(self) -> int:
return len(self._real_lines)
def __getitem__(self, index: int) -> LineSpecifs:
return self._stripped_lines[index]
def __lt__(self, other: LineSet) -> bool:
return self.name < other.name
def __hash__(self) -> int:
return id(self)
def __eq__(self, other: object) -> bool:
if not isinstance(other, LineSet):
return False
return self.__dict__ == other.__dict__
@property
def stripped_lines(self) -> list[LineSpecifs]:
return self._stripped_lines
@property
def real_lines(self) -> list[str]:
return self._real_lines
MSGS: dict[str, MessageDefinitionTuple] = {
"R0801": (
"Similar lines in %s files\n%s",
"duplicate-code",
"Indicates that a set of similar lines has been detected "
"among multiple file. This usually means that the code should "
"be refactored to avoid this duplication.",
)
}
def report_similarities(
sect: Section,
stats: LinterStats,
old_stats: LinterStats | None,
) -> None:
"""Make a layout with some stats about duplication."""
lines = ["", "now", "previous", "difference"]
lines += table_lines_from_stats(stats, old_stats, "duplicated_lines")
sect.append(Table(children=lines, cols=4, rheaders=1, cheaders=1))
# wrapper to get a pylint checker from the similar class
| LineSet |
python | openai__openai-python | src/openai/types/beta/assistant_stream_event.py | {
"start": 4071,
"end": 4287
} | class ____(BaseModel):
data: RunStepDeltaEvent
"""Represents a run step delta i.e.
any changed fields on a run step during streaming.
"""
event: Literal["thread.run.step.delta"]
| ThreadRunStepDelta |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/concepts/ops_jobs_graphs/unit_tests.py | {
"start": 6759,
"end": 7788
} | class ____(dg.Config):
my_string: str
@asset
def asset_requires_config(config: MyAssetConfig) -> str:
return config.my_string
def test_asset_requires_config():
result = asset_requires_config(config=MyAssetConfig(my_string="foo"))
...
# end_test_config_asset
def get_data_from_source():
pass
def extract_structured_data(_):
pass
# start_materialize_asset
from dagster import asset, materialize_to_memory
@asset
def data_source():
return get_data_from_source()
@asset
def structured_data(data_source):
return extract_structured_data(data_source)
# An example unit test using materialize_to_memory
def test_data_assets():
result = materialize_to_memory([data_source, structured_data])
assert result.success
# Materialized objects can be accessed in terms of the underlying dg.op
materialized_data = result.output_for_node("structured_data")
...
# end_materialize_asset
# start_materialize_resources
import dagster as dg
from unittest import mock
| MyAssetConfig |
python | apache__airflow | airflow-ctl/tests/airflow_ctl/ctl/commands/test_version_command.py | {
"start": 1516,
"end": 2330
} | class ____:
"""Test the version command."""
parser = cli_parser.get_parser()
def test_ctl_version_remote(self, mock_client):
with redirect_stdout(StringIO()) as stdout:
version_info(self.parser.parse_args(["version", "--remote"]), api_client=mock_client)
assert "version" in stdout.getvalue()
assert "git_version" in stdout.getvalue()
assert "airflowctl_version" in stdout.getvalue()
def test_ctl_version_only_local_version(self, mock_client):
"""Test the version command with an exception."""
with redirect_stdout(StringIO()) as stdout:
version_info(self.parser.parse_args(["version"]), api_client=mock_client)
output = stdout.getvalue()
assert "airflowctl_version" in output
| TestVersionCommand |
python | ansible__ansible | lib/ansible/errors/__init__.py | {
"start": 6187,
"end": 6259
} | class ____(AnsibleError):
"""User interrupt."""
| AnsiblePromptInterrupt |
python | ApeWorX__ape | src/ape/types/private_mempool.py | {
"start": 763,
"end": 1122
} | class ____(BaseModel):
"""
Specifies the minimum percent of a given bundle's earnings to redistribute for it to be included
in a builder's block.
"""
body_idx: HexInt
"""
The index of the transaction in the bundle.
"""
percent: HexInt
"""
The minimum percent of the bundle's earnings to redistribute.
"""
| Refund |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 525186,
"end": 526817
} | class ____(sgqlc.types.Interface):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("id", "project_v2", "projects_v2")
id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="id")
project_v2 = sgqlc.types.Field(
"ProjectV2",
graphql_name="projectV2",
args=sgqlc.types.ArgDict(
(
(
"number",
sgqlc.types.Arg(
sgqlc.types.non_null(Int), graphql_name="number", default=None
),
),
)
),
)
projects_v2 = sgqlc.types.Field(
sgqlc.types.non_null(ProjectV2Connection),
graphql_name="projectsV2",
args=sgqlc.types.ArgDict(
(
("query", sgqlc.types.Arg(String, graphql_name="query", default=None)),
(
"order_by",
sgqlc.types.Arg(
ProjectV2Order,
graphql_name="orderBy",
default={"field": "NUMBER", "direction": "DESC"},
),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
| ProjectV2Owner |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/ndb/entities/snippets.py | {
"start": 3156,
"end": 4923
} | class ____(ndb.Model):
message_text = ndb.StringProperty()
def demonstrate_entities_with_parent_hierarchy():
ndb.Key("Account", "sandy@example.com", "Message", 123, "Revision", "1")
ndb.Key("Account", "sandy@example.com", "Message", 123, "Revision", "2")
ndb.Key("Account", "larry@example.com", "Message", 456, "Revision", "1")
ndb.Key("Account", "larry@example.com", "Message", 789, "Revision", "2")
def equivalent_ways_to_define_key_with_parent():
ndb.Key("Account", "sandy@example.com", "Message", 123, "Revision", "1")
ndb.Key(
"Revision", "1", parent=ndb.Key("Account", "sandy@example.com", "Message", 123)
)
ndb.Key(
"Revision",
"1",
parent=ndb.Key("Message", 123, parent=ndb.Key("Account", "sandy@example.com")),
)
def create_root_key():
sandy_key = ndb.Key(Account, "sandy@example.com")
return sandy_key
def create_entity_with_parent_keys():
account_key = ndb.Key(Account, "sandy@example.com")
# Ask Datastore to allocate an ID.
new_id = ndb.Model.allocate_ids(size=1, parent=account_key)[0]
# Datastore returns us an integer ID that we can use to create the message
# key
message_key = ndb.Key("Message", new_id, parent=account_key)
# Now we can put the message into Datastore
initial_revision = Revision(message_text="Hello", id="1", parent=message_key)
initial_revision.put()
return initial_revision
def get_parent_key_of_entity(initial_revision):
message_key = initial_revision.key.parent()
return message_key
def operate_on_multiple_keys_at_once(list_of_entities):
list_of_keys = ndb.put_multi(list_of_entities)
list_of_entities = ndb.get_multi(list_of_keys)
ndb.delete_multi(list_of_keys)
| Revision |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor2.py | {
"start": 693,
"end": 3975
} | class ____(Animal[int, int], CaveDweller[int]):
def get_wingspan(self, p1: int) -> float:
raise NotImplemented
def s1():
b: Bear[str] = Bear()
a: Animal[str, int] = b
reveal_type(a, expected_text="Bear[str]")
def s2():
a: Animal[str, int] = Bear()
reveal_type(a, expected_text="Bear[str]")
def s3():
a: Animal[str, int] = Bear()
reveal_type(a, expected_text="Bear[str]")
def s4():
a: Bear[Any] = Bear[int]()
reveal_type(a, expected_text="Bear[Any]")
def s5():
a: Animal[Any, Any] = Bear[int]()
reveal_type(a, expected_text="Bear[int]")
def s6():
a: Bat | Bear[str] = Bear()
reveal_type(a, expected_text="Bear[str]")
def s7(p: Bat | Bear[int]):
a: Animal[int, int] = p
reveal_type(a, expected_text="Bat | Bear[int]")
def s8():
a: Animal[int, int] = Bear[int]()
reveal_type(a, expected_text="Bear[int]")
def s9(p: dict[str, str]):
a: dict[str, Any] = p
reveal_type(a, expected_text="dict[str, Any]")
def s10(p: list[str]):
a: Iterable[Any] = p
reveal_type(a, expected_text="list[str]")
b: Iterable[str] = []
reveal_type(b, expected_text="list[str]")
c: Iterable[str] = list()
reveal_type(c, expected_text="list[str]")
def s11():
a: Animal[Any, Any] = Donkey[int]()
reveal_type(a, expected_text="Donkey[int]")
def s12(p: Bear[_T1], b: _T1):
a: Animal[Any, int] = p
reveal_type(a, expected_text="Bear[_T1@s12]")
def s13(p: Bat):
a: Flyer[int] = p
reveal_type(a, expected_text="Bat")
def s14(p: Bat):
a: CaveDweller[int] = p
reveal_type(a, expected_text="Bat")
def s15():
a = Bear(1)
reveal_type(a, expected_text="Bear[int]")
b = Bear[int](1)
reveal_type(b, expected_text="Bear[int]")
c = Bear[float](1)
reveal_type(c, expected_text="Bear[float]")
d = Bear[str | int](1)
reveal_type(d, expected_text="Bear[str | int]")
def s16():
a: Any = Bear(1)
reveal_type(a, expected_text="Any")
def s17():
a1: Iterable[object] = [2, 3, 4]
reveal_type(a1, expected_text="list[int]")
a2: list[object] = [2, 3, 4]
reveal_type(a2, expected_text="list[object]")
b1: Iterable[float] = [2, 3, 4]
reveal_type(b1, expected_text="list[int]")
b2: list[float] = [2, 3, 4]
reveal_type(b2, expected_text="list[float]")
c1: Iterable[Literal["A", "B", "C"]] = ["A", "B"]
reveal_type(c1, expected_text="list[Literal['A', 'B']]")
c2: list[Literal["A", "B", "C"]] = ["A", "B"]
reveal_type(c2, expected_text="list[Literal['A', 'B', 'C']]")
def s18():
a1: Mapping[object, object] = {"a": 3, "b": 5.6}
reveal_type(a1, expected_text="dict[object, int | float]")
a2: dict[object, object] = {"a": 3, "b": 5.6}
reveal_type(a2, expected_text="dict[object, object]")
b1: Mapping[str, float] = {"a": 3, "b": 5}
reveal_type(b1, expected_text="dict[str, int]")
b2: dict[str, float] = {"a": 3, "b": 5}
reveal_type(b2, expected_text="dict[str, float]")
c1: Mapping[Literal["A", "B"], Literal[3, 4]] = {"A": 3}
reveal_type(c1, expected_text="dict[Literal['A', 'B'], Literal[3]]")
c2: dict[Literal["A", "B"], Literal[3, 4]] = {"A": 3}
reveal_type(c2, expected_text="dict[Literal['A', 'B'], Literal[3, 4]]")
| Bat |
python | pytorch__pytorch | torch/export/graph_signature.py | {
"start": 2525,
"end": 2808
} | class ____(Enum):
USER_OUTPUT = auto()
LOSS_OUTPUT = auto()
BUFFER_MUTATION = auto()
PARAMETER_MUTATION = auto()
GRADIENT_TO_PARAMETER = auto()
GRADIENT_TO_USER_INPUT = auto()
USER_INPUT_MUTATION = auto()
TOKEN = auto()
@dataclasses.dataclass
| OutputKind |
python | pallets__werkzeug | src/werkzeug/exceptions.py | {
"start": 18662,
"end": 19389
} | class ____(HTTPException):
"""*428* `Precondition Required`
The server requires this request to be conditional, typically to prevent
the lost update problem, which is a race condition between two or more
clients attempting to update a resource through PUT or DELETE. By requiring
each client to include a conditional header ("If-Match" or "If-Unmodified-
Since") with the proper value retained from a recent GET request, the
server ensures that each client has at least seen the previous revision of
the resource.
"""
code = 428
description = (
"This request is required to be conditional; try using"
' "If-Match" or "If-Unmodified-Since".'
)
| PreconditionRequired |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.