language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | kamyu104__LeetCode-Solutions | Python/minimum-cost-to-separate-sentence-into-rows.py | {
"start": 1202,
"end": 2247
} | class ____(object):
def minimumCost(self, sentence, k):
    """
    :type sentence: str
    :type k: int
    :rtype: int

    Split `sentence` into rows of at most k characters without breaking
    words; every row except the last costs (k - row_length)**2 and the
    last row is free.  Returns the minimum total cost.
    """
    # Collect the length of every space-separated word in one pass.
    word_lens = []
    j = 0
    for i in range(len(sentence)+1):  # range (not xrange): works on Python 2 and 3
        if i != len(sentence) and sentence[i] != ' ':
            continue
        word_lens.append(i-j)
        j = i+1
    dp = [float("inf")]*(len(word_lens))  # dp[i]: min cost of word_lens[i:]
    # The final row is free: find the smallest i such that word_lens[i:]
    # (with single separating spaces) still fits in one row of width k,
    # and mark those suffixes as zero-cost.
    i, total = len(word_lens)-1, -1
    while i >= 0 and total + (word_lens[i]+1) <= k:
        total += (word_lens[i]+1)
        dp[i] = 0
        i -= 1
    # Fill the remaining entries right-to-left: put words i..j-1 on one row
    # (running width tracked in `total`) and recurse on the suffix via dp[j].
    for i in reversed(range(i+1)):
        total = word_lens[i]
        for j in range(i+1, len(dp)):
            dp[i] = min(dp[i], dp[j] + (k-total)**2)
            total += (word_lens[j]+1)
            if total > k:  # the row would overflow once word j is appended
                break
    return dp[0]
# Time: O(s + n * k), n is the number of the word_lens
# Space: O(n)
| Solution2 |
python | doocs__leetcode | solution/0600-0699/0687.Longest Univalue Path/Solution.py | {
"start": 192,
"end": 719
} | class ____:
def longestUnivaluePath(self, root: Optional[TreeNode]) -> int:
    """Return the number of edges on the longest path whose nodes all
    carry the same value; the path may bend through any subtree root."""
    best = 0

    def walk(node: Optional[TreeNode]) -> int:
        # Longest single-direction same-value chain hanging below `node`.
        nonlocal best
        if node is None:
            return 0
        left_chain = walk(node.left)
        right_chain = walk(node.right)
        down_left = left_chain + 1 if node.left and node.left.val == node.val else 0
        down_right = right_chain + 1 if node.right and node.right.val == node.val else 0
        # A bent path may use both sides when both children match.
        best = max(best, down_left + down_right)
        return max(down_left, down_right)

    walk(root)
    return best
| Solution |
python | numba__numba | numba/cuda/tests/cudapy/test_lineinfo.py | {
"start": 313,
"end": 6855
} | class ____(CUDATestCase):
def _loc_directive_regex(self):
# This is used in several tests
pat = (
r'\.loc' # .loc directive beginning
r'\s+[0-9]+' # whitespace then file index
r'\s+[0-9]+' # whitespace then line number
r'\s+[0-9]+' # whitespace then column position
)
return re.compile(pat)
def _check(self, fn, sig, expect):
    """Compile ``fn`` for ``sig`` and verify that line-mapping debug info
    is present in (when ``expect`` is True) or absent from (when False)
    the generated LLVM IR and PTX.

    :param fn: CUDA dispatcher to compile
    :param sig: signature tuple to compile for
    :param expect: whether lineinfo is expected in the output
    """
    fn.compile(sig)
    llvm = fn.inspect_llvm(sig)
    ptx = fn.inspect_asm(sig)
    assertfn = self.assertIsNotNone if expect else self.assertIsNone
    # DICompileUnit debug info metadata should all be of the
    # DebugDirectivesOnly kind, and not the FullDebug kind
    pat = (
        r'!DICompileUnit\(.*'   # Opening of DICompileUnit metadata. Since
                                # the order of attributes is not
                                # guaranteed, we need to match arbitrarily
                                # afterwards.
        r'emissionKind:\s+'     # The emissionKind attribute followed by
                                # whitespace.
        r'DebugDirectivesOnly'  # The correct emissionKind.
    )
    match = re.compile(pat).search(llvm)
    assertfn(match, msg=ptx)
    pat = (
        r'!DICompileUnit\(.*'   # Same as the pattern above, but for the
        r'emissionKind:\s+'     # incorrect FullDebug emissionKind.
        r'FullDebug'            #
    )
    match = re.compile(pat).search(llvm)
    self.assertIsNone(match, msg=ptx)
    # The name of this file should be present in the line mapping
    # if lineinfo was propagated through correctly.
    pat = (
        r'\.file'                # .file directive beginning
        r'\s+[0-9]+\s+'          # file number surrounded by whitespace
        r'".*test_lineinfo.py"'  # filename in quotes, ignoring full path
    )
    match = re.compile(pat).search(ptx)
    assertfn(match, msg=ptx)
    # .loc directives should be present in the ptx.
    # BUG FIX: the search result was previously discarded (the call was not
    # assigned to `match`), so the assertion below re-checked the stale
    # `.file` match and the .loc presence was never actually verified.
    match = self._loc_directive_regex().search(ptx)
    assertfn(match, msg=ptx)
    # Debug info sections should not be present when only lineinfo is
    # generated
    pat = (
        r'\.section\s+'  # .section directive beginning
        r'\.debug_info'  # Section named ".debug_info"
    )
    match = re.compile(pat).search(ptx)
    self.assertIsNone(match, msg=ptx)
def test_no_lineinfo_in_asm(self):
    # lineinfo=False must leave the generated PTX/LLVM free of
    # line-mapping metadata.
    @cuda.jit(lineinfo=False)
    def foo(x):
        x[0] = 1

    self._check(foo, sig=(int32[:],), expect=False)

def test_lineinfo_in_asm(self):
    # lineinfo=True must produce line-mapping metadata in the PTX/LLVM.
    @cuda.jit(lineinfo=True)
    def foo(x):
        x[0] = 1

    self._check(foo, sig=(int32[:],), expect=True)
def test_lineinfo_maintains_error_model(self):
    # Enabling lineinfo must not switch the error model from the NumPy
    # default to the Python one (which adds exception-signalling paths).
    sig = (float32[::1], float32[::1])

    @cuda.jit(sig, lineinfo=True)
    def divide_kernel(x, y):
        x[0] /= y[0]

    llvm = divide_kernel.inspect_llvm(sig)
    # When the error model is Python, the device function returns 1 to
    # signal an exception (e.g. divide by zero) has occurred. When the
    # error model is the default NumPy one (as it should be when only
    # lineinfo is enabled) the device function always returns 0.
    self.assertNotIn('ret i32 1', llvm)
def test_no_lineinfo_in_device_function(self):
    # Ensure that no lineinfo is generated in device functions by default.
    @cuda.jit
    def callee(x):
        x[0] += 1

    @cuda.jit
    def caller(x):
        x[0] = 1
        callee(x)

    sig = (int32[:],)
    self._check(caller, sig=sig, expect=False)
def test_lineinfo_in_device_function(self):
    """Lineinfo must survive inlining of a device function into a kernel."""
    # First we define a device function / kernel pair and run the usual
    # checks on the generated LLVM and PTX.
    @cuda.jit(lineinfo=True)
    def callee(x):
        x[0] += 1

    @cuda.jit(lineinfo=True)
    def caller(x):
        x[0] = 1
        callee(x)

    sig = (int32[:],)
    self._check(caller, sig=sig, expect=True)

    # Now we can check the PTX of the device function specifically.
    ptx = caller.inspect_asm(sig)
    ptxlines = ptx.splitlines()

    # Check that there is no device function in the PTX
    # A line beginning with ".weak .func" that identifies a device function
    devfn_start = re.compile(r'^\.weak\s+\.func')
    for line in ptxlines:
        if devfn_start.match(line) is not None:
            self.fail(f"Found device function in PTX:\n\n{ptx}")

    # Scan for .loc directives that refer to an inlined device function
    loc_directive = self._loc_directive_regex()
    found = False
    for line in ptxlines:
        if loc_directive.search(line) is not None:
            if 'inlined_at' in line:
                found = True
                break

    if not found:
        # NOTE(review): the message below lacks a space between "found"
        # and "in" — cosmetic only.
        self.fail(f'No .loc directive with inlined_at info found'
                  f'in:\n\n{ptx}')

    # We also inspect the LLVM to ensure that there's debug info for each
    # subprogram (function). A lightweight way to check this is to ensure
    # that we have as many DISubprograms as we expect.
    llvm = caller.inspect_llvm(sig)
    subprograms = 0
    for line in llvm.splitlines():
        if 'distinct !DISubprogram' in line:
            subprograms += 1

    # One DISubprogram for each of:
    # - The kernel wrapper
    # - The caller
    # - The callee
    expected_subprograms = 3
    # NOTE(review): the failure message contains a stray leading quote.
    self.assertEqual(subprograms, expected_subprograms,
                     f'"Expected {expected_subprograms} DISubprograms; '
                     f'got {subprograms}')
def test_debug_and_lineinfo_warning(self):
    # debug=True and lineinfo=True are mutually exclusive; requesting both
    # must emit exactly one NumbaInvalidConfigWarning.
    with warnings.catch_warnings(record=True) as w:
        ignore_internal_warnings()
        # We pass opt=False to prevent the warning about opt and debug
        # occurring as well
        @cuda.jit(debug=True, lineinfo=True, opt=False)
        def f():
            pass

    self.assertEqual(len(w), 1)
    self.assertEqual(w[0].category, NumbaInvalidConfigWarning)
    self.assertIn('debug and lineinfo are mutually exclusive',
                  str(w[0].message))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| TestCudaLineInfo |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 387109,
"end": 387963
} | class ____(sgqlc.types.Interface):
"""An object that can be closed"""
__schema__ = github_schema
__field_names__ = ("closed", "closed_at", "viewer_can_close", "viewer_can_reopen")
closed = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="closed")
"""Indicates if the object is closed (definition of closed may depend
on type)
"""
closed_at = sgqlc.types.Field(DateTime, graphql_name="closedAt")
"""Identifies the date and time when the object was closed."""
viewer_can_close = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanClose")
"""Indicates if the object can be closed by the viewer."""
viewer_can_reopen = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanReopen")
"""Indicates if the object can be reopened by the viewer."""
| Closable |
python | fluentpython__example-code | 17-futures/countries/flags2_asyncio_executor.py | {
"start": 424,
"end": 3051
} | class ____(Exception):
def __init__(self, country_code):
    # Wrap any download failure, remembering which country code failed so
    # the caller can report it; the root cause travels via __cause__.
    self.country_code = country_code
@asyncio.coroutine
def get_flag(base_url, cc):
    """Fetch one flag image and return its bytes; raise web.HTTPNotFound
    on 404 and aiohttp.HttpProcessingError for any other non-200 status.

    Uses the legacy generator-based coroutine style (`yield from`).
    """
    url = '{}/{cc}/{cc}.gif'.format(base_url, cc=cc.lower())
    resp = yield from aiohttp.request('GET', url)
    with contextlib.closing(resp):  # always release the connection
        if resp.status == 200:
            image = yield from resp.read()
            return image
        elif resp.status == 404:
            raise web.HTTPNotFound()
        else:
            raise aiohttp.HttpProcessingError(
                code=resp.status, message=resp.reason,
                headers=resp.headers)
# BEGIN FLAGS2_ASYNCIO_EXECUTOR
@asyncio.coroutine
def download_one(cc, base_url, semaphore, verbose):
    """Download one flag, bounded by `semaphore`, saving via a thread-pool
    executor so blocking file I/O does not stall the event loop."""
    try:
        with (yield from semaphore):  # throttle concurrent requests
            image = yield from get_flag(base_url, cc)
    except web.HTTPNotFound:
        status = HTTPStatus.not_found
        msg = 'not found'
    except Exception as exc:
        # Chain the root cause onto a FetchError carrying the country code.
        raise FetchError(cc) from exc
    else:
        loop = asyncio.get_event_loop()  # <1>
        # NOTE(review): the future returned by run_in_executor is never
        # awaited, so saving is fire-and-forget — an error in save_flag
        # would go unnoticed.  Presumably intentional for this example.
        loop.run_in_executor(None,  # <2>
                save_flag, image, cc.lower() + '.gif')  # <3>
        status = HTTPStatus.ok
        msg = 'OK'

    if verbose and msg:
        print(cc, msg)

    return Result(status, cc)
# END FLAGS2_ASYNCIO_EXECUTOR
@asyncio.coroutine
def downloader_coro(cc_list, base_url, verbose, concur_req):
    """Gather download results as they complete; return a Counter keyed
    by HTTPStatus with at most `concur_req` downloads in flight."""
    counter = collections.Counter()
    semaphore = asyncio.Semaphore(concur_req)  # shared concurrency limit
    to_do = [download_one(cc, base_url, semaphore, verbose)
             for cc in sorted(cc_list)]

    to_do_iter = asyncio.as_completed(to_do)
    if not verbose:
        to_do_iter = tqdm.tqdm(to_do_iter, total=len(cc_list))  # progress bar
    for future in to_do_iter:
        try:
            res = yield from future
        except FetchError as exc:
            country_code = exc.country_code
            # Prefer the chained cause's message; fall back to its class
            # name when the exception had no args.
            try:
                error_msg = exc.__cause__.args[0]
            except IndexError:
                error_msg = exc.__cause__.__class__.__name__
            if verbose and error_msg:
                msg = '*** Error for {}: {}'
                print(msg.format(country_code, error_msg))
            status = HTTPStatus.error
        else:
            status = res.status

        counter[status] += 1

    return counter
def download_many(cc_list, base_url, verbose, concur_req):
    """Synchronous entry point: drive the downloader coroutine on the
    event loop and return the Counter of per-status download counts."""
    event_loop = asyncio.get_event_loop()
    pending = downloader_coro(cc_list, base_url, verbose, concur_req)
    status_counts = event_loop.run_until_complete(pending)
    event_loop.close()
    return status_counts
if __name__ == '__main__':
    # Shared CLI driver (flags2_common.main) parses args and calls download_many.
    main(download_many, DEFAULT_CONCUR_REQ, MAX_CONCUR_REQ)
| FetchError |
python | patrick-kidger__equinox | equinox/nn/_normalisation.py | {
"start": 10692,
"end": 15145
} | class ____(Module):
r"""
A simplified version of LayerNorm which rescales the inputs, but does not center
them. Optionally applies a learned reweighting of the transformed array afterward.
Given an input array $x$, this layer computes
$$\frac{x}{\sqrt{\varepsilon + \frac{1}{n}\Vert x \Vert^2_2}} \gamma + \beta$$
where $\Vert x \Vert^2_2 = \sum_{i=1}^n x_i^2$, $n = \dim(x)$, and $\gamma$ is a
learned array with the same shape as $x$ if `use_weight=True`, or
$\gamma = 1$ if `use_weight=False`, as proposed in
[this paper](https://browse.arxiv.org/abs/2307.14995). `\beta` is an optional bias
term.
??? cite
[Root Mean Square Layer Normalization](https://browse.arxiv.org/abs/1910.07467)
```bibtex
@article{zhang2019root,
title={Root Mean Square Layer Normalization},
author={Biao Zhang and Rico Sennrich},
year={2019},
journal={arXiv:1910.07467}
}
```
"""
shape: tuple[int, ...] = field(static=True)
eps: float = field(static=True)
use_weight: bool = field(static=True)
use_bias: bool = field(static=True)
weight: Float[Array, "*shape"] | None
bias: Float[Array, "*shape"] | None
def __init__(
self,
shape: int | Sequence[int],
eps: float = 1e-5,
use_weight: bool = True,
use_bias: bool = True,
dtype=None,
):
"""**Arguments:**
- `shape`: Shape of the input.
- `eps`: Value added to denominator for numerical stability.
- `use_weight`: Whether the module has learnable affine weights.
- `use_bias`: Whether the module has learnable affine shift.
- `dtype`: The dtype to use for the weight and the bias in this layer if
`use_weight` or `use_bias` is set to `True`.
Defaults to either `jax.numpy.float32` or `jax.numpy.float64` depending
on whether JAX is in 64-bit mode.
"""
dtype = default_floating_dtype() if dtype is None else dtype
if isinstance(shape, int):
shape = (shape,)
else:
shape = tuple(shape)
self.shape = shape
self.eps = eps
self.use_weight = use_weight
self.use_bias = use_bias
self.weight = jnp.ones(shape, dtype=dtype) if use_weight else None
self.bias = jnp.zeros(shape, dtype=dtype) if use_bias else None
@overload
def __call__(self, x: Array, *, key: PRNGKeyArray | None = None) -> Array: ...

@overload
def __call__(
    self, x: Array, state: State, *, key: PRNGKeyArray | None = None
) -> tuple[Array, State]: ...

@named_scope("eqx.nn.RMSNorm")
def __call__(
    self,
    x: Float[Array, "*shape"],
    state: State = sentinel,
    *,
    key: PRNGKeyArray | None = None,
) -> Array | tuple[Array, State]:
    """**Arguments:**

    - `x`: A JAX array, with the same shape as the `shape` passed to `__init__`.
    - `state`: Ignored; provided for interchangability with the
        [`equinox.nn.BatchNorm`][] API.
    - `key`: Ignored; provided for compatibility with the rest of the Equinox API.
        (Keyword only argument.)

    **Returns:**

    The output is a JAX array of the same shape as `x`.

    If `state` is passed, then a 2-tuple of `(output, state)` is returned. The state
    is passed through unchanged. If `state` is not passed, then just the output is
    returned.
    """
    if x.shape != self.shape:
        # NOTE(review): the message below is missing a space before
        # "Received" and a closing backtick after `shape={...}` — cosmetic.
        raise ValueError(
            "`RMSNorm(shape)(x)` must satisfy the invariant `shape == x.shape`"
            f"Received `shape={self.shape} and `x.shape={x.shape}`. You might need "
            "to replace `rms_norm(x)` with `jax.vmap(rms_norm)(x)`.\n"
        )
    orig_dtype = x.dtype
    # Promote to at least float32 so the mean/rsqrt stay numerically
    # stable for low-precision inputs; cast back at the end.
    with jax.numpy_dtype_promotion("standard"):
        dtype = jnp.result_type(x.dtype, jnp.float32)
    x = x.astype(dtype)
    inv_rms = jax.lax.rsqrt(jnp.mean(x**2) + self.eps)
    out = inv_rms * x
    if self.use_weight:
        out = self.weight.astype(dtype) * out  # pyright: ignore
    if self.use_bias:
        out = out + self.bias.astype(dtype)  # pyright: ignore
    if state is sentinel:
        return out.astype(orig_dtype)
    else:
        return out.astype(orig_dtype), state
| RMSNorm |
python | neetcode-gh__leetcode | python/0152-maximum-product-subarray.py | {
"start": 0,
"end": 357
} | class ____:
def maxProduct(self, nums: List[int]) -> int:
    """Largest product of any contiguous subarray.

    Kadane-style sweep tracking both the maximum and minimum product
    ending at each element (a negative value can flip min into max).
    O(n) time / O(1) extra memory.
    """
    best = nums[0]
    hi = lo = 1
    for value in nums:
        # Compute both extremes from the *previous* hi/lo simultaneously.
        candidates = (value, value * hi, value * lo)
        hi, lo = max(candidates), min(candidates)
        best = max(best, hi)
    return best
| Solution |
python | apache__airflow | airflow-core/tests/unit/cli/commands/test_kerberos_command.py | {
"start": 1137,
"end": 5524
} | class ____:
@classmethod
def setup_class(cls):
    # Build the Airflow CLI parser once for every test in this class.
    cls.parser = cli_parser.get_parser()

@mock.patch("airflow.cli.commands.kerberos_command.krb")
@conf_vars({("core", "executor"): "CeleryExecutor"})
def test_run_command(self, mock_krb):
    # A plain invocation should renew the ticket in STANDARD mode.
    args = self.parser.parse_args(["kerberos", "PRINCIPAL", "--keytab", "/tmp/airflow.keytab"])
    kerberos_command.kerberos(args)
    mock_krb.run.assert_called_once_with(
        keytab="/tmp/airflow.keytab", principal="PRINCIPAL", mode=KerberosMode.STANDARD
    )
@mock.patch("airflow.cli.commands.daemon_utils.TimeoutPIDLockFile")
@mock.patch("airflow.cli.commands.daemon_utils.setup_locations")
@mock.patch("airflow.cli.commands.daemon_utils.daemon")
@mock.patch("airflow.cli.commands.kerberos_command.krb")
@conf_vars({("core", "executor"): "CeleryExecutor"})
def test_run_command_daemon(self, mock_krb, mock_daemon, mock_setup_locations, mock_pid_file):
mock_setup_locations.return_value = (
mock.MagicMock(name="pidfile"),
mock.MagicMock(name="stdout"),
mock.MagicMock(name="stderr"),
mock.MagicMock(name="INVALID"),
)
args = self.parser.parse_args(
[
"kerberos",
"PRINCIPAL",
"--keytab",
"/tmp/airflow.keytab",
"--log-file",
"/tmp/kerberos.log",
"--pid",
"/tmp/kerberos.pid",
"--stderr",
"/tmp/kerberos-stderr.log",
"--stdout",
"/tmp/kerberos-stdout.log",
"--daemon",
]
)
mock_open = mock.mock_open()
with mock.patch("airflow.cli.commands.daemon_utils.open", mock_open):
kerberos_command.kerberos(args)
mock_krb.run.assert_called_once_with(
keytab="/tmp/airflow.keytab", principal="PRINCIPAL", mode=KerberosMode.STANDARD
)
assert mock_daemon.mock_calls[:3] == [
mock.call.DaemonContext(
pidfile=mock_pid_file.return_value,
files_preserve=None,
stderr=mock_open.return_value,
stdout=mock_open.return_value,
umask=0o077,
),
mock.call.DaemonContext().__enter__(),
mock.call.DaemonContext().__exit__(None, None, None),
]
assert mock_setup_locations.mock_calls[0] == mock.call(
process="kerberos",
pid="/tmp/kerberos.pid",
stdout="/tmp/kerberos-stdout.log",
stderr="/tmp/kerberos-stderr.log",
log="/tmp/kerberos.log",
)
python_3_13_close_calls = [mock.call().close()] if PY313 else []
mock_pid_file.mock_calls[0] = mock.call(mock_setup_locations.return_value[0], -1)
assert mock_open.mock_calls == [
mock.call(mock_setup_locations.return_value[1], "a"),
mock.call().__enter__(),
mock.call(mock_setup_locations.return_value[2], "a"),
mock.call().__enter__(),
mock.call().truncate(0),
mock.call().truncate(0),
mock.call().__exit__(None, None, None),
*python_3_13_close_calls,
mock.call().__exit__(None, None, None),
*python_3_13_close_calls,
]
@mock.patch("airflow.cli.commands.kerberos_command.krb")
@conf_vars({("core", "executor"): "CeleryExecutor"})
def test_run_command_with_mode_standard(self, mock_krb):
args = self.parser.parse_args(["kerberos", "PRINCIPAL", "--keytab", "/tmp/airflow.keytab"])
kerberos_command.kerberos(args)
mock_krb.run.assert_called_once_with(
keytab="/tmp/airflow.keytab", principal="PRINCIPAL", mode=KerberosMode.STANDARD
)
@mock.patch("airflow.cli.commands.kerberos_command.krb")
@conf_vars({("core", "executor"): "CeleryExecutor"})
def test_run_command_with_mode_one_time(self, mock_krb):
args = self.parser.parse_args(
["kerberos", "PRINCIPAL", "--keytab", "/tmp/airflow.keytab", "--one-time"]
)
kerberos_command.kerberos(args)
mock_krb.run.assert_called_once_with(
keytab="/tmp/airflow.keytab", principal="PRINCIPAL", mode=KerberosMode.ONE_TIME
)
| TestKerberosCommand |
python | coleifer__peewee | peewee.py | {
"start": 193794,
"end": 195980
} | class ____(MetaField):
sequence = None  # composite keys never use a database sequence

def __init__(self, *field_names):
    # Names of the model fields that make up the composite key.
    self.field_names = field_names
    self._safe_field_names = None  # lazily-resolved attribute names

@property
def safe_field_names(self):
    # Resolve field names to their python-side ("safe") attribute names,
    # caching the result once the key has been bound to a model.
    if self._safe_field_names is None:
        if self.model is None:
            return self.field_names  # unbound: best effort, no caching
        self._safe_field_names = [self.model._meta.fields[f].safe_name
                                  for f in self.field_names]
    return self._safe_field_names

def __get__(self, instance, instance_type=None):
    # Descriptor access: on an instance return the tuple of key values;
    # on the class return the CompositeKey object itself.
    if instance is not None:
        return tuple([getattr(instance, f) for f in self.safe_field_names])
    return self

def __set__(self, instance, value):
    # Assigning a list/tuple to the composite key sets each component
    # field in order; length must match the number of key columns.
    if not isinstance(value, (list, tuple)):
        raise TypeError('A list or tuple must be used to set the value of '
                        'a composite primary key.')
    if len(value) != len(self.field_names):
        raise ValueError('The length of the value must equal the number '
                         'of columns of the composite primary key.')
    for idx, field_value in enumerate(value):
        setattr(instance, self.field_names[idx], field_value)

def __eq__(self, other):
    # Compare against a tuple of values, producing an AND-ed SQL
    # expression suitable for a WHERE clause.
    expressions = [(self.model._meta.fields[field] == value)
                   for field, value in zip(self.field_names, other)]
    return reduce(operator.and_, expressions)

def __ne__(self, other):
    return ~(self == other)

def __hash__(self):
    # Hash on the owning model name plus the field-name tuple.
    return hash((self.model.__name__, self.field_names))

def __sql__(self, ctx):
    # If the composite PK is being selected, do not use parens. Elsewhere,
    # such as in an expression, we want to use parentheses and treat it as
    # a row value.
    parens = ctx.scope != SCOPE_SOURCE
    return ctx.sql(NodeList([self.model._meta.fields[field]
                             for field in self.field_names], ', ', parens))

def bind(self, model, name, set_attribute=True):
    # Attach the key to its model; column/name aliases all use `name`.
    self.model = model
    self.column_name = self.name = self.safe_name = name
    setattr(model, self.name, self)
| CompositeKey |
python | patrick-kidger__equinox | equinox/nn/_pool.py | {
"start": 8967,
"end": 10516
} | class ____(Pool):
"""Two-dimensional downsample using an average over a sliding window."""
def __init__(
self,
kernel_size: int | Sequence[int],
stride: int | Sequence[int] = 1,
padding: int | Sequence[int] | Sequence[tuple[int, int]] = 0,
use_ceil: bool = False,
):
"""**Arguments:**
- `kernel_size`: The size of the convolutional kernel.
- `stride`: The stride of the convolution.
- `padding`: The amount of padding to apply before and after each
spatial dimension.
- `use_ceil`: If `True`, then `ceil` is used to compute the final output
shape instead of `floor`. For `ceil`, if required, extra padding is added.
Defaults to `False`.
"""
super().__init__(
init=0,
operation=lax.add,
num_spatial_dims=2,
kernel_size=kernel_size,
stride=stride,
padding=padding,
use_ceil=use_ceil,
)
@named_scope("eqx.nn.AvgPool2d")
def __call__(self, x: Array, *, key: PRNGKeyArray | None = None) -> Array:
"""**Arguments:**
- `x`: The input. Should be a JAX array of shape `(channels, dim_1, dim_2)`.
- `key`: Ignored; provided for compatibility with the rest of the Equinox API.
(Keyword only argument.)
**Returns:**
A JAX array of shape `(channels, new_dim_1, new_dim_2)`.
"""
return super().__call__(x) / math.prod(self.kernel_size)
| AvgPool2d |
python | walkccc__LeetCode | solutions/457. Circular Array Loop/457.py | {
"start": 0,
"end": 694
} | class ____:
def circularArrayLoop(self, nums: list[int]) -> bool:
    """Detect a cycle of length > 1 that moves in a single direction,
    following i -> (i + nums[i]) % n.  Floyd tortoise/hare per start
    index; dead paths are zeroed out so they are never re-walked.
    NOTE: mutates `nums` in place (marks visited dead paths with 0).
    """
    n = len(nums)

    def step(idx: int) -> int:
        # Next index, wrapping circularly (Python % is never negative).
        return (idx + nums[idx]) % n

    if n < 2:
        return False

    for start, direction in enumerate(nums):
        if direction == 0:
            continue  # already marked as part of a dead path

        tortoise, hare = start, step(start)
        # Keep advancing while every hop moves in the same direction.
        while direction * nums[hare] > 0 and direction * nums[step(hare)] > 0:
            if tortoise == hare:
                if tortoise == step(tortoise):
                    break  # single-element loop: not a valid cycle
                return True
            tortoise = step(tortoise)
            hare = step(step(hare))

        # No cycle from `start`: zero out the whole same-direction path so
        # later starting points skip it.
        tortoise, sign = start, direction
        while sign * nums[tortoise] > 0:
            nxt = step(tortoise)
            nums[tortoise] = 0
            tortoise = nxt

    return False
| Solution |
python | pandas-dev__pandas | pandas/core/computation/pytables.py | {
"start": 12571,
"end": 12849
} | class ____(ConditionBinOp):
# error: Signature of "evaluate" incompatible with supertype "BinOp"
def evaluate(self) -> Self:  # type: ignore[override]
    """Join the already-evaluated left/right sub-conditions into one
    parenthesised PyTables condition string and return self."""
    self.condition = f"({self.lhs.condition} {self.op} {self.rhs.condition})"
    return self
| JointConditionBinOp |
python | Lightning-AI__lightning | src/lightning/fabric/strategies/launchers/multiprocessing.py | {
"start": 6322,
"end": 10213
} | class ____:
"""Captures a hand-selected set of (global) variables in modules and provides a way to restore them.
It facilitates and encapsulates the transfer of globals like PyTorch's deterministic flags or random generator state
across process boundaries when launching processes with :func:`torch.multiprocessing.spawn`.
Example:
.. code-block:: python
# in main process
snapshot = _GlobalStateSnapshot.capture()
# in worker process
snapshot.restore()
"""
use_deterministic_algorithms: bool
use_deterministic_algorithms_warn_only: bool
cudnn_benchmark: bool
rng_states: dict[str, Any]
@classmethod
def capture(cls) -> "_GlobalStateSnapshot":
"""Capture a few global states from torch, numpy, etc., that we want to restore in a spawned worker process."""
return cls(
use_deterministic_algorithms=torch.are_deterministic_algorithms_enabled(),
use_deterministic_algorithms_warn_only=torch.is_deterministic_algorithms_warn_only_enabled(),
cudnn_benchmark=torch.backends.cudnn.benchmark,
rng_states=_collect_rng_states(),
)
def restore(self) -> None:
"""Restores all globals to the values captured in the :meth:`capture` method."""
torch.use_deterministic_algorithms(
self.use_deterministic_algorithms, warn_only=self.use_deterministic_algorithms_warn_only
)
torch.backends.cudnn.benchmark = self.cudnn_benchmark
_set_rng_states(self.rng_states)
def _check_bad_cuda_fork() -> None:
    """Checks whether it is safe to fork and initialize CUDA in the new processes, and raises an exception if not.

    The error message replaces PyTorch's 'Cannot re-initialize CUDA in forked subprocess' with helpful advice for
    Lightning users.
    """
    if not torch.cuda.is_initialized():
        return  # CUDA untouched so far, so forking is safe

    message = (
        "Lightning can't create new processes if CUDA is already initialized. Did you manually call"
        " `torch.cuda.*` functions, have moved the model to the device, or allocated memory on the GPU any"
        " other way? Please remove any such calls, or change the selected strategy."
    )
    if _IS_INTERACTIVE:
        message += " You will have to restart the Python kernel."
    raise RuntimeError(message)
def _disable_module_memory_sharing(data: Any) -> Any:
    """Disables memory sharing on parameters and buffers of `nn.Module`s contained in the given collection.

    Note: This is only required when running on CPU.
    """
    # PyTorch enables memory sharing automatically on all tensors that are passed through `mp.spawn`.
    # For model weights and buffers, this is undesired and can lead to race conditions between processes.
    # Hence, we copy the tensors in the entire module to ensure it doesn't share memory with other processes.

    @torch.no_grad()
    def unshare(module: Module) -> Module:
        # Cloning re-allocates each tensor outside the shared-memory segment.
        for tensor in itertools.chain(module.parameters(), module.buffers()):
            tensor.data = tensor.data.clone()
        return module

    return apply_to_collection(data, function=unshare, dtype=Module)
def _check_missing_main_guard() -> None:
    """Raises an exception if the ``__name__ == "__main__"`` guard is missing."""
    # `_inheriting` is set on a process while a 'spawn' child is being
    # bootstrapped; reaching this point in that state means the user's
    # script was re-imported without a main guard.
    if not getattr(mp.current_process(), "_inheriting", False):
        return
    message = dedent(
        """
        Launching multiple processes with the 'spawn' start method requires that your script guards the main
        function with an `if __name__ == "__main__"` clause. For example:

        def main():
            # Put your code here
            ...

        if __name__ == "__main__":
            main()

        Alternatively, you can run with `strategy="ddp"` to avoid this error.
        """
    )
    raise RuntimeError(message)
| _GlobalStateSnapshot |
python | openai__openai-python | src/openai/types/realtime/realtime_audio_config_output.py | {
"start": 294,
"end": 1389
} | class ____(BaseModel):
format: Optional[RealtimeAudioFormats] = None
"""The format of the output audio."""
speed: Optional[float] = None
"""
The speed of the model's spoken response as a multiple of the original speed.
1.0 is the default speed. 0.25 is the minimum speed. 1.5 is the maximum speed.
This value can only be changed in between model turns, not while a response is
in progress.
This parameter is a post-processing adjustment to the audio after it is
generated, it's also possible to prompt the model to speak faster or slower.
"""
voice: Union[
str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"], None
] = None
"""The voice the model uses to respond.
Voice cannot be changed during the session once the model has responded with
audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
`coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and `cedar`. We recommend
`marin` and `cedar` for best quality.
"""
| RealtimeAudioConfigOutput |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/context.py | {
"start": 7917,
"end": 10869
} | class ____(CompileState):
is_dml_returning = False  # overridden True by DML-with-RETURNING compile states

def _init_global_attributes(
    self, statement, compiler, *, toplevel, process_criteria_for_toplevel
):
    """Set up per-compile attribute dicts and apply criteria options
    exactly once, for the topmost ORM statement in the compile stack."""
    self.attributes = {}

    if compiler is None:
        # this is the legacy / testing only ORM _compile_state() use case.
        # there is no need to apply criteria options for this.
        self.global_attributes = {}
        assert toplevel
        return
    else:
        self.global_attributes = ga = compiler._global_attributes

    if toplevel:
        ga["toplevel_orm"] = True

        if process_criteria_for_toplevel:
            for opt in statement._with_options:
                if opt._is_criteria_option:
                    opt.process_compile_state(self)

        return
    elif ga.get("toplevel_orm", False):
        # An enclosing ORM compile state already handled criteria options.
        return

    # Nested, non-ORM-toplevel compile: apply criteria options taken from
    # the outermost statement on the compiler stack, then mark them done.
    stack_0 = compiler.stack[0]

    try:
        toplevel_stmt = stack_0["selectable"]
    except KeyError:
        pass
    else:
        for opt in toplevel_stmt._with_options:
            if opt._is_compile_state and opt._is_criteria_option:
                opt.process_compile_state(self)

    ga["toplevel_orm"] = True
@classmethod
def create_for_statement(
    cls,
    statement: Executable,
    compiler: SQLCompiler,
    **kw: Any,
) -> CompileState:
    """Create a context for a statement given a :class:`.Compiler`.

    This method is always invoked in the context of SQLCompiler.process().

    For a Select object, this would be invoked from
    SQLCompiler.visit_select(). For the special FromStatement object used
    by Query to indicate "Query.from_statement()", this is called by
    FromStatement._compiler_dispatch() that would be called by
    SQLCompiler.process().
    """
    # Delegate to CompileState's registry-based construction.
    return super().create_for_statement(statement, compiler, **kw)
@classmethod
def orm_pre_session_exec(
    cls,
    session,
    statement,
    params,
    execution_options,
    bind_arguments,
    is_pre_event,
):
    # Subclass responsibility: session coordination before execution
    # (e.g. autoflush, event handling).
    raise NotImplementedError()
@classmethod
def orm_execute_statement(
    cls,
    session,
    statement,
    params,
    execution_options,
    bind_arguments,
    conn,
) -> Result:
    # Execute on the connection, then let the subclass adapt the raw
    # cursor result into ORM-level rows.
    result = conn.execute(
        statement, params or {}, execution_options=execution_options
    )
    return cls.orm_setup_cursor_result(
        session,
        statement,
        params,
        execution_options,
        bind_arguments,
        result,
    )
@classmethod
def orm_setup_cursor_result(
    cls,
    session,
    statement,
    params,
    execution_options,
    bind_arguments,
    result,
):
    # Subclass responsibility: wrap the cursor result for the ORM.
    raise NotImplementedError()
| _AbstractORMCompileState |
python | pytorch__pytorch | test/test_dataloader.py | {
"start": 28495,
"end": 29076
} | class ____(Dataset):
def __init__(self, size, error_event):
    # size: number of items; error_event: an Event-like object (or None)
    # which, when set, makes the last DataLoader worker raise — used to
    # exercise worker-error propagation.
    self.size = size
    self.error_event = error_event

def __len__(self):
    return self.size

def __getitem__(self, idx):
    # Only meaningful inside a DataLoader worker process.
    worker_info = torch.utils.data.get_worker_info()
    if (
        self.error_event is not None
        and self.error_event.is_set()
        and worker_info.id == worker_info.num_workers - 1
    ):
        # only error in the last worker
        raise RuntimeError("Worker error")
    return torch.tensor([idx])
| TestProperExitDataset |
python | scipy__scipy | scipy/stats/tests/test_continued_fraction.py | {
"start": 622,
"end": 6709
} | class ____:
# Shared fixtures: a deterministically seeded RNG and positive parameters.
rng = np.random.default_rng(5895448232066142650)
p = rng.uniform(1, 10, size=10)
def a1(self, n, x=1.5):
    """Numerator terms of the continued fraction for tan(x):
    a_0 = 0, a_1 = x, a_n = -x**2 for n >= 2."""
    if n == 0:
        term = 0*x
    elif n == 1:
        term = x
    else:
        term = -x**2
    # Pre-NEP-50 NumPy promotes Python scalars differently; force the
    # dtype/shape of `x` there to keep results dtype-stable.
    if np.isscalar(term) and np.__version__ < "2.0":
        term = np.full_like(x, term)  # preserve dtype pre NEP 50
    return term
def b1(self, n, x=1.5):
    """Denominator terms of the continued fraction for tan(x):
    b_0 = 0, b_n = 2n - 1 otherwise."""
    if n == 0:
        term = 0*x
    else:
        # x/x yields a value with the same type, dtype and shape as x.
        term = (x/x) * (2*n - 1)
    if np.isscalar(term) and np.__version__ < "2.0":
        term = np.full_like(x, term)  # preserve dtype pre NEP 50
    return term
def log_a1(self, n, x):
    # Log-space numerator terms: log of a1. a_n is negative for n >= 2,
    # so the imaginary part pi*1j encodes the sign; log(0) is -inf.
    xp = array_namespace(x)
    if n == 0:
        y = xp.full_like(x, -xp.asarray(math.inf, dtype=x.dtype))
    elif n == 1:
        y = xp.log(x)
    else:
        y = 2 * xp.log(x) + math.pi * 1j  # log(-x**2)
    return y

def log_b1(self, n, x):
    # Log-space denominator terms: log(2n - 1), with log(0) = -inf at n=0.
    xp = array_namespace(x)
    if n == 0:
        y = xp.full_like(x, -xp.asarray(math.inf, dtype=x.dtype))
    else:
        one = x - x  # gets array of correct type, dtype, and shape
        y = one + math.log(2 * n - 1)
    return y
def test_input_validation(self, xp):
    # Invalid callables, tolerances, maxiter and log arguments must raise
    # ValueError with the documented messages; valid edge values must not.
    a1 = self.a1
    b1 = self.b1
    message = '`a` and `b` must be callable.'
    with pytest.raises(ValueError, match=message):
        _continued_fraction(1, b1)
    with pytest.raises(ValueError, match=message):
        _continued_fraction(a1, 1)

    message = r'`eps` and `tiny` must be \(or represent the logarithm of\)...'
    with pytest.raises(ValueError, match=message):
        _continued_fraction(a1, b1, tolerances={'eps': -10})
    with pytest.raises(ValueError, match=message):
        _continued_fraction(a1, b1, tolerances={'eps': np.nan})
    with pytest.raises(ValueError, match=message):
        _continued_fraction(a1, b1, tolerances={'eps': 1+1j}, log=True)
    with pytest.raises(ValueError, match=message):
        _continued_fraction(a1, b1, tolerances={'tiny': 0})
    with pytest.raises(ValueError, match=message):
        _continued_fraction(a1, b1, tolerances={'tiny': np.inf})
    with pytest.raises(ValueError, match=message):
        _continued_fraction(a1, b1, tolerances={'tiny': np.inf}, log=True)

    # this should not raise (negative values are valid in log space)
    kwargs = dict(args=xp.asarray(1.5+0j), log=True, maxiter=0)
    _continued_fraction(a1, b1, tolerances={'eps': -10}, **kwargs)
    _continued_fraction(a1, b1, tolerances={'tiny': -10}, **kwargs)

    message = '`maxiter` must be a non-negative integer.'
    with pytest.raises(ValueError, match=message):
        _continued_fraction(a1, b1, maxiter=-1)

    message = '`log` must be boolean.'
    with pytest.raises(ValueError, match=message):
        _continued_fraction(a1, b1, log=2)
@pytest.mark.parametrize('dtype', ['float32', 'float64', 'complex64', 'complex128'])
@pytest.mark.parametrize('shape', [(), (1,), (3,), (3, 2)])
def test_basic(self, shape, dtype, xp):
    # The a1/b1 continued fraction converges to tan(x) for random x of
    # every supported shape and dtype (complex inputs get random imag).
    np_dtype = getattr(np, dtype)
    xp_dtype = getattr(xp, dtype)
    rng = np.random.default_rng(2435908729190400)
    x = rng.random(shape).astype(np_dtype)
    x = x + rng.random(shape).astype(np_dtype)*1j if dtype.startswith('c') else x
    x = xp.asarray(x, dtype=xp_dtype)
    res = _continued_fraction(self.a1, self.b1, args=(x,))
    ref = xp.tan(x)
    xp_assert_close(res.f, ref)
@pytest.mark.skip_xp_backends('torch', reason='pytorch/pytorch#136063')
@pytest.mark.parametrize('dtype', ['float32', 'float64'])
@pytest.mark.parametrize('shape', [(), (1,), (3,), (3, 2)])
def test_log(self, shape, dtype, xp):
if (np.__version__ < "2") and (dtype == 'float32'):
pytest.skip("Scalar dtypes only respected after NEP 50.")
np_dtype = getattr(np, dtype)
rng = np.random.default_rng(2435908729190400)
x = rng.random(shape).astype(np_dtype)
x = xp.asarray(x)
res = _continued_fraction(self.log_a1, self.log_b1, args=(x + 0j,), log=True)
ref = xp.tan(x)
xp_assert_close(xp.exp(xp.real(res.f)), ref)
def test_maxiter(self, xp):
rng = np.random.default_rng(2435908729190400)
x = xp.asarray(rng.random(), dtype=xp.float64)
ref = xp.tan(x)
res1 = _continued_fraction(self.a1, self.b1, args=(x,), maxiter=3)
assert res1.nit == 3
res2 = _continued_fraction(self.a1, self.b1, args=(x,), maxiter=6)
assert res2.nit == 6
xp_assert_less(xp.abs(res2.f - ref), xp.abs(res1.f - ref))
def test_eps(self, xp):
x = xp.asarray(1.5, dtype=xp.float64) # x = 1.5 is the default defined above
ref = xp.tan(x)
res1 = _continued_fraction(self.a1, self.b1, args=(x,),
tolerances={'eps': 1e-6})
res2 = _continued_fraction(self.a1, self.b1, args=(x,))
xp_assert_less(res1.nit, res2.nit)
xp_assert_less(xp.abs(res2.f - ref), xp.abs(res1.f - ref))
def test_feval(self, xp):
def a(n, x):
a.nfev += 1
return n * x
def b(n, x):
b.nfev += 1
return n * x
a.nfev, b.nfev = 0, 0
res = _continued_fraction(a, b, args=(xp.asarray(1.),))
assert res.nfev == a.nfev == b.nfev == res.nit + 1
def test_status(self, xp):
x = xp.asarray([1, 10, np.nan], dtype=xp.float64)
res = _continued_fraction(self.a1, self.b1, args=(x,), maxiter=15)
xp_assert_equal(res.success, xp.asarray([True, False, False]))
xp_assert_equal(res.status, xp.asarray([0, -2, -3], dtype=xp.int32))
def test_special_cases(self, xp):
one = xp.asarray(1)
res = _continued_fraction(lambda x: one, lambda x: one, maxiter=0)
xp_assert_close(res.f, xp.asarray(1.))
assert res.nit == res.nfev - 1 == 0
| TestContinuedFraction |
python | kamyu104__LeetCode-Solutions | Python/palindrome-partitioning-iv.py | {
"start": 31,
"end": 1158
} | class ____(object):
def checkPartitioning(self, s):
"""
:type s: str
:rtype: bool
"""
def manacher(s):
s = '^#' + '#'.join(s) + '#$'
P = [0]*len(s)
C, R = 0, 0
for i in xrange(1, len(s)-1):
i_mirror = 2*C-i
if R > i:
P[i] = min(R-i, P[i_mirror])
while s[i+1+P[i]] == s[i-1-P[i]]:
P[i] += 1
if i+P[i] > R:
C, R = i, i+P[i]
return P
P = manacher(s)
prefix, suffix = [], []
for i in xrange(2, len(P)-2):
if i-1-P[i] == 0:
prefix.append(i)
if i+1+P[i] == len(P)-1:
suffix.append(i)
for i in prefix:
for j in suffix:
left, right = i+1+P[i], j-1-P[j]
if left > right:
continue
mid = left + (right-left)//2
if P[mid] >= mid-left:
return True
return False
# Time: O(n^2)
# Space: O(n^2)
| Solution |
python | instagram__MonkeyType | tests/testmodule/__init__.py | {
"start": 202,
"end": 317
} | class ____:
def __init__(self, arg1: str, arg2: int) -> None:
self.arg1 = arg1
self.arg2 = arg2
| Foo |
python | astropy__astropy | astropy/coordinates/spectral_coordinate.py | {
"start": 4694,
"end": 31191
} | class ____(SpectralQuantity):
"""
A spectral coordinate with its corresponding unit.
.. note:: The |SpectralCoord| class is new in Astropy v4.1 and should be
considered experimental at this time. Note that we do not fully
support cases where the observer and target are moving
relativistically relative to each other, so care should be taken
in those cases. It is possible that there will be API changes in
future versions of Astropy based on user feedback. If you have
specific ideas for how it might be improved, please let us know
on the |astropy-dev mailing list| or at
http://feedback.astropy.org.
Parameters
----------
value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`
Spectral values, which should be either wavelength, frequency,
energy, wavenumber, or velocity values.
unit : unit-like
Unit for the given spectral values.
observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of observer. If no velocities
are present on this object, the observer is assumed to be stationary
relative to the frame origin.
target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of target. If no velocities
are present on this object, the target is assumed to be stationary
relative to the frame origin.
radial_velocity : `~astropy.units.Quantity` ['speed'], optional
The radial velocity of the target with respect to the observer. This
can only be specified if ``redshift`` is not specified.
redshift : float, optional
The relativistic redshift of the target with respect to the observer.
This can only be specified if ``radial_velocity`` cannot be specified.
doppler_rest : `~astropy.units.Quantity`, optional
The rest value to use when expressing the spectral value as a velocity.
doppler_convention : str, optional
The Doppler convention to use when expressing the spectral value as a velocity.
"""
@u.quantity_input(radial_velocity=u.km / u.s)
def __new__(
cls,
value,
unit=None,
observer=None,
target=None,
radial_velocity=None,
redshift=None,
**kwargs,
):
obj = super().__new__(cls, value, unit=unit, **kwargs)
# There are two main modes of operation in this class. Either the
# observer and target are both defined, in which case the radial
# velocity and redshift are automatically computed from these, or
# only one of the observer and target are specified, along with a
# manually specified radial velocity or redshift. So if a target and
# observer are both specified, we can't also accept a radial velocity
# or redshift.
if target is not None and observer is not None:
if radial_velocity is not None or redshift is not None:
raise ValueError(
"Cannot specify radial velocity or redshift if both "
"target and observer are specified"
)
# We only deal with redshifts here and in the redshift property.
# Otherwise internally we always deal with velocities.
if redshift is not None:
if radial_velocity is not None:
raise ValueError("Cannot set both a radial velocity and redshift")
redshift = u.Quantity(redshift)
# For now, we can't specify redshift=u.one in quantity_input above
# and have it work with plain floats, but if that is fixed, for
# example as in https://github.com/astropy/astropy/pull/10232, we
# can remove the check here and add redshift=u.one to the decorator
if not redshift.unit.is_equivalent(u.one):
raise u.UnitsError("redshift should be dimensionless")
radial_velocity = redshift.to(u.km / u.s, u.doppler_redshift())
# If we're initializing from an existing SpectralCoord, keep any
# parameters that aren't being overridden
if observer is None:
observer = getattr(value, "observer", None)
if target is None:
target = getattr(value, "target", None)
# As mentioned above, we should only specify the radial velocity
# manually if either or both the observer and target are not
# specified.
if observer is None or target is None:
if radial_velocity is None:
radial_velocity = getattr(value, "radial_velocity", None)
obj._radial_velocity = radial_velocity
obj._observer = cls._validate_coordinate(observer, label="observer")
obj._target = cls._validate_coordinate(target, label="target")
return obj
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
self._radial_velocity = getattr(obj, "_radial_velocity", None)
self._observer = getattr(obj, "_observer", None)
self._target = getattr(obj, "_target", None)
@staticmethod
def _validate_coordinate(coord, label=""):
"""
Checks the type of the frame and whether a velocity differential and a
distance has been defined on the frame object.
If no distance is defined, the target is assumed to be "really far
away", and the observer is assumed to be "in the solar system".
Parameters
----------
coord : `~astropy.coordinates.BaseCoordinateFrame`
The new frame to be used for target or observer.
label : str, optional
The name of the object being validated (e.g. 'target' or 'observer'),
which is then used in error messages.
"""
if coord is None:
return
if not issubclass(coord.__class__, BaseCoordinateFrame):
if isinstance(coord, SkyCoord):
coord = coord.frame
else:
raise TypeError(
f"{label} must be a SkyCoord or coordinate frame instance"
)
# If the distance is not well-defined, ensure that it works properly
# for generating differentials
# TODO: change this to not set the distance and yield a warning once
# there's a good way to address this in astropy.coordinates
# https://github.com/astropy/astropy/issues/10247
with np.errstate(all="ignore"):
distance = getattr(coord, "distance", None)
if distance is not None and distance.unit.physical_type == "dimensionless":
coord = SkyCoord(coord, distance=DEFAULT_DISTANCE)
warnings.warn(
"Distance on coordinate object is dimensionless, an "
f"arbitrary distance value of {DEFAULT_DISTANCE} will be set instead.",
NoDistanceWarning,
)
# If the observer frame does not contain information about the
# velocity of the system, assume that the velocity is zero in the
# system.
if "s" not in coord.data.differentials:
warnings.warn(
f"No velocity defined on frame, assuming {ZERO_VELOCITIES}.",
NoVelocityWarning,
)
coord = attach_zero_velocities(coord)
return coord
def replicate(
self,
value=None,
unit=None,
observer=None,
target=None,
radial_velocity=None,
redshift=None,
doppler_convention=None,
doppler_rest=None,
copy=False,
):
"""
Return a replica of the `SpectralCoord`, optionally changing the
values or attributes.
Note that no conversion is carried out by this method - this keeps
all the values and attributes the same, except for the ones explicitly
passed to this method which are changed.
If ``copy`` is set to `True` then a full copy of the internal arrays
will be made. By default the replica will use a reference to the
original arrays when possible to save memory.
Parameters
----------
value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`, optional
Spectral values, which should be either wavelength, frequency,
energy, wavenumber, or velocity values.
unit : unit-like
Unit for the given spectral values.
observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of observer.
target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of target.
radial_velocity : `~astropy.units.Quantity` ['speed'], optional
The radial velocity of the target with respect to the observer.
redshift : float, optional
The relativistic redshift of the target with respect to the observer.
doppler_rest : `~astropy.units.Quantity`, optional
The rest value to use when expressing the spectral value as a velocity.
doppler_convention : str, optional
The Doppler convention to use when expressing the spectral value as a velocity.
copy : bool, optional
If `True`, and ``value`` is not specified, the values are copied to
the new `SkyCoord` - otherwise a reference to the same values is used.
Returns
-------
sc : `SpectralCoord` object
Replica of this object
"""
if isinstance(value, u.Quantity):
if unit is not None:
raise ValueError(
"Cannot specify value as a Quantity and also specify unit"
)
value, unit = value.value, value.unit
value = value if value is not None else self.value
unit = unit or self.unit
observer = self._validate_coordinate(observer) or self.observer
target = self._validate_coordinate(target) or self.target
doppler_convention = doppler_convention or self.doppler_convention
doppler_rest = doppler_rest or self.doppler_rest
# If value is being taken from self and copy is True
if copy:
value = value.copy()
# Only include radial_velocity if it is not auto-computed from the
# observer and target.
if (
(self.observer is None or self.target is None)
and radial_velocity is None
and redshift is None
):
radial_velocity = self.radial_velocity
with warnings.catch_warnings():
warnings.simplefilter("ignore", NoVelocityWarning)
return self.__class__(
value=value,
unit=unit,
observer=observer,
target=target,
radial_velocity=radial_velocity,
redshift=redshift,
doppler_convention=doppler_convention,
doppler_rest=doppler_rest,
copy=COPY_IF_NEEDED,
)
@property
def quantity(self):
"""
Convert the ``SpectralCoord`` to a `~astropy.units.Quantity`.
Equivalent to ``self.view(u.Quantity)``.
Returns
-------
`~astropy.units.Quantity`
This object viewed as a `~astropy.units.Quantity`.
"""
return self.view(u.Quantity)
@property
def observer(self):
"""
The coordinates of the observer.
If set, and a target is set as well, this will override any explicit
radial velocity passed in.
Returns
-------
`~astropy.coordinates.BaseCoordinateFrame`
The astropy coordinate frame representing the observation.
"""
return self._observer
@observer.setter
def observer(self, value):
if self.observer is not None:
raise ValueError("observer has already been set")
self._observer = self._validate_coordinate(value, label="observer")
# Switch to auto-computing radial velocity
if self._target is not None:
self._radial_velocity = None
@property
def target(self):
"""
The coordinates of the target being observed.
If set, and an observer is set as well, this will override any explicit
radial velocity passed in.
Returns
-------
`~astropy.coordinates.BaseCoordinateFrame`
The astropy coordinate frame representing the target.
"""
return self._target
@target.setter
def target(self, value):
if self.target is not None:
raise ValueError("target has already been set")
self._target = self._validate_coordinate(value, label="target")
# Switch to auto-computing radial velocity
if self._observer is not None:
self._radial_velocity = None
@property
def radial_velocity(self):
"""
Radial velocity of target relative to the observer.
Returns
-------
`~astropy.units.Quantity` ['speed']
Radial velocity of target.
Notes
-----
This is different from the ``.radial_velocity`` property of a
coordinate frame in that this calculates the radial velocity with
respect to the *observer*, not the origin of the frame.
"""
if self._observer is None or self._target is None:
if self._radial_velocity is None:
return 0 * KMS
else:
return self._radial_velocity
return self._calculate_radial_velocity(self._observer, self._target)
@property
def redshift(self):
"""
Redshift of target relative to observer. Calculated from the radial
velocity.
Returns
-------
`astropy.units.Quantity`
Redshift of target.
"""
return self.radial_velocity.to(u.dimensionless_unscaled, u.doppler_redshift())
@staticmethod
def _calculate_radial_velocity(observer, target):
"""
Compute the line-of-sight velocity from the observer to the target.
Parameters
----------
observer : `~astropy.coordinates.BaseCoordinateFrame`
The frame of the observer.
target : `~astropy.coordinates.BaseCoordinateFrame`
The frame of the target.
Returns
-------
`~astropy.units.Quantity` ['speed']
The radial velocity of the target with respect to the observer.
"""
# Convert observer and target to ICRS to avoid finite differencing
# calculations that lack numerical precision.
observer_icrs = observer.transform_to(ICRS())
target_icrs = target.transform_to(ICRS())
pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs)
d_vel = target_icrs.velocity - observer_icrs.velocity
return pos_hat.dot(d_vel)
@staticmethod
def _normalized_position_vector(observer, target):
"""
Calculate the normalized position vector between two frames.
Parameters
----------
observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The observation frame or coordinate.
target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The target frame or coordinate.
Returns
-------
pos_hat : `BaseRepresentation`
Position representation.
"""
d_pos = (
target.cartesian.without_differentials()
- observer.cartesian.without_differentials()
)
dp_norm = d_pos.norm()
# Reset any that are 0 to 1 to avoid nans from 0/0
dp_norm[dp_norm == 0] = 1 * dp_norm.unit
return d_pos / dp_norm
@u.quantity_input(velocity=u.km / u.s)
def with_observer_stationary_relative_to(
self, frame, velocity=None, preserve_observer_frame=False
):
"""
A new `SpectralCoord` with the velocity of the observer altered,
but not the position.
If a coordinate frame is specified, the observer velocities will be
modified to be stationary in the specified frame. If a coordinate
instance is specified, optionally with non-zero velocities, the
observer velocities will be updated so that the observer is co-moving
with the specified coordinates.
Parameters
----------
frame : str, `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The observation frame in which the observer will be stationary. This
can be the name of a frame (e.g. 'icrs'), a frame class, frame instance
with no data, or instance with data. This can optionally include
velocities.
velocity : `~astropy.units.Quantity` or `~astropy.coordinates.CartesianDifferential`, optional
If ``frame`` does not contain velocities, these can be specified as
a 3-element `~astropy.units.Quantity`. In the case where this is
also not specified, the velocities default to zero.
preserve_observer_frame : bool
If `True`, the final observer frame class will be the same as the
original one, and if `False` it will be the frame of the velocity
reference class.
Returns
-------
new_coord : `SpectralCoord`
The new coordinate object representing the spectral data
transformed based on the observer's new velocity frame.
"""
if self.observer is None or self.target is None:
raise ValueError(
"This method can only be used if both observer "
"and target are defined on the SpectralCoord."
)
# Start off by extracting frame if a SkyCoord was passed in
if isinstance(frame, SkyCoord):
frame = frame.frame
if isinstance(frame, BaseCoordinateFrame):
if not frame.has_data:
frame = frame.realize_frame(
CartesianRepresentation(0 * u.km, 0 * u.km, 0 * u.km)
)
if frame.data.differentials:
if velocity is not None:
raise ValueError(
"frame already has differentials, cannot also specify velocity"
)
# otherwise frame is ready to go
else:
if velocity is None:
differentials = ZERO_VELOCITIES
else:
differentials = CartesianDifferential(velocity)
frame = frame.realize_frame(
frame.data.with_differentials(differentials)
)
if isinstance(frame, (type, str)):
if isinstance(frame, type):
frame_cls = frame
elif isinstance(frame, str):
frame_cls = frame_transform_graph.lookup_name(frame)
if velocity is None:
velocity = 0 * u.m / u.s, 0 * u.m / u.s, 0 * u.m / u.s
elif velocity.shape != (3,):
raise ValueError("velocity should be a Quantity vector with 3 elements")
frame = frame_cls(
0 * u.m,
0 * u.m,
0 * u.m,
*velocity,
representation_type="cartesian",
differential_type="cartesian",
)
observer = update_differentials_to_match(
self.observer, frame, preserve_observer_frame=preserve_observer_frame
)
# Calculate the initial and final los velocity
init_obs_vel = self._calculate_radial_velocity(self.observer, self.target)
fin_obs_vel = self._calculate_radial_velocity(observer, self.target)
# Apply transformation to data
new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel)
return self.replicate(value=new_data, observer=observer)
def with_radial_velocity_shift(self, target_shift=None, observer_shift=None):
"""
Apply a velocity shift to this spectral coordinate.
The shift can be provided as a redshift (float value) or radial
velocity (`~astropy.units.Quantity` with physical type of 'speed').
Parameters
----------
target_shift : float or `~astropy.units.Quantity` ['speed']
Shift value to apply to current target.
observer_shift : float or `~astropy.units.Quantity` ['speed']
Shift value to apply to current observer.
Returns
-------
`SpectralCoord`
New spectral coordinate with the target/observer velocity changed
to incorporate the shift. This is always a new object even if
``target_shift`` and ``observer_shift`` are both `None`.
"""
if observer_shift is not None and (
self.target is None or self.observer is None
):
raise ValueError(
"Both an observer and target must be defined "
"before applying a velocity shift."
)
for arg in [x for x in [target_shift, observer_shift] if x is not None]:
if isinstance(arg, u.Quantity) and not arg.unit.is_equivalent((u.one, KMS)):
raise u.UnitsError(
"Argument must have unit physical type 'speed' for radial velocty"
" or 'dimensionless' for redshift."
)
# The target or observer value is defined but is not a quantity object,
# assume it's a redshift float value and convert to velocity
if target_shift is None:
if self._observer is None or self._target is None:
return self.replicate()
target_shift = 0 * KMS
else:
target_shift = u.Quantity(target_shift)
if target_shift.unit.physical_type == "dimensionless":
target_shift = target_shift.to(u.km / u.s, u.doppler_redshift())
if self._observer is None or self._target is None:
return self.replicate(
value=_apply_relativistic_doppler_shift(self, target_shift),
radial_velocity=self.radial_velocity + target_shift,
)
if observer_shift is None:
observer_shift = 0 * KMS
else:
observer_shift = u.Quantity(observer_shift)
if observer_shift.unit.physical_type == "dimensionless":
observer_shift = observer_shift.to(u.km / u.s, u.doppler_redshift())
target_icrs = self._target.transform_to(ICRS())
observer_icrs = self._observer.transform_to(ICRS())
pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs)
target_velocity = _get_velocities(target_icrs) + target_shift * pos_hat
observer_velocity = _get_velocities(observer_icrs) + observer_shift * pos_hat
target_velocity = CartesianDifferential(target_velocity.xyz)
observer_velocity = CartesianDifferential(observer_velocity.xyz)
new_target = target_icrs.realize_frame(
target_icrs.cartesian.with_differentials(target_velocity)
).transform_to(self._target)
new_observer = observer_icrs.realize_frame(
observer_icrs.cartesian.with_differentials(observer_velocity)
).transform_to(self._observer)
init_obs_vel = self._calculate_radial_velocity(observer_icrs, target_icrs)
fin_obs_vel = self._calculate_radial_velocity(new_observer, new_target)
new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel)
return self.replicate(value=new_data, observer=new_observer, target=new_target)
def to_rest(self):
"""
Transforms the spectral axis to the rest frame.
"""
if self.observer is not None and self.target is not None:
return self.with_observer_stationary_relative_to(self.target)
result = _apply_relativistic_doppler_shift(self, -self.radial_velocity)
return self.replicate(value=result, radial_velocity=0.0 * KMS, redshift=None)
def __repr__(self):
prefixstr = "<" + self.__class__.__name__ + " "
try:
radial_velocity = self.radial_velocity
redshift = self.redshift
except ValueError:
radial_velocity = redshift = "Undefined"
repr_items = [f"{prefixstr}"]
if self.observer is not None:
observer_repr = indent(repr(self.observer), 14 * " ").lstrip()
repr_items.append(f" observer: {observer_repr}")
if self.target is not None:
target_repr = indent(repr(self.target), 12 * " ").lstrip()
repr_items.append(f" target: {target_repr}")
if (
self._observer is not None and self._target is not None
) or self._radial_velocity is not None:
if self.observer is not None and self.target is not None:
repr_items.append(" observer to target (computed from above):")
else:
repr_items.append(" observer to target:")
repr_items.append(f" radial_velocity={radial_velocity}")
repr_items.append(f" redshift={redshift}")
if self.doppler_rest is not None or self.doppler_convention is not None:
repr_items.append(f" doppler_rest={self.doppler_rest}")
repr_items.append(f" doppler_convention={self.doppler_convention}")
arrstr = np.array2string(self.view(np.ndarray), separator=", ", prefix=" ")
if len(repr_items) == 1:
repr_items[0] += f"{arrstr}{self._unitstr:s}"
else:
repr_items[1] = " (" + repr_items[1].lstrip()
repr_items[-1] += ")"
repr_items.append(f" {arrstr}{self._unitstr:s}")
return "\n".join(repr_items) + ">"
| SpectralCoord |
python | astropy__astropy | astropy/io/fits/file.py | {
"start": 3878,
"end": 28373
} | class ____:
"""
Represents a FITS file on disk (or in some other file-like object).
"""
def __init__(
self,
fileobj=None,
mode=None,
memmap=None,
overwrite=False,
cache=True,
*,
use_fsspec=None,
fsspec_kwargs=None,
decompress_in_memory=False,
):
self.strict_memmap = bool(memmap)
memmap = True if memmap is None else memmap
self._file = None
self.closed = False
self.binary = True
self.mode = mode
self.memmap = memmap
self.compression = None
self.readonly = False
self.writeonly = False
# Should the object be closed on error: see
# https://github.com/astropy/astropy/issues/6168
self.close_on_error = False
# Holds mmap instance for files that use mmap
self._mmap = None
if fileobj is None:
self.simulateonly = True
return
else:
self.simulateonly = False
if isinstance(fileobj, os.PathLike):
fileobj = os.fspath(fileobj)
if mode is not None and mode not in IO_FITS_MODES:
raise ValueError(f"Mode '{mode}' not recognized")
if isfile(fileobj):
objmode = _normalize_fits_mode(fileobj_mode(fileobj))
if mode is not None and mode != objmode:
raise ValueError(
f"Requested FITS mode '{mode}' not compatible with open file "
f"handle mode '{objmode}'"
)
mode = objmode
if mode is None:
mode = "readonly"
# Handle cloud-hosted files using the optional ``fsspec`` dependency
if (use_fsspec or _requires_fsspec(fileobj)) and mode != "ostream":
# Note: we don't use `get_readable_fileobj` as a context manager
# because io.fits takes care of closing files itself
fileobj = get_readable_fileobj(
fileobj,
encoding="binary",
use_fsspec=use_fsspec,
fsspec_kwargs=fsspec_kwargs,
close_files=False,
).__enter__()
# Handle raw URLs
if (
isinstance(fileobj, (str, bytes))
and mode not in ("ostream", "append", "update")
and _is_url(fileobj)
):
self.name = download_file(fileobj, cache=cache)
# Handle responses from URL requests that have already been opened
elif isinstance(fileobj, http.client.HTTPResponse):
if mode in ("ostream", "append", "update"):
raise ValueError(f"Mode {mode} not supported for HTTPResponse")
fileobj = io.BytesIO(fileobj.read())
else:
if isinstance(fileobj, path_like):
fileobj = os.path.expanduser(fileobj)
self.name = fileobj_name(fileobj)
self.mode = mode
# Underlying fileobj is a file-like object, but an actual file object
self.file_like = False
# Initialize the internal self._file object
if isfile(fileobj):
self._open_fileobj(fileobj, mode, overwrite)
elif isinstance(fileobj, (str, bytes)):
self._open_filename(fileobj, mode, overwrite)
else:
self._open_filelike(fileobj, mode, overwrite)
self.fileobj_mode = fileobj_mode(self._file)
if isinstance(fileobj, gzip.GzipFile):
self.compression = "gzip"
elif isinstance(fileobj, zipfile.ZipFile):
# Reading from zip files is supported but not writing (yet)
self.compression = "zip"
elif _is_bz2file(fileobj):
self.compression = "bzip2"
elif _is_lzmafile(fileobj):
self.compression = "lzma"
elif _is_lzwfile(fileobj):
self.compression = "lzw"
if (
self.compression is not None
and decompress_in_memory
and mode in ("readonly", "copyonwrite", "denywrite")
):
# By default blocks are decompressed on the fly, when calling
# self.read. This is good for memory usage, avoiding decompression
# of the whole file, but it can be slow. With
# decompress_in_memory=True it is possible to decompress instead
# the whole file in memory.
fd = self._file
self._file = io.BytesIO(self._file.read())
fd.close()
if mode in ("readonly", "copyonwrite", "denywrite") or (
self.compression and mode == "update"
):
self.readonly = True
elif mode == "ostream" or (self.compression and mode == "append"):
self.writeonly = True
# For 'ab+' mode, the pointer is at the end after the open in
# Linux, but is at the beginning in Solaris.
if mode == "ostream" or self.compression or not hasattr(self._file, "seek"):
# For output stream start with a truncated file.
# For compressed files we can't really guess at the size
self.size = 0
else:
pos = self._file.tell()
self._file.seek(0, 2)
self.size = self._file.tell()
self._file.seek(pos)
if self.memmap:
if not isfile(self._file):
self.memmap = False
elif not self.readonly and not self._mmap_available:
# Test mmap.flush--see
# https://github.com/astropy/astropy/issues/968
self.memmap = False
def __repr__(self):
return f"<{self.__module__}.{self.__class__.__name__} {self._file}>"
# Support the 'with' statement
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def readable(self):
if self.writeonly:
return False
return isreadable(self._file)
def read(self, size=None):
if not hasattr(self._file, "read"):
raise EOFError
try:
return self._file.read(size)
except OSError:
# On some versions of Python, it appears, GzipFile will raise an
# OSError if you try to read past its end (as opposed to just
# returning '')
if self.compression == "gzip":
return ""
raise
def readarray(self, size=None, offset=0, dtype=np.uint8, shape=None):
"""
Similar to file.read(), but returns the contents of the underlying
file as a numpy array (or mmap'd array if memmap=True) rather than a
string.
Usually it's best not to use the `size` argument with this method, but
it's provided for compatibility.
"""
if not hasattr(self._file, "read"):
raise EOFError
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
if size and size % dtype.itemsize != 0:
raise ValueError(f"size {size} not a multiple of {dtype}")
if isinstance(shape, int):
shape = (shape,)
if not (size or shape):
warnings.warn(
"No size or shape given to readarray(); assuming a shape of (1,)",
AstropyUserWarning,
)
shape = (1,)
if size and not shape:
shape = (size // dtype.itemsize,)
if size and shape:
actualsize = np.prod(shape) * dtype.itemsize
if actualsize > size:
raise ValueError(
f"size {size} is too few bytes for a {shape} array of {dtype}"
)
elif actualsize < size:
raise ValueError(
f"size {size} is too many bytes for a {shape} array of {dtype}"
)
filepos = self._file.tell()
try:
if self.memmap:
if self._mmap is None:
# Instantiate Memmap array of the file offset at 0 (so we
# can return slices of it to offset anywhere else into the
# file)
access_mode = MEMMAP_MODES[self.mode]
# For reasons unknown the file needs to point to (near)
# the beginning or end of the file. No idea how close to
# the beginning or end.
# If I had to guess there is some bug in the mmap module
# of CPython or perhaps in microsoft's underlying code
# for generating the mmap.
self._file.seek(0, 0)
# This would also work:
# self._file.seek(0, 2) # moves to the end
try:
self._mmap = mmap.mmap(
self._file.fileno(), 0, access=access_mode, offset=0
)
except OSError as exc:
# NOTE: mode='readonly' results in the memory-mapping
# using the ACCESS_COPY mode in mmap so that users can
# modify arrays. However, on some systems, the OS raises
# a '[Errno 12] Cannot allocate memory' OSError if the
# address space is smaller than the file. Also on windows
# a '[WinError 1455] The paging file is too small for
# this operation to complete' Windows error is raised or
# equiivalent a '[Errno 22] Invalid argument. The solution
# is to open the file in mode='denywrite', which at
# least allows the file to be opened even if the
# resulting arrays will be truly read-only.
if (
exc.errno == errno.ENOMEM
or (
exc.errno == errno.EINVAL
and getattr(exc, "winerror", 0) == 1455
)
) and self.mode == "readonly":
warnings.warn(
"Could not memory map array with "
"mode='readonly', falling back to "
"mode='denywrite', which means that "
"the array will be read-only",
AstropyUserWarning,
)
self._mmap = mmap.mmap(
self._file.fileno(),
0,
access=MEMMAP_MODES["denywrite"],
offset=0,
)
else:
raise
return np.ndarray(
shape=shape, dtype=dtype, offset=offset, buffer=self._mmap
)
else:
count = reduce(operator.mul, shape)
self._file.seek(offset)
data = _array_from_file(self._file, dtype, count)
return data.reshape(shape)
finally:
# Make sure we leave the file in the position we found it; on
# some platforms (e.g. Windows) mmaping a file handle can also
# reset its file pointer.
# Also for Windows when using mmap seek() may return weird
# negative values, which is fixed by calling tell() before.
self._file.tell()
self._file.seek(filepos)
def writable(self):
if self.readonly:
return False
return iswritable(self._file)
def write(self, string):
if self.simulateonly:
return
if hasattr(self._file, "write"):
_write_string(self._file, string)
def writearray(self, array):
"""
Similar to file.write(), but writes a numpy array instead of a string.
Also like file.write(), a flush() or close() may be needed before
the file on disk reflects the data written.
"""
if self.simulateonly:
return
if hasattr(self._file, "write"):
_array_to_file(array, self._file)
def flush(self):
if self.simulateonly:
return
if hasattr(self._file, "flush"):
self._file.flush()
def seek(self, offset, whence=0):
if not hasattr(self._file, "seek"):
return
self._file.seek(offset, whence)
pos = self._file.tell()
if self.size and pos > self.size:
warnings.warn(
"File may have been truncated: actual file length "
f"({self.size}) is smaller than the expected size ({pos})",
AstropyUserWarning,
)
def tell(self):
if self.simulateonly:
raise OSError
if not hasattr(self._file, "tell"):
raise EOFError
return self._file.tell()
def truncate(self, size=None):
if hasattr(self._file, "truncate"):
self._file.truncate(size)
def close(self):
"""
Close the 'physical' FITS file.
"""
if hasattr(self._file, "close"):
self._file.close()
self._maybe_close_mmap()
# Set self._memmap to None anyways since no new .data attributes can be
# loaded after the file is closed
self._mmap = None
self.closed = True
self.close_on_error = False
def _maybe_close_mmap(self, refcount_delta=0):
"""
When mmap is in use these objects hold a reference to the mmap of the
file (so there is only one, shared by all HDUs that reference this
file).
This will close the mmap if there are no arrays referencing it.
"""
# sys.getrefcount is CPython specific and not on PyPy.
if (
self._mmap is not None
and hasattr(sys, "getrefcount")
and sys.getrefcount(self._mmap) == 2 + refcount_delta
):
self._mmap.close()
self._mmap = None
def _overwrite_existing(self, overwrite, fileobj, closed):
"""Overwrite an existing file if ``overwrite`` is ``True``, otherwise
raise an OSError. The exact behavior of this method depends on the
_File object state and is only meant for use within the ``_open_*``
internal methods.
"""
# The file will be overwritten...
if (self.file_like and hasattr(fileobj, "len") and fileobj.len > 0) or (
os.path.exists(self.name) and os.path.getsize(self.name) != 0
):
if overwrite:
if self.file_like and hasattr(fileobj, "truncate"):
fileobj.truncate(0)
else:
if not closed:
fileobj.close()
os.remove(self.name)
else:
raise OSError(NOT_OVERWRITING_MSG.format(self.name))
def _try_read_compressed(self, obj_or_name, magic, mode, ext=""):
"""Attempt to determine if the given file is compressed."""
is_ostream = mode == "ostream"
if (is_ostream and ext == ".gz") or magic.startswith(GZIP_MAGIC):
if mode == "append":
raise OSError(
"'append' mode is not supported with gzip files."
"Use 'update' mode instead"
)
# Handle gzip files
kwargs = {"mode": IO_FITS_MODES[mode]}
if isinstance(obj_or_name, str):
kwargs["filename"] = obj_or_name
else:
kwargs["fileobj"] = obj_or_name
self._file = gzip.GzipFile(**kwargs)
self.compression = "gzip"
elif (is_ostream and ext == ".zip") or magic.startswith(PKZIP_MAGIC):
# Handle zip files
self._open_zipfile(self.name, mode)
self.compression = "zip"
elif (is_ostream and ext == ".bz2") or magic.startswith(BZIP2_MAGIC):
# Handle bzip2 files
if mode in ["update", "append"]:
raise OSError(
"update and append modes are not supported with bzip2 files"
)
if not HAS_BZ2:
raise ModuleNotFoundError(
"This Python installation does not provide the bz2 module."
)
# bzip2 only supports 'w' and 'r' modes
bzip2_mode = "w" if is_ostream else "r"
self._file = bz2.BZ2File(obj_or_name, mode=bzip2_mode)
self.compression = "bzip2"
elif (is_ostream and ext == ".xz") or magic.startswith(LZMA_MAGIC):
# Handle lzma files
if mode in ["update", "append"]:
raise OSError(
"update and append modes are not supported with lzma files"
)
if not HAS_LZMA:
raise ModuleNotFoundError(
"This Python installation does not provide the lzma module."
)
lzma_mode = "w" if is_ostream else "r"
self._file = lzma.LZMAFile(obj_or_name, mode=lzma_mode)
self.compression = "lzma"
elif (is_ostream and ext == ".Z") or magic.startswith(LZW_MAGIC):
# Handle LZW files
if mode in ["update", "append", "ostream"]:
raise OSError(f"{mode} mode not supported with LZW files")
if not HAS_UNCOMPRESSPY:
raise ModuleNotFoundError(
"The optional package uncompresspy is necessary for reading"
" LZW compressed files (.Z extension)."
)
self._file = uncompresspy.LZWFile(obj_or_name, mode="rb")
self.compression = "lzw"
return self.compression is not None
def _open_fileobj(self, fileobj, mode, overwrite):
"""Open a FITS file from a file object (including compressed files)."""
closed = fileobj_closed(fileobj)
# FIXME: this variable was unused, check if it was useful
# fmode = fileobj_mode(fileobj) or IO_FITS_MODES[mode]
if mode == "ostream":
self._overwrite_existing(overwrite, fileobj, closed)
if not closed:
self._file = fileobj
elif isfile(fileobj):
self._file = open(self.name, IO_FITS_MODES[mode])
# Attempt to determine if the file represented by the open file object
# is compressed
try:
# We need to account for the possibility that the underlying file
# handle may have been opened with either 'ab' or 'ab+', which
# means that the current file position is at the end of the file.
if mode in ["ostream", "append"]:
self._file.seek(0)
magic = self._file.read(6)
# No matter whether the underlying file was opened with 'ab' or
# 'ab+', we need to return to the beginning of the file in order
# to properly process the FITS header (and handle the possibility
# of a compressed file).
self._file.seek(0)
except OSError:
return
self._try_read_compressed(fileobj, magic, mode)
def _open_filelike(self, fileobj, mode, overwrite):
"""Open a FITS file from a file-like object, i.e. one that has
read and/or write methods.
"""
self.file_like = True
self._file = fileobj
if fileobj_closed(fileobj):
raise OSError(
f"Cannot read from/write to a closed file-like object ({fileobj!r})."
)
if isinstance(fileobj, zipfile.ZipFile):
self._open_zipfile(fileobj, mode)
# We can bypass any additional checks at this point since now
# self._file points to the temp file extracted from the zip
return
# If there is not seek or tell methods then set the mode to
# output streaming.
if not hasattr(self._file, "seek") or not hasattr(self._file, "tell"):
self.mode = mode = "ostream"
if mode == "ostream":
self._overwrite_existing(overwrite, fileobj, False)
# Any "writeable" mode requires a write() method on the file object
if self.mode in ("update", "append", "ostream") and not hasattr(
self._file, "write"
):
raise OSError(
"File-like object does not have a 'write' "
f"method, required for mode '{self.mode}'."
)
# Any mode except for 'ostream' requires readability
if self.mode != "ostream" and not hasattr(self._file, "read"):
raise OSError(
"File-like object does not have a 'read' "
f"method, required for mode {self.mode!r}."
)
def _open_filename(self, filename, mode, overwrite):
"""Open a FITS file from a filename string."""
if mode == "ostream":
self._overwrite_existing(overwrite, None, True)
if os.path.exists(self.name):
with open(self.name, "rb") as f:
magic = f.read(6)
else:
magic = b""
ext = os.path.splitext(self.name)[1]
if not self._try_read_compressed(self.name, magic, mode, ext=ext):
self._file = open(self.name, IO_FITS_MODES[mode])
self.close_on_error = True
# Make certain we're back at the beginning of the file
# BZ2File does not support seek when the file is open for writing, but
# when opening a file for write, bz2.BZ2File always truncates anyway.
if not (
(_is_bz2file(self._file) or (_is_lzmafile(self._file)))
and mode == "ostream"
):
self._file.seek(0)
@classproperty(lazy=True)
def _mmap_available(cls):
"""Tests that mmap, and specifically mmap.flush works. This may
be the case on some uncommon platforms (see
https://github.com/astropy/astropy/issues/968).
If mmap.flush is found not to work, ``self.memmap = False`` is
set and a warning is issued.
"""
tmpfd, tmpname = tempfile.mkstemp()
try:
# Windows does not allow mappings on empty files
os.write(tmpfd, b" ")
os.fsync(tmpfd)
try:
mm = mmap.mmap(tmpfd, 1, access=mmap.ACCESS_WRITE)
except OSError as exc:
warnings.warn(
f"Failed to create mmap: {exc}; mmap use will be disabled",
AstropyUserWarning,
)
del exc
return False
try:
mm.flush()
except OSError:
warnings.warn(
"mmap.flush is unavailable on this platform; "
"using mmap in writeable mode will be disabled",
AstropyUserWarning,
)
return False
finally:
mm.close()
finally:
os.close(tmpfd)
os.remove(tmpname)
return True
def _open_zipfile(self, fileobj, mode):
"""Limited support for zipfile.ZipFile objects containing a single
a file. Allows reading only for now by extracting the file to a
tempfile.
"""
if mode in ("update", "append"):
raise OSError("Writing to zipped fits files is not currently supported")
if not isinstance(fileobj, zipfile.ZipFile):
zfile = zipfile.ZipFile(fileobj)
close = True
else:
zfile = fileobj
close = False
namelist = zfile.namelist()
if len(namelist) != 1:
raise OSError("Zip files with multiple members are not supported.")
self._file = tempfile.NamedTemporaryFile(suffix=".fits")
self._file.write(zfile.read(namelist[0]))
if close:
zfile.close()
# We just wrote the contents of the first file in the archive to a new
# temp file, which now serves as our underlying file object. So it's
# necessary to reset the position back to the beginning
self._file.seek(0)
| _File |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/dms.py | {
"start": 24145,
"end": 30976
} | class ____(AwsBaseOperator[DmsHook]):
"""
Starts an AWS DMS Serverless replication.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DmsStartReplicationOperator`
:param replication_config_arn: ARN of the replication config
:param replication_start_type: Type of replication.
:param cdc_start_time: Start time of CDC
:param cdc_start_pos: Indicates when to start CDC.
:param cdc_stop_pos: Indicates when to stop CDC.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
"""
RUNNING_STATES = ["running"]
STARTABLE_STATES = ["stopped", "failed", "created"]
TERMINAL_STATES = ["failed", "stopped", "created"]
TERMINAL_PROVISION_STATES = ["deprovisioned", ""]
aws_hook_class = DmsHook
template_fields: Sequence[str] = aws_template_fields(
"replication_config_arn", "replication_start_type", "cdc_start_time", "cdc_start_pos", "cdc_stop_pos"
)
def __init__(
self,
*,
replication_config_arn: str,
replication_start_type: str,
cdc_start_time: datetime | str | None = None,
cdc_start_pos: str | None = None,
cdc_stop_pos: str | None = None,
wait_for_completion: bool = True,
waiter_delay: int = 30,
waiter_max_attempts: int = 60,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
aws_conn_id: str | None = "aws_default",
**kwargs,
):
super().__init__(
aws_conn_id=aws_conn_id,
**kwargs,
)
self.replication_config_arn = replication_config_arn
self.replication_start_type = replication_start_type
self.cdc_start_time = cdc_start_time
self.cdc_start_pos = cdc_start_pos
self.cdc_stop_pos = cdc_stop_pos
self.deferrable = deferrable
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.wait_for_completion = wait_for_completion
if self.cdc_start_time and self.cdc_start_pos:
raise AirflowException("Only one of cdc_start_time or cdc_start_pos should be provided.")
def execute(self, context: Context):
result = self.hook.describe_replications(
filters=[{"Name": "replication-config-arn", "Values": [self.replication_config_arn]}]
)
current_status = result[0].get("Status", "")
provision_status = self.hook.get_provision_status(replication_config_arn=self.replication_config_arn)
if provision_status == "deprovisioning":
# wait for deprovisioning to complete before start/restart
self.log.info(
"Replication is deprovisioning. Must wait for deprovisioning before running replication"
)
if self.deferrable:
self.log.info("Deferring until deprovisioning completes.")
self.defer(
trigger=DmsReplicationDeprovisionedTrigger(
replication_config_arn=self.replication_config_arn,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
aws_conn_id=self.aws_conn_id,
),
method_name="retry_execution",
)
else:
self.hook.get_waiter("replication_deprovisioned").wait(
Filters=[{"Name": "replication-config-arn", "Values": [self.replication_config_arn]}],
WaiterConfig={"Delay": self.waiter_delay, "MaxAttempts": self.waiter_max_attempts},
)
provision_status = self.hook.get_provision_status(
replication_config_arn=self.replication_config_arn
)
self.log.info("Replication deprovisioning complete. Provision status: %s", provision_status)
if (
current_status.lower() in self.STARTABLE_STATES
and provision_status in self.TERMINAL_PROVISION_STATES
):
resp = self.hook.start_replication(
replication_config_arn=self.replication_config_arn,
start_replication_type=self.replication_start_type,
cdc_start_time=self.cdc_start_time,
cdc_start_pos=self.cdc_start_pos,
cdc_stop_pos=self.cdc_stop_pos,
)
current_status = resp.get("Replication", {}).get("Status", "Unknown")
self.log.info(
"Replication(%s) started with status %s.",
self.replication_config_arn,
current_status,
)
if self.wait_for_completion:
self.log.info("Waiting for %s replication to complete.", self.replication_config_arn)
if self.deferrable:
self.log.info("Deferring until %s replication completes.", self.replication_config_arn)
self.defer(
trigger=DmsReplicationCompleteTrigger(
replication_config_arn=self.replication_config_arn,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
)
self.hook.get_waiter("replication_complete").wait(
Filters=[{"Name": "replication-config-arn", "Values": [self.replication_config_arn]}],
WaiterConfig={"Delay": self.waiter_delay, "MaxAttempts": self.waiter_max_attempts},
)
self.log.info("Replication(%s) has completed.", self.replication_config_arn)
else:
self.log.info("Replication(%s) is not in startable state.", self.replication_config_arn)
self.log.info("Status: %s Provision status: %s", current_status, provision_status)
def execute_complete(self, context, event=None):
self.replication_config_arn = event.get("replication_config_arn")
self.log.info("Replication(%s) has completed.", self.replication_config_arn)
def retry_execution(self, context, event=None):
self.replication_config_arn = event.get("replication_config_arn")
self.log.info("Retrying replication %s.", self.replication_config_arn)
self.execute(context)
| DmsStartReplicationOperator |
python | apache__airflow | providers/fab/src/airflow/providers/fab/auth_manager/schemas/user_schema.py | {
"start": 1193,
"end": 1877
} | class ____(SQLAlchemySchema):
"""user collection item schema."""
class Meta:
"""Meta."""
model = User
dateformat = "iso"
first_name = auto_field()
last_name = auto_field()
username = auto_field()
active = auto_field(dump_only=True)
email = auto_field()
last_login = auto_field(dump_only=True)
login_count = auto_field(dump_only=True)
fail_login_count = auto_field(dump_only=True)
roles = fields.List(fields.Nested(RoleSchema, only=("name",)))
created_on = auto_field(validate=validate_istimezone, dump_only=True)
changed_on = auto_field(validate=validate_istimezone, dump_only=True)
| UserCollectionItemSchema |
python | jupyterlab__jupyterlab | jupyterlab/semver.py | {
"start": 9495,
"end": 19578
} | class ____:
def __init__(self, version, loose):
logger.debug("SemVer %s, %s", version, loose)
self.loose = loose
self.raw = version
m = regexp[LOOSE if loose else FULL].search(version.strip())
if not m:
if not loose:
raise ValueError(f"Invalid Version: {version}")
m = regexp[RECOVERYVERSIONNAME].search(version.strip())
self.major = int(m.group(1)) if m.group(1) else 0
self.minor = int(m.group(2)) if m.group(2) else 0
self.patch = 0
if not m.group(3):
self.prerelease = []
else:
self.prerelease = [
(int(id_) if NUMERIC.search(id_) else id_) for id_ in m.group(3).split(".")
]
else:
# these are actually numbers
self.major = int(m.group(1))
self.minor = int(m.group(2))
self.patch = int(m.group(3))
# numberify any prerelease numeric ids
if not m.group(4):
self.prerelease = []
else:
self.prerelease = [
(int(id_) if NUMERIC.search(id_) else id_) for id_ in m.group(4).split(".")
]
if m.group(5):
self.build = m.group(5).split(".")
else:
self.build = []
self.format() # xxx:
def format(self):
self.version = f"{self.major}.{self.minor}.{self.patch}"
if len(self.prerelease) > 0:
self.version += "-{}".format(".".join(str(v) for v in self.prerelease))
return self.version
def __repr__(self):
return f"<SemVer {self} >"
def __str__(self):
return self.version
def compare(self, other):
logger.debug("SemVer.compare %s %s %s", self.version, self.loose, other)
if not isinstance(other, SemVer):
other = make_semver(other, self.loose)
result = self.compare_main(other) or self.compare_pre(other)
logger.debug("compare result %s", result)
return result
def compare_main(self, other):
if not isinstance(other, SemVer):
other = make_semver(other, self.loose)
return (
compare_identifiers(str(self.major), str(other.major))
or compare_identifiers(str(self.minor), str(other.minor))
or compare_identifiers(str(self.patch), str(other.patch))
)
def compare_pre(self, other): # noqa PLR0911
if not isinstance(other, SemVer):
other = make_semver(other, self.loose)
# NOT having a prerelease is > having one
is_self_more_than_zero = len(self.prerelease) > 0
is_other_more_than_zero = len(other.prerelease) > 0
if not is_self_more_than_zero and is_other_more_than_zero:
return 1
elif is_self_more_than_zero and not is_other_more_than_zero:
return -1
elif not is_self_more_than_zero and not is_other_more_than_zero:
return 0
i = 0
while True:
a = list_get(self.prerelease, i)
b = list_get(other.prerelease, i)
logger.debug("prerelease compare %s: %s %s", i, a, b)
i += 1
if a is None and b is None:
return 0
elif b is None:
return 1
elif a is None:
return -1
elif a == b:
continue
else:
return compare_identifiers(str(a), str(b))
def inc(self, release, identifier=None): # noqa PLR0915
logger.debug("inc release %s %s", self.prerelease, release)
if release == "premajor":
self.prerelease = []
self.patch = 0
self.minor = 0
self.major += 1
self.inc("pre", identifier=identifier)
elif release == "preminor":
self.prerelease = []
self.patch = 0
self.minor += 1
self.inc("pre", identifier=identifier)
elif release == "prepatch":
# If this is already a prerelease, it will bump to the next version
# drop any prereleases that might already exist, since they are not
# relevant at this point.
self.prerelease = []
self.inc("patch", identifier=identifier)
self.inc("pre", identifier=identifier)
elif release == "prerelease":
# If the input is a non-prerelease version, this acts the same as
# prepatch.
if len(self.prerelease) == 0:
self.inc("patch", identifier=identifier)
self.inc("pre", identifier=identifier)
elif release == "major":
# If this is a pre-major version, bump up to the same major version.
# Otherwise increment major.
# 1.0.0-5 bumps to 1.0.0
# 1.1.0 bumps to 2.0.0
if self.minor != 0 or self.patch != 0 or len(self.prerelease) == 0:
self.major += 1
self.minor = 0
self.patch = 0
self.prerelease = []
elif release == "minor":
# If this is a pre-minor version, bump up to the same minor version.
# Otherwise increment minor.
# 1.2.0-5 bumps to 1.2.0
# 1.2.1 bumps to 1.3.0
if self.patch != 0 or len(self.prerelease) == 0:
self.minor += 1
self.patch = 0
self.prerelease = []
elif release == "patch":
# If this is not a pre-release version, it will increment the patch.
# If it is a pre-release it will bump up to the same patch version.
# 1.2.0-5 patches to 1.2.0
# 1.2.0 patches to 1.2.1
if len(self.prerelease) == 0:
self.patch += 1
self.prerelease = []
elif release == "pre":
# This probably shouldn't be used publicly.
# 1.0.0 "pre" would become 1.0.0-0 which is the wrong direction.
logger.debug("inc prerelease %s", self.prerelease)
if len(self.prerelease) == 0:
self.prerelease = [0]
else:
i = len(self.prerelease) - 1
while i >= 0:
if isinstance(self.prerelease[i], int):
self.prerelease[i] += 1
i -= 2
i -= 1
# ## this is needless code in python ##
# if i == -1: # didn't increment anything
# self.prerelease.append(0)
if identifier is not None:
# 1.2.0-beta.1 bumps to 1.2.0-beta.2,
# 1.2.0-beta.fooblz or 1.2.0-beta bumps to 1.2.0-beta.0
if self.prerelease[0] == identifier:
if not isinstance(self.prerelease[1], int):
self.prerelease = [identifier, 0]
else:
self.prerelease = [identifier, 0]
else:
raise ValueError(f"invalid increment argument: {release}")
self.format()
self.raw = self.version
return self
def inc(version, release, loose, identifier=None): # wow!
try:
return make_semver(version, loose).inc(release, identifier=identifier).version
except Exception as e:
logger.debug(e, exc_info=5)
return None
def compare_identifiers(a, b):
anum = NUMERIC.search(a)
bnum = NUMERIC.search(b)
if anum and bnum:
a = int(a)
b = int(b)
if anum and not bnum:
return -1
elif bnum and not anum:
return 1
elif a < b:
return -1
elif a > b:
return 1
else:
return 0
def rcompare_identifiers(a, b):
return compare_identifiers(b, a)
def compare(a, b, loose):
return make_semver(a, loose).compare(b)
def compare_loose(a, b):
return compare(a, b, True)
def rcompare(a, b, loose):
return compare(b, a, loose)
def make_key_function(loose):
def key_function(version):
v = make_semver(version, loose)
key = (v.major, v.minor, v.patch)
if v.prerelease: # noqa SIM108
key = key + tuple(v.prerelease)
else:
# NOT having a prerelease is > having one
key = (*key, float("inf"))
return key
return key_function
loose_key_function = make_key_function(True)
full_key_function = make_key_function(True)
def sort(list_, loose):
keyf = loose_key_function if loose else full_key_function
list_.sort(key=keyf)
return list_
def rsort(list_, loose):
keyf = loose_key_function if loose else full_key_function
list_.sort(key=keyf, reverse=True)
return list_
def gt(a, b, loose):
return compare(a, b, loose) > 0
def lt(a, b, loose):
return compare(a, b, loose) < 0
def eq(a, b, loose):
return compare(a, b, loose) == 0
def neq(a, b, loose):
return compare(a, b, loose) != 0
def gte(a, b, loose):
return compare(a, b, loose) >= 0
def lte(a, b, loose):
return compare(a, b, loose) <= 0
def cmp(a, op, b, loose): # noqa PLR0911
logger.debug("cmp: %s", op)
if op == "===":
return a == b
elif op == "!==":
return a != b
elif op == "" or op == "=" or op == "==":
return eq(a, b, loose)
elif op == "!=":
return neq(a, b, loose)
elif op == ">":
return gt(a, b, loose)
elif op == ">=":
return gte(a, b, loose)
elif op == "<":
return lt(a, b, loose)
elif op == "<=":
return lte(a, b, loose)
else:
raise ValueError(f"Invalid operator: {op}")
def comparator(comp, loose):
if isinstance(comp, Comparator):
if comp.loose == loose:
return comp
else:
comp = comp.value
# if (!(this instanceof Comparator))
# return new Comparator(comp, loose)
return Comparator(comp, loose)
make_comparator = comparator
ANY = object()
| SemVer |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 35165,
"end": 36373
} | class ____:
def setup_method(self):
self.rng = np.random.default_rng(5861367021)
def test_rvs(self):
vals = stats.nbinom.rvs(10, 0.75, size=(2, 50), random_state=self.rng)
assert_(np.all(vals >= 0))
assert_(np.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.nbinom.rvs(10, 0.75, random_state=self.rng)
assert_(isinstance(val, int))
val = stats.nbinom(10, 0.75).rvs(3, random_state=self.rng)
assert_(isinstance(val, np.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
# regression test for ticket 1779
assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)),
stats.nbinom.pmf(700, 721, 0.52))
# logpmf(0,1,1) shouldn't return nan (regression test for gh-4029)
val = scipy.stats.nbinom.logpmf(0, 1, 1)
assert_equal(val, 0)
def test_logcdf_gh16159(self):
# check that gh16159 is resolved.
vals = stats.nbinom.logcdf([0, 5, 0, 5], n=4.8, p=0.45)
ref = np.log(stats.nbinom.cdf([0, 5, 0, 5], n=4.8, p=0.45))
assert_allclose(vals, ref)
| TestNBinom |
python | has2k1__plotnine | plotnine/scales/scale_xy.py | {
"start": 6955,
"end": 7143
} | class ____(scale_position_discrete):
"""
Discrete y position
"""
_aesthetics = ["y", "ymin", "ymax", "yend", "yintercept"]
# Not part of the user API
@alias
| scale_y_discrete |
python | tensorflow__tensorflow | tensorflow/core/function/trace_type/default_types.py | {
"start": 9484,
"end": 12282
} | class ____(trace.TraceType, serialization.Serializable):
"""Represents a list of TraceType objects."""
def __init__(self, *components: trace.TraceType):
self.components_tuple = Tuple(*components)
def is_subtype_of(self, other: trace.TraceType) -> bool:
if not isinstance(other, List):
return False
return self.components_tuple.is_subtype_of(other.components_tuple)
def most_specific_common_supertype(
self, others: Sequence[trace.TraceType]) -> Optional["Tuple"]:
"""See base class."""
if not all(isinstance(other, List) for other in others):
return None
supertyped_components_tuple = (
self.components_tuple.most_specific_common_supertype(
[other.components_tuple for other in others]
)
)
if supertyped_components_tuple is None:
return None
return List(*supertyped_components_tuple.components)
@classmethod
def experimental_type_proto(cls) -> Type[default_types_pb2.SerializedList]:
return default_types_pb2.SerializedList
@classmethod
def experimental_from_proto(
cls, proto: default_types_pb2.SerializedList) -> "List":
return List(
*Tuple.experimental_from_proto(proto.components_tuple).components)
def experimental_as_proto(self) -> default_types_pb2.SerializedList:
return default_types_pb2.SerializedList(
components_tuple=self.components_tuple.experimental_as_proto())
def placeholder_value(self, placeholder_context) -> Any:
return list(self.components_tuple.placeholder_value(placeholder_context))
def to_tensors(self, value):
assert isinstance(value, list)
return self.components_tuple.to_tensors(tuple(value))
def from_tensors(self, tensors) -> Any:
return list(self.components_tuple.from_tensors(tensors))
def flatten(self) -> PythonList[trace.TraceType]:
return self.components_tuple.flatten()
def cast(self, value: Any, casting_context) -> Any:
assert isinstance(value, list), f"Can not cast {value!r} to list type."
assert len(value) == len(
self.components_tuple.components
), f"Expected {value} to have length of {len(self.components_tuple)}"
casted_values, was_casted = util.cast_and_return_whether_casted(
self.components_tuple.components, value, casting_context
)
if was_casted:
return list(casted_values)
else:
return value
def __eq__(self, other: Any) -> bool:
if not isinstance(other, trace.TraceType):
return NotImplemented
if not isinstance(other, List):
return False
return self.components_tuple == other.components_tuple
def __hash__(self) -> int:
return hash(self.components_tuple)
def __repr__(self) -> str:
return f"List[{', '.join(map(repr, self.components_tuple.components))}]"
| List |
python | Textualize__textual | src/textual/scrollbar.py | {
"start": 1740,
"end": 7234
} | class ____:
VERTICAL_BARS: ClassVar[list[str]] = ["▁", "▂", "▃", "▄", "▅", "▆", "▇", " "]
"""Glyphs used for vertical scrollbar ends, for smoother display."""
HORIZONTAL_BARS: ClassVar[list[str]] = ["▉", "▊", "▋", "▌", "▍", "▎", "▏", " "]
"""Glyphs used for horizontal scrollbar ends, for smoother display."""
BLANK_GLYPH: ClassVar[str] = " "
"""Glyph used for the main body of the scrollbar"""
def __init__(
self,
virtual_size: int = 100,
window_size: int = 0,
position: float = 0,
thickness: int = 1,
vertical: bool = True,
style: StyleType = "bright_magenta on #555555",
) -> None:
self.virtual_size = virtual_size
self.window_size = window_size
self.position = position
self.thickness = thickness
self.vertical = vertical
self.style = style
@classmethod
def render_bar(
cls,
size: int = 25,
virtual_size: float = 50,
window_size: float = 20,
position: float = 0,
thickness: int = 1,
vertical: bool = True,
back_color: Color = Color.parse("#555555"),
bar_color: Color = Color.parse("bright_magenta"),
) -> Segments:
if vertical:
bars = cls.VERTICAL_BARS
else:
bars = cls.HORIZONTAL_BARS
back = back_color
bar = bar_color
len_bars = len(bars)
width_thickness = thickness if vertical else 1
_Segment = Segment
_Style = Style
blank = cls.BLANK_GLYPH * width_thickness
foreground_meta = {"@mouse.down": "grab"}
if window_size and size and virtual_size and size != virtual_size:
bar_ratio = virtual_size / size
thumb_size = max(1, window_size / bar_ratio)
position_ratio = position / (virtual_size - window_size)
position = (size - thumb_size) * position_ratio
start = int(position * len_bars)
end = start + ceil(thumb_size * len_bars)
start_index, start_bar = divmod(max(0, start), len_bars)
end_index, end_bar = divmod(max(0, end), len_bars)
upper = {"@mouse.down": "scroll_up"}
lower = {"@mouse.down": "scroll_down"}
upper_back_segment = Segment(blank, _Style(bgcolor=back, meta=upper))
lower_back_segment = Segment(blank, _Style(bgcolor=back, meta=lower))
segments = [upper_back_segment] * int(size)
segments[end_index:] = [lower_back_segment] * (size - end_index)
segments[start_index:end_index] = [
_Segment(blank, _Style(color=bar, reverse=True, meta=foreground_meta))
] * (end_index - start_index)
# Apply the smaller bar characters to head and tail of scrollbar for more "granularity"
if start_index < len(segments):
bar_character = bars[len_bars - 1 - start_bar]
if bar_character != " ":
segments[start_index] = _Segment(
bar_character * width_thickness,
(
_Style(bgcolor=back, color=bar, meta=foreground_meta)
if vertical
else _Style(
bgcolor=back,
color=bar,
meta=foreground_meta,
reverse=True,
)
),
)
if end_index < len(segments):
bar_character = bars[len_bars - 1 - end_bar]
if bar_character != " ":
segments[end_index] = _Segment(
bar_character * width_thickness,
(
_Style(
bgcolor=back,
color=bar,
meta=foreground_meta,
reverse=True,
)
if vertical
else _Style(bgcolor=back, color=bar, meta=foreground_meta)
),
)
else:
style = _Style(bgcolor=back)
segments = [_Segment(blank, style=style)] * int(size)
if vertical:
return Segments(segments, new_lines=True)
else:
return Segments((segments + [_Segment.line()]) * thickness, new_lines=False)
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
size = (
(options.height or console.height)
if self.vertical
else (options.max_width or console.width)
)
thickness = (
(options.max_width or console.width)
if self.vertical
else (options.height or console.height)
)
_style = console.get_style(self.style)
bar = self.render_bar(
size=size,
window_size=self.window_size,
virtual_size=self.virtual_size,
position=self.position,
vertical=self.vertical,
thickness=thickness,
back_color=_style.bgcolor or Color.parse("#555555"),
bar_color=_style.color or Color.parse("bright_magenta"),
)
yield bar
@rich.repr.auto
| ScrollBarRender |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF023.py | {
"start": 6131,
"end": 6882
} | class ____:
__slots__ = ()
__slots__ = []
__slots__ = ("single_item",)
__slots__ = (
"single_item_multiline",
)
__slots__ = {"single_item",}
__slots__ = {"single_item_no_trailing_comma": "docs for that"}
__slots__ = [
"single_item_multiline_no_trailing_comma"
]
__slots__ = ("not_a_tuple_just_a_string")
__slots__ = ["a", "b", "c", "d"]
__slots__ += ["e", "f", "g"]
__slots__ = ("a", "b", "c", "d")
if bool():
__slots__ += ("e", "f", "g")
else:
__slots__ += ["alpha", "omega"]
__slots__ = {"not": "sorted", "but": "includes", **a_kwarg_splat}
__slots__ = ("b", "a", "e", "d")
__slots__ = ["b", "a", "e", "d"]
__slots__ = ["foo", "bar", "antipasti"]
| Klass6 |
python | tensorflow__tensorflow | tensorflow/python/distribute/tpu_strategy.py | {
"start": 79888,
"end": 86880
} | class ____(distribute_lib.ReplicaContext):
"""Replication Context class for TPU Strategy."""
# TODO(sourabhbajaj): Call for each replica should be updating this.
# TODO(b/118385803): Always properly initialize replica_id.
def __init__(self, strategy, replica_id_in_sync_group=0):
distribute_lib.ReplicaContext.__init__(
self, strategy, replica_id_in_sync_group=replica_id_in_sync_group)
@property
def devices(self):
distribute_lib.require_replica_context(self)
ds = self._strategy
replica_id = tensor_util.constant_value(self.replica_id_in_sync_group)
if replica_id is None: # Non-constant `Tensor` inside `tpu.replicate`.
# TODO(cjfj): Return other devices when model parallelism is supported.
return (tpu.core(0),)
else:
return (ds.extended.worker_devices[replica_id],)
def experimental_logical_device(self, logical_device_id):
"""Places variables and ops on the specified logical device."""
return self.strategy.extended.experimental_logical_device(logical_device_id)
def _compute_all_gather_output_shape(self, value_shape, value_rank, axis):
if isinstance(value_rank, int):
output_shape = list(value_shape)
output_shape[axis] *= self.num_replicas_in_sync
else:
output_shape = array_ops.where_v2(
math_ops.equal(math_ops.range(value_rank), axis),
value_shape * context.num_replicas_in_sync,
value_shape)
return output_shape
def all_gather(self, value, axis, experimental_hints=None):
del experimental_hints
for v in nest.flatten(value):
if isinstance(v, indexed_slices.IndexedSlices):
raise NotImplementedError("all_gather does not support IndexedSlices")
def _all_gather_tensor(value, axis):
value = ops.convert_to_tensor(value)
# Compute the shape and rank and rank of the input tensor. Use static
# shapes when possible to help with shape inference in graph mode, but
# fall back on dynamic shapes when necessary.
if value.shape.rank is None:
value_rank = array_ops.rank(value)
value_shape = array_ops.shape(value)
else:
value_rank = value.shape.rank
value_shape = value.shape.as_list()
value_shape_tensor = array_ops.shape(value)
for i in range(len(value_shape)):
if value_shape[i] is None:
value_shape[i] = value_shape_tensor[i]
# In the code below, we will insert a new "replica" dimension immediately
# *before* `axis`. To ensure that it's inserted before and not after, we
# must make `axis` non-negative.
axis = _make_axis_nonnegative(axis, value_rank)
# Create a list or 1D int Tensor such as
# [1, 1, ..., 1, num_replicas_in_sync, 1, ..., 1],
# which is equal to `num_replicas_in_sync` at index `axis`
# and is equal to 1 everywhere else.
if isinstance(value_rank, int):
replica_broadcast_shape = [1] * (value_rank + 1)
replica_broadcast_shape[axis] = self.num_replicas_in_sync
else:
replica_broadcast_shape = array_ops.where_v2(
math_ops.equal(math_ops.range(value_rank+1), axis),
self.num_replicas_in_sync,
1)
output_shape = self._compute_all_gather_output_shape(
value_shape, value_rank, axis)
if value.dtype in _DTYPES_SUPPORTED_BY_CROSS_REPLICA_SUM:
# optimized all_gather implementation based on cross_replica_sum().
replica_id_mask = array_ops.one_hot(
self.replica_id_in_sync_group, self.num_replicas_in_sync)
replica_id_mask = array_ops.reshape(
replica_id_mask, replica_broadcast_shape)
replica_id_mask = math_ops.cast(replica_id_mask, value.dtype)
gathered_value = array_ops.expand_dims(value, axis) * replica_id_mask
gathered_value = self.all_reduce(
reduce_util.ReduceOp.SUM, gathered_value)
return array_ops.reshape(gathered_value, output_shape)
else:
# value.dtype isn't supported by cross_replica_sum(), so we fall back
# on a less efficient implementation based on all_to_all().
# The underlying AllToAllOp first do a split of the input value and then
# cross-replica communication and concatenation of the result. So we
# concatenate the local tensor here first.
inputs = array_ops.expand_dims(value, axis=axis)
inputs = array_ops.tile(inputs, replica_broadcast_shape)
unordered_output = tpu_ops.all_to_all(
inputs,
concat_dimension=axis,
split_dimension=axis,
split_count=self.num_replicas_in_sync)
# Re-order since xla.replica_id and ReplicaContext.replica_id mismatch.
# Start by computing a permutation -- a 1D Tensor which maps
# tensor[xla.replica_id] = ReplicaContext.replica_id
concat_replica_id = array_ops.reshape(
self.replica_id_in_sync_group, [1])
concat_replica_id = array_ops.tile(
concat_replica_id, [self.num_replicas_in_sync])
xla_to_replica_context_id = tpu_ops.all_to_all(
concat_replica_id,
concat_dimension=0,
split_dimension=0,
split_count=self.num_replicas_in_sync)
# Now invert the mapping to get
# tensor[ReplicaContext.replica_id] = xla.replica_id
replica_context_to_xla_id = math_ops.argmax(
array_ops.one_hot(xla_to_replica_context_id,
self.num_replicas_in_sync),
axis=0)
# Reorder the output elements so that they're sorted based on
# ReplicaContext.replica_id instead of xla.replica_id.
sorted_with_extra_dim = array_ops.gather(
unordered_output, replica_context_to_xla_id, axis=axis)
return array_ops.reshape(sorted_with_extra_dim, output_shape)
ys = [_all_gather_tensor(t, axis=axis) for t in nest.flatten(value)]
return nest.pack_sequence_as(value, ys)
def _set_last_step_outputs(ctx, last_step_tensor_outputs):
"""Sets the last step outputs on the given context."""
# Convert replicate_outputs to the original dict structure of
# last_step_outputs.
last_step_tensor_outputs_dict = nest.pack_sequence_as(
ctx.last_step_outputs, last_step_tensor_outputs)
for name, reduce_op in ctx._last_step_outputs_reduce_ops.items(): # pylint: disable=protected-access
output = last_step_tensor_outputs_dict[name]
# For outputs that aren't reduced, return a PerReplica of all values. Else
# take the first value from the list as each value should be the same.
if reduce_op is None:
last_step_tensor_outputs_dict[name] = values.PerReplica(output)
else:
# TODO(priyag): Should this return the element or a list with 1 element
last_step_tensor_outputs_dict[name] = output[0]
ctx._set_last_step_outputs(last_step_tensor_outputs_dict) # pylint: disable=protected-access
| _TPUReplicaContext |
python | django__django | tests/force_insert_update/models.py | {
"start": 424,
"end": 468
} | class ____(SubCounter):
pass
| SubSubCounter |
python | run-llama__llama_index | llama-index-integrations/indices/llama-index-indices-managed-colbert/llama_index/indices/managed/colbert/retriever.py | {
"start": 442,
"end": 2133
} | class ____(BaseRetriever):
"""
Vector index retriever.
Args:
index (ColbertIndex): Colbert index.
similarity_top_k (int): number of top k results to return.
filters (Optional[MetadataFilters]): metadata filters, defaults to None
doc_ids (Optional[List[str]]): list of documents to constrain search.
colbert_kwargs (dict): Additional colbert specific kwargs to pass
through to the colbert index at query time.
"""
def __init__(
self,
index: ColbertIndex,
similarity_top_k: int = DEFAULT_SIMILARITY_TOP_K,
filters: Optional[MetadataFilters] = None,
node_ids: Optional[List[str]] = None,
doc_ids: Optional[List[str]] = None,
callback_manager: Optional[CallbackManager] = None,
object_map: Optional[dict] = None,
verbose: bool = False,
**kwargs: Any,
) -> None:
"""Initialize params."""
self._index = index
self._docstore = self._index.docstore
self._similarity_top_k = similarity_top_k
self._node_ids = node_ids
self._doc_ids = doc_ids
self._filters = filters
self._kwargs: Dict[str, Any] = kwargs.get("colbert_kwargs", {})
super().__init__(
callback_manager=callback_manager or Settings.callback_manager,
object_map=object_map,
verbose=verbose,
)
def _retrieve(
self,
query_bundle: QueryBundle,
) -> List[NodeWithScore]:
return self._index.query(
query_str=query_bundle.query_str,
top_k=self._similarity_top_k,
**self._kwargs,
)
| ColbertRetriever |
python | PrefectHQ__prefect | src/prefect/exceptions.py | {
"start": 7310,
"end": 7467
} | class ____(PrefectException):
"""
Raised when a task relies on the result of another task but that task is not
'COMPLETE'
"""
| UpstreamTaskError |
python | facebook__pyre-check | tools/generate_taint_models/get_exit_nodes.py | {
"start": 435,
"end": 1603
} | class ____(ModelGenerator[CallableModel]):
def __init__(
self,
django_urls: DjangoUrls,
whitelisted_views: Optional[List[str]] = None,
taint_annotation: str = "TaintSink[ReturnedToUser]",
) -> None:
self.django_urls = django_urls
self.whitelisted_views: List[str] = whitelisted_views or []
self.taint_annotation: str = taint_annotation
def gather_functions_to_model(self) -> Iterable[Callable[..., object]]:
return get_all_views(self.django_urls)
def compute_models(
self, functions_to_model: Iterable[Callable[..., object]]
) -> Iterable[CallableModel]:
exit_nodes = set()
for view_function in functions_to_model:
qualified_name = extract_qualified_name(view_function)
if qualified_name in self.whitelisted_views:
continue
try:
model = CallableModel(
returns=self.taint_annotation, callable_object=view_function
)
exit_nodes.add(model)
except ValueError:
pass
return sorted(exit_nodes)
| ExitNodeGenerator |
python | tensorflow__tensorflow | tensorflow/python/ops/weak_tensor_np_array_ops_test.py | {
"start": 19008,
"end": 42039
} | class ____(test.TestCase, parameterized.TestCase):
def setUp(self):
super(ArrayMethodsTest, self).setUp()
set_up_virtual_devices()
self.array_transforms = [
lambda x: x,
_get_weak_tensor,
np_array_ops.array,
]
def testCopy(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
actual = np_array_ops.copy(arg, *args, **kwargs)
expected = np.copy(arg, *args, **kwargs)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
msg='copy({})'.format(arr),
)
run_test([])
run_test([1, 2, 3])
run_test([1., 2., 3.])
run_test(np.arange(9).reshape((3, 3)).tolist())
a = np_array_ops.asarray(0)
self.assertNotIn('CPU:1', a.backing_device)
with ops.device('CPU:1'):
self.assertIn('CPU:1', np_array_ops.array(a, copy=True)
.backing_device)
self.assertIn('CPU:1', np_array_ops.array(np.array(0), copy=True)
.backing_device)
def testCumProdAndSum(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
# Cumprod Test
actual = np_array_ops.cumprod(arg, *args, **kwargs)
expected = np.cumprod(arg, *args, **kwargs)
self.assertAllEqual(actual, expected)
if kwargs.get('dtype', None) is None:
self.match_dtype_and_type(
actual,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
)
else:
self.match_dtype_and_type(
actual,
flexible_dtypes.result_type(kwargs['dtype'])[0],
tensor.Tensor,
)
# Cumsum Test
actual = np_array_ops.cumsum(arg, *args, **kwargs)
expected = np.cumsum(arg, *args, **kwargs)
self.assertAllEqual(actual, expected)
if kwargs.get('dtype', None) is None:
self.match_dtype_and_type(
actual,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
)
else:
self.match_dtype_and_type(
actual,
flexible_dtypes.result_type(kwargs['dtype'])[0],
tensor.Tensor,
)
run_test([])
run_test([1, 2, 3])
run_test([1, 2, 3], dtype=float)
run_test([1, 2, 3], dtype=np.float32)
run_test([1, 2, 3], dtype=np.float64)
run_test([1., 2., 3.])
run_test([1., 2., 3.], dtype=int)
run_test([1., 2., 3.], dtype=np.int32)
run_test([1., 2., 3.], dtype=np.int64)
run_test([[1, 2], [3, 4]], axis=1)
run_test([[1, 2], [3, 4]], axis=0)
run_test([[1, 2], [3, 4]], axis=-1)
run_test([[1, 2], [3, 4]], axis=-2)
def testImag(self):
def run_test(arr, dtype):
for fn in self.array_transforms:
arg = fn(arr)
actual = np_array_ops.imag(arg)
# np.imag may return a scalar so we convert to a np.ndarray.
expected = np.array(np.imag(arg))
self.match_expected_attrs(actual, expected, dtype, WeakTensor)
# Weak complex128 input returns float64.
run_test(1, dtypes.int32)
run_test(5.5, dtypes.float32)
run_test(5 + 3j, dtypes.float64)
run_test(3j, dtypes.float64)
run_test([], dtypes.float32)
run_test([1, 2, 3], dtypes.int32)
run_test([1 + 5j, 2 + 3j], dtypes.float64)
run_test([[1 + 5j, 2 + 3j], [1 + 7j, 2 + 8j]], dtypes.float64)
def testAMaxAMin(self):
def run_test(arr, *args, **kwargs):
axis = kwargs.pop('axis', None)
for fn1 in self.array_transforms:
for fn2 in self.array_transforms:
arr_arg = fn1(arr)
axis_arg = fn2(axis) if axis is not None else None
actual = np_array_ops.amax(arr_arg, axis=axis_arg, *args, **kwargs)
expected = np.amax(arr_arg, axis=axis, *args, **kwargs)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
msg='amax({})'.format(arr),
)
actual = np_array_ops.amin(arr_arg, axis=axis_arg, *args, **kwargs)
expected = np.amin(arr_arg, axis=axis, *args, **kwargs)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
msg='amin({})'.format(arr),
)
run_test([1, 2, 3])
run_test([1., 2., 3.])
run_test([[1, 2], [3, 4]], axis=1)
run_test([[1, 2], [3, 4]], axis=0)
run_test([[1, 2], [3, 4]], axis=-1)
run_test([[1, 2], [3, 4]], axis=-2)
run_test([[1, 2], [3, 4]], axis=(0, 1))
run_test(np.arange(8).reshape((2, 2, 2)).tolist(), axis=(0, 2))
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(), axis=(0, 2), keepdims=True)
run_test(np.arange(8).reshape((2, 2, 2)).tolist(), axis=(2, 0))
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(), axis=(2, 0), keepdims=True)
self.assertRaises(ValueError, np_array_ops.amax, np.ones([2, 2]), out=[])
self.assertRaises(ValueError, np_array_ops.amin, np.ones([2, 2]), out=[])
def testMean(self):
def run_test(arr, *args, **kwargs):
axis = kwargs.pop('axis', None)
for fn1 in self.array_transforms:
for fn2 in self.array_transforms:
arr_arg = fn1(arr)
axis_arg = fn2(axis) if axis is not None else None
actual = np_array_ops.mean(arr_arg, axis=axis_arg, *args, **kwargs)
expected = np.mean(arr_arg, axis=axis, *args, **kwargs)
dtype = kwargs.get('dtype', None)
if dtype is None:
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
)
else:
self.match_expected_attrs(
actual,
expected,
flexible_dtypes.result_type(dtype)[0],
tensor.Tensor,
)
run_test([1, 2, 1])
run_test([1.0, 2.0, 1.0])
run_test([1.0, 2.0, 1.0], dtype=int)
run_test([[1, 2], [3, 4]], axis=1)
run_test([[1, 2], [3, 4]], axis=0)
run_test([[1, 2], [3, 4]], axis=-1)
run_test([[1, 2], [3, 4]], axis=-2)
run_test([[1, 2], [3, 4]], axis=(0, 1))
run_test(np.arange(8).reshape((2, 2, 2)).tolist(), axis=(0, 2))
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(),
axis=(0, 2),
keepdims=True,
)
run_test(np.arange(8).reshape((2, 2, 2)).tolist(), axis=(2, 0))
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(),
axis=(2, 0),
keepdims=True,
)
self.assertRaises(ValueError, np_array_ops.mean, np.ones([2, 2]), out=[])
def testStd(self):
def run_test(arr, res_dtype, *args, **kwargs):
axis = kwargs.pop('axis', None)
for fn1 in self.array_transforms:
for fn2 in self.array_transforms:
arr_arg = fn1(arr)
axis_arg = fn2(axis) if axis is not None else None
actual = np_array_ops.std(arr_arg, axis=axis_arg, *args, **kwargs)
expected = np.std(arr_arg, axis=axis, *args, **kwargs)
res_dtype = (
_NP_to_TF_result_inferred_types[expected.dtype]
if res_dtype is None
else res_dtype
)
self.match_expected_attrs(actual, expected, res_dtype, WeakTensor)
run_test([1, 2, 1], res_dtype=None)
run_test([1.0, 2.0, 1.0], res_dtype=None)
run_test([1.0j, 2.0, 1.0j], res_dtype=dtypes.float64)
run_test([[1, 2], [3, 4]], res_dtype=None, axis=1)
run_test([[1, 2], [3, 4]], res_dtype=None, axis=0)
run_test([[1, 2], [3, 4]], res_dtype=None, axis=-1)
run_test([[1, 2], [3, 4]], res_dtype=None, axis=-2)
run_test([[1, 2], [3, 4]], res_dtype=None, axis=(0, 1))
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(), res_dtype=None, axis=(0, 2)
)
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(),
res_dtype=None,
axis=(0, 2),
keepdims=True,
)
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(), res_dtype=None, axis=(2, 0)
)
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(),
res_dtype=None,
axis=(2, 0),
keepdims=True,
)
def testVar(self):
def run_test(arr, res_dtype, *args, **kwargs):
axis = kwargs.pop('axis', None)
for fn1 in self.array_transforms:
for fn2 in self.array_transforms:
arr_arg = fn1(arr)
axis_arg = fn2(axis) if axis is not None else None
actual = np_array_ops.var(arr_arg, axis=axis_arg, *args, **kwargs)
expected = np.var(arr_arg, axis=axis, *args, **kwargs)
dtype = kwargs.get('dtype', None)
res_type = tensor.Tensor if dtype is not None else WeakTensor
res_dtype = (
_NP_to_TF_result_inferred_types[expected.dtype]
if res_dtype is None
else res_dtype
)
self.match_expected_attrs(actual, expected, res_dtype, res_type)
# Input of weak complex type (complex 128) always outputs float64.
run_test([1, 2, 1], res_dtype=None)
run_test([1.0, 2.0, 1.0], res_dtype=None)
run_test([1.0j, 2.0, 1.0j], res_dtype=dtypes.float64)
run_test([1.0, 2.0, 1.0], res_dtype=dtypes.int64, dtype=np.int64)
run_test([[1, 2], [3, 4]], res_dtype=None, axis=1)
run_test([[1, 2], [3, 4]], res_dtype=None, axis=0)
run_test([[1, 2], [3, 4]], res_dtype=None, axis=-1)
run_test([[1, 2], [3, 4]], res_dtype=None, axis=-2)
run_test([[1, 2], [3, 4]], res_dtype=None, axis=(0, 1))
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(), res_dtype=None, axis=(0, 2)
)
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(),
axis=(0, 2),
res_dtype=None,
keepdims=True,
)
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(), res_dtype=None, axis=(2, 0)
)
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(),
axis=(2, 0),
res_dtype=None,
keepdims=True,
)
self.assertRaises(ValueError, np_array_ops.var, np.ones([2, 2]), out=[])
def testProd(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
actual = np_array_ops.prod(arg, *args, **kwargs)
expected = np.prod(arg, *args, **kwargs)
dtype = kwargs.get('dtype', None)
if dtype is None:
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
)
else:
self.match_expected_attrs(
actual,
expected,
flexible_dtypes.result_type(dtype)[0],
tensor.Tensor,
)
run_test([1, 2, 3])
run_test([1.0, 2.0, 3.0])
run_test(np.array([1, 2, 3]), dtype=np.int32)
run_test([[1, 2], [3, 4]], axis=1)
run_test([[1, 2], [3, 4]], axis=0)
run_test([[1, 2], [3, 4]], axis=-1)
run_test([[1, 2], [3, 4]], axis=-2)
run_test([[1, 2], [3, 4]], axis=(0, 1))
run_test(np.arange(8).reshape((2, 2, 2)).tolist(), axis=(0, 2))
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(),
axis=(0, 2),
keepdims=True,
)
run_test(np.arange(8).reshape((2, 2, 2)).tolist(), axis=(2, 0))
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(),
axis=(2, 0),
keepdims=True,
)
def _testReduce(self, math_fun, np_fun, name):
axis_transforms = [
lambda x: x, # Identity,
np_array_ops.array,
_get_weak_tensor,
]
def run_test(a, **kwargs):
axis = kwargs.pop('axis', None)
for fn1 in self.array_transforms:
for fn2 in axis_transforms:
arg1 = fn1(a)
axis_arg = fn2(axis) if axis is not None else None
actual = math_fun(arg1, axis=axis_arg, **kwargs)
expected = np_fun(arg1, axis=axis, **kwargs)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
msg='{}({}, axis={}, keepdims={})'.format(
name, arg1, axis, kwargs.get('keepdims')
),
)
run_test(5)
run_test([2, 3])
run_test([[2, -3], [-6, 7]])
run_test([[2, -3], [-6, 7]], axis=0)
run_test([[2, -3], [-6, 7]], axis=0, keepdims=True)
run_test([[2, -3], [-6, 7]], axis=1)
run_test([[2, -3], [-6, 7]], axis=1, keepdims=True)
run_test([[2, -3], [-6, 7]], axis=(0, 1))
run_test([[2, -3], [-6, 7]], axis=(1, 0))
def testSum(self):
self._testReduce(np_array_ops.sum, np.sum, 'sum')
def testAmax(self):
self._testReduce(np_array_ops.amax, np.amax, 'amax')
def testSize(self):
def run_test(arr, axis=None):
onp_arr = np.array(arr)
self.assertEqual(np_array_ops.size(arr, axis), np.size(onp_arr, axis))
run_test(np_array_ops.array([1]))
run_test(np_array_ops.array([1, 2, 3, 4, 5]))
run_test(np_array_ops.ones((2, 3, 2)))
run_test(np_array_ops.ones((3, 2)))
run_test(np_array_ops.zeros((5, 6, 7)))
run_test(1)
run_test(np_array_ops.ones((3, 2, 1)))
run_test(constant_op.constant(5))
run_test(constant_op.constant([1, 1, 1]))
self.assertRaises(NotImplementedError, np_array_ops.size, np.ones((2, 2)),
1)
@def_function.function(
input_signature=[
tensor_spec.TensorSpec(dtype=dtypes.float32, shape=None)
]
)
def f(arr):
arr = np_array_ops.asarray(arr)
return np_array_ops.size(arr)
self.assertEqual(f(np_array_ops.ones((3, 2))).numpy(), 6)
def testRavel(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
actual = np_array_ops.ravel(arg, *args, **kwargs)
expected = np.ravel(arg, *args, **kwargs)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
)
run_test(5)
run_test(5.)
run_test([])
run_test([[]])
run_test([[], []])
run_test([1, 2, 3])
run_test([1., 2., 3.])
run_test([[1, 2], [3, 4]])
run_test(np.arange(8).reshape((2, 2, 2)).tolist())
def testReal(self):
def run_test(arr, res_dtype, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
actual = np_array_ops.real(arg, *args, **kwargs)
expected = np.array(np.real(arg, *args, **kwargs))
res_dtype = (
_NP_to_TF_result_inferred_types[expected.dtype]
if res_dtype is None
else res_dtype
)
self.match_expected_attrs(
actual,
expected,
res_dtype,
WeakTensor,
)
run_test(1, None)
run_test(5.5, None)
run_test(5 + 3j, dtypes.float64)
run_test(3j, dtypes.float64)
run_test([], None)
run_test([1, 2, 3], None)
run_test([1 + 5j, 2 + 3j], dtypes.float64)
run_test([[1 + 5j, 2 + 3j], [1 + 7j, 2 + 8j]], dtypes.float64)
def testRepeat(self):
def run_test(arr, repeats, *args, **kwargs):
for fn1 in self.array_transforms:
for fn2 in self.array_transforms:
arr_arg = fn1(arr)
repeats_arg = fn2(repeats)
actual = np_array_ops.repeat(arr_arg, repeats_arg, *args, **kwargs)
expected = np.repeat(arr_arg, repeats_arg, *args, **kwargs)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
)
run_test(1, 2)
run_test([1, 2], 2)
run_test([1, 2], [2])
run_test([1, 2], [1, 2])
run_test([[1, 2], [3, 4]], 3, axis=0)
run_test([[1, 2], [3, 4]], 3, axis=1)
run_test([[1, 2], [3, 4]], [3], axis=0)
run_test([[1, 2], [3, 4]], [3], axis=1)
run_test([[1, 2], [3, 4]], [3, 2], axis=0)
run_test([[1, 2], [3, 4]], [3, 2], axis=1)
run_test([[1, 2], [3, 4]], [3, 2], axis=-1)
run_test([[1, 2], [3, 4]], [3, 2], axis=-2)
def testAround(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
actual = np_array_ops.around(arg, *args, **kwargs)
expected = np.around(arg, *args, **kwargs)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
)
run_test(5.5)
run_test(5.567, decimals=2)
run_test([])
run_test([1.27, 2.49, 2.75], decimals=1)
run_test([23.6, 45.1], decimals=-1)
def testReshape(self):
def run_test(arr, newshape, *args, **kwargs):
for fn1 in self.array_transforms:
for fn2 in self.array_transforms:
arr_arg = fn1(arr)
newshape_arg = fn2(newshape)
actual = np_array_ops.reshape(arr_arg, newshape_arg, *args, **kwargs)
expected = np.reshape(arr_arg, newshape, *args, **kwargs)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
)
run_test(5, [-1])
run_test([], [-1])
run_test([1, 2, 3], [1, 3])
run_test([1, 2, 3], [3, 1])
run_test([1, 2, 3, 4], [2, 2])
run_test([1, 2, 3, 4], [2, 1, 2])
def testExpandDims(self):
def run_test(arr, axis):
actual = np_array_ops.expand_dims(arr, axis)
expected = np.expand_dims(arr, axis)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
)
run_test([1, 2, 3], 0)
run_test([1, 2, 3], 1)
def testSqueeze(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
# Note: np.squeeze ignores the axis arg for non-ndarray objects.
# This looks like a bug: https://github.com/numpy/numpy/issues/8201
# So we convert the arg to np.ndarray before passing to np.squeeze.
actual = np_array_ops.squeeze(arg, *args, **kwargs)
expected = np.squeeze(np.array(arg), *args, **kwargs)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
)
run_test(5)
run_test([])
run_test([5])
run_test([[1, 2, 3]])
run_test([[[1], [2], [3]]])
run_test([[[1], [2], [3]]], axis=0)
run_test([[[1], [2], [3]]], axis=2)
run_test([[[1], [2], [3]]], axis=(0, 2))
run_test([[[1], [2], [3]]], axis=-1)
run_test([[[1], [2], [3]]], axis=-3)
def testTranspose(self):
def run_test(arr, axes=None):
for fn1 in self.array_transforms:
for fn2 in self.array_transforms:
arr_arg = fn1(arr)
axes_arg = fn2(axes) if axes is not None else None
actual = np_array_ops.transpose(arr_arg, axes_arg)
expected = np.transpose(arr_arg, axes)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
)
run_test(5)
run_test([])
run_test([5])
run_test([5, 6, 7])
run_test(np.arange(30).reshape(2, 3, 5).tolist())
run_test(np.arange(30).reshape(2, 3, 5).tolist(), [0, 1, 2])
run_test(np.arange(30).reshape(2, 3, 5).tolist(), [0, 2, 1])
run_test(np.arange(30).reshape(2, 3, 5).tolist(), [1, 0, 2])
run_test(np.arange(30).reshape(2, 3, 5).tolist(), [1, 2, 0])
run_test(np.arange(30).reshape(2, 3, 5).tolist(), [2, 0, 1])
run_test(np.arange(30).reshape(2, 3, 5).tolist(), [2, 1, 0])
def match_shape(self, actual, expected, msg=None):
if msg:
msg = 'Shape match failed for: {}. Expected: {} Actual: {}'.format(
msg, expected.shape, actual.shape)
self.assertEqual(actual.shape, expected.shape, msg=msg)
def match_dtype_and_type(self, actual, expected_dtype, res_type, msg=None):
if msg:
msg = (
'Dtype and type match failed for: {}. Expected dtype: {} Actual'
' dtype: {}. Expected type: {} Actual type: {}.'.format(
msg, expected_dtype, actual.dtype, res_type, type(actual)
)
)
self.assertIsInstance(actual, res_type, msg=msg)
self.assertEqual(actual.dtype, expected_dtype, msg=msg)
def match_expected_attrs(
self, actual, expected, expected_dtype, res_type, msg=None
):
msg_ = 'Expected: {} Actual: {}'.format(expected, actual)
if msg:
msg = '{} {}'.format(msg_, msg)
else:
msg = msg_
self.match_dtype_and_type(actual, expected_dtype, res_type, msg)
self.match_shape(actual, expected, msg)
if not actual.shape.rank:
self.assertAllClose(actual.tolist(), expected.tolist())
else:
self.assertAllClose(actual.tolist(), expected.tolist())
def testShape(self):
self.assertAllEqual((1, 2), np_array_ops.shape([[0, 0]]))
@parameterized.parameters(
([[1, 2, 3]], 0, 1, [[1], [2], [3]]),
([[1, 2, 3]], -2, -1, [[1], [2], [3]]),
(
[[[0, 1], [2, 3]], [[4, 5], [6, 7]]],
0,
2,
[[[0, 4], [2, 6]], [[1, 5], [3, 7]]],
),
(
[[[0, 1], [2, 3]], [[4, 5], [6, 7]]],
-3,
-1,
[[[0, 4], [2, 6]], [[1, 5], [3, 7]]],
),
)
def testSwapaxes(self, x, axis1, axis2, expected):
actual = np_array_ops.swapaxes(x, axis1, axis2)
self.assertIsInstance(actual, WeakTensor)
self.assertAllEqual(actual, expected)
def testMoveaxis(self):
def _test(a, *args):
# pylint: disable=no-value-for-parameter
expected = np.moveaxis(a, *args)
wt_a = _get_weak_tensor(a)
raw_ans = np_array_ops.moveaxis(wt_a, *args)
self.assertIsInstance(raw_ans, WeakTensor)
self.assertAllEqual(expected, raw_ans)
a = np.random.rand(1, 2, 3, 4, 5, 6)
# Basic
_test(a, (0, 2), (3, 5))
_test(a, (0, 2), (-1, -3))
_test(a, (-6, -4), (3, 5))
_test(a, (-6, -4), (-1, -3))
_test(a, 0, 4)
_test(a, -6, -2)
_test(a, tuple(range(6)), tuple(range(6)))
_test(a, tuple(range(6)), tuple(reversed(range(6))))
_test(a, (), ())
def testNdim(self):
self.assertAllEqual(0, np_array_ops.ndim(0.5))
self.assertAllEqual(1, np_array_ops.ndim([1, 2]))
if __name__ == '__main__':
ops.enable_eager_execution()
ops.set_dtype_conversion_mode('all')
np_math_ops.enable_numpy_methods_on_tensor()
test.main()
| ArrayMethodsTest |
python | weaviate__weaviate-python-client | weaviate/collections/classes/internal.py | {
"start": 7593,
"end": 7824
} | class ____(Generic[P, R]):
"""The return type of a query within the `.query` namespace of a collection."""
objects: List[Object[P, R]]
_GQLEntryReturnType: TypeAlias = Dict[str, List[Dict[str, Any]]]
@dataclass
| QueryReturn |
python | explosion__spaCy | spacy/lang/ru/__init__.py | {
"start": 401,
"end": 657
} | class ____(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
suffixes = COMBINING_DIACRITICS_TOKENIZER_SUFFIXES
infixes = COMBINING_DIACRITICS_TOKENIZER_INFIXES
| RussianDefaults |
python | pytorch__pytorch | torch/autograd/function.py | {
"start": 923,
"end": 11523
} | class ____:
def save_for_backward(self, *tensors: torch.Tensor):
r"""Save given tensors for a future call to :func:`~Function.backward`.
``save_for_backward`` should be called at most once, in either the
:func:`setup_context` or :func:`forward` methods, and only with tensors.
All tensors intended to be used in the backward pass should be saved
with ``save_for_backward`` (as opposed to directly on ``ctx``) to prevent
incorrect gradients and memory leaks, and enable the application of saved
tensor hooks. See :class:`torch.autograd.graph.saved_tensors_hooks`.
See :ref:`extending-autograd` for more details.
Note that if intermediary tensors, tensors that are neither inputs
nor outputs of :func:`forward`, are saved for backward, your custom Function
may not support double backward.
Custom Functions that do not support double backward should decorate their
:func:`backward` method with ``@once_differentiable`` so that performing
double backward raises an error. If you'd like to support double backward,
you can either recompute intermediaries based on the inputs during backward
or return the intermediaries as the outputs of the custom Function. See the
`double backward tutorial <https://pytorch.org/tutorials/intermediate/custom_function_double_backward_tutorial.html>`_
for more details.
In :func:`backward`, saved tensors can be accessed through the :attr:`saved_tensors`
attribute. Before returning them to the user, a check is made to ensure
they weren't used in any in-place operation that modified their content.
Arguments can also be ``None``. This is a no-op.
See :ref:`extending-autograd` for more details on how to use this method.
Example::
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
>>> class Func(Function):
>>> @staticmethod
>>> def forward(ctx, x: torch.Tensor, y: torch.Tensor, z: int):
>>> w = x * z
>>> out = x * y + y * z + w * y
>>> ctx.save_for_backward(x, y, w, out)
>>> ctx.z = z # z is not a tensor
>>> return out
>>>
>>> @staticmethod
>>> @once_differentiable
>>> def backward(ctx, grad_out):
>>> x, y, w, out = ctx.saved_tensors
>>> z = ctx.z
>>> gx = grad_out * (y + y * z)
>>> gy = grad_out * (x + z + w)
>>> gz = None
>>> return gx, gy, gz
>>>
>>> a = torch.tensor(1., requires_grad=True, dtype=torch.double)
>>> b = torch.tensor(2., requires_grad=True, dtype=torch.double)
>>> c = 4
>>> d = Func.apply(a, b, c)
"""
self.to_save = tensors
def save_for_forward(self, *tensors: torch.Tensor):
r"""Save given tensors for a future call to :func:`~Function.jvp`.
``save_for_forward`` should be called at most once, in either the
:func:`setup_context` or :func:`forward` methods, and all arguments
should be tensors.
In :func:`jvp`, saved objects can be accessed through the :attr:`saved_tensors`
attribute.
Arguments can also be ``None``. This is a no-op.
See :ref:`extending-autograd` for more details on how to use this method.
Example::
>>> # xdoctest: +SKIP
>>> class Func(torch.autograd.Function):
>>> @staticmethod
>>> def forward(ctx, x: torch.Tensor, y: torch.Tensor, z: int):
>>> ctx.save_for_backward(x, y)
>>> ctx.save_for_forward(x, y)
>>> ctx.z = z
>>> return x * y * z
>>>
>>> @staticmethod
>>> def jvp(ctx, x_t, y_t, _):
>>> x, y = ctx.saved_tensors
>>> z = ctx.z
>>> return z * (y * x_t + x * y_t)
>>>
>>> @staticmethod
>>> def vjp(ctx, grad_out):
>>> x, y = ctx.saved_tensors
>>> z = ctx.z
>>> return z * grad_out * y, z * grad_out * x, None
>>>
>>> a = torch.tensor(1., requires_grad=True, dtype=torch.double)
>>> t = torch.tensor(1., dtype=torch.double)
>>> b = torch.tensor(2., requires_grad=True, dtype=torch.double)
>>> c = 4
>>>
>>> with fwAD.dual_level():
>>> a_dual = fwAD.make_dual(a, t)
>>> d = Func.apply(a_dual, b, c)
"""
for tensor in tensors:
if not (isinstance(tensor, torch.Tensor) or tensor is None):
raise AssertionError(
"save_for_forward expects all arguments to be tensors; you should "
"save non-tensors as attributes on ctx."
)
self.saved_for_forward = tensors
def mark_dirty(self, *args: torch.Tensor):
r"""Mark given tensors as modified in an in-place operation.
This should be called at most once, in either the :func:`setup_context`
or :func:`forward` methods, and all arguments should be inputs.
Every tensor that's been modified in-place in a call to :func:`forward`
should be given to this function, to ensure correctness of our checks.
It doesn't matter whether the function is called before or after
modification.
Examples::
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
>>> class Inplace(Function):
>>> @staticmethod
>>> def forward(ctx, x):
>>> x_npy = x.numpy() # x_npy shares storage with x
>>> x_npy += 1
>>> ctx.mark_dirty(x)
>>> return x
>>>
>>> @staticmethod
>>> @once_differentiable
>>> def backward(ctx, grad_output):
>>> return grad_output
>>>
>>> a = torch.tensor(1., requires_grad=True, dtype=torch.double).clone()
>>> b = a * a
>>> Inplace.apply(a) # This would lead to wrong gradients!
>>> # but the engine would not know unless we mark_dirty
>>> # xdoctest: +SKIP
>>> b.backward() # RuntimeError: one of the variables needed for gradient
>>> # computation has been modified by an inplace operation
"""
self.dirty_tensors = args
@deprecated(
"`mark_shared_storage` is deprecated. "
"Tensors with shared storages are automatically tracked. "
"Note that calls to `set_()` are not tracked",
category=FutureWarning,
)
def mark_shared_storage(self, *pairs):
pass
def mark_non_differentiable(self, *args: torch.Tensor):
r"""Mark outputs as non-differentiable.
This should be called at most once, in either the :func:`setup_context`
or :func:`forward` methods, and all arguments should be tensor outputs.
This will mark outputs as not requiring gradients, increasing the
efficiency of backward computation. You still need to accept a gradient
for each output in :meth:`~Function.backward`, but it's always going to
be a zero tensor with the same shape as the shape of a corresponding
output.
This is used e.g. for indices returned from a sort. See example::
>>> class Func(Function):
>>> @staticmethod
>>> def forward(ctx, x):
>>> sorted, idx = x.sort()
>>> ctx.mark_non_differentiable(idx)
>>> ctx.save_for_backward(x, idx)
>>> return sorted, idx
>>>
>>> @staticmethod
>>> @once_differentiable
>>> def backward(ctx, g1, g2): # still need to accept g2
>>> x, idx = ctx.saved_tensors
>>> grad_input = torch.zeros_like(x)
>>> grad_input.index_add_(0, idx, g1)
>>> return grad_input
"""
self.non_differentiable = args
def set_materialize_grads(self, value: bool):
r"""Set whether to materialize grad tensors. Default is ``True``.
This should be called only from either the :func:`setup_context` or
:func:`forward` methods.
If ``True``, undefined grad tensors will be expanded to tensors full of zeros
prior to calling the :func:`backward` and :func:`jvp` methods.
Example::
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
>>> class SimpleFunc(Function):
>>> @staticmethod
>>> def forward(ctx, x):
>>> return x.clone(), x.clone()
>>>
>>> @staticmethod
>>> @once_differentiable
>>> def backward(ctx, g1, g2):
>>> return g1 + g2 # No check for None necessary
>>>
>>> # We modify SimpleFunc to handle non-materialized grad outputs
>>> class Func(Function):
>>> @staticmethod
>>> def forward(ctx, x):
>>> ctx.set_materialize_grads(False)
>>> ctx.save_for_backward(x)
>>> return x.clone(), x.clone()
>>>
>>> @staticmethod
>>> @once_differentiable
>>> def backward(ctx, g1, g2):
>>> x, = ctx.saved_tensors
>>> grad_input = torch.zeros_like(x)
>>> if g1 is not None: # We must check for None now
>>> grad_input += g1
>>> if g2 is not None:
>>> grad_input += g2
>>> return grad_input
>>>
>>> a = torch.tensor(1., requires_grad=True)
>>> b, _ = Func.apply(a) # induces g2 to be undefined
"""
self.materialize_grads = value
# DO NOT USE: This is only defined to be able to load old serialized models
_ContextMethodMixin = FunctionCtx
| FunctionCtx |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_single.py | {
"start": 82498,
"end": 83527
} | class ____(AssertsCompiledSQL, fixtures.TestBase):
def test_discrim_on_column_prop(self, registry):
Base = registry.generate_base()
class Employee(Base):
__tablename__ = "employee"
id = Column(Integer, primary_key=True)
type = Column(String(20))
__mapper_args__ = {
"polymorphic_on": "type",
"polymorphic_identity": "employee",
}
class Engineer(Employee):
__mapper_args__ = {"polymorphic_identity": "engineer"}
class Company(Base):
__tablename__ = "company"
id = Column(Integer, primary_key=True)
max_engineer_id = column_property(
select(func.max(Engineer.id)).scalar_subquery()
)
self.assert_compile(
select(Company.max_engineer_id),
"SELECT (SELECT max(employee.id) AS max_1 FROM employee "
"WHERE employee.type IN (__[POSTCOMPILE_type_1])) AS anon_1",
)
| ColExprTest |
python | encode__django-rest-framework | tests/test_write_only_fields.py | {
"start": 75,
"end": 909
} | class ____(TestCase):
def setUp(self):
class ExampleSerializer(serializers.Serializer):
email = serializers.EmailField()
password = serializers.CharField(write_only=True)
self.Serializer = ExampleSerializer
def test_write_only_fields_are_present_on_input(self):
data = {
'email': 'foo@example.com',
'password': '123'
}
serializer = self.Serializer(data=data)
assert serializer.is_valid()
assert serializer.validated_data == data
def test_write_only_fields_are_not_present_on_output(self):
instance = {
'email': 'foo@example.com',
'password': '123'
}
serializer = self.Serializer(instance)
assert serializer.data == {'email': 'foo@example.com'}
| WriteOnlyFieldTests |
python | numpy__numpy | numpy/ma/tests/test_extras.py | {
"start": 58815,
"end": 68788
} | class ____:
def test_unique_onlist(self):
# Test unique on list
data = [1, 1, 1, 2, 2, 3]
test = unique(data, return_index=True, return_inverse=True)
assert_(isinstance(test[0], MaskedArray))
assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0]))
assert_equal(test[1], [0, 3, 5])
assert_equal(test[2], [0, 0, 0, 1, 1, 2])
def test_unique_onmaskedarray(self):
# Test unique on masked data w/use_mask=True
data = masked_array([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0])
test = unique(data, return_index=True, return_inverse=True)
assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1]))
assert_equal(test[1], [0, 3, 5, 2])
assert_equal(test[2], [0, 0, 3, 1, 3, 2])
#
data.fill_value = 3
data = masked_array(data=[1, 1, 1, 2, 2, 3],
mask=[0, 0, 1, 0, 1, 0], fill_value=3)
test = unique(data, return_index=True, return_inverse=True)
assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1]))
assert_equal(test[1], [0, 3, 5, 2])
assert_equal(test[2], [0, 0, 3, 1, 3, 2])
def test_unique_allmasked(self):
# Test all masked
data = masked_array([1, 1, 1], mask=True)
test = unique(data, return_index=True, return_inverse=True)
assert_equal(test[0], masked_array([1, ], mask=[True]))
assert_equal(test[1], [0])
assert_equal(test[2], [0, 0, 0])
#
# Test masked
data = masked
test = unique(data, return_index=True, return_inverse=True)
assert_equal(test[0], masked_array(masked))
assert_equal(test[1], [0])
assert_equal(test[2], [0])
def test_ediff1d(self):
# Tests mediff1d
x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
control = array([1, 1, 1, 4], mask=[1, 0, 0, 1])
test = ediff1d(x)
assert_equal(test, control)
assert_equal(test.filled(0), control.filled(0))
assert_equal(test.mask, control.mask)
def test_ediff1d_tobegin(self):
# Test ediff1d w/ to_begin
x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
test = ediff1d(x, to_begin=masked)
control = array([0, 1, 1, 1, 4], mask=[1, 1, 0, 0, 1])
assert_equal(test, control)
assert_equal(test.filled(0), control.filled(0))
assert_equal(test.mask, control.mask)
#
test = ediff1d(x, to_begin=[1, 2, 3])
control = array([1, 2, 3, 1, 1, 1, 4], mask=[0, 0, 0, 1, 0, 0, 1])
assert_equal(test, control)
assert_equal(test.filled(0), control.filled(0))
assert_equal(test.mask, control.mask)
def test_ediff1d_toend(self):
# Test ediff1d w/ to_end
x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
test = ediff1d(x, to_end=masked)
control = array([1, 1, 1, 4, 0], mask=[1, 0, 0, 1, 1])
assert_equal(test, control)
assert_equal(test.filled(0), control.filled(0))
assert_equal(test.mask, control.mask)
#
test = ediff1d(x, to_end=[1, 2, 3])
control = array([1, 1, 1, 4, 1, 2, 3], mask=[1, 0, 0, 1, 0, 0, 0])
assert_equal(test, control)
assert_equal(test.filled(0), control.filled(0))
assert_equal(test.mask, control.mask)
def test_ediff1d_tobegin_toend(self):
# Test ediff1d w/ to_begin and to_end
x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
test = ediff1d(x, to_end=masked, to_begin=masked)
control = array([0, 1, 1, 1, 4, 0], mask=[1, 1, 0, 0, 1, 1])
assert_equal(test, control)
assert_equal(test.filled(0), control.filled(0))
assert_equal(test.mask, control.mask)
#
test = ediff1d(x, to_end=[1, 2, 3], to_begin=masked)
control = array([0, 1, 1, 1, 4, 1, 2, 3],
mask=[1, 1, 0, 0, 1, 0, 0, 0])
assert_equal(test, control)
assert_equal(test.filled(0), control.filled(0))
assert_equal(test.mask, control.mask)
def test_ediff1d_ndarray(self):
# Test ediff1d w/ a ndarray
x = np.arange(5)
test = ediff1d(x)
control = array([1, 1, 1, 1], mask=[0, 0, 0, 0])
assert_equal(test, control)
assert_(isinstance(test, MaskedArray))
assert_equal(test.filled(0), control.filled(0))
assert_equal(test.mask, control.mask)
#
test = ediff1d(x, to_end=masked, to_begin=masked)
control = array([0, 1, 1, 1, 1, 0], mask=[1, 0, 0, 0, 0, 1])
assert_(isinstance(test, MaskedArray))
assert_equal(test.filled(0), control.filled(0))
assert_equal(test.mask, control.mask)
def test_intersect1d(self):
# Test intersect1d
x = array([1, 3, 3, 3], mask=[0, 0, 0, 1])
y = array([3, 1, 1, 1], mask=[0, 0, 0, 1])
test = intersect1d(x, y)
control = array([1, 3, -1], mask=[0, 0, 1])
assert_equal(test, control)
def test_setxor1d(self):
# Test setxor1d
a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
test = setxor1d(a, b)
assert_equal(test, array([3, 4, 7]))
#
a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
b = [1, 2, 3, 4, 5]
test = setxor1d(a, b)
assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1]))
#
a = array([1, 2, 3])
b = array([6, 5, 4])
test = setxor1d(a, b)
assert_(isinstance(test, MaskedArray))
assert_equal(test, [1, 2, 3, 4, 5, 6])
#
a = array([1, 8, 2, 3], mask=[0, 1, 0, 0])
b = array([6, 5, 4, 8], mask=[0, 0, 0, 1])
test = setxor1d(a, b)
assert_(isinstance(test, MaskedArray))
assert_equal(test, [1, 2, 3, 4, 5, 6])
#
assert_array_equal([], setxor1d([], []))
def test_setxor1d_unique(self):
# Test setxor1d with assume_unique=True
a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
b = [1, 2, 3, 4, 5]
test = setxor1d(a, b, assume_unique=True)
assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1]))
#
a = array([1, 8, 2, 3], mask=[0, 1, 0, 0])
b = array([6, 5, 4, 8], mask=[0, 0, 0, 1])
test = setxor1d(a, b, assume_unique=True)
assert_(isinstance(test, MaskedArray))
assert_equal(test, [1, 2, 3, 4, 5, 6])
#
a = array([[1], [8], [2], [3]])
b = array([[6, 5], [4, 8]])
test = setxor1d(a, b, assume_unique=True)
assert_(isinstance(test, MaskedArray))
assert_equal(test, [1, 2, 3, 4, 5, 6])
def test_isin(self):
# the tests for in1d cover most of isin's behavior
# if in1d is removed, would need to change those tests to test
# isin instead.
a = np.arange(24).reshape([2, 3, 4])
mask = np.zeros([2, 3, 4])
mask[1, 2, 0] = 1
a = array(a, mask=mask)
b = array(data=[0, 10, 20, 30, 1, 3, 11, 22, 33],
mask=[0, 1, 0, 1, 0, 1, 0, 1, 0])
ec = zeros((2, 3, 4), dtype=bool)
ec[0, 0, 0] = True
ec[0, 0, 1] = True
ec[0, 2, 3] = True
c = isin(a, b)
assert_(isinstance(c, MaskedArray))
assert_array_equal(c, ec)
# compare results of np.isin to ma.isin
d = np.isin(a, b[~b.mask]) & ~a.mask
assert_array_equal(c, d)
def test_in1d(self):
# Test in1d
a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
test = in1d(a, b)
assert_equal(test, [True, True, True, False, True])
#
a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1])
b = array([1, 5, -1], mask=[0, 0, 1])
test = in1d(a, b)
assert_equal(test, [True, True, False, True, True])
#
assert_array_equal([], in1d([], []))
def test_in1d_invert(self):
# Test in1d's invert parameter
a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))
a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1])
b = array([1, 5, -1], mask=[0, 0, 1])
assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))
assert_array_equal([], in1d([], [], invert=True))
def test_union1d(self):
# Test union1d
a = array([1, 2, 5, 7, 5, -1], mask=[0, 0, 0, 0, 0, 1])
b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
test = union1d(a, b)
control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1])
assert_equal(test, control)
# Tests gh-10340, arguments to union1d should be
# flattened if they are not already 1D
x = array([[0, 1, 2], [3, 4, 5]], mask=[[0, 0, 0], [0, 0, 1]])
y = array([0, 1, 2, 3, 4], mask=[0, 0, 0, 0, 1])
ez = array([0, 1, 2, 3, 4, 5], mask=[0, 0, 0, 0, 0, 1])
z = union1d(x, y)
assert_equal(z, ez)
#
assert_array_equal([], union1d([], []))
def test_setdiff1d(self):
# Test setdiff1d
a = array([6, 5, 4, 7, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 0, 1])
b = array([2, 4, 3, 3, 2, 1, 5])
test = setdiff1d(a, b)
assert_equal(test, array([6, 7, -1], mask=[0, 0, 1]))
#
a = arange(10)
b = arange(8)
assert_equal(setdiff1d(a, b), array([8, 9]))
a = array([], np.uint32, mask=[])
assert_equal(setdiff1d(a, []).dtype, np.uint32)
def test_setdiff1d_char_array(self):
# Test setdiff1d_charray
a = np.array(['a', 'b', 'c'])
b = np.array(['a', 'b', 's'])
assert_array_equal(setdiff1d(a, b), np.array(['c']))
| TestArraySetOps |
python | coleifer__peewee | tests/regressions.py | {
"start": 54792,
"end": 54856
} | class ____(TestModel):
a = TextField()
b = TextField()
| CQA |
python | run-llama__llama_index | llama-index-core/llama_index/core/instrumentation/events/synthesis.py | {
"start": 497,
"end": 856
} | class ____(BaseEvent):
"""
SynthesizeEndEvent.
Args:
query (QueryType): Query as a string or query bundle.
response (RESPONSE_TYPE): Response.
"""
query: QueryType
response: RESPONSE_TYPE
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "SynthesizeEndEvent"
| SynthesizeEndEvent |
python | walkccc__LeetCode | solutions/2431. Maximize Total Tastiness of Purchased Fruits/2431.py | {
"start": 0,
"end": 1315
} | class ____:
def maxTastiness(
self,
price: list[int],
tastiness: list[int],
maxAmount: int,
maxCoupons: int,
) -> int:
n = len(price)
# dp[i][j][k] := the maximum tastiness of the first i price with j amount of
# money and k coupons
dp = [[[0] * (maxCoupons + 1)
for j in range(maxAmount + 1)]
for i in range(n + 1)]
for i in range(1, n + 1):
# 1-indexed
currPrice = price[i - 1]
currTastiness = tastiness[i - 1]
for amount in range(maxAmount + 1):
for coupon in range(maxCoupons + 1):
# 1. Don't buy, the tastiness will be the same as the first i - 1
# price.
dp[i][amount][coupon] = dp[i - 1][amount][coupon]
# 2. Buy without coupon if have enough money.
if amount >= currPrice:
dp[i][amount][coupon] = max(
dp[i][amount][coupon],
dp[i - 1][amount - currPrice][coupon] + currTastiness)
# 3. Buy with coupon if have coupon and enough money.
if coupon > 0 and amount >= currPrice // 2:
dp[i][amount][coupon] = max(
dp[i][amount][coupon],
dp[i - 1][amount - currPrice // 2][coupon - 1] + currTastiness)
return dp[n][maxAmount][maxCoupons]
| Solution |
python | spack__spack | lib/spack/spack/hash_types.py | {
"start": 321,
"end": 2425
} | class ____:
"""This class defines how hashes are generated on Spec objects.
Spec hashes in Spack are generated from a serialized (e.g., with
YAML) representation of the Spec graph. The representation may only
include certain dependency types, and it may optionally include a
canonicalized hash of the package.py for each node in the graph.
We currently use different hashes for different use cases."""
__slots__ = "depflag", "package_hash", "name", "attr", "override"
def __init__(
self,
depflag: dt.DepFlag,
package_hash: bool,
name: str,
override: Optional[Callable[[Any], str]] = None,
):
self.depflag = depflag
self.package_hash = package_hash
self.name = name
self.attr = f"_{name}"
HASHES.append(self)
# Allow spec hashes to have an alternate computation method
self.override = override
def __call__(self, spec):
"""Run this hash on the provided spec."""
return spec.spec_hash(self)
def __repr__(self):
return (
f"SpecHashDescriptor(depflag={self.depflag!r}, "
f"package_hash={self.package_hash!r}, name={self.name!r}, override={self.override!r})"
)
#: The DAG hash includes all inputs that can affect how a package is built.
dag_hash = SpecHashDescriptor(
depflag=dt.BUILD | dt.LINK | dt.RUN | dt.TEST, package_hash=True, name="hash"
)
def _content_hash_override(spec):
pkg_cls = spack.repo.PATH.get_pkg_class(spec.name)
pkg = pkg_cls(spec)
return pkg.content_hash()
#: Package hash used as part of dag hash
package_hash = SpecHashDescriptor(
depflag=0, package_hash=True, name="package_hash", override=_content_hash_override
)
# Deprecated hash types, no longer used, but needed to understand old serialized
# spec formats
full_hash = SpecHashDescriptor(
depflag=dt.BUILD | dt.LINK | dt.RUN, package_hash=True, name="full_hash"
)
build_hash = SpecHashDescriptor(
depflag=dt.BUILD | dt.LINK | dt.RUN, package_hash=False, name="build_hash"
)
| SpecHashDescriptor |
python | pola-rs__polars | py-polars/src/polars/interchange/protocol.py | {
"start": 557,
"end": 769
} | class ____(IntEnum):
"""Integer enum for device type codes matching DLPack."""
CPU = 1
CUDA = 2
CPU_PINNED = 3
OPENCL = 4
VULKAN = 7
METAL = 8
VPI = 9
ROCM = 10
| DlpackDeviceType |
python | donnemartin__interactive-coding-challenges | sorting_searching/merge_sort/test_merge_sort.py | {
"start": 18,
"end": 671
} | class ____(unittest.TestCase):
def test_merge_sort(self):
merge_sort = MergeSort()
print('None input')
self.assertRaises(TypeError, merge_sort.sort, None)
print('Empty input')
self.assertEqual(merge_sort.sort([]), [])
print('One element')
self.assertEqual(merge_sort.sort([5]), [5])
print('Two or more elements')
data = [5, 1, 7, 2, 6, -3, 5, 7, -1]
self.assertEqual(merge_sort.sort(data), sorted(data))
print('Success: test_merge_sort')
def main():
test = TestMergeSort()
test.test_merge_sort()
if __name__ == '__main__':
main()
| TestMergeSort |
python | simonw__datasette | datasette/permissions.py | {
"start": 5101,
"end": 6352
} | class ____:
"""
A plugin contributes SQL that yields:
parent TEXT NULL,
child TEXT NULL,
allow INTEGER, -- 1 allow, 0 deny
reason TEXT
For restriction-only plugins, sql can be None and only restriction_sql is provided.
"""
sql: str | None = (
None # SQL that SELECTs the 4 columns above (can be None for restriction-only)
)
params: dict[str, Any] | None = (
None # bound params for the SQL (values only; no ':' prefix)
)
source: str | None = None # System will set this to the plugin name
restriction_sql: str | None = (
None # Optional SQL that returns (parent, child) for restriction filtering
)
@classmethod
def allow(cls, reason: str, _allow: bool = True) -> "PermissionSQL":
global _reason_id
i = _reason_id
_reason_id += 1
return cls(
sql=f"SELECT NULL AS parent, NULL AS child, {1 if _allow else 0} AS allow, :reason_{i} AS reason",
params={f"reason_{i}": reason},
)
@classmethod
def deny(cls, reason: str) -> "PermissionSQL":
return cls.allow(reason=reason, _allow=False)
# This is obsolete, replaced by Action and ResourceType
@dataclass
| PermissionSQL |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-cart/source_cart/streams.py | {
"start": 3471,
"end": 5049
} | class ____(CartStream, ABC):
state_checkpoint_interval = 1000
cursor_field = "updated_at"
def request_params(self, stream_state: Mapping[str, Any], **kwargs) -> MutableMapping[str, Any]:
"""
Generates a query for incremental logic
Docs: https://developers.cart.com/docs/rest-api/docs/query_syntax.md
"""
params = super().request_params(stream_state=stream_state, **kwargs)
cursor_value = stream_state.get(self.cursor_field) or self._start_date
params["sort"] = self.cursor_field
start_date = max(cursor_value, self._start_date)
query = f"gt:{start_date}"
if self._end_date and self._end_date > start_date:
query += f" AND lt:{self._end_date}"
params[self.cursor_field] = query
ord_params = ["count", "page", "sort", self.cursor_field]
ordered_params = {k: params[k] for k in ord_params if k in params}
return ordered_params
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
"""
Return the latest state by comparing the cursor value in the latest record with the stream's most recent state object
and returning an updated state object.
"""
latest_state = latest_record.get(self.cursor_field)
current_state = current_stream_state.get(self.cursor_field) or latest_state
if current_state:
return {self.cursor_field: max(latest_state, current_state)}
return {}
| IncrementalCartStream |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/executors/utils/test_exponential_backoff_retry.py | {
"start": 1057,
"end": 10644
} | class ____:
def test_exponential_backoff_retry_base_case(self, time_machine):
time_machine.move_to(datetime(2023, 1, 1, 12, 0, 5))
mock_callable_function = mock.Mock()
exponential_backoff_retry(
last_attempt_time=datetime(2023, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
attempts_since_last_successful=0,
callable_function=mock_callable_function,
)
mock_callable_function.assert_called_once()
@pytest.mark.parametrize(
("attempt_number", "utcnow_value", "expected_calls"),
[
(
0,
datetime(2023, 1, 1, 12, 0, 2),
1,
),
(
1,
datetime(2023, 1, 1, 12, 0, 3),
0,
), # delay is 4 seconds; no call made
(
1,
datetime(2023, 1, 1, 12, 0, 4),
1,
),
(
2,
datetime(2023, 1, 1, 12, 0, 15),
0,
), # delay is 16 seconds; no call made
(
2,
datetime(2023, 1, 1, 12, 0, 16),
1,
),
(
3,
datetime(2023, 1, 1, 12, 1, 3),
0,
), # delay is 64 seconds; no call made
(
3,
datetime(2023, 1, 1, 12, 1, 4),
1,
),
(
4,
datetime(2023, 1, 1, 12, 1, 59),
0,
), # delay is 120 seconds; no call made
(
4,
datetime(2023, 1, 1, 12, 2, 0),
1,
),
(
5,
datetime(2023, 1, 1, 12, 1, 59),
0,
), # delay is 120 seconds; no call made
(
5,
datetime(2023, 1, 1, 12, 2, 0),
1,
),
(
99,
datetime(2023, 1, 1, 12, 1, 59),
0,
), # delay is 120 seconds; no call made
(
99,
datetime(2023, 1, 1, 12, 2, 0),
1,
),
],
)
def test_exponential_backoff_retry_parameterized(
self, attempt_number, utcnow_value, expected_calls, time_machine
):
time_machine.move_to(utcnow_value)
mock_callable_function = mock.Mock()
mock_callable_function.__name__ = "test_callable_function"
mock_callable_function.side_effect = Exception()
exponential_backoff_retry(
last_attempt_time=datetime(2023, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
attempts_since_last_successful=attempt_number,
callable_function=mock_callable_function,
)
assert mock_callable_function.call_count == expected_calls
def test_exponential_backoff_retry_fail_success(self, time_machine, caplog):
mock_callable_function = mock.Mock()
mock_callable_function.__name__ = "test_callable_function"
mock_callable_function.side_effect = [Exception(), True]
time_machine.move_to(datetime(2023, 1, 1, 12, 0, 2))
exponential_backoff_retry(
last_attempt_time=datetime(2023, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
attempts_since_last_successful=0,
callable_function=mock_callable_function,
)
mock_callable_function.assert_called_once()
assert any("Error calling" in log for log in caplog.messages)
caplog.clear() # clear messages so that we have clean logs for the next call
time_machine.move_to(datetime(2023, 1, 1, 12, 0, 6))
exponential_backoff_retry(
last_attempt_time=datetime(2023, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
attempts_since_last_successful=1,
callable_function=mock_callable_function,
)
assert all("Error calling" not in log for log in caplog.messages)
def test_exponential_backoff_retry_max_delay(self, time_machine):
mock_callable_function = mock.Mock()
mock_callable_function.__name__ = "test_callable_function"
mock_callable_function.return_value = Exception()
time_machine.move_to(datetime(2023, 1, 1, 12, 4, 15))
exponential_backoff_retry(
last_attempt_time=datetime(2023, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
attempts_since_last_successful=4,
callable_function=mock_callable_function,
max_delay=60 * 5,
)
mock_callable_function.assert_not_called() # delay is 256 seconds; no calls made
time_machine.move_to(datetime(2023, 1, 1, 12, 4, 16))
exponential_backoff_retry(
last_attempt_time=datetime(2023, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
attempts_since_last_successful=4,
callable_function=mock_callable_function,
max_delay=60 * 5,
)
mock_callable_function.assert_called_once()
time_machine.move_to(datetime(2023, 1, 1, 12, 5, 0))
exponential_backoff_retry(
last_attempt_time=datetime(2023, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
attempts_since_last_successful=5,
callable_function=mock_callable_function,
max_delay=60 * 5,
)
# delay should be 4^5=1024 seconds, but max_delay is 60*5=300 seconds
assert mock_callable_function.call_count == 2
def test_exponential_backoff_retry_max_attempts(self, time_machine, caplog):
mock_callable_function = mock.Mock()
mock_callable_function.__name__ = "test_callable_function"
mock_callable_function.return_value = Exception()
time_machine.move_to(datetime(2023, 1, 1, 12, 55, 0))
for i in range(10):
exponential_backoff_retry(
last_attempt_time=datetime(2023, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
attempts_since_last_successful=i,
callable_function=mock_callable_function,
max_attempts=3,
)
assert any("Max attempts reached." in log for log in caplog.messages)
assert mock_callable_function.call_count == 3
@pytest.mark.parametrize(
("attempt_number", "utcnow_value", "expected_calls"),
[
(
0,
datetime(2023, 1, 1, 12, 0, 2),
1,
),
(
1,
datetime(2023, 1, 1, 12, 0, 2),
0,
), # delay is 3 seconds; no call made
(
1,
datetime(2023, 1, 1, 12, 0, 3),
1,
),
(
2,
datetime(2023, 1, 1, 12, 0, 8),
0,
), # delay is 9 seconds; no call made
(
2,
datetime(2023, 1, 1, 12, 0, 9),
1,
),
(
3,
datetime(2023, 1, 1, 12, 0, 26),
0,
), # delay is 27 seconds; no call made
(
3,
datetime(2023, 1, 1, 12, 0, 27),
1,
),
(
4,
datetime(2023, 1, 1, 12, 1, 20),
0,
), # delay is 81 seconds; no call made
(
4,
datetime(2023, 1, 1, 12, 1, 21),
1,
),
(
5,
datetime(2023, 1, 1, 12, 1, 59),
0,
), # delay is 120 seconds; no call made
(
5,
datetime(2023, 1, 1, 12, 2, 0),
1,
),
(
99,
datetime(2023, 1, 1, 12, 1, 59),
0,
), # delay is 120 seconds; no call made
(
99,
datetime(2023, 1, 1, 12, 2, 0),
1,
),
],
)
def test_exponential_backoff_retry_exponent_base_parameterized(
self, time_machine, attempt_number, utcnow_value, expected_calls
):
mock_callable_function = mock.Mock()
mock_callable_function.__name__ = "test_callable_function"
mock_callable_function.side_effect = Exception()
time_machine.move_to(utcnow_value)
exponential_backoff_retry(
last_attempt_time=datetime(2023, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
attempts_since_last_successful=attempt_number,
callable_function=mock_callable_function,
exponent_base=3,
)
assert mock_callable_function.call_count == expected_calls
def test_calculate_next_attempt_delay(self):
exponent_base: int = 4
num_loops: int = 3
# Setting max_delay this way means there will be three loops will run to test:
# one will return a value under max_delay, one equal to max_delay, and one over.
max_delay: int = exponent_base**num_loops - 1
for attempt_number in range(1, num_loops):
returned_delay = calculate_next_attempt_delay(attempt_number, max_delay, exponent_base).seconds
if (expected_delay := exponent_base**attempt_number) <= max_delay:
assert returned_delay == expected_delay
else:
assert returned_delay == max_delay
| TestExponentialBackoffRetry |
python | huggingface__transformers | src/transformers/models/sam2_video/modeling_sam2_video.py | {
"start": 29659,
"end": 34145
} | class ____(nn.Module):
"""
Vision Rotary Position Embedding for SAM2, following transformers library standards.
Supports 2D (axial) rotary embeddings for spatial dimensions.
"""
def __init__(self, config: Sam2VideoConfig):
super().__init__()
dim = config.memory_attention_hidden_size // (
config.memory_attention_downsample_rate * config.memory_attention_num_attention_heads
)
# Ensure even dimension for proper axial splitting
if dim % 4 != 0:
raise ValueError("Dimension must be divisible by 4 for axial RoPE")
end_x, end_y = config.memory_attention_rope_feat_sizes
freqs = 1.0 / (config.memory_attention_rope_theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))
# Generate 2D position indices for axial rotary embedding
flattened_indices = torch.arange(end_x * end_y, dtype=torch.long)
x_positions = flattened_indices % end_x
y_positions = torch.div(flattened_indices, end_x, rounding_mode="floor")
freqs_x = torch.outer(x_positions, freqs).float()
freqs_y = torch.outer(y_positions, freqs).float()
inv_freq = torch.cat([freqs_x, freqs_y], dim=-1)
inv_freq = inv_freq.repeat_interleave(2, dim=-1)
# directly register the cos and sin embeddings as we have a fixed feature shape
self.register_buffer("rope_embeddings_cos", inv_freq.cos(), persistent=False)
self.register_buffer("rope_embeddings_sin", inv_freq.sin(), persistent=False)
@torch.no_grad()
def forward(self) -> tuple[torch.Tensor, torch.Tensor]:
# As the feature map size is fixed, we can just return the pre-computed embeddings.
return self.rope_embeddings_cos, self.rope_embeddings_sin
def rotate_pairwise(x):
"""
pairwise rotation of the hidden dims of the input. Differerent from Llama Half-Tensor Rotation.
This is an optimized version of the following more explicit implementation:
```python
x_rotated = torch.zeros_like(x, dtype=x.dtype, device=x.device)
x_rotated[..., ::2] = -x[..., 1::2]
x_rotated[..., 1::2] = x[..., ::2]
return x_rotated
```
"""
x = x.view(*x.shape[:-1], -1, 2)
x1, x2 = x.unbind(dim=-1)
x = torch.stack((-x2, x1), dim=-1)
return x.flatten(start_dim=-2)
# TODO: This leads to ~1e-07 max diff and ~1e-09 avg diff for q_embed and k_embed from the original implementation, most likely due to the use of complex tensors in the original implementation.
def apply_rotary_pos_emb_2d(
q: torch.Tensor,
k: torch.Tensor,
cos: torch.Tensor,
sin: torch.Tensor,
num_k_exclude_rope: int = 0,
repeat_freqs_k: bool = False,
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Apply rotary position embedding to query and key tensors for vision models.
Follows the standard transformers library pattern.
Args:
q: Query tensor of shape (..., seq_len, head_dim)
k: Key tensor of shape (..., seq_len, head_dim)
cos: Cosine position embedding of shape (seq_len, head_dim)
sin: Sine position embedding of shape (seq_len, head_dim)
repeat_freqs_k: Whether to repeat frequencies for keys (for cross-attention)
Returns:
Rotated (q, k) tensors
"""
k_rot, k_pass = k[..., : k.shape[-2] - num_k_exclude_rope, :], k[..., k.shape[-2] - num_k_exclude_rope :, :]
q_embed = q.float() # force upscale to float32 as in the original implementation
q_embed = (q_embed * cos) + (rotate_pairwise(q_embed) * sin)
if k_rot.shape[-2] == 0:
# Handle case where keys might be empty due to dropout
return q_embed.type_as(q), torch.cat([k_rot, k_pass], dim=-2)
# Handle key tensor - may need to repeat frequencies if different sequence length
if repeat_freqs_k and k_rot.shape[-2] != q.shape[-2]:
# Repeat cos/sin to match key sequence length
repeat_factor = k_rot.shape[-2] // q.shape[-2]
cos_k = cos.repeat(1, 1, repeat_factor, 1)
sin_k = sin.repeat(1, 1, repeat_factor, 1)
else:
cos_k = cos
sin_k = sin
# Apply rotary embedding to keys
k_embed = k_rot.float() # force upscale to float32 as in the original implementation
k_embed = (k_embed * cos_k) + (rotate_pairwise(k_embed) * sin_k)
# Concatenate back to full shape
k_embed = torch.cat([k_embed.type_as(k), k_pass], dim=-2)
return q_embed.type_as(q), k_embed
| Sam2VideoVisionRotaryEmbedding |
python | spyder-ide__spyder | spyder/plugins/completion/api.py | {
"start": 20791,
"end": 20970
} | class ____:
"""LSP completion text interpretations."""
PLAIN_TEXT = 1
SNIPPET = 2
# ----------------- SAVING REQUEST RELATED VALUES -------------------
| InsertTextFormat |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_datasync.py | {
"start": 27032,
"end": 35121
} | class ____(DataSyncTestCaseBase):
def set_up_operator(
self, task_id="test_datasync_task_operator", task_arn="self", wait_for_completion=True
):
if task_arn == "self":
task_arn = self.task_arn
# Create operator
self.datasync = DataSyncOperator(
task_id=task_id,
dag=self.dag,
wait_interval_seconds=0,
wait_for_completion=wait_for_completion,
task_arn=task_arn,
)
def test_init(self, mock_get_conn):
self.set_up_operator()
# Airflow built-ins
assert self.datasync.task_id == MOCK_DATA["task_id"]
# Defaults
assert self.datasync.aws_conn_id == "aws_default"
assert self.datasync.wait_interval_seconds == 0
# Assignments
assert self.datasync.task_arn == self.task_arn
# ### Check mocks:
mock_get_conn.assert_not_called()
def test_init_fails(self, mock_get_conn):
# ### Set up mocks:
mock_get_conn.return_value = self.client
# ### Begin tests:
with pytest.raises(AirflowException):
self.set_up_operator(task_arn=None)
# ### Check mocks:
mock_get_conn.assert_not_called()
def test_task_extra_links(self, mock_get_conn):
mock_get_conn.return_value = self.client
self.set_up_operator()
region = "us-east-1"
aws_domain = DataSyncTaskLink.get_aws_domain("aws")
task_id = self.task_arn.split("/")[-1]
base_url = f"https://console.{aws_domain}/datasync/home?region={region}#"
task_url = f"{base_url}/tasks/{task_id}"
with mock.patch.object(self.datasync.log, "info") as mock_logging:
result = self.datasync.execute(None)
task_execution_arn = result["TaskExecutionArn"]
execution_id = task_execution_arn.split("/")[-1]
execution_url = f"{base_url}/history/{task_id}/{execution_id}"
assert self.datasync.task_arn == self.task_arn
mock_logging.assert_any_call("You can view this DataSync task at %s", task_url)
mock_logging.assert_any_call("You can view this DataSync task execution at %s", execution_url)
def test_execute_task(self, mock_get_conn):
# ### Set up mocks:
mock_get_conn.return_value = self.client
# ### Begin tests:
# Configure the Operator with the specific task_arn
self.set_up_operator()
assert self.datasync.task_arn == self.task_arn
# Check how many tasks and locations we have
tasks = self.client.list_tasks()
len_tasks_before = len(tasks["Tasks"])
locations = self.client.list_locations()
len_locations_before = len(locations["Locations"])
# Execute the task
result = self.datasync.execute(None)
assert result is not None
task_execution_arn = result["TaskExecutionArn"]
assert task_execution_arn is not None
# Assert 0 additional task and 0 additional locations
tasks = self.client.list_tasks()
assert len(tasks["Tasks"]) == len_tasks_before
locations = self.client.list_locations()
assert len(locations["Locations"]) == len_locations_before
# Check with the DataSync client what happened
task_execution = self.client.describe_task_execution(TaskExecutionArn=task_execution_arn)
assert task_execution["Status"] == "SUCCESS"
# Insist that this specific task was executed, not anything else
task_execution_arn = task_execution["TaskExecutionArn"]
# format of task_execution_arn:
# arn:aws:datasync:us-east-1:111222333444:task/task-00000000000000003/execution/exec-00000000000000004
# format of task_arn:
# arn:aws:datasync:us-east-1:111222333444:task/task-00000000000000003
assert "/".join(task_execution_arn.split("/")[:2]) == self.task_arn
# ### Check mocks:
mock_get_conn.assert_called()
@mock.patch.object(DataSyncHook, "wait_for_task_execution")
def test_execute_task_without_wait_for_completion(self, mock_wait, mock_get_conn):
self.set_up_operator(wait_for_completion=False)
# Execute the task
result = self.datasync.execute(None)
assert result is not None
task_execution_arn = result["TaskExecutionArn"]
assert task_execution_arn is not None
mock_wait.assert_not_called()
@mock.patch.object(DataSyncHook, "wait_for_task_execution")
def test_failed_task(self, mock_wait, mock_get_conn):
# ### Set up mocks:
mock_get_conn.return_value = self.client
mock_wait.return_value = False
# ### Begin tests:
self.set_up_operator()
# Execute the task
with pytest.raises(AirflowException):
self.datasync.execute(None)
# ### Check mocks:
mock_get_conn.assert_called()
@mock.patch.object(DataSyncHook, "wait_for_task_execution")
def test_killed_task(self, mock_wait, mock_get_conn):
# ### Set up mocks:
mock_get_conn.return_value = self.client
# ### Begin tests:
# Kill the task when doing wait_for_task_execution
def kill_task(*args, **kwargs):
self.datasync.on_kill()
return True
mock_wait.side_effect = kill_task
self.set_up_operator()
# Execute the task
result = self.datasync.execute(None)
assert result is not None
task_execution_arn = result["TaskExecutionArn"]
assert task_execution_arn is not None
# Verify the task was killed
task = self.client.describe_task(TaskArn=self.task_arn)
assert task["Status"] == "AVAILABLE"
task_execution = self.client.describe_task_execution(TaskExecutionArn=task_execution_arn)
assert task_execution["Status"] == "ERROR"
# ### Check mocks:
mock_get_conn.assert_called()
def test_execute_specific_task(self, mock_get_conn):
# ### Set up mocks:
mock_get_conn.return_value = self.client
# ### Begin tests:
task_arn = self.client.create_task(
SourceLocationArn=self.source_location_arn,
DestinationLocationArn=self.destination_location_arn,
)["TaskArn"]
self.set_up_operator(task_arn=task_arn)
result = self.datasync.execute(None)
assert result["TaskArn"] == task_arn
assert self.datasync.task_arn == task_arn
# ### Check mocks:
mock_get_conn.assert_called()
@pytest.mark.db_test
def test_return_value(
self, mock_get_conn, session, clean_dags_dagruns_and_dagbundles, testing_dag_bundle
):
"""Test we return the right value -- that will get put in to XCom by the execution engine"""
# ### Set up mocks:
mock_get_conn.return_value = self.client
# ### Begin tests:
self.set_up_operator()
if AIRFLOW_V_3_0_PLUS:
from airflow.models.dag_version import DagVersion
sync_dag_to_db(self.dag)
dag_version = DagVersion.get_latest_version(self.dag.dag_id)
ti = TaskInstance(task=self.datasync, dag_version_id=dag_version.id)
dag_run = DagRun(
dag_id=self.dag.dag_id,
logical_date=timezone.utcnow(),
run_id="test",
run_type=DagRunType.MANUAL,
state=DagRunState.RUNNING,
)
else:
dag_run = DagRun(
dag_id=self.dag.dag_id,
execution_date=timezone.utcnow(),
run_id="test",
run_type=DagRunType.MANUAL,
state=DagRunState.RUNNING,
)
ti = TaskInstance(task=self.datasync)
ti.dag_run = dag_run
session.add(ti)
session.commit()
assert self.datasync.execute(ti.get_template_context()) is not None
# ### Check mocks:
mock_get_conn.assert_called()
@mock_aws
@mock.patch.object(DataSyncHook, "get_conn")
| TestDataSyncOperator |
python | kamyu104__LeetCode-Solutions | Python/convert-binary-search-tree-to-sorted-doubly-linked-list.py | {
"start": 29,
"end": 168
} | class ____(object):
def __init__(self, val, left, right):
self.val = val
self.left = left
self.right = right
| Node |
python | readthedocs__readthedocs.org | readthedocs/builds/migrations/0036_change_mkdocs_name.py | {
"start": 149,
"end": 998
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("builds", "0035_backport_indexes"),
]
operations = [
migrations.AlterField(
model_name="version",
name="documentation_type",
field=models.CharField(
choices=[
("sphinx", "Sphinx Html"),
("mkdocs", "Mkdocs"),
("sphinx_htmldir", "Sphinx HtmlDir"),
("sphinx_singlehtml", "Sphinx Single Page HTML"),
("mkdocs_html", "Mkdocs Html Pages"),
],
default="sphinx",
help_text="Type of documentation the version was built with.",
max_length=20,
verbose_name="Documentation type",
),
),
]
| Migration |
python | tensorflow__tensorflow | tensorflow/python/debug/lib/profiling_test.py | {
"start": 972,
"end": 3777
} | class ____(test_util.TensorFlowTestCase):
def setUp(self):
node_1 = step_stats_pb2.NodeExecStats(
node_name="Add/123",
op_start_rel_micros=3,
op_end_rel_micros=5,
all_end_rel_micros=4)
self.profile_datum_1 = profiling.ProfileDatum(
"cpu:0", node_1, "/foo/bar.py", 10, "func1", "Add")
node_2 = step_stats_pb2.NodeExecStats(
node_name="Mul/456",
op_start_rel_micros=13,
op_end_rel_micros=16,
all_end_rel_micros=17)
self.profile_datum_2 = profiling.ProfileDatum(
"cpu:0", node_2, "/foo/bar.py", 11, "func1", "Mul")
node_3 = step_stats_pb2.NodeExecStats(
node_name="Add/123",
op_start_rel_micros=103,
op_end_rel_micros=105,
all_end_rel_micros=4)
self.profile_datum_3 = profiling.ProfileDatum(
"cpu:0", node_3, "/foo/bar.py", 12, "func1", "Add")
node_4 = step_stats_pb2.NodeExecStats(
node_name="Add/123",
op_start_rel_micros=203,
op_end_rel_micros=205,
all_end_rel_micros=4)
self.profile_datum_4 = profiling.ProfileDatum(
"gpu:0", node_4, "/foo/bar.py", 13, "func1", "Add")
def testAggregateProfileConstructorWorks(self):
aggregate_data = profiling.AggregateProfile(self.profile_datum_1)
self.assertEqual(2, aggregate_data.total_op_time)
self.assertEqual(4, aggregate_data.total_exec_time)
self.assertEqual(1, aggregate_data.node_count)
self.assertEqual(1, aggregate_data.node_exec_count)
def testAddToAggregateProfileWithDifferentNodeWorks(self):
aggregate_data = profiling.AggregateProfile(self.profile_datum_1)
aggregate_data.add(self.profile_datum_2)
self.assertEqual(5, aggregate_data.total_op_time)
self.assertEqual(21, aggregate_data.total_exec_time)
self.assertEqual(2, aggregate_data.node_count)
self.assertEqual(2, aggregate_data.node_exec_count)
def testAddToAggregateProfileWithSameNodeWorks(self):
aggregate_data = profiling.AggregateProfile(self.profile_datum_1)
aggregate_data.add(self.profile_datum_2)
aggregate_data.add(self.profile_datum_3)
self.assertEqual(7, aggregate_data.total_op_time)
self.assertEqual(25, aggregate_data.total_exec_time)
self.assertEqual(2, aggregate_data.node_count)
self.assertEqual(3, aggregate_data.node_exec_count)
def testAddToAggregateProfileWithDifferentDeviceSameNodeWorks(self):
aggregate_data = profiling.AggregateProfile(self.profile_datum_1)
aggregate_data.add(self.profile_datum_4)
self.assertEqual(4, aggregate_data.total_op_time)
self.assertEqual(8, aggregate_data.total_exec_time)
self.assertEqual(2, aggregate_data.node_count)
self.assertEqual(2, aggregate_data.node_exec_count)
if __name__ == "__main__":
googletest.main()
| AggregateProfile |
python | ansible__ansible | test/integration/targets/assert/lookup_plugins/yield_terms.py | {
"start": 84,
"end": 221
} | class ____(LookupBase):
accept_args_markers = True
def run(self, terms, variables=None, **kwargs):
return terms
| LookupModule |
python | jd__tenacity | tenacity/asyncio/retry.py | {
"start": 1828,
"end": 2611
} | class ____(async_retry_base):
"""Retry strategy that retries if an exception verifies a predicate."""
def __init__(
self, predicate: typing.Callable[[BaseException], typing.Awaitable[bool]]
) -> None:
self.predicate = predicate
async def __call__(self, retry_state: "RetryCallState") -> bool: # type: ignore[override]
if retry_state.outcome is None:
raise RuntimeError("__call__() called before outcome was set")
if retry_state.outcome.failed:
exception = retry_state.outcome.exception()
if exception is None:
raise RuntimeError("outcome failed but the exception is None")
return await self.predicate(exception)
else:
return False
| retry_if_exception |
python | pydata__xarray | xarray/tests/test_plot.py | {
"start": 32665,
"end": 34381
} | class ____(PlotTestCase):
@pytest.fixture(autouse=True)
def setUp(self) -> None:
self.darray = DataArray(easy_array((2, 3, 4)))
def test_3d_array(self) -> None:
self.darray.plot.hist() # type: ignore[call-arg]
def test_xlabel_uses_name(self) -> None:
self.darray.name = "testpoints"
self.darray.attrs["units"] = "testunits"
self.darray.plot.hist() # type: ignore[call-arg]
assert "testpoints [testunits]" == plt.gca().get_xlabel()
def test_title_is_histogram(self) -> None:
self.darray.coords["d"] = 10
self.darray.plot.hist() # type: ignore[call-arg]
assert "d = 10" == plt.gca().get_title()
def test_can_pass_in_kwargs(self) -> None:
nbins = 5
self.darray.plot.hist(bins=nbins) # type: ignore[call-arg]
assert nbins == len(plt.gca().patches)
def test_can_pass_in_axis(self) -> None:
self.pass_in_axis(self.darray.plot.hist)
def test_primitive_returned(self) -> None:
n, bins, patches = self.darray.plot.hist() # type: ignore[call-arg]
assert isinstance(n, np.ndarray)
assert isinstance(bins, np.ndarray)
assert isinstance(patches, mpl.container.BarContainer)
assert isinstance(patches[0], mpl.patches.Rectangle)
@pytest.mark.slow
def test_plot_nans(self) -> None:
self.darray[0, 0, 0] = np.nan
self.darray.plot.hist() # type: ignore[call-arg]
def test_hist_coord_with_interval(self) -> None:
(
self.darray.groupby_bins("dim_0", [-1, 0, 1, 2]) # type: ignore[call-arg]
.mean(...)
.plot.hist(range=(-1, 2))
)
@requires_matplotlib
| TestPlotHistogram |
python | mlflow__mlflow | mlflow/types/chat.py | {
"start": 6068,
"end": 6300
} | class ____(BaseModel):
"""A chunk of a chat completion stream response."""
id: str | None = None
object: str = "chat.completion.chunk"
created: int
model: str
choices: list[ChatChunkChoice]
| ChatCompletionChunk |
python | getsentry__sentry | src/sentry/utils/concurrent.py | {
"start": 928,
"end": 1200
} | class ____[T](NamedTuple):
priority: int
item: tuple[sentry_sdk.Scope, sentry_sdk.Scope, Callable[[], T], Future[T]]
def __eq__(self, b):
return self.priority == b.priority
def __lt__(self, b):
return self.priority < b.priority
| PriorityTask |
python | rapidsai__cudf | python/cudf/cudf/tests/general_functions/test_register_accessor.py | {
"start": 308,
"end": 1597
} | class ____:
def __init__(self, obj):
self._validate(obj)
self._obj = obj
@staticmethod
def _validate(obj):
cols = obj.columns
if not all(vertex in cols for vertex in ["x", "y"]):
raise AttributeError("Must have vertices 'x', 'y'.")
@property
def bounding_box(self):
xs, ys = self._obj["x"], self._obj["y"]
min_x, min_y, max_x, max_y = xs.min(), ys.min(), xs.max(), ys.max()
return (min_x, min_y, max_x, max_y)
def test_dataframe_accessor():
gdf = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
pdf = gdf.to_pandas()
assert_eq(gdf.point.bounding_box, pdf.point.bounding_box)
def test_dataframe_accessor_identity():
"""Test for accessor identities
- An object should hold persistent reference to the same accessor
- Different objects should hold difference instances of the accessor
"""
gdf1 = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
gdf2 = gdf1.copy()
assert gdf1.point is gdf1.point
assert gdf1.point is not gdf2.point
@pd.api.extensions.register_index_accessor("odd")
@pd.api.extensions.register_series_accessor("odd")
@cudf.api.extensions.register_index_accessor("odd")
@cudf.api.extensions.register_series_accessor("odd")
| PointsAccessor |
python | django__django | tests/queries/models.py | {
"start": 10634,
"end": 10749
} | class ____(models.Model):
CaTeGoRy = models.ForeignKey(SimpleCategory, models.CASCADE)
| MixedCaseFieldCategoryItem |
python | huggingface__transformers | src/transformers/models/hubert/modeling_hubert.py | {
"start": 19871,
"end": 21600
} | class ____(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.attention = HubertAttention(
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
is_decoder=False,
config=config,
)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.feed_forward = HubertFeedForward(config)
self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
if getattr(config, "adapter_attn_dim", None) is not None:
self.adapter_layer = HubertAttnAdapterLayer(config)
else:
self.adapter_layer = None
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
):
attn_residual = hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states, attn_weights, _ = self.attention(
hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
)
hidden_states = self.dropout(hidden_states)
hidden_states = attn_residual + hidden_states
hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))
if self.adapter_layer is not None:
hidden_states = hidden_states + self.adapter_layer(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
| HubertEncoderLayerStableLayerNorm |
python | pandas-dev__pandas | pandas/core/arrays/base.py | {
"start": 90118,
"end": 93140
} | class ____:
"""
A base class for linking the operators to their dunder names.
.. note::
You may want to set ``__array_priority__`` if you want your
implementation to be called when involved in binary operations
with NumPy arrays.
"""
@classmethod
def _create_arithmetic_method(cls, op):
raise AbstractMethodError(cls)
@classmethod
def _add_arithmetic_ops(cls) -> None:
setattr(cls, "__add__", cls._create_arithmetic_method(operator.add))
setattr(cls, "__radd__", cls._create_arithmetic_method(roperator.radd))
setattr(cls, "__sub__", cls._create_arithmetic_method(operator.sub))
setattr(cls, "__rsub__", cls._create_arithmetic_method(roperator.rsub))
setattr(cls, "__mul__", cls._create_arithmetic_method(operator.mul))
setattr(cls, "__rmul__", cls._create_arithmetic_method(roperator.rmul))
setattr(cls, "__pow__", cls._create_arithmetic_method(operator.pow))
setattr(cls, "__rpow__", cls._create_arithmetic_method(roperator.rpow))
setattr(cls, "__mod__", cls._create_arithmetic_method(operator.mod))
setattr(cls, "__rmod__", cls._create_arithmetic_method(roperator.rmod))
setattr(cls, "__floordiv__", cls._create_arithmetic_method(operator.floordiv))
setattr(
cls, "__rfloordiv__", cls._create_arithmetic_method(roperator.rfloordiv)
)
setattr(cls, "__truediv__", cls._create_arithmetic_method(operator.truediv))
setattr(cls, "__rtruediv__", cls._create_arithmetic_method(roperator.rtruediv))
setattr(cls, "__divmod__", cls._create_arithmetic_method(divmod))
setattr(cls, "__rdivmod__", cls._create_arithmetic_method(roperator.rdivmod))
@classmethod
def _create_comparison_method(cls, op):
raise AbstractMethodError(cls)
@classmethod
def _add_comparison_ops(cls) -> None:
setattr(cls, "__eq__", cls._create_comparison_method(operator.eq))
setattr(cls, "__ne__", cls._create_comparison_method(operator.ne))
setattr(cls, "__lt__", cls._create_comparison_method(operator.lt))
setattr(cls, "__gt__", cls._create_comparison_method(operator.gt))
setattr(cls, "__le__", cls._create_comparison_method(operator.le))
setattr(cls, "__ge__", cls._create_comparison_method(operator.ge))
@classmethod
def _create_logical_method(cls, op):
raise AbstractMethodError(cls)
@classmethod
def _add_logical_ops(cls) -> None:
setattr(cls, "__and__", cls._create_logical_method(operator.and_))
setattr(cls, "__rand__", cls._create_logical_method(roperator.rand_))
setattr(cls, "__or__", cls._create_logical_method(operator.or_))
setattr(cls, "__ror__", cls._create_logical_method(roperator.ror_))
setattr(cls, "__xor__", cls._create_logical_method(operator.xor))
setattr(cls, "__rxor__", cls._create_logical_method(roperator.rxor))
@set_module("pandas.api.extensions")
| ExtensionOpsMixin |
python | tensorflow__tensorflow | tensorflow/python/ops/clustering_ops_test.py | {
"start": 3375,
"end": 3871
} | class ____(test.TestCase):
def setUp(self):
self._distances = np.zeros(10)
def runTestWithSeed(self, seed):
with self.cached_session():
sampled_point = clustering_ops.kmc2_chain_initialization(
self._distances, seed)
self.assertAllEqual(sampled_point, 0)
def testBasic(self):
for seed in range(100):
self.runTestWithSeed(seed)
@test_util.run_all_in_graph_and_eager_modes
# A simple test that can be verified by hand.
| KMC2InitializationCornercaseTest |
python | pallets__flask | src/flask/sansio/blueprints.py | {
"start": 4397,
"end": 27017
} | class ____(Scaffold):
"""Represents a blueprint, a collection of routes and other
app-related functions that can be registered on a real application
later.
A blueprint is an object that allows defining application functions
without requiring an application object ahead of time. It uses the
same decorators as :class:`~flask.Flask`, but defers the need for an
application by recording them for later registration.
Decorating a function with a blueprint creates a deferred function
that is called with :class:`~flask.blueprints.BlueprintSetupState`
when the blueprint is registered on an application.
See :doc:`/blueprints` for more information.
:param name: The name of the blueprint. Will be prepended to each
endpoint name.
:param import_name: The name of the blueprint package, usually
``__name__``. This helps locate the ``root_path`` for the
blueprint.
:param static_folder: A folder with static files that should be
served by the blueprint's static route. The path is relative to
the blueprint's root path. Blueprint static files are disabled
by default.
:param static_url_path: The url to serve static files from.
Defaults to ``static_folder``. If the blueprint does not have
a ``url_prefix``, the app's static route will take precedence,
and the blueprint's static files won't be accessible.
:param template_folder: A folder with templates that should be added
to the app's template search path. The path is relative to the
blueprint's root path. Blueprint templates are disabled by
default. Blueprint templates have a lower precedence than those
in the app's templates folder.
:param url_prefix: A path to prepend to all of the blueprint's URLs,
to make them distinct from the rest of the app's routes.
:param subdomain: A subdomain that blueprint routes will match on by
default.
:param url_defaults: A dict of default values that blueprint routes
will receive by default.
:param root_path: By default, the blueprint will automatically set
this based on ``import_name``. In certain situations this
automatic detection can fail, so the path can be specified
manually instead.
.. versionchanged:: 1.1.0
Blueprints have a ``cli`` group to register nested CLI commands.
The ``cli_group`` parameter controls the name of the group under
the ``flask`` command.
.. versionadded:: 0.7
"""
_got_registered_once = False
def __init__(
self,
name: str,
import_name: str,
static_folder: str | os.PathLike[str] | None = None,
static_url_path: str | None = None,
template_folder: str | os.PathLike[str] | None = None,
url_prefix: str | None = None,
subdomain: str | None = None,
url_defaults: dict[str, t.Any] | None = None,
root_path: str | None = None,
cli_group: str | None = _sentinel, # type: ignore[assignment]
):
super().__init__(
import_name=import_name,
static_folder=static_folder,
static_url_path=static_url_path,
template_folder=template_folder,
root_path=root_path,
)
if not name:
raise ValueError("'name' may not be empty.")
if "." in name:
raise ValueError("'name' may not contain a dot '.' character.")
self.name = name
self.url_prefix = url_prefix
self.subdomain = subdomain
self.deferred_functions: list[DeferredSetupFunction] = []
if url_defaults is None:
url_defaults = {}
self.url_values_defaults = url_defaults
self.cli_group = cli_group
self._blueprints: list[tuple[Blueprint, dict[str, t.Any]]] = []
def _check_setup_finished(self, f_name: str) -> None:
if self._got_registered_once:
raise AssertionError(
f"The setup method '{f_name}' can no longer be called on the blueprint"
f" '{self.name}'. It has already been registered at least once, any"
" changes will not be applied consistently.\n"
"Make sure all imports, decorators, functions, etc. needed to set up"
" the blueprint are done before registering it."
)
@setupmethod
def record(self, func: DeferredSetupFunction) -> None:
"""Registers a function that is called when the blueprint is
registered on the application. This function is called with the
state as argument as returned by the :meth:`make_setup_state`
method.
"""
self.deferred_functions.append(func)
@setupmethod
def record_once(self, func: DeferredSetupFunction) -> None:
"""Works like :meth:`record` but wraps the function in another
function that will ensure the function is only called once. If the
blueprint is registered a second time on the application, the
function passed is not called.
"""
def wrapper(state: BlueprintSetupState) -> None:
if state.first_registration:
func(state)
self.record(update_wrapper(wrapper, func))
def make_setup_state(
self, app: App, options: dict[str, t.Any], first_registration: bool = False
) -> BlueprintSetupState:
"""Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`
object that is later passed to the register callback functions.
Subclasses can override this to return a subclass of the setup state.
"""
return BlueprintSetupState(self, app, options, first_registration)
@setupmethod
def register_blueprint(self, blueprint: Blueprint, **options: t.Any) -> None:
"""Register a :class:`~flask.Blueprint` on this blueprint. Keyword
arguments passed to this method will override the defaults set
on the blueprint.
.. versionchanged:: 2.0.1
The ``name`` option can be used to change the (pre-dotted)
name the blueprint is registered with. This allows the same
blueprint to be registered multiple times with unique names
for ``url_for``.
.. versionadded:: 2.0
"""
if blueprint is self:
raise ValueError("Cannot register a blueprint on itself")
self._blueprints.append((blueprint, options))
def register(self, app: App, options: dict[str, t.Any]) -> None:
"""Called by :meth:`Flask.register_blueprint` to register all
views and callbacks registered on the blueprint with the
application. Creates a :class:`.BlueprintSetupState` and calls
each :meth:`record` callback with it.
:param app: The application this blueprint is being registered
with.
:param options: Keyword arguments forwarded from
:meth:`~Flask.register_blueprint`.
.. versionchanged:: 2.3
Nested blueprints now correctly apply subdomains.
.. versionchanged:: 2.1
Registering the same blueprint with the same name multiple
times is an error.
.. versionchanged:: 2.0.1
Nested blueprints are registered with their dotted name.
This allows different blueprints with the same name to be
nested at different locations.
.. versionchanged:: 2.0.1
The ``name`` option can be used to change the (pre-dotted)
name the blueprint is registered with. This allows the same
blueprint to be registered multiple times with unique names
for ``url_for``.
"""
name_prefix = options.get("name_prefix", "")
self_name = options.get("name", self.name)
name = f"{name_prefix}.{self_name}".lstrip(".")
if name in app.blueprints:
bp_desc = "this" if app.blueprints[name] is self else "a different"
existing_at = f" '{name}'" if self_name != name else ""
raise ValueError(
f"The name '{self_name}' is already registered for"
f" {bp_desc} blueprint{existing_at}. Use 'name=' to"
f" provide a unique name."
)
first_bp_registration = not any(bp is self for bp in app.blueprints.values())
first_name_registration = name not in app.blueprints
app.blueprints[name] = self
self._got_registered_once = True
state = self.make_setup_state(app, options, first_bp_registration)
if self.has_static_folder:
state.add_url_rule(
f"{self.static_url_path}/<path:filename>",
view_func=self.send_static_file, # type: ignore[attr-defined]
endpoint="static",
)
# Merge blueprint data into parent.
if first_bp_registration or first_name_registration:
self._merge_blueprint_funcs(app, name)
for deferred in self.deferred_functions:
deferred(state)
cli_resolved_group = options.get("cli_group", self.cli_group)
if self.cli.commands:
if cli_resolved_group is None:
app.cli.commands.update(self.cli.commands)
elif cli_resolved_group is _sentinel:
self.cli.name = name
app.cli.add_command(self.cli)
else:
self.cli.name = cli_resolved_group
app.cli.add_command(self.cli)
for blueprint, bp_options in self._blueprints:
bp_options = bp_options.copy()
bp_url_prefix = bp_options.get("url_prefix")
bp_subdomain = bp_options.get("subdomain")
if bp_subdomain is None:
bp_subdomain = blueprint.subdomain
if state.subdomain is not None and bp_subdomain is not None:
bp_options["subdomain"] = bp_subdomain + "." + state.subdomain
elif bp_subdomain is not None:
bp_options["subdomain"] = bp_subdomain
elif state.subdomain is not None:
bp_options["subdomain"] = state.subdomain
if bp_url_prefix is None:
bp_url_prefix = blueprint.url_prefix
if state.url_prefix is not None and bp_url_prefix is not None:
bp_options["url_prefix"] = (
state.url_prefix.rstrip("/") + "/" + bp_url_prefix.lstrip("/")
)
elif bp_url_prefix is not None:
bp_options["url_prefix"] = bp_url_prefix
elif state.url_prefix is not None:
bp_options["url_prefix"] = state.url_prefix
bp_options["name_prefix"] = name
blueprint.register(app, bp_options)
def _merge_blueprint_funcs(self, app: App, name: str) -> None:
def extend(
bp_dict: dict[ft.AppOrBlueprintKey, list[t.Any]],
parent_dict: dict[ft.AppOrBlueprintKey, list[t.Any]],
) -> None:
for key, values in bp_dict.items():
key = name if key is None else f"{name}.{key}"
parent_dict[key].extend(values)
for key, value in self.error_handler_spec.items():
key = name if key is None else f"{name}.{key}"
value = defaultdict(
dict,
{
code: {exc_class: func for exc_class, func in code_values.items()}
for code, code_values in value.items()
},
)
app.error_handler_spec[key] = value
for endpoint, func in self.view_functions.items():
app.view_functions[endpoint] = func
extend(self.before_request_funcs, app.before_request_funcs)
extend(self.after_request_funcs, app.after_request_funcs)
extend(
self.teardown_request_funcs,
app.teardown_request_funcs,
)
extend(self.url_default_functions, app.url_default_functions)
extend(self.url_value_preprocessors, app.url_value_preprocessors)
extend(self.template_context_processors, app.template_context_processors)
@setupmethod
def add_url_rule(
self,
rule: str,
endpoint: str | None = None,
view_func: ft.RouteCallable | None = None,
provide_automatic_options: bool | None = None,
**options: t.Any,
) -> None:
"""Register a URL rule with the blueprint. See :meth:`.Flask.add_url_rule` for
full documentation.
The URL rule is prefixed with the blueprint's URL prefix. The endpoint name,
used with :func:`url_for`, is prefixed with the blueprint's name.
"""
if endpoint and "." in endpoint:
raise ValueError("'endpoint' may not contain a dot '.' character.")
if view_func and hasattr(view_func, "__name__") and "." in view_func.__name__:
raise ValueError("'view_func' name may not contain a dot '.' character.")
self.record(
lambda s: s.add_url_rule(
rule,
endpoint,
view_func,
provide_automatic_options=provide_automatic_options,
**options,
)
)
@t.overload
def app_template_filter(self, name: T_template_filter) -> T_template_filter: ...
@t.overload
def app_template_filter(
self, name: str | None = None
) -> t.Callable[[T_template_filter], T_template_filter]: ...
@setupmethod
def app_template_filter(
self, name: T_template_filter | str | None = None
) -> T_template_filter | t.Callable[[T_template_filter], T_template_filter]:
"""Decorate a function to register it as a custom Jinja filter. The name
is optional. The decorator may be used without parentheses.
The :meth:`add_app_template_filter` method may be used to register a
function later rather than decorating.
The filter is available in all templates, not only those under this
blueprint. Equivalent to :meth:`.Flask.template_filter`.
:param name: The name to register the filter as. If not given, uses the
function's name.
"""
if callable(name):
self.add_app_template_filter(name)
return name
def decorator(f: T_template_filter) -> T_template_filter:
self.add_app_template_filter(f, name=name)
return f
return decorator
@setupmethod
def add_app_template_filter(
self, f: ft.TemplateFilterCallable, name: str | None = None
) -> None:
"""Register a function to use as a custom Jinja filter.
The :meth:`app_template_filter` decorator can be used to register a
function by decorating instead.
The filter is available in all templates, not only those under this
blueprint. Equivalent to :meth:`.Flask.add_template_filter`.
:param f: The function to register.
:param name: The name to register the filter as. If not given, uses the
function's name.
"""
def register_template_filter(state: BlueprintSetupState) -> None:
state.app.add_template_filter(f, name=name)
self.record_once(register_template_filter)
@t.overload
def app_template_test(self, name: T_template_test) -> T_template_test: ...
@t.overload
def app_template_test(
self, name: str | None = None
) -> t.Callable[[T_template_test], T_template_test]: ...
@setupmethod
def app_template_test(
self, name: T_template_test | str | None = None
) -> T_template_test | t.Callable[[T_template_test], T_template_test]:
"""Decorate a function to register it as a custom Jinja test. The name
is optional. The decorator may be used without parentheses.
The :meth:`add_app_template_test` method may be used to register a
function later rather than decorating.
The test is available in all templates, not only those under this
blueprint. Equivalent to :meth:`.Flask.template_test`.
:param name: The name to register the filter as. If not given, uses the
function's name.
.. versionadded:: 0.10
"""
if callable(name):
self.add_app_template_test(name)
return name
def decorator(f: T_template_test) -> T_template_test:
self.add_app_template_test(f, name=name)
return f
return decorator
@setupmethod
def add_app_template_test(
self, f: ft.TemplateTestCallable, name: str | None = None
) -> None:
"""Register a function to use as a custom Jinja test.
The :meth:`app_template_test` decorator can be used to register a
function by decorating instead.
The test is available in all templates, not only those under this
blueprint. Equivalent to :meth:`.Flask.add_template_test`.
:param f: The function to register.
:param name: The name to register the test as. If not given, uses the
function's name.
.. versionadded:: 0.10
"""
def register_template_test(state: BlueprintSetupState) -> None:
state.app.add_template_test(f, name=name)
self.record_once(register_template_test)
@t.overload
def app_template_global(self, name: T_template_global) -> T_template_global: ...
@t.overload
def app_template_global(
self, name: str | None = None
) -> t.Callable[[T_template_global], T_template_global]: ...
@setupmethod
def app_template_global(
self, name: T_template_global | str | None = None
) -> T_template_global | t.Callable[[T_template_global], T_template_global]:
"""Decorate a function to register it as a custom Jinja global. The name
is optional. The decorator may be used without parentheses.
The :meth:`add_app_template_global` method may be used to register a
function later rather than decorating.
The global is available in all templates, not only those under this
blueprint. Equivalent to :meth:`.Flask.template_global`.
:param name: The name to register the global as. If not given, uses the
function's name.
.. versionadded:: 0.10
"""
if callable(name):
self.add_app_template_global(name)
return name
def decorator(f: T_template_global) -> T_template_global:
self.add_app_template_global(f, name=name)
return f
return decorator
@setupmethod
def add_app_template_global(
self, f: ft.TemplateGlobalCallable, name: str | None = None
) -> None:
"""Register a function to use as a custom Jinja global.
The :meth:`app_template_global` decorator can be used to register a function
by decorating instead.
The global is available in all templates, not only those under this
blueprint. Equivalent to :meth:`.Flask.add_template_global`.
:param f: The function to register.
:param name: The name to register the global as. If not given, uses the
function's name.
.. versionadded:: 0.10
"""
def register_template_global(state: BlueprintSetupState) -> None:
state.app.add_template_global(f, name=name)
self.record_once(register_template_global)
@setupmethod
def before_app_request(self, f: T_before_request) -> T_before_request:
"""Like :meth:`before_request`, but before every request, not only those handled
by the blueprint. Equivalent to :meth:`.Flask.before_request`.
"""
self.record_once(
lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)
)
return f
@setupmethod
def after_app_request(self, f: T_after_request) -> T_after_request:
"""Like :meth:`after_request`, but after every request, not only those handled
by the blueprint. Equivalent to :meth:`.Flask.after_request`.
"""
self.record_once(
lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)
)
return f
@setupmethod
def teardown_app_request(self, f: T_teardown) -> T_teardown:
"""Like :meth:`teardown_request`, but after every request, not only those
handled by the blueprint. Equivalent to :meth:`.Flask.teardown_request`.
"""
self.record_once(
lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)
)
return f
@setupmethod
def app_context_processor(
self, f: T_template_context_processor
) -> T_template_context_processor:
"""Like :meth:`context_processor`, but for templates rendered by every view, not
only by the blueprint. Equivalent to :meth:`.Flask.context_processor`.
"""
self.record_once(
lambda s: s.app.template_context_processors.setdefault(None, []).append(f)
)
return f
@setupmethod
def app_errorhandler(
self, code: type[Exception] | int
) -> t.Callable[[T_error_handler], T_error_handler]:
"""Like :meth:`errorhandler`, but for every request, not only those handled by
the blueprint. Equivalent to :meth:`.Flask.errorhandler`.
"""
def decorator(f: T_error_handler) -> T_error_handler:
def from_blueprint(state: BlueprintSetupState) -> None:
state.app.errorhandler(code)(f)
self.record_once(from_blueprint)
return f
return decorator
@setupmethod
def app_url_value_preprocessor(
self, f: T_url_value_preprocessor
) -> T_url_value_preprocessor:
"""Like :meth:`url_value_preprocessor`, but for every request, not only those
handled by the blueprint. Equivalent to :meth:`.Flask.url_value_preprocessor`.
"""
self.record_once(
lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)
)
return f
@setupmethod
def app_url_defaults(self, f: T_url_defaults) -> T_url_defaults:
"""Like :meth:`url_defaults`, but for every request, not only those handled by
the blueprint. Equivalent to :meth:`.Flask.url_defaults`.
"""
self.record_once(
lambda s: s.app.url_default_functions.setdefault(None, []).append(f)
)
return f
| Blueprint |
python | getsentry__sentry | src/sentry/sentry_metrics/querying/units.py | {
"start": 755,
"end": 969
} | class ____(Enum):
"""
Represents family of units contains all units that are coercible between each other.
"""
DURATION = "duration"
INFORMATION = "information"
@dataclass(frozen=True)
| UnitFamily |
python | astropy__astropy | astropy/utils/masked/tests/test_function_helpers.py | {
"start": 9749,
"end": 10394
} | class ____(InvariantMaskTestSetup):
def test_copy(self):
self.check(np.copy)
# Also as kwarg
copy = np.copy(a=self.ma)
assert_array_equal(copy, self.ma)
@pytest.mark.skipif(not NUMPY_LT_2_0, reason="np.asfarray is removed in NumPy 2.0")
def test_asfarray(self):
self.check(np.asfarray) # noqa: NPY201
farray = np.asfarray(a=self.ma) # noqa: NPY201
assert_array_equal(farray, self.ma)
if not NUMPY_LT_2_0:
def test_astype(self):
int32ma = self.ma.astype("int32")
assert_array_equal(np.astype(int32ma, "int32"), int32ma)
| TestCopyAndCreation |
python | getsentry__sentry | tests/sentry/monitors/test_utils.py | {
"start": 4172,
"end": 8111
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.monitor = self.create_monitor()
def test_deletes_data_source_and_detector(self) -> None:
ensure_cron_detector(self.monitor)
data_source = DataSource.objects.get(
type=DATA_SOURCE_CRON_MONITOR,
organization_id=self.monitor.organization_id,
source_id=str(self.monitor.id),
)
datasource_detector = DataSourceDetector.objects.get(data_source=data_source)
detector = datasource_detector.detector
data_source_id = data_source.id
detector_id = detector.id
ensure_cron_detector_deletion(self.monitor)
assert not DataSource.objects.filter(id=data_source_id).exists()
assert not Detector.objects.filter(id=detector_id).exists()
def test_does_nothing_when_no_data_source_exists(self) -> None:
initial_datasource_count = DataSource.objects.count()
initial_detector_count = Detector.objects.count()
ensure_cron_detector_deletion(self.monitor)
assert DataSource.objects.count() == initial_datasource_count
assert Detector.objects.count() == initial_detector_count
def test_deletes_only_data_source_when_no_detector_exists(self) -> None:
data_source = DataSource.objects.create(
type=DATA_SOURCE_CRON_MONITOR,
organization_id=self.monitor.organization_id,
source_id=str(self.monitor.id),
)
data_source_id = data_source.id
ensure_cron_detector_deletion(self.monitor)
assert not DataSource.objects.filter(id=data_source_id).exists()
def test_deletes_correct_detector_for_specific_monitor(self) -> None:
monitor1 = self.monitor
monitor2 = self.create_monitor(name="Monitor 2")
ensure_cron_detector(monitor1)
ensure_cron_detector(monitor2)
data_source1 = DataSource.objects.get(
type=DATA_SOURCE_CRON_MONITOR,
organization_id=monitor1.organization_id,
source_id=str(monitor1.id),
)
datasource_detector1 = DataSourceDetector.objects.get(data_source=data_source1)
detector1 = datasource_detector1.detector
data_source2 = DataSource.objects.get(
type=DATA_SOURCE_CRON_MONITOR,
organization_id=monitor2.organization_id,
source_id=str(monitor2.id),
)
datasource_detector2 = DataSourceDetector.objects.get(data_source=data_source2)
detector2 = datasource_detector2.detector
data_source1_id = data_source1.id
detector1_id = detector1.id
data_source2_id = data_source2.id
detector2_id = detector2.id
ensure_cron_detector_deletion(monitor1)
assert not DataSource.objects.filter(id=data_source1_id).exists()
assert not Detector.objects.filter(id=detector1_id).exists()
assert DataSource.objects.filter(id=data_source2_id).exists()
assert Detector.objects.filter(id=detector2_id).exists()
def test_atomic_transaction_ensures_both_deleted(self) -> None:
ensure_cron_detector(self.monitor)
data_source = DataSource.objects.get(
type=DATA_SOURCE_CRON_MONITOR,
organization_id=self.monitor.organization_id,
source_id=str(self.monitor.id),
)
datasource_detector = DataSourceDetector.objects.get(data_source=data_source)
detector = datasource_detector.detector
with patch("sentry.monitors.utils.Detector.delete") as mock_delete:
mock_delete.side_effect = Exception("Cannot delete detector")
try:
ensure_cron_detector_deletion(self.monitor)
except Exception:
pass
assert DataSource.objects.filter(id=data_source.id).exists()
assert Detector.objects.filter(id=detector.id).exists()
| EnsureCronDetectorDeletionTest |
python | getsentry__sentry | src/sentry/workflow_engine/endpoints/validators/alertrule_detector.py | {
"start": 41,
"end": 652
} | class ____(serializers.Serializer):
rule_id = serializers.CharField(required=False)
alert_rule_id = serializers.CharField(required=False)
detector_id = serializers.CharField(required=False)
def validate(self, attrs):
super().validate(attrs)
if (
not attrs.get("rule_id")
and not attrs.get("alert_rule_id")
and not attrs.get("detector_id")
):
raise serializers.ValidationError(
"One of 'rule_id', 'alert_rule_id', or 'detector_id' must be provided."
)
return attrs
| AlertRuleDetectorValidator |
python | keras-team__keras | keras/src/metrics/probabilistic_metrics_test.py | {
"start": 5335,
"end": 7372
} | class ____(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
metrics.CategoricalCrossentropy(
name="cce", dtype="int32", label_smoothing=0.2
)
)
def test_unweighted(self):
cce_obj = metrics.CategoricalCrossentropy()
y_true = np.array([[0, 1, 0], [0, 0, 1]])
y_pred = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
result = cce_obj(y_true, y_pred)
self.assertAllClose(result, 1.176, atol=1e-3)
def test_unweighted_from_logits(self):
cce_obj = metrics.CategoricalCrossentropy(from_logits=True)
y_true = np.array([[0, 1, 0], [0, 0, 1]])
logits = np.array([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
result = cce_obj(y_true, logits)
self.assertAllClose(result, 3.5011, atol=1e-3)
def test_weighted(self):
cce_obj = metrics.CategoricalCrossentropy()
y_true = np.array([[0, 1, 0], [0, 0, 1]])
y_pred = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
sample_weight = np.array([1.5, 2.0])
result = cce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(result, 1.338, atol=1e-3)
def test_weighted_from_logits(self):
cce_obj = metrics.CategoricalCrossentropy(from_logits=True)
y_true = np.array([[0, 1, 0], [0, 0, 1]])
logits = np.array([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
sample_weight = np.array([1.5, 2.0])
result = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAllClose(result, 4.0012, atol=1e-3)
def test_label_smoothing(self):
y_true = np.array([[0, 1, 0], [0, 0, 1]])
logits = np.array([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
label_smoothing = 0.1
cce_obj = metrics.CategoricalCrossentropy(
from_logits=True, label_smoothing=label_smoothing
)
loss = cce_obj(y_true, logits)
self.assertAllClose(loss, 3.667, atol=1e-3)
| CategoricalCrossentropyTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/isinstance5.py | {
"start": 492,
"end": 1094
} | class ____(Protocol):
def method1(self) -> int: ...
def func2(a: Any):
if isinstance(a, DataProtocol):
return
if isinstance(a, NonDataProtocol):
return
# This should generate an error because data protocols
# are not allowed with issubclass checks.
if issubclass(a, (DataProtocol, NonDataProtocol)):
return
# This should generate an error because data protocols
# are not allowed with issubclass checks.
if issubclass(a, (DataProtocol2, NonDataProtocol)):
return
if issubclass(a, NonDataProtocol):
return
| NonDataProtocol |
python | PyCQA__pylint | pylint/lint/pylinter.py | {
"start": 2285,
"end": 8408
} | class ____(Protocol):
def __call__(
self, filepath: str, modname: str, data: str | None = None
) -> nodes.Module: ...
def _read_stdin() -> str:
# See https://github.com/python/typeshed/pull/5623 for rationale behind assertion
assert isinstance(sys.stdin, TextIOWrapper)
sys.stdin = TextIOWrapper(sys.stdin.detach(), encoding="utf-8")
return sys.stdin.read()
def _load_reporter_by_class(reporter_class: str) -> type[BaseReporter]:
qname = reporter_class
module_part = astroid.modutils.get_module_part(qname)
module = astroid.modutils.load_module_from_name(module_part)
class_name = qname.split(".")[-1]
klass = getattr(module, class_name)
assert issubclass(klass, BaseReporter), f"{klass} is not a BaseReporter"
return klass # type: ignore[no-any-return]
# Python Linter class #########################################################
# pylint: disable-next=consider-using-namedtuple-or-dataclass
MSGS: dict[str, MessageDefinitionTuple] = {
"F0001": (
"%s",
"fatal",
"Used when an error occurred preventing the analysis of a \
module (unable to find it for instance).",
{"scope": WarningScope.LINE},
),
"F0002": (
"%s: %s",
"astroid-error",
"Used when an unexpected error occurred while building the "
"Astroid representation. This is usually accompanied by a "
"traceback. Please report such errors !",
{"scope": WarningScope.LINE},
),
"F0010": (
"error while code parsing: %s",
"parse-error",
"Used when an exception occurred while building the Astroid "
"representation which could be handled by astroid.",
{"scope": WarningScope.LINE},
),
"F0011": (
"error while parsing the configuration: %s",
"config-parse-error",
"Used when an exception occurred while parsing a pylint configuration file.",
{"scope": WarningScope.LINE},
),
"I0001": (
"Unable to run raw checkers on built-in module %s",
"raw-checker-failed",
"Used to inform that a built-in module has not been checked "
"using the raw checkers.",
{
"scope": WarningScope.LINE,
"default_enabled": False,
},
),
"I0010": (
"Unable to consider inline option %r",
"bad-inline-option",
"Used when an inline option is either badly formatted or can't "
"be used inside modules.",
{
"scope": WarningScope.LINE,
"default_enabled": False,
},
),
"I0011": (
"Locally disabling %s (%s)",
"locally-disabled",
"Used when an inline option disables a message or a messages category.",
{
"scope": WarningScope.LINE,
"default_enabled": False,
},
),
"I0013": (
"Ignoring entire file",
"file-ignored",
"Used to inform that the file will not be checked",
{
"scope": WarningScope.LINE,
"default_enabled": False,
},
),
"I0020": (
"Suppressed %s (from line %d)",
"suppressed-message",
"A message was triggered on a line, but suppressed explicitly "
"by a disable= comment in the file. This message is not "
"generated for messages that are ignored due to configuration "
"settings.",
{
"scope": WarningScope.LINE,
"default_enabled": False,
},
),
"I0021": (
"Useless suppression of %s",
"useless-suppression",
"Reported when a message is explicitly disabled for a line or "
"a block of code, but never triggered.",
{
"scope": WarningScope.LINE,
"default_enabled": False,
},
),
"I0022": (
'Pragma "%s" is deprecated, use "%s" instead',
"deprecated-pragma",
"Some inline pylint options have been renamed or reworked, "
"only the most recent form should be used. "
"NOTE:skip-all is only available with pylint >= 0.26",
{
"old_names": [("I0014", "deprecated-disable-all")],
"scope": WarningScope.LINE,
"default_enabled": False,
},
),
"E0001": (
"%s",
"syntax-error",
"Used when a syntax error is raised for a module.",
{"scope": WarningScope.LINE},
),
"E0011": (
"Unrecognized file option %r",
"unrecognized-inline-option",
"Used when an unknown inline option is encountered.",
{"scope": WarningScope.LINE},
),
"W0012": (
"Unknown option value for '%s', expected a valid pylint message and got '%s'",
"unknown-option-value",
"Used when an unknown value is encountered for an option.",
{
"scope": WarningScope.LINE,
"old_names": [("E0012", "bad-option-value")],
},
),
"R0022": (
"Useless option value for '%s', %s",
"useless-option-value",
"Used when a value for an option that is now deleted from pylint"
" is encountered.",
{
"scope": WarningScope.LINE,
"old_names": [("E0012", "bad-option-value")],
},
),
"E0013": (
"Plugin '%s' is impossible to load, is it installed ? ('%s')",
"bad-plugin-value",
"Used when a bad value is used in 'load-plugins'.",
{"scope": WarningScope.LINE},
),
"E0014": (
"Out-of-place setting encountered in top level configuration-section '%s' : '%s'",
"bad-configuration-section",
"Used when we detect a setting in the top level of a toml configuration that"
" shouldn't be there.",
{"scope": WarningScope.LINE},
),
"E0015": (
"Unrecognized option found: %s",
"unrecognized-option",
"Used when we detect an option that we do not recognize.",
{"scope": WarningScope.LINE},
),
}
# pylint: disable=too-many-instance-attributes,too-many-public-methods
| GetAstProtocol |
python | spack__spack | lib/spack/spack/test/repo.py | {
"start": 19136,
"end": 36784
} | class ____(PackageBase):
pass
"""
)
with spack.repo.use_repositories(str(repo_dir)) as repo:
assert repo.exists("1example-2-test")
pkg_cls = repo.get_pkg_class("1example-2-test")
assert pkg_cls.name == "1example-2-test"
assert pkg_cls.module.__name__ == "spack_repo.repo_2.packages._1example_2_test.package"
def test_valid_module_name_v2():
api = (2, 0)
# no hyphens
assert not valid_module_name("zlib-ng", api)
# cannot start with a number
assert not valid_module_name("7zip", api)
# no consecutive underscores
assert not valid_module_name("zlib__ng", api)
# reserved names
assert not valid_module_name("finally", api)
assert not valid_module_name("assert", api)
# cannot contain uppercase
assert not valid_module_name("False", api)
assert not valid_module_name("zlib_NG", api)
# reserved names are allowed when preceded by underscore
assert valid_module_name("_finally", api)
assert valid_module_name("_assert", api)
# digits are allowed when preceded by underscore
assert valid_module_name("_1example_2_test", api)
# underscore is not allowed unless followed by reserved name or digit
assert not valid_module_name("_zlib", api)
assert not valid_module_name("_false", api)
def test_namespace_is_optional_in_v2(tmp_path: pathlib.Path):
"""Test that a repo without a namespace is valid in v2."""
repo_yaml_dir = tmp_path / "spack_repo" / "foo" / "bar" / "baz"
(repo_yaml_dir / "packages").mkdir(parents=True)
(repo_yaml_dir / "repo.yaml").write_text(
"""\
repo:
api: v2.0
"""
)
cache = spack.util.file_cache.FileCache(tmp_path / "cache")
repo = spack.repo.Repo(str(repo_yaml_dir), cache=cache)
assert repo.namespace == "foo.bar.baz"
assert repo.full_namespace == "spack_repo.foo.bar.baz.packages"
assert repo.root == str(repo_yaml_dir)
assert repo.packages_path == str(repo_yaml_dir / "packages")
assert repo.python_path == str(tmp_path)
assert repo.package_api == (2, 0)
def test_subdir_in_v2():
"""subdir cannot be . or empty in v2, because otherwise we cannot statically distinguish
between namespace and subdir."""
with pytest.raises(spack.repo.BadRepoError, match="Use a symlink packages -> . instead"):
spack.repo._validate_and_normalize_subdir(subdir="", root="root", package_api=(2, 0))
with pytest.raises(spack.repo.BadRepoError, match="Use a symlink packages -> . instead"):
spack.repo._validate_and_normalize_subdir(subdir=".", root="root", package_api=(2, 0))
with pytest.raises(spack.repo.BadRepoError, match="Expected a directory name, not a path"):
subdir = os.path.join("a", "b")
spack.repo._validate_and_normalize_subdir(subdir=subdir, root="root", package_api=(2, 0))
with pytest.raises(spack.repo.BadRepoError, match="Must be a valid Python module name"):
spack.repo._validate_and_normalize_subdir(subdir="123", root="root", package_api=(2, 0))
def test_is_package_module():
assert spack.repo.is_package_module("spack.pkg.something.something")
assert spack.repo.is_package_module("spack_repo.foo.bar.baz.package")
assert not spack.repo.is_package_module("spack_repo.builtin.build_systems.cmake")
assert not spack.repo.is_package_module("spack.something.else")
def test_environment_activation_updates_repo_path(tmp_path: pathlib.Path):
"""Test that the environment activation updates the repo path correctly."""
repo_root, _ = spack.repo.create_repo(str(tmp_path / "foo"), namespace="bar")
(tmp_path / "spack.yaml").write_text(
"""\
spack:
repos:
bar: $env/foo/spack_repo/bar
"""
)
env = spack.environment.Environment(tmp_path)
with env:
assert any(os.path.samefile(repo_root, r.root) for r in spack.repo.PATH.repos)
assert not any(os.path.samefile(repo_root, r.root) for r in spack.repo.PATH.repos)
with env:
assert any(os.path.samefile(repo_root, r.root) for r in spack.repo.PATH.repos)
assert not any(os.path.samefile(repo_root, r.root) for r in spack.repo.PATH.repos)
def test_repo_update(tmp_path: pathlib.Path):
existing_root, _ = spack.repo.create_repo(str(tmp_path), namespace="foo")
nonexisting_root = str(tmp_path / "nonexisting")
config = {"repos": [existing_root, nonexisting_root]}
assert spack.schema.repos.update(config)
assert config["repos"] == {
"foo": existing_root
# non-existing root is removed for simplicity; would be a warning otherwise.
}
def test_mock_builtin_repo(mock_packages):
assert spack.repo.builtin_repo() is spack.repo.PATH.get_repo("builtin_mock")
def test_parse_config_descriptor_git_1(tmp_path: pathlib.Path):
descriptor = spack.repo.parse_config_descriptor(
name="name",
descriptor={
"git": str(tmp_path / "repo.git"),
"destination": str(tmp_path / "some/destination"),
},
lock=spack.util.lock.Lock(str(tmp_path / "x"), enable=False),
)
assert isinstance(descriptor, spack.repo.RemoteRepoDescriptor)
assert descriptor.name == "name"
assert descriptor.repository == str(tmp_path / "repo.git")
assert descriptor.destination == str(tmp_path / "some/destination")
assert descriptor.relative_paths is None
def test_parse_config_descriptor_git_2(tmp_path: pathlib.Path):
descriptor = spack.repo.parse_config_descriptor(
name="name",
descriptor={"git": str(tmp_path / "repo.git"), "paths": ["some/path"]},
lock=spack.util.lock.Lock(str(tmp_path / "x"), enable=False),
)
assert isinstance(descriptor, spack.repo.RemoteRepoDescriptor)
assert descriptor.relative_paths == ["some/path"]
def test_remote_descriptor_no_git(tmp_path: pathlib.Path):
"""Test that descriptor fails without git."""
descriptor = spack.repo.parse_config_descriptor(
name="name",
descriptor={
"git": str(tmp_path / "repo.git"),
"destination": str(tmp_path / "some/destination"),
},
lock=spack.util.lock.Lock(str(tmp_path / "x"), enable=False),
)
descriptor.initialize(fetch=True, git=None)
assert isinstance(descriptor, spack.repo.RemoteRepoDescriptor)
assert descriptor.error == "Git executable not found"
def test_remote_descriptor_update_no_git(tmp_path: pathlib.Path):
"""Test that descriptor fails without git."""
descriptor = spack.repo.parse_config_descriptor(
name="name",
descriptor={
"git": str(tmp_path / "repo.git"),
"destination": str(tmp_path / "some/destination"),
},
lock=spack.util.lock.Lock(str(tmp_path / "x"), enable=False),
)
assert isinstance(descriptor, spack.repo.RemoteRepoDescriptor)
with pytest.raises(spack.repo.RepoError, match="Git executable not found"):
descriptor.update(git=None)
def test_parse_config_descriptor_local(tmp_path: pathlib.Path):
descriptor = spack.repo.parse_config_descriptor(
name="name",
descriptor=str(tmp_path / "local_repo"),
lock=spack.util.lock.Lock(str(tmp_path / "x"), enable=False),
)
assert isinstance(descriptor, spack.repo.LocalRepoDescriptor)
assert descriptor.name == "name"
assert descriptor.path == str(tmp_path / "local_repo")
def test_parse_config_descriptor_no_git(tmp_path: pathlib.Path):
"""Test that we can parse a descriptor without a git key."""
with pytest.raises(RuntimeError, match="Invalid configuration for repository"):
spack.repo.parse_config_descriptor(
name="name",
descriptor={"destination": str(tmp_path / "some/destination"), "paths": ["some/path"]},
lock=spack.util.lock.Lock(str(tmp_path / "x"), enable=False),
)
def test_repo_descriptors_construct(tmp_path: pathlib.Path):
"""Test the RepoDescriptors construct function. Ensure it does not raise when we cannot
construct a Repo instance, e.g. due to missing repo.yaml file. Check that it parses the
spack-repo-index.yaml file both when newly initialized and when already cloned."""
lock = spack.util.lock.Lock(str(tmp_path / "x"), enable=False)
cache = spack.util.file_cache.FileCache(str(tmp_path / "cache"))
# Construct 3 identical descriptors
descriptors_1, descriptors_2, descriptors_3 = [
{
"foo": spack.repo.RemoteRepoDescriptor(
name="foo",
repository=str(tmp_path / "foo.git"),
destination=str(tmp_path / "foo_destination"),
branch=None,
tag=None,
commit=None,
relative_paths=None,
lock=lock,
)
}
for _ in range(3)
]
repos_1 = spack.repo.RepoDescriptors(descriptors_1) # type: ignore
repos_2 = spack.repo.RepoDescriptors(descriptors_2) # type: ignore
repos_3 = spack.repo.RepoDescriptors(descriptors_3) # type: ignore
class MockGit(spack.util.executable.Executable):
def __init__(self):
pass
def __call__(self, *args, **kwargs) -> str: # type: ignore
action = args[0]
if action == "ls-remote":
return """\
a8eff4da7aab59bbf5996ac1720954bf82443247 HEAD
165c479984b94051c982a6be1bd850f8bae02858 refs/heads/feature-branch
a8eff4da7aab59bbf5996ac1720954bf82443247 refs/heads/develop
3bd0276ab0491552247fa055921a23d2ffd9443c refs/heads/releases/v0.20"""
elif action == "rev-parse":
return "develop"
elif action == "config":
return "origin"
elif action == "init":
# The git repo needs a .git subdir
os.makedirs(os.path.join(".git"))
elif action == "checkout":
# The spack-repo-index.yaml is optional; we test Spack reads from it.
with open(os.path.join("spack-repo-index.yaml"), "w", encoding="utf-8") as f:
f.write(
"""\
repo_index:
paths:
- spack_repo/foo
"""
)
return ""
repo_path_1, errors_1 = repos_1.construct(cache=cache, find_git=MockGit)
# Verify it cannot construct a Repo instance, and that this does *not* throw, since that would
# break Spack very early on. Instead, an error is returned. Also verify that
# relative_paths is read from spack-repo-index.yaml.
assert len(repo_path_1.repos) == 0
assert len(errors_1) == 1
assert all("No repo.yaml" in str(err) for err in errors_1.values()), errors_1
assert descriptors_1["foo"].relative_paths == ["spack_repo/foo"]
# Verify that the default branch was detected from ls-remote
assert descriptors_1["foo"].branch == "develop"
# Do the same test with another instance: it should *not* clone a second time.
repo_path_2, errors_2 = repos_2.construct(cache=cache, find_git=MockGit)
assert len(repo_path_2.repos) == 0
assert len(errors_2) == 1
assert all("No repo.yaml" in str(err) for err in errors_2.values()), errors_2
assert descriptors_1["foo"].relative_paths == ["spack_repo/foo"]
# Finally fill the repo with an actual repo and check that the repo can be constructed.
spack.repo.create_repo(str(tmp_path / "foo_destination"), "foo")
repo_path_3, errors_3 = repos_3.construct(cache=cache, find_git=MockGit)
assert not errors_3
assert len(repo_path_3.repos) == 1
assert repo_path_3.repos[0].namespace == "foo"
def test_repo_descriptors_update(tmp_path: pathlib.Path):
"""Test the RepoDescriptors construct function. Ensure it does not raise when we cannot
construct a Repo instance, e.g. due to missing repo.yaml file. Check that it parses the
spack-repo-index.yaml file both when newly initialized and when already cloned."""
lock = spack.util.lock.Lock(str(tmp_path / "x"), enable=False)
cache = spack.util.file_cache.FileCache(str(tmp_path / "cache"))
# Construct 3 identical descriptors
descriptors_1, descriptors_2, descriptors_3, descriptors_4 = [
{
"foo": spack.repo.RemoteRepoDescriptor(
name="foo",
repository=str(tmp_path / "foo.git"),
destination=str(tmp_path / "foo_destination"),
branch="develop" if i == 0 else None,
tag="v1.0" if i == 1 else None,
commit="abc123" if i == 2 else None,
relative_paths=None,
lock=lock,
)
}
for i in range(4)
]
repos_1 = spack.repo.RepoDescriptors(descriptors_1) # type: ignore
repos_2 = spack.repo.RepoDescriptors(descriptors_2) # type: ignore
repos_3 = spack.repo.RepoDescriptors(descriptors_3) # type: ignore
repos_4 = spack.repo.RepoDescriptors(descriptors_4) # type: ignore
class MockGit(spack.util.executable.Executable):
def __init__(self):
pass
def __call__(self, *args, **kwargs) -> str: # type: ignore
action = args[0]
if action == "ls-remote":
return """\
a8eff4da7aab59bbf5996ac1720954bf82443247 HEAD
165c479984b94051c982a6be1bd850f8bae02858 refs/heads/feature-branch
a8eff4da7aab59bbf5996ac1720954bf82443247 refs/heads/develop
3bd0276ab0491552247fa055921a23d2ffd9443c refs/heads/releases/v0.20"""
elif action == "rev-parse":
return "develop"
elif action == "config":
return "origin"
elif action == "init":
# The git repo needs a .git subdir
os.makedirs(os.path.join(".git"))
elif action == "checkout":
# The spack-repo-index.yaml is optional; we test Spack reads from it.
with open(os.path.join("spack-repo-index.yaml"), "w", encoding="utf-8") as f:
f.write(
"""\
repo_index:
paths:
- spack_repo/foo
"""
)
return ""
spack.repo.create_repo(str(tmp_path / "foo_destination"), "foo")
# branch develop
_, errors_1 = repos_1.construct(cache=cache, find_git=MockGit)
assert not errors_1
for descriptor in repos_1.values():
descriptor.update(git=MockGit())
# tag v1.0
_, errors_2 = repos_2.construct(cache=cache, find_git=MockGit)
assert not errors_2
for descriptor in repos_2.values():
descriptor.update(git=MockGit())
# commit abc123
_, errors_3 = repos_3.construct(cache=cache, find_git=MockGit)
assert not errors_3
for descriptor in repos_3.values():
descriptor.update(git=MockGit())
# default branch
_, errors_4 = repos_4.construct(cache=cache, find_git=MockGit)
assert not errors_4
for descriptor in repos_4.values():
descriptor.update(git=MockGit())
# Rerun construction after initialization to test early exit logic
_, errors_4 = repos_4.construct(cache=cache, find_git=MockGit)
assert not errors_4
def test_repo_descriptors_update_invalid(tmp_path: pathlib.Path):
    """Exercise RepoDescriptors failure paths.

    Ensures ``construct`` does not raise when a clone fails outright (the
    failure is reported as a per-descriptor error instead), and that
    ``update`` raises when the remote advertises a HEAD ref but no
    ``refs/heads/*`` entries, so no default branch can be located.
    """
    lock = spack.util.lock.Lock(str(tmp_path / "x"), enable=False)
    cache = spack.util.file_cache.FileCache(str(tmp_path / "cache"))

    # Single remote descriptor pointing at a (nonexistent) git repository.
    descriptors_1 = {
        "foo": spack.repo.RemoteRepoDescriptor(
            name="foo",
            repository=str(tmp_path / "foo.git"),
            destination=str(tmp_path / "foo_destination"),
            branch=None,
            tag=None,
            commit=None,
            relative_paths=None,
            lock=lock,
        )
    }
    repos_1 = spack.repo.RepoDescriptors(descriptors_1)  # type: ignore

    class MockGitInvalidRemote(spack.util.executable.Executable):
        # Git stub whose ls-remote output has no refs/heads/* entries.
        def __init__(self):
            pass

        def __call__(self, *args, **kwargs) -> str:  # type: ignore
            action = args[0]
            if action == "ls-remote":
                # HEAD ref exists, but no default branch (i.e. no refs/heads/*)
                return "a8eff4da7aab59bbf5996ac1720954bf82443247 HEAD"
            return ""

    class MockGitFailed(spack.util.executable.Executable):
        # Git stub where every invocation fails (e.g. network error).
        def __init__(self):
            pass

        def __call__(self, *args, **kwargs) -> str:  # type: ignore
            raise spack.util.executable.ProcessError("failed")

    spack.repo.create_repo(str(tmp_path / "foo_destination"), "foo")

    # A failing clone must surface as a per-descriptor error, not an exception.
    _, errors_1 = repos_1.construct(cache=cache, find_git=MockGitFailed)
    assert len(errors_1) == 1
    assert all("Failed to clone repository" in str(err) for err in errors_1.values()), errors_1

    # With no refs/heads/* advertised, update() cannot pick a default branch.
    with pytest.raises(spack.repo.RepoError, match="Unable to locate a default branch"):
        for descriptor in repos_1.values():
            descriptor.update(git=MockGitInvalidRemote())
def test_repo_use_bad_import(config, repo_builder: RepoBuilder):
"""Demonstrate failure when attempt to get the class for package containing
a failing import (e.g., missing repository)."""
package_py = pathlib.Path(repo_builder._recipe_filename("importer"))
package_py.parent.mkdir(parents=True)
package_py.write_text(
"""\
from spack_repo.missing.packages import base
from spack.package import *
| _1example2Test |
python | scipy__scipy | tools/authors.py | {
"start": 5596,
"end": 7381
} | class ____:
executable = None
def __init__(self, executable):
self.executable = executable
def _call(self, command, args, kw, repository=None, call=False):
cmd = [self.executable, command] + list(args)
cwd = None
if repository is not None:
cwd = os.getcwd()
os.chdir(repository)
try:
if call:
return subprocess.call(cmd, **kw)
else:
return subprocess.Popen(cmd, **kw)
finally:
if cwd is not None:
os.chdir(cwd)
def __call__(self, command, *a, **kw):
ret = self._call(command, a, {}, call=True, **kw)
if ret != 0:
raise RuntimeError(f"{self.executable} failed")
def pipe(self, command, *a, **kw):
stdin = kw.pop('stdin', None)
p = self._call(command, a, dict(stdin=stdin, stdout=subprocess.PIPE),
call=False, **kw)
return p.stdout
def read(self, command, *a, **kw):
p = self._call(command, a, dict(stdout=subprocess.PIPE),
call=False, **kw)
out, err = p.communicate()
if p.returncode != 0:
raise RuntimeError(f"{self.executable} failed")
return out
def readlines(self, command, *a, **kw):
out = self.read(command, *a, **kw)
return out.rstrip("\n").split("\n")
def test(self, command, *a, **kw):
ret = self._call(command, a, dict(stdout=subprocess.PIPE,
stderr=subprocess.PIPE),
call=True, **kw)
return (ret == 0)
# Module-level helper bound to the system git executable.
git = Cmd("git")

#------------------------------------------------------------------------------

if __name__ == "__main__":
    main()
| Cmd |
python | allegroai__clearml | clearml/backend_api/session/jsonmodels/fields.py | {
"start": 400,
"end": 4355
class ____(object):
    """Base class for all fields.

    A field is a data descriptor: per-instance values are stored in a
    ``WeakKeyDictionary`` keyed by the owning instance's ``_cache_key``, so
    field storage does not keep model instances alive.
    """

    # Tuple of accepted Python types; subclasses must override this.
    types = None

    def __init__(
        self,
        required: bool = False,
        nullable: bool = False,
        help_text: str = None,
        validators: Any = None,
        default: Any = NotSet,
        name: str = None,
    ) -> None:
        self.memory = WeakKeyDictionary()
        self.required = required
        self.help_text = help_text
        self.nullable = nullable
        self._assign_validators(validators)
        self.name = name
        self._validate_name()
        if default is not NotSet:
            # Fail fast: an invalid default is a programming error.
            self.validate(default)
        self._default = default

    @property
    def has_default(self) -> bool:
        # True when an explicit default was supplied at construction time.
        return self._default is not NotSet

    def _assign_validators(self, validators: Union[None, list, Any]) -> None:
        # Accept a single validator or a list; normalize to a list.
        if validators and not isinstance(validators, list):
            validators = [validators]
        self.validators = validators or []

    def __set__(self, instance: Any, value: Any) -> None:
        # Descriptor write: parse, validate, then store per instance.
        self._finish_initialization(type(instance))
        value = self.parse_value(value)
        self.validate(value)
        self.memory[instance._cache_key] = value

    def __get__(self, instance: Any, owner: Optional[Type[Any]] = None) -> Any:
        if instance is None:
            # Class-level access returns the field object itself.
            self._finish_initialization(owner)
            return self

        self._finish_initialization(type(instance))

        self._check_value(instance)
        return self.memory[instance._cache_key]

    def _finish_initialization(self, owner: type) -> None:
        # Hook for subclasses that need deferred setup; no-op here.
        pass

    def _check_value(self, obj: Any) -> None:
        # Lazily materialize the default value on first read.
        if obj._cache_key not in self.memory:
            self.__set__(obj, self.get_default_value())

    def validate_for_object(self, obj: Any) -> None:
        """Validate the value currently stored for ``obj``."""
        value = self.__get__(obj)
        self.validate(value)

    def validate(self, value: Any) -> None:
        """Run the full validation pipeline: types, required, custom."""
        self._check_types()
        self._validate_against_types(value)
        self._check_against_required(value)
        self._validate_with_custom_validators(value)

    def _check_against_required(self, value: Any) -> None:
        if value is None and self.required:
            raise ValidationError("Field is required!")

    def _validate_against_types(self, value: Any) -> None:
        # None is permitted here; `required` handles missing values.
        if value is not None and not isinstance(value, self.types):
            raise ValidationError(
                'Value is wrong, expected type "{types}"'.format(types=", ".join([t.__name__ for t in self.types])),
                value,
            )

    def _check_types(self) -> None:
        # A subclass that never set `types` is unusable by construction.
        if self.types is None:
            raise ValidationError(
                'Field "{type}" is not usable, try ' "different field type.".format(type=type(self).__name__)
            )

    def to_struct(self, value: Any) -> Any:
        """Cast value to Python structure."""
        return value

    def parse_value(self, value: Any) -> Any:
        """Parse value from primitive to desired format.

        Each field can parse value to form it wants it to be (like string or
        int).
        """
        return value

    def _validate_with_custom_validators(self, value: Any) -> None:
        # Nullable fields skip custom validators for None.
        if value is None and self.nullable:
            return

        for validator in self.validators:
            try:
                validator.validate(value)
            except AttributeError:
                # Plain callables (no .validate attribute) are supported too.
                validator(value)

    def get_default_value(self) -> Any:
        """Get default value for field.

        Each field can specify its default.
        """
        return self._default if self.has_default else None

    def _validate_name(self) -> None:
        if self.name is None:
            return
        if not re.match(r"^[A-Za-z_](([\w\-]*)?\w+)?$", self.name):  # noqa: W605
            raise ValueError("Wrong name", self.name)

    def structue_name(self, default: Any) -> Any:
        # NOTE(review): "structue" is misspelled but is the public API name
        # used by callers elsewhere; do not rename without updating them.
        return self.name if self.name is not None else default
| BaseField |
python | realpython__materials | python-enum/days.py | {
"start": 177,
"end": 332
} | class ____(Enum):
MONDAY = auto()
TUESDAY = auto()
WEDNESDAY = 3
THURSDAY = auto()
FRIDAY = auto()
SATURDAY = auto()
SUNDAY = 7
| Day |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_gtk3agg.py | {
"start": 216,
"end": 2387
class ____(backend_agg.FigureCanvasAgg,
           backend_gtk3.FigureCanvasGTK3):
    """GTK3 canvas that renders with Agg and paints the result via cairo."""

    def __init__(self, figure):
        super().__init__(figure=figure)
        # Regions queued by blit() that still need to be painted on-screen.
        self._bbox_queue = []

    def on_draw_event(self, widget, ctx):
        """Handle GTK's draw signal: paint queued regions, or everything."""
        if self._idle_draw_id:
            # A full idle redraw is pending; run it now instead of waiting.
            GLib.source_remove(self._idle_draw_id)
            self._idle_draw_id = 0
            self.draw()

        scale = self.device_pixel_ratio
        allocation = self.get_allocation()
        w = allocation.width * scale
        h = allocation.height * scale

        if not len(self._bbox_queue):
            # Nothing queued: repaint the whole canvas over the themed
            # widget background.
            Gtk.render_background(
                self.get_style_context(), ctx,
                allocation.x, allocation.y,
                allocation.width, allocation.height)
            bbox_queue = [transforms.Bbox([[0, 0], [w, h]])]
        else:
            bbox_queue = self._bbox_queue

        for bbox in bbox_queue:
            x = int(bbox.x0)
            # Flip y: matplotlib's origin is bottom-left, cairo's top-left.
            y = h - int(bbox.y1)
            width = int(bbox.x1) - int(bbox.x0)
            height = int(bbox.y1) - int(bbox.y0)

            # Agg buffer is unmultiplied RGBA; cairo wants premultiplied ARGB32.
            buf = cbook._unmultiplied_rgba8888_to_premultiplied_argb32(
                np.asarray(self.copy_from_bbox(bbox)))
            image = cairo.ImageSurface.create_for_data(
                buf.ravel().data, cairo.FORMAT_ARGB32, width, height)
            image.set_device_scale(scale, scale)
            ctx.set_source_surface(image, x / scale, y / scale)
            ctx.paint()

        if len(self._bbox_queue):
            self._bbox_queue = []

        return False

    def blit(self, bbox=None):
        # If bbox is None, blit the entire canvas to gtk. Otherwise
        # blit only the area defined by the bbox.
        if bbox is None:
            bbox = self.figure.bbox

        scale = self.device_pixel_ratio
        allocation = self.get_allocation()
        # Convert from figure (device) pixels to GTK logical coordinates.
        x = int(bbox.x0 / scale)
        y = allocation.height - int(bbox.y1 / scale)
        width = (int(bbox.x1) - int(bbox.x0)) // scale
        height = (int(bbox.y1) - int(bbox.y0)) // scale

        self._bbox_queue.append(bbox)
        # Ask GTK to redraw that area; painting happens in on_draw_event.
        self.queue_draw_area(x, y, width, height)
@_BackendGTK3.export
| FigureCanvasGTK3Agg |
python | python-jsonschema__jsonschema | jsonschema/tests/test_validators.py | {
"start": 27519,
"end": 53570
class ____(TestCase):
    """End-to-end checks of the metadata carried by ``ValidationError``:
    validator name/value, instance, schema, the relative/absolute/JSON
    paths, and nested ``context`` errors, across keywords such as
    ``anyOf``, ``type``, ``contains``, ``prefixItems`` and ``$ref``.
    """

    # TODO: These really need unit tests for each individual keyword, rather
    #       than just these higher level tests.
def test_anyOf(self):
instance = 5
schema = {
"anyOf": [
{"minimum": 20},
{"type": "string"},
],
}
validator = validators.Draft4Validator(schema)
errors = list(validator.iter_errors(instance))
self.assertEqual(len(errors), 1)
e = errors[0]
self.assertEqual(e.validator, "anyOf")
self.assertEqual(e.validator_value, schema["anyOf"])
self.assertEqual(e.instance, instance)
self.assertEqual(e.schema, schema)
self.assertIsNone(e.parent)
self.assertEqual(e.path, deque([]))
self.assertEqual(e.relative_path, deque([]))
self.assertEqual(e.absolute_path, deque([]))
self.assertEqual(e.json_path, "$")
self.assertEqual(e.schema_path, deque(["anyOf"]))
self.assertEqual(e.relative_schema_path, deque(["anyOf"]))
self.assertEqual(e.absolute_schema_path, deque(["anyOf"]))
self.assertEqual(len(e.context), 2)
e1, e2 = sorted_errors(e.context)
self.assertEqual(e1.validator, "minimum")
self.assertEqual(e1.validator_value, schema["anyOf"][0]["minimum"])
self.assertEqual(e1.instance, instance)
self.assertEqual(e1.schema, schema["anyOf"][0])
self.assertIs(e1.parent, e)
self.assertEqual(e1.path, deque([]))
self.assertEqual(e1.absolute_path, deque([]))
self.assertEqual(e1.relative_path, deque([]))
self.assertEqual(e1.json_path, "$")
self.assertEqual(e1.schema_path, deque([0, "minimum"]))
self.assertEqual(e1.relative_schema_path, deque([0, "minimum"]))
self.assertEqual(
e1.absolute_schema_path, deque(["anyOf", 0, "minimum"]),
)
self.assertFalse(e1.context)
self.assertEqual(e2.validator, "type")
self.assertEqual(e2.validator_value, schema["anyOf"][1]["type"])
self.assertEqual(e2.instance, instance)
self.assertEqual(e2.schema, schema["anyOf"][1])
self.assertIs(e2.parent, e)
self.assertEqual(e2.path, deque([]))
self.assertEqual(e2.relative_path, deque([]))
self.assertEqual(e2.absolute_path, deque([]))
self.assertEqual(e2.json_path, "$")
self.assertEqual(e2.schema_path, deque([1, "type"]))
self.assertEqual(e2.relative_schema_path, deque([1, "type"]))
self.assertEqual(e2.absolute_schema_path, deque(["anyOf", 1, "type"]))
self.assertEqual(len(e2.context), 0)
def test_type(self):
instance = {"foo": 1}
schema = {
"type": [
{"type": "integer"},
{
"type": "object",
"properties": {"foo": {"enum": [2]}},
},
],
}
validator = validators.Draft3Validator(schema)
errors = list(validator.iter_errors(instance))
self.assertEqual(len(errors), 1)
e = errors[0]
self.assertEqual(e.validator, "type")
self.assertEqual(e.validator_value, schema["type"])
self.assertEqual(e.instance, instance)
self.assertEqual(e.schema, schema)
self.assertIsNone(e.parent)
self.assertEqual(e.path, deque([]))
self.assertEqual(e.relative_path, deque([]))
self.assertEqual(e.absolute_path, deque([]))
self.assertEqual(e.json_path, "$")
self.assertEqual(e.schema_path, deque(["type"]))
self.assertEqual(e.relative_schema_path, deque(["type"]))
self.assertEqual(e.absolute_schema_path, deque(["type"]))
self.assertEqual(len(e.context), 2)
e1, e2 = sorted_errors(e.context)
self.assertEqual(e1.validator, "type")
self.assertEqual(e1.validator_value, schema["type"][0]["type"])
self.assertEqual(e1.instance, instance)
self.assertEqual(e1.schema, schema["type"][0])
self.assertIs(e1.parent, e)
self.assertEqual(e1.path, deque([]))
self.assertEqual(e1.relative_path, deque([]))
self.assertEqual(e1.absolute_path, deque([]))
self.assertEqual(e1.json_path, "$")
self.assertEqual(e1.schema_path, deque([0, "type"]))
self.assertEqual(e1.relative_schema_path, deque([0, "type"]))
self.assertEqual(e1.absolute_schema_path, deque(["type", 0, "type"]))
self.assertFalse(e1.context)
self.assertEqual(e2.validator, "enum")
self.assertEqual(e2.validator_value, [2])
self.assertEqual(e2.instance, 1)
self.assertEqual(e2.schema, {"enum": [2]})
self.assertIs(e2.parent, e)
self.assertEqual(e2.path, deque(["foo"]))
self.assertEqual(e2.relative_path, deque(["foo"]))
self.assertEqual(e2.absolute_path, deque(["foo"]))
self.assertEqual(e2.json_path, "$.foo")
self.assertEqual(
e2.schema_path, deque([1, "properties", "foo", "enum"]),
)
self.assertEqual(
e2.relative_schema_path, deque([1, "properties", "foo", "enum"]),
)
self.assertEqual(
e2.absolute_schema_path,
deque(["type", 1, "properties", "foo", "enum"]),
)
self.assertFalse(e2.context)
def test_single_nesting(self):
instance = {"foo": 2, "bar": [1], "baz": 15, "quux": "spam"}
schema = {
"properties": {
"foo": {"type": "string"},
"bar": {"minItems": 2},
"baz": {"maximum": 10, "enum": [2, 4, 6, 8]},
},
}
validator = validators.Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2, e3, e4 = sorted_errors(errors)
self.assertEqual(e1.path, deque(["bar"]))
self.assertEqual(e2.path, deque(["baz"]))
self.assertEqual(e3.path, deque(["baz"]))
self.assertEqual(e4.path, deque(["foo"]))
self.assertEqual(e1.relative_path, deque(["bar"]))
self.assertEqual(e2.relative_path, deque(["baz"]))
self.assertEqual(e3.relative_path, deque(["baz"]))
self.assertEqual(e4.relative_path, deque(["foo"]))
self.assertEqual(e1.absolute_path, deque(["bar"]))
self.assertEqual(e2.absolute_path, deque(["baz"]))
self.assertEqual(e3.absolute_path, deque(["baz"]))
self.assertEqual(e4.absolute_path, deque(["foo"]))
self.assertEqual(e1.json_path, "$.bar")
self.assertEqual(e2.json_path, "$.baz")
self.assertEqual(e3.json_path, "$.baz")
self.assertEqual(e4.json_path, "$.foo")
self.assertEqual(e1.validator, "minItems")
self.assertEqual(e2.validator, "enum")
self.assertEqual(e3.validator, "maximum")
self.assertEqual(e4.validator, "type")
def test_multiple_nesting(self):
instance = [1, {"foo": 2, "bar": {"baz": [1]}}, "quux"]
schema = {
"type": "string",
"items": {
"type": ["string", "object"],
"properties": {
"foo": {"enum": [1, 3]},
"bar": {
"type": "array",
"properties": {
"bar": {"required": True},
"baz": {"minItems": 2},
},
},
},
},
}
validator = validators.Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2, e3, e4, e5, e6 = sorted_errors(errors)
self.assertEqual(e1.path, deque([]))
self.assertEqual(e2.path, deque([0]))
self.assertEqual(e3.path, deque([1, "bar"]))
self.assertEqual(e4.path, deque([1, "bar", "bar"]))
self.assertEqual(e5.path, deque([1, "bar", "baz"]))
self.assertEqual(e6.path, deque([1, "foo"]))
self.assertEqual(e1.json_path, "$")
self.assertEqual(e2.json_path, "$[0]")
self.assertEqual(e3.json_path, "$[1].bar")
self.assertEqual(e4.json_path, "$[1].bar.bar")
self.assertEqual(e5.json_path, "$[1].bar.baz")
self.assertEqual(e6.json_path, "$[1].foo")
self.assertEqual(e1.schema_path, deque(["type"]))
self.assertEqual(e2.schema_path, deque(["items", "type"]))
self.assertEqual(
list(e3.schema_path), ["items", "properties", "bar", "type"],
)
self.assertEqual(
list(e4.schema_path),
["items", "properties", "bar", "properties", "bar", "required"],
)
self.assertEqual(
list(e5.schema_path),
["items", "properties", "bar", "properties", "baz", "minItems"],
)
self.assertEqual(
list(e6.schema_path), ["items", "properties", "foo", "enum"],
)
self.assertEqual(e1.validator, "type")
self.assertEqual(e2.validator, "type")
self.assertEqual(e3.validator, "type")
self.assertEqual(e4.validator, "required")
self.assertEqual(e5.validator, "minItems")
self.assertEqual(e6.validator, "enum")
def test_recursive(self):
schema = {
"definitions": {
"node": {
"anyOf": [{
"type": "object",
"required": ["name", "children"],
"properties": {
"name": {
"type": "string",
},
"children": {
"type": "object",
"patternProperties": {
"^.*$": {
"$ref": "#/definitions/node",
},
},
},
},
}],
},
},
"type": "object",
"required": ["root"],
"properties": {"root": {"$ref": "#/definitions/node"}},
}
instance = {
"root": {
"name": "root",
"children": {
"a": {
"name": "a",
"children": {
"ab": {
"name": "ab",
# missing "children"
},
},
},
},
},
}
validator = validators.Draft4Validator(schema)
e, = validator.iter_errors(instance)
self.assertEqual(e.absolute_path, deque(["root"]))
self.assertEqual(
e.absolute_schema_path, deque(["properties", "root", "anyOf"]),
)
self.assertEqual(e.json_path, "$.root")
e1, = e.context
self.assertEqual(e1.absolute_path, deque(["root", "children", "a"]))
self.assertEqual(
e1.absolute_schema_path, deque(
[
"properties",
"root",
"anyOf",
0,
"properties",
"children",
"patternProperties",
"^.*$",
"anyOf",
],
),
)
self.assertEqual(e1.json_path, "$.root.children.a")
e2, = e1.context
self.assertEqual(
e2.absolute_path, deque(
["root", "children", "a", "children", "ab"],
),
)
self.assertEqual(
e2.absolute_schema_path, deque(
[
"properties",
"root",
"anyOf",
0,
"properties",
"children",
"patternProperties",
"^.*$",
"anyOf",
0,
"properties",
"children",
"patternProperties",
"^.*$",
"anyOf",
],
),
)
self.assertEqual(e2.json_path, "$.root.children.a.children.ab")
def test_additionalProperties(self):
instance = {"bar": "bar", "foo": 2}
schema = {"additionalProperties": {"type": "integer", "minimum": 5}}
validator = validators.Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2 = sorted_errors(errors)
self.assertEqual(e1.path, deque(["bar"]))
self.assertEqual(e2.path, deque(["foo"]))
self.assertEqual(e1.json_path, "$.bar")
self.assertEqual(e2.json_path, "$.foo")
self.assertEqual(e1.validator, "type")
self.assertEqual(e2.validator, "minimum")
def test_patternProperties(self):
instance = {"bar": 1, "foo": 2}
schema = {
"patternProperties": {
"bar": {"type": "string"},
"foo": {"minimum": 5},
},
}
validator = validators.Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2 = sorted_errors(errors)
self.assertEqual(e1.path, deque(["bar"]))
self.assertEqual(e2.path, deque(["foo"]))
self.assertEqual(e1.json_path, "$.bar")
self.assertEqual(e2.json_path, "$.foo")
self.assertEqual(e1.validator, "type")
self.assertEqual(e2.validator, "minimum")
def test_additionalItems(self):
instance = ["foo", 1]
schema = {
"items": [],
"additionalItems": {"type": "integer", "minimum": 5},
}
validator = validators.Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2 = sorted_errors(errors)
self.assertEqual(e1.path, deque([0]))
self.assertEqual(e2.path, deque([1]))
self.assertEqual(e1.json_path, "$[0]")
self.assertEqual(e2.json_path, "$[1]")
self.assertEqual(e1.validator, "type")
self.assertEqual(e2.validator, "minimum")
def test_additionalItems_with_items(self):
instance = ["foo", "bar", 1]
schema = {
"items": [{}],
"additionalItems": {"type": "integer", "minimum": 5},
}
validator = validators.Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2 = sorted_errors(errors)
self.assertEqual(e1.path, deque([1]))
self.assertEqual(e2.path, deque([2]))
self.assertEqual(e1.json_path, "$[1]")
self.assertEqual(e2.json_path, "$[2]")
self.assertEqual(e1.validator, "type")
self.assertEqual(e2.validator, "minimum")
def test_propertyNames(self):
instance = {"foo": 12}
schema = {"propertyNames": {"not": {"const": "foo"}}}
validator = validators.Draft7Validator(schema)
error, = validator.iter_errors(instance)
self.assertEqual(error.validator, "not")
self.assertEqual(
error.message,
"'foo' should not be valid under {'const': 'foo'}",
)
self.assertEqual(error.path, deque([]))
self.assertEqual(error.json_path, "$")
self.assertEqual(error.schema_path, deque(["propertyNames", "not"]))
def test_if_then(self):
schema = {
"if": {"const": 12},
"then": {"const": 13},
}
validator = validators.Draft7Validator(schema)
error, = validator.iter_errors(12)
self.assertEqual(error.validator, "const")
self.assertEqual(error.message, "13 was expected")
self.assertEqual(error.path, deque([]))
self.assertEqual(error.json_path, "$")
self.assertEqual(error.schema_path, deque(["then", "const"]))
def test_if_else(self):
schema = {
"if": {"const": 12},
"else": {"const": 13},
}
validator = validators.Draft7Validator(schema)
error, = validator.iter_errors(15)
self.assertEqual(error.validator, "const")
self.assertEqual(error.message, "13 was expected")
self.assertEqual(error.path, deque([]))
self.assertEqual(error.json_path, "$")
self.assertEqual(error.schema_path, deque(["else", "const"]))
def test_boolean_schema_False(self):
validator = validators.Draft7Validator(False)
error, = validator.iter_errors(12)
self.assertEqual(
(
error.message,
error.validator,
error.validator_value,
error.instance,
error.schema,
error.schema_path,
error.json_path,
),
(
"False schema does not allow 12",
None,
None,
12,
False,
deque([]),
"$",
),
)
def test_ref(self):
ref, schema = "someRef", {"additionalProperties": {"type": "integer"}}
validator = validators.Draft7Validator(
{"$ref": ref},
resolver=validators._RefResolver("", {}, store={ref: schema}),
)
error, = validator.iter_errors({"foo": "notAnInteger"})
self.assertEqual(
(
error.message,
error.validator,
error.validator_value,
error.instance,
error.absolute_path,
error.schema,
error.schema_path,
error.json_path,
),
(
"'notAnInteger' is not of type 'integer'",
"type",
"integer",
"notAnInteger",
deque(["foo"]),
{"type": "integer"},
deque(["additionalProperties", "type"]),
"$.foo",
),
)
def test_prefixItems(self):
schema = {"prefixItems": [{"type": "string"}, {}, {}, {"maximum": 3}]}
validator = validators.Draft202012Validator(schema)
type_error, min_error = validator.iter_errors([1, 2, "foo", 5])
self.assertEqual(
(
type_error.message,
type_error.validator,
type_error.validator_value,
type_error.instance,
type_error.absolute_path,
type_error.schema,
type_error.schema_path,
type_error.json_path,
),
(
"1 is not of type 'string'",
"type",
"string",
1,
deque([0]),
{"type": "string"},
deque(["prefixItems", 0, "type"]),
"$[0]",
),
)
self.assertEqual(
(
min_error.message,
min_error.validator,
min_error.validator_value,
min_error.instance,
min_error.absolute_path,
min_error.schema,
min_error.schema_path,
min_error.json_path,
),
(
"5 is greater than the maximum of 3",
"maximum",
3,
5,
deque([3]),
{"maximum": 3},
deque(["prefixItems", 3, "maximum"]),
"$[3]",
),
)
def test_prefixItems_with_items(self):
schema = {
"items": {"type": "string"},
"prefixItems": [{}],
}
validator = validators.Draft202012Validator(schema)
e1, e2 = validator.iter_errors(["foo", 2, "bar", 4, "baz"])
self.assertEqual(
(
e1.message,
e1.validator,
e1.validator_value,
e1.instance,
e1.absolute_path,
e1.schema,
e1.schema_path,
e1.json_path,
),
(
"2 is not of type 'string'",
"type",
"string",
2,
deque([1]),
{"type": "string"},
deque(["items", "type"]),
"$[1]",
),
)
self.assertEqual(
(
e2.message,
e2.validator,
e2.validator_value,
e2.instance,
e2.absolute_path,
e2.schema,
e2.schema_path,
e2.json_path,
),
(
"4 is not of type 'string'",
"type",
"string",
4,
deque([3]),
{"type": "string"},
deque(["items", "type"]),
"$[3]",
),
)
def test_contains_too_many(self):
"""
`contains` + `maxContains` produces only one error, even if there are
many more incorrectly matching elements.
"""
schema = {"contains": {"type": "string"}, "maxContains": 2}
validator = validators.Draft202012Validator(schema)
error, = validator.iter_errors(["foo", 2, "bar", 4, "baz", "quux"])
self.assertEqual(
(
error.message,
error.validator,
error.validator_value,
error.instance,
error.absolute_path,
error.schema,
error.schema_path,
error.json_path,
),
(
"Too many items match the given schema (expected at most 2)",
"maxContains",
2,
["foo", 2, "bar", 4, "baz", "quux"],
deque([]),
{"contains": {"type": "string"}, "maxContains": 2},
deque(["contains"]),
"$",
),
)
def test_contains_too_few(self):
schema = {"contains": {"type": "string"}, "minContains": 2}
validator = validators.Draft202012Validator(schema)
error, = validator.iter_errors(["foo", 2, 4])
self.assertEqual(
(
error.message,
error.validator,
error.validator_value,
error.instance,
error.absolute_path,
error.schema,
error.schema_path,
error.json_path,
),
(
(
"Too few items match the given schema "
"(expected at least 2 but only 1 matched)"
),
"minContains",
2,
["foo", 2, 4],
deque([]),
{"contains": {"type": "string"}, "minContains": 2},
deque(["contains"]),
"$",
),
)
def test_contains_none(self):
schema = {"contains": {"type": "string"}, "minContains": 2}
validator = validators.Draft202012Validator(schema)
error, = validator.iter_errors([2, 4])
self.assertEqual(
(
error.message,
error.validator,
error.validator_value,
error.instance,
error.absolute_path,
error.schema,
error.schema_path,
error.json_path,
),
(
"[2, 4] does not contain items matching the given schema",
"contains",
{"type": "string"},
[2, 4],
deque([]),
{"contains": {"type": "string"}, "minContains": 2},
deque(["contains"]),
"$",
),
)
def test_ref_sibling(self):
schema = {
"$defs": {"foo": {"required": ["bar"]}},
"properties": {
"aprop": {
"$ref": "#/$defs/foo",
"required": ["baz"],
},
},
}
validator = validators.Draft202012Validator(schema)
e1, e2 = validator.iter_errors({"aprop": {}})
self.assertEqual(
(
e1.message,
e1.validator,
e1.validator_value,
e1.instance,
e1.absolute_path,
e1.schema,
e1.schema_path,
e1.relative_schema_path,
e1.json_path,
),
(
"'bar' is a required property",
"required",
["bar"],
{},
deque(["aprop"]),
{"required": ["bar"]},
deque(["properties", "aprop", "required"]),
deque(["properties", "aprop", "required"]),
"$.aprop",
),
)
self.assertEqual(
(
e2.message,
e2.validator,
e2.validator_value,
e2.instance,
e2.absolute_path,
e2.schema,
e2.schema_path,
e2.relative_schema_path,
e2.json_path,
),
(
"'baz' is a required property",
"required",
["baz"],
{},
deque(["aprop"]),
{"$ref": "#/$defs/foo", "required": ["baz"]},
deque(["properties", "aprop", "required"]),
deque(["properties", "aprop", "required"]),
"$.aprop",
),
)
| TestValidationErrorDetails |
python | walkccc__LeetCode | solutions/1263. Minimum Moves to Move a Box to Their Target Location/1263.py | {
"start": 0,
"end": 2213
} | class ____:
def minPushBox(self, grid: list[list[str]]) -> int:
DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0))
m = len(grid)
n = len(grid[0])
for i in range(m):
for j in range(n):
if grid[i][j] == 'B':
box = (i, j)
elif grid[i][j] == 'S':
player = (i, j)
elif grid[i][j] == 'T':
target = (i, j)
def isInvalid(playerX: int, playerY: int) -> bool:
return (playerX < 0 or playerX == m or playerY < 0 or playerY == n or
grid[playerX][playerY] == '#')
def canGoTo(
playerX: int,
playerY: int,
fromX: int,
fromY: int,
boxX: int,
boxY: int
) -> bool:
"""Returns True if (playerX, playerY) can go to (fromX, fromY)."""
q = collections.deque([(playerX, playerY)])
seen = {(playerX, playerY)}
while q:
i, j = q.popleft()
if i == fromX and j == fromY:
return True
for dx, dy in DIRS:
x = i + dx
y = j + dy
if isInvalid(x, y):
continue
if (x, y) in seen:
continue
if x == boxX and y == boxY:
continue
q.append((x, y))
seen.add((x, y))
return False
# (boxX, boxY, playerX, playerY)
q = collections.deque([(box[0], box[1], player[0], player[1])])
seen = {(box[0], box[1], player[0], player[1])}
step = 0
while q:
for _ in range(len(q)):
boxX, boxY, playerX, playerY = q.popleft()
if boxX == target[0] and boxY == target[1]:
return step
for k, (dx, dy) in enumerate(DIRS):
nextBoxX = boxX + dx
nextBoxY = boxY + dy
if isInvalid(nextBoxX, nextBoxY):
continue
if (nextBoxX, nextBoxY, boxX, boxY) in seen:
continue
fromX = boxX + DIRS[(k + 2) % 4][0]
fromY = boxY + DIRS[(k + 2) % 4][1]
if isInvalid(fromX, fromY):
continue
if canGoTo(playerX, playerY, fromX, fromY, boxX, boxY):
q.append((nextBoxX, nextBoxY, boxX, boxY))
seen.add((nextBoxX, nextBoxY, boxX, boxY))
step += 1
return -1
| Solution |
python | weaviate__weaviate-python-client | weaviate/collections/classes/grpc.py | {
"start": 816,
"end": 2363
} | class ____:
"""Define how the query's move operation should be performed."""
def __init__(
self,
force: float,
objects: Optional[Union[List[UUID], UUID]] = None,
concepts: Optional[Union[List[str], str]] = None,
):
if (objects is None or (isinstance(objects, list) and len(objects) == 0)) and (
concepts is None or (isinstance(concepts, list) and len(concepts) == 0)
):
raise ValueError("Either objects or concepts need to be given")
self.force = force
# accept single values, but make them a list
if objects is None:
self.__objects = None
elif not isinstance(objects, list):
self.__objects = [str(objects)]
else:
self.__objects = [str(obj_uuid) for obj_uuid in objects]
if concepts is None:
self.__concepts = None
elif not isinstance(concepts, list):
self.__concepts = [concepts]
else:
self.__concepts = concepts
@property
def _objects_list(self) -> Optional[List[str]]:
return self.__objects
@property
def _concepts_list(self) -> Optional[List[str]]:
return self.__concepts
def _to_gql_payload(self) -> dict:
payload: dict = {"force": self.force}
if self.__objects is not None:
payload["objects"] = [{"id": obj} for obj in self.__objects]
if self.__concepts is not None:
payload["concepts"] = self.__concepts
return payload
| Move |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1148902,
"end": 1149550
class ____(ScaleInvalidDataShowAsx):
    """
    ScaleInvalidDataShowAsValuex schema wrapper.

    Parameters
    ----------
    value : float, Literal['width']
        X coordinates of the marks, or width of horizontal ``"bar"`` and ``"area"`` without
        specified ``x2`` or ``width``.

        The ``value`` of this channel can be a number or a string ``"width"`` for the width
        of the plot.
    """

    # JSON-schema reference this wrapper validates against.
    _schema = {"$ref": '#/definitions/ScaleInvalidDataShowAsValue<"x">'}

    def __init__(self, value: Optional[float | Literal["width"]] = Undefined, **kwds):
        super().__init__(value=value, **kwds)
| ScaleInvalidDataShowAsValuex |
python | ray-project__ray | python/ray/dashboard/memory_utils.py | {
"start": 2622,
"end": 7235
} | class ____:
def __init__(
self, *, object_ref: dict, node_address: str, is_driver: bool, pid: int
):
# worker info
self.is_driver = is_driver
self.pid = pid
self.node_address = node_address
# object info
self.task_status = object_ref.get("taskStatus", "?")
if self.task_status == "NIL":
self.task_status = "-"
self.attempt_number = int(object_ref.get("attemptNumber", 0)) + 1
self.object_size = int(object_ref.get("objectSize", -1))
self.call_site = object_ref.get("callSite", "<Unknown>")
if len(self.call_site) == 0:
self.call_site = "disabled"
self.object_ref = ray.ObjectRef(
decode_object_ref_if_needed(object_ref["objectId"])
)
# reference info
self.local_ref_count = int(object_ref.get("localRefCount", 0))
self.pinned_in_memory = bool(object_ref.get("pinnedInMemory", False))
self.submitted_task_ref_count = int(object_ref.get("submittedTaskRefCount", 0))
self.contained_in_owned = [
ray.ObjectRef(decode_object_ref_if_needed(object_ref))
for object_ref in object_ref.get("containedInOwned", [])
]
self.reference_type = self._get_reference_type()
def is_valid(self) -> bool:
# If the entry doesn't have a reference type or some invalid state,
# (e.g., no object ref presented), it is considered invalid.
if (
not self.pinned_in_memory
and self.local_ref_count == 0
and self.submitted_task_ref_count == 0
and len(self.contained_in_owned) == 0
):
return False
elif self.object_ref.is_nil():
return False
else:
return True
def group_key(self, group_by_type: GroupByType) -> str:
if group_by_type == GroupByType.NODE_ADDRESS:
return self.node_address
elif group_by_type == GroupByType.STACK_TRACE:
return self.call_site
else:
raise ValueError(f"group by type {group_by_type} is invalid.")
def _get_reference_type(self) -> str:
if self._is_object_ref_actor_handle():
return ReferenceType.ACTOR_HANDLE.value
if self.pinned_in_memory:
return ReferenceType.PINNED_IN_MEMORY.value
elif self.submitted_task_ref_count > 0:
return ReferenceType.USED_BY_PENDING_TASK.value
elif self.local_ref_count > 0:
return ReferenceType.LOCAL_REFERENCE.value
elif len(self.contained_in_owned) > 0:
return ReferenceType.CAPTURED_IN_OBJECT.value
else:
return ReferenceType.UNKNOWN_STATUS.value
def _is_object_ref_actor_handle(self) -> bool:
object_ref_hex = self.object_ref.hex()
# We need to multiply 2 because we need bits size instead of bytes size.
taskid_random_bits_size = (TASKID_BYTES_SIZE - ACTORID_BYTES_SIZE) * 2
actorid_random_bits_size = (ACTORID_BYTES_SIZE - JOBID_BYTES_SIZE) * 2
# random (8B) | ActorID(6B) | flag (2B) | index (6B)
# ActorID(6B) == ActorRandomByte(4B) + JobID(2B)
# If random bytes are all 'f', but ActorRandomBytes
# are not all 'f', that means it is an actor creation
# task, which is an actor handle.
random_bits = object_ref_hex[:taskid_random_bits_size]
actor_random_bits = object_ref_hex[
taskid_random_bits_size : taskid_random_bits_size + actorid_random_bits_size
]
if random_bits == "f" * 16 and not actor_random_bits == "f" * 24:
return True
else:
return False
def as_dict(self):
return {
"object_ref": self.object_ref.hex(),
"pid": self.pid,
"node_ip_address": self.node_address,
"object_size": self.object_size,
"reference_type": self.reference_type,
"call_site": self.call_site,
"task_status": self.task_status,
"attempt_number": self.attempt_number,
"local_ref_count": self.local_ref_count,
"pinned_in_memory": self.pinned_in_memory,
"submitted_task_ref_count": self.submitted_task_ref_count,
"contained_in_owned": [
object_ref.hex() for object_ref in self.contained_in_owned
],
"type": "Driver" if self.is_driver else "Worker",
}
def __str__(self):
return self.__repr__()
def __repr__(self):
return str(self.as_dict())
| MemoryTableEntry |
python | kubernetes-client__python | kubernetes/client/models/v1_node_condition.py | {
"start": 383,
"end": 8205
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'last_heartbeat_time': 'datetime',
'last_transition_time': 'datetime',
'message': 'str',
'reason': 'str',
'status': 'str',
'type': 'str'
}
attribute_map = {
'last_heartbeat_time': 'lastHeartbeatTime',
'last_transition_time': 'lastTransitionTime',
'message': 'message',
'reason': 'reason',
'status': 'status',
'type': 'type'
}
def __init__(self, last_heartbeat_time=None, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
"""V1NodeCondition - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._last_heartbeat_time = None
self._last_transition_time = None
self._message = None
self._reason = None
self._status = None
self._type = None
self.discriminator = None
if last_heartbeat_time is not None:
self.last_heartbeat_time = last_heartbeat_time
if last_transition_time is not None:
self.last_transition_time = last_transition_time
if message is not None:
self.message = message
if reason is not None:
self.reason = reason
self.status = status
self.type = type
@property
def last_heartbeat_time(self):
"""Gets the last_heartbeat_time of this V1NodeCondition. # noqa: E501
Last time we got an update on a given condition. # noqa: E501
:return: The last_heartbeat_time of this V1NodeCondition. # noqa: E501
:rtype: datetime
"""
return self._last_heartbeat_time
@last_heartbeat_time.setter
def last_heartbeat_time(self, last_heartbeat_time):
"""Sets the last_heartbeat_time of this V1NodeCondition.
Last time we got an update on a given condition. # noqa: E501
:param last_heartbeat_time: The last_heartbeat_time of this V1NodeCondition. # noqa: E501
:type: datetime
"""
self._last_heartbeat_time = last_heartbeat_time
@property
def last_transition_time(self):
"""Gets the last_transition_time of this V1NodeCondition. # noqa: E501
Last time the condition transit from one status to another. # noqa: E501
:return: The last_transition_time of this V1NodeCondition. # noqa: E501
:rtype: datetime
"""
return self._last_transition_time
@last_transition_time.setter
def last_transition_time(self, last_transition_time):
"""Sets the last_transition_time of this V1NodeCondition.
Last time the condition transit from one status to another. # noqa: E501
:param last_transition_time: The last_transition_time of this V1NodeCondition. # noqa: E501
:type: datetime
"""
self._last_transition_time = last_transition_time
@property
def message(self):
"""Gets the message of this V1NodeCondition. # noqa: E501
Human readable message indicating details about last transition. # noqa: E501
:return: The message of this V1NodeCondition. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this V1NodeCondition.
Human readable message indicating details about last transition. # noqa: E501
:param message: The message of this V1NodeCondition. # noqa: E501
:type: str
"""
self._message = message
@property
def reason(self):
"""Gets the reason of this V1NodeCondition. # noqa: E501
(brief) reason for the condition's last transition. # noqa: E501
:return: The reason of this V1NodeCondition. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this V1NodeCondition.
(brief) reason for the condition's last transition. # noqa: E501
:param reason: The reason of this V1NodeCondition. # noqa: E501
:type: str
"""
self._reason = reason
@property
def status(self):
"""Gets the status of this V1NodeCondition. # noqa: E501
Status of the condition, one of True, False, Unknown. # noqa: E501
:return: The status of this V1NodeCondition. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1NodeCondition.
Status of the condition, one of True, False, Unknown. # noqa: E501
:param status: The status of this V1NodeCondition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
@property
def type(self):
"""Gets the type of this V1NodeCondition. # noqa: E501
Type of node condition. # noqa: E501
:return: The type of this V1NodeCondition. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1NodeCondition.
Type of node condition. # noqa: E501
:param type: The type of this V1NodeCondition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1NodeCondition):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1NodeCondition):
return True
return self.to_dict() != other.to_dict()
| V1NodeCondition |
python | Textualize__textual | src/textual/document/_wrapped_document.py | {
"start": 440,
"end": 17779
} | class ____:
"""A view into a Document which wraps the document at a certain
width and can be queried to retrieve lines from the *wrapped* version
of the document.
Allows for incremental updates, ensuring that we only re-wrap ranges of the document
that were influenced by edits.
"""
def __init__(
self,
document: DocumentBase,
width: int = 0,
tab_width: int = 4,
) -> None:
"""Construct a WrappedDocument.
By default, a WrappedDocument is wrapped with width=0 (no wrapping).
To wrap the document, use the wrap() method.
Args:
document: The document to wrap.
width: The width to wrap at.
tab_width: The maximum width to consider for tab characters.
"""
self.document = document
"""The document wrapping is performed on."""
self._wrap_offsets: list[list[int]] = []
"""Maps line indices to the offsets within the line where wrapping
breaks should be added."""
self._tab_width_cache: list[list[int]] = []
"""Maps line indices to a list of tab widths. `[[2, 4]]` means that on line 0, the first
tab has width 2, and the second tab has width 4."""
self._offset_to_line_info: list[tuple[LineIndex, SectionOffset]] = []
"""Maps y_offsets (from the top of the document) to line_index and the offset
of the section within the line."""
self._line_index_to_offsets: list[list[VerticalOffset]] = []
"""Maps line indices to all the vertical offsets which correspond to that line."""
self._width: int = width
"""The width the document is currently wrapped at. This will correspond with
the value last passed into the `wrap` method."""
self._tab_width: int = tab_width
"""The maximum width to expand tabs to when considering their widths."""
self.wrap(width, tab_width)
@property
def wrapped(self) -> bool:
"""True if the content is wrapped. This is not the same as wrapping being "enabled".
For example, an empty document can have wrapping enabled, but no wrapping has actually
occurred.
In other words, this is True if the length of any line in the document is greater
than the available width."""
return len(self._line_index_to_offsets) == len(self._offset_to_line_info)
def wrap(self, width: int, tab_width: int | None = None) -> None:
"""Wrap and cache all lines in the document.
Args:
width: The width to wrap at. 0 for no wrapping.
tab_width: The maximum width to consider for tab characters. If None,
reuse the tab width.
"""
self._width = width
if tab_width:
self._tab_width = tab_width
# We're starting wrapping from scratch
new_wrap_offsets: list[list[int]] = []
offset_to_line_info: list[tuple[LineIndex, SectionOffset]] = []
line_index_to_offsets: list[list[VerticalOffset]] = []
line_tab_widths: list[list[int]] = []
append_wrap_offset = new_wrap_offsets.append
append_line_info = offset_to_line_info.append
append_line_offsets = line_index_to_offsets.append
append_line_tab_widths = line_tab_widths.append
current_offset = 0
tab_width = self._tab_width
for line_index, line in enumerate(self.document.lines):
tab_sections = get_tab_widths(line, tab_width)
wrap_offsets = (
compute_wrap_offsets(
line,
width,
tab_size=tab_width,
precomputed_tab_sections=tab_sections,
)
if width
else []
)
append_line_tab_widths([width for _, width in tab_sections])
append_wrap_offset(wrap_offsets)
append_line_offsets([])
for section_y_offset in range(len(wrap_offsets) + 1):
append_line_info((line_index, section_y_offset))
line_index_to_offsets[line_index].append(current_offset)
current_offset += 1
self._wrap_offsets = new_wrap_offsets
self._offset_to_line_info = offset_to_line_info
self._line_index_to_offsets = line_index_to_offsets
self._tab_width_cache = line_tab_widths
@property
def lines(self) -> list[list[str]]:
"""The lines of the wrapped version of the Document.
Each index in the returned list represents a line index in the raw
document. The list[str] at each index is the content of the raw document line
split into multiple lines via wrapping.
Note that this is expensive to compute and is not cached.
Returns:
A list of lines from the wrapped version of the document.
"""
wrapped_lines: list[list[str]] = []
append = wrapped_lines.append
for line_index, line in enumerate(self.document.lines):
divided = Text(line).divide(self._wrap_offsets[line_index])
append([section.plain for section in divided])
return wrapped_lines
@property
def height(self) -> int:
"""The height of the wrapped document."""
return sum(len(offsets) + 1 for offsets in self._wrap_offsets)
def wrap_range(
self,
start: Location,
old_end: Location,
new_end: Location,
) -> None:
"""Incrementally recompute wrapping based on a performed edit.
This must be called *after* the source document has been edited.
Args:
start: The start location of the edit that was performed in document-space.
old_end: The old end location of the edit in document-space.
new_end: The new end location of the edit in document-space.
"""
start_line_index, _ = start
old_end_line_index, _ = old_end
new_end_line_index, _ = new_end
# Although end users should not be able to edit invalid ranges via a TextArea,
# programmers can pass whatever they wish to the edit API, so we need to clamp
# the edit ranges here to ensure we only attempt to update within the bounds
# of the wrapped document.
old_max_index = len(self._line_index_to_offsets) - 1
new_max_index = self.document.line_count - 1
start_line_index = clamp(
start_line_index, 0, min((old_max_index, new_max_index))
)
old_end_line_index = clamp(old_end_line_index, 0, old_max_index)
new_end_line_index = clamp(new_end_line_index, 0, new_max_index)
top_line_index, old_bottom_line_index = sorted(
(start_line_index, old_end_line_index)
)
new_bottom_line_index = max((start_line_index, new_end_line_index))
top_y_offset = self._line_index_to_offsets[top_line_index][0]
old_bottom_y_offset = self._line_index_to_offsets[old_bottom_line_index][-1]
# Get the new range of the edit from top to bottom.
new_lines = self.document.lines[top_line_index : new_bottom_line_index + 1]
new_wrap_offsets: list[list[int]] = []
new_line_index_to_offsets: list[list[VerticalOffset]] = []
new_offset_to_line_info: list[tuple[LineIndex, SectionOffset]] = []
new_tab_widths: list[list[int]] = []
append_wrap_offsets = new_wrap_offsets.append
append_tab_widths = new_tab_widths.append
width = self._width
tab_width = self._tab_width
# Add the new offsets between the top and new bottom (the new post-edit offsets)
current_y_offset = top_y_offset
for line_index, line in enumerate(new_lines, top_line_index):
tab_sections = get_tab_widths(line, tab_width)
wrap_offsets = (
compute_wrap_offsets(
line, width, tab_width, precomputed_tab_sections=tab_sections
)
if width
else []
)
append_tab_widths([width for _, width in tab_sections])
append_wrap_offsets(wrap_offsets)
# Collect up the new y offsets for this document line
y_offsets_for_line: list[int] = []
for section_offset in range(len(wrap_offsets) + 1):
y_offsets_for_line.append(current_y_offset)
new_offset_to_line_info.append((line_index, section_offset))
current_y_offset += 1
# Save the new y offsets for this line
new_line_index_to_offsets.append(y_offsets_for_line)
# Replace the range start -> old with the new wrapped lines
self._offset_to_line_info[top_y_offset : old_bottom_y_offset + 1] = (
new_offset_to_line_info
)
self._line_index_to_offsets[top_line_index : old_bottom_line_index + 1] = (
new_line_index_to_offsets
)
self._tab_width_cache[top_line_index : old_bottom_line_index + 1] = (
new_tab_widths
)
# How much did the edit/rewrap alter the offsets?
old_height = old_bottom_y_offset - top_y_offset + 1
new_height = len(new_offset_to_line_info)
offset_shift = new_height - old_height
line_shift = new_bottom_line_index - old_bottom_line_index
# Update the line info at all offsets below the edit region.
if line_shift:
for y_offset in range(
top_y_offset + new_height, len(self._offset_to_line_info)
):
old_line_index, section_offset = self._offset_to_line_info[y_offset]
new_line_index = old_line_index + line_shift
new_line_info = (new_line_index, section_offset)
self._offset_to_line_info[y_offset] = new_line_info
# Update the offsets at all lines below the edit region
if offset_shift:
for line_index in range(
top_line_index + len(new_lines), len(self._line_index_to_offsets)
):
old_offsets = self._line_index_to_offsets[line_index]
new_offsets = [offset + offset_shift for offset in old_offsets]
self._line_index_to_offsets[line_index] = new_offsets
self._wrap_offsets[top_line_index : old_bottom_line_index + 1] = (
new_wrap_offsets
)
def offset_to_location(self, offset: Offset) -> Location:
"""Given an offset within the wrapped/visual display of the document,
return the corresponding location in the document.
Args:
offset: The y-offset within the document.
Raises:
ValueError: When the given offset does not correspond to a line
in the document.
Returns:
The Location in the document corresponding to the given offset.
"""
x, y = offset
x = max(0, x)
y = max(0, y)
if not self._width:
# No wrapping, so we directly map offset to location and clamp.
line_index = min(y, len(self._wrap_offsets) - 1)
column_index = cell_width_to_column_index(
self.document.get_line(line_index), x, self._tab_width
)
return line_index, column_index
# Find the line corresponding to the given y offset in the wrapped document.
get_target_document_column = self.get_target_document_column
try:
offset_data = self._offset_to_line_info[y]
except IndexError:
# y-offset is too large
offset_data = self._offset_to_line_info[-1]
if offset_data is not None:
line_index, section_y = offset_data
location = line_index, get_target_document_column(
line_index,
x,
section_y,
)
else:
location = len(self._wrap_offsets) - 1, get_target_document_column(
-1, x, -1
)
# Offset doesn't match any line => land on bottom wrapped line
return location
def location_to_offset(self, location: Location) -> Offset:
"""
Convert a location in the document to an offset within the wrapped/visual display of the document.
Args:
location: The location in the document.
Returns:
The Offset in the document's visual display corresponding to the given location.
"""
line_index, column_index = location
# Clamp the line index to the bounds of the document
line_index = clamp(line_index, 0, len(self._line_index_to_offsets))
# Find the section index of this location, so that we know which y_offset to use
wrap_offsets = self.get_offsets(line_index)
section_start_columns = [0, *wrap_offsets]
section_index = bisect_right(wrap_offsets, column_index)
# Get the y-offsets corresponding to this line index
y_offsets = self._line_index_to_offsets[line_index]
section_column_index = column_index - section_start_columns[section_index]
section = self.get_sections(line_index)[section_index]
x_offset = cell_len(
expand_tabs_inline(section[:section_column_index], self._tab_width)
)
return Offset(x_offset, y_offsets[section_index])
def get_target_document_column(
self,
line_index: int,
x_offset: int,
y_offset: int,
) -> int:
"""Given a line index and the offsets within the wrapped version of that
line, return the corresponding column index in the raw document.
Args:
line_index: The index of the line in the document.
x_offset: The x-offset within the wrapped line.
y_offset: The y-offset within the wrapped line (supports negative indexing).
Returns:
The column index corresponding to the line index and y offset.
"""
# We've found the relevant line, now find the character by
# looking at the character corresponding to the offset width.
sections = self.get_sections(line_index)
# wrapped_section is the text that appears on a single y_offset within
# the TextArea. It's a potentially wrapped portion of a larger line from
# the original document.
target_section = sections[y_offset]
# Add the offsets from the wrapped sections above this one (from the same raw
# document line)
target_section_start = sum(
len(wrapped_section) for wrapped_section in sections[:y_offset]
)
# Get the column index within this wrapped section of the line
target_column_index = target_section_start + cell_width_to_column_index(
target_section, x_offset, self._tab_width
)
# If we're on the final section of a line, the cursor can legally rest beyond
# the end by a single cell. Otherwise, we'll need to ensure that we're
# keeping the cursor within the bounds of the target section.
if y_offset != len(sections) - 1 and y_offset != -1:
target_column_index = min(
target_column_index, target_section_start + len(target_section) - 1
)
return target_column_index
def get_sections(self, line_index: int) -> list[str]:
"""Return the sections for the given line index.
When wrapping is enabled, a single line in the document can visually span
multiple lines. The list returned represents that visually (each string in
the list represents a single section (y-offset) after wrapping happens).
Args:
line_index: The index of the line to get sections for.
Returns:
The wrapped line as a list of strings.
"""
line_offsets = self._wrap_offsets[line_index]
wrapped_lines = Text(self.document[line_index], end="").divide(line_offsets)
return [line.plain for line in wrapped_lines]
def get_offsets(self, line_index: int) -> list[int]:
"""Given a line index, get the offsets within that line where wrapping
should occur for the current document.
Args:
line_index: The index of the line within the document.
Raises:
ValueError: When `line_index` is out of bounds.
Returns:
The offsets within the line where wrapping should occur.
"""
wrap_offsets = self._wrap_offsets
out_of_bounds = line_index < 0 or line_index >= len(wrap_offsets)
if out_of_bounds:
raise ValueError(
f"The document line index {line_index!r} is out of bounds. "
f"The document contains {len(wrap_offsets)!r} lines."
)
return wrap_offsets[line_index]
def get_tab_widths(self, line_index: int) -> list[int]:
"""Return a list of the tab widths for the given line index.
Args:
line_index: The index of the line in the document.
Returns:
An ordered list of the expanded width of the tabs in the line.
"""
return self._tab_width_cache[line_index]
| WrappedDocument |
python | huggingface__transformers | tests/utils/test_feature_extraction_utils.py | {
"start": 1087,
"end": 2139
} | class ____(unittest.TestCase):
def test_cached_files_are_used_when_internet_is_down(self):
# A mock response for an HTTP head request to emulate server down
response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = httpx.HTTPStatusError(
"failed", request=mock.Mock(), response=mock.Mock()
)
response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
_ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("httpx.Client.request", return_value=response_mock) as mock_head:
_ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
# This check we did call the fake head request
mock_head.assert_called()
@is_staging_test
| FeatureExtractorUtilTester |
python | huggingface__transformers | src/transformers/models/hubert/modeling_hubert.py | {
"start": 45746,
"end": 50723
} | class ____(HubertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
if hasattr(config, "add_adapter") and config.add_adapter:
raise ValueError(
"Sequence classification does not support the use of Hubert adapters (config.add_adapter=True)"
)
self.hubert = HubertModel(config)
num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
if config.use_weighted_layer_sum:
self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
def freeze_feature_encoder(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
"""
self.hubert.feature_extractor._freeze_parameters()
def freeze_base_model(self):
"""
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
"""
for param in self.hubert.parameters():
param.requires_grad = False
@auto_docstring
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
) -> Union[tuple, SequenceClassifierOutput]:
r"""
input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library
(`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion
into a tensor of type `torch.FloatTensor`. See [`HubertProcessor.__call__`] for details.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
outputs = self.hubert(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if self.config.use_weighted_layer_sum:
hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
hidden_states = torch.stack(hidden_states, dim=1)
norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
else:
hidden_states = outputs[0]
hidden_states = self.projector(hidden_states)
if attention_mask is None:
pooled_output = hidden_states.mean(dim=1)
else:
padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
expand_padding_mask = padding_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
hidden_states[~expand_padding_mask] = 0.0
pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = ["HubertForCTC", "HubertForSequenceClassification", "HubertModel", "HubertPreTrainedModel"]
| HubertForSequenceClassification |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/sensors/rds.py | {
"start": 1709,
"end": 4019
} | class ____(RdsBaseSensor):
"""
Waits for RDS snapshot with a specific status.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:RdsSnapshotExistenceSensor`
:param db_type: Type of the DB - either "instance" or "cluster"
:param db_snapshot_identifier: The identifier for the DB snapshot
:param target_statuses: Target status of snapshot
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
template_fields: Sequence[str] = aws_template_fields(
"db_snapshot_identifier",
"target_statuses",
)
def __init__(
self,
*,
db_type: str,
db_snapshot_identifier: str,
target_statuses: list[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.db_type = RdsDbType(db_type)
self.db_snapshot_identifier = db_snapshot_identifier
self.target_statuses = target_statuses or ["available"]
def poke(self, context: Context):
self.log.info(
"Poking for statuses : %s\nfor snapshot %s", self.target_statuses, self.db_snapshot_identifier
)
try:
if self.db_type.value == "instance":
state = self.hook.get_db_snapshot_state(self.db_snapshot_identifier)
else:
state = self.hook.get_db_cluster_snapshot_state(self.db_snapshot_identifier)
except AirflowNotFoundException:
return False
return state in self.target_statuses
| RdsSnapshotExistenceSensor |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/pipes/utils.py | {
"start": 21442,
"end": 23662
} | class ____(PipesThreadedMessageReader):
"""Message reader that reads a sequence of message chunks written by an external process into a
blob store such as S3, Azure blob storage, or GCS.
The reader maintains a counter, starting at 1, that is synchronized with a message writer in
some pipes process. The reader starts a thread that periodically attempts to read a chunk
indexed by the counter at some location expected to be written by the pipes process. The chunk
should be a file with each line corresponding to a JSON-encoded pipes message. When a chunk is
successfully read, the messages are processed and the counter is incremented. The
:py:class:`PipesBlobStoreMessageWriter` on the other end is expected to similarly increment a
counter (starting from 1) on successful write, keeping counters on the read and write end in
sync.
If `log_readers` is passed, the message reader will start the passed log readers when the
`opened` message is received from the external process.
Args:
interval (float): interval in seconds between attempts to download a chunk
log_readers (Optional[Sequence[PipesLogReader]]): A set of log readers to use to read logs.
"""
counter: int
def __init__(
self,
interval: float = 10,
log_readers: Optional[Sequence["PipesLogReader"]] = None,
):
super().__init__(interval=interval, log_readers=log_readers)
self.counter = 1
@abstractmethod
def download_messages_chunk(self, index: int, params: PipesParams) -> Optional[str]:
...
# historical reasons, keeping the original interface of PipesBlobStoreMessageReader
def download_messages( # pyright: ignore[reportIncompatibleMethodOverride]
self, cursor: Optional[int], params: PipesParams
) -> Optional[tuple[int, str]]:
# mapping new interface to the old one
# the old interface isn't using the cursor parameter, instead, it keeps track of counter in the "counter" attribute
chunk = self.download_messages_chunk(self.counter, params)
if chunk:
self.counter += 1
return self.counter, chunk
| PipesBlobStoreMessageReader |
python | giampaolo__psutil | tests/test_linux.py | {
"start": 37210,
"end": 39439
} | class ____(PsutilTestCase):
@pytest.mark.skipif(
not shutil.which("ifconfig"), reason="ifconfig utility not available"
)
def test_against_ifconfig(self):
for name, stats in psutil.net_if_stats().items():
try:
out = sh(f"ifconfig {name}")
except RuntimeError:
pass
else:
assert stats.isup == ('RUNNING' in out), out
assert stats.mtu == int(
re.findall(r'(?i)MTU[: ](\d+)', out)[0]
)
def test_mtu(self):
for name, stats in psutil.net_if_stats().items():
with open(f"/sys/class/net/{name}/mtu") as f:
assert stats.mtu == int(f.read().strip())
@pytest.mark.skipif(
not shutil.which("ifconfig"), reason="ifconfig utility not available"
)
def test_flags(self):
# first line looks like this:
# "eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500"
matches_found = 0
for name, stats in psutil.net_if_stats().items():
try:
out = sh(f"ifconfig {name}")
except RuntimeError:
pass
else:
match = re.search(r"flags=(\d+)?<(.*?)>", out)
if match and len(match.groups()) >= 2:
matches_found += 1
ifconfig_flags = set(match.group(2).lower().split(","))
psutil_flags = set(stats.flags.split(","))
assert ifconfig_flags == psutil_flags
else:
# ifconfig has a different output on CentOS 6
# let's try that
match = re.search(r"(.*) MTU:(\d+) Metric:(\d+)", out)
if match and len(match.groups()) >= 3:
matches_found += 1
ifconfig_flags = set(match.group(1).lower().split())
psutil_flags = set(stats.flags.split(","))
assert ifconfig_flags == psutil_flags
if not matches_found:
return pytest.fail("no matches were found")
@pytest.mark.skipif(not LINUX, reason="LINUX only")
| TestSystemNetIfStats |
python | MongoEngine__mongoengine | mongoengine/fields.py | {
"start": 81043,
"end": 87123
} | class ____(BaseField):
"""A really lazy reference to a document.
Unlike the :class:`~mongoengine.fields.ReferenceField` it will
**not** be automatically (lazily) dereferenced on access.
Instead, access will return a :class:`~mongoengine.base.LazyReference` class
instance, allowing access to `pk` or manual dereference by using
``fetch()`` method.
"""
def __init__(
self,
document_type,
passthrough=False,
dbref=False,
reverse_delete_rule=DO_NOTHING,
**kwargs,
):
"""Initialises the Reference Field.
:param dbref: Store the reference as :class:`~pymongo.dbref.DBRef`
or as the :class:`~pymongo.objectid.ObjectId`.id .
:param reverse_delete_rule: Determines what to do when the referring
object is deleted
:param passthrough: When trying to access unknown fields, the
:class:`~mongoengine.base.datastructure.LazyReference` instance will
automatically call `fetch()` and try to retrieve the field on the fetched
document. Note this only work getting field (not setting or deleting).
"""
# XXX ValidationError raised outside of the "validate" method.
if not isinstance(document_type, str) and not issubclass(
document_type, Document
):
self.error(
"Argument to LazyReferenceField constructor must be a "
"document class or a string"
)
self.dbref = dbref
self.passthrough = passthrough
self.document_type_obj = document_type
self.reverse_delete_rule = reverse_delete_rule
super().__init__(**kwargs)
@property
def document_type(self):
if isinstance(self.document_type_obj, str):
if self.document_type_obj == RECURSIVE_REFERENCE_CONSTANT:
self.document_type_obj = self.owner_document
else:
self.document_type_obj = _DocumentRegistry.get(self.document_type_obj)
return self.document_type_obj
def build_lazyref(self, value):
if isinstance(value, LazyReference):
if value.passthrough != self.passthrough:
value = LazyReference(
value.document_type, value.pk, passthrough=self.passthrough
)
elif value is not None:
if isinstance(value, self.document_type):
value = LazyReference(
self.document_type, value.pk, passthrough=self.passthrough
)
elif isinstance(value, DBRef):
value = LazyReference(
self.document_type, value.id, passthrough=self.passthrough
)
else:
# value is the primary key of the referenced document
value = LazyReference(
self.document_type, value, passthrough=self.passthrough
)
return value
def __get__(self, instance, owner):
"""Descriptor to allow lazy dereferencing."""
if instance is None:
# Document class being used rather than a document object
return self
value = self.build_lazyref(instance._data.get(self.name))
if value:
instance._data[self.name] = value
return super().__get__(instance, owner)
def to_mongo(self, value):
if isinstance(value, LazyReference):
pk = value.pk
elif isinstance(value, self.document_type):
pk = value.pk
elif isinstance(value, DBRef):
pk = value.id
else:
# value is the primary key of the referenced document
pk = value
id_field_name = self.document_type._meta["id_field"]
id_field = self.document_type._fields[id_field_name]
pk = id_field.to_mongo(pk)
if self.dbref:
return DBRef(self.document_type._get_collection_name(), pk)
else:
return pk
def to_python(self, value):
"""Convert a MongoDB-compatible type to a Python type."""
if not isinstance(value, (DBRef, Document, EmbeddedDocument)):
collection = self.document_type._get_collection_name()
value = DBRef(collection, self.document_type.id.to_python(value))
value = self.build_lazyref(value)
return value
def validate(self, value):
if isinstance(value, LazyReference):
if value.collection != self.document_type._get_collection_name():
self.error("Reference must be on a `%s` document." % self.document_type)
pk = value.pk
elif isinstance(value, self.document_type):
pk = value.pk
elif isinstance(value, DBRef):
# TODO: check collection ?
collection = self.document_type._get_collection_name()
if value.collection != collection:
self.error("DBRef on bad collection (must be on `%s`)" % collection)
pk = value.id
else:
# value is the primary key of the referenced document
id_field_name = self.document_type._meta["id_field"]
id_field = getattr(self.document_type, id_field_name)
pk = value
try:
id_field.validate(pk)
except ValidationError:
self.error(
"value should be `{0}` document, LazyReference or DBRef on `{0}` "
"or `{0}`'s primary key (i.e. `{1}`)".format(
self.document_type.__name__, type(id_field).__name__
)
)
if pk is None:
self.error(_unsaved_object_error(self.document_type.__name__))
def prepare_query_value(self, op, value):
if value is None:
return None
super().prepare_query_value(op, value)
return self.to_mongo(value)
def lookup_member(self, member_name):
return self.document_type._fields.get(member_name)
| LazyReferenceField |
python | docker__docker-py | docker/errors.py | {
"start": 3334,
"end": 3394
} | class ____(DockerException, ValueError):
pass
| NullResource |
python | huggingface__transformers | examples/modular-transformers/modeling_test_detr.py | {
"start": 45341,
"end": 54271
} | class ____(TestDetrPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TestDetrDecoderLayer`].
The decoder updates the query embeddings through multiple self-attention and cross-attention layers.
Some tweaks for Deformable DETR:
- `position_embeddings`, `reference_points`, `spatial_shapes` and `valid_ratios` are added to the forward pass.
- it also returns a stack of intermediate outputs and reference points from all decoding layers.
Args:
config: TestDetrConfig
"""
def __init__(self, config: TestDetrConfig):
super().__init__(config)
self.dropout = config.dropout
self.layers = nn.ModuleList([TestDetrDecoderLayer(config) for _ in range(config.decoder_layers)])
self.gradient_checkpointing = False
# hack implementation for iterative bounding box refinement and two-stage Deformable DETR
self.bbox_embed = None
self.class_embed = None
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
position_embeddings=None,
reference_points=None,
spatial_shapes=None,
spatial_shapes_list=None,
level_start_index=None,
valid_ratios=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`):
The query embeddings that are passed into the decoder.
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected
in `[0, 1]`:
- 1 for pixels that are real (i.e. **not masked**),
- 0 for pixels that are padding (i.e. **masked**).
position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
Position embeddings that are added to the queries and keys in each self-attention layer.
reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)` is `as_two_stage` else `(batch_size, num_queries, 2)` or , *optional*):
Reference point in range `[0, 1]`, top-left (0,0), bottom-right (1, 1), including padding area.
spatial_shapes (`torch.FloatTensor` of shape `(num_feature_levels, 2)`):
Spatial shapes of the feature maps.
level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`, *optional*):
Indexes for the start of each feature level. In range `[0, sequence_length]`.
valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`, *optional*):
Ratio of valid area in each feature level.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if inputs_embeds is not None:
hidden_states = inputs_embeds
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
intermediate = ()
intermediate_reference_points = ()
for idx, decoder_layer in enumerate(self.layers):
num_coordinates = reference_points.shape[-1]
if num_coordinates == 4:
reference_points_input = (
reference_points[:, :, None] * torch.cat([valid_ratios, valid_ratios], -1)[:, None]
)
elif reference_points.shape[-1] == 2:
reference_points_input = reference_points[:, :, None] * valid_ratios[:, None]
else:
raise ValueError("Reference points' last dimension must be of size 2")
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = decoder_layer(
hidden_states,
position_embeddings,
reference_points_input,
spatial_shapes,
spatial_shapes_list,
level_start_index,
encoder_hidden_states, # as a positional argument for gradient checkpointing
encoder_attention_mask,
output_attentions,
)
hidden_states = layer_outputs[0]
# hack implementation for iterative bounding box refinement
if self.bbox_embed is not None:
tmp = self.bbox_embed[idx](hidden_states)
num_coordinates = reference_points.shape[-1]
if num_coordinates == 4:
new_reference_points = tmp + inverse_sigmoid(reference_points)
new_reference_points = new_reference_points.sigmoid()
elif num_coordinates == 2:
new_reference_points = tmp
new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(reference_points)
new_reference_points = new_reference_points.sigmoid()
else:
raise ValueError(
f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}"
)
reference_points = new_reference_points.detach()
intermediate += (hidden_states,)
intermediate_reference_points += (reference_points,)
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
# Keep batch_size as first dimension
intermediate = torch.stack(intermediate, dim=1)
intermediate_reference_points = torch.stack(intermediate_reference_points, dim=1)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
intermediate,
intermediate_reference_points,
all_hidden_states,
all_self_attns,
all_cross_attentions,
]
if v is not None
)
return TestDetrDecoderOutput(
last_hidden_state=hidden_states,
intermediate_hidden_states=intermediate,
intermediate_reference_points=intermediate_reference_points,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
def build_position_encoding(config):
n_steps = config.d_model // 2
if config.position_embedding_type == "sine":
# TODO find a better way of exposing other arguments
position_embedding = TestDetrSinePositionEmbedding(n_steps, normalize=True)
elif config.position_embedding_type == "learned":
position_embedding = TestDetrLearnedPositionEmbedding(n_steps)
else:
raise ValueError(f"Not supported {config.position_embedding_type}")
return position_embedding
@auto_docstring(
custom_intro="""
The bare Deformable DETR Model (consisting of a backbone and encoder-decoder Transformer) outputting raw
hidden-states without any specific head on top.
"""
)
| TestDetrDecoder |
python | cython__cython | Cython/Compiler/FlowControl.py | {
"start": 11940,
"end": 12097
} | class ____(NameAssignment):
def __init__(self, lhs, rhs, entry):
NameAssignment.__init__(self, lhs, rhs, entry)
self.is_arg = True
| Argument |
python | gevent__gevent | src/greentest/3.14/test_urllib2.py | {
"start": 11396,
"end": 11656
} | class ____:
addheaders = []
def open(self, req, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.req, self.data, self.timeout = req, data, timeout
def error(self, proto, *args):
self.proto, self.args = proto, args
| MockOpener |
python | astropy__astropy | astropy/io/registry/core.py | {
"start": 13670,
"end": 14631
} | class ____(UnifiedInputRegistry, UnifiedOutputRegistry):
"""Unified I/O Registry.
.. versionadded:: 5.0
"""
def __init__(self):
super().__init__()
self._registries_order = ("read", "write", "identify")
def get_formats(self, data_class=None, readwrite=None):
"""
Get the list of registered I/O formats as a `~astropy.table.Table`.
Parameters
----------
data_class : class, optional
Filter readers/writer to match data class (default = all classes).
readwrite : str or None, optional
Search only for readers (``"Read"``) or writers (``"Write"``).
If None search for both. Default is None.
.. versionadded:: 1.3
Returns
-------
format_table : :class:`~astropy.table.Table`
Table of available I/O formats.
"""
return super().get_formats(data_class, readwrite)
| UnifiedIORegistry |
python | getsentry__sentry | tests/sentry/integrations/api/endpoints/test_data_forwarding_details.py | {
"start": 982,
"end": 20997
} | class ____(DataForwardingDetailsEndpointTest):
method = "PUT"
def test_without_revamp_feature_flag_access(self) -> None:
data_forwarder = self.create_data_forwarder(
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "old_key"},
is_enabled=True,
)
with self.feature(
{
"organizations:data-forwarding-revamp-access": False,
"organizations:data-forwarding": True,
}
):
response = self.client.put(
reverse(self.endpoint, args=(self.organization.slug, data_forwarder.id))
)
assert response.status_code == 403
def test_without_data_forwarding_feature_flag_access(self) -> None:
data_forwarder = self.create_data_forwarder(
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "old_key"},
is_enabled=True,
)
with self.feature(
{
"organizations:data-forwarding-revamp-access": True,
"organizations:data-forwarding": False,
}
):
response = self.client.put(
reverse(self.endpoint, args=(self.organization.slug, data_forwarder.id))
)
assert response.status_code == 403
def test_update_data_forwarder(self) -> None:
data_forwarder = self.create_data_forwarder(
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "old_key"},
is_enabled=True,
)
# Verify initial state before update
assert data_forwarder.config == {"write_key": "old_key"}
assert data_forwarder.is_enabled
payload = {
"provider": DataForwarderProviderSlug.SEGMENT,
"config": {"write_key": "new_key"},
"is_enabled": False,
"enroll_new_projects": True,
"project_ids": [self.project.id],
}
with self.feature({"organizations:data-forwarding-revamp-access": True}):
response = self.get_success_response(
self.organization.slug, data_forwarder.id, status_code=200, **payload
)
assert response.data["config"] == {"write_key": "new_key"}
assert not response.data["isEnabled"]
assert response.data["enrollNewProjects"]
data_forwarder.refresh_from_db()
assert data_forwarder.config == {"write_key": "new_key"}
assert not data_forwarder.is_enabled
assert data_forwarder.enroll_new_projects
def test_update_reenrolls_previously_enrolled_project(self) -> None:
data_forwarder = self.create_data_forwarder(
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "test_key"},
)
project = self.create_project(organization=self.organization)
self.create_data_forwarder_project(data_forwarder=data_forwarder, project=project)
payload = {
"provider": DataForwarderProviderSlug.SEGMENT,
"config": {"write_key": "test_key"},
"project_ids": [project.id],
}
self.get_success_response(
self.organization.slug, data_forwarder.id, status_code=200, **payload
)
project_config = DataForwarderProject.objects.get(
data_forwarder=data_forwarder, project=project
)
assert project_config.is_enabled
def test_update_with_project_ids(self) -> None:
data_forwarder = self.create_data_forwarder(
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "test_key"},
)
project1 = self.create_project(organization=self.organization)
project2 = self.create_project(organization=self.organization)
project3 = self.create_project(organization=self.organization)
self.create_data_forwarder_project(data_forwarder=data_forwarder, project=project1)
payload = {
"provider": DataForwarderProviderSlug.SEGMENT,
"config": {"write_key": "test_key"},
"project_ids": [project2.id, project3.id],
}
self.get_success_response(
self.organization.slug, data_forwarder.id, status_code=200, **payload
)
enrolled_projects = set(
DataForwarderProject.objects.filter(
data_forwarder=data_forwarder, is_enabled=True
).values_list("project_id", flat=True)
)
assert enrolled_projects == {project2.id, project3.id}
assert project1.id not in enrolled_projects
def test_update_unenroll_all_projects(self) -> None:
data_forwarder = self.create_data_forwarder(
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "test_key"},
)
project1 = self.create_project(organization=self.organization)
project2 = self.create_project(organization=self.organization)
self.create_data_forwarder_project(data_forwarder=data_forwarder, project=project1)
self.create_data_forwarder_project(data_forwarder=data_forwarder, project=project2)
payload = {
"provider": DataForwarderProviderSlug.SEGMENT,
"config": {"write_key": "test_key"},
"project_ids": [],
}
self.get_success_response(
self.organization.slug, data_forwarder.id, status_code=200, **payload
)
enrolled_count = DataForwarderProject.objects.filter(
data_forwarder=data_forwarder, is_enabled=True
).count()
assert enrolled_count == 0
def test_update_with_invalid_project_ids(self) -> None:
data_forwarder = self.create_data_forwarder(
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "test_key"},
)
payload = {
"provider": DataForwarderProviderSlug.SEGMENT,
"config": {"write_key": "test_key"},
"project_ids": [99999, 88888], # Invalid project IDs
}
response = self.get_error_response(
self.organization.slug, data_forwarder.id, status_code=400, **payload
)
assert "invalid project ids" in str(response.data).lower()
def test_update_with_project_write_bulk_enrollment(self) -> None:
"""Test bulk enrollment of multiple projects by project:write user"""
data_forwarder = self.create_data_forwarder(
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "test_key"},
)
project1 = self.create_project(organization=self.organization)
project2 = self.create_project(organization=self.organization)
user = self.create_user()
self.create_member(
user=user,
organization=self.organization,
role="member",
teams=[self.team],
teamRole="admin",
)
self.login_as(user=user)
# Bulk enrollment only uses project_ids
payload = {
"project_ids": [project1.id, project2.id],
}
self.get_success_response(
self.organization.slug, data_forwarder.id, status_code=200, **payload
)
project_config1 = DataForwarderProject.objects.get(
data_forwarder=data_forwarder, project=project1
)
assert project_config1.is_enabled
project_config2 = DataForwarderProject.objects.get(
data_forwarder=data_forwarder, project=project2
)
assert project_config2.is_enabled
def test_update_project_overrides_with_project_write(self) -> None:
"""Test updating a single project's overrides and is_enabled by project:write user"""
data_forwarder = self.create_data_forwarder(
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "test_key"},
)
project = self.create_project(organization=self.organization)
self.create_data_forwarder_project(
data_forwarder=data_forwarder, project=project, overrides={"old": "value"}
)
user = self.create_user()
self.create_member(
user=user,
organization=self.organization,
role="member",
teams=[self.team],
teamRole="admin",
)
self.login_as(user=user)
# Single project configuration uses project_id (singular)
payload = {
"project_id": project.id,
"overrides": {"new": "value"},
"is_enabled": False,
}
self.get_success_response(
self.organization.slug, data_forwarder.id, status_code=200, **payload
)
project_config = DataForwarderProject.objects.get(
data_forwarder=data_forwarder, project=project
)
assert project_config.overrides == {"new": "value"}
assert not project_config.is_enabled
def test_update_unenroll_projects_with_project_write(self) -> None:
data_forwarder = self.create_data_forwarder(
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "test_key"},
)
project1 = self.create_project(organization=self.organization)
project2 = self.create_project(organization=self.organization)
self.create_data_forwarder_project(data_forwarder=data_forwarder, project=project1)
self.create_data_forwarder_project(data_forwarder=data_forwarder, project=project2)
user = self.create_user()
self.create_member(
user=user,
organization=self.organization,
role="member",
teams=[self.team],
teamRole="admin",
)
self.login_as(user=user)
payload: dict[str, list[int]] = {"project_ids": []}
self.get_success_response(
self.organization.slug, data_forwarder.id, status_code=200, **payload
)
enrolled_count = DataForwarderProject.objects.filter(
data_forwarder=data_forwarder, is_enabled=True
).count()
assert enrolled_count == 0
def test_update_with_project_write_checks_permissions(self) -> None:
data_forwarder = self.create_data_forwarder(
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "test_key"},
)
team1 = self.create_team(organization=self.organization)
team2 = self.create_team(organization=self.organization)
project1 = self.create_project(organization=self.organization, teams=[team1])
project2 = self.create_project(organization=self.organization, teams=[team2])
user = self.create_user()
self.create_member(
user=user,
organization=self.organization,
role="member",
teams=[team1],
teamRole="admin",
)
self.login_as(user=user)
payload = {"project_ids": [project1.id, project2.id]}
response = self.get_error_response(
self.organization.slug, data_forwarder.id, status_code=403, **payload
)
assert "insufficient access" in str(response.data).lower()
def test_update_requires_permission(self) -> None:
data_forwarder = self.create_data_forwarder(
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "test_key"},
)
user_without_permission = self.create_user()
self.login_as(user=user_without_permission)
payload = {
"provider": DataForwarderProviderSlug.SEGMENT,
"config": {"write_key": "new_key"},
}
self.get_error_response(
self.organization.slug, data_forwarder.id, status_code=403, **payload
)
def test_update_with_missing_project_id_for_project_write(self) -> None:
data_forwarder = self.create_data_forwarder(
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "test_key"},
)
user = self.create_user()
self.create_member(
user=user,
organization=self.organization,
role="member",
teams=[self.team],
teamRole="admin",
)
self.login_as(user=user)
# project:write path requires either project_ids or project_id
payload = {
"overrides": {"custom": "value"},
}
response = self.get_error_response(
self.organization.slug, data_forwarder.id, status_code=400, **payload
)
assert "project_id" in str(response.data).lower()
def test_update_with_mixed_valid_and_invalid_project_ids(self) -> None:
"""Test bulk enrollment with invalid project IDs"""
data_forwarder = self.create_data_forwarder(
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "test_key"},
)
project = self.create_project(organization=self.organization)
user = self.create_user()
self.create_member(
user=user,
organization=self.organization,
role="member",
teams=[self.team],
teamRole="admin",
)
self.login_as(user=user)
# Bulk enrollment should not include overrides
payload = {
"project_ids": [project.id, 99999],
}
response = self.get_error_response(
self.organization.slug, data_forwarder.id, status_code=400, **payload
)
assert "invalid project ids" in str(response.data).lower()
assert "99999" in str(response.data)
def test_update_with_project_from_different_organization(self) -> None:
"""Test bulk enrollment rejects projects from different organization"""
data_forwarder = self.create_data_forwarder(
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "test_key"},
)
# Create a project in a different organization
other_org = self.create_organization(name="Other Org")
other_project = self.create_project(organization=other_org)
user = self.create_user()
self.create_member(
user=user,
organization=self.organization,
role="member",
teams=[self.team],
teamRole="admin",
)
self.login_as(user=user)
# Bulk enrollment should not include overrides
payload = {
"project_ids": [other_project.id],
}
response = self.get_error_response(
self.organization.slug, data_forwarder.id, status_code=400, **payload
)
assert "invalid project ids" in str(response.data).lower()
def test_update_without_team_membership_denies_access(self) -> None:
data_forwarder = self.create_data_forwarder(
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "test_key"},
)
user = self.create_user()
self.create_member(
user=user,
organization=self.organization,
role="member",
teams=[], # No team membership
)
self.login_as(user=user)
payload: dict[str, list[int]] = {
"project_ids": [],
}
self.get_error_response(
self.organization.slug, data_forwarder.id, status_code=403, **payload
)
def test_update_single_project_creates_new_config(self) -> None:
"""Test that single project configuration can create a new config"""
data_forwarder = self.create_data_forwarder(
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "test_key"},
)
project = self.create_project(organization=self.organization)
user = self.create_user()
self.create_member(
user=user,
organization=self.organization,
role="member",
teams=[self.team],
teamRole="admin",
)
self.login_as(user=user)
payload = {
"project_id": project.id,
"overrides": {"custom": "value"},
"is_enabled": True,
}
self.get_success_response(
self.organization.slug, data_forwarder.id, status_code=200, **payload
)
project_config = DataForwarderProject.objects.get(
data_forwarder=data_forwarder, project=project
)
assert project_config.overrides == {"custom": "value"}
assert project_config.is_enabled
def test_org_write_can_bulk_enroll(self) -> None:
data_forwarder = self.create_data_forwarder(
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "test_key"},
)
project1 = self.create_project(organization=self.organization)
project2 = self.create_project(organization=self.organization)
user = self.create_user()
self.create_member(
user=user,
organization=self.organization,
role="manager", # Has org:write
)
self.login_as(user=user)
payload = {
"project_ids": [project1.id, project2.id],
}
self.get_success_response(
self.organization.slug, data_forwarder.id, status_code=200, **payload
)
enrolled_projects = set(
DataForwarderProject.objects.filter(
data_forwarder=data_forwarder, is_enabled=True
).values_list("project_id", flat=True)
)
assert enrolled_projects == {project1.id, project2.id}
def test_org_write_can_update_project_overrides(self) -> None:
data_forwarder = self.create_data_forwarder(
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "test_key"},
)
project = self.create_project(organization=self.organization)
user = self.create_user()
self.create_member(
user=user,
organization=self.organization,
role="manager", # Has org:write
teams=[self.team],
teamRole="admin",
)
self.login_as(user=user)
payload = {
"project_id": project.id,
"overrides": {"custom": "value"},
"is_enabled": True,
}
self.get_success_response(
self.organization.slug, data_forwarder.id, status_code=200, **payload
)
project_config = DataForwarderProject.objects.get(
data_forwarder=data_forwarder, project=project
)
assert project_config.overrides == {"custom": "value"}
assert project_config.is_enabled
def test_project_write_cannot_update_main_config(self) -> None:
"""Test that project:write users cannot update main data forwarder config"""
data_forwarder = self.create_data_forwarder(
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "old_key"},
is_enabled=True,
)
user = self.create_user()
self.create_member(
user=user,
organization=self.organization,
role="member", # Has project:write but not org:write
teams=[self.team],
teamRole="admin",
)
self.login_as(user=user)
payload = {
"provider": DataForwarderProviderSlug.SEGMENT,
"config": {"write_key": "new_key"},
"is_enabled": False,
}
self.get_error_response(
self.organization.slug, data_forwarder.id, status_code=400, **payload
)
# Config should remain unchanged
data_forwarder.refresh_from_db()
assert data_forwarder.config == {"write_key": "old_key"}
assert data_forwarder.is_enabled
@region_silo_test
| DataForwardingDetailsPutTest |
python | numba__numba | numba/experimental/function_type.py | {
"start": 1069,
"end": 1803
} | class ____(models.PrimitiveModel):
"""FunctionProtoModel describes the signatures of first-class functions
"""
def __init__(self, dmm, fe_type):
if isinstance(fe_type, FunctionType):
ftype = fe_type.ftype
elif isinstance(fe_type, FunctionPrototype):
ftype = fe_type
else:
raise NotImplementedError((type(fe_type)))
retty = dmm.lookup(ftype.rtype).get_value_type()
args = [dmm.lookup(t).get_value_type() for t in ftype.atypes]
be_type = ir.PointerType(ir.FunctionType(retty, args))
super(FunctionProtoModel, self).__init__(dmm, fe_type, be_type)
@register_model(FunctionType)
@register_model(UndefinedFunctionType)
| FunctionProtoModel |
python | jina-ai__jina | jina/proto/serializer.py | {
"start": 3529,
"end": 4210
} | class ____:
"""Since the serializer is replacing the `jina_pb2` to know how to exactly serialize messages, this is just a placeholder that
delegates the serializing and deserializing to the internal protobuf structure with no extra optimization.
"""
@staticmethod
def SerializeToString(x):
"""
# noqa: DAR101
# noqa: DAR102
# noqa: DAR201
"""
return x.SerializeToString()
@staticmethod
def FromString(x: bytes):
"""
# noqa: DAR101
# noqa: DAR102
# noqa: DAR201
"""
ip = jina_pb2.JinaInfoProto()
ip.ParseFromString(x)
return ip
| JinaInfoProto |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.