language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | doocs__leetcode | solution/3300-3399/3385.Minimum Time to Break Locks II/Solution.py | {
"start": 0,
"end": 3884
} | class ____:
class Edge(NamedTuple):
src: int
dst: int
cap: int
flow: int
cost: int
class _Edge:
def __init__(self, dst: int, cap: int, cost: int) -> None:
self.dst = dst
self.cap = cap
self.cost = cost
self.rev: Optional[MCFGraph._Edge] = None
def __init__(self, n: int) -> None:
self._n = n
self._g: List[List[MCFGraph._Edge]] = [[] for _ in range(n)]
self._edges: List[MCFGraph._Edge] = []
def add_edge(self, src: int, dst: int, cap: int, cost: int) -> int:
assert 0 <= src < self._n
assert 0 <= dst < self._n
assert 0 <= cap
m = len(self._edges)
e = MCFGraph._Edge(dst, cap, cost)
re = MCFGraph._Edge(src, 0, -cost)
e.rev = re
re.rev = e
self._g[src].append(e)
self._g[dst].append(re)
self._edges.append(e)
return m
def get_edge(self, i: int) -> Edge:
assert 0 <= i < len(self._edges)
e = self._edges[i]
re = cast(MCFGraph._Edge, e.rev)
return MCFGraph.Edge(re.dst, e.dst, e.cap + re.cap, re.cap, e.cost)
def edges(self) -> List[Edge]:
return [self.get_edge(i) for i in range(len(self._edges))]
def flow(self, s: int, t: int, flow_limit: Optional[int] = None) -> Tuple[int, int]:
return self.slope(s, t, flow_limit)[-1]
def slope(
self, s: int, t: int, flow_limit: Optional[int] = None
) -> List[Tuple[int, int]]:
assert 0 <= s < self._n
assert 0 <= t < self._n
assert s != t
if flow_limit is None:
flow_limit = cast(int, sum(e.cap for e in self._g[s]))
dual = [0] * self._n
prev: List[Optional[Tuple[int, MCFGraph._Edge]]] = [None] * self._n
def refine_dual() -> bool:
pq = [(0, s)]
visited = [False] * self._n
dist: List[Optional[int]] = [None] * self._n
dist[s] = 0
while pq:
dist_v, v = heappop(pq)
if visited[v]:
continue
visited[v] = True
if v == t:
break
dual_v = dual[v]
for e in self._g[v]:
w = e.dst
if visited[w] or e.cap == 0:
continue
reduced_cost = e.cost - dual[w] + dual_v
new_dist = dist_v + reduced_cost
dist_w = dist[w]
if dist_w is None or new_dist < dist_w:
dist[w] = new_dist
prev[w] = v, e
heappush(pq, (new_dist, w))
else:
return False
dist_t = dist[t]
for v in range(self._n):
if visited[v]:
dual[v] -= cast(int, dist_t) - cast(int, dist[v])
return True
flow = 0
cost = 0
prev_cost_per_flow: Optional[int] = None
result = [(flow, cost)]
while flow < flow_limit:
if not refine_dual():
break
f = flow_limit - flow
v = t
while prev[v] is not None:
u, e = cast(Tuple[int, MCFGraph._Edge], prev[v])
f = min(f, e.cap)
v = u
v = t
while prev[v] is not None:
u, e = cast(Tuple[int, MCFGraph._Edge], prev[v])
e.cap -= f
assert e.rev is not None
e.rev.cap += f
v = u
c = -dual[s]
flow += f
cost += f * c
if c == prev_cost_per_flow:
result.pop()
result.append((flow, cost))
prev_cost_per_flow = c
return result
| MCFGraph |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/array_ops_test.py | {
"start": 14453,
"end": 21087
} | class ____(test_util.TensorFlowTestCase):
def testReverse0DimAuto(self):
x_np = 4
for use_gpu in [False, True]:
with self.subTest(use_gpu=use_gpu):
with self.cached_session(use_gpu=use_gpu):
x_tf = self.evaluate(array_ops.reverse_v2(x_np, []))
self.assertAllEqual(x_tf, x_np)
def _reverse1DimAuto(self, np_dtype):
x_np = np.array([1, 120, 3, 40, 5], dtype=np_dtype)
for use_gpu in [False, True]:
for axis_dtype in [dtypes.int32, dtypes.int64]:
with self.subTest(use_gpu=use_gpu, axis_dtype=axis_dtype):
x_tf = self.evaluate(
array_ops.reverse_v2(x_np,
constant_op.constant([0], dtype=axis_dtype)))
self.assertAllEqual(x_tf, np.asarray(x_np)[::-1])
def _reverse2DimAuto(self, np_dtype):
x_np = np.array([[1, 120, 3], [4, 5, 60]], dtype=np_dtype)
for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
for use_gpu in [False, True]:
for axis_dtype in [dtypes.int32, dtypes.int64]:
with self.subTest(
reverse_f=reverse_f, use_gpu=use_gpu, axis_dtype=axis_dtype):
x_tf_1 = self.evaluate(
reverse_f(x_np, constant_op.constant([0], dtype=axis_dtype)))
x_tf_2 = self.evaluate(
reverse_f(x_np, constant_op.constant([-2], dtype=axis_dtype)))
x_tf_3 = self.evaluate(
reverse_f(x_np, constant_op.constant([1], dtype=axis_dtype)))
x_tf_4 = self.evaluate(
reverse_f(x_np, constant_op.constant([-1], dtype=axis_dtype)))
x_tf_5 = self.evaluate(
reverse_f(x_np, constant_op.constant([1, 0], dtype=axis_dtype)))
self.assertAllEqual(x_tf_1, np.asarray(x_np)[::-1, :])
self.assertAllEqual(x_tf_2, np.asarray(x_np)[::-1, :])
self.assertAllEqual(x_tf_3, np.asarray(x_np)[:, ::-1])
self.assertAllEqual(x_tf_4, np.asarray(x_np)[:, ::-1])
self.assertAllEqual(x_tf_5, np.asarray(x_np)[::-1, ::-1])
# This test covers the axis validation in the shape function
# (no eval())
def testInvalidAxis(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"is out of.* range"):
array_ops.reverse_v2(x_np, [-30])
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"is out of.* range"):
array_ops.reverse_v2(x_np, [2])
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError),
r"axis 0 specified more than once|axis 0 was repeated"):
array_ops.reverse_v2(x_np, [0, -2])
# This is the version of reverse that uses axis indices rather than
# bool tensors
# TODO(b/32254538): Change this test to use array_ops.reverse
#
# Note: this test passes placeholder as constant axis is validated
# in shape function (see testInvalidAxis)
def testInvalid(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
@def_function.function
def func(ax):
return array_ops.reverse_v2(x_np, ax)
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"is out of.*range"):
func([-30])
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"is out of.*range"):
func([2])
with self.assertRaisesRegex(
(ValueError, errors_impl.InvalidArgumentError),
"(axis 0 specified more than once|canonicalized axis 0 was repeated.)"):
func([0, -2])
def testReverse1DimAuto(self):
for dtype in [
np.uint8, np.int8, np.uint16, np.int16, np.uint32, np.int32, np.uint64,
np.int64, np.bool_, np.float16, np.float32, np.float64, np.complex64,
np.complex128,
np.array(b"").dtype.type, dtypes.bfloat16.as_numpy_dtype
]:
self._reverse1DimAuto(dtype)
def testReverse2DimAuto(self):
for dtype in [
np.uint8, np.int8, np.uint16, np.int16, np.uint32, np.int32, np.uint64,
np.int64, np.bool_, np.float16, np.float32, np.float64, np.complex64,
np.complex128,
np.array(b"").dtype.type, dtypes.bfloat16.as_numpy_dtype
]:
self._reverse2DimAuto(dtype)
def testReverseRowsOf3Channels(self):
"""Tests optimized code for reversing rows with last dim size = 3."""
for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
for outer_size in (1, 2):
for middle_size in list(range(50)) + [100000]:
with self.subTest(
reverse_f=reverse_f,
outer_size=outer_size,
middle_size=middle_size,
use_gpu=True):
x_np = np.reshape(
np.arange(outer_size * middle_size * 3, dtype=np.float32),
newshape=(outer_size, middle_size, 3))
x_tf = self.evaluate(reverse_f(x_np, [1]))
np_answer = x_np[:, ::-1, :]
self.assertAllEqual(x_tf, np_answer)
def testReverseRowsOf4Channels(self):
for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
for outer_size in (1, 2):
for middle_size in list(range(50)) + [100000]:
with self.subTest(
reverse_f=reverse_f,
outer_size=outer_size,
middle_size=middle_size,
use_gpu=True):
x_np = np.reshape(
np.arange(outer_size * middle_size * 4, dtype=np.float32),
newshape=(outer_size, middle_size, 4))
x_tf = self.evaluate(reverse_f(x_np, [1]))
np_answer = x_np[:, ::-1, :]
self.assertAllEqual(x_tf, np_answer)
def testReverseColumnsOf3Channels(self):
for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
for outer_size in list(range(50)) + [100000]:
for middle_size in (1, 2):
with self.subTest(
reverse_f=reverse_f,
outer_size=outer_size,
middle_size=middle_size,
use_gpu=True):
x_np = np.reshape(
np.arange(outer_size * middle_size * 3, dtype=np.float32),
newshape=(outer_size, middle_size, 3))
x_tf = self.evaluate(reverse_f(x_np, [0]))
np_answer = x_np[::-1, :, :]
self.assertAllEqual(x_tf, np_answer)
def testReverseInvalidShape(self):
x = np.ndarray(shape=[0, 1, 1])
v = array_ops.reverse_v2(x, axis=[1])
self.assertAllEqual(self.evaluate(v), v)
| ReverseV2Test |
python | falconry__falcon | falcon/asgi/stream.py | {
"start": 877,
"end": 17942
} | class ____:
"""File-like input object for reading the body of the request, if any.
This class implements coroutine functions for asynchronous reading or
iteration, but otherwise provides an interface similar to that defined by
:class:`io.IOBase`.
If the request includes a Content-Length header, the number of bytes in the
stream will be truncated to the length specified by the header. Otherwise,
the stream will yield data until the ASGI server indicates that no more
bytes are available.
For large request bodies, the preferred method of using the stream object is
as an asynchronous iterator. In this mode, each body chunk is simply yielded
in its entirety, as it is received from the ASGI server. Because no data is
buffered by the framework, this is the most memory-efficient way of reading
the request body::
# If the request body is empty or has already be consumed, the iteration
# will immediately stop without yielding any data chunks. Otherwise, a
# series of byte # strings will be yielded until the entire request
# body has been yielded or the client disconnects.
async for data_chunk in req.stream
pass
The stream object also supports asynchronous ``read()`` and
``readall()`` methods::
# Read all of the data at once; use only when you are confident
# that the request body is small enough to not eat up all of
# your memory. For small bodies, this is the most performant
# option.
data = await req.stream.readall()
# ...or call read() without arguments
data = await req.stream.read()
# ...or read the data in chunks. You may choose to read more
# or less than 32 KiB as shown in this example. But note that
# this approach will generally be less efficient as compared
# to async iteration, resulting in more usage and
# copying of memory.
while True:
data_chunk = await req.stream.read(32 * 1024)
if not data_chunk:
break
Warning:
Apps may not use both ``read()`` and the asynchronous iterator
interface to consume the same request body; the only time that
it is safe to do so is when one or the other method is used to
completely read the entire body *before* the other method is
even attempted. Therefore, it is important to always call
:meth:`~.exhaust` or :meth:`~.close` if a body has only been
partially read and the remaining data is to be ignored.
Note:
The stream object provides a convenient abstraction over the series of
body chunks contained in any ASGI "http.request" events received by the
app. As such, some request body data may be temporarily buffered in
memory during and between calls to read from the stream. The framework
has been designed to minimize the amount of data that must be buffered
in this manner.
Args:
receive (awaitable): ASGI awaitable callable that will yield a new
request event dictionary when one is available.
Keyword Args:
first_event (dict): First ASGI event received from the client,
if one was preloaded (default ``None``).
content_length (int): Expected content length of the stream, derived
from the Content-Length header in the request (if available).
"""
__slots__ = (
'_buffer',
'_bytes_remaining',
'_closed',
'_iteration_started',
'_pos',
'_receive',
)
_buffer: bytes
_bytes_remaining: int
_closed: bool
_iteration_started: bool
_pos: int
_receive: AsgiReceive
def __init__(
self,
receive: AsgiReceive,
first_event: AsgiEvent | None = None,
content_length: int | None = None,
) -> None:
self._closed = False
self._iteration_started = False
self._receive = receive
# NOTE(kgriffs): Outside of testing, first_event will always be set
# and we also assume a body is expected, otherwise why bother
# creating a stream object to read it? But just in case this
# object is created in other cases, use "in" here rather than
# EAFP.
if first_event and 'body' in first_event:
first_chunk: bytes = first_event['body']
else:
first_chunk = b''
if content_length is None:
self._buffer = first_chunk
# NOTE(kgriffs): If length is unknown we just set remaining bytes
# to a ridiculously high number so that we will keep reading
# until we get an event with more_body == False. We do not
# use sys.maxsize because 2**31 on 32-bit systems is not
# a large enough number (someone may have an API that accepts
# multi-GB payloads).
self._bytes_remaining = 2**63
else:
if len(first_chunk) > content_length:
self._buffer = first_chunk[:content_length]
else:
self._buffer = first_chunk
self._bytes_remaining = content_length - len(self._buffer)
self._pos = len(self._buffer)
if first_event and self._bytes_remaining:
# NOTE(kgriffs): Override if the event says there's no more data
if not ('more_body' in first_event and first_event['more_body']):
self._bytes_remaining = 0
def __aiter__(self) -> AsyncIterator[bytes]:
# NOTE(kgriffs): This returns an async generator, but that's OK because
# it also implements the iterator protocol defined in PEP 492, albeit
# in a more efficient way than a regular async iterator.
return self._iter_content()
# -------------------------------------------------------------------------
# These methods are included to improve compatibility with Python's
# standard "file-like" IO interface.
# -------------------------------------------------------------------------
# NOTE(kgriffs): According to the Python docs, NotImplementedError is not
# meant to be used to mean "not supported"; rather, the method should
# just be left undefined; hence we do not implement readline(),
# readlines(), __iter__(), __next__(), flush(), seek(),
# truncate(), __del__().
def fileno(self) -> NoReturn:
"""Raise an instance of OSError since a file descriptor is not used."""
raise OSError('This IO object does not use a file descriptor')
def isatty(self) -> bool:
"""Return ``False`` always."""
return False
def readable(self) -> bool:
"""Return ``True`` always."""
return True
def seekable(self) -> bool:
"""Return ``False`` always."""
return False
def writable(self) -> bool:
"""Return ``False`` always."""
return False
def tell(self) -> int:
"""Return the number of bytes read from the stream so far."""
return self._pos
@property
def closed(self) -> bool:
return self._closed
# -------------------------------------------------------------------------
@property
def eof(self) -> bool:
return not self._buffer and self._bytes_remaining == 0
def close(self) -> None:
"""Clear any buffered data and close this stream.
Once the stream is closed, any operation on it will
raise an instance of :class:`ValueError`.
As a convenience, it is allowed to call this method more than
once; only the first call, however, will have an effect.
"""
if not self._closed:
self._buffer = b''
self._bytes_remaining = 0
self._closed = True
async def exhaust(self) -> None:
"""Consume and immediately discard any remaining data in the stream."""
if self._closed:
raise ValueError(
'This stream is closed; no further operations on it are permitted.'
)
self._buffer = b''
while self._bytes_remaining > 0:
event = await self._receive()
if event['type'] == 'http.disconnect':
self._bytes_remaining = 0
else:
try:
num_bytes = len(event['body'])
except KeyError:
# NOTE(kgriffs): The ASGI spec states that 'body' is optional.
num_bytes = 0
self._bytes_remaining -= num_bytes
self._pos += num_bytes
if not ('more_body' in event and event['more_body']):
self._bytes_remaining = 0
# Immediately dereference the data so it can be discarded ASAP
event = None # type: ignore[assignment]
# NOTE(kgriffs): Ensure that if we read more than expected, this
# value is normalized to zero.
self._bytes_remaining = 0
async def readall(self) -> bytes:
"""Read and return all remaining data in the request body.
Warning:
Only use this method when you can be certain that you have
enough free memory for the entire request body, and that you
have configured your web server to limit request bodies to a
reasonable size (to guard against malicious requests).
Returns:
bytes: The request body data, or ``b''`` if the body is empty or
has already been consumed.
"""
if self._closed:
raise OperationNotAllowed(
'This stream is closed; no further operations on it are permitted.'
)
if self.eof:
return b''
if self._buffer:
next_chunk = self._buffer
self._buffer = b''
chunks = [next_chunk]
else:
chunks = []
while self._bytes_remaining > 0:
event = await self._receive()
# PERF(kgriffs): Use try..except because we normally expect the
# 'body' key to be present.
try:
next_chunk = event['body']
except KeyError:
pass
else:
next_chunk_len = len(next_chunk)
if next_chunk_len <= self._bytes_remaining:
chunks.append(next_chunk)
self._bytes_remaining -= next_chunk_len
else:
# NOTE(kgriffs): Do not read more data than we are
# expecting. This *should* never happen if the
# server enforces the content-length header, but
# it is better to be safe than sorry.
chunks.append(next_chunk[: self._bytes_remaining])
self._bytes_remaining = 0
# NOTE(kgriffs): This also handles the case of receiving
# the event: {'type': 'http.disconnect'}
if not ('more_body' in event and event['more_body']):
self._bytes_remaining = 0
data = chunks[0] if len(chunks) == 1 else b''.join(chunks)
self._pos += len(data)
return data
async def read(self, size: int | None = None) -> bytes:
"""Read some or all of the remaining bytes in the request body.
Warning:
A size should always be specified, unless you can be certain that
you have enough free memory for the entire request body, and that
you have configured your web server to limit request bodies to a
reasonable size (to guard against malicious requests).
Warning:
Apps may not use both ``read()`` and the asynchronous iterator
interface to consume the same request body; the only time that
it is safe to do so is when one or the other method is used to
completely read the entire body *before* the other method is
even attempted. Therefore, it is important to always call
:meth:`~.exhaust` or :meth:`~.close` if a body has only been
partially read and the remaining data is to be ignored.
Keyword Args:
size (int): The maximum number of bytes to read. The actual
amount of data that can be read will depend on how much is
available, and may be smaller than the amount requested. If the
size is -1 or not specified, all remaining data is read and
returned.
Returns:
bytes: The request body data, or ``b''`` if the body is empty or
has already been consumed.
"""
if self._closed:
raise OperationNotAllowed(
'This stream is closed; no further operations on it are permitted.'
)
if self.eof:
return b''
if size is None or size == -1:
return await self.readall()
if size <= 0:
return b''
if self._buffer:
num_bytes_available = len(self._buffer)
chunks = [self._buffer]
else:
num_bytes_available = 0
chunks = []
while self._bytes_remaining > 0 and num_bytes_available < size:
event = await self._receive()
# PERF(kgriffs): Use try..except because we normally expect the
# 'body' key to be present.
try:
next_chunk = event['body']
except KeyError:
pass
else:
next_chunk_len = len(next_chunk)
if next_chunk_len <= self._bytes_remaining:
chunks.append(next_chunk)
self._bytes_remaining -= next_chunk_len
num_bytes_available += next_chunk_len
else:
# NOTE(kgriffs): Do not read more data than we are
# expecting. This *should* never happen, but better
# safe than sorry.
chunks.append(next_chunk[: self._bytes_remaining])
self._bytes_remaining = 0
num_bytes_available += self._bytes_remaining
# NOTE(kgriffs): This also handles the case of receiving
# the event: {'type': 'http.disconnect'}
if not ('more_body' in event and event['more_body']):
self._bytes_remaining = 0
self._buffer = chunks[0] if len(chunks) == 1 else b''.join(chunks)
if num_bytes_available <= size:
data = self._buffer
self._buffer = b''
else:
data = self._buffer[:size]
self._buffer = self._buffer[size:]
self._pos += len(data)
return data
async def _iter_content(self) -> AsyncIterator[bytes]:
if self._closed:
raise OperationNotAllowed(
'This stream is closed; no further operations on it are permitted.'
)
if self.eof:
return
if self._iteration_started:
raise OperationNotAllowed('This stream is already being iterated over.')
self._iteration_started = True
if self._buffer:
next_chunk = self._buffer
self._buffer = b''
self._pos += len(next_chunk)
yield next_chunk
while self._bytes_remaining > 0:
event = await self._receive()
# PERF(kgriffs): Use try...except because we normally expect the
# 'body' key to be present.
try:
next_chunk = event['body']
except KeyError:
pass
else:
# NOTE(kgriffs): No need to yield empty body chunks.
if next_chunk:
next_chunk_len = len(next_chunk)
if next_chunk_len <= self._bytes_remaining:
self._bytes_remaining -= next_chunk_len
self._pos += next_chunk_len
else:
# NOTE(kgriffs): We received more data than expected,
# so truncate to the expected length.
next_chunk = next_chunk[: self._bytes_remaining]
self._pos += self._bytes_remaining
self._bytes_remaining = 0
yield next_chunk
# NOTE(kgriffs): Per the ASGI spec, more_body is optional
# and should be considered False if not present.
# NOTE(kgriffs): This also handles the case of receiving
# the event: {'type': 'http.disconnect'}
# PERF(kgriffs): event.get() is more elegant, but uses a
# few more CPU cycles.
if not ('more_body' in event and event['more_body']):
self._bytes_remaining = 0
| BoundedStream |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/core.py | {
"start": 4537,
"end": 4765
} | class ____:
args: Any
kwargs: Any
# Plus two optional arguments for .xfail()
raises: Any = field(default=None)
reason: Any = field(default=None)
# TODO_DOCS link to not-yet-existent patch-dumping docs
| Example |
python | ray-project__ray | python/ray/serve/_private/version.py | {
"start": 486,
"end": 9283
} | class ____:
def __init__(
self,
code_version: Optional[str],
deployment_config: DeploymentConfig,
ray_actor_options: Optional[Dict],
placement_group_bundles: Optional[List[Dict[str, float]]] = None,
placement_group_strategy: Optional[str] = None,
max_replicas_per_node: Optional[int] = None,
route_prefix: Optional[str] = None,
):
if code_version is not None and not isinstance(code_version, str):
raise TypeError(f"code_version must be str, got {type(code_version)}.")
if code_version is None:
self.code_version = get_random_string()
else:
self.code_version = code_version
# Options for this field may be mutated over time, so any logic that uses this
# should access this field directly.
self.deployment_config = deployment_config
self.ray_actor_options = ray_actor_options
self.placement_group_bundles = placement_group_bundles
self.placement_group_strategy = placement_group_strategy
self.max_replicas_per_node = max_replicas_per_node
self.route_prefix = route_prefix
self.compute_hashes()
@classmethod
def from_deployment_version(
cls, deployment_version, deployment_config, route_prefix: Optional[str] = None
):
version_copy = deepcopy(deployment_version)
version_copy.deployment_config = deployment_config
version_copy.route_prefix = route_prefix
version_copy.compute_hashes()
return version_copy
def __hash__(self) -> int:
return self._hash
def __eq__(self, other: Any) -> bool:
if not isinstance(other, DeploymentVersion):
return False
return self._hash == other._hash
def requires_actor_restart(self, new_version):
"""Determines whether the new version requires actors of the current version to
be restarted.
"""
return (
self.code_version != new_version.code_version
or self.ray_actor_options_hash != new_version.ray_actor_options_hash
or self.placement_group_options_hash
!= new_version.placement_group_options_hash
or self.max_replicas_per_node != new_version.max_replicas_per_node
)
def requires_actor_reconfigure(self, new_version):
"""Determines whether the new version requires calling reconfigure() on the
replica actor.
"""
return self.reconfigure_actor_hash != new_version.reconfigure_actor_hash
def requires_long_poll_broadcast(self, new_version):
"""Determines whether lightweightly updating an existing replica to the new
version requires broadcasting through long poll that the running replicas has
changed.
"""
return (
self.deployment_config.max_ongoing_requests
!= new_version.deployment_config.max_ongoing_requests
)
def compute_hashes(self):
# If these change, the controller will rolling upgrade existing replicas.
serialized_ray_actor_options = _serialize(self.ray_actor_options or {})
self.ray_actor_options_hash = crc32(serialized_ray_actor_options)
combined_placement_group_options = {}
if self.placement_group_bundles is not None:
combined_placement_group_options["bundles"] = self.placement_group_bundles
if self.placement_group_strategy is not None:
combined_placement_group_options["strategy"] = self.placement_group_strategy
serialized_placement_group_options = _serialize(
combined_placement_group_options
)
self.placement_group_options_hash = crc32(serialized_placement_group_options)
# Include app-level route prefix in the version hashes so changing
# it triggers an in-place reconfigure of running replicas.
serialized_route_prefix = _serialize(self.route_prefix)
# If this changes, DeploymentReplica.reconfigure() will call reconfigure on the
# actual replica actor
self.reconfigure_actor_hash = crc32(
serialized_route_prefix
+ self._get_serialized_options(
[DeploymentOptionUpdateType.NeedsActorReconfigure]
)
)
# Used by __eq__ in deployment state to either reconfigure the replicas or
# stop and restart them
self._hash = crc32(
self.code_version.encode("utf-8")
+ serialized_ray_actor_options
+ serialized_placement_group_options
+ str(self.max_replicas_per_node).encode("utf-8")
+ serialized_route_prefix
+ self._get_serialized_options(
[
DeploymentOptionUpdateType.NeedsReconfigure,
DeploymentOptionUpdateType.NeedsActorReconfigure,
]
)
)
def to_proto(self) -> bytes:
# TODO(simon): enable cross language user config
return DeploymentVersionProto(
code_version=self.code_version,
deployment_config=self.deployment_config.to_proto(),
ray_actor_options=json.dumps(self.ray_actor_options),
placement_group_bundles=json.dumps(self.placement_group_bundles)
if self.placement_group_bundles is not None
else "",
placement_group_strategy=self.placement_group_strategy
if self.placement_group_strategy is not None
else "",
max_replicas_per_node=self.max_replicas_per_node
if self.max_replicas_per_node is not None
else 0,
)
@classmethod
def from_proto(cls, proto: DeploymentVersionProto):
return DeploymentVersion(
proto.code_version,
DeploymentConfig.from_proto(proto.deployment_config),
json.loads(proto.ray_actor_options),
placement_group_bundles=(
json.loads(proto.placement_group_bundles)
if proto.placement_group_bundles
else None
),
placement_group_version=(
proto.placement_group_version if proto.placement_group_version else None
),
max_replicas_per_node=(
proto.max_replicas_per_node if proto.max_replicas_per_node else None
),
)
def _get_serialized_options(
self, update_types: List[DeploymentOptionUpdateType]
) -> bytes:
"""Returns a serialized dictionary containing fields of a deployment config that
should prompt a deployment version update.
"""
reconfigure_dict = {}
# TODO(aguo): Once we only support pydantic 2, we can remove this if check.
# In pydantic 2.0, `__fields__` has been renamed to `model_fields`.
fields = (
self.deployment_config.model_fields
if hasattr(self.deployment_config, "model_fields")
else self.deployment_config.__fields__
)
for option_name, field in fields.items():
option_weight = field.field_info.extra.get("update_type")
if option_weight in update_types:
reconfigure_dict[option_name] = getattr(
self.deployment_config, option_name
)
# If autoscaling config was changed, only broadcast to
# replicas if metrics_interval_s or look_back_period_s
# was changed, because the rest of the fields are only
# used in deployment state manager
if isinstance(reconfigure_dict[option_name], AutoscalingConfig):
reconfigure_dict[option_name] = reconfigure_dict[option_name].dict(
include={"metrics_interval_s", "look_back_period_s"}
)
elif isinstance(reconfigure_dict[option_name], BaseModel):
reconfigure_dict[option_name] = reconfigure_dict[option_name].dict()
# Can't serialize bytes. The request router class is already
# included in the serialized config as request_router_class.
if "request_router_config" in reconfigure_dict:
reconfigure_dict["request_router_config"].pop(
"_serialized_request_router_cls", None
)
if (
isinstance(self.deployment_config.user_config, bytes)
and "user_config" in reconfigure_dict
):
del reconfigure_dict["user_config"]
return self.deployment_config.user_config + _serialize(reconfigure_dict)
return _serialize(reconfigure_dict)
def _serialize(json_object):
return str.encode(json.dumps(json_object, sort_keys=True))
| DeploymentVersion |
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/struct_store/sql.py | {
"start": 928,
"end": 991
} | class ____(str, Enum):
SQL = "sql"
NL = "nl"
| SQLQueryMode |
python | walkccc__LeetCode | solutions/36. Valid Sudoku/36.py | {
"start": 0,
"end": 535
} | class ____:
def isValidSudoku(self, board: list[list[str]]) -> bool:
seen = set()
for i in range(9):
for j in range(9):
c = board[i][j]
if c == '.':
continue
if (c + '@row ' + str(i) in seen or
c + '@col ' + str(j) in seen or
c + '@box ' + str(i // 3) + str(j // 3) in seen):
return False
seen.add(c + '@row ' + str(i))
seen.add(c + '@col ' + str(j))
seen.add(c + '@box ' + str(i // 3) + str(j // 3))
return True
| Solution |
python | pytorch__pytorch | torch/_subclasses/fake_tensor.py | {
"start": 44280,
"end": 44532
} | class ____:
"""
Entry type for a negative cache entry.
"""
reason: str
if TYPE_CHECKING:
_DispatchCacheEntry = Union[_DispatchCacheValidEntry, _DispatchCacheBypassEntry]
@dataclass(frozen=True, slots=True)
| _DispatchCacheBypassEntry |
python | doocs__leetcode | solution/1500-1599/1584.Min Cost to Connect All Points/Solution.py | {
"start": 0,
"end": 795
} | class ____:
def minCostConnectPoints(self, points: List[List[int]]) -> int:
n = len(points)
g = [[0] * n for _ in range(n)]
dist = [inf] * n
vis = [False] * n
for i, (x1, y1) in enumerate(points):
for j in range(i + 1, n):
x2, y2 = points[j]
t = abs(x1 - x2) + abs(y1 - y2)
g[i][j] = g[j][i] = t
dist[0] = 0
ans = 0
for _ in range(n):
i = -1
for j in range(n):
if not vis[j] and (i == -1 or dist[j] < dist[i]):
i = j
vis[i] = True
ans += dist[i]
for j in range(n):
if not vis[j]:
dist[j] = min(dist[j], g[i][j])
return ans
| Solution |
python | django__django | tests/fixtures_regress/models.py | {
"start": 615,
"end": 847
} | class ____(models.Model):
name = models.CharField(max_length=20, null=True)
owner = models.ForeignKey(User, models.SET_NULL, null=True)
def __str__(self):
return self.name + " is owned by " + str(self.owner)
| Stuff |
python | plotly__plotly.py | plotly/graph_objs/cone/colorbar/_tickfont.py | {
"start": 233,
"end": 9903
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "cone.colorbar"
_path_str = "cone.colorbar.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.cone.colorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.cone.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.cone.colorbar.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickfont |
python | ijl__orjson | test/test_fragment.py | {
"start": 209,
"end": 3001
} | class ____:
def test_fragment_fragment_eq(self):
assert orjson.Fragment(b"{}") != orjson.Fragment(b"{}")
def test_fragment_fragment_not_mut(self):
fragment = orjson.Fragment(b"{}")
with pytest.raises(AttributeError):
fragment.contents = b"[]"
assert orjson.dumps(fragment) == b"{}"
def test_fragment_repr(self):
assert repr(orjson.Fragment(b"{}")).startswith("<orjson.Fragment object at ")
def test_fragment_fragment_bytes(self):
assert orjson.dumps(orjson.Fragment(b"{}")) == b"{}"
assert orjson.dumps(orjson.Fragment(b"[]")) == b"[]"
assert orjson.dumps([orjson.Fragment(b"{}")]) == b"[{}]"
assert orjson.dumps([orjson.Fragment(b'{}"a\\')]) == b'[{}"a\\]'
def test_fragment_fragment_str(self):
assert orjson.dumps(orjson.Fragment("{}")) == b"{}"
assert orjson.dumps(orjson.Fragment("[]")) == b"[]"
assert orjson.dumps([orjson.Fragment("{}")]) == b"[{}]"
assert orjson.dumps([orjson.Fragment('{}"a\\')]) == b'[{}"a\\]'
def test_fragment_fragment_str_empty(self):
assert orjson.dumps(orjson.Fragment("")) == b""
def test_fragment_fragment_str_str(self):
assert orjson.dumps(orjson.Fragment('"str"')) == b'"str"'
def test_fragment_fragment_str_emoji(self):
assert orjson.dumps(orjson.Fragment('"🐈"')) == b'"\xf0\x9f\x90\x88"'
def test_fragment_fragment_str_array(self):
n = 8096
obj = [orjson.Fragment('"🐈"')] * n
ref = b"[" + b",".join(b'"\xf0\x9f\x90\x88"' for _ in range(n)) + b"]"
assert orjson.dumps(obj) == ref
def test_fragment_fragment_str_invalid(self):
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(orjson.Fragment("\ud800")) # type: ignore
def test_fragment_fragment_bytes_invalid(self):
assert orjson.dumps(orjson.Fragment(b"\\ud800")) == b"\\ud800"
def test_fragment_fragment_none(self):
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps([orjson.Fragment(None)]) # type: ignore
def test_fragment_fragment_args_zero(self):
with pytest.raises(TypeError):
orjson.dumps(orjson.Fragment())
def test_fragment_fragment_args_two(self):
with pytest.raises(TypeError):
orjson.dumps(orjson.Fragment(b"{}", None)) # type: ignore
def test_fragment_fragment_keywords(self):
with pytest.raises(TypeError):
orjson.dumps(orjson.Fragment(contents=b"{}")) # type: ignore
def test_fragment_fragment_arg_and_keywords(self):
with pytest.raises(TypeError):
orjson.dumps(orjson.Fragment(b"{}", contents=b"{}")) # type: ignore
@pytest.mark.skipif(pd is None, reason="pandas is not installed")
| TestFragment |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/pygments/formatters/other.py | {
"start": 3983,
"end": 5034
} | class ____(Formatter):
"""
Format tokens as appropriate for a new testcase.
.. versionadded:: 2.0
"""
name = 'Testcase'
aliases = ['testcase']
def __init__(self, **options):
Formatter.__init__(self, **options)
if self.encoding is not None and self.encoding != 'utf-8':
raise ValueError("Only None and utf-8 are allowed encodings.")
def format(self, tokensource, outfile):
indentation = ' ' * 12
rawbuf = []
outbuf = []
for ttype, value in tokensource:
rawbuf.append(value)
outbuf.append(f'{indentation}({ttype}, {value!r}),\n')
before = TESTCASE_BEFORE % (''.join(rawbuf),)
during = ''.join(outbuf)
after = TESTCASE_AFTER
if self.encoding is None:
outfile.write(before + during + after)
else:
outfile.write(before.encode('utf-8'))
outfile.write(during.encode('utf-8'))
outfile.write(after.encode('utf-8'))
outfile.flush()
| TestcaseFormatter |
python | ashishps1__awesome-system-design-resources | implementations/python/load_balancing_algorithms/ip_hash.py | {
"start": 16,
"end": 569
} | class ____():
def __init__(self, servers):
self.servers = servers
def get_next_server(self, client_ip):
hash_value = hashlib.md5(client_ip.encode()).hexdigest()
index = int(hash_value, 16) % len(self.servers)
return self.servers[index]
# Example usage
servers = ["Server1", "Server2", "Server3"]
load_balancer = IPHash(servers)
client_ips = ["192.168.0.1", "192.168.0.2", "192.168.0.3", "192.168.0.4"]
for ip in client_ips:
server = load_balancer.get_next_server(ip)
print(f"Client {ip} -> {server}") | IPHash |
python | huggingface__transformers | tests/cli/test_serve.py | {
"start": 35381,
"end": 35977
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.port = 8042
thread = Thread(target=Serve, kwargs={"port": cls.port})
thread.daemon = True
thread.start()
def test_healthcheck(self):
"""Tests that the healthcheck endpoint works."""
response = _call_healthcheck(f"http://localhost:{self.port}")
self.assertIsNotNone(response, "Failed to connect to the server health endpoint.")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {"status": "ok"})
| ServeInfrastructureTest |
python | ansible__ansible | lib/ansible/plugins/shell/__init__.py | {
"start": 1148,
"end": 1351
} | class ____:
"""Internal type returned by shell subsystems that may require both an execution payload and a command (eg powershell)."""
command: str
input_data: bytes | None = None
| _ShellCommand |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_trace.py | {
"start": 28170,
"end": 35424
} | class ____(OrganizationEventsV2EndpointBase):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
def get_projects(
self,
request: HttpRequest,
organization: Organization | RpcOrganization,
force_global_perms: bool = False,
include_all_accessible: bool = False,
project_ids: set[int] | None = None,
project_slugs: set[str] | None = None,
) -> list[Project]:
"""The trace endpoint always wants to get all projects regardless of what's passed into the API
This is because a trace can span any number of projects in an organization. But we still want to
use the get_projects function to check for any permissions. So we'll just pass project_ids=-1 everytime
which is what would be sent if we wanted all projects"""
return super().get_projects(
request,
organization,
project_ids={-1},
project_slugs=None,
include_all_accessible=True,
)
def has_feature(self, organization: Organization, request: HttpRequest) -> bool:
return bool(
features.has("organizations:performance-view", organization, actor=request.user)
)
@staticmethod
def serialize_error(event: SnubaError) -> TraceError:
timestamp = datetime.fromisoformat(event["timestamp"]).timestamp()
if "timestamp_ms" in event and event["timestamp_ms"] is not None:
timestamp = datetime.fromisoformat(event["timestamp_ms"]).timestamp()
return {
"event_id": event["id"],
"issue_id": event["issue.id"],
"span": event["trace.span"],
"project_id": event["project.id"],
"project_slug": event["project"],
"title": event["title"],
"level": event["tags[level]"],
"message": event["message"],
"timestamp": timestamp,
"event_type": "error",
"generation": 0,
}
@staticmethod
def construct_parent_map(
events: Sequence[SnubaTransaction],
) -> dict[str, list[SnubaTransaction]]:
"""A mapping of span ids to their transactions
- Transactions are associated to each other via parent_span_id
"""
parent_map: dict[str, list[SnubaTransaction]] = defaultdict(list)
for item in events:
if not is_root(item):
parent_map[item["trace.parent_span"]].append(item)
return parent_map
@staticmethod
def construct_error_map(events: Sequence[SnubaError]) -> dict[str, list[SnubaError]]:
"""A mapping of span ids to their errors
key depends on the event type:
- Errors are associated to transactions via span_id
"""
parent_map: dict[str, list[SnubaError]] = defaultdict(list)
for item in events:
if item["trace.span"] is None:
continue
parent_map[item["trace.span"]].append(item)
return parent_map
@staticmethod
def record_analytics(
transactions: Sequence[SnubaTransaction], trace_id: str, user_id: int, org_id: int
) -> None:
with sentry_sdk.start_span(op="recording.analytics"):
len_transactions = len(transactions)
sentry_sdk.set_tag("trace_view.trace", trace_id)
sentry_sdk.set_tag("trace_view.transactions", len_transactions)
sentry_sdk.set_tag(
"trace_view.transactions.grouped", format_grouped_length(len_transactions)
)
set_span_attribute("trace_view.transactions", len_transactions)
projects: set[int] = set()
for transaction in transactions:
projects.add(transaction["project.id"])
len_projects = len(projects)
sentry_sdk.set_tag("trace_view.projects", len_projects)
sentry_sdk.set_tag("trace_view.projects.grouped", format_grouped_length(len_projects))
set_span_attribute("trace_view.projects", len_projects)
def get(self, request: Request, organization: Organization, trace_id: str) -> HttpResponse:
if not request.user.is_authenticated:
return Response(status=400)
if not self.has_feature(organization, request):
return Response(status=404)
try:
snuba_params = self.get_snuba_params(request, organization)
except NoProjects:
return Response(status=404)
# Detailed is deprecated now that we want to use spans instead
detailed = request.GET.get("detailed", "0") == "1"
update_snuba_params_with_timestamp(request, snuba_params)
limit = min(int(request.GET.get("limit", MAX_TRACE_SIZE)), 10_000)
event_id = (
request.GET.get("targetId") or request.GET.get("event_id") or request.GET.get("eventId")
)
# Only need to validate event_id as trace_id is validated in the URL
if event_id and not (is_event_id(event_id) or is_span_id(event_id)):
return Response({"detail": INVALID_ID_DETAILS.format("Event ID")}, status=400)
query_source = self.get_request_source(request)
with handle_query_errors():
transaction_params = create_transaction_params(
trace_id, snuba_params, query_source=query_source
)
transactions, errors = query_trace_data(
trace_id,
snuba_params,
transaction_params,
limit,
None,
query_source=query_source,
)
self.record_analytics(transactions, trace_id, request.user.id, organization.id)
warning_extra: dict[str, str] = {"trace": trace_id, "organization": organization.slug}
# Look for all root transactions in the trace (i.e., transactions
# that explicitly have no parent span id)
roots: list[SnubaTransaction] = []
for item in transactions:
if is_root(item):
roots.append(item)
else:
# This is okay because the query does an order by on -root
break
if len(roots) > 1:
sentry_sdk.set_tag("discover.trace-view.warning", "root.extra-found")
logger.warning(
"discover.trace-view.root.extra-found",
extra={"extra_roots": len(roots), **warning_extra},
)
return Response(
self.serialize(
limit,
transactions,
errors,
roots,
warning_extra,
event_id,
detailed,
query_source=self.get_request_source(request),
)
)
@abc.abstractmethod
def serialize(
self,
limit: int,
transactions: Sequence[SnubaTransaction],
errors: Sequence[SnubaError],
roots: Sequence[SnubaTransaction],
warning_extra: dict[str, str],
event_id: str | None,
detailed: bool = False,
query_source: QuerySource | None = None,
) -> Any:
raise NotImplementedError
@region_silo_endpoint
| OrganizationEventsTraceEndpointBase |
python | celery__celery | t/unit/app/test_app.py | {
"start": 60243,
"end": 60541
} | class ____:
def test_strtobool(self):
for s in ('false', 'no', '0'):
assert not defaults.strtobool(s)
for s in ('true', 'yes', '1'):
assert defaults.strtobool(s)
with pytest.raises(TypeError):
defaults.strtobool('unsure')
| test_defaults |
python | pandas-dev__pandas | asv_bench/benchmarks/algos/isin.py | {
"start": 3130,
"end": 3809
} | class ____:
params = [
[np.float64, np.object_],
[
1_300,
2_000,
7_000,
8_000,
70_000,
80_000,
750_000,
900_000,
],
["inside", "outside"],
]
param_names = ["dtype", "size", "title"]
def setup(self, dtype, size, title):
self.values = np.random.rand(size)
self.series = Series(self.values).astype(dtype)
np.random.shuffle(self.values)
if title == "outside":
self.values = self.values + 0.1
def time_isin(self, dtype, size, title):
self.series.isin(self.values)
| IsinWithRandomFloat |
python | pandas-dev__pandas | pandas/core/indexes/datetimelike.py | {
"start": 13038,
"end": 29219
} | class ____(DatetimeIndexOpsMixin, ABC):
"""
Mixin class for methods shared by DatetimeIndex and TimedeltaIndex,
but not PeriodIndex
"""
_data: DatetimeArray | TimedeltaArray
_comparables = ["name", "freq"]
_attributes = ["name", "freq"]
# Compat for frequency inference, see GH#23789
_is_monotonic_increasing = Index.is_monotonic_increasing
_is_monotonic_decreasing = Index.is_monotonic_decreasing
_is_unique = Index.is_unique
@property
def unit(self) -> TimeUnit:
return self._data.unit
def as_unit(self, unit: TimeUnit) -> Self:
"""
Convert to a dtype with the given unit resolution.
This method is for converting the dtype of a ``DatetimeIndex`` or
``TimedeltaIndex`` to a new dtype with the given unit
resolution/precision.
Parameters
----------
unit : {'s', 'ms', 'us', 'ns'}
Returns
-------
same type as self
Converted to the specified unit.
See Also
--------
Timestamp.as_unit : Convert to the given unit.
Timedelta.as_unit : Convert to the given unit.
DatetimeIndex.as_unit : Convert to the given unit.
TimedeltaIndex.as_unit : Convert to the given unit.
Examples
--------
For :class:`pandas.DatetimeIndex`:
>>> idx = pd.DatetimeIndex(["2020-01-02 01:02:03.004005006"])
>>> idx
DatetimeIndex(['2020-01-02 01:02:03.004005006'],
dtype='datetime64[ns]', freq=None)
>>> idx.as_unit("s")
DatetimeIndex(['2020-01-02 01:02:03'], dtype='datetime64[s]', freq=None)
For :class:`pandas.TimedeltaIndex`:
>>> tdelta_idx = pd.to_timedelta(["1 day 3 min 2 us 42 ns"])
>>> tdelta_idx
TimedeltaIndex(['1 days 00:03:00.000002042'],
dtype='timedelta64[ns]', freq=None)
>>> tdelta_idx.as_unit("s")
TimedeltaIndex(['1 days 00:03:00'], dtype='timedelta64[s]', freq=None)
"""
arr = self._data.as_unit(unit)
return type(self)._simple_new(arr, name=self.name)
def _with_freq(self, freq):
arr = self._data._with_freq(freq)
return type(self)._simple_new(arr, name=self._name)
@property
def values(self) -> np.ndarray:
# NB: For Datetime64TZ this is lossy
data = self._data._ndarray
data = data.view()
data.flags.writeable = False
return data
@doc(DatetimeIndexOpsMixin.shift)
def shift(self, periods: int = 1, freq=None) -> Self:
if freq is not None and freq != self.freq:
if isinstance(freq, str):
freq = to_offset(freq)
offset = periods * freq
return self + offset
if periods == 0 or len(self) == 0:
# GH#14811 empty case
return self.copy()
if self.freq is None:
raise NullFrequencyError("Cannot shift with no freq")
start = self[0] + periods * self.freq
end = self[-1] + periods * self.freq
# Note: in the DatetimeTZ case, _generate_range will infer the
# appropriate timezone from `start` and `end`, so tz does not need
# to be passed explicitly.
result = self._data._generate_range(
start=start, end=end, periods=None, freq=self.freq, unit=self.unit
)
return type(self)._simple_new(result, name=self.name)
@cache_readonly
@doc(DatetimeLikeArrayMixin.inferred_freq)
def inferred_freq(self) -> str | None:
return self._data.inferred_freq
# --------------------------------------------------------------------
# Set Operation Methods
@cache_readonly
def _as_range_index(self) -> RangeIndex:
# Convert our i8 representations to RangeIndex
# Caller is responsible for checking isinstance(self.freq, Tick)
freq = cast(Tick, self.freq)
tick = Timedelta(freq).as_unit(self.unit)._value
rng = range(self[0]._value, self[-1]._value + tick, tick)
return RangeIndex(rng)
def _can_range_setop(self, other) -> bool:
return isinstance(self.freq, Tick) and isinstance(other.freq, Tick)
def _wrap_range_setop(self, other, res_i8) -> Self:
new_freq = None
if not len(res_i8):
# RangeIndex defaults to step=1, which we don't want.
new_freq = self.freq
elif isinstance(res_i8, RangeIndex):
new_freq = to_offset(
Timedelta(res_i8.step, unit=self.unit).as_unit(self.unit)
)
# TODO(GH#41493): we cannot just do
# type(self._data)(res_i8.values, dtype=self.dtype, freq=new_freq)
# because test_setops_preserve_freq fails with _validate_frequency raising.
# This raising is incorrect, as 'on_freq' is incorrect. This will
# be fixed by GH#41493
res_values = res_i8.values.view(self._data._ndarray.dtype)
result = type(self._data)._simple_new(
# error: Argument "dtype" to "_simple_new" of "DatetimeArray" has
# incompatible type "Union[dtype[Any], ExtensionDtype]"; expected
# "Union[dtype[datetime64], DatetimeTZDtype]"
res_values,
dtype=self.dtype, # type: ignore[arg-type]
freq=new_freq, # type: ignore[arg-type]
)
return cast("Self", self._wrap_setop_result(other, result))
def _range_intersect(self, other, sort) -> Self:
# Dispatch to RangeIndex intersection logic.
left = self._as_range_index
right = other._as_range_index
res_i8 = left.intersection(right, sort=sort)
return self._wrap_range_setop(other, res_i8)
def _range_union(self, other, sort) -> Self:
# Dispatch to RangeIndex union logic.
left = self._as_range_index
right = other._as_range_index
res_i8 = left.union(right, sort=sort)
return self._wrap_range_setop(other, res_i8)
def _intersection(self, other: Index, sort: bool = False) -> Index:
"""
intersection specialized to the case with matching dtypes and both non-empty.
"""
other = cast("DatetimeTimedeltaMixin", other)
if self._can_range_setop(other):
return self._range_intersect(other, sort=sort)
if not self._can_fast_intersect(other):
result = Index._intersection(self, other, sort=sort)
# We need to invalidate the freq because Index._intersection
# uses _shallow_copy on a view of self._data, which will preserve
# self.freq if we're not careful.
# At this point we should have result.dtype == self.dtype
# and type(result) is type(self._data)
result = self._wrap_setop_result(other, result)
# error: "Index" has no attribute "_with_freq"; maybe "_with_infer"?
return result._with_freq(None)._with_freq("infer") # type: ignore[attr-defined]
else:
return self._fast_intersect(other, sort)
def _fast_intersect(self, other, sort):
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
# after sorting, the intersection always starts with the right index
# and ends with the index of which the last elements is smallest
end = min(left[-1], right[-1])
start = right[0]
if end < start:
result = self[:0]
else:
lslice = slice(*left.slice_locs(start, end))
result = left._values[lslice]
return result
def _can_fast_intersect(self, other: Self) -> bool:
# Note: we only get here with len(self) > 0 and len(other) > 0
if self.freq is None:
return False
elif other.freq != self.freq:
return False
elif not self.is_monotonic_increasing:
# Because freq is not None, we must then be monotonic decreasing
return False
# this along with matching freqs ensure that we "line up",
# so intersection will preserve freq
# Note we are assuming away Ticks, as those go through _range_intersect
# GH#42104
return self.freq.n == 1
def _can_fast_union(self, other: Self) -> bool:
# Assumes that type(self) == type(other), as per the annotation
# The ability to fast_union also implies that `freq` should be
# retained on union.
freq = self.freq
if freq is None or freq != other.freq:
return False
if not self.is_monotonic_increasing:
# Because freq is not None, we must then be monotonic decreasing
# TODO: do union on the reversed indexes?
return False
if len(self) == 0 or len(other) == 0:
# only reached via union_many
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
right_start = right[0]
left_end = left[-1]
# Only need to "adjoin", not overlap
return (right_start == left_end + freq) or right_start in left
def _fast_union(self, other: Self, sort=None) -> Self:
# Caller is responsible for ensuring self and other are non-empty
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
elif sort is False:
# TDIs are not in the "correct" order and we don't want
# to sort but want to remove overlaps
left, right = self, other
left_start = left[0]
loc = right.searchsorted(left_start, side="left")
right_chunk = right._values[:loc]
dates = concat_compat((left._values, right_chunk))
result = type(self)._simple_new(dates, name=self.name)
return result
else:
left, right = other, self
left_end = left[-1]
right_end = right[-1]
# concatenate
if left_end < right_end:
loc = right.searchsorted(left_end, side="right")
right_chunk = right._values[loc:]
dates = concat_compat([left._values, right_chunk])
# The can_fast_union check ensures that the result.freq
# should match self.freq
assert isinstance(dates, type(self._data))
# error: Item "ExtensionArray" of "ExtensionArray |
# ndarray[Any, Any]" has no attribute "_freq"
assert dates._freq == self.freq # type: ignore[union-attr]
result = type(self)._simple_new(dates)
return result
else:
return left
def _union(self, other, sort):
# We are called by `union`, which is responsible for this validation
assert isinstance(other, type(self))
assert self.dtype == other.dtype
if self._can_range_setop(other):
return self._range_union(other, sort=sort)
if self._can_fast_union(other):
result = self._fast_union(other, sort=sort)
# in the case with sort=None, the _can_fast_union check ensures
# that result.freq == self.freq
return result
else:
return super()._union(other, sort)._with_freq("infer")
# --------------------------------------------------------------------
# Join Methods
def _get_join_freq(self, other):
"""
Get the freq to attach to the result of a join operation.
"""
freq = None
if self._can_fast_union(other):
freq = self.freq
return freq
def _wrap_join_result(
self,
joined,
other,
lidx: npt.NDArray[np.intp] | None,
ridx: npt.NDArray[np.intp] | None,
how: JoinHow,
) -> tuple[Self, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
assert other.dtype == self.dtype, (other.dtype, self.dtype)
join_index, lidx, ridx = super()._wrap_join_result(
joined, other, lidx, ridx, how
)
join_index._data._freq = self._get_join_freq(other)
return join_index, lidx, ridx
def _get_engine_target(self) -> np.ndarray:
# engine methods and libjoin methods need dt64/td64 values cast to i8
return self._data._ndarray.view("i8")
def _from_join_target(self, result: np.ndarray):
# view e.g. i8 back to M8[ns]
result = result.view(self._data._ndarray.dtype)
return self._data._from_backing_data(result)
# --------------------------------------------------------------------
# List-like Methods
def _get_delete_freq(self, loc: int | slice | Sequence[int]):
"""
Find the `freq` for self.delete(loc).
"""
freq = None
if self.freq is not None:
if is_integer(loc):
if loc in (0, -len(self), -1, len(self) - 1):
freq = self.freq
else:
if is_list_like(loc):
# error: Incompatible types in assignment (expression has
# type "Union[slice, ndarray]", variable has type
# "Union[int, slice, Sequence[int]]")
loc = lib.maybe_indices_to_slice( # type: ignore[assignment]
np.asarray(loc, dtype=np.intp), len(self)
)
if isinstance(loc, slice) and loc.step in (1, None):
if loc.start in (0, None) or loc.stop in (len(self), None):
freq = self.freq
return freq
def _get_insert_freq(self, loc: int, item):
"""
Find the `freq` for self.insert(loc, item).
"""
value = self._data._validate_scalar(item)
item = self._data._box_func(value)
freq = None
if self.freq is not None:
# freq can be preserved on edge cases
if self.size:
if item is NaT:
pass
elif loc in (0, -len(self)) and item + self.freq == self[0]:
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
else:
# Adding a single item to an empty index may preserve freq
if isinstance(self.freq, Tick):
# all TimedeltaIndex cases go through here; is_on_offset
# would raise TypeError
freq = self.freq
elif self.freq.is_on_offset(item):
freq = self.freq
return freq
@doc(NDArrayBackedExtensionIndex.delete)
def delete(self, loc) -> Self:
result = super().delete(loc)
result._data._freq = self._get_delete_freq(loc)
return result
@doc(NDArrayBackedExtensionIndex.insert)
def insert(self, loc: int, item):
result = super().insert(loc, item)
if isinstance(result, type(self)):
# i.e. parent class method did not cast
result._data._freq = self._get_insert_freq(loc, item)
return result
# --------------------------------------------------------------------
# NDArray-Like Methods
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(
self,
indices,
axis: Axis = 0,
allow_fill: bool = True,
fill_value=None,
**kwargs,
) -> Self:
nv.validate_take((), kwargs)
indices = np.asarray(indices, dtype=np.intp)
result = NDArrayBackedExtensionIndex.take(
self, indices, axis, allow_fill, fill_value, **kwargs
)
maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
if isinstance(maybe_slice, slice):
freq = self._data._get_getitem_freq(maybe_slice)
result._data._freq = freq
return result
| DatetimeTimedeltaMixin |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/sensors/test_base_aws.py | {
"start": 2315,
"end": 5435
} | class ____:
def test_default_parameters(self):
op = FakeDynamoDBSensor(task_id="fake_task_id")
msg = "Attention! Changes in default parameters might produce breaking changes in multiple sensors"
assert op.aws_conn_id == "aws_default", msg
assert op.region_name is None, msg
assert op.verify is None, msg
assert op.botocore_config is None, msg
def test_parameters(self):
op = FakeDynamoDBSensor(
task_id="fake-task-id",
aws_conn_id=TEST_CONN,
region_name="eu-central-1",
verify=False,
botocore_config={"read_timeout": 777, "connect_timeout": 42},
)
assert op.aws_conn_id == TEST_CONN
assert op.region_name == "eu-central-1"
assert op.verify is False
assert op.botocore_config == {"read_timeout": 777, "connect_timeout": 42}
hook = op.hook
assert isinstance(hook, FakeDynamoDbHook)
assert hook.aws_conn_id == op.aws_conn_id
assert hook._region_name == op.region_name
assert hook._verify == op.verify
assert hook._config.read_timeout == 777
assert hook._config.connect_timeout == 42
@pytest.mark.db_test
@pytest.mark.parametrize(
"op_kwargs",
[
pytest.param(
{
"aws_conn_id": TEST_CONN,
"region_name": "eu-central-1",
"verify": False,
"botocore_config": {"read_timeout": 777, "connect_timeout": 42},
},
id="all-params-provided",
),
pytest.param({}, id="default-only"),
],
)
def test_execute(self, dag_maker, op_kwargs):
with dag_maker("test_aws_base_sensor", serialized=True):
FakeDynamoDBSensor(task_id="fake-task-id", **op_kwargs, poke_interval=1)
dagrun = dag_maker.create_dagrun(logical_date=timezone.utcnow())
tis = {ti.task_id: ti for ti in dagrun.task_instances}
tis["fake-task-id"].run()
def test_no_aws_hook_class_attr(self):
class NoAwsHookClassSensor(AwsBaseSensor): ...
error_match = r"Class attribute 'NoAwsHookClassSensor\.aws_hook_class' should be set"
with pytest.raises(AttributeError, match=error_match):
NoAwsHookClassSensor(task_id="fake-task-id")
def test_aws_hook_class_wrong_hook_type(self):
class WrongHookSensor(AwsBaseSensor):
aws_hook_class = BaseHook
error_match = r"Class attribute 'WrongHookSensor.aws_hook_class' is not a subclass of AwsGenericHook"
with pytest.raises(AttributeError, match=error_match):
WrongHookSensor(task_id="fake-task-id")
def test_aws_hook_class_class_instance(self):
class SoWrongSensor(AwsBaseSensor):
aws_hook_class = FakeDynamoDbHook()
error_match = r"Class attribute 'SoWrongSensor.aws_hook_class' is not a subclass of AwsGenericHook"
with pytest.raises(AttributeError, match=error_match):
SoWrongSensor(task_id="fake-task-id")
| TestAwsBaseSensor |
python | apache__airflow | providers/google/tests/integration/google/cloud/transfers/test_mssql_to_gcs.py | {
"start": 1468,
"end": 3419
} | class ____:
def setup_method(self):
os.environ["AIRFLOW_CONN_MSSQL_DEFAULT"] = AIRFLOW_CONN_MSSQL_DEFAULT
hook = MsSqlHook()
conn = hook.get_conn()
hook.set_autocommit(conn, True)
self.cursor = conn.cursor()
self.cursor.execute(f"""CREATE TABLE {TEST_TABLE_ID} (
PersonID int,
LastName varchar(255),
FirstName varchar(255),
Address varchar(255),
City varchar(255)
)""")
q = f"""INSERT INTO {TEST_TABLE_ID} (
PersonID, LastName, FirstName, Address, City
) VALUES
(0, 'Airflow', 'Apache', '1000 N West Street, Suite 1200', 'Wilmington, NC, USA')
"""
# raise Exception(q)
self.cursor.execute(q)
def teardown_method(self):
self.cursor.execute(f"DROP TABLE {TEST_TABLE_ID}")
@mock.patch("airflow.providers.google.cloud.transfers.sql_to_gcs.GCSHook")
def test_execute(self, gcs_hook_mock_class):
"""Test successful run of execute function for JSON"""
op = MSSQLToGCSOperator(
task_id=TASK_ID,
mssql_conn_id=MSSQL_CONN_ID,
sql=f"SELECT * FROM {TEST_TABLE_ID}",
bucket=BUCKET,
filename=JSON_FILENAME,
)
gcs_hook_mock = gcs_hook_mock_class.return_value
def _assert_upload(bucket, obj, tmp_filename, mime_type=None, gzip=False, metadata=None):
assert bucket == BUCKET
assert JSON_FILENAME.format(0) == obj
assert mime_type == "application/json"
assert gzip == GZIP
with open(tmp_filename, "rb") as file:
assert b"".join(NDJSON_LINES) == file.read()
gcs_hook_mock.upload.side_effect = _assert_upload
op.execute(context=mock.MagicMock())
| TestMsSqlToGoogleCloudStorageOperator |
python | realpython__materials | dwitter-part-1/source_code_final/dwitter/apps.py | {
"start": 36,
"end": 146
} | class ____(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "dwitter"
| DwitterConfig |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/dynamic_partitions_request.py | {
"start": 227,
"end": 939
} | class ____(
NamedTuple(
"_AddDynamicPartitionsRequest",
[
("partitions_def_name", str),
("partition_keys", Sequence[str]),
],
)
):
"""A request to add partitions to a dynamic partitions definition, to be evaluated by a sensor or schedule."""
def __new__(
cls,
partitions_def_name: str,
partition_keys: Sequence[str],
):
return super().__new__(
cls,
partitions_def_name=check.str_param(partitions_def_name, "partitions_def_name"),
partition_keys=check.list_param(partition_keys, "partition_keys", of_type=str),
)
@whitelist_for_serdes
@public
| AddDynamicPartitionsRequest |
python | fastai__fastai | fastai/medical/imaging.py | {
"start": 1475,
"end": 1670
} | class ____(TensorImage):
"Inherits from `TensorImage` and converts the `pixel_array` into a `TensorDicom`"
_show_args = {'cmap':'gray'}
# %% ../../nbs/60_medical.imaging.ipynb 15
| TensorDicom |
python | walkccc__LeetCode | solutions/411. Minimum Unique Word Abbreviation/411.py | {
"start": 0,
"end": 1704
} | class ____:
def minAbbreviation(self, target: str, dictionary: list[str]) -> str:
m = len(target)
def getMask(word: str) -> int:
# mask[i] = 0 := target[i] == word[i]
# mask[i] = 1 := target[i] != word[i]
# e.g. target = "apple"
# word = "blade"
# mask = 11110
mask = 0
for i, c in enumerate(word):
if c != target[i]:
mask |= 1 << m - 1 - i
return mask
masks = [getMask(word) for word in dictionary if len(word) == m]
if not masks:
return str(m)
abbrs = []
def getAbbr(cand: int) -> str:
abbr = []
replacedCount = 0
for i, c in enumerate(target):
if cand >> m - 1 - i & 1:
# If cand[i] = 1, `abbr` should show the original character.
if replacedCount:
abbr += str(replacedCount)
abbr.append(c)
replacedCount = 0
else:
# If cand[i] = 0, `abbr` can be replaced.
replacedCount += 1
if replacedCount:
abbr.append(str(replacedCount))
return ''.join(abbr)
# all the candidate representation of the target
for cand in range(2**m):
# All the masks have at lease one bit different from the candidate.
if all(cand & mask for mask in masks):
abbr = getAbbr(cand)
abbrs.append(abbr)
def getAbbrLen(abbr: str) -> int:
abbrLen = 0
i = 0
j = 0
while i < len(abbr):
if abbr[j].isalpha():
j += 1
else:
while j < len(abbr) and abbr[j].isdigit():
j += 1
abbrLen += 1
i = j
return abbrLen
return min(abbrs, key=lambda x: getAbbrLen(x))
| Solution |
python | django__django | tests/test_runner/tests.py | {
"start": 27882,
"end": 28367
} | class ____(unittest.TestCase):
def test_setup_databases(self):
"""
setup_databases() doesn't fail with dummy database backend.
"""
tested_connections = db.ConnectionHandler({})
with mock.patch("django.test.utils.connections", new=tested_connections):
runner_instance = DiscoverRunner(verbosity=0)
old_config = runner_instance.setup_databases()
runner_instance.teardown_databases(old_config)
| DummyBackendTest |
python | ray-project__ray | python/ray/air/util/object_extensions/pandas.py | {
"start": 414,
"end": 3461
} | class ____(pd.api.extensions.ExtensionArray):
"""Implements the Pandas extension array interface for the Arrow object array"""
def __init__(self, values: collections.abc.Iterable[typing.Any]):
vals = list(values)
self.values = np.empty(len(vals), dtype=object)
self.values[:] = vals
@classmethod
def _from_sequence(
cls,
scalars: collections.abc.Sequence[typing.Any],
*,
dtype: typing.Union[Dtype, None] = None,
copy: bool = False,
) -> "PythonObjectArray":
return PythonObjectArray(scalars)
@classmethod
def _from_factorized(
cls, values: collections.abc.Sequence[typing.Any], original: "PythonObjectArray"
) -> "PythonObjectArray":
return PythonObjectArray(values)
def __getitem__(self, item: PositionalIndexer) -> typing.Any:
return self.values[item]
def __setitem__(self, key, value) -> None:
self.values[key] = value
def __len__(self) -> int:
return len(self.values)
def __eq__(self, other: object) -> ArrayLike:
if isinstance(other, PythonObjectArray):
return self.values == other.values
elif isinstance(other, np.ndarray):
return self.values == other
else:
return NotImplemented
def to_numpy(
self,
dtype: typing.Union["npt.DTypeLike", None] = None,
copy: bool = False,
na_value: object = lib.no_default,
) -> np.ndarray:
result = self.values
if copy or na_value is not lib.no_default:
result = result.copy()
if na_value is not lib.no_default:
result[self.isna()] = na_value
return result
@property
def dtype(self) -> pd.api.extensions.ExtensionDtype:
return PythonObjectDtype()
@property
def nbytes(self) -> int:
return self.values.nbytes
def __arrow_array__(self, type=None):
return ray.air.util.object_extensions.arrow.ArrowPythonObjectArray.from_objects(
self.values
)
def isna(self) -> np.ndarray:
return pd.isnull(self.values)
def take(
self,
indices: TakeIndexer,
*,
allow_fill: bool = False,
fill_value: typing.Any = None,
) -> "PythonObjectArray":
if allow_fill and fill_value is None:
fill_value = self.dtype.na_value
result = pd.core.algorithms.take(
self.values, indices, allow_fill=allow_fill, fill_value=fill_value
)
return self._from_sequence(result, dtype=self.dtype)
def copy(self) -> "PythonObjectArray":
return PythonObjectArray(self.values)
@classmethod
def _concat_same_type(
cls, to_concat: collections.abc.Sequence["PythonObjectArray"]
) -> "PythonObjectArray":
values_to_concat = [element.values for element in to_concat]
return cls(np.concatenate(values_to_concat))
@PublicAPI(stability="alpha")
@pd.api.extensions.register_extension_dtype
| PythonObjectArray |
python | PrefectHQ__prefect | src/prefect/blocks/abstract.py | {
"start": 3877,
"end": 4917
} | class ____(ABC, Generic[T]): # not a block
"""
Represents a job run in an external system. Allows waiting
for the job run's completion and fetching its results.
"""
@property
def logger(self) -> LoggerOrAdapter:
"""
Returns a logger based on whether the JobRun
is called from within a flow or task run context.
If a run context is present, the logger property returns a run logger.
Else, it returns a default logger labeled with the class's name.
Returns:
The run logger or a default logger with the class's name.
"""
try:
return get_run_logger()
except MissingContextError:
return get_logger(self.__class__.__name__)
@abstractmethod
async def wait_for_completion(self) -> Logger:
"""
Wait for the job run to complete.
"""
@abstractmethod
async def fetch_result(self) -> T:
"""
Retrieve the results of the job run and return them.
"""
| JobRun |
python | ApeWorX__ape | src/ape_console/plugin.py | {
"start": 545,
"end": 3205
} | class ____(Magics):
@cached_property
def ipython(self):
if ipython := get_ipython():
return ipython
raise ValueError("Must be called from an IPython session.")
@line_magic
def ape(self, line: str = ""):
"""
Run Ape CLI commands within an ``ape console`` session.
Usage example::
%ape accounts list
"""
runner = CliRunner()
if "console" in [x.strip("\"' \t\n") for x in shlex.split(line)]:
# Prevent running console within console because usually bad
# stuff happens when you try to do this.
raise ValueError("Unable to run `console` within a console.")
result = runner.invoke(cli, line)
if result.output:
click.echo(result.output)
return result
@line_magic
def bal(self, line: str = ""):
"""
Show an account balance in human-readable form.
Usage example::
account = accounts.load("me")
%bal account
"""
if not line:
raise ValueError("Missing argument.")
provider = ape.networks.provider
ecosystem = provider.network.ecosystem
result = eval(line, self.ipython.user_global_ns, self.ipython.user_ns)
if isinstance(result, str) and result.startswith("0x"):
address = result
else:
# Handles accounts, ENS, integers, BaseAddress, and aliases.
address = ManagerAccessMixin.account_manager.resolve_address(result) or f"{result}"
decimals = ecosystem.fee_token_decimals
symbol = ecosystem.fee_token_symbol
balance = provider.get_balance(address)
return f"{round(balance / 10**decimals, 8)} {symbol}"
def custom_exception_handler(self, etype, value, tb, tb_offset=None):
project = self.user_ns["project"]
if isinstance(project, LocalProject):
path = project.path
else:
# This happens if assigned the variable `project` in your session
# to something other than ``ape.project``.
path = ManagerAccessMixin.local_project.path
if not handle_ape_exception(value, [path]):
logger.error(Abort.from_ape_exception(value).format_message())
def load_ipython_extension(ipython):
ipython.register_magics(ApeConsoleMagics)
ipython.set_custom_exc((ApeException,), custom_exception_handler)
# This prevents displaying a user's home directory
# ever when using `ape console`.
ipython.display_formatter.formatters["text/plain"].for_type(
Path, lambda x, *args, **kwargs: rich_print(clean_path(x))
)
| ApeConsoleMagics |
python | catalyst-team__catalyst | catalyst/callbacks/control_flow.py | {
"start": 1435,
"end": 3628
} | class ____:
def __init__(self, loaders: LOADERS, reverse_condition: bool):
if isinstance(loaders, str):
loaders = [loaders]
if not isinstance(loaders, (list, tuple, dict, OrderedDict)):
raise ValueError(
"'loaders' type should be one of - str, "
"Sequence[str], Mapping[str, int] or "
"Mapping[str, Sequence[int]]! "
f"(got {type(loaders)})"
)
self.loaders = loaders
self.reverse_condition = reverse_condition
# extra conditions precomputing
if isinstance(self.loaders, (list, tuple)):
self.loaders = sorted(set(self.loaders)) # ignore duplicates
elif isinstance(self.loaders, (dict, OrderedDict)):
ignore_list = {}
for loader, epochs in self.loaders.items():
if isinstance(epochs, (int, float)):
ignore_list[loader] = [int(epochs)]
else:
try:
ignore_list[loader] = []
for num in sorted(set(epochs)):
to_add = int(num)
ignore_list[loader].append(to_add)
except (ValueError, TypeError):
raise ValueError(
"'ignore_list' should be a dict where "
"keys is a int/float/List[int]/Tuple[int]!"
)
self._ignore_list = ignore_list
def __call__(self, epoch, loader):
# sequence of loaders
if isinstance(self.loaders, (list, tuple)):
if self.reverse_condition:
return loader not in self.loaders
else:
return loader in self.loaders
# loader: ignore epoch or epochs
elif isinstance(self.loaders, (dict, OrderedDict)):
if self.reverse_condition:
return epoch not in (
self._ignore_list.get(loader) or {} # {loader: [epoch]}.get(loader)
)
else:
return epoch in (self._ignore_list.get(loader) or {})
| _LoaderFilterFn |
python | django__django | tests/migrations/test_writer.py | {
"start": 1745,
"end": 1826
} | class ____(enum.Enum):
A = _("a-value")
B = _("value-b")
| TextTranslatedEnum |
python | numba__numba | numba/typed/dictobject.py | {
"start": 2215,
"end": 2638
} | class ____(models.StructModel):
def __init__(self, dmm, fe_type):
members = [
('meminfo', _meminfo_dictptr),
('data', types.voidptr), # ptr to the C dict
]
super(DictModel, self).__init__(dmm, fe_type, members)
@register_model(DictItemsIterableType)
@register_model(DictKeysIterableType)
@register_model(DictValuesIterableType)
@register_model(DictIteratorType)
| DictModel |
python | django__django | tests/serializers/models/base.py | {
"start": 3814,
"end": 3887
} | class ____(models.Model):
parent_data = models.IntegerField()
| BaseModel |
python | django__django | tests/admin_views/admin.py | {
"start": 12755,
"end": 12804
} | class ____(admin.ModelAdmin):
pass
| PictureAdmin |
python | pyodide__pyodide | tools/backport.py | {
"start": 8441,
"end": 11639
} | class ____:
"""The changelog information for a particular release of Pyodide.
Introduced by ##. Ends when there is a ##.
header:
Other than the unreleased section we don't actually bother parsing out
the changelog. So for the "prelude" and "rest" sections, this is
actually all the content.
For the unreleased and patch_release sections, this is only the content
up to the first entry or subsection. So that should include just the `##
Unreleased` line and a blank line or two.
sections:
The list of sections.
cur_section:
Parser state.
pr_index:
For the unreleased section, we populate this with information about
where the release note for each PR is. Populated by create_pr_index().
"""
header: list[str] = field(default_factory=list)
sections: list[ChangelogSection] = field(default_factory=list)
cur_section: ChangelogSection = field(default_factory=ChangelogSection)
pr_index: dict[int, PrChangelogIndex] = field(default_factory=dict)
def get_text(self) -> str:
"""Unparse the section"""
header = ""
if self.header:
header = "\n".join(self.header) + "\n"
return header + "".join(x.get_text() for x in self.sections)
def append(self, line: str) -> None:
"""Main parsing logic."""
if line.startswith("### "):
self.finish_section()
if self.cur_section or line.startswith(("-", "### ")):
self.cur_section.append(line)
else:
self.header.append(line)
def append_lines(self, lines: list[str]) -> None:
for line in lines:
self.append(line)
def finish_section(self) -> None:
"""If cur_section is nonempty, add it to entries. Then empty out cur_entry"""
if self.cur_section:
self.cur_section.finish_paragraph()
self.sections.append(self.cur_section)
self.cur_section = ChangelogSection()
def create_pr_index(self) -> None:
PR_NUMBER_RE = re.compile(r"{pr}`[0-9]+`")
for subsection_idx, subsection in enumerate(self.sections):
for paragraph_idx, paragraph in enumerate(subsection.paragraphs):
for entry_idx, entry in enumerate(paragraph.entries):
pr_strs = PR_NUMBER_RE.findall(entry.get_text())
is_unique = len(pr_strs) == 1
for pr_str in pr_strs:
pr = int(pr_str[5:-1])
self.pr_index[pr] = PrChangelogIndex(
subsection_idx, paragraph_idx, entry_idx, is_unique
)
def delete_entry(self, pr_changelog_index: PrChangelogIndex) -> None:
subsection = self.sections[pr_changelog_index.subsection]
paragraph = subsection.paragraphs[pr_changelog_index.paragraph]
del paragraph.entries[pr_changelog_index.entry]
if not paragraph.entries:
del subsection.paragraphs[pr_changelog_index.paragraph]
if not subsection.paragraphs:
del self.sections[pr_changelog_index.subsection]
@dataclass
| ChangelogVersion |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramSpec20.py | {
"start": 746,
"end": 1031
} | class ____(Generic[P2]):
def __init__(self, cb: Callable[P2, Any]) -> None: ...
def m1(self) -> X[int, Concatenate[float, P2]]: ...
y1 = Y(x4)
reveal_type(y1, expected_text="Y[(x: X[int, ...])]")
y2 = y1.m1()
reveal_type(y2, expected_text="X[int, (float, x: X[int, ...])]")
| Y |
python | realpython__materials | python-unittest/test_even.py | {
"start": 384,
"end": 843
} | class ____(unittest.TestCase):
def test_even_number(self):
for number in [2, 4, 6, -8, -10, -12]:
with self.subTest(number=number):
self.assertEqual(is_even(number), True)
def test_odd_number(self):
for number in [1, 3, 5, -7, -9, -11]:
with self.subTest(number=number):
self.assertEqual(is_even(number), False)
if __name__ == "__main__":
unittest.main(verbosity=2)
| TestIsEven |
python | Pylons__pyramid | src/pyramid/authorization.py | {
"start": 1016,
"end": 1058
} | class ____(_ACLAllowed):
pass
| ACLAllowed |
python | kamyu104__LeetCode-Solutions | Python/design-log-storage-system.py | {
"start": 169,
"end": 905
} | class ____(object):
def __init__(self):
self.__logs = []
self.__granularity = {'Year': 4, 'Month': 7, 'Day': 10, \
'Hour': 13, 'Minute': 16, 'Second': 19}
def put(self, id, timestamp):
"""
:type id: int
:type timestamp: str
:rtype: void
"""
self.__logs.append((id, timestamp))
def retrieve(self, s, e, gra):
"""
:type s: str
:type e: str
:type gra: str
:rtype: List[int]
"""
i = self.__granularity[gra]
begin = s[:i]
end = e[:i]
return sorted(id for id, timestamp in self.__logs \
if begin <= timestamp[:i] <= end)
| LogSystem |
python | google__pytype | pytype/pyi/parser_test.py | {
"start": 66972,
"end": 68102
} | class ____(parser_test_base.ParserTestBase):
def test_import(self):
self.check(
"""
import mod # type: ignore
def f(x: mod.attr) -> None: ...
""",
"""
import mod
def f(x: mod.attr) -> None: ...""",
)
def test_from_import(self):
src = textwrap.dedent("""
from mod import attr # type: ignore
def f(x: attr) -> None: ...
""")
ast = parser.parse_string(src, options=self.options)
self.assertTrue(ast.Lookup("attr"))
self.assertTrue(ast.Lookup("f"))
def test_relative_import(self):
src = textwrap.dedent("""
from . import attr # type: ignore
def f(x: attr) -> None: ...
""")
ast = parser.parse_string(src, options=self.options)
self.assertTrue(ast.Lookup("attr"))
self.assertTrue(ast.Lookup("f"))
def test_relative_import_parent(self):
src = textwrap.dedent("""
from .. import attr # type: ignore
def f(x: attr) -> None: ...
""")
ast = parser.parse_string(src, options=self.options)
self.assertTrue(ast.Lookup("attr"))
self.assertTrue(ast.Lookup("f"))
| ImportTypeIgnoreTest |
python | huggingface__transformers | src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | {
"start": 58985,
"end": 68710
} | class ____(PatchTSMixerPreTrainedModel):
r"""
`PatchTSMixer` for forecasting application.
Args:
config (`PatchTSMixerConfig`):
Configuration.
Returns:
`None`.
"""
def __init__(self, config: PatchTSMixerConfig):
super().__init__(config)
self.loss = config.loss
self.use_return_dict = config.use_return_dict
self.prediction_channel_indices = config.prediction_channel_indices
self.num_parallel_samples = config.num_parallel_samples
if config.loss == "mse":
self.distribution_output = None
else:
dim = config.prediction_length
distribution_output_map = {
"student_t": StudentTOutput,
"normal": NormalOutput,
"negative_binomial": NegativeBinomialOutput,
}
output_class = distribution_output_map.get(config.distribution_output)
if output_class is not None:
self.distribution_output = output_class(dim=dim)
else:
raise ValueError(f"Unknown distribution output {config.distribution_output}")
self.model = PatchTSMixerModel(config)
self.head = PatchTSMixerForPredictionHead(
config=config,
distribution_output=self.distribution_output,
)
# Initialize weights and apply final processing
if config.post_init:
self.post_init()
@auto_docstring
def forward(
self,
past_values: torch.Tensor,
observed_mask: Optional[torch.Tensor] = None,
future_values: Optional[torch.Tensor] = None,
output_hidden_states: Optional[bool] = False,
return_loss: bool = True,
return_dict: Optional[bool] = None,
) -> PatchTSMixerForPredictionOutput:
r"""
past_values (`torch.FloatTensor` of shape `(batch_size, seq_length, num_input_channels)`):
Context values of the time series. For a pretraining task, this denotes the input time series to predict
the masked portion. For a forecasting task, this denotes the history/past time series values. Similarly,
for classification or regression tasks, it denotes the appropriate context values of the time series.
For univariate time series, `num_input_channels` dimension should be 1. For multivariate time series, it is
greater than 1.
observed_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
future_values (`torch.FloatTensor` of shape `(batch_size, target_len, num_input_channels)` for forecasting,:
`(batch_size, num_targets)` for regression, or `(batch_size,)` for classification, *optional*):
Target values of the time series, that serve as labels for the model. The `future_values` is what the
Transformer needs during training to learn to output, given the `past_values`. Note that, this is NOT
required for a pretraining task.
For a forecasting task, the shape is be `(batch_size, target_len, num_input_channels)`. Even if we want
to forecast only specific channels by setting the indices in `prediction_channel_indices` parameter,
pass the target data with all channels, as channel Filtering for both prediction and target will be
manually applied before the loss computation.
return_loss (`bool`, *optional*):
Whether to return the loss in the `forward` call.
"""
if self.loss == "mse":
loss = nn.MSELoss(reduction="mean")
elif self.loss == "nll":
loss = nll
else:
raise ValueError("Invalid loss function: Allowed values: mse and nll")
return_dict = return_dict if return_dict is not None else self.use_return_dict
# past_values: tensor [batch_size x context_length x num_input_channels]
model_output = self.model(
past_values,
observed_mask=observed_mask,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
) # model_output: [batch_size x nvars x num_patch x d_model]
if isinstance(model_output, tuple):
model_output = PatchTSMixerModelOutput(*model_output)
# tensor [batch_size x prediction_length x num_input_channels]
y_hat = self.head(model_output.last_hidden_state)
loss_val = None
if self.prediction_channel_indices is not None:
if self.distribution_output:
distribution = self.distribution_output.distribution(
y_hat,
loc=model_output.loc[..., self.prediction_channel_indices],
scale=model_output.scale[..., self.prediction_channel_indices],
)
if future_values is not None and return_loss is True:
loss_val = loss(
distribution,
future_values[..., self.prediction_channel_indices],
)
# take average of the loss
loss_val = weighted_average(loss_val)
else:
y_hat = (
y_hat * model_output.scale[..., self.prediction_channel_indices]
+ model_output.loc[..., self.prediction_channel_indices]
)
if future_values is not None and return_loss is True:
loss_val = loss(y_hat, future_values[..., self.prediction_channel_indices])
else:
if self.distribution_output:
distribution = self.distribution_output.distribution(
y_hat, loc=model_output.loc, scale=model_output.scale
)
if future_values is not None and return_loss is True:
loss_val = loss(distribution, future_values)
loss_val = weighted_average(loss_val)
else:
y_hat = y_hat * model_output.scale + model_output.loc
if future_values is not None and return_loss is True:
loss_val = loss(y_hat, future_values)
if self.prediction_channel_indices is not None:
loc = model_output.loc[..., self.prediction_channel_indices]
scale = model_output.scale[..., self.prediction_channel_indices]
else:
loc = model_output.loc
scale = model_output.scale
if not return_dict:
return tuple(
v
for v in [
loss_val,
y_hat,
model_output.last_hidden_state,
model_output.hidden_states,
loc,
scale,
]
)
return PatchTSMixerForPredictionOutput(
loss=loss_val,
prediction_outputs=y_hat, # tensor [batch_size x prediction_length x num_input_channels]
last_hidden_state=model_output.last_hidden_state, # x: [batch_size x nvars x num_patch x d_model]
hidden_states=model_output.hidden_states,
loc=loc,
scale=scale,
)
@torch.no_grad()
def generate(
self,
past_values: torch.Tensor,
observed_mask: Optional[torch.Tensor] = None,
) -> SamplePatchTSMixerPredictionOutput:
"""
Generate sequences of sample predictions from a model with a probability distribution head.
Args:
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Past values of the time series that serves as context in order to predict the future.
observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
Return:
[`SamplePatchTSMixerPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size,
number of samples, prediction_length, num_input_channels)`.
"""
# get number of samples
num_parallel_samples = self.num_parallel_samples
# get model output
outputs = self(
past_values=past_values,
future_values=None,
observed_mask=observed_mask,
output_hidden_states=False,
)
# get distribution
distribution = self.distribution_output.distribution(
outputs.prediction_outputs, loc=outputs.loc, scale=outputs.scale
)
# get samples: list of [batch_size x prediction_length x num_channels]
samples = [distribution.sample() for _ in range(num_parallel_samples)]
# stack tensors
samples = torch.stack(samples, dim=1) # [batch_size x num_samples x prediction_length x num_channels]
return SamplePatchTSMixerPredictionOutput(sequences=samples)
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`PatchTSMixerForTimeSeriesClassificationOutput`].
"""
)
| PatchTSMixerForPrediction |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_basic.py | {
"start": 4640,
"end": 5893
} | class ____(fixtures.DeclarativeMappedTest):
__sparse_driver_backend__ = True
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(Base):
__tablename__ = "a"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
type = Column(String(10))
__mapper_args__ = {
"polymorphic_on": type,
"polymorphic_identity": "a",
}
class B(A):
__tablename__ = "b"
id = Column(ForeignKey("a.id"), primary_key=True)
data = Column(Integer)
__mapper_args__ = {"polymorphic_identity": "b"}
@classmethod
def insert_data(cls, connection):
A, B = cls.classes("A", "B")
s = Session(connection)
s.add_all([B(data=5), B(data=7)])
s.commit()
def test_group_by(self):
B = self.classes.B
s = fixture_session()
rows = (
s.query(B.id.expressions[0], B.id.expressions[1], func.sum(B.data))
.group_by(*B.id.expressions)
.order_by(B.id.expressions[0])
.all()
)
eq_(rows, [(1, 1, 5), (2, 2, 7)])
| ColExpressionsTest |
python | numpy__numpy | numpy/_core/_ufunc_config.py | {
"start": 10716,
"end": 15130
} | class ____:
"""
errstate(**kwargs)
Context manager for floating-point error handling.
Using an instance of `errstate` as a context manager allows statements in
that context to execute with a known error handling behavior. Upon entering
the context the error handling is set with `seterr` and `seterrcall`, and
upon exiting it is reset to what it was before.
.. versionchanged:: 1.17.0
`errstate` is also usable as a function decorator, saving
a level of indentation if an entire function is wrapped.
.. versionchanged:: 2.0
`errstate` is now fully thread and asyncio safe, but may not be
entered more than once.
It is not safe to decorate async functions using ``errstate``.
Parameters
----------
kwargs : {divide, over, under, invalid}
Keyword arguments. The valid keywords are the possible floating-point
exceptions. Each keyword should have a string value that defines the
treatment for the particular error. Possible values are
{'ignore', 'warn', 'raise', 'call', 'print', 'log'}.
See Also
--------
seterr, geterr, seterrcall, geterrcall
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> import numpy as np
>>> olderr = np.seterr(all='ignore') # Set error handling to known state.
>>> np.arange(3) / 0.
array([nan, inf, inf])
>>> with np.errstate(divide='ignore'):
... np.arange(3) / 0.
array([nan, inf, inf])
>>> np.sqrt(-1)
np.float64(nan)
>>> with np.errstate(invalid='raise'):
... np.sqrt(-1)
Traceback (most recent call last):
File "<stdin>", line 2, in <module>
FloatingPointError: invalid value encountered in sqrt
Outside the context the error handling behavior has not changed:
>>> np.geterr()
{'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}
>>> olderr = np.seterr(**olderr) # restore original state
"""
__slots__ = (
"_all",
"_call",
"_divide",
"_invalid",
"_over",
"_token",
"_under",
)
def __init__(self, *, call=_Unspecified,
all=None, divide=None, over=None, under=None, invalid=None):
self._token = None
self._call = call
self._all = all
self._divide = divide
self._over = over
self._under = under
self._invalid = invalid
def __enter__(self):
# Note that __call__ duplicates much of this logic
if self._token is not None:
raise TypeError("Cannot enter `np.errstate` twice.")
if self._call is _Unspecified:
extobj = _make_extobj(
all=self._all, divide=self._divide, over=self._over,
under=self._under, invalid=self._invalid)
else:
extobj = _make_extobj(
call=self._call,
all=self._all, divide=self._divide, over=self._over,
under=self._under, invalid=self._invalid)
self._token = _extobj_contextvar.set(extobj)
def __exit__(self, *exc_info):
_extobj_contextvar.reset(self._token)
def __call__(self, func):
    """Decorator form: run *func* under this error-state configuration."""
    # We need to customize `__call__` compared to `ContextDecorator`
    # because we must store the token per-thread so cannot store it on
    # the instance (we could create a new instance for this).
    # This duplicates the code from `__enter__`.
    @functools.wraps(func)
    def inner(*args, **kwargs):
        # Build the extobj exactly as __enter__ does, but keep the reset
        # token in a local so concurrent/nested calls don't race on
        # shared instance state.
        if self._call is _Unspecified:
            extobj = _make_extobj(
                all=self._all, divide=self._divide, over=self._over,
                under=self._under, invalid=self._invalid)
        else:
            extobj = _make_extobj(
                call=self._call,
                all=self._all, divide=self._divide, over=self._over,
                under=self._under, invalid=self._invalid)
        _token = _extobj_contextvar.set(extobj)
        try:
            # Call the original, decorated, function:
            return func(*args, **kwargs)
        finally:
            # Always restore the previous state, even if func raised.
            _extobj_contextvar.reset(_token)

    return inner
| errstate |
python | matplotlib__matplotlib | lib/mpl_toolkits/axisartist/angle_helper.py | {
"start": 8798,
"end": 9446
class ____(FormatterDMS):
    """Hours/minutes/seconds variant of the DMS tick formatter.

    Reuses the layout logic of `FormatterDMS` but swaps in h/m/s unit
    marks and converts degree values to hours before formatting.
    """

    # Mathtext superscript unit markers for hours, minutes, seconds.
    deg_mark = r"^\mathrm{h}"
    min_mark = r"^\mathrm{m}"
    sec_mark = r"^\mathrm{s}"
    # Format templates assembled from the markers above.
    fmt_d = "$%d" + deg_mark + "$"
    fmt_ds = r"$%d.%s" + deg_mark + "$"
    # %s for sign
    fmt_d_m = r"$%s%d" + deg_mark + r"\,%02d" + min_mark+"$"
    fmt_d_ms = r"$%s%d" + deg_mark + r"\,%02d.%s" + min_mark+"$"
    # Partial templates: a d/m prefix completed by one of the s suffixes.
    fmt_d_m_partial = "$%s%d" + deg_mark + r"\,%02d" + min_mark + r"\,"
    fmt_s_partial = "%02d" + sec_mark + "$"
    fmt_ss_partial = "%02d.%s" + sec_mark + "$"

    def __call__(self, direction, factor, values):  # hour
        # Degrees -> hours: 15 degrees per hour (360 deg / 24 h).
        return super().__call__(direction, factor, np.asarray(values) / 15)
| FormatterHMS |
python | TheAlgorithms__Python | data_structures/binary_tree/is_sum_tree.py | {
"start": 332,
"end": 1726
} | class ____:
data: int
left: Node | None = None
right: Node | None = None
def __iter__(self) -> Iterator[int]:
"""
>>> root = Node(2)
>>> list(root)
[2]
>>> root.left = Node(1)
>>> tuple(root)
(1, 2)
"""
if self.left:
yield from self.left
yield self.data
if self.right:
yield from self.right
def __len__(self) -> int:
"""
>>> root = Node(2)
>>> len(root)
1
>>> root.left = Node(1)
>>> len(root)
2
"""
return sum(1 for _ in self)
@property
def is_sum_node(self) -> bool:
"""
>>> root = Node(3)
>>> root.is_sum_node
True
>>> root.left = Node(1)
>>> root.is_sum_node
False
>>> root.right = Node(2)
>>> root.is_sum_node
True
"""
if not self.left and not self.right:
return True # leaf nodes are considered sum nodes
left_sum = sum(self.left) if self.left else 0
right_sum = sum(self.right) if self.right else 0
return all(
(
self.data == left_sum + right_sum,
self.left.is_sum_node if self.left else True,
self.right.is_sum_node if self.right else True,
)
)
@dataclass
| Node |
python | doocs__leetcode | lcof/面试题24. 反转链表/Solution.py | {
"start": 136,
"end": 418
} | class ____:
def reverseList(self, head: ListNode) -> ListNode:
dummy = ListNode()
curr = head
while curr:
next = curr.next
curr.next = dummy.next
dummy.next = curr
curr = next
return dummy.next
| Solution |
python | openai__openai-python | src/openai/resources/chat/chat.py | {
"start": 1382,
"end": 2314
class ____(AsyncAPIResource):
    """Async chat resource: exposes the completions sub-resource plus
    raw/streaming response wrappers around the same client."""

    @cached_property
    def completions(self) -> AsyncCompletions:
        # Lazily build the completions sub-resource on first access,
        # sharing this resource's client; cached_property memoizes it.
        return AsyncCompletions(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncChatWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncChatWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncChatWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncChatWithStreamingResponse(self)
| AsyncChat |
python | pytorch__pytorch | test/higher_order_ops/test_invoke_subgraph.py | {
"start": 13346,
"end": 24071
} | class ____(torch.nn.Module):
def forward(self, getitem_12: "f32[8]", getitem_11: "f32[8]", getitem_10: "f32[8]", getitem_15: "f32[8]", getitem_14: "f32[8]", getitem_13: "f32[8]", tangents_1: "f32[8]"):
partitioned_bw_subgraph_0_1 = self.partitioned_bw_subgraph_0_0
invoke_subgraph_7 = torch.ops.higher_order.invoke_subgraph(partitioned_bw_subgraph_0_1, 'partitioned_bw_subgraph_0_0', getitem_13, getitem_14, getitem_15, tangents_1); partitioned_bw_subgraph_0_1 = getitem_13 = getitem_14 = getitem_15 = None
getitem_2: "f32[8]" = invoke_subgraph_7[0]
getitem_3: "f32[8]" = invoke_subgraph_7[1]; invoke_subgraph_7 = None
partitioned_bw_subgraph_0_0 = self.partitioned_bw_subgraph_0_0
invoke_subgraph_5 = torch.ops.higher_order.invoke_subgraph(partitioned_bw_subgraph_0_0, 'partitioned_bw_subgraph_0_0', getitem_10, getitem_11, getitem_12, tangents_1); partitioned_bw_subgraph_0_0 = getitem_10 = getitem_11 = getitem_12 = tangents_1 = None
getitem_6: "f32[8]" = invoke_subgraph_5[0]
getitem_7: "f32[8]" = invoke_subgraph_5[1]; invoke_subgraph_5 = None
add_1: "f32[8]" = torch.ops.aten.add.Tensor(getitem_2, getitem_6); getitem_2 = getitem_6 = None
add_2: "f32[8]" = torch.ops.aten.add.Tensor(getitem_3, getitem_7); getitem_3 = getitem_7 = None
return (add_1, add_2, None)
class partitioned_bw_subgraph_0_0(torch.nn.Module):
def forward(self, primals_0: "f32[8]", primals_1: "f32[8]", primals_2: "f32[8]", tangents_0: "f32[8]"):
mul_3: "f32[8]" = torch.ops.aten.mul.Tensor(tangents_0, primals_2); tangents_0 = primals_2 = None
mul_4: "f32[8]" = torch.ops.aten.mul.Tensor(mul_3, 5); mul_3 = None
mul: "f32[8]" = torch.ops.aten.mul.Tensor(primals_0, primals_1)
cos: "f32[8]" = torch.ops.aten.cos.default(mul); mul = None
mul_5: "f32[8]" = torch.ops.aten.mul.Tensor(mul_4, cos); mul_4 = cos = None
mul_6: "f32[8]" = torch.ops.aten.mul.Tensor(mul_5, primals_0); primals_0 = None
mul_7: "f32[8]" = torch.ops.aten.mul.Tensor(mul_5, primals_1); mul_5 = primals_1 = None
return (mul_7, mul_6, None)
""",
ignore_empty_lines=True,
)
def test_buffer_mutation_works_under_no_grad(self):
    """Buffer mutation inside a nested compile region should compile and
    match eager when autograd is inactive: under no_grad, under
    inference_mode, or when inputs do not require grad."""

    class Mod(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.register_buffer("buf", torch.ones(8, requires_grad=False))

        @nested_compile_region
        def forward(self, x, y):
            # In-place buffer mutation inside the region under test.
            self.buf.add_(1)
            return torch.mul(x, y).sin() * self.buf

    mod_ref = Mod()
    mod = Mod()

    def fn(mod, x, y):
        # Two invocations per call, so the buffer is bumped twice.
        return mod(x, y) + mod(x, y)

    x = torch.randn(8, requires_grad=True)
    y = torch.randn(8, requires_grad=True)
    ref = fn(mod_ref, x, y)

    # Case 1: compiled under torch.no_grad().
    x_clone = x.detach().clone().requires_grad_(True)
    y_clone = y.detach().clone().requires_grad_(True)
    with torch.no_grad():
        res = torch.compile(fn, fullgraph=True)(mod, x_clone, y_clone)
    self.assertEqual(ref, res)
    # The compiled run must mutate the buffer exactly like eager did.
    self.assertEqual(mod_ref.buf, mod.buf)

    # Case 2: compiled under torch.inference_mode(), fresh module.
    mod = Mod()
    x_clone = x.detach().clone().requires_grad_(True)
    y_clone = y.detach().clone().requires_grad_(True)
    with torch.inference_mode():
        res = torch.compile(fn, fullgraph=True)(mod, x_clone, y_clone)
    self.assertEqual(ref, res)
    self.assertEqual(mod_ref.buf, mod.buf)

    # Case 3: grad mode on, but inputs do not require grad.
    mod = Mod()
    x_clone = x.detach().clone().requires_grad_(False)
    y_clone = y.detach().clone().requires_grad_(False)
    res = torch.compile(fn, fullgraph=True)(mod, x_clone, y_clone)
    self.assertEqual(ref, res)
    self.assertEqual(mod_ref.buf, mod.buf)
def test_buffer_mutation_errors_under_training(self):
    """With grad-requiring inputs (training), in-place buffer mutation
    inside a nested compile region must raise a RuntimeError rather than
    compile incorrectly."""

    class Mod(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.register_buffer("buf", torch.ones(8, requires_grad=False))

        @nested_compile_region
        def forward(self, x, y):
            # The unsupported in-place mutation under training.
            self.buf.add_(1)
            return torch.mul(x, y).sin() * self.buf

    mod = Mod()

    def fn(mod, x, y):
        return mod(x, y) + mod(x, y)

    x = torch.randn(8, requires_grad=True)
    y = torch.randn(8, requires_grad=True)
    # Expect the dedicated error message for this unsupported case.
    with self.assertRaisesRegex(
        RuntimeError,
        "does not currently support training with in-place input or buffer mutations",
    ):
        torch.compile(fn, backend="inductor", fullgraph=True)(mod, x, y)
def test_list(self):
    """A region returning a list must interoperate with list mutation
    performed outside the region; forward values and gradients must match
    eager."""

    @nested_compile_region
    def gn(x, y):
        return [torch.mul(x, y), torch.add(x, y)]

    def fn(x, y):
        lst = gn(x, y)
        # Mutate the returned list outside the compiled region.
        lst.append(torch.sin(x))
        return lst[0] + lst[1] + lst[2]

    x = torch.randn(8, requires_grad=True)
    y = torch.randn(8, requires_grad=True)
    ref = fn(x, y)

    x_clone = x.detach().clone().requires_grad_(True)
    y_clone = y.detach().clone().requires_grad_(True)
    res = torch.compile(fn, backend="inductor", fullgraph=True)(x_clone, y_clone)

    # Run backward
    ref.sum().backward()
    res.sum().backward()

    self.assertEqual(ref, res)
    # Gradients through the region must match eager as well.
    self.assertEqual(x.grad, x_clone.grad)
    self.assertEqual(y.grad, y_clone.grad)
def test_tuple_of_tuple(self):
    """A region returning a nested tuple ((tensor,), tensor) must be
    unpacked correctly by the caller; forward and gradients must match
    eager."""

    @nested_compile_region
    def gn(x, y):
        return ((torch.mul(x, y),), torch.add(x, y))

    def fn(x, y):
        tup = gn(x, y)
        # Index into the nested structure returned by the region.
        return tup[0][0] + tup[1]

    x = torch.randn(8, requires_grad=True)
    y = torch.randn(8, requires_grad=True)
    ref = fn(x, y)

    x_clone = x.detach().clone().requires_grad_(True)
    y_clone = y.detach().clone().requires_grad_(True)
    res = torch.compile(fn, backend="inductor", fullgraph=True)(x_clone, y_clone)

    # Run backward
    ref.sum().backward()
    res.sum().backward()

    self.assertEqual(ref, res)
    self.assertEqual(x.grad, x_clone.grad)
    self.assertEqual(y.grad, y_clone.grad)
@unittest.skip("FunctionCtx ops is not cacheable right now")
def test_differing_strides_for_grad_outs(self):
    """A custom autograd backward that views/reshapes grad_out; the
    caller permutes the forward output so the incoming grad has
    non-default strides, exercising the contiguous-handling path."""

    class CustomOp(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            return torch.sin(x)

        @staticmethod
        def backward(ctx, grad_out):
            # view(12, 5) requires a contiguous grad_out of 60 elements.
            a = grad_out.view(12, 5)
            return torch.cos(torch.reshape(a, (3, 4, 5)))

    @nested_compile_region
    def gn(x):
        return CustomOp.apply(x)

    def fn(x):
        a = gn(x)
        # Force stride changes so that backward view causes a failure if
        # contiguous not called.
        b = torch.permute(a, (0, 2, 1))
        return b

    x = torch.randn(3, 4, 5, requires_grad=True)
    ref = torch.permute(gn(x), (0, 2, 1))

    x_clone = x.clone().detach().requires_grad_(True)
    opt_fn = torch.compile(fn, backend="aot_eager")
    res = opt_fn(x_clone)

    # Run backward
    ref.sum().backward()
    res.sum().backward()

    self.assertEqual(ref, res)
    self.assertEqual(x.grad, x_clone.grad)
@requires_cuda_and_triton
def test_sdpa(self):
    """Scaled dot-product attention (flash backend) inside a nested
    compile region: compiled forward must match eager, and backward must
    run on repeated invocations."""

    @nested_compile_region
    def gn(q, k, v):
        return torch.nn.functional.scaled_dot_product_attention(
            q, k, v, attn_mask=None, dropout_p=0.0, is_causal=True
        )

    def fn(q, k, v):
        # Pin the SDPA implementation to flash attention.
        with torch.nn.attention.sdpa_kernel(
            [torch.nn.attention.SDPBackend.FLASH_ATTENTION]
        ):
            return gn(q, k, v)

    q = torch.randn(
        1, 1, 32, 32, device="cuda", dtype=torch.bfloat16, requires_grad=True
    )
    k = torch.randn(
        1, 1, 32, 32, device="cuda", dtype=torch.bfloat16, requires_grad=True
    )
    v = torch.randn(
        1, 1, 32, 32, device="cuda", dtype=torch.bfloat16, requires_grad=True
    )

    ref = fn(q, k, v)
    opt_fn = torch.compile(fn, backend="inductor", fullgraph=True)
    res = opt_fn(q, k, v)
    res.sum().backward()
    self.assertEqual(ref, res)

    # Second call exercises the cached compiled artifact end-to-end.
    res = opt_fn(q, k, v)
    res.sum().backward()
def test_symint_from_fwd_to_bwd(self):
    """Run the same compiled function at several input shapes so shape
    symbols (symints) from the forward must flow into the backward; each
    shape must still match eager."""

    @nested_compile_region
    def gn(x, y):
        # The view target depends on y's runtime shape.
        a = torch.sum(x, (1,), keepdim=True).view(y.shape[1], y.shape[0])
        return torch.matmul(a, y)

    def fn(x, y):
        return gn(x, y)

    opt_fn = torch.compile(fn, backend="inductor", fullgraph=True)

    x = torch.randn(64, 1, requires_grad=True)
    y = torch.randn(8, 8, requires_grad=True)
    ref = fn(x, y)
    res = opt_fn(x, y)
    self.assertEqual(ref, res)

    # Larger shapes; also run backward through the compiled graph.
    x = torch.randn(256, 1, requires_grad=True)
    y = torch.randn(16, 16, requires_grad=True)
    ref = fn(x, y)
    res = opt_fn(x, y)
    self.assertEqual(ref, res)
    res.sum().backward()

    # Smaller shapes again, forward and backward.
    x = torch.randn(16, 1, requires_grad=True)
    y = torch.randn(4, 4, requires_grad=True)
    ref = fn(x, y)
    res = opt_fn(x, y)
    self.assertEqual(ref, res)
    res.sum().backward()
@inductor_config.patch("fx_graph_cache", False)
def test_dropout_checks_joint_graph(self):
# `dropout` tests that joint graph passes (not just partitioner) is ran
# on the hop graphs. Inductor rng functionalization happens in the joint
# graph passes. Without running joint graph passes, we would get an
# error like AssertionError: should have been handled in
# replace_random.py
@nested_compile_region
def gn(x):
return torch.nn.functional.dropout(torch.sin(x), p=0.5)
@nested_compile_region
def hn(x):
return torch.sin(x)
def fn(x):
return gn(x) + hn(x)
x = torch.randn(8, requires_grad=True)
# Difficult to check the results here because we random does not match
# between eager and Triton.
res = torch.compile(fn, backend="inductor", fullgraph=True)(x) # noqa: F841
torch.compiler.reset()
backend = InductorAndRecordGraphs()
res = torch.compile(fn, backend=backend, fullgraph=True)(x)
res.sum().backward()
if not TEST_WITH_CROSSREF:
self.assertExpectedInline(
normalize_gm(
backend.inductor_graphs[0].print_readable(print_output=False)
),
"""\
| GraphModule |
python | facelessuser__pymdown-extensions | pymdownx/util.py | {
"start": 5050,
"end": 11220
class ____(InlineProcessor):
    """Processor for handling complex nested patterns such as strong and em matches."""

    # Ordered sequence of PatSeqItem entries (pattern, builder name, tags,
    # recursion flag); subclasses populate this with their patterns.
    PATTERNS = []  # type: list[PatSeqItem]

    def build_single(self, m: re.Match[str], tag: str, full_recursion: bool, idx: int) -> etree.Element:
        """Return single tag."""
        el1 = etree.Element(tag)
        text = m.group(2)
        # Recursively parse the captured content for nested patterns.
        self.parse_sub_patterns(text, el1, None, full_recursion, idx)
        return el1

    def build_double(self, m: re.Match[str], tags: str, full_recursion: bool, idx: int) -> etree.Element:
        """Return double tag (nested pair, e.g. `<strong><em>…</em>…</strong>`)."""

        tag1, tag2 = tags.split(",")
        el1 = etree.Element(tag1)
        el2 = etree.Element(tag2)
        # Group 2 is the inner (doubly wrapped) content.
        text = m.group(2)
        self.parse_sub_patterns(text, el2, None, full_recursion, idx)
        el1.append(el2)
        if len(m.groups()) == 3:
            # Group 3, when present, is trailing content wrapped only by
            # the outer tag; it is parsed with el2 as the "last" child so
            # plain text lands in el2.tail.
            text = m.group(3)
            self.parse_sub_patterns(text, el1, el2, full_recursion, idx)
        return el1

    def build_double2(self, m: re.Match[str], tags: str, full_recursion: bool, idx: int) -> etree.Element:
        """Return double tags (variant 2): `<strong>text <em>text</em></strong>`."""

        tag1, tag2 = tags.split(",")
        el1 = etree.Element(tag1)
        el2 = etree.Element(tag2)
        # Group 2: leading content in the outer tag only.
        text = m.group(2)
        self.parse_sub_patterns(text, el1, None, full_recursion, idx)
        # Group 3: trailing content in the nested tag.
        text = m.group(3)
        el1.append(el2)
        self.parse_sub_patterns(text, el2, None, full_recursion, idx)
        return el1

    def parse_sub_patterns(
        self,
        data: str,
        parent: etree.Element,
        last: None | etree.Element,
        full_recursion: bool,
        idx: int
    ) -> None:
        """
        Parses sub patterns.

        `data` (`str`):
            text to evaluate.

        `parent` (`etree.Element`):
            Parent to attach text and sub elements to.

        `last` (`etree.Element`):
            Last appended child to parent. Can also be None if parent has no children.

        `full_recursion` (`bool`):
            If True, sub-parsing may reuse all patterns; otherwise only
            patterns after `idx` are considered (prevents infinite
            re-matching of the pattern that produced the parent).

        `idx` (`int`):
            Current pattern index that was used to evaluate the parent.
        """

        offset = 0  # start of the not-yet-emitted text run
        pos = 0     # scan cursor within data

        length = len(data)
        while pos < length:
            # Find the start of potential emphasis or strong tokens
            if self.compiled_re.match(data, pos):
                matched = False
                # See if the we can match an emphasis/strong pattern
                for index, item in enumerate(self.PATTERNS):
                    # Only evaluate patterns that are after what was used on the parent
                    if not full_recursion and index <= idx:
                        continue
                    m = item.pattern.match(data, pos)
                    if m:
                        # Append child nodes to parent
                        # Text nodes should be appended to the last
                        # child if present, and if not, it should
                        # be added as the parent's text node.
                        text = data[offset:m.start(0)]
                        if text:
                            if last is not None:
                                last.tail = text
                            else:
                                parent.text = text
                        el = self.build_element(m, item.builder, item.tags, item.full_recursion, index)
                        parent.append(el)
                        last = el
                        # Move our position past the matched hunk
                        offset = pos = m.end(0)
                        matched = True
                if not matched:
                    # We matched nothing, move on to the next character
                    pos += 1
            else:
                # Increment position as no potential emphasis start was found.
                pos += 1

        # Append any leftover text as a text node.
        text = data[offset:]
        if text:
            if last is not None:
                last.tail = text
            else:
                parent.text = text

    def build_element(
        self,
        m: re.Match[str],
        builder: str,
        tags: str,
        full_recursion: bool,
        index: int
    ) -> etree.Element:
        """Element builder: dispatch to the named build_* strategy."""

        if builder == 'double2':
            return self.build_double2(m, tags, full_recursion, index)
        elif builder == 'double':
            return self.build_double(m, tags, full_recursion, index)
        else:
            return self.build_single(m, tags, full_recursion, index)

    def handleMatch(  # type: ignore[override]
        self,
        m: re.Match[str],
        data: str
    ) -> tuple[etree.Element | None, int | None, int | None]:
        """Parse patterns: first PATTERNS entry that matches at m.start(0) wins."""

        el = None
        start = None
        end = None

        for index, item in enumerate(self.PATTERNS):
            m1 = item.pattern.match(data, m.start(0))
            if m1:
                start = m1.start(0)
                end = m1.end(0)
                el = self.build_element(m1, item.builder, item.tags, item.full_recursion, index)
                break
        return el, start, end
def deprecated(message: str, stacklevel: int = 2) -> Callable[..., Any]:  # pragma: no cover
    """
    Build a decorator that emits a `DeprecationWarning` each time the
    wrapped callable is invoked, then delegates to it unchanged.

    Usage:

        @deprecated("This method will be removed in version X; use Y instead.")
        def some_method():
            pass
    """

    def decorate(target: Callable[..., Any]) -> Callable[..., Any]:
        @wraps(target)
        def warn_then_call(*args: Any, **kwargs: Any) -> Any:
            # Warn first so the warning points at the caller's frame,
            # then run the original callable untouched.
            warnings.warn(
                f"'{target.__name__}' is deprecated. {message}",
                category=DeprecationWarning,
                stacklevel=stacklevel,
            )
            return target(*args, **kwargs)

        return warn_then_call

    return decorate
def warn_deprecated(message: str, stacklevel: int = 2) -> None:  # pragma: no cover
    """Emit *message* as a `DeprecationWarning` at the given stack level."""
    warnings.warn(message, category=DeprecationWarning, stacklevel=stacklevel)
| PatternSequenceProcessor |
python | pytorch__pytorch | test/distributed/checkpoint/test_checkpoint.py | {
"start": 1533,
"end": 2298
class ____(torch.nn.Module):
    """Checkpoint test module mixing a sharded tensor, a regular parameter,
    and two initially-absent "extra" attributes, with a custom state-dict
    hook installed."""

    def __init__(self) -> None:
        super().__init__()
        # Zero-initialized 4x4 tensor sharded per spec() across two ranks.
        self.sharded: ShardedTensor = sharded_tensor.zeros(self.spec(), 4, 4)
        self.regular = torch.nn.Parameter(torch.ones(4, 4))
        # Populated later by tests; start out absent.
        self.extra_sharded: Optional[ShardedTensor] = None
        self.extra_param: Optional[torch.nn.Parameter] = None
        # Hook defined elsewhere in this file; runs during state_dict().
        self._register_state_dict_hook(state_dict_hook)

    def spec(self) -> ChunkShardingSpec:
        """Chunk along dim 0, placing shards on ranks 0 and 1."""
        # pyre-fixme [28]: Unexpected keyword argument `dim` to call `dist._sharding_spec.api.ChunkShardingSpec.__init__`.
        return ChunkShardingSpec(
            dim=0,
            placements=[
                f"rank:0/{device_type}:0",
                f"rank:1/{device_type}:1",
            ],
        )
python | pydata__xarray | xarray/tests/test_indexing.py | {
"start": 18855,
"end": 19719
class ____:
    """CopyOnWriteArray must copy its backing array on first write, leaving
    the original array (and any parent wrapper) untouched."""

    def test_setitem(self) -> None:
        original = np.arange(10)
        wrapped = indexing.CopyOnWriteArray(original)
        # Writing through the wrapper must not mutate `original`.
        wrapped[B[:]] = 0
        assert_array_equal(original, np.arange(10))
        assert_array_equal(wrapped, np.zeros(10))

    def test_sub_array(self) -> None:
        original = np.arange(10)
        wrapped = indexing.CopyOnWriteArray(original)
        child = wrapped[B[:5]]
        # Slicing yields another copy-on-write wrapper...
        assert isinstance(child, indexing.CopyOnWriteArray)
        child[B[:]] = 0
        # ...so writing to the child leaves both ancestors unchanged.
        assert_array_equal(original, np.arange(10))
        assert_array_equal(wrapped, np.arange(10))
        assert_array_equal(child, np.zeros(5))

    def test_index_scalar(self) -> None:
        # regression test for GH1374
        x = indexing.CopyOnWriteArray(np.array(["foo", "bar"]))
        assert np.array(x[B[0]][B[()]]) == "foo"
| TestCopyOnWriteArray |
python | google__jax | docs/autodidax2_part1.py | {
"start": 7205,
"end": 8572
class ____:
    # Forward-mode AD pair: `primal` is the value, `tangent` its derivative.
    primal : float
    tangent : float
def add_dual(x : DualNumber, y: DualNumber) -> DualNumber:
    """Dual-number addition: primal and tangent parts add independently."""
    primal_sum = x.primal + y.primal
    tangent_sum = x.tangent + y.tangent
    return DualNumber(primal_sum, tangent_sum)
def mul_dual(x : DualNumber, y: DualNumber) -> DualNumber:
    """Dual-number product rule: (a, a') * (b, b') = (a*b, a*b' + a'*b)."""
    primal_prod = x.primal * y.primal
    tangent_prod = x.primal * y.tangent + x.tangent * y.primal
    return DualNumber(primal_prod, tangent_prod)
def foo_dual(x : DualNumber) -> DualNumber:
    """Dual-number version of foo(x) = x * (x + 3.0)."""
    three = DualNumber(3.0, 0.0)  # constants carry a zero tangent
    shifted = add_dual(x, three)
    return mul_dual(x, shifted)
print (foo_dual(DualNumber(2.0, 1.0)))
# -
# That works! But rewriting `foo` to use the `_dual` versions of addition and
# multiplication was a bit tedious. Let's get back to the main program and use
# our interpretation machinery to do the rewrite automatically.
# ## JVP Interpreter
# We'll set up a new interpreter called `JVPInterpreter` ("JVP" for
# "Jacobian-vector product") which propagates these dual numbers instead of
# ordinary values. The `JVPInterpreter` has methods 'add' and 'mul' that operate
# on dual number. They cast constant arguments to dual numbers as needed by
# calling `JVPInterpreter.lift`. In our manually rewritten version above we did
# that by replacing the literal `3.0` with `DualNumber(3.0, 0.0)`.
# +
# This is like DualNumber above except that is also has a pointer to the
# interpreter it belongs to, which is needed to avoid "perturbation confusion"
# in higher order differentiation.
@dataclass
| DualNumber |
python | Pylons__pyramid | tests/test_traversal.py | {
"start": 19182,
"end": 20595
class ____(unittest.TestCase):
    """Tests for pyramid.traversal.find_interface: walking the __parent__
    chain from a resource until one providing the given interface (or
    being an instance of the given class) is found."""

    def _callFUT(self, context, iface):
        """Invoke the function under test."""
        from pyramid.traversal import find_interface

        return find_interface(context, iface)

    def test_it_interface(self):
        # Build a four-deep lineage: root -> foo -> bar -> baz.
        baz = DummyContext()
        bar = DummyContext(baz)
        foo = DummyContext(bar)
        root = DummyContext(foo)
        root.__parent__ = None
        root.__name__ = 'root'
        foo.__parent__ = root
        foo.__name__ = 'foo'
        bar.__parent__ = foo
        bar.__name__ = 'bar'
        baz.__parent__ = bar
        baz.__name__ = 'baz'
        from zope.interface import Interface, directlyProvides

        class IFoo(Interface):
            pass

        # Only the root provides IFoo, so lookup from the leaf must walk
        # the whole chain up.
        directlyProvides(root, IFoo)
        result = self._callFUT(baz, IFoo)
        self.assertEqual(result.__name__, 'root')

    def test_it_class(self):
        class DummyRoot:
            def __init__(self, child):
                self.child = child

        baz = DummyContext()
        bar = DummyContext(baz)
        foo = DummyContext(bar)
        root = DummyRoot(foo)
        root.__parent__ = None
        root.__name__ = 'root'
        foo.__parent__ = root
        foo.__name__ = 'foo'
        bar.__parent__ = foo
        bar.__name__ = 'bar'
        baz.__parent__ = bar
        baz.__name__ = 'baz'
        # A concrete class (not an Interface) also works as the target.
        result = self._callFUT(baz, DummyRoot)
        self.assertEqual(result.__name__, 'root')
python | cookiecutter__cookiecutter | cookiecutter/extensions.py | {
"start": 447,
"end": 883
class ____(Extension):
    """Jinja2 extension to convert a Python object to JSON."""

    def __init__(self, environment: Environment) -> None:
        """Initialize the extension and register the ``jsonify`` filter."""
        super().__init__(environment)
        environment.filters['jsonify'] = self._to_json

    @staticmethod
    def _to_json(obj: Any, indent: int = 4) -> str:
        # Sorted keys make the output deterministic across runs.
        return json.dumps(obj, sort_keys=True, indent=indent)
python | ipython__ipython | IPython/core/magics/execution.py | {
"start": 1733,
"end": 3632
} | class ____:
"""
Object returned by the timeit magic with info about the run.
Contains the following attributes:
loops: int
number of loops done per measurement
repeat: int
number of times the measurement was repeated
best: float
best execution time / number
all_runs : list[float]
execution time of each run (in s)
compile_time: float
time of statement compilation (s)
"""
def __init__(self, loops, repeat, best, worst, all_runs, compile_time, precision):
self.loops = loops
self.repeat = repeat
self.best = best
self.worst = worst
self.all_runs = all_runs
self.compile_time = compile_time
self._precision = precision
self.timings = [dt / self.loops for dt in all_runs]
@property
def average(self):
return math.fsum(self.timings) / len(self.timings)
@property
def stdev(self):
mean = self.average
return (math.fsum([(x - mean) ** 2 for x in self.timings]) / len(self.timings)) ** 0.5
def __str__(self):
pm = '+-'
if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
try:
"\xb1".encode(sys.stdout.encoding)
pm = "\xb1"
except:
pass
return "{mean} {pm} {std} per loop (mean {pm} std. dev. of {runs} run{run_plural}, {loops:,} loop{loop_plural} each)".format(
pm=pm,
runs=self.repeat,
loops=self.loops,
loop_plural="" if self.loops == 1 else "s",
run_plural="" if self.repeat == 1 else "s",
mean=_format_time(self.average, self._precision),
std=_format_time(self.stdev, self._precision),
)
def _repr_pretty_(self, p , cycle):
unic = self.__str__()
p.text("<TimeitResult : " + unic + ">")
| TimeitResult |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 139494,
"end": 154106
} | class ____(FieldChannelMixin, core.FieldDefWithoutScale):
r"""
Detail schema wrapper.
Definition object for a data field, its type and transformation of an encoding channel.
Parameters
----------
shorthand : str, dict, Sequence[str], :class:`RepeatRef`
shorthand for field, aggregate, and type
aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``,
``"min"``, ``"max"``, ``"count"``).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
bin : bool, dict, Literal['binned'], :class:`BinParams`, None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating
that the data for ``x`` or ``y`` channel are binned before they are imported into
Vega-Lite (``"binned"``).
* If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be
applied.
* If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are
already binned. You can map the bin-start field to ``x`` (or ``y``) and the
bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar
to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can
also set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef`
**Required.** A string defining the name of the field from which to pull a data
value or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If
field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details
about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required
if ``aggregate`` is ``count``.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal
field. or `a temporal field that gets casted as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`, Literal['quantitative', 'ordinal', 'temporal', 'nominal']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "detail"
@overload
def aggregate(self, _: NonArgAggregateOp_T, /) -> Detail: ...
@overload
def aggregate(
self, *, argmax: Optional[str | SchemaBase] = Undefined
) -> Detail: ...
@overload
def aggregate(
self, *, argmin: Optional[str | SchemaBase] = Undefined
) -> Detail: ...
@overload
def bandPosition(self, _: float, /) -> Detail: ...
@overload
def bin(self, _: bool | Bin | Literal["binned"] | None, /) -> Detail: ...
@overload
def bin(
self,
*,
anchor: Optional[float] = Undefined,
base: Optional[float] = Undefined,
binned: Optional[bool] = Undefined,
divide: Optional[Sequence[float]] = Undefined,
extent: Optional[Parameter | SchemaBase | Sequence[float] | Map] = Undefined,
maxbins: Optional[float] = Undefined,
minstep: Optional[float] = Undefined,
nice: Optional[bool] = Undefined,
step: Optional[float] = Undefined,
steps: Optional[Sequence[float]] = Undefined,
) -> Detail: ...
@overload
def field(self, _: str | RepeatRef, /) -> Detail: ...
@overload
def field(
self,
*,
repeat: Optional[Literal["row", "column", "repeat", "layer"]] = Undefined,
) -> Detail: ...
@overload
def timeUnit(
self,
_: TimeUnitParams | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T,
/,
) -> Detail: ...
@overload
def timeUnit(
self,
*,
binned: Optional[bool] = Undefined,
maxbins: Optional[float] = Undefined,
step: Optional[float] = Undefined,
unit: Optional[SchemaBase | MultiTimeUnit_T | SingleTimeUnit_T] = Undefined,
utc: Optional[bool] = Undefined,
) -> Detail: ...
@overload
def title(self, _: str | Sequence[str] | None, /) -> Detail: ...
@overload
def type(self, _: StandardType_T, /) -> Detail: ...
def __init__(
self,
shorthand: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Literal["binned"] | Map | None] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
**kwds,
):
super().__init__(
shorthand=shorthand,
aggregate=aggregate,
bandPosition=bandPosition,
bin=bin,
field=field,
timeUnit=timeUnit,
title=title,
type=type,
**kwds,
)
@with_property_setters
| Detail |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets_tests/snippet_checks/guides/components/integrations/test_airbyte_utils.py | {
"start": 2735,
"end": 3026
} | class ____(AirbyteWorkspaceComponent):
workspace: Annotated[
MockAirbyteWorkspace,
dg.Resolver(
lambda context, model: MockAirbyteWorkspace(
**resolve_fields(model, MockAirbyteWorkspace, context)
)
),
]
| MockAirbyteComponent |
python | python-attrs__attrs | tests/test_functional.py | {
"start": 1280,
"end": 1317
} | class ____(type):
pass
@attr.s
| Meta |
python | pyqtgraph__pyqtgraph | pyqtgraph/flowchart/library/Operators.py | {
"start": 438,
"end": 2032
} | class ____(CtrlNode):
"""Generic node for performing any operation like A.fn(B)"""
_dtypes = [
'float64', 'float32', 'float16',
'int64', 'int32', 'int16', 'int8',
'uint64', 'uint32', 'uint16', 'uint8'
]
uiTemplate = [
('outputType', 'combo', {'values': ['no change', 'input A', 'input B'] + _dtypes , 'index': 0})
]
def __init__(self, name, fn):
self.fn = fn
CtrlNode.__init__(self, name, terminals={
'A': {'io': 'in'},
'B': {'io': 'in'},
'Out': {'io': 'out', 'bypass': 'A'}
})
def process(self, **args):
if isinstance(self.fn, tuple):
for name in self.fn:
try:
fn = getattr(args['A'], name)
break
except AttributeError as e:
pass
else:
raise e
else:
fn = getattr(args['A'], self.fn)
out = fn(args['B'])
if out is NotImplemented:
raise Exception("Operation %s not implemented between %s and %s" % (fn, str(type(args['A'])), str(type(args['B']))))
# Coerce dtype if requested
typ = self.stateGroup.state()['outputType']
if typ == 'no change':
pass
elif typ == 'input A':
out = out.astype(args['A'].dtype)
elif typ == 'input B':
out = out.astype(args['B'].dtype)
else:
out = out.astype(typ)
#print " ", fn, out
return {'Out': out}
| BinOpNode |
python | pypa__pip | tests/unit/test_base_command.py | {
"start": 1613,
"end": 1919
} | class ____(FakeCommand):
_name = "fake_unicode"
def run(self, options: Values, args: list[str]) -> int:
logging.getLogger("pip.tests").info(b"bytes here \xe9")
logging.getLogger("pip.tests").info(b"unicode here \xc3\xa9".decode("utf-8"))
return SUCCESS
| FakeCommandWithUnicode |
python | numba__numba | numba/cuda/codegen.py | {
"start": 11350,
"end": 12174
} | class ____(Codegen):
"""
This codegen implementation for CUDA only generates optimized LLVM IR.
Generation of PTX code is done separately (see numba.cuda.compiler).
"""
_library_class = CUDACodeLibrary
def __init__(self, module_name):
pass
def _create_empty_module(self, name):
ir_module = ir.Module(name)
ir_module.triple = CUDA_TRIPLE
ir_module.data_layout = nvvm.NVVM().data_layout
nvvm.add_ir_version(ir_module)
return ir_module
def _add_module(self, module):
pass
def magic_tuple(self):
"""
Return a tuple unambiguously describing the codegen behaviour.
"""
ctx = devices.get_context()
cc = ctx.device.compute_capability
return (runtime.runtime.get_version(), cc)
| JITCUDACodegen |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_data_bar01.py | {
"start": 345,
"end": 2077
} | class ____(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with conditional formatting."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.conditional_format(
"A1",
{
"type": "data_bar",
},
)
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="A1"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData/>
<conditionalFormatting sqref="A1">
<cfRule type="dataBar" priority="1">
<dataBar>
<cfvo type="min" val="0"/>
<cfvo type="max" val="0"/>
<color rgb="FF638EC6"/>
</dataBar>
</cfRule>
</conditionalFormatting>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleWorksheet |
python | zarr-developers__zarr-python | tests/test_dtype/test_npy/test_float.py | {
"start": 3383,
"end": 6346
} | class ____(_BaseTestFloat):
test_cls = Float64
valid_dtype = (np.dtype(">f8"), np.dtype("<f8"))
invalid_dtype = (
np.dtype(np.int8),
np.dtype(np.uint16),
np.dtype(np.float32),
)
valid_json_v2 = (
{"name": ">f8", "object_codec_id": None},
{"name": "<f8", "object_codec_id": None},
)
valid_json_v3 = ("float64",)
invalid_json_v2 = (
"|f8",
"float64",
"|i1",
)
invalid_json_v3 = (
"|f8",
"|i1",
{"name": "float64", "configuration": {"endianness": "little"}},
)
scalar_v2_params = (
(Float64(), 1.0),
(Float64(), -1.0),
(Float64(), "NaN"),
(Float64(), "Infinity"),
)
scalar_v3_params = (
(Float64(), 1.0),
(Float64(), -1.0),
(Float64(), "NaN"),
(Float64(), "Infinity"),
)
cast_value_params = (
(Float64(), 1.0, np.float64(1.0)),
(Float64(), -1.0, np.float64(-1.0)),
(Float64(), "NaN", np.float64("NaN")),
)
invalid_scalar_params = ((Float64(), {"set!"}),)
hex_string_params = (
("0x7ff8000000000000", np.nan),
("0x7ff8000000000001", np.nan),
("0x3ff0000000000000", 1.0),
)
item_size_params = (Float64(),)
def test_check_json_floatish_str() -> None:
"""Test the check_json_floatish_str function."""
from zarr.core.dtype.npy.common import check_json_floatish_str
# Test valid string floats
assert check_json_floatish_str("3.14")
assert check_json_floatish_str("0.0")
assert check_json_floatish_str("-2.5")
assert check_json_floatish_str("1.0")
# Test invalid cases
assert not check_json_floatish_str("not_a_number")
assert not check_json_floatish_str("")
assert not check_json_floatish_str(3.14) # actual float, not string
assert not check_json_floatish_str(42) # int
assert not check_json_floatish_str(None)
# Test that special cases still work via float() conversion
# (these will be handled by existing functions first in practice)
assert check_json_floatish_str("NaN")
assert check_json_floatish_str("Infinity")
assert check_json_floatish_str("-Infinity")
def test_string_float_from_json_scalar() -> None:
"""Test that string representations of floats can be parsed by from_json_scalar."""
# Test with Float32
dtype_instance = Float32()
result = dtype_instance.from_json_scalar("3.14", zarr_format=3)
assert abs(result - np.float32(3.14)) < 1e-6
assert isinstance(result, np.float32)
# Test other cases
result = dtype_instance.from_json_scalar("0.0", zarr_format=3)
assert result == np.float32(0.0)
result = dtype_instance.from_json_scalar("-2.5", zarr_format=3)
assert result == np.float32(-2.5)
# Test that it works for v2 format too
result = dtype_instance.from_json_scalar("1.5", zarr_format=2)
assert result == np.float32(1.5)
| TestFloat64 |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 143480,
"end": 143963
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of ApproveVerifiableDomain"""
__schema__ = github_schema
__field_names__ = ("id", "client_mutation_id")
id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="id")
"""The ID of the verifiable domain to approve."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| ApproveVerifiableDomainInput |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/sensors.py | {
"start": 7996,
"end": 8684
} | class ____(graphene.Mutation):
"""Enable a sensor to launch runs for a job based on external state change."""
Output = graphene.NonNull(GrapheneSensorOrError)
class Arguments:
sensor_selector = graphene.NonNull(GrapheneSensorSelector)
class Meta:
name = "StartSensorMutation"
@capture_error
@require_permission_check(Permissions.EDIT_SENSOR)
def mutate(self, graphene_info: ResolveInfo, sensor_selector):
selector = SensorSelector.from_graphql_input(sensor_selector)
assert_permission_for_sensor(graphene_info, Permissions.EDIT_SENSOR, selector)
return start_sensor(graphene_info, selector)
| GrapheneStartSensorMutation |
python | realpython__materials | python-enum/comparison.py | {
"start": 149,
"end": 592
} | class ____(Enum):
RED = 1
YELLOW = 2
GREEN = 3
PEDESTRIAN_RED = 1
PEDESTRIAN_GREEN = 3
red = AtlanticAveSemaphore.RED
print(f"{red is AtlanticAveSemaphore.RED = }")
print(f"{red is not AtlanticAveSemaphore.RED = }")
yellow = AtlanticAveSemaphore.YELLOW
print(f"{yellow is red = }")
print(f"{yellow is not red = }")
pedestrian_red = AtlanticAveSemaphore.PEDESTRIAN_RED
print(f"{red is pedestrian_red = }")
| EighthAveSemaphore |
python | walkccc__LeetCode | solutions/1406. Stone Game III/1406.py | {
"start": 0,
"end": 586
} | class ____:
def stoneGameIII(self, stoneValue: list[int]) -> str:
@functools.lru_cache(None)
def dp(i: int) -> int:
"""
Returns the maximum relative score Alice can make with stoneValue[i..n).
"""
if i == len(stoneValue):
return 0
res = -math.inf
summ = 0
for j in range(i, i + 3):
if j == len(stoneValue):
break
summ += stoneValue[j]
res = max(res, summ - dp(j + 1))
return res
score = dp(0)
if score == 0:
return 'Tie'
return 'Alice' if score > 0 else 'Bob'
| Solution |
python | django__django | tests/view_tests/views.py | {
"start": 10391,
"end": 10837
} | class ____(ExceptionReporter):
html_template_path = TEMPLATES_PATH / "my_technical_500.html"
text_template_path = TEMPLATES_PATH / "my_technical_500.txt"
def custom_reporter_class_view(request):
request.exception_reporter_class = CustomExceptionReporter
try:
raise Exception
except Exception:
exc_info = sys.exc_info()
return technical_500_response(request, *exc_info)
| TemplateOverrideExceptionReporter |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/view_resolve_conflict_middle/package.py | {
"start": 228,
"end": 682
} | class ____(Package):
"""See view-resolve-conflict-top"""
has_code = False
version("0.1.0")
depends_on("view-file")
def install(self, spec, prefix):
bottom = spec["view-file"].prefix
os.mkdir(os.path.join(prefix, "bin"))
os.symlink(os.path.join(bottom, "bin", "x"), os.path.join(prefix, "bin", "x"))
os.symlink(os.path.join(bottom, "bin", "x"), os.path.join(prefix, "bin", "y"))
| ViewResolveConflictMiddle |
python | apache__airflow | providers/databricks/tests/unit/databricks/hooks/test_databricks.py | {
"start": 56979,
"end": 58426
} | class ____:
def test_is_terminal_true(self):
terminal_states = ["TERMINATING", "TERMINATED", "ERROR", "UNKNOWN"]
for state in terminal_states:
cluster_state = ClusterState(state, "")
assert cluster_state.is_terminal
def test_is_terminal_false(self):
non_terminal_states = ["PENDING", "RUNNING", "RESTARTING", "RESIZING"]
for state in non_terminal_states:
cluster_state = ClusterState(state, "")
assert not cluster_state.is_terminal
def test_is_terminal_with_nonexistent_life_cycle_state(self):
with pytest.raises(AirflowException):
ClusterState("blah", "")
def test_is_running(self):
running_states = ["RUNNING", "RESIZING"]
for state in running_states:
cluster_state = ClusterState(state, "")
assert cluster_state.is_running
def test_to_json(self):
cluster_state = ClusterState(CLUSTER_STATE, CLUSTER_STATE_MESSAGE)
expected = json.dumps(GET_CLUSTER_RESPONSE)
assert expected == cluster_state.to_json()
def test_from_json(self):
state = GET_CLUSTER_RESPONSE
expected = ClusterState(CLUSTER_STATE, CLUSTER_STATE_MESSAGE)
assert expected == ClusterState.from_json(json.dumps(state))
def create_aad_token_for_resource() -> AccessToken:
return AccessToken(expires_on=1575500666, token=TOKEN)
@pytest.mark.db_test
| TestClusterState |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_type_lookup.py | {
"start": 12314,
"end": 12674
} | class ____(AbstractFoo):
def __init__(self, x: int):
pass
def qux(self):
pass
@given(st.from_type(AbstractFoo))
def test_gen_abstract(foo):
# This requires that we correctly checked which of the subclasses
# could be resolved, rather than unconditionally using all of them.
assert isinstance(foo, ConcreteFoo2)
| ConcreteFoo2 |
python | kamyu104__LeetCode-Solutions | Python/knight-dialer.py | {
"start": 51,
"end": 1240
} | class ____(object):
def knightDialer(self, N):
"""
:type N: int
:rtype: int
"""
def matrix_expo(A, K):
result = [[int(i==j) for j in xrange(len(A))] \
for i in xrange(len(A))]
while K:
if K % 2:
result = matrix_mult(result, A)
A = matrix_mult(A, A)
K /= 2
return result
def matrix_mult(A, B):
ZB = zip(*B)
return [[sum(a*b for a, b in itertools.izip(row, col)) % M \
for col in ZB] for row in A]
M = 10**9 + 7
T = [[0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 1, 0, 0, 0, 1, 0],
[1, 0, 0, 1, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0, 0, 0, 0]]
return sum(map(sum, matrix_expo(T, N-1))) % M
# Time: O(n)
# Space: O(1)
| Solution |
python | pytorch__pytorch | torch/optim/rmsprop.py | {
"start": 551,
"end": 20612
} | class ____(Optimizer): # noqa: D101
def __init__(
self,
params: ParamsT,
lr: Union[float, Tensor] = 1e-2,
alpha: float = 0.99,
eps: float = 1e-8,
weight_decay: float = 0,
momentum: float = 0,
centered: bool = False,
capturable: bool = False,
foreach: Optional[bool] = None,
maximize: bool = False,
differentiable: bool = False,
) -> None: # noqa: D107
if isinstance(lr, Tensor) and lr.numel() != 1:
raise ValueError("Tensor lr must be 1-element")
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
if not 0.0 <= momentum:
raise ValueError(f"Invalid momentum value: {momentum}")
if not 0.0 <= weight_decay:
raise ValueError(f"Invalid weight_decay value: {weight_decay}")
if not 0.0 <= alpha:
raise ValueError(f"Invalid alpha value: {alpha}")
defaults = {
"lr": lr,
"momentum": momentum,
"alpha": alpha,
"eps": eps,
"centered": centered,
"weight_decay": weight_decay,
"capturable": capturable,
"foreach": foreach,
"maximize": maximize,
"differentiable": differentiable,
}
super().__init__(params, defaults)
def __setstate__(self, state): # noqa: D105
super().__setstate__(state)
for group in self.param_groups:
group.setdefault("momentum", 0)
group.setdefault("centered", False)
group.setdefault("foreach", None)
group.setdefault("maximize", False)
group.setdefault("differentiable", False)
group.setdefault("capturable", False)
for p in group["params"]:
p_state = self.state.get(p, [])
if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
step_val = float(p_state["step"])
p_state["step"] = (
torch.tensor(
step_val, dtype=_get_scalar_dtype(), device=p.device
)
if group["capturable"]
else torch.tensor(step_val, dtype=_get_scalar_dtype())
)
def _init_group(
self,
group,
params_with_grad,
grads,
square_avgs,
momentum_buffer_list,
grad_avgs,
state_steps,
):
has_complex = False
for p in group["params"]:
if p.grad is None:
continue
has_complex |= torch.is_complex(p)
params_with_grad.append(p)
if p.grad.is_sparse:
raise RuntimeError("RMSprop does not support sparse gradients")
grads.append(p.grad)
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = (
torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
if group["capturable"]
else torch.zeros((), dtype=_get_scalar_dtype())
)
state["square_avg"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
if group["momentum"] > 0:
state["momentum_buffer"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
if group["centered"]:
state["grad_avg"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
square_avgs.append(state["square_avg"])
state_steps.append(state["step"])
if group["momentum"] > 0:
momentum_buffer_list.append(state["momentum_buffer"])
if group["centered"]:
grad_avgs.append(state["grad_avg"])
return has_complex
@_use_grad_for_differentiable
def step(self, closure=None):
"""Perform a single optimization step.
Args:
closure (Callable, optional): A closure that reevaluates the model
and returns the loss.
"""
self._cuda_graph_capture_health_check()
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad: list[Tensor] = []
grads: list[Tensor] = []
square_avgs: list[Tensor] = []
grad_avgs: list[Tensor] = []
momentum_buffer_list: list[Tensor] = []
state_steps: list[Tensor] = []
has_complex = self._init_group(
group,
params_with_grad,
grads,
square_avgs,
momentum_buffer_list,
grad_avgs,
state_steps,
)
rmsprop(
params_with_grad,
grads,
square_avgs,
grad_avgs,
momentum_buffer_list,
state_steps,
lr=group["lr"],
alpha=group["alpha"],
eps=group["eps"],
weight_decay=group["weight_decay"],
momentum=group["momentum"],
centered=group["centered"],
foreach=group["foreach"],
maximize=group["maximize"],
differentiable=group["differentiable"],
capturable=group["capturable"],
has_complex=has_complex,
)
return loss
RMSprop.__doc__ = (
r"""Implements RMSprop algorithm.
.. math::
\begin{aligned}
&\rule{110mm}{0.4pt} \\
&\textbf{input} : \alpha \text{ (alpha)}, \: \gamma \text{ (lr)},
\: \theta_0 \text{ (params)}, \: f(\theta) \text{ (objective)} \\
&\hspace{13mm} \lambda \text{ (weight decay)},\: \mu \text{ (momentum)},
\: centered, \: \epsilon \text{ (epsilon)} \\
&\textbf{initialize} : v_0 \leftarrow 0 \text{ (square average)}, \:
\textbf{b}_0 \leftarrow 0 \text{ (buffer)}, \: g^{ave}_0 \leftarrow 0 \\[-1.ex]
&\rule{110mm}{0.4pt} \\
&\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
&\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
&\hspace{5mm}if \: \lambda \neq 0 \\
&\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\
&\hspace{5mm}v_t \leftarrow \alpha v_{t-1} + (1 - \alpha) g^2_t
\hspace{8mm} \\
&\hspace{5mm} \tilde{v_t} \leftarrow v_t \\
&\hspace{5mm}if \: centered \\
&\hspace{10mm} g^{ave}_t \leftarrow g^{ave}_{t-1} \alpha + (1-\alpha) g_t \\
&\hspace{10mm} \tilde{v_t} \leftarrow \tilde{v_t} - \big(g^{ave}_{t} \big)^2 \\
&\hspace{5mm}if \: \mu > 0 \\
&\hspace{10mm} \textbf{b}_t\leftarrow \mu \textbf{b}_{t-1} +
g_t/ \big(\sqrt{\tilde{v_t}} + \epsilon \big) \\
&\hspace{10mm} \theta_t \leftarrow \theta_{t-1} - \gamma \textbf{b}_t \\
&\hspace{5mm} else \\
&\hspace{10mm}\theta_t \leftarrow \theta_{t-1} -
\gamma g_t/ \big(\sqrt{\tilde{v_t}} + \epsilon \big) \hspace{3mm} \\
&\rule{110mm}{0.4pt} \\[-1.ex]
&\bf{return} \: \theta_t \\[-1.ex]
&\rule{110mm}{0.4pt} \\[-1.ex]
\end{aligned}
For further details regarding the algorithm we refer to
`lecture notes <https://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_ by G. Hinton.
and centered version `Generating Sequences
With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.
The implementation here takes the square root of the gradient average before
adding epsilon (note that TensorFlow interchanges these two operations). The effective
learning rate is thus :math:`\gamma/(\sqrt{v} + \epsilon)` where :math:`\gamma`
is the scheduled learning rate and :math:`v` is the weighted moving average
of the squared gradient.
"""
+ rf"""
Args:
{_params_doc}
lr (float, Tensor, optional): learning rate (default: 1e-2)
alpha (float, optional): smoothing constant (default: 0.99)
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
momentum (float, optional): momentum factor (default: 0)
centered (bool, optional) : if ``True``, compute the centered RMSProp,
the gradient is normalized by an estimation of its variance
{_capturable_doc}
{_foreach_doc}
{_maximize_doc}
{_differentiable_doc}
"""
)
def _single_tensor_rmsprop(
params: list[Tensor],
grads: list[Tensor],
square_avgs: list[Tensor],
grad_avgs: list[Tensor],
momentum_buffer_list: list[Tensor],
state_steps: list[Tensor],
*,
lr: float,
alpha: float,
eps: float,
weight_decay: float,
momentum: float,
centered: bool,
maximize: bool,
differentiable: bool,
capturable: bool,
has_complex: bool,
) -> None:
if not torch.jit.is_scripting():
lr = _to_scalar(lr)
for i, param in enumerate(params):
step = state_steps[i]
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if not torch.compiler.is_compiling() and capturable:
capturable_supported_devices = _get_capturable_supported_devices()
if not (
param.device.type == step.device.type
and param.device.type in capturable_supported_devices
):
raise AssertionError(
f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."
)
grad = grads[i]
grad = grad if not maximize else -grad
square_avg = square_avgs[i]
step += 1
if weight_decay != 0:
grad = grad.add(param, alpha=weight_decay)
is_complex_param = torch.is_complex(param)
if is_complex_param:
param = torch.view_as_real(param)
grad = torch.view_as_real(grad)
square_avg = torch.view_as_real(square_avg)
square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha)
if centered:
grad_avg = grad_avgs[i]
if is_complex_param:
grad_avg = torch.view_as_real(grad_avg)
grad_avg.lerp_(grad, 1 - alpha)
avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).sqrt_()
else:
avg = square_avg.sqrt()
if differentiable:
avg = avg.add(eps)
else:
avg = avg.add_(eps)
if momentum > 0:
buf = momentum_buffer_list[i]
if is_complex_param:
buf = torch.view_as_real(buf)
buf.mul_(momentum).addcdiv_(grad, avg)
param.add_(buf, alpha=-lr)
else:
param.addcdiv_(grad, avg, value=-lr)
def _multi_tensor_rmsprop(
params: list[Tensor],
grads: list[Tensor],
square_avgs: list[Tensor],
grad_avgs: list[Tensor],
momentum_buffer_list: list[Tensor],
state_steps: list[Tensor],
*,
lr: float,
alpha: float,
eps: float,
weight_decay: float,
momentum: float,
centered: bool,
maximize: bool,
differentiable: bool,
capturable: bool,
has_complex: bool,
) -> None:
if len(params) == 0:
return
if differentiable:
raise AssertionError("_foreach ops don't support autograd")
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if not torch.compiler.is_compiling() and capturable:
capturable_supported_devices = _get_capturable_supported_devices()
if not all(
p.device.type == step.device.type
and p.device.type in capturable_supported_devices
for p, step in zip(params, state_steps, strict=True)
):
raise AssertionError(
f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."
)
lr = _to_scalar(lr)
grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
[params, grads, square_avgs, grad_avgs, momentum_buffer_list, state_steps] # type: ignore[list-item]
)
for (
(
grouped_params_,
grouped_grads_,
grouped_square_avgs_,
grouped_grad_avgs_,
grouped_momentum_buffer_list_,
grouped_state_steps_,
)
), _ in grouped_tensors.values():
grouped_params = cast(list[Tensor], grouped_params_)
grouped_grads = cast(list[Tensor], grouped_grads_)
grouped_square_avgs = cast(list[Tensor], grouped_square_avgs_)
grouped_state_steps = cast(list[Tensor], grouped_state_steps_)
if has_complex:
state_and_grads = [grouped_grads, grouped_square_avgs]
if momentum > 0:
grouped_momentum_buffer_list = cast(
list[Tensor], grouped_momentum_buffer_list_
)
state_and_grads.append(grouped_momentum_buffer_list)
if centered:
grouped_grad_avgs = cast(list[Tensor], grouped_grad_avgs_)
state_and_grads.append(grouped_grad_avgs)
_view_as_real(grouped_params, *state_and_grads)
if maximize:
grouped_grads = torch._foreach_neg(grouped_grads) # type: ignore[assignment]
# Update steps
# If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
# and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
# wrapped it once now. The alpha is required to assure we go to the right overload.
if not torch.compiler.is_compiling() and grouped_state_steps[0].is_cpu:
torch._foreach_add_(
grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
)
else:
torch._foreach_add_(grouped_state_steps, 1)
if weight_decay != 0:
# Reuse the intermediate memory (grouped_grads) already allocated for maximize
if maximize:
torch._foreach_add_(grouped_grads, grouped_params, alpha=weight_decay)
else:
grouped_grads = torch._foreach_add( # type: ignore[assignment]
grouped_grads, grouped_params, alpha=weight_decay
)
torch._foreach_mul_(grouped_square_avgs, alpha)
torch._foreach_addcmul_(
grouped_square_avgs, grouped_grads, grouped_grads, value=1 - alpha
)
if centered:
grouped_grad_avgs = cast(list[Tensor], grouped_grad_avgs_)
torch._foreach_lerp_(grouped_grad_avgs, grouped_grads, 1 - alpha)
avg = torch._foreach_addcmul(
grouped_square_avgs, grouped_grad_avgs, grouped_grad_avgs, value=-1
)
torch._foreach_sqrt_(avg)
torch._foreach_add_(avg, eps)
else:
avg = torch._foreach_sqrt(grouped_square_avgs)
torch._foreach_add_(avg, eps)
if momentum > 0:
grouped_momentum_buffer_list = cast(
list[Tensor], grouped_momentum_buffer_list_
)
torch._foreach_mul_(grouped_momentum_buffer_list, momentum)
torch._foreach_addcdiv_(grouped_momentum_buffer_list, grouped_grads, avg)
# If LR is a tensor, the else branch will internally call item()
# which will cause silent incorrectness if we are capturing
if capturable and isinstance(lr, torch.Tensor):
momentum_lr = torch._foreach_mul(grouped_momentum_buffer_list, -lr)
torch._foreach_add_(grouped_params, momentum_lr)
else:
torch._foreach_add_(
grouped_params, grouped_momentum_buffer_list, alpha=-lr
)
else:
# If LR is a tensor, the else branch will internally call item()
# which will cause silent incorrectness if we are capturing
if capturable and isinstance(lr, torch.Tensor):
torch._foreach_div_(avg, -lr)
torch._foreach_addcdiv_(grouped_params, grouped_grads, avg)
else:
torch._foreach_addcdiv_(grouped_params, grouped_grads, avg, value=-lr)
@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_rmsprop)
def rmsprop(
params: list[Tensor],
grads: list[Tensor],
square_avgs: list[Tensor],
grad_avgs: list[Tensor],
momentum_buffer_list: list[Tensor],
state_steps: list[Tensor],
# kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
# setting this as kwarg for now as functional API is compiled by torch/distributed/optim
foreach: Optional[bool] = None,
maximize: bool = False,
differentiable: bool = False,
capturable: bool = False,
has_complex: bool = False,
*,
lr: float,
alpha: float,
eps: float,
weight_decay: float,
momentum: float,
centered: bool,
) -> None:
r"""Functional API that performs rmsprop algorithm computation.
See :class:`~torch.optim.RMSProp` for details.
"""
# this check is slow during compilation, so we skip it
# if it's strictly needed we can add this check back in dynamo
if not torch.compiler.is_compiling() and not all(
isinstance(t, torch.Tensor) for t in state_steps
):
raise RuntimeError(
"API has changed, `state_steps` argument must contain a list of singleton tensors"
)
if foreach is None:
_, foreach = _default_to_fused_or_foreach(
params, differentiable, use_fused=False
)
if foreach and torch.jit.is_scripting():
raise RuntimeError("torch.jit.script not supported with foreach optimizers")
if foreach and not torch.jit.is_scripting():
func = _multi_tensor_rmsprop
else:
func = _single_tensor_rmsprop
func(
params,
grads,
square_avgs,
grad_avgs,
momentum_buffer_list,
state_steps,
lr=lr,
alpha=alpha,
eps=eps,
weight_decay=weight_decay,
momentum=momentum,
centered=centered,
maximize=maximize,
capturable=capturable,
differentiable=differentiable,
has_complex=has_complex,
)
| RMSprop |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/xcom.py | {
"start": 2545,
"end": 2705
} | class ____(BaseModel):
"""XCom Collection serializer for responses."""
xcom_entries: Iterable[XComResponse]
total_entries: int
| XComCollectionResponse |
python | tensorflow__tensorflow | tensorflow/python/framework/errors_impl.py | {
"start": 11445,
"end": 12236
} | class ____(OpError):
"""Raised when an entity that we attempted to create already exists.
An API raises this this error to avoid overwriting an existing resource,
value, etc. Calling a creation API multiple times with the same arguments
could raise this error if the creation API is not idempotent.
For example, running an operation that saves a file
(e.g. `tf.saved_model.save`)
could potentially raise this exception if an explicit filename for an
existing file was passed.
"""
def __init__(self, node_def, op, message, *args):
"""Creates an `AlreadyExistsError`."""
super(AlreadyExistsError, self).__init__(node_def, op, message,
ALREADY_EXISTS, *args)
@tf_export("errors.PermissionDeniedError")
| AlreadyExistsError |
python | explosion__spaCy | spacy/training/corpus.py | {
"start": 10169,
"end": 11978
} | class ____:
"""Iterate Example objects from a file or directory of plain text
UTF-8 files with one line per doc.
path (Path): The directory or filename to read from.
min_length (int): Minimum document length (in tokens). Shorter documents
will be skipped. Defaults to 0, which indicates no limit.
max_length (int): Maximum document length (in tokens). Longer documents will
be skipped. Defaults to 0, which indicates no limit.
DOCS: https://spacy.io/api/corpus#plaintextcorpus
"""
file_type = "txt"
def __init__(
self,
path: Optional[Union[str, Path]],
*,
min_length: int = 0,
max_length: int = 0,
) -> None:
self.path = util.ensure_path(path)
self.min_length = min_length
self.max_length = max_length
def __call__(self, nlp: "Language") -> Iterator[Example]:
"""Yield examples from the data.
nlp (Language): The current nlp object.
YIELDS (Example): The example objects.
DOCS: https://spacy.io/api/corpus#plaintextcorpus-call
"""
for loc in walk_corpus(self.path, ".txt"):
with open(loc, encoding="utf-8") as f:
for text in f:
text = text.rstrip("\r\n")
if len(text):
doc = nlp.make_doc(text)
if self.min_length >= 1 and len(doc) < self.min_length:
continue
elif self.max_length >= 1 and len(doc) > self.max_length:
continue
# We don't *need* an example here, but it seems nice to
# make it match the Corpus signature.
yield Example(doc, doc.copy())
| PlainTextCorpus |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 67051,
"end": 67395
} | class ____(sgqlc.types.Enum):
"""Properties by which pull_requests connections can be ordered.
Enumeration Choices:
* `CREATED_AT`: Order pull_requests by creation time
* `UPDATED_AT`: Order pull_requests by update time
"""
__schema__ = github_schema
__choices__ = ("CREATED_AT", "UPDATED_AT")
| PullRequestOrderField |
python | redis__redis-py | redis/connection.py | {
"start": 21483,
"end": 43693
} | class ____(MaintNotificationsAbstractConnection, ConnectionInterface):
"Manages communication to and from a Redis server"
def __init__(
self,
db: int = 0,
password: Optional[str] = None,
socket_timeout: Optional[float] = None,
socket_connect_timeout: Optional[float] = None,
retry_on_timeout: bool = False,
retry_on_error: Union[Iterable[Type[Exception]], object] = SENTINEL,
encoding: str = "utf-8",
encoding_errors: str = "strict",
decode_responses: bool = False,
parser_class=DefaultParser,
socket_read_size: int = 65536,
health_check_interval: int = 0,
client_name: Optional[str] = None,
lib_name: Optional[str] = "redis-py",
lib_version: Optional[str] = get_lib_version(),
username: Optional[str] = None,
retry: Union[Any, None] = None,
redis_connect_func: Optional[Callable[[], None]] = None,
credential_provider: Optional[CredentialProvider] = None,
protocol: Optional[int] = 2,
command_packer: Optional[Callable[[], None]] = None,
event_dispatcher: Optional[EventDispatcher] = None,
maint_notifications_config: Optional[MaintNotificationsConfig] = None,
maint_notifications_pool_handler: Optional[
MaintNotificationsPoolHandler
] = None,
maintenance_state: "MaintenanceState" = MaintenanceState.NONE,
maintenance_notification_hash: Optional[int] = None,
orig_host_address: Optional[str] = None,
orig_socket_timeout: Optional[float] = None,
orig_socket_connect_timeout: Optional[float] = None,
):
"""
Initialize a new Connection.
To specify a retry policy for specific errors, first set
`retry_on_error` to a list of the error/s to retry on, then set
`retry` to a valid `Retry` object.
To retry on TimeoutError, `retry_on_timeout` can also be set to `True`.
"""
if (username or password) and credential_provider is not None:
raise DataError(
"'username' and 'password' cannot be passed along with 'credential_"
"provider'. Please provide only one of the following arguments: \n"
"1. 'password' and (optional) 'username'\n"
"2. 'credential_provider'"
)
if event_dispatcher is None:
self._event_dispatcher = EventDispatcher()
else:
self._event_dispatcher = event_dispatcher
self.pid = os.getpid()
self.db = db
self.client_name = client_name
self.lib_name = lib_name
self.lib_version = lib_version
self.credential_provider = credential_provider
self.password = password
self.username = username
self._socket_timeout = socket_timeout
if socket_connect_timeout is None:
socket_connect_timeout = socket_timeout
self._socket_connect_timeout = socket_connect_timeout
self.retry_on_timeout = retry_on_timeout
if retry_on_error is SENTINEL:
retry_on_errors_list = []
else:
retry_on_errors_list = list(retry_on_error)
if retry_on_timeout:
# Add TimeoutError to the errors list to retry on
retry_on_errors_list.append(TimeoutError)
self.retry_on_error = retry_on_errors_list
if retry or self.retry_on_error:
if retry is None:
self.retry = Retry(NoBackoff(), 1)
else:
# deep-copy the Retry object as it is mutable
self.retry = copy.deepcopy(retry)
if self.retry_on_error:
# Update the retry's supported errors with the specified errors
self.retry.update_supported_errors(self.retry_on_error)
else:
self.retry = Retry(NoBackoff(), 0)
self.health_check_interval = health_check_interval
self.next_health_check = 0
self.redis_connect_func = redis_connect_func
self.encoder = Encoder(encoding, encoding_errors, decode_responses)
self.handshake_metadata = None
self._sock = None
self._socket_read_size = socket_read_size
self._connect_callbacks = []
self._buffer_cutoff = 6000
self._re_auth_token: Optional[TokenInterface] = None
try:
p = int(protocol)
except TypeError:
p = DEFAULT_RESP_VERSION
except ValueError:
raise ConnectionError("protocol must be an integer")
finally:
if p < 2 or p > 3:
raise ConnectionError("protocol must be either 2 or 3")
# p = DEFAULT_RESP_VERSION
self.protocol = p
if self.protocol == 3 and parser_class == _RESP2Parser:
# If the protocol is 3 but the parser is RESP2, change it to RESP3
# This is needed because the parser might be set before the protocol
# or might be provided as a kwarg to the constructor
# We need to react on discrepancy only for RESP2 and RESP3
# as hiredis supports both
parser_class = _RESP3Parser
self.set_parser(parser_class)
self._command_packer = self._construct_command_packer(command_packer)
self._should_reconnect = False
# Set up maintenance notifications
MaintNotificationsAbstractConnection.__init__(
self,
maint_notifications_config,
maint_notifications_pool_handler,
maintenance_state,
maintenance_notification_hash,
orig_host_address,
orig_socket_timeout,
orig_socket_connect_timeout,
self._parser,
)
def __repr__(self):
repr_args = ",".join([f"{k}={v}" for k, v in self.repr_pieces()])
return f"<{self.__class__.__module__}.{self.__class__.__name__}({repr_args})>"
@abstractmethod
def repr_pieces(self):
pass
def __del__(self):
try:
self.disconnect()
except Exception:
pass
def _construct_command_packer(self, packer):
if packer is not None:
return packer
elif HIREDIS_AVAILABLE:
return HiredisRespSerializer()
else:
return PythonRespSerializer(self._buffer_cutoff, self.encoder.encode)
def register_connect_callback(self, callback):
"""
Register a callback to be called when the connection is established either
initially or reconnected. This allows listeners to issue commands that
are ephemeral to the connection, for example pub/sub subscription or
key tracking. The callback must be a _method_ and will be kept as
a weak reference.
"""
wm = weakref.WeakMethod(callback)
if wm not in self._connect_callbacks:
self._connect_callbacks.append(wm)
def deregister_connect_callback(self, callback):
"""
De-register a previously registered callback. It will no-longer receive
notifications on connection events. Calling this is not required when the
listener goes away, since the callbacks are kept as weak methods.
"""
try:
self._connect_callbacks.remove(weakref.WeakMethod(callback))
except ValueError:
pass
def set_parser(self, parser_class):
"""
Creates a new instance of parser_class with socket size:
_socket_read_size and assigns it to the parser for the connection
:param parser_class: The required parser class
"""
self._parser = parser_class(socket_read_size=self._socket_read_size)
def _get_parser(self) -> Union[_HiredisParser, _RESP3Parser, _RESP2Parser]:
return self._parser
def connect(self):
"Connects to the Redis server if not already connected"
self.connect_check_health(check_health=True)
def connect_check_health(
self, check_health: bool = True, retry_socket_connect: bool = True
):
if self._sock:
return
try:
if retry_socket_connect:
sock = self.retry.call_with_retry(
lambda: self._connect(), lambda error: self.disconnect(error)
)
else:
sock = self._connect()
except socket.timeout:
raise TimeoutError("Timeout connecting to server")
except OSError as e:
raise ConnectionError(self._error_message(e))
self._sock = sock
try:
if self.redis_connect_func is None:
# Use the default on_connect function
self.on_connect_check_health(check_health=check_health)
else:
# Use the passed function redis_connect_func
self.redis_connect_func(self)
except RedisError:
# clean up after any error in on_connect
self.disconnect()
raise
# run any user callbacks. right now the only internal callback
# is for pubsub channel/pattern resubscription
# first, remove any dead weakrefs
self._connect_callbacks = [ref for ref in self._connect_callbacks if ref()]
for ref in self._connect_callbacks:
callback = ref()
if callback:
callback(self)
@abstractmethod
def _connect(self):
pass
@abstractmethod
def _host_error(self):
pass
def _error_message(self, exception):
return format_error_message(self._host_error(), exception)
def on_connect(self):
self.on_connect_check_health(check_health=True)
def on_connect_check_health(self, check_health: bool = True):
"Initialize the connection, authenticate and select a database"
self._parser.on_connect(self)
parser = self._parser
auth_args = None
# if credential provider or username and/or password are set, authenticate
if self.credential_provider or (self.username or self.password):
cred_provider = (
self.credential_provider
or UsernamePasswordCredentialProvider(self.username, self.password)
)
auth_args = cred_provider.get_credentials()
# if resp version is specified and we have auth args,
# we need to send them via HELLO
if auth_args and self.protocol not in [2, "2"]:
if isinstance(self._parser, _RESP2Parser):
self.set_parser(_RESP3Parser)
# update cluster exception classes
self._parser.EXCEPTION_CLASSES = parser.EXCEPTION_CLASSES
self._parser.on_connect(self)
if len(auth_args) == 1:
auth_args = ["default", auth_args[0]]
# avoid checking health here -- PING will fail if we try
# to check the health prior to the AUTH
self.send_command(
"HELLO", self.protocol, "AUTH", *auth_args, check_health=False
)
self.handshake_metadata = self.read_response()
# if response.get(b"proto") != self.protocol and response.get(
# "proto"
# ) != self.protocol:
# raise ConnectionError("Invalid RESP version")
elif auth_args:
# avoid checking health here -- PING will fail if we try
# to check the health prior to the AUTH
self.send_command("AUTH", *auth_args, check_health=False)
try:
auth_response = self.read_response()
except AuthenticationWrongNumberOfArgsError:
# a username and password were specified but the Redis
# server seems to be < 6.0.0 which expects a single password
# arg. retry auth with just the password.
# https://github.com/andymccurdy/redis-py/issues/1274
self.send_command("AUTH", auth_args[-1], check_health=False)
auth_response = self.read_response()
if str_if_bytes(auth_response) != "OK":
raise AuthenticationError("Invalid Username or Password")
# if resp version is specified, switch to it
elif self.protocol not in [2, "2"]:
if isinstance(self._parser, _RESP2Parser):
self.set_parser(_RESP3Parser)
# update cluster exception classes
self._parser.EXCEPTION_CLASSES = parser.EXCEPTION_CLASSES
self._parser.on_connect(self)
self.send_command("HELLO", self.protocol, check_health=check_health)
self.handshake_metadata = self.read_response()
if (
self.handshake_metadata.get(b"proto") != self.protocol
and self.handshake_metadata.get("proto") != self.protocol
):
raise ConnectionError("Invalid RESP version")
# Activate maintenance notifications for this connection
# if enabled in the configuration
# This is a no-op if maintenance notifications are not enabled
self.activate_maint_notifications_handling_if_enabled(check_health=check_health)
# if a client_name is given, set it
if self.client_name:
self.send_command(
"CLIENT",
"SETNAME",
self.client_name,
check_health=check_health,
)
if str_if_bytes(self.read_response()) != "OK":
raise ConnectionError("Error setting client name")
try:
# set the library name and version
if self.lib_name:
self.send_command(
"CLIENT",
"SETINFO",
"LIB-NAME",
self.lib_name,
check_health=check_health,
)
self.read_response()
except ResponseError:
pass
try:
if self.lib_version:
self.send_command(
"CLIENT",
"SETINFO",
"LIB-VER",
self.lib_version,
check_health=check_health,
)
self.read_response()
except ResponseError:
pass
# if a database is specified, switch to it
if self.db:
self.send_command("SELECT", self.db, check_health=check_health)
if str_if_bytes(self.read_response()) != "OK":
raise ConnectionError("Invalid Database")
def disconnect(self, *args):
"Disconnects from the Redis server"
self._parser.on_disconnect()
conn_sock = self._sock
self._sock = None
# reset the reconnect flag
self.reset_should_reconnect()
if conn_sock is None:
return
if os.getpid() == self.pid:
try:
conn_sock.shutdown(socket.SHUT_RDWR)
except (OSError, TypeError):
pass
try:
conn_sock.close()
except OSError:
pass
def mark_for_reconnect(self):
self._should_reconnect = True
def should_reconnect(self):
return self._should_reconnect
def reset_should_reconnect(self):
self._should_reconnect = False
def _send_ping(self):
"""Send PING, expect PONG in return"""
self.send_command("PING", check_health=False)
if str_if_bytes(self.read_response()) != "PONG":
raise ConnectionError("Bad response from PING health check")
def _ping_failed(self, error):
"""Function to call when PING fails"""
self.disconnect()
def check_health(self):
"""Check the health of the connection with a PING/PONG"""
if self.health_check_interval and time.monotonic() > self.next_health_check:
self.retry.call_with_retry(self._send_ping, self._ping_failed)
def send_packed_command(self, command, check_health=True):
"""Send an already packed command to the Redis server"""
if not self._sock:
self.connect_check_health(check_health=False)
# guard against health check recursion
if check_health:
self.check_health()
try:
if isinstance(command, str):
command = [command]
for item in command:
self._sock.sendall(item)
except socket.timeout:
self.disconnect()
raise TimeoutError("Timeout writing to socket")
except OSError as e:
self.disconnect()
if len(e.args) == 1:
errno, errmsg = "UNKNOWN", e.args[0]
else:
errno = e.args[0]
errmsg = e.args[1]
raise ConnectionError(f"Error {errno} while writing to socket. {errmsg}.")
except BaseException:
# BaseExceptions can be raised when a socket send operation is not
# finished, e.g. due to a timeout. Ideally, a caller could then re-try
# to send un-sent data. However, the send_packed_command() API
# does not support it so there is no point in keeping the connection open.
self.disconnect()
raise
def send_command(self, *args, **kwargs):
"""Pack and send a command to the Redis server"""
self.send_packed_command(
self._command_packer.pack(*args),
check_health=kwargs.get("check_health", True),
)
def can_read(self, timeout=0):
"""Poll the socket to see if there's data that can be read."""
sock = self._sock
if not sock:
self.connect()
host_error = self._host_error()
try:
return self._parser.can_read(timeout)
except OSError as e:
self.disconnect()
raise ConnectionError(f"Error while reading from {host_error}: {e.args}")
def read_response(
self,
disable_decoding=False,
*,
disconnect_on_error=True,
push_request=False,
):
"""Read the response from a previously sent command"""
host_error = self._host_error()
try:
if self.protocol in ["3", 3]:
response = self._parser.read_response(
disable_decoding=disable_decoding, push_request=push_request
)
else:
response = self._parser.read_response(disable_decoding=disable_decoding)
except socket.timeout:
if disconnect_on_error:
self.disconnect()
raise TimeoutError(f"Timeout reading from {host_error}")
except OSError as e:
if disconnect_on_error:
self.disconnect()
raise ConnectionError(f"Error while reading from {host_error} : {e.args}")
except BaseException:
# Also by default close in case of BaseException. A lot of code
# relies on this behaviour when doing Command/Response pairs.
# See #1128.
if disconnect_on_error:
self.disconnect()
raise
if self.health_check_interval:
self.next_health_check = time.monotonic() + self.health_check_interval
if isinstance(response, ResponseError):
try:
raise response
finally:
del response # avoid creating ref cycles
return response
def pack_command(self, *args):
"""Pack a series of arguments into the Redis protocol"""
return self._command_packer.pack(*args)
def pack_commands(self, commands):
"""Pack multiple commands into the Redis protocol"""
output = []
pieces = []
buffer_length = 0
buffer_cutoff = self._buffer_cutoff
for cmd in commands:
for chunk in self._command_packer.pack(*cmd):
chunklen = len(chunk)
if (
buffer_length > buffer_cutoff
or chunklen > buffer_cutoff
or isinstance(chunk, memoryview)
):
if pieces:
output.append(SYM_EMPTY.join(pieces))
buffer_length = 0
pieces = []
if chunklen > buffer_cutoff or isinstance(chunk, memoryview):
output.append(chunk)
else:
pieces.append(chunk)
buffer_length += chunklen
if pieces:
output.append(SYM_EMPTY.join(pieces))
return output
def get_protocol(self) -> Union[int, str]:
return self.protocol
@property
def handshake_metadata(self) -> Union[Dict[bytes, bytes], Dict[str, str]]:
return self._handshake_metadata
@handshake_metadata.setter
def handshake_metadata(self, value: Union[Dict[bytes, bytes], Dict[str, str]]):
self._handshake_metadata = value
def set_re_auth_token(self, token: TokenInterface):
self._re_auth_token = token
def re_auth(self):
if self._re_auth_token is not None:
self.send_command(
"AUTH",
self._re_auth_token.try_get("oid"),
self._re_auth_token.get_value(),
)
self.read_response()
self._re_auth_token = None
def _get_socket(self) -> Optional[socket.socket]:
return self._sock
@property
def socket_timeout(self) -> Optional[Union[float, int]]:
return self._socket_timeout
@socket_timeout.setter
def socket_timeout(self, value: Optional[Union[float, int]]):
self._socket_timeout = value
@property
def socket_connect_timeout(self) -> Optional[Union[float, int]]:
return self._socket_connect_timeout
@socket_connect_timeout.setter
def socket_connect_timeout(self, value: Optional[Union[float, int]]):
self._socket_connect_timeout = value
| AbstractConnection |
python | readthedocs__readthedocs.org | readthedocs/search/tests/test_api.py | {
"start": 27237,
"end": 27296
} | class ____(BaseTestDocumentSearch):
pass
| TestDocumentSearch |
python | realpython__materials | syntactic-sugar-python/twice.py | {
"start": 0,
"end": 254
} | class ____:
def __init__(self, items):
self.items = list(items)
def __iter__(self):
yield from self.items
print("Halfway there!")
yield from self.items
for number in Twice([1, 2, 3]):
print(f"-> {number}")
| Twice |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 319845,
"end": 320595
} | class ____:
def test_edge_cases(self):
with np.errstate(all='raise'):
assert_equal(stats.triang.pdf(0, 0), 2.)
assert_equal(stats.triang.pdf(0.5, 0), 1.)
assert_equal(stats.triang.pdf(1, 0), 0.)
assert_equal(stats.triang.pdf(0, 1), 0)
assert_equal(stats.triang.pdf(0.5, 1), 1.)
assert_equal(stats.triang.pdf(1, 1), 2)
assert_equal(stats.triang.cdf(0., 0.), 0.)
assert_equal(stats.triang.cdf(0.5, 0.), 0.75)
assert_equal(stats.triang.cdf(1.0, 0.), 1.0)
assert_equal(stats.triang.cdf(0., 1.), 0.)
assert_equal(stats.triang.cdf(0.5, 1.), 0.25)
assert_equal(stats.triang.cdf(1., 1.), 1)
| TestTriang |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_work_queues.py | {
"start": 20863,
"end": 21478
} | class ____:
async def test_read_work_queue(self, client, work_queue):
response = await client.get(f"/work_queues/{work_queue.id}")
assert response.status_code == status.HTTP_200_OK
assert response.json()["id"] == str(work_queue.id)
assert response.json()["name"] == work_queue.name
assert response.json()["work_pool_name"] == "default-agent-pool"
async def test_read_work_queue_returns_404_if_does_not_exist(self, client):
response = await client.get(f"/work_queues/{uuid4()}")
assert response.status_code == status.HTTP_404_NOT_FOUND
| TestReadWorkQueue |
python | coleifer__peewee | peewee.py | {
"start": 57080,
"end": 59627
} | class ____(Node):
def __init__(self, action=None, update=None, preserve=None, where=None,
conflict_target=None, conflict_where=None,
conflict_constraint=None):
self._action = action
self._update = update
self._preserve = ensure_tuple(preserve)
self._where = where
if conflict_target is not None and conflict_constraint is not None:
raise ValueError('only one of "conflict_target" and '
'"conflict_constraint" may be specified.')
self._conflict_target = ensure_tuple(conflict_target)
self._conflict_where = conflict_where
self._conflict_constraint = conflict_constraint
def get_conflict_statement(self, ctx, query):
return ctx.state.conflict_statement(self, query)
def get_conflict_update(self, ctx, query):
return ctx.state.conflict_update(self, query)
@Node.copy
def preserve(self, *columns):
self._preserve = columns
@Node.copy
def update(self, _data=None, **kwargs):
if _data and kwargs and not isinstance(_data, dict):
raise ValueError('Cannot mix data with keyword arguments in the '
'OnConflict update method.')
_data = _data or {}
if kwargs:
_data.update(kwargs)
self._update = _data
@Node.copy
def where(self, *expressions):
if self._where is not None:
expressions = (self._where,) + expressions
self._where = reduce(operator.and_, expressions)
@Node.copy
def conflict_target(self, *constraints):
self._conflict_constraint = None
self._conflict_target = constraints
@Node.copy
def conflict_where(self, *expressions):
if self._conflict_where is not None:
expressions = (self._conflict_where,) + expressions
self._conflict_where = reduce(operator.and_, expressions)
@Node.copy
def conflict_constraint(self, constraint):
self._conflict_constraint = constraint
self._conflict_target = None
def database_required(method):
@wraps(method)
def inner(self, database=None, *args, **kwargs):
database = self._database if database is None else database
if not database:
raise InterfaceError('Query must be bound to a database in order '
'to call "%s".' % method.__name__)
return method(self, database, *args, **kwargs)
return inner
# BASE QUERY INTERFACE.
| OnConflict |
python | neetcode-gh__leetcode | python/1498-number-of-subsequences-that-satisfy-the-given-sum-condition.py | {
"start": 56,
"end": 450
} | class ____:
def numSubseq(self, nums: List[int], target: int) -> int:
nums.sort()
res, mod = 0, (10**9 + 7)
left, right = 0, len(nums) - 1
while left <= right:
if (nums[left] + nums[right]) > target:
right -= 1
else:
res += 1 << (right - left)
left += 1
return res % mod
| Solution |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/core_tests/test_data_time.py | {
"start": 7137,
"end": 16809
} | class ____(NamedTuple):
before_partitions: list[str]
after_partitions: list[str]
expected_time: Optional[datetime.datetime]
scenarios = {
"empty": PartitionedDataTimeScenario(
before_partitions=[],
after_partitions=[],
expected_time=None,
),
"first_missing": PartitionedDataTimeScenario(
before_partitions=["2023-01-02", "2023-01-03"],
after_partitions=[],
expected_time=None,
),
"some_filled": PartitionedDataTimeScenario(
before_partitions=["2023-01-01", "2023-01-02", "2023-01-03"],
after_partitions=[],
expected_time=datetime.datetime(2023, 1, 4, tzinfo=datetime.timezone.utc),
),
"middle_missing": PartitionedDataTimeScenario(
# 2023-01-04 is missing
before_partitions=["2023-01-01", "2023-01-02", "2023-01-03", "2023-01-05", "2023-01-06"],
after_partitions=[],
expected_time=datetime.datetime(2023, 1, 4, tzinfo=datetime.timezone.utc),
),
"new_duplicate_partitions": PartitionedDataTimeScenario(
before_partitions=["2023-01-01", "2023-01-02", "2023-01-03", "2023-01-04"],
after_partitions=["2023-01-01", "2023-01-02", "2023-01-03", "2023-01-03"],
expected_time=datetime.datetime(2023, 1, 5, tzinfo=datetime.timezone.utc),
),
"new_duplicate_partitions2": PartitionedDataTimeScenario(
before_partitions=["2023-01-01", "2023-01-02"],
after_partitions=["2023-01-01", "2023-01-01", "2023-01-01", "2023-01-01"],
expected_time=datetime.datetime(2023, 1, 3, tzinfo=datetime.timezone.utc),
),
"net_new_partitions": PartitionedDataTimeScenario(
before_partitions=["2023-01-01", "2023-01-02", "2023-01-03"],
after_partitions=["2023-01-04", "2023-01-05", "2023-01-06"],
expected_time=datetime.datetime(2023, 1, 4, tzinfo=datetime.timezone.utc),
),
"net_new_partitions2": PartitionedDataTimeScenario(
before_partitions=["2023-01-01", "2023-01-02", "2023-01-03", "2023-01-04"],
after_partitions=[
"2023-01-01",
"2023-01-01",
"2023-01-01",
"2023-01-06",
"2023-01-06",
"2023-01-06",
],
expected_time=datetime.datetime(2023, 1, 5, tzinfo=datetime.timezone.utc),
),
"net_new_partitions_with_middle_missing": PartitionedDataTimeScenario(
before_partitions=["2023-01-01", "2023-01-02", "2023-01-03", "2023-01-05", "2023-01-06"],
after_partitions=["2023-01-04", "2023-01-04"],
expected_time=datetime.datetime(2023, 1, 4, tzinfo=datetime.timezone.utc),
),
}
@pytest.mark.parametrize("scenario", list(scenarios.values()), ids=list(scenarios.keys()))
def test_partitioned_data_time(scenario):
with DagsterInstance.ephemeral() as instance, freeze_time(create_datetime(2023, 1, 7)):
_materialize_partitions(instance, scenario.before_partitions)
record = _get_record(instance=instance)
_materialize_partitions(instance, scenario.after_partitions)
data_time_queryer = CachingDataTimeResolver(
instance_queryer=_get_instance_queryer(instance, partition_repo.asset_graph),
)
data_time = data_time_queryer.get_data_time_by_key_for_record(record=record)
if scenario.expected_time is None:
assert data_time == {} or data_time == {dg.AssetKey("partitioned_asset"): None}
else:
assert data_time == {dg.AssetKey("partitioned_asset"): scenario.expected_time}
@dg.observable_source_asset
def sA():
return dg.DataVersion(str(random.random()))
@dg.observable_source_asset
def sB():
return dg.DataVersion(str(random.random()))
@dg.asset(deps=[sA])
def A():
pass
@dg.asset(deps=[sB])
def B():
pass
@dg.asset(deps=[B])
def B2():
pass
@dg.asset(deps=[sA, sB])
def AB():
pass
@dg.repository
def versioned_repo():
return [sA, sB, A, B, AB, B2]
def _get_instance_queryer(
instance: DagsterInstance, asset_graph: AssetGraph
) -> CachingInstanceQueryer:
return AssetGraphView(
temporal_context=TemporalContext(effective_dt=get_current_datetime(), last_event_id=None),
instance=instance,
asset_graph=asset_graph,
).get_inner_queryer_for_back_compat()
def observe_sources(*args):
def observe_sources_fn(*, instance, times_by_key, **kwargs):
for arg in args:
key = dg.AssetKey(arg)
observe(assets=[versioned_repo.asset_graph.get(key).assets_def], instance=instance)
latest_record = instance.get_latest_data_version_record(key, is_source=True)
latest_timestamp = latest_record.timestamp
times_by_key[key].append(
datetime.datetime.fromtimestamp(latest_timestamp, tz=datetime.timezone.utc)
)
return observe_sources_fn
def run_assets(*args):
def run_assets_fn(*, instance, **kwargs):
assets = [versioned_repo.asset_graph.get(dg.AssetKey(arg)).assets_def for arg in args]
dg.materialize_to_memory(assets=assets, instance=instance)
return run_assets_fn
def assert_has_current_time(key_str):
def assert_has_current_time_fn(*, instance, evaluation_time, **kwargs):
resolver = CachingDataTimeResolver(
instance_queryer=_get_instance_queryer(instance, versioned_repo.asset_graph),
)
data_time = resolver.get_current_data_time(
dg.AssetKey(key_str), current_time=evaluation_time
)
assert data_time == evaluation_time
return assert_has_current_time_fn
def assert_has_index_time(key_str, source_key_str, index):
def assert_has_index_time_fn(*, instance, times_by_key, evaluation_time, **kwargs):
resolver = CachingDataTimeResolver(
instance_queryer=_get_instance_queryer(instance, versioned_repo.asset_graph),
)
data_time = resolver.get_current_data_time(
dg.AssetKey(key_str), current_time=evaluation_time
)
if index is None:
assert data_time is None
else:
assert data_time == times_by_key[dg.AssetKey(source_key_str)][index]
return assert_has_index_time_fn
timelines = {
"basic_one_parent": [
observe_sources("sA"),
assert_has_index_time("A", None, None),
# run A, make sure it knows it's current
run_assets("A"),
assert_has_current_time("A"),
# new version of sA, A now points at the timestamp of that new version
observe_sources("sA"),
assert_has_index_time("A", "sA", 1),
# run A again, make sure it knows it's current
run_assets("A"),
assert_has_current_time("A"),
],
"basic_two_parents": [
observe_sources("sA", "sB"),
assert_has_index_time("AB", None, None),
# run AB, make sure it knows it's current
run_assets("AB"),
assert_has_current_time("AB"),
# new version of sA, AB now points at the timestamp of that new version
observe_sources("sA"),
assert_has_index_time("AB", "sA", 1),
# run AB again, make sure it knows it's current
run_assets("AB"),
assert_has_current_time("AB"),
# new version of sA and sB, AB now points at the timestamp of the older new version
observe_sources("sA"),
assert_has_index_time("AB", "sA", 2),
observe_sources("sB"),
assert_has_index_time("AB", "sA", 2),
# run AB again, make sure it knows it's current
run_assets("AB"),
assert_has_current_time("AB"),
],
"chained": [
observe_sources("sA", "sB"),
run_assets("B"),
assert_has_current_time("B"),
run_assets("B2"),
assert_has_current_time("B2"),
observe_sources("sA"),
assert_has_current_time("B"),
assert_has_current_time("B2"),
observe_sources("sB"),
assert_has_index_time("B", "sB", 1),
assert_has_index_time("B2", "sB", 1),
run_assets("B"),
assert_has_current_time("B"),
assert_has_index_time("B2", "sB", 1),
run_assets("B2"),
assert_has_current_time("B2"),
],
"chained_multiple_observations": [
observe_sources("sB"),
run_assets("B", "B2"),
assert_has_current_time("B"),
assert_has_current_time("B2"),
# after getting current, get a bunch of new versions
observe_sources("sB"),
observe_sources("sB"),
observe_sources("sB"),
observe_sources("sB"),
observe_sources("sB"),
# should point to the time at which the version changed
assert_has_index_time("B", "sB", 1),
assert_has_index_time("B2", "sB", 1),
# run B, make sure it knows it's current
run_assets("B"),
assert_has_current_time("B"),
# after getting current, get a bunch of new versions
observe_sources("sB"),
observe_sources("sB"),
observe_sources("sB"),
observe_sources("sB"),
observe_sources("sB"),
# should point to the time at which the version changed
assert_has_index_time("B", "sB", 6),
assert_has_index_time("B2", "sB", 1),
],
}
@pytest.mark.parametrize("timeline", list(timelines.values()), ids=list(timelines.keys()))
def test_non_volatile_data_time(timeline):
with DagsterInstance.ephemeral() as instance:
times_by_key = defaultdict(list)
for action in timeline:
action(
instance=instance,
times_by_key=times_by_key,
evaluation_time=get_current_datetime(),
)
| PartitionedDataTimeScenario |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/sqltypes.py | {
"start": 77893,
"end": 81981
} | class ____(Emulated, _AbstractInterval, TypeDecorator[dt.timedelta]):
"""A type for ``datetime.timedelta()`` objects.
The Interval type deals with ``datetime.timedelta`` objects. In PostgreSQL
and Oracle Database, the native ``INTERVAL`` type is used; for others, the
value is stored as a date which is relative to the "epoch" (Jan. 1, 1970).
Note that the ``Interval`` type does not currently provide date arithmetic
operations on platforms which do not support interval types natively. Such
operations usually require transformation of both sides of the expression
(such as, conversion of both sides into integer epoch values first) which
currently is a manual procedure (such as via
:attr:`~sqlalchemy.sql.expression.func`).
"""
impl = DateTime
epoch = dt.datetime.fromtimestamp(0, dt.timezone.utc).replace(tzinfo=None)
cache_ok = True
def __init__(
self,
native: bool = True,
second_precision: Optional[int] = None,
day_precision: Optional[int] = None,
):
"""Construct an Interval object.
:param native: when True, use the actual
INTERVAL type provided by the database, if
supported (currently PostgreSQL, Oracle Database).
Otherwise, represent the interval data as
an epoch value regardless.
:param second_precision: For native interval types
which support a "fractional seconds precision" parameter,
i.e. Oracle Database and PostgreSQL
:param day_precision: for native interval types which
support a "day precision" parameter, i.e. Oracle Database.
"""
super().__init__()
self.native = native
self.second_precision = second_precision
self.day_precision = day_precision
class Comparator(
TypeDecorator.Comparator[_CT],
_AbstractInterval.Comparator[_CT],
):
__slots__ = ()
comparator_factory = Comparator
@property
def python_type(self):
return dt.timedelta
def adapt_to_emulated(self, impltype, **kw):
return _AbstractInterval.adapt(self, impltype, **kw)
def coerce_compared_value(self, op, value):
return self.impl_instance.coerce_compared_value(op, value)
def bind_processor(
self, dialect: Dialect
) -> _BindProcessorType[dt.timedelta]:
if TYPE_CHECKING:
assert isinstance(self.impl_instance, DateTime)
impl_processor = self.impl_instance.bind_processor(dialect)
epoch = self.epoch
if impl_processor:
fixed_impl_processor = impl_processor
def process(
value: Optional[dt.timedelta],
) -> Any:
if value is not None:
dt_value = epoch + value
else:
dt_value = None
return fixed_impl_processor(dt_value)
else:
def process(
value: Optional[dt.timedelta],
) -> Any:
if value is not None:
dt_value = epoch + value
else:
dt_value = None
return dt_value
return process
def result_processor(
self, dialect: Dialect, coltype: Any
) -> _ResultProcessorType[dt.timedelta]:
if TYPE_CHECKING:
assert isinstance(self.impl_instance, DateTime)
impl_processor = self.impl_instance.result_processor(dialect, coltype)
epoch = self.epoch
if impl_processor:
fixed_impl_processor = impl_processor
def process(value: Any) -> Optional[dt.timedelta]:
dt_value = fixed_impl_processor(value)
if dt_value is None:
return None
return dt_value - epoch
else:
def process(value: Any) -> Optional[dt.timedelta]:
if value is None:
return None
return value - epoch # type: ignore
return process
| Interval |
python | apache__airflow | airflow-core/src/airflow/models/team.py | {
"start": 1742,
"end": 3411
} | class ____(Base):
"""
Contains the list of teams defined in the environment.
This table is only used when Airflow is run in multi-team mode.
"""
__tablename__ = "team"
id: Mapped[str] = mapped_column(UUIDType(binary=False), primary_key=True, default=uuid6.uuid7)
name: Mapped[str] = mapped_column(String(50), unique=True, nullable=False)
dag_bundles = relationship(
"DagBundleModel", secondary=dag_bundle_team_association_table, back_populates="teams"
)
def __repr__(self):
return f"Team(id={self.id},name={self.name})"
@classmethod
@provide_session
def get_all_teams_id_to_name_mapping(cls, session: Session = NEW_SESSION) -> dict[str, str]:
"""
Return a mapping of all team IDs to team names from the database.
This method provides a reusable way to get team information that can be used
across the codebase for validation and lookups.
:param session: Database session
:return: Dictionary mapping team UUIDs to team names
"""
stmt = select(cls.id, cls.name)
teams = session.execute(stmt).all()
return {str(team_id): team_name for team_id, team_name in teams}
@classmethod
def get_all_team_names(cls) -> set[str]:
"""
Return a set of all team names from the database.
This method provides a convenient way to get just the team names for validation
purposes, such as verifying team names in executor configurations.
:return: Set of all team names
"""
team_mapping = cls.get_all_teams_id_to_name_mapping()
return set(team_mapping.values())
| Team |
python | PyCQA__bandit | tests/unit/core/test_config.py | {
"start": 3500,
"end": 8565
} | class ____(testtools.TestCase):
sample = textwrap.dedent(
"""
profiles:
test_1:
include:
- any_other_function_with_shell_equals_true
- assert_used
exclude:
test_2:
include:
- blacklist_calls
test_3:
include:
- blacklist_imports
test_4:
exclude:
- assert_used
test_5:
exclude:
- blacklist_calls
- blacklist_imports
test_6:
include:
- blacklist_calls
exclude:
- blacklist_imports
blacklist_calls:
bad_name_sets:
- pickle:
qualnames: [pickle.loads]
message: "{func} library appears to be in use."
blacklist_imports:
bad_import_sets:
- telnet:
imports: [telnetlib]
level: HIGH
message: "{module} is considered insecure."
"""
)
suffix = ".yaml"
def setUp(self):
super().setUp()
f = self.useFixture(TempFile(self.sample, suffix=self.suffix))
self.config = config.BanditConfig(f.name)
def test_converted_include(self):
profiles = self.config.get_option("profiles")
test = profiles["test_1"]
data = {
"blacklist": {},
"exclude": set(),
"include": {"B101", "B604"},
}
self.assertEqual(data, test)
def test_converted_exclude(self):
profiles = self.config.get_option("profiles")
test = profiles["test_4"]
self.assertEqual({"B101"}, test["exclude"])
def test_converted_blacklist_call_data(self):
profiles = self.config.get_option("profiles")
test = profiles["test_2"]
data = {
"Call": [
{
"qualnames": ["telnetlib"],
"level": "HIGH",
"message": "{name} is considered insecure.",
"name": "telnet",
}
]
}
self.assertEqual(data, test["blacklist"])
def test_converted_blacklist_import_data(self):
profiles = self.config.get_option("profiles")
test = profiles["test_3"]
data = [
{
"message": "{name} library appears to be in use.",
"name": "pickle",
"qualnames": ["pickle.loads"],
}
]
self.assertEqual(data, test["blacklist"]["Call"])
self.assertEqual(data, test["blacklist"]["Import"])
self.assertEqual(data, test["blacklist"]["ImportFrom"])
def test_converted_blacklist_call_test(self):
profiles = self.config.get_option("profiles")
test = profiles["test_2"]
self.assertEqual({"B001"}, test["include"])
def test_converted_blacklist_import_test(self):
profiles = self.config.get_option("profiles")
test = profiles["test_3"]
self.assertEqual({"B001"}, test["include"])
def test_converted_exclude_blacklist(self):
profiles = self.config.get_option("profiles")
test = profiles["test_5"]
self.assertEqual({"B001"}, test["exclude"])
def test_deprecation_message(self):
msg = (
"Config file '%s' contains deprecated legacy config data. "
"Please consider upgrading to the new config format. The tool "
"'bandit-config-generator' can help you with this. Support for "
"legacy configs will be removed in a future bandit version."
)
with mock.patch("bandit.core.config.LOG.warning") as m:
self.config._config = {"profiles": {}}
self.config.validate("")
self.assertEqual((msg, ""), m.call_args_list[0][0])
def test_blacklist_error(self):
msg = (
" : Config file has an include or exclude reference to legacy "
"test '%s' but no configuration data for it. Configuration "
"data is required for this test. Please consider switching to "
"the new config file format, the tool "
"'bandit-config-generator' can help you with this."
)
for name in [
"blacklist_call",
"blacklist_imports",
"blacklist_imports_func",
]:
self.config._config = {"profiles": {"test": {"include": [name]}}}
try:
self.config.validate("")
except utils.ConfigError as e:
self.assertEqual(msg % name, e.message)
def test_bad_yaml(self):
f = self.useFixture(TempFile("[]"))
try:
self.config = config.BanditConfig(f.name)
except utils.ConfigError as e:
self.assertIn("Error parsing file.", e.message)
| TestConfigCompat |
python | keon__algorithms | tests/test_dp.py | {
"start": 2528,
"end": 3535
} | class ____(unittest.TestCase):
"""[summary]
Test for the file hosoya_triangle
Arguments:
unittest {[type]} -- [description]
"""
def test_hosoya(self):
self.assertEqual([1], hosoya_testing(1))
self.assertEqual([1,
1, 1,
2, 1, 2,
3, 2, 2, 3,
5, 3, 4, 3, 5,
8, 5, 6, 6, 5, 8],
hosoya_testing(6))
self.assertEqual([1,
1, 1,
2, 1, 2,
3, 2, 2, 3,
5, 3, 4, 3, 5,
8, 5, 6, 6, 5, 8,
13, 8, 10, 9, 10, 8, 13,
21, 13, 16, 15, 15, 16, 13, 21,
34, 21, 26, 24, 25, 24, 26, 21, 34,
55, 34, 42, 39, 40, 40, 39, 42, 34, 55],
hosoya_testing(10))
| TestHosoyaTriangle |
python | doocs__leetcode | solution/1300-1399/1339.Maximum Product of Splitted Binary Tree/Solution.py | {
"start": 192,
"end": 820
} | class ____:
def maxProduct(self, root: Optional[TreeNode]) -> int:
def sum(root: Optional[TreeNode]) -> int:
if root is None:
return 0
return root.val + sum(root.left) + sum(root.right)
def dfs(root: Optional[TreeNode]) -> int:
if root is None:
return 0
t = root.val + dfs(root.left) + dfs(root.right)
nonlocal ans, s
if t < s:
ans = max(ans, t * (s - t))
return t
mod = 10**9 + 7
s = sum(root)
ans = 0
dfs(root)
return ans % mod
| Solution |
python | neetcode-gh__leetcode | python/2348-number-of-zero-filled-subarrays.py | {
"start": 0,
"end": 596
} | class ____(object):
def zeroFilledSubarray(self, nums):
# check if there are any Zeros in the list
res = nums.count(0)
if res == 0:
return 0
r = 0
l = len(nums)
while r < l:
Temp_Subarray=[]
while r < l and nums[r] == 0:
Temp_Subarray.append(nums[r])
r += 1
if len(Temp_Subarray) > 1:
Temp_Count = len(Temp_Subarray) * ( len(Temp_Subarray) - 1 ) / 2
res += int(Temp_Count)
r += 1
return res
| Solution |
python | huggingface__transformers | src/transformers/models/sam3_tracker/modular_sam3_tracker.py | {
"start": 5294,
"end": 5356
} | class ____(Sam2PromptEncoder):
pass
| Sam3TrackerPromptEncoder |
python | lazyprogrammer__machine_learning_examples | supervised_class/app.py | {
"start": 571,
"end": 676
} | class ____(tornado.web.RequestHandler):
def get(self):
self.write("Hello, Tornado!")
| MainHandler |
python | nedbat__coveragepy | tests/test_setup.py | {
"start": 371,
"end": 1968
} | class ____(CoverageTest):
"""Tests of setup.py"""
run_in_temp_dir = False
def setUp(self) -> None:
super().setUp()
# Force the most restrictive interpretation.
self.set_environ("LC_ALL", "C")
def test_metadata(self) -> None:
status, output = self.run_command_status(
"python setup.py --description --version --url --author",
)
assert status == 0
out = output.splitlines()
assert "measurement" in out[0]
assert coverage.__version__ == out[1]
assert "github.com/coveragepy/coveragepy" in out[2]
assert "Ned Batchelder" in out[3]
@pytest.mark.skipif(
env.PYVERSION[3:5] == ("alpha", 0),
reason="don't expect classifiers until labelled builds",
)
def test_more_metadata(self) -> None:
# Let's be sure we pick up our own setup.py
# CoverageTest restores the original sys.path for us.
sys.path.insert(0, "")
from setup import setup_args
classifiers = cast(list[str], setup_args["classifiers"])
assert len(classifiers) > 7
assert classifiers[-1].startswith("Development Status ::")
assert "Programming Language :: Python :: %d" % sys.version_info[:1] in classifiers
assert "Programming Language :: Python :: %d.%d" % sys.version_info[:2] in classifiers
long_description = cast(str, setup_args["long_description"]).splitlines()
assert len(long_description) > 7
assert long_description[0].strip() != ""
assert long_description[-1].strip() != ""
| SetupPyTest |
python | neetcode-gh__leetcode | python/0707-design-linked-list.py | {
"start": 0,
"end": 119
} | class ____:
def __init__(self, val):
self.val = val
self.prev = None
self.next = None
| ListNode |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_axis27.py | {
"start": 315,
"end": 1476
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis27.xlsx")
self.ignore_elements = {"xl/charts/chart1.xml": ["<a:defRPr"]}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
chart.axis_ids = [108315392, 108329216]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
chart.set_x_axis({"num_font": {"rotation": -35}})
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | python-openxml__python-docx | src/docx/text/font.py | {
"start": 373,
"end": 13604
} | class ____(ElementProxy):
"""Proxy object for parent of a `<w:rPr>` element and providing access to
character properties such as font name, font size, bold, and subscript."""
def __init__(self, r: CT_R, parent: Any | None = None):
super().__init__(r, parent)
self._element = r
self._r = r
@property
def all_caps(self) -> bool | None:
"""Read/write.
Causes text in this font to appear in capital letters.
"""
return self._get_bool_prop("caps")
@all_caps.setter
def all_caps(self, value: bool | None) -> None:
self._set_bool_prop("caps", value)
@property
def bold(self) -> bool | None:
"""Read/write.
Causes text in this font to appear in bold.
"""
return self._get_bool_prop("b")
@bold.setter
def bold(self, value: bool | None) -> None:
self._set_bool_prop("b", value)
@property
def color(self):
"""A |ColorFormat| object providing a way to get and set the text color for this
font."""
return ColorFormat(self._element)
@property
def complex_script(self) -> bool | None:
"""Read/write tri-state value.
When |True|, causes the characters in the run to be treated as complex script
regardless of their Unicode values.
"""
return self._get_bool_prop("cs")
@complex_script.setter
def complex_script(self, value: bool | None) -> None:
self._set_bool_prop("cs", value)
@property
def cs_bold(self) -> bool | None:
"""Read/write tri-state value.
When |True|, causes the complex script characters in the run to be displayed in
bold typeface.
"""
return self._get_bool_prop("bCs")
@cs_bold.setter
def cs_bold(self, value: bool | None) -> None:
self._set_bool_prop("bCs", value)
@property
def cs_italic(self) -> bool | None:
"""Read/write tri-state value.
When |True|, causes the complex script characters in the run to be displayed in
italic typeface.
"""
return self._get_bool_prop("iCs")
@cs_italic.setter
def cs_italic(self, value: bool | None) -> None:
self._set_bool_prop("iCs", value)
@property
def double_strike(self) -> bool | None:
"""Read/write tri-state value.
When |True|, causes the text in the run to appear with double strikethrough.
"""
return self._get_bool_prop("dstrike")
@double_strike.setter
def double_strike(self, value: bool | None) -> None:
self._set_bool_prop("dstrike", value)
@property
def emboss(self) -> bool | None:
"""Read/write tri-state value.
When |True|, causes the text in the run to appear as if raised off the page in
relief.
"""
return self._get_bool_prop("emboss")
@emboss.setter
def emboss(self, value: bool | None) -> None:
self._set_bool_prop("emboss", value)
@property
def hidden(self) -> bool | None:
"""Read/write tri-state value.
When |True|, causes the text in the run to be hidden from display, unless
applications settings force hidden text to be shown.
"""
return self._get_bool_prop("vanish")
@hidden.setter
def hidden(self, value: bool | None) -> None:
self._set_bool_prop("vanish", value)
@property
def highlight_color(self) -> WD_COLOR_INDEX | None:
"""Color of highlighing applied or |None| if not highlighted."""
rPr = self._element.rPr
if rPr is None:
return None
return rPr.highlight_val
@highlight_color.setter
def highlight_color(self, value: WD_COLOR_INDEX | None):
rPr = self._element.get_or_add_rPr()
rPr.highlight_val = value
@property
def italic(self) -> bool | None:
"""Read/write tri-state value.
When |True|, causes the text of the run to appear in italics. |None| indicates
the effective value is inherited from the style hierarchy.
"""
return self._get_bool_prop("i")
@italic.setter
def italic(self, value: bool | None) -> None:
self._set_bool_prop("i", value)
@property
def imprint(self) -> bool | None:
"""Read/write tri-state value.
When |True|, causes the text in the run to appear as if pressed into the page.
"""
return self._get_bool_prop("imprint")
@imprint.setter
def imprint(self, value: bool | None) -> None:
self._set_bool_prop("imprint", value)
@property
def math(self) -> bool | None:
"""Read/write tri-state value.
When |True|, specifies this run contains WML that should be handled as though it
was Office Open XML Math.
"""
return self._get_bool_prop("oMath")
@math.setter
def math(self, value: bool | None) -> None:
self._set_bool_prop("oMath", value)
@property
def name(self) -> str | None:
"""The typeface name for this |Font|.
Causes the text it controls to appear in the named font, if a matching font is
found. |None| indicates the typeface is inherited from the style hierarchy.
"""
rPr = self._element.rPr
if rPr is None:
return None
return rPr.rFonts_ascii
@name.setter
def name(self, value: str | None) -> None:
rPr = self._element.get_or_add_rPr()
rPr.rFonts_ascii = value
rPr.rFonts_hAnsi = value
@property
def no_proof(self) -> bool | None:
"""Read/write tri-state value.
When |True|, specifies that the contents of this run should not report any
errors when the document is scanned for spelling and grammar.
"""
return self._get_bool_prop("noProof")
@no_proof.setter
def no_proof(self, value: bool | None) -> None:
self._set_bool_prop("noProof", value)
@property
def outline(self) -> bool | None:
"""Read/write tri-state value.
When |True| causes the characters in the run to appear as if they have an
outline, by drawing a one pixel wide border around the inside and outside
borders of each character glyph.
"""
return self._get_bool_prop("outline")
@outline.setter
def outline(self, value: bool | None) -> None:
self._set_bool_prop("outline", value)
@property
def rtl(self) -> bool | None:
"""Read/write tri-state value.
When |True| causes the text in the run to have right-to-left characteristics.
"""
return self._get_bool_prop("rtl")
@rtl.setter
def rtl(self, value: bool | None) -> None:
self._set_bool_prop("rtl", value)
@property
def shadow(self) -> bool | None:
"""Read/write tri-state value.
When |True| causes the text in the run to appear as if each character has a
shadow.
"""
return self._get_bool_prop("shadow")
@shadow.setter
def shadow(self, value: bool | None) -> None:
self._set_bool_prop("shadow", value)
@property
def size(self) -> Length | None:
"""Font height in English Metric Units (EMU).
|None| indicates the font size should be inherited from the style hierarchy.
|Length| is a subclass of |int| having properties for convenient conversion into
points or other length units. The :class:`docx.shared.Pt` class allows
convenient specification of point values::
>>> font.size = Pt(24)
>>> font.size
304800
>>> font.size.pt
24.0
"""
rPr = self._element.rPr
if rPr is None:
return None
return rPr.sz_val
@size.setter
def size(self, emu: int | Length | None) -> None:
rPr = self._element.get_or_add_rPr()
rPr.sz_val = None if emu is None else Emu(emu)
@property
def small_caps(self) -> bool | None:
"""Read/write tri-state value.
When |True| causes the lowercase characters in the run to appear as capital
letters two points smaller than the font size specified for the run.
"""
return self._get_bool_prop("smallCaps")
@small_caps.setter
def small_caps(self, value: bool | None) -> None:
self._set_bool_prop("smallCaps", value)
@property
def snap_to_grid(self) -> bool | None:
"""Read/write tri-state value.
When |True| causes the run to use the document grid characters per line settings
defined in the docGrid element when laying out the characters in this run.
"""
return self._get_bool_prop("snapToGrid")
@snap_to_grid.setter
def snap_to_grid(self, value: bool | None) -> None:
self._set_bool_prop("snapToGrid", value)
@property
def spec_vanish(self) -> bool | None:
"""Read/write tri-state value.
When |True|, specifies that the given run shall always behave as if it is
hidden, even when hidden text is being displayed in the current document. The
property has a very narrow, specialized use related to the table of contents.
Consult the spec (§17.3.2.36) for more details.
"""
return self._get_bool_prop("specVanish")
@spec_vanish.setter
def spec_vanish(self, value: bool | None) -> None:
self._set_bool_prop("specVanish", value)
@property
def strike(self) -> bool | None:
"""Read/write tri-state value.
When |True| causes the text in the run to appear with a single horizontal line
through the center of the line.
"""
return self._get_bool_prop("strike")
@strike.setter
def strike(self, value: bool | None) -> None:
self._set_bool_prop("strike", value)
@property
def subscript(self) -> bool | None:
"""Boolean indicating whether the characters in this |Font| appear as subscript.
|None| indicates the subscript/subscript value is inherited from the style
hierarchy.
"""
rPr = self._element.rPr
if rPr is None:
return None
return rPr.subscript
@subscript.setter
def subscript(self, value: bool | None) -> None:
rPr = self._element.get_or_add_rPr()
rPr.subscript = value
@property
def superscript(self) -> bool | None:
"""Boolean indicating whether the characters in this |Font| appear as
superscript.
|None| indicates the subscript/superscript value is inherited from the style
hierarchy.
"""
rPr = self._element.rPr
if rPr is None:
return None
return rPr.superscript
@superscript.setter
def superscript(self, value: bool | None) -> None:
rPr = self._element.get_or_add_rPr()
rPr.superscript = value
@property
def underline(self) -> bool | WD_UNDERLINE | None:
"""The underline style for this |Font|.
The value is one of |None|, |True|, |False|, or a member of :ref:`WdUnderline`.
|None| indicates the font inherits its underline value from the style hierarchy.
|False| indicates no underline. |True| indicates single underline. The values
from :ref:`WdUnderline` are used to specify other outline styles such as double,
wavy, and dotted.
"""
rPr = self._element.rPr
if rPr is None:
return None
val = rPr.u_val
return (
None
if val == WD_UNDERLINE.INHERITED
else True
if val == WD_UNDERLINE.SINGLE
else False
if val == WD_UNDERLINE.NONE
else val
)
@underline.setter
def underline(self, value: bool | WD_UNDERLINE | None) -> None:
rPr = self._element.get_or_add_rPr()
# -- works fine without these two mappings, but only because True == 1 and
# -- False == 0, which happen to match the mapping for WD_UNDERLINE.SINGLE
# -- and .NONE respectively.
val = (
WD_UNDERLINE.SINGLE if value is True else WD_UNDERLINE.NONE if value is False else value
)
rPr.u_val = val
@property
def web_hidden(self) -> bool | None:
"""Read/write tri-state value.
When |True|, specifies that the contents of this run shall be hidden when the
document is displayed in web page view.
"""
return self._get_bool_prop("webHidden")
@web_hidden.setter
def web_hidden(self, value: bool | None) -> None:
self._set_bool_prop("webHidden", value)
def _get_bool_prop(self, name: str) -> bool | None:
"""Return the value of boolean child of `w:rPr` having `name`."""
rPr = self._element.rPr
if rPr is None:
return None
return rPr._get_bool_val(name) # pyright: ignore[reportPrivateUsage]
def _set_bool_prop(self, name: str, value: bool | None):
"""Assign `value` to the boolean child `name` of `w:rPr`."""
rPr = self._element.get_or_add_rPr()
rPr._set_bool_val(name, value) # pyright: ignore[reportPrivateUsage]
| Font |
python | doocs__leetcode | solution/0200-0299/0248.Strobogrammatic Number III/Solution.py | {
"start": 0,
"end": 706
} | class ____:
def strobogrammaticInRange(self, low: str, high: str) -> int:
def dfs(u):
if u == 0:
return ['']
if u == 1:
return ['0', '1', '8']
ans = []
for v in dfs(u - 2):
for l, r in ('11', '88', '69', '96'):
ans.append(l + v + r)
if u != n:
ans.append('0' + v + '0')
return ans
a, b = len(low), len(high)
low, high = int(low), int(high)
ans = 0
for n in range(a, b + 1):
for s in dfs(n):
if low <= int(s) <= high:
ans += 1
return ans
| Solution |
python | docker__docker-py | tests/unit/dockertypes_test.py | {
"start": 8495,
"end": 8957
} | class ____(unittest.TestCase):
def test_parse_mounts(self):
spec = ContainerSpec(
image='scratch', mounts=[
'/local:/container',
'/local2:/container2:ro',
Mount(target='/target', source='/source')
]
)
assert 'Mounts' in spec
assert len(spec['Mounts']) == 3
for mount in spec['Mounts']:
assert isinstance(mount, Mount)
| ContainerSpecTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.