language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1175245,
"end": 1175466
} | class ____(VegaLiteSchema):
"""SelectionInitMapping schema wrapper."""
_schema = {"$ref": "#/definitions/SelectionInitMapping"}
def __init__(self, **kwds):
super().__init__(**kwds)
| SelectionInitMapping |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/cloud_sql.py | {
"start": 2688,
"end": 2860
} | class ____:
"""Helper class with operation statuses."""
PENDING = "PENDING"
RUNNING = "RUNNING"
DONE = "DONE"
UNKNOWN = "UNKNOWN"
| CloudSqlOperationStatus |
python | huggingface__transformers | src/transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py | {
"start": 5982,
"end": 6050
} | class ____(Qwen2VLPreTrainedModel):
pass
| Qwen2_5_VLPreTrainedModel |
python | scipy__scipy | scipy/stats/tests/test_multivariate.py | {
"start": 111073,
"end": 116425
} | class ____:
def test_reproducibility(self):
seed = 514
rng = np.random.RandomState(seed)
x = ortho_group.rvs(3, random_state=rng)
x2 = ortho_group.rvs(3, random_state=seed)
# Note this matrix has det -1, distinguishing O(N) from SO(N)
assert_almost_equal(np.linalg.det(x), -1)
expected = np.array([[0.381686, -0.090374, 0.919863],
[0.905794, -0.161537, -0.391718],
[-0.183993, -0.98272, -0.020204]])
assert_array_almost_equal(x, expected)
assert_array_almost_equal(x2, expected)
def test_invalid_dim(self):
assert_raises(ValueError, ortho_group.rvs, None)
assert_raises(ValueError, ortho_group.rvs, (2, 2))
assert_raises(ValueError, ortho_group.rvs, -1)
assert_raises(ValueError, ortho_group.rvs, 2.5)
def test_frozen_matrix(self):
dim = 7
frozen = ortho_group(dim)
frozen_seed = ortho_group(dim, seed=1234)
rvs1 = frozen.rvs(random_state=1234)
rvs2 = ortho_group.rvs(dim, random_state=1234)
rvs3 = frozen_seed.rvs(size=1)
assert_equal(rvs1, rvs2)
assert_equal(rvs1, rvs3)
def test_det_and_ortho(self):
xs = [[ortho_group.rvs(dim)
for i in range(10)]
for dim in range(2,12)]
# Test that abs determinants are always +1
dets = np.array([[np.linalg.det(x) for x in xx] for xx in xs])
assert_allclose(np.fabs(dets), np.ones(dets.shape), rtol=1e-13)
# Test that these are orthogonal matrices
for xx in xs:
for x in xx:
assert_array_almost_equal(np.dot(x, x.T),
np.eye(x.shape[0]))
@pytest.mark.parametrize("dim", [2, 5, 10, 20])
def test_det_distribution_gh18272(self, dim):
# Test that positive and negative determinants are equally likely.
rng = np.random.default_rng(6796248956179332344)
dist = ortho_group(dim=dim)
rvs = dist.rvs(size=5000, random_state=rng)
dets = scipy.linalg.det(rvs)
k = np.sum(dets > 0)
n = len(dets)
res = stats.binomtest(k, n)
low, high = res.proportion_ci(confidence_level=0.95)
assert low < 0.5 < high
def test_haar(self):
# Test that the distribution is constant under rotation
# Every column should have the same distribution
# Additionally, the distribution should be invariant under another rotation
# Generate samples
dim = 5
samples = 1000 # Not too many, or the test takes too long
ks_prob = .05
rng = np.random.RandomState(518) # Note that the test is sensitive to seed too
xs = ortho_group.rvs(dim, size=samples, random_state=rng)
# Dot a few rows (0, 1, 2) with unit vectors (0, 2, 4, 3),
# effectively picking off entries in the matrices of xs.
# These projections should all have the same distribution,
# establishing rotational invariance. We use the two-sided
# KS test to confirm this.
# We could instead test that angles between random vectors
# are uniformly distributed, but the below is sufficient.
# It is not feasible to consider all pairs, so pick a few.
els = ((0,0), (0,2), (1,4), (2,3))
#proj = {(er, ec): [x[er][ec] for x in xs] for er, ec in els}
proj = {(er, ec): sorted([x[er][ec] for x in xs]) for er, ec in els}
pairs = [(e0, e1) for e0 in els for e1 in els if e0 > e1]
ks_tests = [ks_2samp(proj[p0], proj[p1])[1] for (p0, p1) in pairs]
assert_array_less([ks_prob]*len(pairs), ks_tests)
def test_one_by_one(self):
# Test that the 1x1 distribution gives ±1 with equal probability.
dim = 1
xs = ortho_group.rvs(dim, size=5000, random_state=np.random.default_rng(514))
assert_allclose(np.abs(xs), 1, rtol=1e-13)
k = np.sum(xs > 0)
n = len(xs)
res = stats.binomtest(k, n)
low, high = res.proportion_ci(confidence_level=0.95)
assert low < 0.5 < high
def test_zero_by_zero(self):
assert_equal(special_ortho_group.rvs(0, size=4).shape, (4, 0, 0))
@pytest.mark.slow
def test_pairwise_distances(self):
# Test that the distribution of pairwise distances is close to correct.
rng = np.random.RandomState(514)
def random_ortho(dim, random_state=None):
u, _s, v = np.linalg.svd(rng.normal(size=(dim, dim)))
return np.dot(u, v)
for dim in range(2, 6):
def generate_test_statistics(rvs, N=1000, eps=1e-10):
stats = np.array([
np.sum((rvs(dim=dim, random_state=rng) -
rvs(dim=dim, random_state=rng))**2)
for _ in range(N)
])
# Add a bit of noise to account for numeric accuracy.
stats += np.random.uniform(-eps, eps, size=stats.shape)
return stats
expected = generate_test_statistics(random_ortho)
actual = generate_test_statistics(scipy.stats.ortho_group.rvs)
_D, p = scipy.stats.ks_2samp(expected, actual)
assert_array_less(.05, p)
| TestOrthoGroup |
python | getsentry__sentry | src/sentry/integrations/msteams/linkage.py | {
"start": 243,
"end": 1028
} | class ____(IdentityLinkageView, ABC):
@property
def parent_messaging_spec(self) -> MessagingIntegrationSpec:
from sentry.integrations.msteams.spec import MsTeamsMessagingSpec
return MsTeamsMessagingSpec()
@property
def provider(self) -> ExternalProviders:
return ExternalProviders.MSTEAMS
@property
def external_provider_enum(self) -> ExternalProviderEnum:
return ExternalProviderEnum.MSTEAMS
@property
def salt(self) -> str:
from .constants import SALT
return SALT
@property
def external_id_parameter(self) -> str:
return "teams_user_id"
@property
def expired_link_template(self) -> str:
return "sentry/integrations/msteams/expired-link.html"
| MsTeamsIdentityLinkageView |
python | Lightning-AI__lightning | tests/tests_pytorch/loops/test_double_iter_in_iterable_dataset.py | {
"start": 1128,
"end": 2641
} | class ____(IterableDataset):
def __init__(self, queue: Queue) -> None:
super().__init__()
self.queue = queue
def __iter__(self) -> Iterator:
for _ in range(5):
tensor, _ = self.queue.get(timeout=5)
yield tensor
def train_model(queue: Queue, max_epochs: int, ckpt_path: Path) -> None:
dataloader = DataLoader(QueueDataset(queue), num_workers=1, batch_size=None)
trainer = Trainer(
max_epochs=max_epochs,
enable_progress_bar=False,
enable_checkpointing=False,
devices=1,
logger=False,
)
if ckpt_path.exists():
trainer.fit(BoringModel(), dataloader, ckpt_path=str(ckpt_path))
else:
trainer.fit(BoringModel(), dataloader)
trainer.save_checkpoint(str(ckpt_path))
@pytest.mark.skipif(sys.platform == "darwin", reason="Skip on macOS due to multiprocessing issues")
def test_resume_training_with(tmp_path):
"""Test resuming training from checkpoint file using a IterableDataset."""
q = mp.Queue()
arr = np.random.random([1, 32]).astype(np.float32)
for idx in range(20):
q.put((arr, idx))
max_epoch = 2
ckpt_path = tmp_path / "model.ckpt"
train_model(q, max_epoch, ckpt_path)
assert os.path.exists(ckpt_path), f"Checkpoint file '{ckpt_path}' wasn't created"
ckpt_size = os.path.getsize(ckpt_path)
assert ckpt_size > 0, f"Checkpoint file is empty (size: {ckpt_size} bytes)"
train_model(q, max_epoch + 2, ckpt_path)
| QueueDataset |
python | pytorch__pytorch | torch/distributed/distributed_c10d.py | {
"start": 15407,
"end": 16165
} | class ____:
r"""
Deprecated enum-like class.
For reduction operations: ``SUM``, ``PRODUCT``, ``MIN``, and ``MAX``.
:class:`~torch.distributed.ReduceOp` is recommended to use instead.
"""
def __init__(self) -> None:
# __members__ is a dict storing key-value pairs for enum classes
for k, v in ReduceOp.RedOpType.__members__.items():
setattr(self, k, v)
self.__members__ = ReduceOp.RedOpType.__members__
@deprecated(
"`torch.distributed.reduce_op` is deprecated, "
"please use `torch.distributed.ReduceOp` instead",
category=FutureWarning,
)
def __getattribute__(self, key):
return object.__getattribute__(self, key)
reduce_op = _reduce_op()
| _reduce_op |
python | celery__celery | celery/result.py | {
"start": 17535,
"end": 29885
} | class ____(ResultBase):
"""A collection of results.
Arguments:
results (Sequence[AsyncResult]): List of result instances.
"""
_app = None
#: List of results in in the set.
results = None
def __init__(self, results, app=None, ready_barrier=None, **kwargs):
self._app = app
self.results = results
self.on_ready = promise(args=(proxy(self),))
self._on_full = ready_barrier or barrier(results)
if self._on_full:
self._on_full.then(promise(self._on_ready, weak=True))
def add(self, result):
"""Add :class:`AsyncResult` as a new member of the set.
Does nothing if the result is already a member.
"""
if result not in self.results:
self.results.append(result)
if self._on_full:
self._on_full.add(result)
def _on_ready(self):
if self.backend.is_async:
self.on_ready()
def remove(self, result):
"""Remove result from the set; it must be a member.
Raises:
KeyError: if the result isn't a member.
"""
if isinstance(result, str):
result = self.app.AsyncResult(result)
try:
self.results.remove(result)
except ValueError:
raise KeyError(result)
def discard(self, result):
"""Remove result from the set if it is a member.
Does nothing if it's not a member.
"""
try:
self.remove(result)
except KeyError:
pass
def update(self, results):
"""Extend from iterable of results."""
self.results.extend(r for r in results if r not in self.results)
def clear(self):
"""Remove all results from this set."""
self.results[:] = [] # don't create new list.
def successful(self):
"""Return true if all tasks successful.
Returns:
bool: true if all of the tasks finished
successfully (i.e. didn't raise an exception).
"""
return all(result.successful() for result in self.results)
def failed(self):
"""Return true if any of the tasks failed.
Returns:
bool: true if one of the tasks failed.
(i.e., raised an exception)
"""
return any(result.failed() for result in self.results)
def maybe_throw(self, callback=None, propagate=True):
for result in self.results:
result.maybe_throw(callback=callback, propagate=propagate)
maybe_reraise = maybe_throw # XXX compat alias.
def waiting(self):
"""Return true if any of the tasks are incomplete.
Returns:
bool: true if one of the tasks are still
waiting for execution.
"""
return any(not result.ready() for result in self.results)
def ready(self):
"""Did all of the tasks complete? (either by success of failure).
Returns:
bool: true if all of the tasks have been executed.
"""
return all(result.ready() for result in self.results)
def completed_count(self):
"""Task completion count.
Note that `complete` means `successful` in this context. In other words, the
return value of this method is the number of ``successful`` tasks.
Returns:
int: the number of complete (i.e. successful) tasks.
"""
return sum(int(result.successful()) for result in self.results)
def forget(self):
"""Forget about (and possible remove the result of) all the tasks."""
for result in self.results:
result.forget()
def revoke(self, connection=None, terminate=False, signal=None,
wait=False, timeout=None):
"""Send revoke signal to all workers for all tasks in the set.
Arguments:
terminate (bool): Also terminate the process currently working
on the task (if any).
signal (str): Name of signal to send to process if terminate.
Default is TERM.
wait (bool): Wait for replies from worker.
The ``timeout`` argument specifies the number of seconds
to wait. Disabled by default.
timeout (float): Time in seconds to wait for replies when
the ``wait`` argument is enabled.
"""
self.app.control.revoke([r.id for r in self.results],
connection=connection, timeout=timeout,
terminate=terminate, signal=signal, reply=wait)
def __iter__(self):
return iter(self.results)
def __getitem__(self, index):
"""`res[i] -> res.results[i]`."""
return self.results[index]
def get(self, timeout=None, propagate=True, interval=0.5,
callback=None, no_ack=True, on_message=None,
disable_sync_subtasks=True, on_interval=None):
"""See :meth:`join`.
This is here for API compatibility with :class:`AsyncResult`,
in addition it uses :meth:`join_native` if available for the
current result backend.
"""
return (self.join_native if self.supports_native_join else self.join)(
timeout=timeout, propagate=propagate,
interval=interval, callback=callback, no_ack=no_ack,
on_message=on_message, disable_sync_subtasks=disable_sync_subtasks,
on_interval=on_interval,
)
def join(self, timeout=None, propagate=True, interval=0.5,
callback=None, no_ack=True, on_message=None,
disable_sync_subtasks=True, on_interval=None):
"""Gather the results of all tasks as a list in order.
Note:
This can be an expensive operation for result store
backends that must resort to polling (e.g., database).
You should consider using :meth:`join_native` if your backend
supports it.
Warning:
Waiting for tasks within a task may lead to deadlocks.
Please see :ref:`task-synchronous-subtasks`.
Arguments:
timeout (float): The number of seconds to wait for results
before the operation times out.
propagate (bool): If any of the tasks raises an exception,
the exception will be re-raised when this flag is set.
interval (float): Time to wait (in seconds) before retrying to
retrieve a result from the set. Note that this does not have
any effect when using the amqp result store backend,
as it does not use polling.
callback (Callable): Optional callback to be called for every
result received. Must have signature ``(task_id, value)``
No results will be returned by this function if a callback
is specified. The order of results is also arbitrary when a
callback is used. To get access to the result object for
a particular id you'll have to generate an index first:
``index = {r.id: r for r in gres.results.values()}``
Or you can create new result objects on the fly:
``result = app.AsyncResult(task_id)`` (both will
take advantage of the backend cache anyway).
no_ack (bool): Automatic message acknowledgment (Note that if this
is set to :const:`False` then the messages
*will not be acknowledged*).
disable_sync_subtasks (bool): Disable tasks to wait for sub tasks
this is the default configuration. CAUTION do not enable this
unless you must.
Raises:
celery.exceptions.TimeoutError: if ``timeout`` isn't
:const:`None` and the operation takes longer than ``timeout``
seconds.
"""
if disable_sync_subtasks:
assert_will_not_block()
time_start = time.monotonic()
remaining = None
if on_message is not None:
raise ImproperlyConfigured(
'Backend does not support on_message callback')
results = []
for result in self.results:
remaining = None
if timeout:
remaining = timeout - (time.monotonic() - time_start)
if remaining <= 0.0:
raise TimeoutError('join operation timed out')
value = result.get(
timeout=remaining, propagate=propagate,
interval=interval, no_ack=no_ack, on_interval=on_interval,
disable_sync_subtasks=disable_sync_subtasks,
)
if callback:
callback(result.id, value)
else:
results.append(value)
return results
def then(self, callback, on_error=None, weak=False):
return self.on_ready.then(callback, on_error)
def iter_native(self, timeout=None, interval=0.5, no_ack=True,
on_message=None, on_interval=None):
"""Backend optimized version of :meth:`iterate`.
.. versionadded:: 2.2
Note that this does not support collecting the results
for different task types using different backends.
This is currently only supported by the amqp, Redis and cache
result backends.
"""
return self.backend.iter_native(
self,
timeout=timeout, interval=interval, no_ack=no_ack,
on_message=on_message, on_interval=on_interval,
)
def join_native(self, timeout=None, propagate=True,
interval=0.5, callback=None, no_ack=True,
on_message=None, on_interval=None,
disable_sync_subtasks=True):
"""Backend optimized version of :meth:`join`.
.. versionadded:: 2.2
Note that this does not support collecting the results
for different task types using different backends.
This is currently only supported by the amqp, Redis and cache
result backends.
"""
if disable_sync_subtasks:
assert_will_not_block()
order_index = None if callback else {
result.id: i for i, result in enumerate(self.results)
}
acc = None if callback else [None for _ in range(len(self))]
for task_id, meta in self.iter_native(timeout, interval, no_ack,
on_message, on_interval):
if isinstance(meta, list):
value = []
for children_result in meta:
value.append(children_result.get())
else:
value = meta['result']
if propagate and meta['status'] in states.PROPAGATE_STATES:
raise value
if callback:
callback(task_id, value)
else:
acc[order_index[task_id]] = value
return acc
def _iter_meta(self, **kwargs):
return (meta for _, meta in self.backend.get_many(
{r.id for r in self.results}, max_iterations=1, **kwargs
))
def _failed_join_report(self):
return (res for res in self.results
if res.backend.is_cached(res.id) and
res.state in states.PROPAGATE_STATES)
def __len__(self):
return len(self.results)
def __eq__(self, other):
if isinstance(other, ResultSet):
return other.results == self.results
return NotImplemented
def __repr__(self):
return f'<{type(self).__name__}: [{", ".join(r.id for r in self.results)}]>'
@property
def supports_native_join(self):
try:
return self.results[0].supports_native_join
except IndexError:
pass
@property
def app(self):
if self._app is None:
self._app = (self.results[0].app if self.results else
current_app._get_current_object())
return self._app
@app.setter
def app(self, app):
self._app = app
@property
def backend(self):
return self.app.backend if self.app else self.results[0].backend
@Thenable.register
| ResultSet |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 300196,
"end": 302652
} | class ____:
DTYPES = {}
for i in MatmulCommon.types:
for j in MatmulCommon.types:
if np.can_cast(j, i):
DTYPES[f"{i}-{j}"] = (np.dtype(i), np.dtype(j))
@pytest.mark.parametrize("dtype1,dtype2", DTYPES.values(), ids=DTYPES)
def test_basic(self, dtype1: np.dtype, dtype2: np.dtype) -> None:
a = np.arange(10).reshape(5, 2).astype(dtype1)
a_id = id(a)
b = np.ones((2, 2), dtype=dtype2)
ref = a @ b
a @= b
assert id(a) == a_id
assert a.dtype == dtype1
assert a.shape == (5, 2)
if dtype1.kind in "fc":
np.testing.assert_allclose(a, ref)
else:
np.testing.assert_array_equal(a, ref)
SHAPES = {
"2d_large": ((10**5, 10), (10, 10)),
"3d_large": ((10**4, 10, 10), (1, 10, 10)),
"1d": ((3,), (3,)),
"2d_1d": ((3, 3), (3,)),
"1d_2d": ((3,), (3, 3)),
"2d_broadcast": ((3, 3), (3, 1)),
"2d_broadcast_reverse": ((1, 3), (3, 3)),
"3d_broadcast1": ((3, 3, 3), (1, 3, 1)),
"3d_broadcast2": ((3, 3, 3), (1, 3, 3)),
"3d_broadcast3": ((3, 3, 3), (3, 3, 1)),
"3d_broadcast_reverse1": ((1, 3, 3), (3, 3, 3)),
"3d_broadcast_reverse2": ((3, 1, 3), (3, 3, 3)),
"3d_broadcast_reverse3": ((1, 1, 3), (3, 3, 3)),
}
@pytest.mark.parametrize("a_shape,b_shape", SHAPES.values(), ids=SHAPES)
def test_shapes(self, a_shape: tuple[int, ...], b_shape: tuple[int, ...]):
a_size = np.prod(a_shape)
a = np.arange(a_size).reshape(a_shape).astype(np.float64)
a_id = id(a)
b_size = np.prod(b_shape)
b = np.arange(b_size).reshape(b_shape)
ref = a @ b
if ref.shape != a_shape:
with pytest.raises(ValueError):
a @= b
return
else:
a @= b
assert id(a) == a_id
assert a.dtype.type == np.float64
assert a.shape == a_shape
np.testing.assert_allclose(a, ref)
def test_matmul_axes():
a = np.arange(3 * 4 * 5).reshape(3, 4, 5)
c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)])
assert c.shape == (3, 4, 4)
d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)])
assert d.shape == (4, 4, 3)
e = np.swapaxes(d, 0, 2)
assert_array_equal(e, c)
f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)])
assert f.shape == (4, 5)
| TestMatmulInplace |
python | readthedocs__readthedocs.org | readthedocs/proxito/views/hosting.py | {
"start": 9698,
"end": 9999
} | class ____(RemoveFieldsMixin, ProjectSerializer):
FIELDS_TO_REMOVE = [
"_links",
# users and tags result in additional queries for fields that we don't use.
"users",
"tags",
]
related_project_serializer = RelatedProjectAddonsSerializer
| ProjectAddonsSerializer |
python | pennersr__django-allauth | allauth/headless/tokens/inputs.py | {
"start": 55,
"end": 133
} | class ____(inputs.Input):
refresh_token = inputs.CharField()
| RefreshTokenInput |
python | great-expectations__great_expectations | tests/expectations/test_conditions.py | {
"start": 7444,
"end": 7867
} | class ____:
"""Tests for the NullityCondition class."""
def test_repr_is_null(self):
col = Column("email")
cond = NullityCondition(column=col, is_null=True)
assert repr(cond) == "email IS NULL"
def test_repr_is_not_null(self):
col = Column("email")
cond = NullityCondition(column=col, is_null=False)
assert repr(cond) == "email IS NOT NULL"
| TestNullityCondition |
python | PyCQA__pylint | tests/functional/i/init_return_from_inner_function.py | {
"start": 56,
"end": 240
} | class ____:
"""docstring"""
def __init__(self):
def inner_function(arg):
"""inner docstring"""
return arg + 4
self.func = inner_function
| Aaa |
python | PrefectHQ__prefect | tests/utilities/test_collections.py | {
"start": 2178,
"end": 2225
} | class ____:
x: int
y: int
| SimpleDataclass |
python | pytorch__pytorch | torch/distributed/_state_dict_utils.py | {
"start": 19415,
"end": 29890
} | class ____(NamedTuple):
size: torch.Size
dtype: torch.dtype
def _broadcast_tensors(
full_state_dict: dict[str, Any],
local_state_dict: dict[str, Any],
keys: list[str],
device: torch.device,
pg: Optional[dist.ProcessGroup] = None,
) -> None:
if pg is None:
pg = dist.distributed_c10d._get_default_group()
pg_device = (
device
if device.type in {pg_device.type for pg_device in pg._device_types}
else pg._device_types[0]
)
tensors: list[torch.Tensor] = []
for key in keys:
if dist.get_rank() == 0:
full_state = full_state_dict[key]
if not isinstance(full_state, torch.Tensor):
raise AssertionError("full_state must be a torch.Tensor")
full_tensor = full_state.detach().to(pg_device)
else:
tensor_info = full_state_dict[key]
full_tensor = torch.empty(
size=tensor_info.size,
device=pg_device,
dtype=tensor_info.dtype,
)
tensors.append(full_tensor)
if (local_state := local_state_dict.get(key)) is None:
continue
local_state_dict[key] = (
(local_state, full_tensor)
if isinstance(local_state, DTensor)
else full_tensor
)
if len(tensors) > 1:
dist._broadcast_coalesced(pg, tensors, 500, 0)
else:
dist.broadcast(tensors[0], src=0, group=pg)
if pg_device != device:
for key, full_tensor in zip(keys, tensors):
if (local_state := local_state_dict.get(key)) is not None:
local_state_dict[key] = (
(local_state[0], full_tensor.to(device))
if (
isinstance(local_state, tuple)
and isinstance(local_state[0], DTensor)
)
else full_tensor.to(device)
)
_distribute_tensors(local_state_dict, keys, device, pg)
def _distribute_tensors(
local_state_dict: dict[str, Any],
keys: list[str],
device: torch.device,
pg: Optional[dist.ProcessGroup] = None,
) -> None:
if pg is None:
pg = dist.distributed_c10d._get_default_group()
for key in keys:
_local_state = local_state_dict.get(key)
if _local_state is None or torch.is_tensor(_local_state):
continue
local_state = _local_state[0]
full_tensor = _local_state[1]
shape, offset = compute_local_shape_and_global_offset(
full_tensor.shape, local_state.device_mesh, local_state.placements
)
slices = [
slice(cur_offset, cur_offset + cur_shape)
for cur_shape, cur_offset in zip(shape, offset)
]
if local_state.is_meta:
# Use .clone() here rather than view to clone and return only the sliced portion, minimizing memory access and cost.
local_tensor = full_tensor[tuple(slices)].detach().clone()
# TODO: currently, we cannot handle strided sharding if the dp dimension is not even. For example,
# one of the case that is not yet supported is when placements = (Shard(0), _StridedShard(0, sf=2)).
ret = DTensor.from_local(
local_tensor,
local_state.device_mesh,
local_state.placements,
shape=local_state.shape,
stride=local_state.stride(),
)
else:
ret = local_state
# Copy full_tensor[slices] into local_state.to_local() to reduce memory footprint.
ret.to_local().copy_(full_tensor[tuple(slices)])
local_state_dict[key] = ret
def _broadcast_state_dict(
full_state_dict: dict[str, Any],
local_state_dict: dict[str, Any],
device: torch.device,
pg: Optional[dist.ProcessGroup] = None,
strict: bool = False,
cpu_offload: bool = False,
) -> None:
# Broadcast from rank0's `full_state_dict` to all ranks' `local_state_dict`.
# If strict is True, any keys in `local_state_dict` but not in `full_state_dict`
# will be removed from `local_state_dict`.
ret = {}
if dist.get_rank() == 0:
for key, value in full_state_dict.items():
if not torch.is_tensor(value):
ret[key] = value
elif value.dim() == 0:
ret[key] = value.cpu()
else:
ret[key] = _TensorInfo(value.size(), value.dtype)
broadcast_list = [ret]
dist.broadcast_object_list(broadcast_list, src=0, group=pg)
ret = broadcast_list[0]
# Gather values
keys = []
local_state_dict_keys = set(local_state_dict.keys())
global_keys = set()
for key, value in ret.items():
global_keys.add(key)
if not isinstance(value, _TensorInfo):
if key in local_state_dict:
local_state_dict[key] = value
continue
if dist.get_rank() == 0:
ret[key] = full_state_dict[key]
keys.append(key)
# Broadcast every tensor to avoid OOM for now.
if len(keys) >= 1:
_broadcast_tensors(ret, local_state_dict, keys, device, pg)
if cpu_offload:
for key in keys:
local_state_dict[key] = local_state_dict[key].cpu()
keys.clear()
if strict:
if missing_keys := (local_state_dict_keys - global_keys):
for key in missing_keys:
local_state_dict.pop(key)
if keys:
_broadcast_tensors(ret, local_state_dict, keys, device, pg)
if cpu_offload:
for key in keys:
local_state_dict[key] = local_state_dict[key].cpu()
def _distribute_state_dict(
full_state_dict: dict[str, Any],
local_state_dict: dict[str, Any],
device: torch.device,
pg: Optional[dist.ProcessGroup] = None,
) -> None:
# Full_state_dict = True, broadcast_from_rank0 = False here. Each rank has
# full_state_dict. Skip the broadcast in ``_broadcast_state_dict`` and
# distribute tensors in each rank
for key, value in full_state_dict.items():
if key not in full_state_dict:
continue
if not torch.is_tensor(value):
local_state_dict[key] = value
elif value.dim() == 0:
local_state_dict[key] = value.cpu()
else:
if not isinstance(value, torch.Tensor):
raise AssertionError("value must be a torch.Tensor")
local_state = local_state_dict.get(key)
if local_state is None:
continue
elif isinstance(local_state, DTensor):
local_state_dict[key] = distribute_tensor(
value.detach().to(device),
local_state.device_mesh,
local_state.placements,
)
else:
local_state_dict[key] = value.detach().to(device)
# These APIs are from torch.distributed.checkpoint.
# TODO: We should consolidate the code here as some not all modules can depend on
# DCP.
PATH_ITEM = Union[str, int]
OBJ_PATH = tuple[PATH_ITEM, ...]
FLATTEN_MAPPING = dict[str, OBJ_PATH]
STATE_DICT_TYPE = dict[str, Any]
CONTAINER_TYPE = MutableMapping[PATH_ITEM, Any]
def _traverse_state_dict(
state_dict: STATE_DICT_TYPE,
visitor: Callable[[OBJ_PATH, Any], None],
) -> None:
"""
Invoke ``visitor`` for each value recursively in ``state_dict``.
Mapping, list, and tuple will be flattened and other value types are treated
as the terminal values and will invoke ``visitor``.
"""
def _traverse_obj(path: OBJ_PATH, value: Any) -> None:
if isinstance(value, Mapping):
for k, v in value.items():
_traverse_obj(path + (str(k),), v)
elif isinstance(value, (list, tuple)):
for i, v in enumerate(value):
_traverse_obj(path + (i,), v)
else:
visitor(path, value)
for key, value in state_dict.items():
_traverse_obj((str(key),), value)
def _flatten_state_dict(
state_dict: STATE_DICT_TYPE,
) -> tuple[STATE_DICT_TYPE, FLATTEN_MAPPING]:
"""
Flatten ``state_dict`` made of nested dicts and lists into a top level dictionary.
Use ``unflatten_state_dict`` to revert this process.
Returns:
A tuple with the flatten state_dict and a mapping from original to new state_dict.
N.B. The new keys are derived from the object paths, joined by dot.
For example: ``{ 'a': {'b':...}}`` results in the key `a.b`.
"""
flattened: STATE_DICT_TYPE = {}
mappings: FLATTEN_MAPPING = {}
def flat_copy(path: OBJ_PATH, value: Any) -> None:
new_fqn = ".".join(map(str, path))
if new_fqn in flattened:
raise ValueError(f"duplicated flatten key {new_fqn}")
flattened[new_fqn] = value
mappings[new_fqn] = path
_traverse_state_dict(state_dict, flat_copy)
return flattened, mappings
def _set_element(root_dict: STATE_DICT_TYPE, path: OBJ_PATH, value: Any) -> None:
"""Set ``value`` in ``root_dict`` along the ``path`` object path."""
cur_container = cast(CONTAINER_TYPE, root_dict)
def extend_list(lst: list[Any], idx: int) -> None:
while len(lst) <= idx:
lst.append(None)
for i in range(1, len(path)):
prev_key = path[i - 1]
key = path[i]
def_val: Union[CONTAINER_TYPE, list[Any]] = {} if type(key) is str else []
if isinstance(cur_container, Mapping):
cur_container = cast(
CONTAINER_TYPE, cur_container.setdefault(prev_key, def_val)
)
else:
# pyrefly: ignore [bad-argument-type]
extend_list(cur_container, prev_key)
if cur_container[prev_key] is None:
cur_container[prev_key] = def_val
cur_container = cur_container[prev_key]
key = path[-1]
if type(key) is int:
extend_list(cast(list[Any], cur_container), key)
cur_container[key] = value
def _unflatten_state_dict(
state_dict: STATE_DICT_TYPE, mapping: FLATTEN_MAPPING
) -> STATE_DICT_TYPE:
"""Restore the original nested state_dict according to ``mapping`` and the flattened ``state_dict``."""
nested: STATE_DICT_TYPE = {}
for key, value in state_dict.items():
_set_element(nested, mapping[key], value)
return nested
| _TensorInfo |
python | ray-project__ray | python/ray/autoscaler/v2/instance_manager/config.py | {
"start": 2176,
"end": 3729
} | class ____:
# The timeout for waiting for a REQUESTED instance to be ALLOCATED.
request_status_timeout_s: int = env_integer(
"RAY_AUTOSCALER_RECONCILE_REQUEST_STATUS_TIMEOUT_S", 10 * 60
)
# The timeout for waiting for a ALLOCATED instance to be RAY_RUNNING.
allocate_status_timeout_s: int = env_integer(
"RAY_AUTOSCALER_RECONCILE_ALLOCATE_STATUS_TIMEOUT_S", 300
)
# The timeout for waiting for a RAY_INSTALLING instance to be RAY_RUNNING.
ray_install_status_timeout_s: int = env_integer(
"RAY_AUTOSCALER_RECONCILE_RAY_INSTALL_STATUS_TIMEOUT_S", 30 * 60
)
# The timeout for waiting for a TERMINATING instance to be TERMINATED.
terminating_status_timeout_s: int = env_integer(
"RAY_AUTOSCALER_RECONCILE_TERMINATING_STATUS_TIMEOUT_S", 300
)
# The timeout for waiting for a RAY_STOP_REQUESTED instance
# to be RAY_STOPPING or RAY_STOPPED.
ray_stop_requested_status_timeout_s: int = env_integer(
"RAY_AUTOSCALER_RECONCILE_RAY_STOP_REQUESTED_STATUS_TIMEOUT_S", 300
)
# The interval for raise a warning when an instance in transient status
# is not updated for a long time.
transient_status_warn_interval_s: int = env_integer(
"RAY_AUTOSCALER_RECONCILE_TRANSIENT_STATUS_WARN_INTERVAL_S", 90
)
# The number of times to retry requesting to allocate an instance.
max_num_retry_request_to_allocate: int = env_integer(
"RAY_AUTOSCALER_RECONCILE_MAX_NUM_RETRY_REQUEST_TO_ALLOCATE", 3
)
@dataclass
| InstanceReconcileConfig |
python | joblib__joblib | joblib/test/test_parallel.py | {
"start": 41086,
"end": 43156
} | class ____:
'''Class defined in the __main__ namespace'''
def __init__(self, value):
self.value = value
def square(x, ignored=None, ignored2=None):
'''Function defined in the __main__ namespace'''
return x.value ** 2
square2 = partial(square, ignored2='something')
# Here, we do not need the `if __name__ == "__main__":` safeguard when
# using the default `loky` backend (even on Windows).
# To make debugging easier
faulthandler.dump_traceback_later(30, exit=True)
# The following baroque function call is meant to check that joblib
# introspection rightfully uses cloudpickle instead of the (faster) pickle
# module of the standard library when necessary. In particular cloudpickle is
# necessary for functions and instances of classes interactively defined in the
# __main__ module.
print(Parallel(backend="loky", n_jobs=2)(
delayed(square2)(MyClass(i), ignored=[dict(a=MyClass(1))])
for i in range(5)
))
""".format(joblib_root_folder=os.path.dirname(os.path.dirname(joblib.__file__)))
@with_multiprocessing
def test_parallel_with_interactively_defined_functions_loky(tmpdir):
# loky accepts interactive functions defined in __main__ and does not
# require if __name__ == '__main__' even when the __main__ module is
# defined by the result of the execution of a filesystem script.
script = tmpdir.join("joblib_interactively_defined_function.py")
script.write(INTERACTIVE_DEFINED_FUNCTION_AND_CLASS_SCRIPT_CONTENT)
check_subprocess_call(
[sys.executable, script.strpath],
stdout_regex=r"\[0, 1, 4, 9, 16\]",
timeout=None, # rely on faulthandler to kill the process
)
INTERACTIVELY_DEFINED_SUBCLASS_WITH_METHOD_SCRIPT_CONTENT = """\
import sys
# Make sure that joblib is importable in the subprocess launching this
# script. This is needed in case we run the tests from the joblib root
# folder without having installed joblib
sys.path.insert(0, {joblib_root_folder!r})
from joblib import Parallel, delayed, hash
import multiprocessing as mp
mp.util.log_to_stderr(5)
| MyClass |
python | numpy__numpy | numpy/random/tests/test_randomstate.py | {
"start": 3514,
"end": 5369
} | class ____:
def test_scalar(self):
s = random.RandomState(0)
assert_equal(s.randint(1000), 684)
s = random.RandomState(4294967295)
assert_equal(s.randint(1000), 419)
def test_array(self):
s = random.RandomState(range(10))
assert_equal(s.randint(1000), 468)
s = random.RandomState(np.arange(10))
assert_equal(s.randint(1000), 468)
s = random.RandomState([0])
assert_equal(s.randint(1000), 973)
s = random.RandomState([4294967295])
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, random.RandomState, -0.5)
assert_raises(ValueError, random.RandomState, -1)
def test_invalid_array(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, random.RandomState, [-0.5])
assert_raises(ValueError, random.RandomState, [-1])
assert_raises(ValueError, random.RandomState, [4294967296])
assert_raises(ValueError, random.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, random.RandomState, [1, -2, 4294967296])
def test_invalid_array_shape(self):
# gh-9832
assert_raises(ValueError, random.RandomState, np.array([],
dtype=np.int64))
assert_raises(ValueError, random.RandomState, [[1, 2, 3]])
assert_raises(ValueError, random.RandomState, [[1, 2, 3],
[4, 5, 6]])
def test_cannot_seed(self):
rs = random.RandomState(PCG64(0))
with assert_raises(TypeError):
rs.seed(1234)
def test_invalid_initialization(self):
assert_raises(ValueError, random.RandomState, MT19937)
| TestSeed |
python | ray-project__ray | rllib/utils/metrics/ray_metrics.py | {
"start": 906,
"end": 1498
} | class ____:
"""Context manager for timing code execution.
Elapsed time is automatically logged to the provided Prometheus Histogram.
Example:
with TimerAndPrometheusLogger(Histogram):
learner.update()
"""
def __init__(self, histogram: Histogram):
self._histogram = histogram
def __enter__(self):
self.start = time.perf_counter()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.elapsed = time.perf_counter() - self.start
self._histogram.observe(self.elapsed)
| TimerAndPrometheusLogger |
python | pytorch__pytorch | test/dynamo/test_regional_inductor.py | {
"start": 16138,
"end": 21763
} | class ____(torch._inductor.test_case.TestCase):
"""Tests for RegionalOutputCode and BundledAOTAutogradResult."""
def test_regional_output_code_serialization(self):
"""Test that RegionalOutputCode can be serialized and deserialized."""
def fn(x, y):
sin = torch.sin(x)
with fx_traceback.annotate({"compile_with_inductor": 0}):
mul = sin * y
add = mul + 1
return torch.sin(add)
x = torch.randn(10, requires_grad=True)
y = torch.randn(10, requires_grad=True)
# Compile with regional inductor
with torch.fx.traceback.preserve_node_meta(enable=False):
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.fx.experimental.proxy_tensor import make_fx
fake_mode = FakeTensorMode()
with fake_mode:
fake_x = fake_mode.from_tensor(x)
fake_y = fake_mode.from_tensor(y)
gm = make_fx(fn)(fake_x, fake_y)
# Run regional_inductor on the graph
result_gm = regional_inductor(gm, fake_x, fake_y)
# Create RegionalOutputCode
output_code = RegionalOutputCode(result_gm)
# Test that we can call it
self.assertIsNotNone(output_code._graph_module)
# Serialize
output_code.prepare_for_serialization()
self.assertIsNone(output_code._graph_module)
self.assertIsNotNone(output_code._serialized_graph_module)
# Deserialize via post_compile
from torch._inductor.output_code import CompiledFxGraphConstants
fx_config: _CompileFxKwargs = {"is_backward": False}
output_code.post_compile(
[fake_x, fake_y], CompiledFxGraphConstants(), fx_config
)
self.assertIsNotNone(output_code._graph_module)
self.assertIsInstance(output_code._graph_module, torch.fx.GraphModule)
# Test that deserialized graph works
with fake_mode:
result = output_code([fake_x, fake_y])
self.assertIsNotNone(result)
def test_regional_output_code_with_backward(self):
"""Test RegionalOutputCode with both forward and backward compilation."""
def fn(x, y):
sin = torch.sin(x)
with fx_traceback.annotate({"compile_with_inductor": 0}):
mul = sin * y
add = mul + 1
return torch.sin(add)
x = torch.randn(10, requires_grad=True)
y = torch.randn(10, requires_grad=True)
# Compile with regional inductor backend
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.fx.experimental.proxy_tensor import make_fx
fake_mode = FakeTensorMode()
with fake_mode:
fake_x = fake_mode.from_tensor(x)
fake_y = fake_mode.from_tensor(y)
# Create forward graph
with torch.fx.traceback.preserve_node_meta(enable=False):
gm = make_fx(fn)(fake_x, fake_y)
forward_gm = regional_inductor(gm, fake_x, fake_y)
# Create forward output code
fw_code = RegionalOutputCode(forward_gm)
# Verify it can be called
with fake_mode:
result = fw_code([fake_x, fake_y])
self.assertIsNotNone(result)
# Test serialization round-trip
fw_code.prepare_for_serialization()
# Deserialize via post_compile
from torch._inductor.output_code import CompiledFxGraphConstants
fx_config: _CompileFxKwargs = {"is_backward": False}
fw_code.post_compile([fake_x, fake_y], CompiledFxGraphConstants(), fx_config)
with fake_mode:
result2 = fw_code([fake_x, fake_y])
self.assertIsNotNone(result2)
def test_regional_compiled_forward_backward(self):
"""Test BundledCompiledForward and BundledCompiledBackward with RegionalOutputCode."""
def fn(x):
with fx_traceback.annotate({"compile_with_inductor": 0}):
return torch.sin(x) * 2
x = torch.randn(5, requires_grad=True)
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.fx.experimental.proxy_tensor import make_fx
fake_mode = FakeTensorMode()
with fake_mode:
fake_x = fake_mode.from_tensor(x)
with torch.fx.traceback.preserve_node_meta(enable=False):
gm = make_fx(fn)(fake_x)
compiled_gm = regional_inductor(gm, fake_x)
# Create forward using the generic BundledCompiledForward
fw_code = RegionalOutputCode(compiled_gm)
fw_compiled = BundledCompiledForward[RegionalOutputCode](result=fw_code)
# Test pre_save
fw_compiled.pre_save()
# After pre_save, fw_compiled.result is a copy with serialized graph
self.assertIsNotNone(fw_compiled.result._serialized_graph_module)
self.assertIsNone(
fw_compiled.result._graph_module
) # Should be cleared after serialization
# Test load (doesn't deserialize yet)
loaded_code = fw_compiled.load([fake_x])
self.assertIsNone(loaded_code._graph_module) # Not yet deserialized
self.assertIsNotNone(loaded_code._serialized_graph_module)
fx_config: _CompileFxKwargs = {"is_backward": False}
post_compiled = fw_compiled.post_compile(loaded_code, fx_config)
self.assertIsNotNone(post_compiled)
self.assertIsNotNone(post_compiled._graph_module) # Now deserialized
if __name__ == "__main__":
run_tests()
| TestRegionalOutputCode |
python | PyCQA__pylint | pylint/checkers/newstyle.py | {
"start": 886,
"end": 3985
} | class ____(BaseChecker):
"""Checks for usage of new style capabilities on old style classes and
other new/old styles conflicts problems.
* use of property, __slots__, super
* "super" usage
"""
# configuration section name
name = "newstyle"
# messages
msgs = MSGS
# configuration options
options = ()
@only_required_for_messages("bad-super-call")
def visit_functiondef(self, node: nodes.FunctionDef) -> None:
"""Check use of super."""
# ignore actual functions or method within a new style class
if not node.is_method():
return
klass = node.parent.frame()
for stmt in node.nodes_of_class(nodes.Call):
if node_frame_class(stmt) != node_frame_class(node):
# Don't look down in other scopes.
continue
expr = stmt.func
if not isinstance(expr, nodes.Attribute):
continue
match call := expr.expr:
case nodes.Call(func=nodes.Name(name="super"), args=[arg0, *_]):
pass
case _:
# skip the test if using super
# super first arg should not be the class
continue
# calling super(type(self), self) can lead to recursion loop
# in derived classes
match arg0:
case nodes.Call(func=nodes.Name(name="type")):
self.add_message("bad-super-call", node=call, args=("type",))
continue
# calling super(self.__class__, self) can lead to recursion loop
# in derived classes
match call.args:
case [
nodes.Attribute(attrname="__class__"),
nodes.Name(name="self"),
*_,
]:
self.add_message(
"bad-super-call", node=call, args=("self.__class__",)
)
continue
try:
supcls = call.args and next(call.args[0].infer(), None)
except astroid.InferenceError:
continue
# If the supcls is in the ancestors of klass super can be used to skip
# a step in the mro() and get a method from a higher parent
if klass is not supcls and all(i != supcls for i in klass.ancestors()):
name = None
# if supcls is not Uninferable, then supcls was inferred
# and use its name. Otherwise, try to look
# for call.args[0].name
if supcls:
name = supcls.name
elif call.args and hasattr(call.args[0], "name"):
name = call.args[0].name
if name:
self.add_message("bad-super-call", node=call, args=(name,))
visit_asyncfunctiondef = visit_functiondef
def register(linter: PyLinter) -> None:
linter.register_checker(NewStyleConflictChecker(linter))
| NewStyleConflictChecker |
python | scipy__scipy | scipy/linalg/tests/test_lapack.py | {
"start": 18976,
"end": 31361
} | class ____:
@pytest.mark.parametrize('dtype', DTYPES)
def test_nag_example_f07vef_f07vsf(self, dtype):
"""Test real (f07vef) and complex (f07vsf) examples from NAG
Examples available from:
* https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vef.html
* https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vsf.html
"""
if dtype in REAL_DTYPES:
ab = np.array([[-4.16, 4.78, 6.32, 0.16],
[-2.25, 5.86, -4.82, 0]],
dtype=dtype)
b = np.array([[-16.64, -4.16],
[-13.78, -16.59],
[13.10, -4.94],
[-14.14, -9.96]],
dtype=dtype)
x_out = np.array([[4, 1],
[-1, -3],
[3, 2],
[2, -2]],
dtype=dtype)
elif dtype in COMPLEX_DTYPES:
ab = np.array([[-1.94+4.43j, 4.12-4.27j, 0.43-2.66j, 0.44+0.1j],
[-3.39+3.44j, -1.84+5.52j, 1.74 - 0.04j, 0],
[1.62+3.68j, -2.77-1.93j, 0, 0]],
dtype=dtype)
b = np.array([[-8.86 - 3.88j, -24.09 - 5.27j],
[-15.57 - 23.41j, -57.97 + 8.14j],
[-7.63 + 22.78j, 19.09 - 29.51j],
[-14.74 - 2.40j, 19.17 + 21.33j]],
dtype=dtype)
x_out = np.array([[2j, 1 + 5j],
[1 - 3j, -7 - 2j],
[-4.001887 - 4.988417j, 3.026830 + 4.003182j],
[1.996158 - 1.045105j, -6.103357 - 8.986653j]],
dtype=dtype)
else:
raise ValueError(f"Datatype {dtype} not understood.")
tbtrs = get_lapack_funcs(('tbtrs'), dtype=dtype)
x, info = tbtrs(ab=ab, b=b, uplo='L')
assert_equal(info, 0)
assert_allclose(x, x_out, rtol=0, atol=1e-5)
@pytest.mark.parametrize('dtype,trans',
[(dtype, trans)
for dtype in DTYPES for trans in ['N', 'T', 'C']
if not (trans == 'C' and dtype in REAL_DTYPES)])
@pytest.mark.parametrize('uplo', ['U', 'L'])
@pytest.mark.parametrize('diag', ['N', 'U'])
def test_random_matrices(self, dtype, trans, uplo, diag):
rng = np.random.RandomState(1724)
# n, nrhs, kd are used to specify A and b.
# A is of shape n x n with kd super/sub-diagonals
# b is of shape n x nrhs matrix
n, nrhs, kd = 4, 3, 2
tbtrs = get_lapack_funcs('tbtrs', dtype=dtype)
is_upper = (uplo == 'U')
ku = kd * is_upper
kl = kd - ku
# Construct the diagonal and kd super/sub diagonals of A with
# the corresponding offsets.
band_offsets = range(ku, -kl - 1, -1)
band_widths = [n - abs(x) for x in band_offsets]
bands = [generate_random_dtype_array((width,), dtype, rng)
for width in band_widths]
if diag == 'U': # A must be unit triangular
bands[ku] = np.ones(n, dtype=dtype)
# Construct the diagonal banded matrix A from the bands and offsets.
a = sps.diags(bands, band_offsets, format='dia')
# Convert A into banded storage form
ab = np.zeros((kd + 1, n), dtype)
for row, k in enumerate(band_offsets):
ab[row, max(k, 0):min(n+k, n)] = a.diagonal(k)
# The RHS values.
b = generate_random_dtype_array((n, nrhs), dtype, rng)
x, info = tbtrs(ab=ab, b=b, uplo=uplo, trans=trans, diag=diag)
assert_equal(info, 0)
if trans == 'N':
assert_allclose(a @ x, b, rtol=5e-5)
elif trans == 'T':
assert_allclose(a.T @ x, b, rtol=5e-5)
elif trans == 'C':
assert_allclose(a.T.conjugate() @ x, b, rtol=5e-5)
else:
raise ValueError('Invalid trans argument')
@pytest.mark.parametrize('uplo,trans,diag',
[['U', 'N', 'Invalid'],
['U', 'Invalid', 'N'],
['Invalid', 'N', 'N']])
def test_invalid_argument_raises_exception(self, uplo, trans, diag):
"""Test if invalid values of uplo, trans and diag raise exceptions"""
# Argument checks occur independently of used datatype.
# This mean we must not parameterize all available datatypes.
tbtrs = get_lapack_funcs('tbtrs', dtype=np.float64)
rng = np.random.default_rng(1234)
ab = rng.random((4, 2))
b = rng.random((2, 4))
assert_raises(Exception, tbtrs, ab, b, uplo, trans, diag)
def test_zero_element_in_diagonal(self):
"""Test if a matrix with a zero diagonal element is singular
If the i-th diagonal of A is zero, ?tbtrs should return `i` in `info`
indicating the provided matrix is singular.
Note that ?tbtrs requires the matrix A to be stored in banded form.
In this form the diagonal corresponds to the last row."""
ab = np.ones((3, 4), dtype=float)
b = np.ones(4, dtype=float)
tbtrs = get_lapack_funcs('tbtrs', dtype=float)
ab[-1, 3] = 0
_, info = tbtrs(ab=ab, b=b, uplo='U')
assert_equal(info, 4)
@pytest.mark.parametrize('ldab,n,ldb,nrhs', [
(5, 5, 0, 5),
(5, 5, 3, 5)
])
def test_invalid_matrix_shapes(self, ldab, n, ldb, nrhs):
"""Test ?tbtrs fails correctly if shapes are invalid."""
ab = np.ones((ldab, n), dtype=float)
b = np.ones((ldb, nrhs), dtype=float)
tbtrs = get_lapack_funcs('tbtrs', dtype=float)
assert_raises(Exception, tbtrs, ab, b)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('norm', ['I', '1', 'O'])
@pytest.mark.parametrize('uplo', ['U', 'L'])
@pytest.mark.parametrize('diag', ['N', 'U'])
@pytest.mark.parametrize('n', [3, 10])
def test_trcon(dtype, norm, uplo, diag, n):
# Simple way to get deterministic (unlike `hash`) seed based on arguments
seed = list(f"{dtype}{norm}{uplo}{diag}{n}".encode())
rng = np.random.default_rng(seed)
A = rng.random(size=(n, n)) + rng.random(size=(n, n))*1j
# make the condition numbers more interesting
offset = rng.permuted(np.logspace(0, rng.integers(0, 10), n))
A += offset
A = A.real if np.issubdtype(dtype, np.floating) else A
A = np.triu(A) if uplo == 'U' else np.tril(A)
if diag == 'U':
A /= np.diag(A)[:, np.newaxis]
A = A.astype(dtype)
trcon = get_lapack_funcs('trcon', (A,))
res, _ = trcon(A, norm=norm, uplo=uplo, diag=diag)
if norm == 'I':
norm_A = np.linalg.norm(A, ord=np.inf)
norm_inv_A = np.linalg.norm(np.linalg.inv(A), ord=np.inf)
ref = 1 / (norm_A * norm_inv_A)
else:
anorm = np.linalg.norm(A, ord=1)
gecon, getrf = get_lapack_funcs(('gecon', 'getrf'), (A,))
lu, ipvt, info = getrf(A)
ref, _ = gecon(lu, anorm, norm=norm)
# This is an estimate of reciprocal condition number; we just need order of
# magnitude. In testing, we observed that much smaller rtol is OK in almost
# all cases... but sometimes it isn't.
rtol = 1 # np.finfo(dtype).eps**0.75
assert_allclose(res, ref, rtol=rtol)
def test_lartg():
for dtype in 'fdFD':
lartg = get_lapack_funcs('lartg', dtype=dtype)
f = np.array(3, dtype)
g = np.array(4, dtype)
if np.iscomplexobj(g):
g *= 1j
cs, sn, r = lartg(f, g)
assert_allclose(cs, 3.0/5.0)
assert_allclose(r, 5.0)
if np.iscomplexobj(g):
assert_allclose(sn, -4.0j/5.0)
assert_(isinstance(r, complex))
assert_(isinstance(cs, float))
else:
assert_allclose(sn, 4.0/5.0)
def test_rot():
# srot, drot from blas and crot and zrot from lapack.
for dtype in 'fdFD':
c = 0.6
s = 0.8
u = np.full(4, 3, dtype)
v = np.full(4, 4, dtype)
atol = 10**-(np.finfo(dtype).precision-1)
if dtype in 'fd':
rot = get_blas_funcs('rot', dtype=dtype)
f = 4
else:
rot = get_lapack_funcs('rot', dtype=dtype)
s *= -1j
v *= 1j
f = 4j
assert_allclose(rot(u, v, c, s), [[5, 5, 5, 5],
[0, 0, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, n=2), [[5, 5, 3, 3],
[0, 0, f, f]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, offy=2),
[[3, 3, 5, 5], [f, f, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, incx=2, offy=2, n=2),
[[5, 3, 5, 3], [f, f, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, incy=2, n=2),
[[3, 3, 5, 5], [0, f, 0, f]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, incx=2, offy=2, incy=2, n=1),
[[3, 3, 5, 3], [f, f, 0, f]], atol=atol)
assert_allclose(rot(u, v, c, s, incx=-2, incy=-2, n=2),
[[5, 3, 5, 3], [0, f, 0, f]], atol=atol)
a, b = rot(u, v, c, s, overwrite_x=1, overwrite_y=1)
assert_(a is u)
assert_(b is v)
assert_allclose(a, [5, 5, 5, 5], atol=atol)
assert_allclose(b, [0, 0, 0, 0], atol=atol)
def test_larfg_larf():
rng = np.random.default_rng(1234)
a0 = rng.random((4, 4))
a0 = a0.T.dot(a0)
a0j = rng.random((4, 4)) + 1j*rng.random((4, 4))
a0j = a0j.T.conj().dot(a0j)
# our test here will be to do one step of reducing a hermetian matrix to
# tridiagonal form using householder transforms.
for dtype in 'fdFD':
larfg, larf = get_lapack_funcs(['larfg', 'larf'], dtype=dtype)
if dtype in 'FD':
a = a0j.copy()
else:
a = a0.copy()
# generate a householder transform to clear a[2:,0]
alpha, x, tau = larfg(a.shape[0]-1, a[1, 0], a[2:, 0])
# create expected output
expected = np.zeros_like(a[:, 0])
expected[0] = a[0, 0]
expected[1] = alpha
# assemble householder vector
v = np.zeros_like(a[1:, 0])
v[0] = 1.0
v[1:] = x
# apply transform from the left
a[1:, :] = larf(v, tau.conjugate(), a[1:, :], np.zeros(a.shape[1]))
# apply transform from the right
a[:, 1:] = larf(v, tau, a[:, 1:], np.zeros(a.shape[0]), side='R')
assert_allclose(a[:, 0], expected, atol=1e-5)
assert_allclose(a[0, :], expected, atol=1e-5)
def test_sgesdd_lwork_bug_workaround():
# Test that SGESDD lwork is sufficiently large for LAPACK.
#
# This checks that _compute_lwork() correctly works around a bug in
# LAPACK versions older than 3.10.1.
sgesdd_lwork = get_lapack_funcs('gesdd_lwork', dtype=np.float32,
ilp64='preferred')
n = 9537
lwork = _compute_lwork(sgesdd_lwork, n, n,
compute_uv=True, full_matrices=True)
# If we called the Fortran function SGESDD directly with IWORK=-1, the
# LAPACK bug would result in lwork being 272929856, which was too small.
# (The result was returned in a single precision float, which does not
# have sufficient precision to represent the exact integer value that it
# computed internally.) The work-around implemented in _compute_lwork()
# will convert that to 272929888. If we are using LAPACK 3.10.1 or later
# (such as in OpenBLAS 0.3.21 or later), the work-around will return
# 272929920, because it does not know which version of LAPACK is being
# used, so it always applies the correction to whatever it is given. We
# will accept either 272929888 or 272929920.
# Note that the acceptable values are a LAPACK implementation detail.
# If a future version of LAPACK changes how SGESDD works, and therefore
# changes the required LWORK size, the acceptable values might have to
# be updated.
assert lwork == 272929888 or lwork == 272929920
| TestTbtrs |
python | nryoung__algorithms | tests/test_dynamic_programming.py | {
"start": 70,
"end": 631
} | class ____(unittest.TestCase):
"""
Tests the Longest Common Subsequence of several strings
"""
def test_lcs(self):
str1 = "BANANA"
str2 = "ABA"
str3 = "BCAD"
str4 = "NNAD"
self.assertEqual(lcs(str1, str1), str1)
self.assertEqual(lcs(str1, str2), "BA")
self.assertEqual(lcs(str1, str3), "BA")
self.assertEqual(lcs(str1, str4), "NNA")
self.assertEqual(lcs(str2, str3), "BA")
self.assertEqual(lcs(str2, str4), "A")
self.assertEqual(lcs(str3, str4), "AD")
| TestLCS |
python | gevent__gevent | src/greentest/3.9/test_ftplib.py | {
"start": 39057,
"end": 42238
} | class ____(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(20)
self.port = socket_helper.bind_port(self.sock)
self.server_thread = threading.Thread(target=self.server)
self.server_thread.daemon = True
self.server_thread.start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
self.old_port = ftplib.FTP.port
ftplib.FTP.port = self.port
def tearDown(self):
ftplib.FTP.port = self.old_port
self.server_thread.join()
# Explicitly clear the attribute to prevent dangling thread
self.server_thread = None
def server(self):
# This method sets the evt 3 times:
# 1) when the connection is ready to be accepted.
# 2) when it is safe for the caller to close the connection
# 3) when we have closed the socket
self.sock.listen()
# (1) Signal the caller that we are ready to accept the connection.
self.evt.set()
try:
conn, addr = self.sock.accept()
except socket.timeout:
pass
else:
conn.sendall(b"1 Hola mundo\n")
conn.shutdown(socket.SHUT_WR)
# (2) Signal the caller that it is safe to close the socket.
self.evt.set()
conn.close()
finally:
self.sock.close()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(ftp.sock.gettimeout())
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
# bpo-39259
with self.assertRaises(ValueError):
ftplib.FTP(HOST, timeout=0)
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
| TestTimeouts |
python | astropy__astropy | astropy/wcs/wcsapi/tests/test_high_level_api.py | {
"start": 6720,
"end": 8153
} | class ____(HighLevelWCSMixin):
def __init__(self, low_level_wcs):
self._low_level_wcs = low_level_wcs
@property
def low_level_wcs(self):
return self._low_level_wcs
def test_minimal_mixin_subclass():
# Regression test for a bug that caused coordinate conversions to fail
# unless the WCS dimensions were defined on the high level WCS (which they
# are not required to be)
fits_wcs = WCS(naxis=2)
high_level_wcs = MinimalHighLevelWCS(fits_wcs)
coord = high_level_wcs.pixel_to_world(1, 2)
pixel = high_level_wcs.world_to_pixel(*coord)
coord = high_level_wcs.array_index_to_world(1, 2)
pixel = high_level_wcs.world_to_array_index(*coord)
assert_allclose(pixel, (1, 2))
def test_world_to_array_index_nan():
# see https://github.com/astropy/astropy/issues/17227
wcs1 = WCS(naxis=1)
wcs1.wcs.crpix = (1,)
wcs1.wcs.set()
wcs1.pixel_bounds = [None]
res1 = wcs1.world_to_array_index(*wcs1.pixel_to_world((5,)))
assert not np.any(np.isnan(res1))
assert res1.ndim == 0
assert res1.item() == 5
wcs2 = WCS(naxis=2)
wcs2.wcs.crpix = (1, 1)
wcs2.wcs.set()
wcs2.pixel_bounds = [None, (-0.5, 3.5)]
res2 = wcs2.world_to_array_index(*wcs2.pixel_to_world(5, 5))
assert not np.any(np.isnan(res2))
assert isinstance(res2, tuple)
assert len(res2) == 2
assert res2 == (np.iinfo(int).min, 5)
| MinimalHighLevelWCS |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_type_check.py | {
"start": 5120,
"end": 6023
} | class ____(TestCase):
def test_real(self):
y = np.random.rand(
10,
)
assert_array_equal(0, np.imag(y))
y = np.array(1)
out = np.imag(y)
assert_array_equal(0, out)
assert_(isinstance(out, np.ndarray))
y = 1
out = np.imag(y)
assert_equal(0, out)
# assert_(not isinstance(out, np.ndarray)) # XXX: 0D tensor, not scalar
def test_cmplx(self):
y = np.random.rand(
10,
) + 1j * np.random.rand(
10,
)
assert_array_equal(y.imag, np.imag(y))
y = np.array(1 + 1j)
out = np.imag(y)
assert_array_equal(y.imag, out)
assert_(isinstance(out, np.ndarray))
y = 1 + 1j
out = np.imag(y)
assert_equal(1.0, out)
# assert_(not isinstance(out, np.ndarray)) # XXX: 0D tensor, not scalar
| TestImag |
python | pytorch__pytorch | torch/fx/subgraph_rewriter.py | {
"start": 591,
"end": 835
} | class ____(NamedTuple):
# Node from which the match was found
anchor: Node
# Maps nodes in the pattern subgraph to nodes in the larger graph
nodes_map: dict[Node, Node]
@compatibility(is_backward_compatible=False)
@dataclass
| Match |
python | PyCQA__pylint | tests/functional/n/name/name_styles.py | {
"start": 1434,
"end": 2603
} | class ____(CorrectClassName):
"""A derived class with an invalid inherited members.
Derived attributes and methods with invalid names do not trigger warnings.
"""
zz = 'Now a good class attribute'
def __init__(self):
super().__init__()
self._Bad_AtTR_name = None # Ignored
def BadMethodName(self):
"""Ignored since the method is in the interface."""
V = [WHAT_Ever_inListComp for WHAT_Ever_inListComp in GOOD_CONST_NAME]
def class_builder():
"""Function returning a class object."""
class EmbeddedClass:
"""Useless class."""
return EmbeddedClass
# +1:[invalid-name]
BAD_NAME_FOR_CLASS = collections.namedtuple('Named', ['tuple'])
NEXT_BAD_NAME_FOR_CLASS = class_builder() # [invalid-name]
GoodName = collections.namedtuple('Named', ['tuple'])
ToplevelClass = class_builder()
# Aliases for classes have the same name constraints.
AlsoCorrect = CorrectClassName
NOT_CORRECT = CorrectClassName # [invalid-name]
def test_globals():
"""Names in global statements are also checked."""
global NOT_CORRECT
global AlsoCorrect
NOT_CORRECT = 1
AlsoCorrect = 2
| DerivedFromCorrect |
python | pytorch__pytorch | torchgen/model.py | {
"start": 79251,
"end": 80203
} | class ____(Type):
class_name: str
def __str__(self) -> str:
"""
Return the class name will prefix __torch__.torch.classes
"""
return f"__torch__.torch.classes.{self.class_name}"
def is_base_ty_like(self, base_ty: BaseTy) -> bool:
return False
def is_symint_like(self) -> bool:
return False
def is_nullable(self) -> bool:
"""
Assume a custom class is not nullable.
"""
return False
def is_list_like(self) -> ListType | None:
return None
# List types specify that we may have multiples of an element. We
# also support explicit sizes on list types, but these have
# some nontrivial semantics! (However, for C++ API purposes, explicit
# sizes are mostly erased from the type system.)
#
# DANGER WILL ROBINSON: C++ elaboration depends on elem type; e.g.,
# int[] elaborates differently than bool[3]!
@dataclass(frozen=True)
| CustomClassType |
python | getsentry__sentry | src/sentry/search/snuba/executors.py | {
"start": 27661,
"end": 46607
} | class ____(AbstractQueryExecutor):
ISSUE_FIELD_NAME = "group_id"
logger = logging.getLogger("sentry.search.postgressnuba")
dependency_aggregations = {"trends": ["last_seen", "times_seen"]}
postgres_only_fields = {*SKIP_SNUBA_FIELDS, "regressed_in_release"}
# add specific fields here on top of skip_snuba_fields from the serializer
sort_strategies = {
"date": "last_seen",
"freq": "times_seen",
"new": "first_seen",
"trends": "trends",
"user": "user_count",
# We don't need a corresponding snuba field here, since this sort only happens
# in Postgres
"inbox": "",
}
aggregation_defs = {
"times_seen": ["count()", ""],
"first_seen": ["multiply(toUInt64(min(coalesce(group_first_seen, timestamp))), 1000)", ""],
"last_seen": ["multiply(toUInt64(max(timestamp)), 1000)", ""],
"trends": trends_aggregation,
# Only makes sense with WITH TOTALS, returns 1 for an individual group.
"total": ["uniq", ISSUE_FIELD_NAME],
"user_count": ["uniq", "tags[sentry:user]"],
"trends_issue_platform": trends_issue_platform_aggregation,
}
@property
def dataset(self) -> Dataset:
return Dataset.Events
def query(
self,
projects: Sequence[Project],
retention_window_start: datetime | None,
group_queryset: BaseQuerySet,
environments: Sequence[Environment] | None,
sort_by: str,
limit: int,
cursor: Cursor | None,
count_hits: bool,
paginator_options: Mapping[str, Any] | None,
search_filters: Sequence[SearchFilter] | None,
date_from: datetime | None,
date_to: datetime | None,
max_hits: int | None = None,
actor: Any | None = None,
aggregate_kwargs: TrendsSortWeights | None = None,
*,
referrer: str,
) -> CursorResult[Group]:
now = timezone.now()
end = None
paginator_options = {} if paginator_options is None else paginator_options
end_params = [
_f
for _f in [
date_to,
get_search_filter(search_filters, "date", "<"),
get_search_filter(search_filters, "timestamp", "<"),
]
if _f
]
if end_params:
end = min(end_params)
if not end:
end = now + ALLOWED_FUTURE_DELTA
allow_postgres_only_search = True
else:
allow_postgres_only_search = features.has(
"organizations:issue-search-allow-postgres-only-search", projects[0].organization
)
# TODO: Presumably we only want to search back to the project's max
# retention date, which may be closer than 90 days in the past, but
# apparently `retention_window_start` can be None(?), so we need a
# fallback.
retention_date = max(_f for _f in [retention_window_start, now - timedelta(days=90)] if _f)
start_params = [
date_from,
retention_date,
get_search_filter(search_filters, "date", ">"),
get_search_filter(search_filters, "timestamp", ">"),
]
start = max(_f for _f in start_params if _f)
end = max([retention_date, end])
if start == retention_date and end == retention_date:
# Both `start` and `end` must have been trimmed to `retention_date`,
# so this entire search was against a time range that is outside of
# retention. We'll return empty results to maintain backwards compatibility
# with Django search (for now).
return self.empty_result
if start >= end:
# TODO: This maintains backwards compatibility with Django search, but
# in the future we should find a way to notify the user that their search
# is invalid.
return self.empty_result
# If the requested sort is `date` (`last_seen`) and there
# are no other Snuba-based search predicates, we can simply
# return the results from Postgres.
if (
# XXX: Don't enable this for now, it doesn't properly respect issue platform rules for hiding issue types.
# We'll need to consolidate where we apply the type filters if we do want this.
allow_postgres_only_search
and cursor is None
and sort_by == "date"
and
# This handles tags and date parameters for search filters.
not [
sf
for sf in (search_filters or ())
if sf.key.name not in self.postgres_only_fields.union(["date", "timestamp"])
]
):
group_queryset = (
group_queryset.using_replica()
.filter(last_seen__gte=start, last_seen__lte=end)
.order_by("-last_seen")
)
for sf in search_filters or ():
# general search query:
if "message" == sf.key.name and isinstance(sf.value.raw_value, str):
group_queryset = group_queryset.filter(
Q(type=ErrorGroupType.type_id)
| (
Q(type__in=get_group_types_by_category(GroupCategory.PERFORMANCE.value))
and (
~Q(message__icontains=sf.value.raw_value)
if sf.is_negation
else Q(message__icontains=sf.value.raw_value)
)
)
)
paginator = DateTimePaginator(group_queryset, "-last_seen", **paginator_options)
# When it's a simple django-only search, we count_hits like normal
results = paginator.get_result(limit, cursor, count_hits=count_hits, max_hits=max_hits)
metrics.timing(
"snuba.search.query",
(timezone.now() - now).total_seconds(),
tags={"postgres_only": True},
)
return results
# Here we check if all the django filters reduce the set of groups down
# to something that we can send down to Snuba in a `group_id IN (...)`
# clause.
max_candidates = options.get("snuba.search.max-pre-snuba-candidates")
with sentry_sdk.start_span(op="snuba_group_query") as span:
group_ids = list(
group_queryset.using_replica().values_list("id", flat=True)[: max_candidates + 1]
)
span.set_data("Max Candidates", max_candidates)
span.set_data("Result Size", len(group_ids))
metrics.distribution("snuba.search.num_candidates", len(group_ids))
too_many_candidates = False
if not group_ids:
# no matches could possibly be found from this point on
metrics.incr("snuba.search.no_candidates", skip_internal=False)
return self.empty_result
elif len(group_ids) > max_candidates:
# If the pre-filter query didn't include anything to significantly
# filter down the number of results (from 'first_release', 'status',
# 'bookmarked_by', 'assigned_to', 'unassigned', or 'subscribed_by')
# then it might have surpassed the `max_candidates`. In this case,
# we *don't* want to pass candidates down to Snuba, and instead we
# want Snuba to do all the filtering/sorting it can and *then* apply
# this queryset to the results from Snuba, which we call
# post-filtering.
metrics.incr("snuba.search.too_many_candidates", skip_internal=False)
too_many_candidates = True
group_ids = []
sort_field = self.sort_strategies[sort_by]
chunk_growth = options.get("snuba.search.chunk-growth-rate")
max_chunk_size = options.get("snuba.search.max-chunk-size")
chunk_limit = limit
offset = 0
num_chunks = 0
hits = self.calculate_hits(
group_ids,
too_many_candidates,
sort_field,
projects,
retention_window_start,
group_queryset,
environments,
sort_by,
limit,
cursor,
count_hits,
paginator_options,
search_filters,
start,
end,
actor,
referrer=referrer,
)
if count_hits and hits == 0:
return self.empty_result
paginator_results = self.empty_result
result_groups = []
result_group_ids = set()
max_time = options.get("snuba.search.max-total-chunk-time-seconds")
time_start = time.time()
more_results = False
# Do smaller searches in chunks until we have enough results
# to answer the query (or hit the end of possible results). We do
# this because a common case for search is to return 100 groups
# sorted by `last_seen`, and we want to avoid returning all of
# a project's groups and then post-sorting them all in Postgres
# when typically the first N results will do.
while (time.time() - time_start) < max_time:
num_chunks += 1
# grow the chunk size on each iteration to account for huge projects
# and weird queries, up to a max size
chunk_limit = min(int(chunk_limit * chunk_growth), max_chunk_size)
# but if we have group_ids always query for at least that many items
chunk_limit = max(chunk_limit, len(group_ids))
# {group_id: group_score, ...}
snuba_groups, total = self.snuba_search(
start=start,
end=end,
project_ids=[p.id for p in projects],
environment_ids=environments and [environment.id for environment in environments],
organization=projects[0].organization,
sort_field=sort_field,
cursor=cursor,
group_ids=group_ids,
limit=chunk_limit,
offset=offset,
search_filters=search_filters,
referrer=referrer,
actor=actor,
aggregate_kwargs=aggregate_kwargs,
)
metrics.distribution("snuba.search.num_snuba_results", len(snuba_groups))
count = len(snuba_groups)
more_results = count >= limit and (offset + limit) < total
offset += len(snuba_groups)
if not snuba_groups:
break
if group_ids:
# pre-filtered candidates were passed down to Snuba, so we're
# finished with filtering and these are the only results. Note
# that because we set the chunk size to at least the size of
# the group_ids, we know we got all of them (ie there are
# no more chunks after the first)
result_groups = snuba_groups
if count_hits and hits is None:
hits = len(snuba_groups)
else:
# pre-filtered candidates were *not* passed down to Snuba,
# so we need to do post-filtering to verify Sentry DB predicates
filtered_group_ids = group_queryset.filter(
id__in=[gid for gid, _ in snuba_groups]
).values_list("id", flat=True)
group_to_score = dict(snuba_groups)
for group_id in filtered_group_ids:
if group_id in result_group_ids:
# because we're doing multiple Snuba queries, which
# happen outside of a transaction, there is a small possibility
# of groups moving around in the sort scoring underneath us,
# so we at least want to protect against duplicates
continue
group_score = group_to_score[group_id]
result_group_ids.add(group_id)
result_groups.append((group_id, group_score))
# break the query loop for one of three reasons:
# * we started with Postgres candidates and so only do one Snuba query max
# * the paginator is returning enough results to satisfy the query (>= the limit)
# * there are no more groups in Snuba to post-filter
# TODO: do we actually have to rebuild this SequencePaginator every time
# or can we just make it after we've broken out of the loop?
paginator_results = SequencePaginator(
[(score, id) for (id, score) in result_groups], reverse=True, **paginator_options
).get_result(limit, cursor, known_hits=hits, max_hits=max_hits)
if group_ids or len(paginator_results.results) >= limit or not more_results:
break
# HACK: We're using the SequencePaginator to mask the complexities of going
# back and forth between two databases. This causes a problem with pagination
# because we're 'lying' to the SequencePaginator (it thinks it has the entire
# result set in memory when it does not). For this reason we need to make some
# best guesses as to whether the `prev` and `next` cursors have more results.
if len(paginator_results.results) == limit and more_results:
# Because we are going back and forth between DBs there is a small
# chance that we will hand the SequencePaginator exactly `limit`
# items. In this case the paginator will assume there are no more
# results, so we need to override the `next` cursor's results.
paginator_results.next.has_results = True
if cursor is not None and (not cursor.is_prev or len(paginator_results.results) > 0):
# If the user passed a cursor, and it isn't already a 0 result `is_prev`
# cursor, then it's worth allowing them to go back a page to check for
# more results.
paginator_results.prev.has_results = True
metrics.distribution("snuba.search.num_chunks", num_chunks)
groups = Group.objects.in_bulk(paginator_results.results)
paginator_results.results = [groups[k] for k in paginator_results.results if k in groups]
metrics.timing(
"snuba.search.query",
(timezone.now() - now).total_seconds(),
tags={"postgres_only": False},
)
return paginator_results
def calculate_hits(
self,
group_ids: Sequence[int],
too_many_candidates: bool,
sort_field: str,
projects: Sequence[Project],
retention_window_start: datetime | None,
group_queryset: Query,
environments: Sequence[Environment] | None,
sort_by: str,
limit: int,
cursor: Cursor | None,
count_hits: bool,
paginator_options: Mapping[str, Any],
search_filters: Sequence[SearchFilter] | None,
start: datetime,
end: datetime,
actor: Any | None = None,
*,
referrer: str,
) -> int | None:
"""
This method should return an integer representing the number of hits (results) of your search.
It will return 0 if hits were calculated and there are none.
It will return None if hits were not calculated.
"""
if count_hits is False:
return None
elif too_many_candidates or cursor is not None:
# If we had too many candidates to reasonably pass down to snuba,
# or if we have a cursor that bisects the overall result set (such
# that our query only sees results on one side of the cursor) then
# we need an alternative way to figure out the total hits that this
# query has.
# To do this, we get a sample of groups matching the snuba side of
# the query, and see how many of those pass the post-filter in
# postgres. This should give us an estimate of the total number of
# snuba matches that will be overall matches, which we can use to
# get an estimate for X-Hits.
# The sampling is not simple random sampling. It will return *all*
# matching groups if there are less than N groups matching the
# query, or it will return a random, deterministic subset of N of
# the groups if there are more than N overall matches. This means
# that the "estimate" is actually an accurate result when there are
# less than N matching groups.
# The number of samples required to achieve a certain error bound
# with a certain confidence interval can be calculated from a
# rearrangement of the normal approximation (Wald) confidence
# interval formula:
#
# https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval
#
# Effectively if we want the estimate to be within +/- 10% of the
# real value with 95% confidence, we would need (1.96^2 * p*(1-p))
# / 0.1^2 samples. With a starting assumption of p=0.5 (this
# requires the most samples) we would need 96 samples to achieve
# +/-10% @ 95% confidence.
sample_size = options.get("snuba.search.hits-sample-size")
kwargs = {}
if not too_many_candidates:
kwargs["group_ids"] = group_ids
snuba_groups, snuba_total = self.snuba_search(
start=start,
end=end,
project_ids=[p.id for p in projects],
environment_ids=environments and [environment.id for environment in environments],
organization=projects[0].organization,
sort_field=sort_field,
limit=sample_size,
offset=0,
get_sample=True,
search_filters=search_filters,
actor=actor,
referrer=referrer,
**kwargs,
)
snuba_count = len(snuba_groups)
if snuba_count == 0:
# Maybe check for 0 hits and return EMPTY_RESULT in ::query? self.empty_result
return 0
else:
filtered_count = group_queryset.filter(
id__in=[gid for gid, _ in snuba_groups]
).count()
hit_ratio = filtered_count / float(snuba_count)
hits = int(hit_ratio * snuba_total)
return hits
return None
| PostgresSnubaQueryExecutor |
python | apache__airflow | airflow-core/src/airflow/plugins_manager.py | {
"start": 4560,
"end": 4644
} | class ____(Exception):
"""Exception when loading plugin."""
| AirflowPluginException |
python | fluentpython__example-code-2e | 24-class-metaprog/checked/decorator/checkeddeco.py | {
"start": 2027,
"end": 4623
} | class ____:
def __init__(self, name: str, constructor: Callable) -> None: # <2>
if not callable(constructor) or constructor is type(None):
raise TypeError(f'{name!r} type hint must be callable')
self.name = name
self.constructor = constructor
def __set__(self, instance: Any, value: Any) -> None: # <3>
if value is ...: # <4>
value = self.constructor()
else:
try:
value = self.constructor(value) # <5>
except (TypeError, ValueError) as e:
type_name = self.constructor.__name__
msg = (
f'{value!r} is not compatible with {self.name}:{type_name}'
)
raise TypeError(msg) from e
instance.__dict__[self.name] = value # <6>
# tag::CHECKED_DECORATOR[]
def checked(cls: type) -> type: # <1>
for name, constructor in _fields(cls).items(): # <2>
setattr(cls, name, Field(name, constructor)) # <3>
cls._fields = classmethod(_fields) # type: ignore # <4>
instance_methods = ( # <5>
__init__,
__repr__,
__setattr__,
_asdict,
__flag_unknown_attrs,
)
for method in instance_methods: # <6>
setattr(cls, method.__name__, method)
return cls # <7>
# end::CHECKED_DECORATOR[]
# tag::CHECKED_METHODS[]
def _fields(cls: type) -> dict[str, type]:
return get_type_hints(cls)
def __init__(self: Any, **kwargs: Any) -> None:
for name in self._fields():
value = kwargs.pop(name, ...)
setattr(self, name, value)
if kwargs:
self.__flag_unknown_attrs(*kwargs)
def __setattr__(self: Any, name: str, value: Any) -> None:
if name in self._fields():
cls = self.__class__
descriptor = getattr(cls, name)
descriptor.__set__(self, value)
else:
self.__flag_unknown_attrs(name)
def __flag_unknown_attrs(self: Any, *names: str) -> NoReturn:
plural = 's' if len(names) > 1 else ''
extra = ', '.join(f'{name!r}' for name in names)
cls_name = repr(self.__class__.__name__)
raise AttributeError(f'{cls_name} has no attribute{plural} {extra}')
def _asdict(self: Any) -> dict[str, Any]:
return {
name: getattr(self, name)
for name, attr in self.__class__.__dict__.items()
if isinstance(attr, Field)
}
def __repr__(self: Any) -> str:
kwargs = ', '.join(
f'{key}={value!r}' for key, value in self._asdict().items()
)
return f'{self.__class__.__name__}({kwargs})'
# end::CHECKED_METHODS[]
| Field |
python | pytorch__pytorch | torch/distributed/tensor/_random.py | {
"start": 6672,
"end": 19302
} | class ____(_RNGStateTracker):
"""
This subclass of ``_RNGStateTracker`` defines the default policy of how RNG states
should be shared and synchronized among all ranks to respect the semantics of DTensor
random operators.
note: _RNGStateTracker only supports cuda/cuda-like device.
"""
def __init__(
self,
device_mesh: DeviceMesh,
run_state_sync: bool = True,
):
super().__init__(_resolve_device(device_mesh=device_mesh))
assert self._device_handle is not None
# DTensor RNG tracker so far only supports CUDA/CUDA-like devices
if self._device.type == "cpu":
raise RuntimeError(
f"{self.__class__.__name__} instantiation requires the presence of "
f"CUDA/CUDA-like/XPU device. Got {self._device.type} instead."
)
rng_state = self._get_device_state()
if run_state_sync:
# synchronize RNG state using rank 0's current one
torch.distributed.broadcast(rng_state, 0)
my_rng_state = self._get_device_state()
if not all(my_rng_state == rng_state):
logger.warning(
"DTensor is synchronizing RNG states of every rank with the state from rank 0. "
"This behavior is deprecated. "
"Please call `torch.manual_seed()` on every rank that participates in SPMD DTensor Operations with "
"the same seed. If using Pipeline Parallelism, each pipeling state would use a different seed, "
"but all ranks belonging to one pipeline stage would use the same seed."
)
self._set_device_state(rng_state)
def _get_device_state(self) -> torch.Tensor:
if self._device.type == "hpu":
self._device_handle.set_rng_ctx("philox")
rng_state = self._device_handle.get_rng_state().to(self._device)
if self._device.type == "hpu":
self._device_handle.unset_rng_ctx("philox")
return rng_state
def _set_device_state(self, state: torch.Tensor):
# It seems that the underlying generator wants a cpu tensor but the dtensor code expects `_get_device_state`
# to convert to a 'device' tensor, probably because we may use it with our backend comms for sync/debug
# for now, we just convert back to cpu here to make sure it always works.
if self._device.type == "hpu":
self._device_handle.set_rng_ctx("philox")
self._device_handle.set_rng_state(state.to("cpu"))
if self._device.type == "hpu":
self._device_handle.unset_rng_ctx("philox")
@contextlib.contextmanager
def _distribute_region(
self, spec: DTensorSpec, generator: torch.Generator | None = None
):
from torch.distributed._local_tensor import maybe_enable_local_tracker
if local_tracker_context := maybe_enable_local_tracker(
self._device.type, self.distribute_region_enabled, spec, generator
):
with local_tracker_context:
yield
return
# regular (non-LocalTensor) mode
if generator is not None:
# This is a little hacky, but for any user-passed generator, we store its state under a unique key,
# not because we need to keep a copy of it but because its the easiest way to make it work with the
# existing set/get APIs. We also ensure we remove it from rng_states after each _distribute_region.
state = _PhiloxState(generator.get_state())
else:
state = _PhiloxState(self._get_device_state())
if self.distribute_region_enabled:
if self._device.type == "hpu":
self._device_handle.set_rng_ctx("philox")
old_offset = state.offset
self._set_pre_op_offset(state, spec)
with torch.random.fork_rng(
devices=[self._device], device_type=self._device.type
):
assert self._device_handle is not None
self._device_handle.set_rng_state(state.state)
try:
yield # execute the region code
finally:
# update offset to synchronize among ranks
self._set_post_op_offset(state, spec, old_offset)
if self._device.type == "hpu":
self._device_handle.unset_rng_ctx("philox")
else:
yield
if generator is not None:
# ensure we (a) propagate the state advancement back to the user's RNG so its visible and impacts any future
# usage of that RNG (dtensor or non-dtensor), (b) drop it from our own cache so that if the user updates
# the seed value in their rng and uses it with DTensor again, we always use the latest value
generator.set_state(state.state)
else:
self._set_device_state(state.state)
def _set_pre_op_offset(self, state: _PhiloxState, spec: DTensorSpec) -> None:
"""Set the starting RNG offset for current device's local shard before actual
op execution. The pre_op_offset value should start from the current RNG offset
and increment by the size of local shard until it reaches the size of the whole
DTensor. For different ranks that hold the same DTensor shard, their pre_op_offset
will be the same.
Args:
state (:class:`Tensor`): The generator state to modify
spec (:class:`DTensorSpec`): the spec of the DTensor object on which
we prepare the offset for running random ops.
Returns:
None
.. warning::
Note that, current implementation does not consider DTensor's continguity.
Example:
take a DTensor of shape [8, 16] as an example. Assume that the DTensor
is placed on a device mesh with placements ([Shard(1), Replicate(), Shard(0)]),
and the mesh is:
[[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
``spec.mesh.get_coordinate()`` provides the coordinate of the current rank
in the mesh. For example, the coordinate of rank 5 is (1, 0, 1).
Another concept to introduce besides rank coordinate is shard coordinate.
Each rank holds a local shard of the DTensor. In the example, the DTensor
is partitioned into 4 [4, 8] shards. The first shard has 2 replicas and
rank 0 (coord (0, 0, 0)) and rank 2 (coord (0, 1, 0)) have 1 replica each.
That being said, the local shard on rank 0 and rank 2 correspond to the same
shard of the DTensor. To denote each DTensor shard, we use a shard coordinate
(in the example, it will be a tuple (i, j) where shard (i, j) has the slice
DTensor[4 * i : 4 * (i + 1), 8 * j : 8 * (j + 1)], 0 <= i < 2, 0 <= j < 2).
Once we have rank coordinate and shard coordinate, we can calculate on each rank
what shard of the DTensor the rank holds, with the help of dim_map. The dim_map
of the above DTensor is [2, 0] so the shard coordinate of a rank with rank coord
(x, y, z) is simply (z, x) by taking(rank_coord[dim_map[0]],rank_coord[dim_map[1]]).
Following this calculation,
rank 0 and rank 2 holds the shard of coord (0, 0);
rank 1 and rank 3 holds the shard of coord (0, 1);
rank 4 and rank 6 holds the shard of coord (1, 0);
rank 5 and rank 7 holds the shard of coord (1, 1);
The last value to calculate before obtaining the starting offset is the shard linear index.
The starting offset for each rank will be its shard_linear_index * local_tensor_numel.
"""
dtensor_shape = spec.shape
mesh = spec.mesh
# note: dim_map does not allow double sharding which is the FSDP(fully_shard)+TP
# case. Replace the custom logic with dim_map once we support it.
dim_map: list[int | list[int]] = [-1] * spec.ndim
for i, placement in enumerate(spec.placements):
if isinstance(placement, Shard):
shard_dim = placement.dim
if dim_map[shard_dim] == -1:
dim_map[shard_dim] = [i]
else:
mesh_dim_list = dim_map[shard_dim]
assert isinstance(mesh_dim_list, list)
mesh_dim_list.append(i)
# Compute shard coordinate:
# The coordinate on each tensor dim is a tuple (idx, range)
# If a DTensor is partitioned on its dim i into n shards, and the current rank
# holds the j-th, then its shard coordinate will be (idx=j, range=n) on dim i
mesh_coordinate = mesh.get_coordinate()
assert mesh_coordinate is not None
mesh_size = mesh.shape
shard_idx_by_dim = []
total_num_shards_by_dim = [] # total number of shards on each tensor dim
for mesh_dim in dim_map:
shard_idx = 0
total_num_shards = 1
# the tensor dim is sharded on more than 1 mesh dim
if isinstance(mesh_dim, list):
rank_coord = [mesh_coordinate[d] for d in mesh_dim]
num_shards = [mesh_size[d] for d in mesh_dim]
# compute the shard idx and total number of shards
for idx, size in zip(rank_coord, num_shards):
shard_idx = shard_idx * size + idx
total_num_shards *= size
shard_idx_by_dim.append(shard_idx)
total_num_shards_by_dim.append(total_num_shards)
# compute shard linear index
shard_linear_idx = self._calc_shard_linear_idx(
shard_idx_by_dim, total_num_shards_by_dim
)
# compute starting offset using the first shard's size
local_size_on_rank_0 = list(dtensor_shape)
for idx, placement in enumerate(spec.placements):
if isinstance(placement, Shard):
mesh_dim_size = mesh.size(idx)
shard_dim = placement.dim
local_size_on_rank_0[shard_dim], _ = (
placement._local_shard_size_and_offset(
dtensor_shape[shard_dim],
mesh_dim_size,
0,
)
)
from torch.distributed.tensor._ops.utils import prod
local_size = prod(local_size_on_rank_0)
# get current RNG offset
current_offset = state.offset
# pytorch: offset must be multiple of 4
# source: aten/src/ATen/cuda/CUDAGeneratorImpl.cpp
offset_incr = (shard_linear_idx * local_size + 3) // 4 * 4
state.offset = current_offset + offset_incr
def _set_post_op_offset(
self, state: _PhiloxState, spec: DTensorSpec, old_offset: int
) -> None:
"""Sets the RNG to a synchronized state after running the local random op. Every
rank should set its RNG offset to `old_offset + DTensor.numel()` where old_offset is
the offset before calling `set_pre_op_offset` i.e. the offset before running DTensor
random ops.
Args:
state (:class:`Tensor`): The generator state to modify.
spec (:class:`DTensorSpec`): the spec of the DTensor object on which
we post-process the offset for running random ops.
Returns:
None
"""
dtensor_shape = spec.shape
from torch.distributed.tensor._ops.utils import prod
numel = prod(dtensor_shape)
# pytorch: offset must be multiple of 4
# source: aten/src/ATen/cuda/CUDAGeneratorImpl.cpp
numel = (numel + 3) // 4 * 4
state.offset = old_offset + numel
def _calc_shard_linear_idx(
self, shard_coord: list[int], shard_size: list[int]
) -> int:
# compute shard linear index
shard_linear_idx = 0
shard_coord_stride = 1
for idx, size in zip(reversed(shard_coord), reversed(shard_size)):
shard_linear_idx += idx * shard_coord_stride
shard_coord_stride *= size
return shard_linear_idx
def _resolve_device(device_mesh: DeviceMesh) -> torch.device:
device_type = device_mesh.device_type
device_handle = _get_device_handle(device_type)
assert device_handle is not None
device_idx = device_mesh.get_rank() % device_handle.device_count()
return torch.device(f"{device_type}:{device_idx:d}")
| OffsetBasedRNGTracker |
python | gevent__gevent | src/greentest/3.10/signalinterproctester.py | {
"start": 154,
"end": 2803
} | class ____(unittest.TestCase):
def setUp(self):
self.got_signals = {'SIGHUP': 0, 'SIGUSR1': 0, 'SIGALRM': 0}
def sighup_handler(self, signum, frame):
self.got_signals['SIGHUP'] += 1
def sigusr1_handler(self, signum, frame):
self.got_signals['SIGUSR1'] += 1
raise SIGUSR1Exception
def wait_signal(self, child, signame):
if child is not None:
# This wait should be interrupted by exc_class
# (if set)
child.wait()
timeout = support.SHORT_TIMEOUT
deadline = time.monotonic() + timeout
while time.monotonic() < deadline:
if self.got_signals[signame]:
return
signal.pause()
self.fail('signal %s not received after %s seconds'
% (signame, timeout))
def subprocess_send_signal(self, pid, signame):
code = 'import os, signal; os.kill(%s, signal.%s)' % (pid, signame)
args = [sys.executable, '-I', '-c', code]
return subprocess.Popen(args)
def test_interprocess_signal(self):
# Install handlers. This function runs in a sub-process, so we
# don't worry about re-setting the default handlers.
signal.signal(signal.SIGHUP, self.sighup_handler)
signal.signal(signal.SIGUSR1, self.sigusr1_handler)
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
signal.signal(signal.SIGALRM, signal.default_int_handler)
# Let the sub-processes know who to send signals to.
pid = str(os.getpid())
with self.subprocess_send_signal(pid, "SIGHUP") as child:
self.wait_signal(child, 'SIGHUP')
self.assertEqual(self.got_signals, {'SIGHUP': 1, 'SIGUSR1': 0,
'SIGALRM': 0})
with self.assertRaises(SIGUSR1Exception):
with self.subprocess_send_signal(pid, "SIGUSR1") as child:
self.wait_signal(child, 'SIGUSR1')
self.assertEqual(self.got_signals, {'SIGHUP': 1, 'SIGUSR1': 1,
'SIGALRM': 0})
with self.subprocess_send_signal(pid, "SIGUSR2") as child:
# Nothing should happen: SIGUSR2 is ignored
child.wait()
try:
with self.assertRaises(KeyboardInterrupt):
signal.alarm(1)
self.wait_signal(None, 'SIGALRM')
self.assertEqual(self.got_signals, {'SIGHUP': 1, 'SIGUSR1': 1,
'SIGALRM': 0})
finally:
signal.alarm(0)
if __name__ == "__main__":
unittest.main()
| InterProcessSignalTests |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/sum_test.py | {
"start": 541,
"end": 1297
} | class ____(op_bench.TorchBenchmarkBase):
def init(self, R, V, dim, contiguous, device):
shape = (R, V) if dim == 0 else (V, R)
tensor = torch.rand(shape, device=device)
if not contiguous:
storage = torch.empty([s * 2 for s in shape], device=device)
storage[::2, ::2] = tensor
self.input_tensor = storage[::2, ::2]
else:
self.input_tensor = tensor
self.inputs = {"input_tensor": self.input_tensor, "dim": dim}
self.set_module_name("sum")
def forward(self, input_tensor, dim: int):
return input_tensor.sum(dim=dim)
op_bench.generate_pt_test(sum_configs, SumBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| SumBenchmark |
python | gevent__gevent | src/gevent/events.py | {
"start": 15569,
"end": 16279
} | class ____(IGeventDidPatchEvent):
"""
Event emitted *after* the builtin modules have been patched.
If you're going to monkey-patch a third-party library, this is
usually the event to listen for.
The values of the *source* and *target* attributes are undefined.
"""
patch_all_arguments = Attribute(
"A dictionary of all the arguments to `gevent.monkey.patch_all`. "
"This dictionary should not be modified. "
)
patch_all_kwargs = Attribute(
"A dictionary of the extra arguments to `gevent.monkey.patch_all`. "
"This dictionary should not be modified. "
)
@implementer(IGeventDidPatchBuiltinModulesEvent)
| IGeventDidPatchBuiltinModulesEvent |
python | fastai__fastai | fastai/vision/augment.py | {
"start": 22291,
"end": 27768
} | class ____(RandTransform):
"Picks a random scaled crop of an image and resize it to `size`"
split_idx,order = None,30
def __init__(self,
size, # Final size, duplicated if one value is specified
min_scale=0.08, # Minimum scale of the crop, in relation to image area
ratio=(3/4, 4/3), # Range of width over height of the output
mode='bilinear', # PyTorch `F.grid_sample` interpolation
valid_scale=1., # Scale of the crop for the validation set, in relation to image area
max_scale=1., # Maximum scale of the crop, in relation to image area
mode_mask='nearest', # Interpolation mode for `TensorMask`
**kwargs
):
if isinstance(size, int): size = (size,size)
store_attr()
super().__init__(**kwargs)
def before_call(self, b, split_idx):
self.do = True
h,w = fastuple((b[0] if isinstance(b, tuple) else b).shape[-2:])
for attempt in range(10):
if split_idx: break
area = random.uniform(self.min_scale,self.max_scale) * w * h
ratio = math.exp(random.uniform(math.log(self.ratio[0]), math.log(self.ratio[1])))
nw = int(round(math.sqrt(area * ratio)))
nh = int(round(math.sqrt(area / ratio)))
if nw <= w and nh <= h:
self.cp_size = (nh,nw)
self.tl = random.randint(0,h - nh),random.randint(0,w-nw)
return
if w/h < self.ratio[0]: self.cp_size = (int(w/self.ratio[0]), w)
elif w/h > self.ratio[1]: self.cp_size = (h, int(h*self.ratio[1]))
else: self.cp_size = (h, w)
if split_idx: self.cp_size = (int(self.cp_size[0]*self.valid_scale), int(self.cp_size[1]*self.valid_scale))
self.tl = ((h-self.cp_size[0])//2,(w-self.cp_size[1])//2)
def _encode(self, x, mode):
x = x[...,self.tl[0]:self.tl[0]+self.cp_size[0], self.tl[1]:self.tl[1]+self.cp_size[1]]
return x.affine_coord(sz=self.size, mode=mode)
def encodes(self, x:TensorImage|TensorPoint|TensorBBox): return self._encode(x, self.mode)
def encodes(self, x:TensorMask): return self._encode(x, self.mode_mask)
# %% ../../nbs/09_vision.augment.ipynb 110
def mask_tensor(
x:Tensor, # Input `Tensor`
p=0.5, # Probability of not applying mask
neutral=0., # Mask value
batch=False # Apply identical mask to entire batch
):
"Mask elements of `x` with `neutral` with probability `1-p`"
if p==1.: return x
if batch: return x if random.random() < p else x.new_zeros(*x.size()) + neutral
if neutral != 0: x.add_(-neutral)
# Extra casting to float and long to prevent crashes on mps accelerator (issue #3911)
mask = x.new_empty(*x.size()).float().bernoulli_(p).long()
x.mul_(mask)
return x.add_(neutral) if neutral != 0 else x
# %% ../../nbs/09_vision.augment.ipynb 117
def _draw_mask(x, def_draw, draw=None, p=0.5, neutral=0., batch=False):
    "Creates mask_tensor based on `x` with `neutral` with probability `1-p`. "
    chooser = def_draw if draw is None else draw
    n = x.size(0)
    if callable(chooser):
        # Custom (or default) sampler receives the batch and returns one value per item.
        vals = chooser(x)
    elif is_listy(chooser):
        # Explicit per-item values; must cover at least the whole batch.
        assert len(chooser) >= n
        vals = tensor(chooser[:n], dtype=x.dtype, device=x.device)
    else:
        # A scalar is broadcast to every batch item.
        vals = x.new_zeros(n) + chooser
    return TensorBase(mask_tensor(vals, p=p, neutral=neutral, batch=batch))
# %% ../../nbs/09_vision.augment.ipynb 126
def affine_mat(*ms):
    "Restructure length-6 vector `ms` into an affine matrix with 0,0,1 in the last line"
    row1 = stack([ms[0], ms[1], ms[2]], dim=1)
    row2 = stack([ms[3], ms[4], ms[5]], dim=1)
    # Homogeneous last row (0, 0, 1), shaped/typed like the inputs.
    zero, one = t0(ms[0]), t1(ms[0])
    row3 = stack([zero, zero, one], dim=1)
    return stack([row1, row2, row3], dim=1)
# %% ../../nbs/09_vision.augment.ipynb 132
def flip_mat(
    x:Tensor, # The input Tensor
    p=0.5, # Probability of applying transformation
    draw:int|MutableSequence|Callable=None, # Custom flips instead of random
    batch:bool=False # Apply identical flip to entire batch
):
    "Return a random flip matrix"
    def _def_draw(t): return t.new_ones(t.size(0))
    # +1 keeps a sample as-is, -1 mirrors it horizontally.
    sign = x.new_ones(x.size(0)) - 2*_draw_mask(x, _def_draw, draw=draw, p=p, batch=batch)
    zero, one = t0(sign), t1(sign)
    return affine_mat(sign, zero, zero,
                      zero, one, zero)
# %% ../../nbs/09_vision.augment.ipynb 136
def _get_default(x, mode=None, pad_mode=None):
    "Resolve default `mode`/`pad_mode` for `x` and return a representative tensor with them."
    # NOTE(review): the original ternary here was
    #   'bilinear' if isinstance(x, TensorMask) else 'bilinear'
    # — both branches identical, so the TensorMask test was dead code. Masks
    # presumably were meant to default to 'nearest'; confirm before changing
    # behavior. Keeping the observable behavior ('bilinear' for everything).
    if mode is None: mode = 'bilinear'
    # Points/boxes pad with zeros; images/masks reflect at the border.
    if pad_mode is None: pad_mode = PadMode.Zeros if isinstance(x, (TensorPoint, TensorBBox)) else PadMode.Reflection
    # For tuple inputs (e.g. (image, target)), pick defaults from the first item.
    x0 = x[0] if isinstance(x, tuple) else x
    return x0, mode, pad_mode
# %% ../../nbs/09_vision.augment.ipynb 139
@patch
def flip_batch(x: TensorImage|TensorMask|TensorPoint|TensorBBox,
    p=0.5, # Probability of applying flip
    draw:int|MutableSequence|Callable=None, # Custom flips instead of random
    size:int|tuple=None, # Output size, duplicated if one value is specified
    mode=None, # PyTorch `F.grid_sample` interpolation applied to `x`
    pad_mode=None, # Padding applied to `x`
    align_corners=True, # PyTorch `F.grid_sample` align_corners
    batch=False # Apply identical flip to entire batch
):
    "Randomly flip `x` on the batch dimension, optionally resizing to `size`."
    x0, mode, pad_mode = _get_default(x, mode, pad_mode)
    flip = flip_mat(x0, p=p, draw=draw, batch=batch)
    # Drop the homogeneous last row: affine_coord expects a 2x3 matrix per sample.
    return x.affine_coord(mat=flip[:,:2], sz=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# %% ../../nbs/09_vision.augment.ipynb 141
| RandomResizedCropGPU |
python | tensorflow__tensorflow | tensorflow/python/types/core.py | {
"start": 2581,
"end": 3586
} | class ____(inspect.Signature, metaclass=abc.ABCMeta):
"""Represents the type of a TensorFlow callable.
FunctionType inherits from inspect.Signature which canonically represents the
structure (and optionally type) information of input parameters and output of
a Python function. Additionally, it integrates with the tf.function type
system (`tf.types.experimental.TraceType`) to provide a holistic
representation of the the I/O contract of the callable. It is used for:
- Canonicalization and type-checking of Python input arguments
- Type-based dispatch to concrete functions
- Packing/unpacking structured python values to Tensors
- Generation of structured placeholder values for tracing
"""
# The signature of this method changes in Py3.10 so we override to enforce it.
@classmethod
def from_callable(cls, obj, *, follow_wrapped=True):
return super().from_callable(obj, follow_wrapped=follow_wrapped)
@tf_export("types.experimental.Callable", v1=[])
| FunctionType |
python | huggingface__transformers | tests/models/granitemoeshared/test_modeling_granitemoeshared.py | {
"start": 6941,
"end": 10969
} | class ____(unittest.TestCase):
@slow
@require_read_token
def test_model_3b_logits(self):
input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
model = GraniteMoeSharedForCausalLM.from_pretrained("ibm/PowerMoE-3b", device_map="auto")
with torch.no_grad():
out = model(torch.tensor([input_ids]).to(torch_device))
# fmt: off
# Expected mean on dim = -1
EXPECTED_MEANS = Expectations(
{
("xpu", 3): torch.tensor([[-4.4005, -3.6689, -3.6187, -2.8308, -3.9871, -3.1001, -2.8738, -2.8063]]),
("cuda", 7): torch.tensor([[-2.2122, -1.6632, -2.9269, -2.3344, -2.0143, -3.0146, -2.6839, -2.5610]]),
("cuda", 8): torch.tensor([[-4.4005, -3.6689, -3.6187, -2.8308, -3.9871, -3.1001, -2.8738, -2.8063]]),
}
)
EXPECTED_MEAN = EXPECTED_MEANS.get_expectation()
torch.testing.assert_close(EXPECTED_MEAN.to(torch_device), out.logits.float().mean(-1), rtol=1e-2, atol=1e-2)
# slicing logits[0, 0, 0:15]
EXPECTED_SLICES = Expectations(
{
("xpu", 3): torch.tensor([[2.5479, -9.2123, -9.2121, -9.2175, -9.2122, -1.5024, -9.2121, -9.2122, -9.2161, -9.2122, -6.3100, -3.6223, -3.6377, -5.2542, -5.2523]]),
("cuda", 7): torch.tensor([[4.8785, -2.2890, -2.2892, -2.2885, -2.2890, -3.5007, -2.2897, -2.2892, -2.2895, -2.2891, -2.2887, -2.2882, -2.2889, -2.2898, -2.2892]]),
("cuda", 8): torch.tensor([[2.5479, -9.2123, -9.2121, -9.2175, -9.2122, -1.5024, -9.2121, -9.2122, -9.2161, -9.2122, -6.3100, -3.6223, -3.6377, -5.2542, -5.2523]]),
}
)
EXPECTED_SLICE = EXPECTED_SLICES.get_expectation()
# fmt: on
self.assertTrue(
torch.allclose(
EXPECTED_SLICE.to(torch_device),
out.logits[0, 0, :15].float(),
atol=1e-3,
rtol=1e-3,
)
)
@slow
def test_model_3b_generation(self):
# fmt: off
EXPECTED_TEXT_COMPLETIONS = Expectations(
{
("xpu", 3): (
"Simply put, the theory of relativity states that 1) the speed of light is constant, and 2) the speed of light is the same for all observers.\n\n"
"The first part is easy to understand. The second part is a little more difficult.\n\n"
"The second part of the theory of relativity is a little more difficult to understand.\n"
),
("cuda", 7): (
"Simply put, the theory of relativity states that \n$$\n\\frac{d^2x^\\mu}{d\\tau^2} = "
"\\frac{1}{c^2}\\frac{d^2x^\\mu}{dt^2}\n$$\nwhere $x^\\mu$ is a four-vector, $\\tau$ is the proper time"
),
("cuda", 8): (
"Simply put, the theory of relativity states that 1) the speed of light is constant, and 2) the speed of light is the same for all observers.\n\n"
"The first part is easy to understand. The second part is a little more difficult.\n\n"
"The second part of the theory of relativity is a little more difficult to understand.\n"
),
}
)
# fmt: on
EXPECTED_TEXT_COMPLETION = EXPECTED_TEXT_COMPLETIONS.get_expectation()
prompt = "Simply put, the theory of relativity states that "
tokenizer = AutoTokenizer.from_pretrained("ibm/PowerMoE-3b")
model = GraniteMoeSharedForCausalLM.from_pretrained("ibm/PowerMoE-3b", device_map="auto")
model_inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
# greedy generation outputs
generated_ids = model.generate(**model_inputs, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| GraniteMoeSharedIntegrationTest |
python | tensorflow__tensorflow | tensorflow/python/autograph/tests/ext_slice_test.py | {
"start": 1035,
"end": 1492
} | class ____(reference_test_base.TestCase):
def test_basic_ext_slice(self):
self.assertFunctionMatchesEager(basic_ext_slice, tf.eye(3))
def test_basic_expand_dims(self):
self.assertFunctionMatchesEager(basic_expand_dims, tf.eye(3))
def test_slice_of_application(self):
self.assertFunctionMatchesEager(slice_of_application, lambda x: x,
tf.eye(3))
if __name__ == '__main__':
tf.test.main()
| ReferenceTest |
python | rq__rq | tests/test_executions.py | {
"start": 342,
"end": 9484
} | class ____(RQTestCase):
"""Test the execution registry."""
def setUp(self):
super().setUp()
self.queue = Queue(connection=self.connection)
def test_equality(self):
"""Test equality between Execution objects"""
job = self.queue.enqueue(say_hello)
pipeline = self.connection.pipeline()
execution_1 = Execution.create(job=job, ttl=100, pipeline=pipeline)
execution_2 = Execution.create(job=job, ttl=100, pipeline=pipeline)
pipeline.execute()
self.assertNotEqual(execution_1, execution_2)
fetched_execution = Execution.fetch(id=execution_1.id, job_id=job.id, connection=self.connection)
self.assertEqual(execution_1, fetched_execution)
def test_add_delete_executions(self):
"""Test adding and deleting executions"""
job = self.queue.enqueue(say_hello)
pipeline = self.connection.pipeline()
execution = Execution.create(job=job, ttl=100, pipeline=pipeline)
pipeline.execute()
created_at = execution.created_at
composite_key = execution.composite_key
self.assertTrue(execution.composite_key.startswith(job.id)) # Composite key is prefixed by job ID
self.assertLessEqual(self.connection.ttl(execution.key), 100)
execution = Execution.fetch(id=execution.id, job_id=job.id, connection=self.connection)
self.assertEqual(execution.created_at.timestamp(), created_at.timestamp())
self.assertEqual(execution.composite_key, composite_key)
self.assertEqual(execution.last_heartbeat.timestamp(), created_at.timestamp())
execution.delete(job=job, pipeline=pipeline)
pipeline.execute()
self.assertFalse(self.connection.exists(execution.key))
def test_execution_registry(self):
"""Test the ExecutionRegistry class"""
job = self.queue.enqueue(say_hello)
registry = ExecutionRegistry(job_id=job.id, connection=self.connection)
pipeline = self.connection.pipeline()
execution = Execution.create(job=job, ttl=100, pipeline=pipeline)
pipeline.execute()
self.assertEqual(self.connection.zcard(registry.key), 1)
# Registry key TTL should be execution TTL + some buffer time (60 at the moment)
self.assertTrue(158 <= self.connection.ttl(registry.key) <= 160)
execution.delete(pipeline=pipeline, job=job)
pipeline.execute()
self.assertEqual(self.connection.zcard(registry.key), 0)
def test_ttl(self):
"""Execution registry and job execution should follow heartbeat TTL"""
job = self.queue.enqueue(say_hello, timeout=-1)
worker = Worker([self.queue], connection=self.connection)
execution = worker.prepare_execution(job=job)
self.assertGreaterEqual(self.connection.ttl(job.execution_registry.key), worker.get_heartbeat_ttl(job))
self.assertGreaterEqual(self.connection.ttl(execution.key), worker.get_heartbeat_ttl(job))
def test_heartbeat(self):
"""Test heartbeat should refresh execution as well as registry TTL"""
job = self.queue.enqueue(say_hello, timeout=1)
worker = Worker([self.queue], connection=self.connection)
execution = worker.prepare_execution(job=job)
# The actual TTL should be 150 seconds
self.assertTrue(1 < self.connection.ttl(job.execution_registry.key) < 160)
self.assertTrue(1 < self.connection.ttl(execution.key) < 160)
with self.connection.pipeline() as pipeline:
worker.execution.heartbeat(job.started_job_registry, 200, pipeline)
pipeline.execute()
# The actual TTL should be 260 seconds for registry and 200 seconds for execution
self.assertTrue(200 <= self.connection.ttl(job.execution_registry.key) <= 260)
self.assertTrue(200 <= self.connection.ttl(execution.key) < 260)
def test_registry_cleanup(self):
"""ExecutionRegistry.cleanup() should remove expired executions."""
job = self.queue.enqueue(say_hello)
worker = Worker([self.queue], connection=self.connection)
worker.prepare_execution(job=job)
registry = job.execution_registry
registry.cleanup()
self.assertEqual(len(registry), 1)
registry.cleanup(current_timestamp() + 100)
self.assertEqual(len(registry), 1)
# If we pass in a timestamp past execution's TTL, it should be removed.
# Expiration should be about 150 seconds (worker.get_heartbeat_ttl(job) + 60)
registry.cleanup(current_timestamp() + 200)
self.assertEqual(len(registry), 0)
def test_delete_registry(self):
"""ExecutionRegistry.delete() should delete registry and its executions."""
job = self.queue.enqueue(say_hello)
worker = Worker([self.queue], connection=self.connection)
execution = worker.prepare_execution(job=job)
self.assertIn(execution.job_id, job.started_job_registry.get_job_ids())
registry = job.execution_registry
pipeline = self.connection.pipeline()
registry.delete(job=job, pipeline=pipeline)
pipeline.execute()
self.assertNotIn(execution.job_id, job.started_job_registry.get_job_ids())
self.assertFalse(self.connection.exists(registry.key))
def test_get_execution_ids(self):
"""ExecutionRegistry.get_execution_ids() should return a list of execution IDs"""
job = self.queue.enqueue(say_hello)
worker = Worker([self.queue], connection=self.connection)
execution = worker.prepare_execution(job=job)
execution_2 = worker.prepare_execution(job=job)
registry = job.execution_registry
self.assertEqual(set(registry.get_execution_ids()), {execution.id, execution_2.id})
def test_execution_added_to_started_job_registry(self):
"""Ensure worker adds execution to started job registry"""
job = self.queue.enqueue(long_running_job, timeout=3)
Worker([self.queue], connection=self.connection)
# Start worker process in background with 1 second monitoring interval
process = start_worker_process(
self.queue.name, worker_name='w1', connection=self.connection, burst=True, job_monitoring_interval=1
)
sleep(0.5)
# Execution should be registered in started job registry
execution = job.get_executions()[0]
self.assertEqual(len(job.get_executions()), 1)
self.assertIn(execution.job_id, job.started_job_registry.get_job_ids())
last_heartbeat = execution.last_heartbeat
last_heartbeat = now()
self.assertTrue(30 < self.connection.ttl(execution.key) < 200)
sleep(2)
# During execution, heartbeat should be updated, this test is flaky on MacOS
execution.refresh()
self.assertNotEqual(execution.last_heartbeat, last_heartbeat)
process.join(10)
# When job is done, execution should be removed from started job registry
self.assertNotIn(execution.composite_key, job.started_job_registry.get_job_ids())
self.assertEqual(job.get_status(), 'finished')
def test_fetch_execution(self):
"""Ensure Execution.fetch() fetches the correct execution"""
job = self.queue.enqueue(say_hello)
worker = Worker([self.queue], connection=self.connection)
execution = worker.prepare_execution(job=job)
fetched_execution = Execution.fetch(id=execution.id, job_id=job.id, connection=self.connection)
self.assertEqual(execution, fetched_execution)
self.connection.delete(execution.key)
# Execution.fetch raises ValueError if execution is not found
with self.assertRaises(ValueError):
Execution.fetch(id=execution.id, job_id=job.id, connection=self.connection)
def test_init_from_composite_key(self):
"""Ensure the from_composite_key can correctly parse job_id and execution_id"""
composite_key = 'job_id:execution_id'
execution = Execution.from_composite_key(composite_key, connection=self.connection)
self.assertEqual(execution.job_id, 'job_id')
self.assertEqual(execution.id, 'execution_id')
def test_job_auto_fetch(self):
"""Ensure that if the job is not set, the Job.fetch is not called"""
job = self.queue.enqueue(say_hello)
execution = Execution('execution_id', job.id, connection=self.connection)
with patch.object(Job, 'fetch') as mock:
mock.return_value = Job(id=job.id, connection=self.connection)
# the first call would fetch the job
first_fetch = execution.job
self.assertEqual(first_fetch.id, job.id)
self.assertEqual(mock.call_count, 1)
self.assertNotEqual(id(job), id(first_fetch))
# the second call should return the same object
second_fetch = execution.job
self.assertEqual(second_fetch.id, job.id)
# call count remains the same
self.assertEqual(mock.call_count, 1)
self.assertEqual(id(first_fetch), id(second_fetch))
| TestRegistry |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/triggers/neptune.py | {
"start": 1094,
"end": 2664
} | class ____(AwsBaseWaiterTrigger):
"""
Triggers when a Neptune Cluster is available.
:param db_cluster_id: Cluster ID to poll.
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
:param region_name: AWS region name (example: us-east-1)
"""
def __init__(
self,
*,
db_cluster_id: str,
waiter_delay: int = 30,
waiter_max_attempts: int = 60,
aws_conn_id: str | None = None,
region_name: str | None = None,
**kwargs,
) -> None:
super().__init__(
serialized_fields={"db_cluster_id": db_cluster_id},
waiter_name="cluster_available",
waiter_args={"DBClusterIdentifier": db_cluster_id},
failure_message="Failed to start Neptune cluster",
status_message="Status of Neptune cluster is",
status_queries=["DBClusters[0].Status"],
return_key="db_cluster_id",
return_value=db_cluster_id,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
**kwargs,
)
def hook(self) -> AwsGenericHook:
return NeptuneHook(
aws_conn_id=self.aws_conn_id,
region_name=self.region_name,
verify=self.verify,
config=self.botocore_config,
)
| NeptuneClusterAvailableTrigger |
python | doocs__leetcode | lcof/面试题06. 从尾到头打印链表/Solution.py | {
"start": 136,
"end": 333
} | class ____:
def reversePrint(self, head: ListNode) -> List[int]:
ans = []
while head:
ans.append(head.val)
head = head.next
return ans[::-1]
| Solution |
python | kamyu104__LeetCode-Solutions | Python/product-of-the-last-k-numbers.py | {
"start": 75,
"end": 583
} | class ____(object):
def __init__(self):
self.__accu = [1]
def add(self, num):
"""
:type num: int
:rtype: None
"""
if not num:
self.__accu = [1]
return
self.__accu.append(self.__accu[-1]*num)
def getProduct(self, k):
"""
:type k: int
:rtype: int
"""
if len(self.__accu) <= k:
return 0
return self.__accu[-1] // self.__accu[-1-k]
| ProductOfNumbers |
python | pytorch__pytorch | test/dynamo/test_subclasses.py | {
"start": 6058,
"end": 6482
} | class ____:
def __init__(self, tensor):
self.tensor = tensor
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
args = pytree.tree_map_only(WrapperSubclass, lambda x: x.tensor, args)
kwargs = pytree.tree_map_only(WrapperSubclass, lambda x: x.tensor, kwargs)
return func(*args, **kwargs)
| WrapperSubclass |
python | wandb__wandb | tests/system_tests/test_artifacts/test_misc2.py | {
"start": 6933,
"end": 8697
} | class ____:
def test_validates_metadata_ok(
self, create_artifact: Callable[..., wandb.Artifact]
):
assert create_artifact(metadata=None).metadata == {}
assert create_artifact(metadata={"foo": "bar"}).metadata == {"foo": "bar"}
assert create_artifact(
metadata={"foo": {"bar": [1, 2, (3, None)]}}
).metadata == {"foo": {"bar": [1, 2, [3, None]]}}
assert create_artifact(metadata={"foo": np.arange(3)}).metadata == {
"foo": [0, 1, 2]
}
assert create_artifact(metadata={"foo": slice(4, 9, 2)}).metadata == {
"foo": {"slice_start": 4, "slice_stop": 9, "slice_step": 2}
}
def test_validates_metadata_err(
self, create_artifact: Callable[..., wandb.Artifact]
):
with pytest.raises(TypeError):
create_artifact(metadata=123)
with pytest.raises(TypeError):
create_artifact(metadata=[])
with pytest.raises(TypeError):
create_artifact(metadata={"unserializable": object()})
def test_deepcopies_metadata(self, create_artifact: Callable[..., wandb.Artifact]):
orig_metadata = {"foo": ["original"]}
artifact = create_artifact(metadata=orig_metadata)
# ensure `artifact.metadata` isn't just a reference to the argument
assert artifact.metadata is not orig_metadata
orig_metadata["bar"] = "modifying the top-level value"
assert "bar" not in artifact.metadata
# ensure that any mutable sub-values are also copies
assert artifact.metadata["foo"] is not orig_metadata["foo"]
orig_metadata["foo"].append("modifying the sub-value")
assert artifact.metadata["foo"] == ["original"]
| TestArtifactChecksMetadata |
python | PyCQA__pylint | tests/functional/n/non/non_iterator_returned.py | {
"start": 899,
"end": 1009
} | class ____(type):
def __next__(cls):
return 1
def next(cls):
return 2
| IteratorMetaclass |
python | walkccc__LeetCode | solutions/606. Construct String from Binary Tree/606.py | {
"start": 0,
"end": 372
} | class ____:
def tree2str(self, t: TreeNode | None) -> str:
def dfs(root: TreeNode | None) -> str:
if not root:
return ''
if root.right:
return str(root.val) + '(' + dfs(root.left) + ')(' + dfs(root.right) + ')'
if root.left:
return str(root.val) + '(' + dfs(root.left) + ')'
return str(root.val)
return dfs(t)
| Solution |
python | tensorflow__tensorflow | tensorflow/python/distribute/input_lib.py | {
"start": 24226,
"end": 25439
} | class ____(collections_abc.Iterable,
distribute_types.DistributedDatasetInterface):
"""Base class for iterable inputs for distribution strategies."""
# pylint: disable=super-init-not-called
def __init__(self, input_workers):
assert isinstance(input_workers, InputWorkers)
self._input_workers = input_workers
def __iter__(self):
raise NotImplementedError("must be implemented in descendants")
def reduce(self, initial_state, reduce_fn):
"""Execute a `reduce_fn` over all the elements of the input."""
iterator = iter(self)
optional_data = iterator.get_next_as_optional()
def cond(optional_data, state):
del state # Unused.
return optional_data.has_value()
def loop_body(optional_data, state):
"""Executes `reduce_fn` in a loop till the dataset is empty."""
state = reduce_fn(state, optional_data.get_value())
optional_data = iterator.get_next_as_optional()
return optional_data, state
optional_data, final_state = while_loop.while_loop(
cond,
loop_body, [optional_data, initial_state],
parallel_iterations=1,
return_same_structure=True)
return final_state
| _IterableInput |
python | faif__python-patterns | patterns/other/blackboard.py | {
"start": 3216,
"end": 4421
} | class ____(AbstractExpert):
def __init__(self, blackboard) -> None:
super().__init__(blackboard)
@property
def is_eager_to_contribute(self) -> bool:
return True if self.blackboard.common_state["problems"] > 100 else False
def contribute(self) -> None:
self.blackboard.common_state["problems"] += random.randint(1, 2)
self.blackboard.common_state["suggestions"] += random.randint(10, 20)
self.blackboard.common_state["contributions"] += [self.__class__.__name__]
self.blackboard.common_state["progress"] += random.randint(10, 100)
def main():
"""
>>> blackboard = Blackboard()
>>> blackboard.add_expert(Student(blackboard))
>>> blackboard.add_expert(Scientist(blackboard))
>>> blackboard.add_expert(Professor(blackboard))
>>> c = Controller(blackboard)
>>> contributions = c.run_loop()
>>> from pprint import pprint
>>> pprint(contributions)
['Student',
'Scientist',
'Student',
'Scientist',
'Student',
'Scientist',
'Professor']
"""
if __name__ == "__main__":
random.seed(1234) # for deterministic doctest outputs
import doctest
doctest.testmod()
| Professor |
python | huggingface__transformers | src/transformers/models/qwen3_vl/modular_qwen3_vl.py | {
"start": 9838,
"end": 13070
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Qwen3VLModel`]. It is used to instantiate a
Qwen3-VL model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of
Qwen3-VL-4B-Instruct [Qwen/Qwen3-VL-4B-Instruct](https://huggingface.co/Qwen/Qwen3-VL-4B-Instruct).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Qwen3VLTextConfig`):
The config object or dictionary of the text backbone.
vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Qwen3VLVisionConfig`):
The config object or dictionary of the vision backbone.
image_token_id (`int`, *optional*, defaults to 151655):
The image token index to encode the image prompt.
video_token_id (`int`, *optional*, defaults to 151656):
The video token index to encode the image prompt.
vision_start_token_id (`int`, *optional*, defaults to 151652):
The start token index to encode the image prompt.
vision_end_token_id (`int`, *optional*, defaults to 151653):
The end token index to encode the image prompt.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie the word embeddings.
```python
>>> from transformers import Qwen3VLForConditionalGeneration, Qwen3VLConfig
>>> # Initializing a Qwen3-VL style configuration
>>> configuration = Qwen3VLConfig()
>>> # Initializing a model from the Qwen3-VL-4B style configuration
>>> model = Qwen3VLForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "qwen3_vl"
sub_configs = {"vision_config": Qwen3VLVisionConfig, "text_config": Qwen3VLTextConfig}
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
text_config=None,
vision_config=None,
image_token_id=151655,
video_token_id=151656,
vision_start_token_id=151652,
vision_end_token_id=151653,
tie_word_embeddings=False,
**kwargs,
):
if isinstance(vision_config, dict):
self.vision_config = self.sub_configs["vision_config"](**vision_config)
elif vision_config is None:
self.vision_config = self.sub_configs["vision_config"]()
if isinstance(text_config, dict):
self.text_config = self.sub_configs["text_config"](**text_config)
elif text_config is None:
self.text_config = self.sub_configs["text_config"]()
self.image_token_id = image_token_id
self.video_token_id = video_token_id
self.vision_start_token_id = vision_start_token_id
self.vision_end_token_id = vision_end_token_id
super().__init__(**kwargs, tie_word_embeddings=tie_word_embeddings)
| Qwen3VLConfig |
python | huggingface__transformers | src/transformers/models/deberta/modeling_deberta.py | {
"start": 36467,
"end": 37413
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)
self.dropout = nn.Dropout(config.pooler_dropout)
self.config = config
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
context_token = hidden_states[:, 0]
context_token = self.dropout(context_token)
pooled_output = self.dense(context_token)
pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output)
return pooled_output
@property
def output_dim(self):
return self.config.hidden_size
@auto_docstring(
custom_intro="""
DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
"""
)
| ContextPooler |
python | django__django | tests/model_forms/tests.py | {
"start": 135035,
"end": 135688
} | class ____(TestCase):
def test_many_to_many(self):
"""Data for a ManyToManyField is a list rather than a lazy QuerySet."""
blue = Color.objects.create(name="blue")
red = Color.objects.create(name="red")
item = ColorfulItem.objects.create()
item.colors.set([blue])
data = model_to_dict(item)["colors"]
self.assertEqual(data, [blue])
item.colors.set([red])
# If data were a QuerySet, it would be reevaluated here and give "red"
# instead of the original value.
self.assertEqual(data, [blue])
@skipUnlessDBFeature("supports_table_check_constraints")
| ModelToDictTests |
python | google__flatbuffers | tests/MyGame/Example/NestedUnion/TestSimpleTableWithEnum.py | {
"start": 203,
"end": 1672
} | class ____(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset: int = 0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = TestSimpleTableWithEnum()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsTestSimpleTableWithEnum(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
# TestSimpleTableWithEnum
def Init(self, buf: bytes, pos: int):
self._tab = flatbuffers.table.Table(buf, pos)
# TestSimpleTableWithEnum
def Color(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 2
def TestSimpleTableWithEnumStart(builder: flatbuffers.Builder):
builder.StartObject(1)
def Start(builder: flatbuffers.Builder):
TestSimpleTableWithEnumStart(builder)
def TestSimpleTableWithEnumAddColor(builder: flatbuffers.Builder, color: int):
builder.PrependUint8Slot(0, color, 2)
def AddColor(builder: flatbuffers.Builder, color: int):
TestSimpleTableWithEnumAddColor(builder, color)
def TestSimpleTableWithEnumEnd(builder: flatbuffers.Builder) -> int:
return builder.EndObject()
def End(builder: flatbuffers.Builder) -> int:
return TestSimpleTableWithEnumEnd(builder)
| TestSimpleTableWithEnum |
python | google__pytype | pytype/rewrite/flow/state_test.py | {
"start": 341,
"end": 1179
} | class ____(unittest.TestCase):
def test_store_local(self):
b = state.BlockState({})
var = variables.Variable.from_value(42)
b.store_local('x', var)
self.assertEqual(b._locals, {'x': var})
def test_load_local(self):
b = state.BlockState({})
b.store_local('x', variables.Variable.from_value(42))
x = b.load_local('x')
self.assertEqual(x.name, 'x')
self.assertEqual(x.get_atomic_value(), 42)
def test_get_locals(self):
x = variables.Variable.from_value(42)
b = state.BlockState({'x': x})
self.assertEqual(b.get_locals(), immutabledict.immutabledict({'x': x}))
def test_typing(self):
locals_ = {'x': variables.Variable.from_value(0)}
b = state.BlockState(locals_)
assert_type(b, state.BlockState[int])
assert_type(b.load_local('x'), variables.Variable[int])
| LocalsTest |
python | kamyu104__LeetCode-Solutions | Python/find-longest-self-contained-substring.py | {
"start": 2164,
"end": 3331
} | class ____(object):
def maxSubstringLength(self, s):
"""
:type s: str
:rtype: int
"""
def update(x, d, distinct, valid):
x = ord(x)-ord('a')
if cnt2[x] == cnt[x]:
valid -= 1
if cnt2[x] == 0:
distinct += 1
cnt2[x] += d
if cnt2[x] == 0:
distinct -= 1
if cnt2[x] == cnt[x]:
valid += 1
return distinct, valid
cnt = [0]*26
for x in s:
cnt[ord(x)-ord('a')] += 1
result = -1
for l in xrange(1, sum(x != 0 for x in cnt)):
cnt2 = [0]*26
left = distinct = valid = 0
for right in xrange(len(s)):
distinct, valid = update(s[right], +1, distinct, valid)
while distinct == l+1:
distinct, valid = update(s[left], -1, distinct, valid)
left += 1
if valid == l:
result = max(result, right-left+1)
return result
# Time: O(26^2 * n)
# Space: O(26)
# hash table, brute force
| Solution3 |
python | matplotlib__matplotlib | lib/matplotlib/legend_handler.py | {
"start": 27978,
"end": 29820
} | class ____(HandlerBase):
"""
Handler for `.PolyCollection` used in `~.Axes.fill_between` and
`~.Axes.stackplot`.
"""
def _update_prop(self, legend_handle, orig_handle):
def first_color(colors):
if colors.size == 0:
return (0, 0, 0, 0)
return tuple(colors[0])
def get_first(prop_array):
if len(prop_array):
return prop_array[0]
else:
return None
# orig_handle is a PolyCollection and legend_handle is a Patch.
# Directly set Patch color attributes (must be RGBA tuples).
legend_handle._facecolor = first_color(orig_handle.get_facecolor())
legend_handle._edgecolor = first_color(orig_handle.get_edgecolor())
legend_handle._hatch_color = first_color(orig_handle.get_hatchcolor())
legend_handle._original_facecolor = orig_handle._original_facecolor
legend_handle._original_edgecolor = orig_handle._original_edgecolor
legend_handle._fill = orig_handle.get_fill()
legend_handle._hatch = orig_handle.get_hatch()
# Setters are fine for the remaining attributes.
legend_handle.set_linewidth(get_first(orig_handle.get_linewidths()))
legend_handle.set_linestyle(get_first(orig_handle.get_linestyles()))
legend_handle.set_transform(get_first(orig_handle.get_transforms()))
# Alpha is already taken into account by the color attributes.
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize, trans):
# docstring inherited
p = Rectangle(xy=(-xdescent, -ydescent),
width=width, height=height)
self.update_prop(p, orig_handle, legend)
p.set_transform(trans)
return [p]
| HandlerPolyCollection |
python | numba__numba | numba/cuda/tests/nocuda/test_library_lookup.py | {
"start": 4044,
"end": 5995
} | class ____(LibraryLookupBase):
def test_nvvm_path_decision(self):
# Check that the default is using conda environment
by, info, warns = self.remote_do(self.do_clear_envs)
if has_cuda:
self.assertEqual(by, 'Conda environment')
else:
self.assertEqual(by, "<unknown>")
self.assertIsNone(info)
self.assertFalse(warns)
# Check that CUDA_HOME works by removing conda-env
by, info, warns = self.remote_do(self.do_set_cuda_home)
self.assertEqual(by, 'CUDA_HOME')
self.assertFalse(warns)
if IS_WIN32:
self.assertEqual(info, os.path.join('mycudahome', 'nvvm', 'bin'))
elif IS_OSX:
self.assertEqual(info, os.path.join('mycudahome', 'nvvm', 'lib'))
else:
self.assertEqual(info, os.path.join('mycudahome', 'nvvm', 'lib64'))
if get_system_ctk() is None:
# Fake remove conda environment so no cudatoolkit is available
by, info, warns = self.remote_do(self.do_clear_envs)
self.assertEqual(by, '<unknown>')
self.assertIsNone(info)
self.assertFalse(warns)
else:
# Use system available cudatoolkit
by, info, warns = self.remote_do(self.do_clear_envs)
self.assertEqual(by, 'System')
self.assertFalse(warns)
@staticmethod
def do_clear_envs():
remove_env('CUDA_HOME')
remove_env('CUDA_PATH')
return True, _get_nvvm_path_decision()
@staticmethod
def do_set_cuda_home():
os.environ['CUDA_HOME'] = os.path.join('mycudahome')
_fake_non_conda_env()
return True, _get_nvvm_path_decision()
@skip_on_cudasim('Library detection unsupported in the simulator')
@unittest.skipUnless(has_mp_get_context, 'mp.get_context not available')
@skip_unless_conda_cudatoolkit('test assumes conda installed cudatoolkit')
| TestNvvmLookUp |
python | pytorch__pytorch | test/dynamo/test_functions.py | {
"start": 73710,
"end": 81134
} | class ____(torch.nn.Module):
def forward(self, s77: "Sym(s77)", L_x_: "f32[s77]"):
l_x_ = L_x_
sum_1: "f32[]" = l_x_.sum(); l_x_ = None
gt: "b8[]" = sum_1 > 0; sum_1 = None
return (gt,)
""",
)
def test_filter_with_graph_break(self):
def f(a):
a += 1
def g(x):
nonlocal a
a += 1
return x > 0
m = filter(g, [1, 2, 3, 4, 5])
a += next(m) # won't graph break
torch._dynamo.graph_break()
a += next(m) # will graph break
return a
cnts = torch._dynamo.testing.CompileCounter()
opt_f = torch.compile(f, backend=cnts)
self.assertEqual(f(torch.ones(3, 3)), opt_f(torch.ones(3, 3)))
self.assertEqual(cnts.frame_count, 3)
@make_test
def test_getattr(x):
def fn(y):
return y + 1
try:
_exit = type(fn).__exit__
except AttributeError:
return x.sin()
else:
return x.cos()
@unittest.expectedFailure
def test_getattr_metaclass(self):
class Meta(type):
def __getattr__(cls, name):
return len(name)
class C(metaclass=Meta):
attr = 123
@torch.compile(backend="eager", fullgraph=True)
def fn(t):
return t + C.attr + C.dynamic_attr
t = torch.randn(2)
y = fn(t)
self.assertEqual(y, t + 123 + 12)
def test_two_point_iter(self):
def fn(x, y):
it = map(lambda n: n + 1, range(6))
for i in it:
x = x + i
y = y + next(it)
return x, y
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
x = torch.ones(3)
y = torch.ones(3)
self.assertEqual(fn(x, y), opt_fn(x, y))
# Test dict_keys passed along with the corresponding dict object
def test_dict_key_set1(self):
d = {"a": 1, "b": 2}
def fn(x, d, keys):
if "c" in keys:
return x + d["c"]
else:
return x + 1
x = torch.zeros(2, 3)
opt_fn = torch.compile(fullgraph=True, backend="eager")(fn)
self.assertEqual(opt_fn(x, d, d.keys()), fn(x, d, d.keys()))
d.update({"c": 3})
opt_fn = torch.compile(fullgraph=True, backend="eager")(fn)
self.assertEqual(opt_fn(x, d, d.keys()), fn(x, d, d.keys()))
# Test only dict_keys passed into the compiled region
def test_dict_key_set2(self):
d = {"a": 1, "b": 2}
def fn(x, keys):
if "c" in keys:
return x - 1
else:
return x + 1
x = torch.zeros(2, 3)
opt_fn = torch.compile(fullgraph=True, backend="eager")(fn)
self.assertEqual(opt_fn(x, d.keys()), fn(x, d.keys()))
d.update({"c": 3})
opt_fn = torch.compile(fullgraph=True, backend="eager")(fn)
self.assertEqual(opt_fn(x, d.keys()), fn(x, d.keys()))
def test_dict_key_set3(self):
a = {
"domains": {
"d1": {"attr": 1},
"d2": {"attr": 2},
}
}
b = a["domains"].keys()
def fn(x, a, b):
for e in b:
x += a["domains"][e]["attr"]
return x
x = torch.ones(2, 3)
opt_fn = torch.compile(fullgraph=True, backend="eager")(fn)
self.assertEqual(opt_fn(x, a, b), fn(x, a, b))
a["domains"].update({"d3": {"attr": 3}})
opt_fn = torch.compile(fullgraph=True, backend="eager")(fn)
self.assertEqual(opt_fn(x, a, b), fn(x, a, b))
def test_list_setitem(self):
def fn(a: int):
some_array = [1, 2, 3]
some_array[a] = 5
return torch.ones(some_array)
opt_fn = torch.compile(fullgraph=True, backend="eager", dynamic=True)(fn)
self.assertEqual(opt_fn(0), fn(0))
self.assertEqual(opt_fn(1), fn(1))
def test_list_setitem_slice(self):
def fn(a: int):
some_array = [1, 2, 3]
some_array[a : a + 1] = [5]
return torch.ones(some_array)
opt_fn = torch.compile(fullgraph=True, backend="eager", dynamic=True)(fn)
self.assertEqual(opt_fn(0), fn(0))
self.assertEqual(opt_fn(1), fn(1))
def test_pow_int(self):
def fn(a, b):
return torch.pow(a, b)
x = torch.ones(2, 2)
opt_fn = torch.compile(fullgraph=True, backend="eager", dynamic=True)(fn)
self.assertEqual(opt_fn(x, 2), fn(x, 2))
def test_tensor_size_indexed_by_symint(self):
def fn(x, y):
index = x.shape[-1]
return x + y.shape[index]
x = torch.rand(10, 2)
y = torch.rand(10, 8, 6)
opt_fn = torch.compile(backend="eager", fullgraph=True)(fn)
self.assertEqual(opt_fn(x, y), fn(x, y))
def test_partials_as_input_partials_lambda(self):
def fn(f0, f1, x):
return f0(x) * f1(x)
multiply = lambda x, y: x * y
lambda0 = functools.partial(multiply, y=3)
lambda1 = functools.partial(multiply, y=2)
cnts = torch._dynamo.testing.CompileCounter()
torch.compile(fn, backend=cnts, fullgraph=True)(
lambda0, lambda1, torch.randn(2, 2)
)
self.assertEqual(cnts.frame_count, 1)
def test_partials_as_input_partials_mod(self):
def fn(f0, f1, x):
return f0(x) * f1(x)
lambda0 = functools.partial(SmallNN(), y=torch.randn(2, 2))
lambda1 = functools.partial(SmallNN(), y=torch.randn(2, 2))
cnts = torch._dynamo.testing.CompileCounter()
x = torch.randn(2, 2)
dynamo_result = torch.compile(fn, backend=cnts, fullgraph=True)(
lambda0, lambda1, x
)
self.assertEqual(cnts.frame_count, 1)
eager_result = fn(lambda0, lambda1, x)
self.assertEqual(eager_result, dynamo_result)
def test_partials_as_input_UDF(self):
def fn(f0, f1, x):
return f0(x) * f1(x)
lambda0 = functools.partial(udf_mul, y=torch.randn(2, 2))
lambda1 = functools.partial(udf_mul, y=torch.randn(2, 2))
cnts = torch._dynamo.testing.CompileCounter()
x = torch.randn(2, 2)
dynamo_result = torch.compile(fn, backend=cnts, fullgraph=True)(
lambda0, lambda1, x
)
self.assertEqual(cnts.frame_count, 1)
eager_result = fn(lambda0, lambda1, x)
self.assertEqual(eager_result, dynamo_result)
def test_partials_graph_break_reconstruct(self):
def fn(udf_mul_0, udf_mul_1, x):
lambda0 = functools.partial(udf_mul_0, y=x)
lambda1 = functools.partial(udf_mul_1, y=x)
print("break")
return torch.mul(lambda0(x), lambda1(x))
backend = EagerAndRecordGraphs()
cnts = CompileCounterWithBackend(backend)
x = torch.randn(2, 2)
dynamo_result = torch.compile(fn, backend=cnts)(udf_mul, udf_mul, x)
eager_result = fn(udf_mul, udf_mul, x)
self.assertEqual(eager_result, dynamo_result)
if torch._dynamo.config.assume_static_by_default:
self.assertExpectedInline(
normalize_gm(backend.graphs[0].print_readable(print_output=False)),
"""\
| GraphModule |
python | apache__airflow | helm-tests/tests/helm_tests/airflow_aux/test_annotations.py | {
"start": 16271,
"end": 17921
} | class ____:
"""Tests Per Component Pod Annotations."""
def test_annotations_are_added(self, values, show_only, expected_annotations):
k8s_objects = render_chart(
values=values,
show_only=[show_only],
)
# This test relies on the convention that the helm chart puts a single
# Deployment in its own .yaml file, so by specifying `show_only`,
# we should only get a single k8s_object here - the target object that
# we hope to test on.
assert len(k8s_objects) == 1
obj = k8s_objects[0]
annotations = get_object_annotations(obj)
for k, v in expected_annotations.items():
assert k in annotations
assert v == annotations[k]
def test_precedence(self, values, show_only, expected_annotations):
values_global_annotations = {"airflowPodAnnotations": {k: "GLOBAL" for k in expected_annotations}}
values_merged = {**values, **values_global_annotations}
k8s_objects = render_chart(
values=values_merged,
show_only=[show_only],
)
# This test relies on the convention that the helm chart puts a single
# Deployment in its own .yaml file, so by specifying `show_only`,
# we should only get a single k8s_object here - the target object that
# we hope to test on.
assert len(k8s_objects) == 1
obj = k8s_objects[0]
annotations = get_object_annotations(obj)
for k, v in expected_annotations.items():
assert k in annotations
assert v == annotations[k]
| TestPerComponentPodAnnotations |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 105615,
"end": 105850
} | class ____(Structure):
_fields_ = [('environment', c_uint),
('ccFeature', c_uint),
('devToolsMode', c_uint),
]
nvmlSystemConfComputeSettings_v1 = 0x1000014
| c_nvmlConfComputeSystemState_t |
python | pallets__itsdangerous | src/itsdangerous/url_safe.py | {
"start": 1950,
"end": 2221
} | class ____(URLSafeSerializerMixin, Serializer[str]):
"""Works like :class:`.Serializer` but dumps and loads into a URL
safe string consisting of the upper and lowercase character of the
alphabet as well as ``'_'``, ``'-'`` and ``'.'``.
"""
| URLSafeSerializer |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 368403,
"end": 371626
} | class ____(Request):
"""
Mark a task status as in_progress. Optionally allows to set the task's execution progress.
:param force: If not true, call fails if the task status is not 'not_started'
:type force: bool
:param task: Task ID
:type task: str
:param status_reason: Reason for status change
:type status_reason: str
:param status_message: Extra information regarding status change
:type status_message: str
"""
_service = "tasks"
_action = "started"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"force": {
"default": False,
"description": "If not true, call fails if the task status is not 'not_started'",
"type": ["boolean", "null"],
},
"status_message": {
"description": "Extra information regarding status change",
"type": "string",
},
"status_reason": {
"description": "Reason for status change",
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
def __init__(
self,
task: str,
force: Optional[bool] = False,
status_reason: Optional[str] = None,
status_message: Optional[str] = None,
**kwargs: Any
) -> None:
super(StartedRequest, self).__init__(**kwargs)
self.force = force
self.task = task
self.status_reason = status_reason
self.status_message = status_message
@schema_property("force")
def force(self) -> Optional[bool]:
return self._property_force
@force.setter
def force(self, value: Optional[bool]) -> None:
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("status_reason")
def status_reason(self) -> Optional[str]:
return self._property_status_reason
@status_reason.setter
def status_reason(self, value: Optional[str]) -> None:
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("status_message")
def status_message(self) -> Optional[str]:
return self._property_status_message
@status_message.setter
def status_message(self, value: Optional[str]) -> None:
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
| StartedRequest |
python | getsentry__sentry | src/sentry/sentry_metrics/querying/visitors/query_expression.py | {
"start": 1093,
"end": 1831
} | class ____(QueryExpressionVisitor[QueryExpression]):
"""
Visitor that recursively injects the environments filter into all `Timeseries`.
"""
def __init__(self, environments: Sequence[Environment]):
self._environment_names = [environment.name for environment in environments]
def _visit_timeseries(self, timeseries: Timeseries) -> QueryExpression:
if self._environment_names:
current_filters = timeseries.filters if timeseries.filters else []
current_filters.extend(
[Condition(Column("environment"), Op.IN, self._environment_names)]
)
return timeseries.set_filters(current_filters)
return timeseries
| EnvironmentsInjectionVisitor |
python | openai__openai-python | src/openai/types/webhooks/response_failed_webhook_event.py | {
"start": 323,
"end": 757
} | class ____(BaseModel):
id: str
"""The unique ID of the event."""
created_at: int
"""The Unix timestamp (in seconds) of when the model response failed."""
data: Data
"""Event data payload."""
type: Literal["response.failed"]
"""The type of the event. Always `response.failed`."""
object: Optional[Literal["event"]] = None
"""The object of the event. Always `event`."""
| ResponseFailedWebhookEvent |
python | Pylons__pyramid | tests/test_authentication.py | {
"start": 52255,
"end": 52606
} | class ____(unittest.TestCase):
def _makeOne(self, msg, expected=None):
from pyramid.authentication import BadTicket
return BadTicket(msg, expected)
def test_it(self):
exc = self._makeOne('msg', expected=True)
self.assertEqual(exc.expected, True)
self.assertTrue(isinstance(exc, Exception))
| TestBadTicket |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/distlib/wheel.py | {
"start": 4756,
"end": 43958
} | class ____(object):
"""
Class to build and install from Wheel files (PEP 427).
"""
wheel_version = (1, 1)
hash_kind = 'sha256'
def __init__(self, filename=None, sign=False, verify=False):
"""
Initialise an instance using a (valid) filename.
"""
self.sign = sign
self.should_verify = verify
self.buildver = ''
self.pyver = [PYVER]
self.abi = ['none']
self.arch = ['any']
self.dirname = os.getcwd()
if filename is None:
self.name = 'dummy'
self.version = '0.1'
self._filename = self.filename
else:
m = NAME_VERSION_RE.match(filename)
if m:
info = m.groupdict('')
self.name = info['nm']
# Reinstate the local version separator
self.version = info['vn'].replace('_', '-')
self.buildver = info['bn']
self._filename = self.filename
else:
dirname, filename = os.path.split(filename)
m = FILENAME_RE.match(filename)
if not m:
raise DistlibException('Invalid name or '
'filename: %r' % filename)
if dirname:
self.dirname = os.path.abspath(dirname)
self._filename = filename
info = m.groupdict('')
self.name = info['nm']
self.version = info['vn']
self.buildver = info['bn']
self.pyver = info['py'].split('.')
self.abi = info['bi'].split('.')
self.arch = info['ar'].split('.')
@property
def filename(self):
"""
Build and return a filename from the various components.
"""
if self.buildver:
buildver = '-' + self.buildver
else:
buildver = ''
pyver = '.'.join(self.pyver)
abi = '.'.join(self.abi)
arch = '.'.join(self.arch)
# replace - with _ as a local version separator
version = self.version.replace('-', '_')
return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver, pyver,
abi, arch)
@property
def exists(self):
path = os.path.join(self.dirname, self.filename)
return os.path.isfile(path)
@property
def tags(self):
for pyver in self.pyver:
for abi in self.abi:
for arch in self.arch:
yield pyver, abi, arch
@cached_property
def metadata(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
self.get_wheel_metadata(zf)
# wv = wheel_metadata['Wheel-Version'].split('.', 1)
# file_version = tuple([int(i) for i in wv])
# if file_version < (1, 1):
# fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME,
# LEGACY_METADATA_FILENAME]
# else:
# fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME]
fns = [WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME]
result = None
for fn in fns:
try:
metadata_filename = posixpath.join(info_dir, fn)
with zf.open(metadata_filename) as bf:
wf = wrapper(bf)
result = Metadata(fileobj=wf)
if result:
break
except KeyError:
pass
if not result:
raise ValueError('Invalid wheel, because metadata is '
'missing: looked in %s' % ', '.join(fns))
return result
def get_wheel_metadata(self, zf):
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
metadata_filename = posixpath.join(info_dir, 'WHEEL')
with zf.open(metadata_filename) as bf:
wf = codecs.getreader('utf-8')(bf)
message = message_from_file(wf)
return dict(message)
@cached_property
def info(self):
pathname = os.path.join(self.dirname, self.filename)
with ZipFile(pathname, 'r') as zf:
result = self.get_wheel_metadata(zf)
return result
def process_shebang(self, data):
m = SHEBANG_RE.match(data)
if m:
end = m.end()
shebang, data_after_shebang = data[:end], data[end:]
# Preserve any arguments after the interpreter
if b'pythonw' in shebang.lower():
shebang_python = SHEBANG_PYTHONW
else:
shebang_python = SHEBANG_PYTHON
m = SHEBANG_DETAIL_RE.match(shebang)
if m:
args = b' ' + m.groups()[-1]
else:
args = b''
shebang = shebang_python + args
data = shebang + data_after_shebang
else:
cr = data.find(b'\r')
lf = data.find(b'\n')
if cr < 0 or cr > lf:
term = b'\n'
else:
if data[cr:cr + 2] == b'\r\n':
term = b'\r\n'
else:
term = b'\r'
data = SHEBANG_PYTHON + term + data
return data
def get_hash(self, data, hash_kind=None):
if hash_kind is None:
hash_kind = self.hash_kind
try:
hasher = getattr(hashlib, hash_kind)
except AttributeError:
raise DistlibException('Unsupported hash algorithm: %r' %
hash_kind)
result = hasher(data).digest()
result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')
return hash_kind, result
def write_record(self, records, record_path, archive_record_path):
records = list(records) # make a copy, as mutated
records.append((archive_record_path, '', ''))
with CSVWriter(record_path) as writer:
for row in records:
writer.writerow(row)
def write_records(self, info, libdir, archive_paths):
records = []
distinfo, info_dir = info
# hasher = getattr(hashlib, self.hash_kind)
for ap, p in archive_paths:
with open(p, 'rb') as f:
data = f.read()
digest = '%s=%s' % self.get_hash(data)
size = os.path.getsize(p)
records.append((ap, digest, size))
p = os.path.join(distinfo, 'RECORD')
ap = to_posix(os.path.join(info_dir, 'RECORD'))
self.write_record(records, p, ap)
archive_paths.append((ap, p))
def build_zip(self, pathname, archive_paths):
with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
for ap, p in archive_paths:
logger.debug('Wrote %s to %s in wheel', p, ap)
zf.write(p, ap)
def build(self, paths, tags=None, wheel_version=None):
"""
Build a wheel from files in specified paths, and use any specified tags
when determining the name of the wheel.
"""
if tags is None:
tags = {}
libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
if libkey == 'platlib':
is_pure = 'false'
default_pyver = [IMPVER]
default_abi = [ABI]
default_arch = [ARCH]
else:
is_pure = 'true'
default_pyver = [PYVER]
default_abi = ['none']
default_arch = ['any']
self.pyver = tags.get('pyver', default_pyver)
self.abi = tags.get('abi', default_abi)
self.arch = tags.get('arch', default_arch)
libdir = paths[libkey]
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
archive_paths = []
# First, stuff which is not in site-packages
for key in ('data', 'headers', 'scripts'):
if key not in paths:
continue
path = paths[key]
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for fn in files:
p = fsdecode(os.path.join(root, fn))
rp = os.path.relpath(p, path)
ap = to_posix(os.path.join(data_dir, key, rp))
archive_paths.append((ap, p))
if key == 'scripts' and not p.endswith('.exe'):
with open(p, 'rb') as f:
data = f.read()
data = self.process_shebang(data)
with open(p, 'wb') as f:
f.write(data)
# Now, stuff which is in site-packages, other than the
# distinfo stuff.
path = libdir
distinfo = None
for root, dirs, files in os.walk(path):
if root == path:
# At the top level only, save distinfo for later
# and skip it for now
for i, dn in enumerate(dirs):
dn = fsdecode(dn)
if dn.endswith('.dist-info'):
distinfo = os.path.join(root, dn)
del dirs[i]
break
assert distinfo, '.dist-info directory expected, not found'
for fn in files:
# comment out next suite to leave .pyc files in
if fsdecode(fn).endswith(('.pyc', '.pyo')):
continue
p = os.path.join(root, fn)
rp = to_posix(os.path.relpath(p, path))
archive_paths.append((rp, p))
# Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
files = os.listdir(distinfo)
for fn in files:
if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'):
p = fsdecode(os.path.join(distinfo, fn))
ap = to_posix(os.path.join(info_dir, fn))
archive_paths.append((ap, p))
wheel_metadata = [
'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
'Generator: distlib %s' % __version__,
'Root-Is-Purelib: %s' % is_pure,
]
for pyver, abi, arch in self.tags:
wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
p = os.path.join(distinfo, 'WHEEL')
with open(p, 'w') as f:
f.write('\n'.join(wheel_metadata))
ap = to_posix(os.path.join(info_dir, 'WHEEL'))
archive_paths.append((ap, p))
# sort the entries by archive path. Not needed by any spec, but it
# keeps the archive listing and RECORD tidier than they would otherwise
# be. Use the number of path segments to keep directory entries together,
# and keep the dist-info stuff at the end.
def sorter(t):
ap = t[0]
n = ap.count('/')
if '.dist-info' in ap:
n += 10000
return (n, ap)
archive_paths = sorted(archive_paths, key=sorter)
# Now, at last, RECORD.
# Paths in here are archive paths - nothing else makes sense.
self.write_records((distinfo, info_dir), libdir, archive_paths)
# Now, ready to build the zip file
pathname = os.path.join(self.dirname, self.filename)
self.build_zip(pathname, archive_paths)
return pathname
def skip_entry(self, arcname):
"""
Determine whether an archive entry should be skipped when verifying
or installing.
"""
# The signature file won't be in RECORD,
# and we don't currently don't do anything with it
# We also skip directories, as they won't be in RECORD
# either. See:
#
# https://github.com/pypa/wheel/issues/294
# https://github.com/pypa/wheel/issues/287
# https://github.com/pypa/wheel/pull/289
#
return arcname.endswith(('/', '/RECORD.jws'))
def install(self, paths, maker, **kwargs):
"""
Install a wheel to the specified paths. If kwarg ``warner`` is
specified, it should be a callable, which will be called with two
tuples indicating the wheel version of this software and the wheel
version in the file, if there is a discrepancy in the versions.
This can be used to issue any warnings to raise any exceptions.
If kwarg ``lib_only`` is True, only the purelib/platlib files are
installed, and the headers, scripts, data and dist-info metadata are
not written. If kwarg ``bytecode_hashed_invalidation`` is True, written
bytecode will try to use file-hash based invalidation (PEP-552) on
supported interpreter versions (CPython 2.7+).
The return value is a :class:`InstalledDistribution` instance unless
``options.lib_only`` is True, in which case the return value is ``None``.
"""
dry_run = maker.dry_run
warner = kwargs.get('warner')
lib_only = kwargs.get('lib_only', False)
bc_hashed_invalidation = kwargs.get('bytecode_hashed_invalidation',
False)
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(wheel_metadata_name) as bwf:
wf = wrapper(bwf)
message = message_from_file(wf)
wv = message['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
if (file_version != self.wheel_version) and warner:
warner(self.wheel_version, file_version)
if message['Root-Is-Purelib'] == 'true':
libdir = paths['purelib']
else:
libdir = paths['platlib']
records = {}
with zf.open(record_name) as bf:
with CSVReader(stream=bf) as reader:
for row in reader:
p = row[0]
records[p] = row
data_pfx = posixpath.join(data_dir, '')
info_pfx = posixpath.join(info_dir, '')
script_pfx = posixpath.join(data_dir, 'scripts', '')
# make a new instance rather than a copy of maker's,
# as we mutate it
fileop = FileOperator(dry_run=dry_run)
fileop.record = True # so we can rollback if needed
bc = not sys.dont_write_bytecode # Double negatives. Lovely!
outfiles = [] # for RECORD writing
# for script copying/shebang processing
workdir = tempfile.mkdtemp()
# set target dir later
# we default add_launchers to False, as the
# Python Launcher should be used instead
maker.source_dir = workdir
maker.target_dir = None
try:
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
if self.skip_entry(u_arcname):
continue
row = records[u_arcname]
if row[2] and str(zinfo.file_size) != row[2]:
raise DistlibException('size mismatch for '
'%s' % u_arcname)
if row[1]:
kind, value = row[1].split('=', 1)
with zf.open(arcname) as bf:
data = bf.read()
_, digest = self.get_hash(data, kind)
if digest != value:
raise DistlibException('digest mismatch for '
'%s' % arcname)
if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
logger.debug('lib_only: skipping %s', u_arcname)
continue
is_script = (u_arcname.startswith(script_pfx)
and not u_arcname.endswith('.exe'))
if u_arcname.startswith(data_pfx):
_, where, rp = u_arcname.split('/', 2)
outfile = os.path.join(paths[where], convert_path(rp))
else:
# meant for site-packages.
if u_arcname in (wheel_metadata_name, record_name):
continue
outfile = os.path.join(libdir, convert_path(u_arcname))
if not is_script:
with zf.open(arcname) as bf:
fileop.copy_stream(bf, outfile)
# Issue #147: permission bits aren't preserved. Using
# zf.extract(zinfo, libdir) should have worked, but didn't,
# see https://www.thetopsites.net/article/53834422.shtml
# So ... manually preserve permission bits as given in zinfo
if os.name == 'posix':
# just set the normal permission bits
os.chmod(outfile,
(zinfo.external_attr >> 16) & 0x1FF)
outfiles.append(outfile)
# Double check the digest of the written file
if not dry_run and row[1]:
with open(outfile, 'rb') as bf:
data = bf.read()
_, newdigest = self.get_hash(data, kind)
if newdigest != digest:
raise DistlibException('digest mismatch '
'on write for '
'%s' % outfile)
if bc and outfile.endswith('.py'):
try:
pyc = fileop.byte_compile(
outfile,
hashed_invalidation=bc_hashed_invalidation)
outfiles.append(pyc)
except Exception:
# Don't give up if byte-compilation fails,
# but log it and perhaps warn the user
logger.warning('Byte-compilation failed',
exc_info=True)
else:
fn = os.path.basename(convert_path(arcname))
workname = os.path.join(workdir, fn)
with zf.open(arcname) as bf:
fileop.copy_stream(bf, workname)
dn, fn = os.path.split(outfile)
maker.target_dir = dn
filenames = maker.make(fn)
fileop.set_executable_mode(filenames)
outfiles.extend(filenames)
if lib_only:
logger.debug('lib_only: returning None')
dist = None
else:
# Generate scripts
# Try to get pydist.json so we can see if there are
# any commands to generate. If this fails (e.g. because
# of a legacy wheel), log a warning but don't give up.
commands = None
file_version = self.info['Wheel-Version']
if file_version == '1.0':
# Use legacy info
ep = posixpath.join(info_dir, 'entry_points.txt')
try:
with zf.open(ep) as bwf:
epdata = read_exports(bwf)
commands = {}
for key in ('console', 'gui'):
k = '%s_scripts' % key
if k in epdata:
commands['wrap_%s' % key] = d = {}
for v in epdata[k].values():
s = '%s:%s' % (v.prefix, v.suffix)
if v.flags:
s += ' [%s]' % ','.join(v.flags)
d[v.name] = s
except Exception:
logger.warning('Unable to read legacy script '
'metadata, so cannot generate '
'scripts')
else:
try:
with zf.open(metadata_name) as bwf:
wf = wrapper(bwf)
commands = json.load(wf).get('extensions')
if commands:
commands = commands.get('python.commands')
except Exception:
logger.warning('Unable to read JSON metadata, so '
'cannot generate scripts')
if commands:
console_scripts = commands.get('wrap_console', {})
gui_scripts = commands.get('wrap_gui', {})
if console_scripts or gui_scripts:
script_dir = paths.get('scripts', '')
if not os.path.isdir(script_dir):
raise ValueError('Valid script path not '
'specified')
maker.target_dir = script_dir
for k, v in console_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script)
fileop.set_executable_mode(filenames)
if gui_scripts:
options = {'gui': True}
for k, v in gui_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script, options)
fileop.set_executable_mode(filenames)
p = os.path.join(libdir, info_dir)
dist = InstalledDistribution(p)
# Write SHARED
paths = dict(paths) # don't change passed in dict
del paths['purelib']
del paths['platlib']
paths['lib'] = libdir
p = dist.write_shared_locations(paths, dry_run)
if p:
outfiles.append(p)
# Write RECORD
dist.write_installed_files(outfiles, paths['prefix'],
dry_run)
return dist
except Exception: # pragma: no cover
logger.exception('installation failed.')
fileop.rollback()
raise
finally:
shutil.rmtree(workdir)
def _get_dylib_cache(self):
global cache
if cache is None:
# Use native string to avoid issues on 2.x: see Python #20140.
base = os.path.join(get_cache_base(), str('dylib-cache'),
'%s.%s' % sys.version_info[:2])
cache = Cache(base)
return cache
def _get_extensions(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
arcname = posixpath.join(info_dir, 'EXTENSIONS')
wrapper = codecs.getreader('utf-8')
result = []
with ZipFile(pathname, 'r') as zf:
try:
with zf.open(arcname) as bf:
wf = wrapper(bf)
extensions = json.load(wf)
cache = self._get_dylib_cache()
prefix = cache.prefix_to_dir(pathname)
cache_base = os.path.join(cache.base, prefix)
if not os.path.isdir(cache_base):
os.makedirs(cache_base)
for name, relpath in extensions.items():
dest = os.path.join(cache_base, convert_path(relpath))
if not os.path.exists(dest):
extract = True
else:
file_time = os.stat(dest).st_mtime
file_time = datetime.datetime.fromtimestamp(
file_time)
info = zf.getinfo(relpath)
wheel_time = datetime.datetime(*info.date_time)
extract = wheel_time > file_time
if extract:
zf.extract(relpath, cache_base)
result.append((name, dest))
except KeyError:
pass
return result
def is_compatible(self):
"""
Determine if a wheel is compatible with the running system.
"""
return is_compatible(self)
def is_mountable(self):
"""
Determine if a wheel is asserted as mountable by its metadata.
"""
return True # for now - metadata details TBD
def mount(self, append=False):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if not self.is_compatible():
msg = 'Wheel %s not compatible with this Python.' % pathname
raise DistlibException(msg)
if not self.is_mountable():
msg = 'Wheel %s is marked as not mountable.' % pathname
raise DistlibException(msg)
if pathname in sys.path:
logger.debug('%s already in path', pathname)
else:
if append:
sys.path.append(pathname)
else:
sys.path.insert(0, pathname)
extensions = self._get_extensions()
if extensions:
if _hook not in sys.meta_path:
sys.meta_path.append(_hook)
_hook.add(pathname, extensions)
def unmount(self):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if pathname not in sys.path:
logger.debug('%s not in path', pathname)
else:
sys.path.remove(pathname)
if pathname in _hook.impure_wheels:
_hook.remove(pathname)
if not _hook.impure_wheels:
if _hook in sys.meta_path:
sys.meta_path.remove(_hook)
def verify(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
# data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
# metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(wheel_metadata_name) as bwf:
wf = wrapper(bwf)
message_from_file(wf)
# wv = message['Wheel-Version'].split('.', 1)
# file_version = tuple([int(i) for i in wv])
# TODO version verification
records = {}
with zf.open(record_name) as bf:
with CSVReader(stream=bf) as reader:
for row in reader:
p = row[0]
records[p] = row
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
# See issue #115: some wheels have .. in their entries, but
# in the filename ... e.g. __main__..py ! So the check is
# updated to look for .. in the directory portions
p = u_arcname.split('/')
if '..' in p:
raise DistlibException('invalid entry in '
'wheel: %r' % u_arcname)
if self.skip_entry(u_arcname):
continue
row = records[u_arcname]
if row[2] and str(zinfo.file_size) != row[2]:
raise DistlibException('size mismatch for '
'%s' % u_arcname)
if row[1]:
kind, value = row[1].split('=', 1)
with zf.open(arcname) as bf:
data = bf.read()
_, digest = self.get_hash(data, kind)
if digest != value:
raise DistlibException('digest mismatch for '
'%s' % arcname)
def update(self, modifier, dest_dir=None, **kwargs):
"""
Update the contents of a wheel in a generic way. The modifier should
be a callable which expects a dictionary argument: its keys are
archive-entry paths, and its values are absolute filesystem paths
where the contents the corresponding archive entries can be found. The
modifier is free to change the contents of the files pointed to, add
new entries and remove entries, before returning. This method will
extract the entire contents of the wheel to a temporary location, call
the modifier, and then use the passed (and possibly updated)
dictionary to write a new wheel. If ``dest_dir`` is specified, the new
wheel is written there -- otherwise, the original wheel is overwritten.
The modifier should return True if it updated the wheel, else False.
This method returns the same value the modifier returns.
"""
def get_version(path_map, info_dir):
version = path = None
key = '%s/%s' % (info_dir, LEGACY_METADATA_FILENAME)
if key not in path_map:
key = '%s/PKG-INFO' % info_dir
if key in path_map:
path = path_map[key]
version = Metadata(path=path).version
return version, path
def update_version(version, path):
updated = None
try:
NormalizedVersion(version)
i = version.find('-')
if i < 0:
updated = '%s+1' % version
else:
parts = [int(s) for s in version[i + 1:].split('.')]
parts[-1] += 1
updated = '%s+%s' % (version[:i], '.'.join(
str(i) for i in parts))
except UnsupportedVersionError:
logger.debug(
'Cannot update non-compliant (PEP-440) '
'version %r', version)
if updated:
md = Metadata(path=path)
md.version = updated
legacy = path.endswith(LEGACY_METADATA_FILENAME)
md.write(path=path, legacy=legacy)
logger.debug('Version updated from %r to %r', version, updated)
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
record_name = posixpath.join(info_dir, 'RECORD')
with tempdir() as workdir:
with ZipFile(pathname, 'r') as zf:
path_map = {}
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
if u_arcname == record_name:
continue
if '..' in u_arcname:
raise DistlibException('invalid entry in '
'wheel: %r' % u_arcname)
zf.extract(zinfo, workdir)
path = os.path.join(workdir, convert_path(u_arcname))
path_map[u_arcname] = path
# Remember the version.
original_version, _ = get_version(path_map, info_dir)
# Files extracted. Call the modifier.
modified = modifier(path_map, **kwargs)
if modified:
# Something changed - need to build a new wheel.
current_version, path = get_version(path_map, info_dir)
if current_version and (current_version == original_version):
# Add or update local version to signify changes.
update_version(current_version, path)
# Decide where the new wheel goes.
if dest_dir is None:
fd, newpath = tempfile.mkstemp(suffix='.whl',
prefix='wheel-update-',
dir=workdir)
os.close(fd)
else:
if not os.path.isdir(dest_dir):
raise DistlibException('Not a directory: %r' %
dest_dir)
newpath = os.path.join(dest_dir, self.filename)
archive_paths = list(path_map.items())
distinfo = os.path.join(workdir, info_dir)
info = distinfo, info_dir
self.write_records(info, workdir, archive_paths)
self.build_zip(newpath, archive_paths)
if dest_dir is None:
shutil.copyfile(newpath, pathname)
return modified
def _get_glibc_version():
import platform
ver = platform.libc_ver()
result = []
if ver[0] == 'glibc':
for s in ver[1].split('.'):
result.append(int(s) if s.isdigit() else 0)
result = tuple(result)
return result
def compatible_tags():
"""
Return (pyver, abi, arch) tuples compatible with this Python.
"""
versions = [VER_SUFFIX]
major = VER_SUFFIX[0]
for minor in range(sys.version_info[1] - 1, -1, -1):
versions.append(''.join([major, str(minor)]))
abis = []
for suffix in _get_suffixes():
if suffix.startswith('.abi'):
abis.append(suffix.split('.', 2)[1])
abis.sort()
if ABI != 'none':
abis.insert(0, ABI)
abis.append('none')
result = []
arches = [ARCH]
if sys.platform == 'darwin':
m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH)
if m:
name, major, minor, arch = m.groups()
minor = int(minor)
matches = [arch]
if arch in ('i386', 'ppc'):
matches.append('fat')
if arch in ('i386', 'ppc', 'x86_64'):
matches.append('fat3')
if arch in ('ppc64', 'x86_64'):
matches.append('fat64')
if arch in ('i386', 'x86_64'):
matches.append('intel')
if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
matches.append('universal')
while minor >= 0:
for match in matches:
s = '%s_%s_%s_%s' % (name, major, minor, match)
if s != ARCH: # already there
arches.append(s)
minor -= 1
# Most specific - our Python version, ABI and arch
for abi in abis:
for arch in arches:
result.append((''.join((IMP_PREFIX, versions[0])), abi, arch))
# manylinux
if abi != 'none' and sys.platform.startswith('linux'):
arch = arch.replace('linux_', '')
parts = _get_glibc_version()
if len(parts) == 2:
if parts >= (2, 5):
result.append((''.join((IMP_PREFIX, versions[0])), abi,
'manylinux1_%s' % arch))
if parts >= (2, 12):
result.append((''.join((IMP_PREFIX, versions[0])), abi,
'manylinux2010_%s' % arch))
if parts >= (2, 17):
result.append((''.join((IMP_PREFIX, versions[0])), abi,
'manylinux2014_%s' % arch))
result.append(
(''.join((IMP_PREFIX, versions[0])), abi,
'manylinux_%s_%s_%s' % (parts[0], parts[1], arch)))
# where no ABI / arch dependency, but IMP_PREFIX dependency
for i, version in enumerate(versions):
result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
if i == 0:
result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))
# no IMP_PREFIX, ABI or arch dependency
for i, version in enumerate(versions):
result.append((''.join(('py', version)), 'none', 'any'))
if i == 0:
result.append((''.join(('py', version[0])), 'none', 'any'))
return set(result)
COMPATIBLE_TAGS = compatible_tags()
del compatible_tags
def is_compatible(wheel, tags=None):
if not isinstance(wheel, Wheel):
wheel = Wheel(wheel) # assume it's a filename
result = False
if tags is None:
tags = COMPATIBLE_TAGS
for ver, abi, arch in tags:
if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch:
result = True
break
return result
| Wheel |
python | apache__airflow | providers/yandex/src/airflow/providers/yandex/secrets/lockbox.py | {
"start": 1825,
"end": 12168
} | class ____(BaseSecretsBackend, LoggingMixin):
"""
Retrieves connections or variables or configs from Yandex Lockbox.
Configurable via ``airflow.cfg`` like so:
.. code-block:: ini
[secrets]
backend = airflow.providers.yandex.secrets.lockbox.LockboxSecretBackend
backend_kwargs = {"connections_prefix": "airflow/connections"}
For example, when ``{"connections_prefix": "airflow/connections"}`` is set, if a secret is defined with
the path ``airflow/connections/smtp_default``, the connection with conn_id ``smtp_default`` would be
accessible.
When ``{"variables_prefix": "airflow/variables"}`` is set, if a secret is defined with
the path ``airflow/variables/hello``, the variable with the name ``hello`` would be accessible.
When ``{"config_prefix": "airflow/config"}`` is set, if a secret is defined with
the path ``airflow/config/sql_alchemy_conn``, the config with key ``sql_alchemy_conn`` would be
accessible.
If the prefix is empty, the requests will not be sent to Yandex Lockbox.
.. code-block:: ini
[secrets]
backend = airflow.providers.yandex.secrets.lockbox.LockboxSecretBackend
backend_kwargs = {"yc_connection_id": "<connection_ID>", "folder_id": "<folder_ID>"}
You need to specify credentials or the ID of the ``yandexcloud`` connection to connect to Yandex Lockbox.
The credentials will be used with the following priority:
* OAuth token
* Service account key in JSON from file
* Service account key in JSON
* Yandex Cloud connection
If you do not specify any credentials,
the system will use the default connection ID:``yandexcloud_default``.
Also, you need to specify the Yandex Cloud folder ID to search for Yandex Lockbox secrets in.
If you do not specify folder ID, the requests will use the connection ``folder_id`` if specified.
:param yc_oauth_token: Specifies the user account OAuth token to connect to Yandex Lockbox.
The parameter value should look like ``y3_xx123``.
:param yc_sa_key_json: Specifies the service account key in JSON.
The parameter value should look like
``{"id": "...", "service_account_id": "...", "private_key": "..."}``.
:param yc_sa_key_json_path: Specifies the service account key in JSON file path.
The parameter value should look like ``/home/airflow/authorized_key.json``,
while the file content should have the following format:
``{"id": "...", "service_account_id": "...", "private_key": "..."}``.
:param yc_connection_id: Specifies the connection ID to connect to Yandex Lockbox.
The default value is ``yandexcloud_default``.
:param folder_id: Specifies the folder ID to search for Yandex Lockbox secrets in.
If set to ``None`` (``null`` in JSON),
the requests will use the connection ``folder_id``, if specified.
:param connections_prefix: Specifies the prefix of the secret to read to get connections.
If set to ``None`` (``null`` in JSON),
the requests for connections will not be sent to Yandex Lockbox.
The default value is ``airflow/connections``.
:param variables_prefix: Specifies the prefix of the secret to read to get variables.
If set to ``None`` (``null`` in JSON), the requests for variables will not be sent to Yandex Lockbox.
The default value is ``airflow/variables``.
:param config_prefix: Specifies the prefix of the secret to read to get configurations.
If set to ``None`` (``null`` in JSON), the requests for variables will not be sent to Yandex Lockbox.
The default value is ``airflow/config``.
:param sep: Specifies the separator to concatenate ``secret_prefix`` and ``secret_id``.
The default value is ``/``.
:param endpoint: Specifies the API endpoint.
If set to ``None`` (``null`` in JSON), the requests will use the connection endpoint, if specified;
otherwise, they will use the default endpoint.
"""
def __init__(
self,
yc_oauth_token: str | None = None,
yc_sa_key_json: dict | str | None = None,
yc_sa_key_json_path: str | None = None,
yc_connection_id: str | None = None,
folder_id: str = "",
connections_prefix: str | None = "airflow/connections",
variables_prefix: str | None = "airflow/variables",
config_prefix: str | None = "airflow/config",
sep: str = "/",
endpoint: str | None = None,
):
super().__init__()
self.yc_oauth_token = yc_oauth_token
self.yc_sa_key_json = yc_sa_key_json
self.yc_sa_key_json_path = yc_sa_key_json_path
self.yc_connection_id = None
if not any([yc_oauth_token, yc_sa_key_json, yc_sa_key_json_path]):
self.yc_connection_id = yc_connection_id or default_conn_name
elif yc_connection_id is not None:
raise ValueError("`yc_connection_id` should not be used if other credentials are specified")
self.folder_id = folder_id
self.connections_prefix = connections_prefix.rstrip(sep) if connections_prefix is not None else None
self.variables_prefix = variables_prefix.rstrip(sep) if variables_prefix is not None else None
self.config_prefix = config_prefix.rstrip(sep) if config_prefix is not None else None
self.sep = sep
self.endpoint = endpoint
def get_conn_value(self, conn_id: str) -> str | None:
"""
Retrieve from Secrets Backend a string value representing the Connection object.
:param conn_id: Connection ID
:return: Connection Value
"""
if self.connections_prefix is None:
return None
if conn_id == self.yc_connection_id:
return None
return self._get_secret_value(self.connections_prefix, conn_id)
def get_variable(self, key: str) -> str | None:
"""
Return value for Airflow Variable.
:param key: Variable Key
:return: Variable Value
"""
if self.variables_prefix is None:
return None
return self._get_secret_value(self.variables_prefix, key)
def get_config(self, key: str) -> str | None:
"""
Return value for Airflow Config Key.
:param key: Config Key
:return: Config Value
"""
if self.config_prefix is None:
return None
return self._get_secret_value(self.config_prefix, key)
@cached_property
def _client(self):
"""
Create a Yandex Cloud SDK client.
Lazy loading is used here
because we can't establish a Connection until all secrets backends have been initialized.
"""
if self.yc_connection_id:
self.yc_oauth_token = self._get_field("oauth")
self.yc_sa_key_json = self._get_field("service_account_json")
self.yc_sa_key_json_path = self._get_field("service_account_json_path")
self.folder_id = self.folder_id or self._get_field("folder_id")
self.endpoint = self.endpoint or self._get_field("endpoint")
credentials = get_credentials(
oauth_token=self.yc_oauth_token,
service_account_json=self.yc_sa_key_json,
service_account_json_path=self.yc_sa_key_json_path,
)
sdk_config = self._get_endpoint()
return yandexcloud.SDK(user_agent=provider_user_agent(), **credentials, **sdk_config).client
def _get_endpoint(self) -> dict[str, str]:
sdk_config = {}
if self.endpoint:
sdk_config["endpoint"] = self.endpoint
return sdk_config
@cached_property
def _connection(self) -> Connection | None:
if not self.yc_connection_id:
return None
conn = Connection.get_connection_from_secrets(self.yc_connection_id)
self.log.info("Using connection ID '%s' for task execution.", conn.conn_id)
return conn
def _get_field(self, field_name: str, default: Any = None) -> Any:
conn = self._connection
if not conn:
return None
return get_field_from_extras(
extras=conn.extra_dejson,
field_name=field_name,
default=default,
)
def _build_secret_name(self, prefix: str, key: str):
if len(prefix) == 0:
return key
return f"{prefix}{self.sep}{key}"
def _get_secret_value(self, prefix: str, key: str) -> str | None:
secret: secret_pb.Secret | None = None
for s in self._get_secrets():
if s.name == self._build_secret_name(prefix=prefix, key=key):
secret = s
break
if not secret:
return None
payload = self._get_payload(secret.id, secret.current_version.id)
entries = {entry.key: entry.text_value for entry in payload.entries if entry.text_value}
if len(entries) == 0:
return None
return sorted(entries.values())[0]
def _get_secrets(self) -> list[secret_pb.Secret]:
# generate client if not exists, to load folder_id from connections
_ = self._client
response = self._list_secrets(folder_id=self.folder_id)
secrets: list[secret_pb.Secret] = response.secrets[:]
next_page_token = response.next_page_token
while next_page_token != "":
response = self._list_secrets(
folder_id=self.folder_id,
page_token=next_page_token,
)
secrets.extend(response.secrets)
next_page_token = response.next_page_token
return secrets
def _get_payload(self, secret_id: str, version_id: str) -> payload_pb.Payload:
request = payload_service_pb.GetPayloadRequest(
secret_id=secret_id,
version_id=version_id,
)
return self._client(payload_service_pb_grpc.PayloadServiceStub).Get(request)
def _list_secrets(self, folder_id: str, page_token: str = "") -> secret_service_pb.ListSecretsResponse:
request = secret_service_pb.ListSecretsRequest(
folder_id=folder_id,
page_token=page_token,
)
return self._client(secret_service_pb_grpc.SecretServiceStub).List(request)
| LockboxSecretBackend |
python | cython__cython | Cython/Debugger/libpython.py | {
"start": 28146,
"end": 28411
} | class ____(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyObject* pointing to the
singleton (we hope) _Py_NoneStruct with ob_type PyNone_Type
"""
_typename = 'PyObject'
def proxyval(self, visited):
return None
| PyNoneStructPtr |
python | apache__airflow | providers/apache/hive/tests/unit/apache/hive/sensors/test_named_hive_partition.py | {
"start": 1350,
"end": 4891
} | class ____:
def setup_method(self):
args = {"owner": "airflow", "start_date": DEFAULT_DATE}
self.dag = DAG("test_dag_id", schedule=None, default_args=args)
self.next_day = (DEFAULT_DATE + timedelta(days=1)).isoformat()[:10]
self.database = "airflow"
self.partition_by = "ds"
self.table = "static_babynames_partitioned"
self.hql = """
CREATE DATABASE IF NOT EXISTS {{ params.database }};
USE {{ params.database }};
DROP TABLE IF EXISTS {{ params.table }};
CREATE TABLE IF NOT EXISTS {{ params.table }} (
state string,
year string,
name string,
gender string,
num int)
PARTITIONED BY ({{ params.partition_by }} string);
ALTER TABLE {{ params.table }}
ADD PARTITION({{ params.partition_by }}='{{ ds }}');
"""
self.hook = MockHiveMetastoreHook()
def test_parse_partition_name_correct(self):
schema = "default"
table = "users"
partition = "ds=2016-01-01/state=IT"
name = f"{schema}.{table}/{partition}"
parsed_schema, parsed_table, parsed_partition = NamedHivePartitionSensor.parse_partition_name(name)
assert schema == parsed_schema
assert table == parsed_table
assert partition == parsed_partition
def test_parse_partition_name_incorrect(self):
name = "incorrect.name"
with pytest.raises(ValueError, match="Could not parse incorrect.nameinto table, partition"):
NamedHivePartitionSensor.parse_partition_name(name)
def test_parse_partition_name_default(self):
table = "users"
partition = "ds=2016-01-01/state=IT"
name = f"{table}/{partition}"
parsed_schema, parsed_table, parsed_partition = NamedHivePartitionSensor.parse_partition_name(name)
assert parsed_schema == "default"
assert table == parsed_table
assert partition == parsed_partition
def test_poke_existing(self):
self.hook.metastore.__enter__().check_for_named_partition.return_value = True
partitions = [f"{self.database}.{self.table}/{self.partition_by}={DEFAULT_DATE_DS}"]
sensor = NamedHivePartitionSensor(
partition_names=partitions,
task_id="test_poke_existing",
poke_interval=1,
hook=self.hook,
dag=self.dag,
)
assert sensor.poke(None)
self.hook.metastore.__enter__().check_for_named_partition.assert_called_with(
self.database, self.table, f"{self.partition_by}={DEFAULT_DATE_DS}"
)
def test_poke_non_existing(self):
self.hook.metastore.__enter__().check_for_named_partition.return_value = False
partitions = [f"{self.database}.{self.table}/{self.partition_by}={self.next_day}"]
sensor = NamedHivePartitionSensor(
partition_names=partitions,
task_id="test_poke_non_existing",
poke_interval=1,
hook=self.hook,
dag=self.dag,
)
assert not sensor.poke(None)
self.hook.metastore.__enter__().check_for_named_partition.assert_called_with(
self.database, self.table, f"{self.partition_by}={self.next_day}"
)
@pytest.mark.skipif(
"AIRFLOW_RUNALL_TESTS" not in os.environ, reason="Skipped because AIRFLOW_RUNALL_TESTS is not set"
)
| TestNamedHivePartitionSensor |
python | PrefectHQ__prefect | src/integrations/prefect-dbt/tests/cli/test_commands.py | {
"start": 15307,
"end": 26458
} | class ____:
@pytest.fixture
def mock_open_process(self, monkeypatch):
open_process = MagicMock(name="open_process")
open_process.return_value = AsyncMock(name="returned open_process")
monkeypatch.setattr("prefect_shell.commands.open_process", open_process)
return open_process
@pytest.fixture
def mock_shell_process(self, monkeypatch):
shell_process = MagicMock()
opened_shell_process = AsyncMock()
shell_process.return_value = opened_shell_process
monkeypatch.setattr("prefect_shell.commands.ShellProcess", shell_process)
return shell_process
@pytest.fixture
def dbt_cli_profile(self):
return DbtCliProfile(
name="my_name",
target="my_target",
target_configs={"type": "my_type", "threads": 4, "schema": "my_schema"},
)
def test_find_valid_profiles_dir_default_env(
self, tmp_path, mock_open_process, mock_shell_process, monkeypatch
):
monkeypatch.setenv("DBT_PROFILES_DIR", str(tmp_path))
(tmp_path / "profiles.yml").write_text("test")
DbtCoreOperation(commands=["dbt debug"]).run()
actual = str(mock_open_process.call_args_list[0][1]["env"]["DBT_PROFILES_DIR"])
expected = str(tmp_path)
assert actual == expected
def test_find_valid_profiles_dir_input_env(
self, tmp_path, mock_open_process, mock_shell_process
):
(tmp_path / "profiles.yml").write_text("test")
DbtCoreOperation(
commands=["dbt debug"], env={"DBT_PROFILES_DIR": str(tmp_path)}
).run()
actual = str(mock_open_process.call_args_list[0][1]["env"]["DBT_PROFILES_DIR"])
expected = str(tmp_path)
assert actual == expected
def test_find_valid_profiles_dir_overwrite_without_profile(
self, tmp_path, mock_open_process, mock_shell_process
):
with pytest.raises(ValueError, match="Since overwrite_profiles is True"):
DbtCoreOperation(
commands=["dbt debug"], profiles_dir=tmp_path, overwrite_profiles=True
).run()
def test_find_valid_profiles_dir_overwrite_with_profile(
self, tmp_path, dbt_cli_profile, mock_open_process, mock_shell_process
):
DbtCoreOperation(
commands=["dbt debug"],
profiles_dir=tmp_path,
overwrite_profiles=True,
dbt_cli_profile=dbt_cli_profile,
).run()
assert (tmp_path / "profiles.yml").exists()
def test_find_valid_profiles_dir_not_overwrite_with_profile(
self, tmp_path, dbt_cli_profile, mock_open_process, mock_shell_process
):
(tmp_path / "profiles.yml").write_text("test")
with pytest.raises(ValueError, match="Since overwrite_profiles is False"):
DbtCoreOperation(
commands=["dbt debug"],
profiles_dir=tmp_path,
overwrite_profiles=False,
dbt_cli_profile=dbt_cli_profile,
).run()
def test_find_valid_profiles_dir_path_without_profile(self):
with pytest.raises(ValueError, match="Since overwrite_profiles is True"):
DbtCoreOperation(commands=["dbt debug"], profiles_dir=Path("fake")).run()
def test_append_dirs_to_commands(
self,
tmp_path,
dbt_cli_profile,
mock_open_process,
mock_shell_process,
monkeypatch,
):
mock_named_temporary_file = MagicMock(name="tempfile")
monkeypatch.setattr("tempfile.NamedTemporaryFile", mock_named_temporary_file)
try:
with DbtCoreOperation(
commands=["dbt debug"],
profiles_dir=tmp_path,
project_dir=tmp_path,
dbt_cli_profile=dbt_cli_profile,
) as op:
op.run()
except (FileNotFoundError, TypeError): # py37 raises TypeError
pass # we're mocking the tempfile; this is expected
mock_write = mock_named_temporary_file.return_value.write
assert (
mock_write.call_args_list[0][0][0]
== f'dbt debug --profiles-dir "{tmp_path}" --project-dir "{tmp_path}"'.encode()
)
@pytest.mark.usefixtures("dbt_runner_freshness_success")
def test_sync_dbt_cli_command_creates_artifact(
profiles_dir: str, dbt_cli_profile: Any
) -> None:
@flow
def test_flow() -> None:
trigger_dbt_cli_command(
command="dbt source freshness",
profiles_dir=profiles_dir,
dbt_cli_profile=dbt_cli_profile,
summary_artifact_key="foo",
create_summary_artifact=True,
)
test_flow()
assert (a := Artifact.get(key="foo"))
assert a.type == "markdown"
assert isinstance(a.data, str) and a.data.startswith(
"# dbt source freshness Task Summary"
)
assert "my_first_dbt_model" in a.data
assert "Successful Nodes" in a.data
@pytest.mark.usefixtures("dbt_runner_model_result")
async def test_run_dbt_build_creates_artifact(profiles_dir, dbt_cli_profile_bare):
@flow
async def test_flow():
return await run_dbt_build(
profiles_dir=profiles_dir,
dbt_cli_profile=dbt_cli_profile_bare,
summary_artifact_key="foo",
create_summary_artifact=True,
)
await test_flow()
assert (a := await Artifact.get(key="foo"))
assert a.type == "markdown"
assert a.data.startswith("# dbt build Task Summary")
assert "my_first_dbt_model" in a.data
assert "Successful Nodes" in a.data
@pytest.mark.usefixtures("dbt_runner_model_result")
async def test_run_dbt_test_creates_artifact(profiles_dir, dbt_cli_profile_bare):
@flow
async def test_flow():
return await run_dbt_test(
profiles_dir=profiles_dir,
dbt_cli_profile=dbt_cli_profile_bare,
summary_artifact_key="foo",
create_summary_artifact=True,
)
await test_flow()
assert (a := await Artifact.get(key="foo"))
assert a.type == "markdown"
assert a.data.startswith("# dbt test Task Summary")
assert "my_first_dbt_model" in a.data
assert "Successful Nodes" in a.data
@pytest.mark.usefixtures("dbt_runner_model_result")
async def test_run_dbt_snapshot_creates_artifact(profiles_dir, dbt_cli_profile_bare):
@flow
async def test_flow():
return await run_dbt_snapshot(
profiles_dir=profiles_dir,
dbt_cli_profile=dbt_cli_profile_bare,
summary_artifact_key="foo",
create_summary_artifact=True,
)
await test_flow()
assert (a := await Artifact.get(key="foo"))
assert a.type == "markdown"
assert a.data.startswith("# dbt snapshot Task Summary")
assert "my_first_dbt_model" in a.data
assert "Successful Nodes" in a.data
@pytest.mark.usefixtures("dbt_runner_model_result")
async def test_run_dbt_seed_creates_artifact(profiles_dir, dbt_cli_profile_bare):
@flow
async def test_flow():
return await run_dbt_seed(
profiles_dir=profiles_dir,
dbt_cli_profile=dbt_cli_profile_bare,
summary_artifact_key="foo",
create_summary_artifact=True,
)
await test_flow()
assert (a := await Artifact.get(key="foo"))
assert a.type == "markdown"
assert a.data.startswith("# dbt seed Task Summary")
assert "my_first_dbt_model" in a.data
assert "Successful Nodes" in a.data
@pytest.mark.usefixtures("dbt_runner_model_result")
async def test_run_dbt_model_creates_artifact(profiles_dir, dbt_cli_profile_bare):
@flow
async def test_flow():
return await run_dbt_model(
profiles_dir=profiles_dir,
dbt_cli_profile=dbt_cli_profile_bare,
summary_artifact_key="foo",
create_summary_artifact=True,
)
await test_flow()
assert (a := await Artifact.get(key="foo"))
assert a.type == "markdown"
assert a.data.startswith("# dbt run Task Summary")
assert "my_first_dbt_model" in a.data
assert "Successful Nodes" in a.data
@pytest.mark.usefixtures("dbt_runner_freshness_success")
async def test_run_dbt_source_freshness_creates_artifact(
profiles_dir, dbt_cli_profile_bare
):
@flow
async def test_flow():
return await run_dbt_source_freshness(
profiles_dir=profiles_dir,
dbt_cli_profile=dbt_cli_profile_bare,
summary_artifact_key="foo",
create_summary_artifact=True,
)
await test_flow()
assert (a := await Artifact.get(key="foo"))
assert a.type == "markdown"
assert a.data.startswith("# dbt source freshness Task Summary")
assert "my_first_dbt_model" in a.data
assert "Successful Nodes" in a.data
@pytest.fixture
def dbt_runner_model_error(monkeypatch, mock_dbt_runner_model_error):
_mock_dbt_runner_invoke_error = MagicMock(return_value=mock_dbt_runner_model_error)
monkeypatch.setattr("dbt.cli.main.dbtRunner.invoke", _mock_dbt_runner_invoke_error)
@pytest.mark.usefixtures("dbt_runner_model_error")
async def test_run_dbt_model_creates_unsuccessful_artifact(
profiles_dir, dbt_cli_profile_bare
):
@flow
async def test_flow():
return await run_dbt_model(
profiles_dir=profiles_dir,
dbt_cli_profile=dbt_cli_profile_bare,
summary_artifact_key="foo",
create_summary_artifact=True,
)
with pytest.raises(
Exception, match="dbt task result success: False with exception: None"
):
await test_flow()
assert (a := await Artifact.get(key="foo"))
assert a.type == "markdown"
assert a.data.startswith("# dbt run Task Summary")
assert "my_first_dbt_model" in a.data
assert "Unsuccessful Nodes" in a.data
@pytest.mark.usefixtures("dbt_runner_freshness_error")
async def test_run_dbt_source_freshness_creates_unsuccessful_artifact(
profiles_dir, dbt_cli_profile_bare
):
@flow
async def test_flow():
return await run_dbt_source_freshness(
profiles_dir=profiles_dir,
dbt_cli_profile=dbt_cli_profile_bare,
summary_artifact_key="foo",
create_summary_artifact=True,
)
with pytest.raises(
Exception, match="dbt task result success: False with exception: None"
):
await test_flow()
assert (a := await Artifact.get(key="foo"))
assert a.type == "markdown"
assert a.data.startswith("# dbt source freshness Task Summary")
assert "my_first_dbt_model" in a.data
assert "Unsuccessful Nodes" in a.data
@pytest.mark.usefixtures("dbt_runner_failed_result")
async def test_run_dbt_model_throws_error(profiles_dir, dbt_cli_profile_bare):
@flow
async def test_flow():
return await run_dbt_model(
profiles_dir=profiles_dir,
dbt_cli_profile=dbt_cli_profile_bare,
summary_artifact_key="foo",
create_summary_artifact=True,
)
with pytest.raises(DbtUsageException, match="No such command 'weeeeeee'."):
await test_flow()
| TestDbtCoreOperation |
python | pydata__xarray | xarray/namedarray/_typing.py | {
"start": 3210,
"end": 5057
} | class ____(
_array[_ShapeType_co, _DType_co], Protocol[_ShapeType_co, _DType_co]
):
"""
Duck array supporting NEP 18.
Corresponds to np.ndarray.
"""
@overload
def __getitem__(
self, key: _arrayfunction[Any, Any] | tuple[_arrayfunction[Any, Any], ...], /
) -> _arrayfunction[Any, _DType_co]: ...
@overload
def __getitem__(self, key: _IndexKeyLike, /) -> Any: ...
def __getitem__(
self,
key: (
_IndexKeyLike
| _arrayfunction[Any, Any]
| tuple[_arrayfunction[Any, Any], ...]
),
/,
) -> _arrayfunction[Any, _DType_co] | Any: ...
@overload
def __array__(
self, dtype: None = ..., /, *, copy: bool | None = ...
) -> np.ndarray[Any, _DType_co]: ...
@overload
def __array__(
self, dtype: _DType, /, *, copy: bool | None = ...
) -> np.ndarray[Any, _DType]: ...
def __array__(
self, dtype: _DType | None = ..., /, *, copy: bool | None = ...
) -> np.ndarray[Any, _DType] | np.ndarray[Any, _DType_co]: ...
# TODO: Should return the same subclass but with a new dtype generic.
# https://github.com/python/typing/issues/548
def __array_ufunc__(
self,
ufunc: Any,
method: Any,
*inputs: Any,
**kwargs: Any,
) -> Any: ...
# TODO: Should return the same subclass but with a new dtype generic.
# https://github.com/python/typing/issues/548
def __array_function__(
self,
func: Callable[..., Any],
types: Iterable[type],
args: Iterable[Any],
kwargs: Mapping[str, Any],
) -> Any: ...
@property
def imag(self) -> _arrayfunction[_ShapeType_co, Any]: ...
@property
def real(self) -> _arrayfunction[_ShapeType_co, Any]: ...
@runtime_checkable
| _arrayfunction |
python | FactoryBoy__factory_boy | tests/test_fuzzy.py | {
"start": 470,
"end": 2031
} | class ____(unittest.TestCase):
def test_unbiased(self):
options = [1, 2, 3]
d = fuzzy.FuzzyChoice(options)
res = utils.evaluate_declaration(d)
self.assertIn(res, options)
def test_mock(self):
options = [1, 2, 3]
fake_choice = lambda d: sum(d)
d = fuzzy.FuzzyChoice(options)
with mock.patch('factory.random.randgen.choice', fake_choice):
res = utils.evaluate_declaration(d)
self.assertEqual(6, res)
def test_generator(self):
def options():
yield from range(3)
d = fuzzy.FuzzyChoice(options())
res = utils.evaluate_declaration(d)
self.assertIn(res, [0, 1, 2])
# And repeat
res = utils.evaluate_declaration(d)
self.assertIn(res, [0, 1, 2])
def test_lazy_generator(self):
class Gen:
def __init__(self, options):
self.options = options
self.unrolled = False
def __iter__(self):
self.unrolled = True
return iter(self.options)
opts = Gen([1, 2, 3])
d = fuzzy.FuzzyChoice(opts)
self.assertFalse(opts.unrolled)
res = utils.evaluate_declaration(d)
self.assertIn(res, [1, 2, 3])
self.assertTrue(opts.unrolled)
def test_getter(self):
options = [('a', 1), ('b', 2), ('c', 3)]
d = fuzzy.FuzzyChoice(options, getter=lambda x: x[1])
res = utils.evaluate_declaration(d)
self.assertIn(res, [1, 2, 3])
| FuzzyChoiceTestCase |
python | oauthlib__oauthlib | oauthlib/oauth2/rfc6749/errors.py | {
"start": 6914,
"end": 7065
} | class ____(OAuth2Error):
"""
The resource owner or authorization server denied the request.
"""
error = 'access_denied'
| AccessDeniedError |
python | pytest-dev__pytest | src/_pytest/mark/structures.py | {
"start": 19333,
"end": 22437
} | class ____:
"""Factory for :class:`MarkDecorator` objects - exposed as
a ``pytest.mark`` singleton instance.
Example::
import pytest
@pytest.mark.slowtest
def test_function():
pass
applies a 'slowtest' :class:`Mark` on ``test_function``.
"""
# See TYPE_CHECKING above.
if TYPE_CHECKING:
skip: _SkipMarkDecorator
skipif: _SkipifMarkDecorator
xfail: _XfailMarkDecorator
parametrize: _ParametrizeMarkDecorator
usefixtures: _UsefixturesMarkDecorator
filterwarnings: _FilterwarningsMarkDecorator
def __init__(self, *, _ispytest: bool = False) -> None:
check_ispytest(_ispytest)
self._config: Config | None = None
self._markers: set[str] = set()
def __getattr__(self, name: str) -> MarkDecorator:
"""Generate a new :class:`MarkDecorator` with the given name."""
if name[0] == "_":
raise AttributeError("Marker name must NOT start with underscore")
if self._config is not None:
# We store a set of markers as a performance optimisation - if a mark
# name is in the set we definitely know it, but a mark may be known and
# not in the set. We therefore start by updating the set!
if name not in self._markers:
for line in self._config.getini("markers"):
# example lines: "skipif(condition): skip the given test if..."
# or "hypothesis: tests which use Hypothesis", so to get the
# marker name we split on both `:` and `(`.
marker = line.split(":")[0].split("(")[0].strip()
self._markers.add(marker)
# If the name is not in the set of known marks after updating,
# then it really is time to issue a warning or an error.
if name not in self._markers:
# Raise a specific error for common misspellings of "parametrize".
if name in ["parameterize", "parametrise", "parameterise"]:
__tracebackhide__ = True
fail(f"Unknown '{name}' mark, did you mean 'parametrize'?")
strict_markers = self._config.getini("strict_markers")
if strict_markers is None:
strict_markers = self._config.getini("strict")
if strict_markers:
fail(
f"{name!r} not found in `markers` configuration option",
pytrace=False,
)
warnings.warn(
f"Unknown pytest.mark.{name} - is this a typo? You can register "
"custom marks to avoid this warning - for details, see "
"https://docs.pytest.org/en/stable/how-to/mark.html",
PytestUnknownMarkWarning,
2,
)
return MarkDecorator(Mark(name, (), {}, _ispytest=True), _ispytest=True)
MARK_GEN = MarkGenerator(_ispytest=True)
@final
| MarkGenerator |
python | kamyu104__LeetCode-Solutions | Python/generate-tag-for-video-caption.py | {
"start": 38,
"end": 604
} | class ____(object):
def generateTag(self, caption):
"""
:type caption: str
:rtype: str
"""
L = 100
result = ['#']
for i in xrange(len(caption)):
if caption[i] == ' ':
continue
result.append(caption[i].upper() if i == 0 or caption[i-1] == ' ' else caption[i].lower())
if len(result) == L:
break
if 1 < len(result):
result[1] = result[1].lower()
return "".join(result)
# Time: O(n)
# Space: O(n)
# string
| Solution |
python | pytest-dev__pytest | testing/python/fixtures.py | {
"start": 124075,
"end": 130611
} | class ____:
def test_call_from_fixture(self, pytester: Pytester) -> None:
pytester.makepyfile(
test_call_from_fixture="""
import pytest
@pytest.fixture(params=[0, 1, 2])
def fix_with_param(request):
return request.param
@pytest.fixture
def get_named_fixture(request):
return request.getfixturevalue('fix_with_param')
def test_foo(request, get_named_fixture):
pass
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"The requested fixture has no parameter defined for test:",
" test_call_from_fixture.py::test_foo",
"Requested fixture 'fix_with_param' defined in:",
"test_call_from_fixture.py:4",
"Requested here:",
"test_call_from_fixture.py:9",
"*1 error in*",
]
)
def test_call_from_test(self, pytester: Pytester) -> None:
pytester.makepyfile(
test_call_from_test="""
import pytest
@pytest.fixture(params=[0, 1, 2])
def fix_with_param(request):
return request.param
def test_foo(request):
request.getfixturevalue('fix_with_param')
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"The requested fixture has no parameter defined for test:",
" test_call_from_test.py::test_foo",
"Requested fixture 'fix_with_param' defined in:",
"test_call_from_test.py:4",
"Requested here:",
"test_call_from_test.py:8",
"*1 failed*",
]
)
def test_external_fixture(self, pytester: Pytester) -> None:
pytester.makeconftest(
"""
import pytest
@pytest.fixture(params=[0, 1, 2])
def fix_with_param(request):
return request.param
"""
)
pytester.makepyfile(
test_external_fixture="""
def test_foo(request):
request.getfixturevalue('fix_with_param')
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"The requested fixture has no parameter defined for test:",
" test_external_fixture.py::test_foo",
"",
"Requested fixture 'fix_with_param' defined in:",
"conftest.py:4",
"Requested here:",
"test_external_fixture.py:2",
"*1 failed*",
]
)
def test_non_relative_path(self, pytester: Pytester) -> None:
tests_dir = pytester.mkdir("tests")
fixdir = pytester.mkdir("fixtures")
fixfile = fixdir.joinpath("fix.py")
fixfile.write_text(
textwrap.dedent(
"""\
import pytest
@pytest.fixture(params=[0, 1, 2])
def fix_with_param(request):
return request.param
"""
),
encoding="utf-8",
)
testfile = tests_dir.joinpath("test_foos.py")
testfile.write_text(
textwrap.dedent(
"""\
from fix import fix_with_param
def test_foo(request):
request.getfixturevalue('fix_with_param')
"""
),
encoding="utf-8",
)
os.chdir(tests_dir)
pytester.syspathinsert(fixdir)
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"The requested fixture has no parameter defined for test:",
" test_foos.py::test_foo",
"",
"Requested fixture 'fix_with_param' defined in:",
f"{fixfile}:4",
"Requested here:",
"test_foos.py:4",
"*1 failed*",
]
)
# With non-overlapping rootdir, passing tests_dir.
rootdir = pytester.mkdir("rootdir")
os.chdir(rootdir)
result = pytester.runpytest("--rootdir", rootdir, tests_dir)
result.stdout.fnmatch_lines(
[
"The requested fixture has no parameter defined for test:",
" test_foos.py::test_foo",
"",
"Requested fixture 'fix_with_param' defined in:",
f"{fixfile}:4",
"Requested here:",
f"{testfile}:4",
"*1 failed*",
]
)
def test_pytest_fixture_setup_and_post_finalizer_hook(pytester: Pytester) -> None:
pytester.makeconftest(
"""
def pytest_fixture_setup(fixturedef, request):
print('ROOT setup hook called for {0} from {1}'.format(fixturedef.argname, request.node.name))
def pytest_fixture_post_finalizer(fixturedef, request):
print('ROOT finalizer hook called for {0} from {1}'.format(fixturedef.argname, request.node.name))
"""
)
pytester.makepyfile(
**{
"tests/conftest.py": """
def pytest_fixture_setup(fixturedef, request):
print('TESTS setup hook called for {0} from {1}'.format(fixturedef.argname, request.node.name))
def pytest_fixture_post_finalizer(fixturedef, request):
print('TESTS finalizer hook called for {0} from {1}'.format(fixturedef.argname, request.node.name))
""",
"tests/test_hooks.py": """
import pytest
@pytest.fixture()
def my_fixture():
return 'some'
def test_func(my_fixture):
print('TEST test_func')
assert my_fixture == 'some'
""",
}
)
result = pytester.runpytest("-s")
assert result.ret == 0
result.stdout.fnmatch_lines(
[
"*TESTS setup hook called for my_fixture from test_func*",
"*ROOT setup hook called for my_fixture from test_func*",
"*TEST test_func*",
"*TESTS finalizer hook called for my_fixture from test_func*",
"*ROOT finalizer hook called for my_fixture from test_func*",
]
)
| TestParameterizedSubRequest |
python | walkccc__LeetCode | solutions/2730. Find the Longest Semi-Repetitive Substring/2730.py | {
"start": 0,
"end": 306
} | class ____:
def longestSemiRepetitiveSubstring(self, s: str) -> int:
ans = 1
prevStart = 0
start = 0
for i in range(1, len(s)):
if s[i] == s[i - 1]:
if prevStart > 0:
start = prevStart
prevStart = i
ans = max(ans, i - start + 1)
return ans
| Solution |
python | google__jax | jax/experimental/jax2tf/tests/flax_models/transformer_wmt.py | {
"start": 6488,
"end": 7685
} | class ____(nn.Module):
"""Transformer encoder layer.
Attributes:
config: TransformerConfig dataclass containing hyperparameters.
"""
config: TransformerConfig
@nn.compact
def __call__(self,
inputs,
encoder_mask=None):
"""Applies Encoder1DBlock module.
Args:
inputs: input data.
encoder_mask: encoder self-attention mask.
Returns:
output after transformer encoder block.
"""
config = self.config
# Attention block.
assert inputs.ndim == 3
x = nn.LayerNorm(dtype=config.dtype)(inputs)
x = nn.SelfAttention(
num_heads=config.num_heads,
dtype=config.dtype,
qkv_features=config.qkv_dim,
kernel_init=config.kernel_init,
bias_init=config.bias_init,
use_bias=False,
broadcast_dropout=False,
dropout_rate=config.attention_dropout_rate,
deterministic=config.deterministic)(x, encoder_mask)
x = nn.Dropout(rate=config.dropout_rate)(
x, deterministic=config.deterministic)
x = x + inputs
# MLP block.
y = nn.LayerNorm(dtype=config.dtype)(x)
y = MlpBlock(config=config)(y)
return x + y
| Encoder1DBlock |
python | giampaolo__psutil | tests/test_contracts.py | {
"start": 7186,
"end": 12214
} | class ____(PsutilTestCase):
"""Check the return types of system related APIs.
https://github.com/giampaolo/psutil/issues/1039.
"""
@classmethod
def setUpClass(cls):
cls.proc = psutil.Process()
def assert_ntuple_of_nums(self, nt, type_=float, gezero=True):
assert is_namedtuple(nt)
for n in nt:
assert isinstance(n, type_)
if gezero:
assert n >= 0
def test_cpu_times(self):
self.assert_ntuple_of_nums(psutil.cpu_times())
for nt in psutil.cpu_times(percpu=True):
self.assert_ntuple_of_nums(nt)
def test_cpu_percent(self):
assert isinstance(psutil.cpu_percent(interval=None), float)
assert isinstance(psutil.cpu_percent(interval=0.00001), float)
def test_cpu_times_percent(self):
self.assert_ntuple_of_nums(psutil.cpu_times_percent(interval=None))
self.assert_ntuple_of_nums(psutil.cpu_times_percent(interval=0.0001))
def test_cpu_count(self):
assert isinstance(psutil.cpu_count(), int)
# TODO: remove this once 1892 is fixed
@pytest.mark.skipif(MACOS and AARCH64, reason="skipped due to #1892")
@pytest.mark.skipif(not HAS_CPU_FREQ, reason="not supported")
def test_cpu_freq(self):
if psutil.cpu_freq() is None:
return pytest.skip("cpu_freq() returns None")
self.assert_ntuple_of_nums(psutil.cpu_freq(), type_=(float, int))
def test_disk_io_counters(self):
# Duplicate of test_system.py. Keep it anyway.
for k, v in psutil.disk_io_counters(perdisk=True).items():
assert isinstance(k, str)
self.assert_ntuple_of_nums(v, type_=int)
def test_disk_partitions(self):
# Duplicate of test_system.py. Keep it anyway.
for disk in psutil.disk_partitions():
assert isinstance(disk.device, str)
assert isinstance(disk.mountpoint, str)
assert isinstance(disk.fstype, str)
assert isinstance(disk.opts, str)
@pytest.mark.skipif(SKIP_SYSCONS, reason="requires root")
def test_net_connections(self):
with create_sockets():
ret = psutil.net_connections('all')
assert len(ret) == len(set(ret))
for conn in ret:
assert is_namedtuple(conn)
def test_net_if_addrs(self):
# Duplicate of test_system.py. Keep it anyway.
for ifname, addrs in psutil.net_if_addrs().items():
assert isinstance(ifname, str)
for addr in addrs:
assert isinstance(addr.family, enum.IntEnum)
assert isinstance(addr.address, str)
assert isinstance(addr.netmask, (str, type(None)))
assert isinstance(addr.broadcast, (str, type(None)))
def test_net_if_stats(self):
# Duplicate of test_system.py. Keep it anyway.
for ifname, info in psutil.net_if_stats().items():
assert isinstance(ifname, str)
assert isinstance(info.isup, bool)
assert isinstance(info.duplex, enum.IntEnum)
assert isinstance(info.speed, int)
assert isinstance(info.mtu, int)
@pytest.mark.skipif(not HAS_NET_IO_COUNTERS, reason="not supported")
def test_net_io_counters(self):
# Duplicate of test_system.py. Keep it anyway.
for ifname in psutil.net_io_counters(pernic=True):
assert isinstance(ifname, str)
@pytest.mark.skipif(not HAS_SENSORS_FANS, reason="not supported")
def test_sensors_fans(self):
# Duplicate of test_system.py. Keep it anyway.
for name, units in psutil.sensors_fans().items():
assert isinstance(name, str)
for unit in units:
assert isinstance(unit.label, str)
assert isinstance(unit.current, (float, int, type(None)))
@pytest.mark.skipif(not HAS_SENSORS_TEMPERATURES, reason="not supported")
def test_sensors_temperatures(self):
# Duplicate of test_system.py. Keep it anyway.
for name, units in psutil.sensors_temperatures().items():
assert isinstance(name, str)
for unit in units:
assert isinstance(unit.label, str)
assert isinstance(unit.current, (float, int, type(None)))
assert isinstance(unit.high, (float, int, type(None)))
assert isinstance(unit.critical, (float, int, type(None)))
def test_boot_time(self):
# Duplicate of test_system.py. Keep it anyway.
assert isinstance(psutil.boot_time(), float)
def test_users(self):
# Duplicate of test_system.py. Keep it anyway.
for user in psutil.users():
assert isinstance(user.name, str)
assert isinstance(user.terminal, (str, type(None)))
assert isinstance(user.host, (str, type(None)))
assert isinstance(user.pid, (int, type(None)))
if isinstance(user.pid, int):
assert user.pid > 0
| TestSystemAPITypes |
python | google__jax | tests/tree_util_test.py | {
"start": 2191,
"end": 2668
} | class ____:
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def __eq__(self, other):
return self.x == other.x and self.y == other.y and self.z == other.z
def __hash__(self):
return hash((self.x, self.y, self.z))
def __repr__(self):
return f"AnObject({self.x},{self.y},{self.z})"
tree_util.register_pytree_node(AnObject, lambda o: ((o.x, o.y), o.z),
lambda z, xy: AnObject(xy[0], xy[1], z))
| AnObject |
python | huggingface__transformers | tests/models/mixtral/test_modeling_mixtral.py | {
"start": 1241,
"end": 4080
} | class ____(CausalLMModelTest, unittest.TestCase):
model_tester_class = MixtralModelTester
# TODO (ydshieh): Check this. See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
return True
@require_flash_attn
@require_torch_accelerator
@pytest.mark.flash_attn_test
@slow
def test_flash_attn_2_inference_equivalence_right_padding(self):
self.skipTest(reason="Mixtral flash attention does not support right padding")
# Ignore copy
def test_load_balancing_loss(self):
r"""
Let's make sure we can actually compute the loss and do a backward on it.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.num_local_experts = 8
config.output_router_logits = True
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = MixtralForCausalLM(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask)
self.assertEqual(result.router_logits[0].shape, (91, config.num_local_experts))
torch.testing.assert_close(result.aux_loss.cpu(), torch.tensor(2, dtype=torch.float32), rtol=1e-2, atol=1e-2)
# First, we make sure that adding padding tokens doesn't change the loss
# loss(input_ids, attention_mask=None) == loss(input_ids + padding, attention_mask=attention_mask_with_padding)
pad_length = 1000
# Add padding tokens (assume that pad_token_id=1) to input_ids
padding_block = torch.ones(input_ids.shape[0], pad_length, dtype=torch.int32).to(torch_device)
padded_input_ids = torch.cat((padding_block, input_ids), dim=1) # this is to simulate padding to the left
padded_attention_mask = padded_input_ids.ne(1).to(torch_device)
padded_result = model(padded_input_ids, attention_mask=padded_attention_mask)
torch.testing.assert_close(result.aux_loss.cpu(), padded_result.aux_loss.cpu(), rtol=1e-4, atol=1e-4)
# We make sure that the loss of including padding tokens != the loss without padding tokens
# if attention_mask=None --> we don't exclude padding tokens
include_padding_result = model(padded_input_ids, attention_mask=None)
# This is to mimic torch.testing.assert_not_close
self.assertNotAlmostEqual(include_padding_result.aux_loss.item(), result.aux_loss.item())
@require_torch
| MixtralModelTest |
python | numpy__numpy | numpy/matrixlib/tests/test_regression.py | {
"start": 84,
"end": 934
} | class ____:
def test_kron_matrix(self):
# Ticket #71
x = np.matrix('[1 0; 1 0]')
assert_equal(type(np.kron(x, x)), type(x))
def test_matrix_properties(self):
# Ticket #125
a = np.matrix([1.0], dtype=float)
assert_(type(a.real) is np.matrix)
assert_(type(a.imag) is np.matrix)
c, d = np.matrix([0.0]).nonzero()
assert_(type(c) is np.ndarray)
assert_(type(d) is np.ndarray)
def test_matrix_multiply_by_1d_vector(self):
# Ticket #473
def mul():
np.asmatrix(np.eye(2)) * np.ones(2)
assert_raises(ValueError, mul)
def test_matrix_std_argmax(self):
# Ticket #83
x = np.asmatrix(np.random.uniform(0, 1, (3, 3)))
assert_equal(x.std().shape, ())
assert_equal(x.argmax().shape, ())
| TestRegression |
python | kamyu104__LeetCode-Solutions | Python/permutations-iv.py | {
"start": 824,
"end": 1602
} | class ____(object):
def permute(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[int]
"""
result = []
fact = [1]*(((n-1)+1)//2+1)
for i in xrange(len(fact)-1):
fact[i+1] = fact[i]*(i+1)
lookup = [False]*n
for i in xrange(n):
cnt = fact[(n-1-i)//2]*fact[((n-1-i)+1)//2]
for j in xrange(n):
if not (not lookup[j] and ((i == 0 and n%2 == 0) or (j+1)%2 == (1 if not result else (result[-1]%2)^1))):
continue
if k <= cnt:
break
k -= cnt
else:
return []
lookup[j] = True
result.append(j+1)
return result
| Solution2 |
python | pydata__xarray | xarray/core/indexing.py | {
"start": 76276,
"end": 79859
} | class ____(IndexingAdapter):
"""Wrap a CoordinateTransform as a lazy coordinate array.
Supports explicit indexing (both outer and vectorized).
"""
_transform: CoordinateTransform
_coord_name: Hashable
_dims: tuple[str, ...]
def __init__(
self,
transform: CoordinateTransform,
coord_name: Hashable,
dims: tuple[str, ...] | None = None,
):
self._transform = transform
self._coord_name = coord_name
self._dims = dims or transform.dims
@property
def dtype(self) -> np.dtype:
return self._transform.dtype
@property
def shape(self) -> tuple[int, ...]:
return tuple(self._transform.dim_size.values())
@property
def _in_memory(self) -> bool:
return False
def get_duck_array(self) -> np.ndarray:
all_coords = self._transform.generate_coords(dims=self._dims)
return np.asarray(all_coords[self._coord_name])
def _oindex_get(self, indexer: OuterIndexer):
expanded_indexer_ = OuterIndexer(expanded_indexer(indexer.tuple, self.ndim))
array_indexer = _arrayize_outer_indexer(expanded_indexer_, self.shape)
positions = np.meshgrid(*array_indexer.tuple, indexing="ij")
dim_positions = dict(zip(self._dims, positions, strict=False))
result = self._transform.forward(dim_positions)
return np.asarray(result[self._coord_name]).squeeze()
def _oindex_set(self, indexer: OuterIndexer, value: Any) -> None:
raise TypeError(
"setting values is not supported on coordinate transform arrays."
)
def _vindex_get(self, indexer: VectorizedIndexer):
expanded_indexer_ = VectorizedIndexer(
expanded_indexer(indexer.tuple, self.ndim)
)
array_indexer = _arrayize_vectorized_indexer(expanded_indexer_, self.shape)
dim_positions = {}
for i, (dim, pos) in enumerate(
zip(self._dims, array_indexer.tuple, strict=False)
):
pos = _posify_indices(pos, self.shape[i])
_check_bounds(pos, self.shape[i])
dim_positions[dim] = pos
result = self._transform.forward(dim_positions)
return np.asarray(result[self._coord_name])
def _vindex_set(self, indexer: VectorizedIndexer, value: Any) -> None:
raise TypeError(
"setting values is not supported on coordinate transform arrays."
)
def __getitem__(self, indexer: ExplicitIndexer):
# TODO: make it lazy (i.e., re-calculate and re-wrap the transform) when possible?
self._check_and_raise_if_non_basic_indexer(indexer)
# also works with basic indexing
return self._oindex_get(OuterIndexer(indexer.tuple))
def __setitem__(self, indexer: ExplicitIndexer, value: Any) -> None:
raise TypeError(
"setting values is not supported on coordinate transform arrays."
)
def transpose(self, order: Iterable[int]) -> Self:
new_dims = tuple(self._dims[i] for i in order)
return type(self)(self._transform, self._coord_name, new_dims)
def __repr__(self: Any) -> str:
return f"{type(self).__name__}(transform={self._transform!r})"
def _repr_inline_(self, max_width: int) -> str:
# we want to display values in the inline repr for this lazy coordinate
# `format_array_flat` prevents loading the whole array in memory.
from xarray.core.formatting import format_array_flat
return format_array_flat(self, max_width)
| CoordinateTransformIndexingAdapter |
python | mlflow__mlflow | mlflow/genai/datasets/evaluation_dataset.py | {
"start": 421,
"end": 10962
} | class ____(Dataset, PyFuncConvertibleDatasetMixin):
"""
The public API for evaluation datasets in MLflow's GenAI module.
This class provides a unified interface for evaluation datasets, supporting both:
- Standard MLflow evaluation datasets (backed by MLflow's tracking store)
- Databricks managed datasets (backed by Unity Catalog tables) through the
databricks-agents library
"""
def __init__(self, dataset):
"""
Initialize the wrapper with either a managed dataset or an MLflow dataset.
Args:
dataset: Either a Databricks managed dataset (databricks.agents.datasets.Dataset)
or an MLflow EvaluationDataset entity
(mlflow.entities.evaluation_dataset.EvaluationDataset).
The type is determined at runtime.
"""
if isinstance(dataset, _EntityEvaluationDataset):
self._databricks_dataset = None
self._mlflow_dataset = dataset
else:
self._databricks_dataset = dataset
self._mlflow_dataset = None
self._df = None
def __eq__(self, other):
"""Check equality with another dataset."""
if isinstance(other, _EntityEvaluationDataset) and self._mlflow_dataset:
return self._mlflow_dataset == other
if isinstance(other, EvaluationDataset):
if self._mlflow_dataset and other._mlflow_dataset:
return self._mlflow_dataset == other._mlflow_dataset
if self._databricks_dataset and other._databricks_dataset:
return self._databricks_dataset == other._databricks_dataset
return False
def __setattr__(self, name, value):
"""Allow setting internal attributes on the wrapped dataset."""
object.__setattr__(self, name, value)
if name == "_records" and hasattr(self, "_mlflow_dataset") and self._mlflow_dataset:
self._mlflow_dataset._records = value
def __getattr__(self, name):
"""
Dynamic attribute delegation for simple pass-through properties.
This handles attributes that don't require special logic and can be
directly delegated to the underlying dataset implementation.
"""
if name.startswith("_"):
raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
if self._mlflow_dataset and hasattr(self._mlflow_dataset, name):
return getattr(self._mlflow_dataset, name)
elif self._databricks_dataset and hasattr(self._databricks_dataset, name):
return getattr(self._databricks_dataset, name)
raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
@property
def digest(self) -> str | None:
"""String digest (hash) of the dataset provided by the caller that uniquely identifies"""
if self._mlflow_dataset:
return self._mlflow_dataset.digest
if self._databricks_dataset.digest is None:
from mlflow.data.digest_utils import compute_pandas_digest
return compute_pandas_digest(self.to_df())
return self._databricks_dataset.digest
@property
def name(self) -> str:
"""The name of the dataset."""
if self._mlflow_dataset:
return self._mlflow_dataset.name
return self._databricks_dataset.name if self._databricks_dataset else None
@property
def dataset_id(self) -> str:
"""The unique identifier of the dataset."""
if self._mlflow_dataset:
return self._mlflow_dataset.dataset_id
return self._databricks_dataset.dataset_id if self._databricks_dataset else None
@property
def source(self):
"""Source information for the dataset."""
if self._mlflow_dataset:
return self._mlflow_dataset.source
return DatabricksEvaluationDatasetSource(table_name=self.name, dataset_id=self.dataset_id)
@property
def source_type(self) -> str | None:
"""The type of the dataset source."""
if self._mlflow_dataset:
return self._mlflow_dataset.source._get_source_type()
return self._databricks_dataset.source_type
@property
def created_time(self) -> int | str | None:
"""The time the dataset was created."""
if self._mlflow_dataset:
return self._mlflow_dataset.created_time
return self._databricks_dataset.create_time
@property
def create_time(self) -> int | str | None:
"""Alias for created_time (for backward compatibility with managed datasets)."""
return self.created_time
@property
def tags(self) -> dict[str, Any] | None:
"""The tags for the dataset (MLflow only)."""
if self._mlflow_dataset:
return self._mlflow_dataset.tags
raise NotImplementedError(
"Tags are not available for Databricks managed datasets. "
"Tags are managed through Unity Catalog. Use Unity Catalog APIs to manage dataset tags."
)
@property
def experiment_ids(self) -> list[str]:
"""The experiment IDs associated with the dataset (MLflow only)."""
if self._mlflow_dataset:
return self._mlflow_dataset.experiment_ids
raise NotImplementedError(
"Experiment associations are not available for Databricks managed datasets. "
"Dataset associations are managed through Unity Catalog."
)
@property
def records(self):
"""The records in the dataset (MLflow only)."""
if self._mlflow_dataset:
return self._mlflow_dataset.records
raise NotImplementedError("Records access is not supported for Databricks managed datasets")
@property
def schema(self) -> str | None:
"""The schema of the dataset."""
if self._mlflow_dataset:
return self._mlflow_dataset.schema
return self._databricks_dataset.schema if self._databricks_dataset else None
@property
def profile(self) -> str | None:
"""The profile of the dataset."""
if self._mlflow_dataset:
return self._mlflow_dataset.profile
return self._databricks_dataset.profile if self._databricks_dataset else None
def set_profile(self, profile: str) -> "EvaluationDataset":
"""Set the profile of the dataset."""
if self._mlflow_dataset:
self._mlflow_dataset._profile = profile
return self
dataset = self._databricks_dataset.set_profile(profile)
return EvaluationDataset(dataset)
def merge_records(
self,
records: "list[dict[str, Any]] | pd.DataFrame | pyspark.sql.DataFrame",
) -> "EvaluationDataset":
"""Merge records into the dataset."""
if self._mlflow_dataset:
self._mlflow_dataset.merge_records(records)
return self
from mlflow.genai.datasets import _databricks_profile_env
with _databricks_profile_env():
dataset = self._databricks_dataset.merge_records(records)
return EvaluationDataset(dataset)
def to_df(self) -> "pd.DataFrame":
"""Convert the dataset to a pandas DataFrame."""
if self._mlflow_dataset:
return self._mlflow_dataset.to_df()
if self._df is None:
from mlflow.genai.datasets import _databricks_profile_env
with _databricks_profile_env():
self._df = self._databricks_dataset.to_df()
return self._df
def has_records(self) -> bool:
"""Check if dataset records are loaded without triggering a load."""
if self._mlflow_dataset:
return self._mlflow_dataset.has_records()
return self._df is not None
def to_dict(self) -> dict[str, Any]:
"""Convert to dictionary representation."""
if self._mlflow_dataset:
return self._mlflow_dataset.to_dict()
raise NotImplementedError(
"Serialization to dict is not supported for Databricks managed datasets. "
"Databricks datasets are persisted in Unity Catalog tables and don't "
"require serialization."
)
@classmethod
def from_dict(cls, data: dict[str, Any]) -> "EvaluationDataset":
"""
Create instance from dictionary representation.
Note: This creates an MLflow dataset from serialized data.
Databricks managed datasets are loaded directly from Unity Catalog, not from dict.
"""
mlflow_dataset = _EntityEvaluationDataset.from_dict(data)
return cls(mlflow_dataset)
def to_proto(self):
"""Convert to protobuf representation."""
if self._mlflow_dataset:
return self._mlflow_dataset.to_proto()
raise NotImplementedError(
"Protobuf serialization is not supported for Databricks managed datasets. "
"Databricks datasets are persisted in Unity Catalog tables and don't "
"require serialization."
)
@classmethod
def from_proto(cls, proto):
"""
Create instance from protobuf representation.
Note: This creates an MLflow dataset from serialized protobuf data.
Databricks managed datasets are loaded directly from Unity Catalog, not from protobuf.
"""
mlflow_dataset = _EntityEvaluationDataset.from_proto(proto)
return cls(mlflow_dataset)
def _to_pyfunc_dataset(self):
"""Support for PyFuncConvertibleDatasetMixin."""
return self.to_evaluation_dataset()
def to_evaluation_dataset(self, path=None, feature_names=None):
"""
Converts the dataset to the legacy EvaluationDataset for model evaluation.
Required for use with mlflow.evaluate().
"""
from mlflow.data.evaluation_dataset import EvaluationDataset as LegacyEvaluationDataset
return LegacyEvaluationDataset(
data=self.to_df(),
path=path,
feature_names=feature_names,
name=self.name,
digest=self.digest,
)
def _to_mlflow_entity(self):
"""Convert to MLflow Dataset entity for logging."""
from mlflow.entities import Dataset as DatasetEntity
return DatasetEntity(
name=self.name,
digest=self.digest,
source_type=self.source_type,
source=self.source.to_json(),
schema=self.schema,
profile=self.profile,
)
| EvaluationDataset |
python | FactoryBoy__factory_boy | tests/test_using.py | {
"start": 308,
"end": 722
} | class ____:
def __init__(self, one=None, two=None, three=None, four=None, five=None):
self.one = one
self.two = two
self.three = three
self.four = four
self.five = five
def as_dict(self):
return dict(
one=self.one,
two=self.two,
three=self.three,
four=self.four,
five=self.five,
)
| TestObject |
python | getsentry__sentry | src/sentry/integrations/source_code_management/commit_context.py | {
"start": 2752,
"end": 2825
} | class ____(SourceLineInfo):
commit: CommitInfo
@dataclass
| FileBlameInfo |
python | pytorch__pytorch | torch/export/_unlift.py | {
"start": 16644,
"end": 17121
} | class ____(type):
"""
Metaclass that ensures a private constructor for _StatefulGraphModule
"""
def __call__(cls, *args, **kwargs):
raise TypeError(
f"{cls.__module__}.{cls.__qualname__} has no public constructor. "
)
def _create(cls, root, graph, range_constraints=None):
return super().__call__(
root,
graph,
range_constraints=range_constraints,
)
| _StatefulGraphModuleFactory |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_M.py | {
"start": 14234,
"end": 15782
} | class ____(Benchmark):
r"""
Mishra 8 objective function.
This class defines the Mishra 8 [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Mishra08}}(x) = 0.001 \left[\lvert x_1^{10} - 20x_1^9
+ 180x_1^8 - 960 x_1^7 + 3360x_1^6 - 8064x_1^5 + 13340x_1^4 - 15360x_1^3
+ 11520x_1^2 - 5120x_1 + 2624 \rvert \lvert x_2^4 + 12x_2^3 + 54x_2^2
+ 108x_2 + 81 \rvert \right]^2
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [2, -3]`
.. [1] Mishra, S. Global Optimization by Differential Evolution and
Particle Swarm Methods: Evaluation on Some Benchmark Functions.
Munich Personal RePEc Archive, 2006, 1005
TODO Line 1065
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.custom_bounds = [(1.0, 2.0), (-4.0, 1.0)]
self.global_optimum = [[2.0, -3.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
val = abs(x[0] ** 10 - 20 * x[0] ** 9 + 180 * x[0] ** 8
- 960 * x[0] ** 7 + 3360 * x[0] ** 6 - 8064 * x[0] ** 5
+ 13340 * x[0] ** 4 - 15360 * x[0] ** 3 + 11520 * x[0] ** 2
- 5120 * x[0] + 2624)
val += abs(x[1] ** 4 + 12 * x[1] ** 3 +
54 * x[1] ** 2 + 108 * x[1] + 81)
return 0.001 * val ** 2
| Mishra08 |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/managed_kafka.py | {
"start": 38316,
"end": 41852
} | class ____(ManagedKafkaBaseOperator):
"""
List the consumer groups in a given cluster.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud region that the service belongs to.
:param cluster_id: Required. The ID of the cluster whose consumer groups are to be listed.
:param page_size: Optional. The maximum number of consumer groups to return. The service may return
fewer than this value. If unset or zero, all consumer groups for the parent is returned.
:param page_token: Optional. A page token, received from a previous ``ListConsumerGroups`` call.
Provide this to retrieve the subsequent page. When paginating, all other parameters provided to
``ListConsumerGroups`` must match the call that provided the page token.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = tuple({"cluster_id"} | set(ManagedKafkaBaseOperator.template_fields))
operator_extra_links = (ApacheKafkaClusterLink(),)
def __init__(
self,
cluster_id: str,
page_size: int | None = None,
page_token: str | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.cluster_id = cluster_id
self.page_size = page_size
self.page_token = page_token
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"location": self.location,
"cluster_id": self.cluster_id,
"project_id": self.project_id,
}
def execute(self, context: Context):
ApacheKafkaClusterLink.persist(context=context)
self.log.info("Listing Consumer Groups for cluster %s.", self.cluster_id)
try:
consumer_group_list_pager = self.hook.list_consumer_groups(
project_id=self.project_id,
location=self.location,
cluster_id=self.cluster_id,
page_size=self.page_size,
page_token=self.page_token,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
context["ti"].xcom_push(
key="consumer_group_page",
value=types.ListConsumerGroupsResponse.to_dict(consumer_group_list_pager._response),
)
except Exception as error:
raise AirflowException(error)
return [types.ConsumerGroup.to_dict(consumer_group) for consumer_group in consumer_group_list_pager]
| ManagedKafkaListConsumerGroupsOperator |
python | pyca__cryptography | src/cryptography/hazmat/primitives/asymmetric/ec.py | {
"start": 2354,
"end": 4205
} | class ____(metaclass=abc.ABCMeta):
@abc.abstractmethod
def exchange(
self, algorithm: ECDH, peer_public_key: EllipticCurvePublicKey
) -> bytes:
"""
Performs a key exchange operation using the provided algorithm with the
provided peer's public key.
"""
@abc.abstractmethod
def public_key(self) -> EllipticCurvePublicKey:
"""
The EllipticCurvePublicKey for this private key.
"""
@property
@abc.abstractmethod
def curve(self) -> EllipticCurve:
"""
The EllipticCurve that this key is on.
"""
@property
@abc.abstractmethod
def key_size(self) -> int:
"""
Bit size of a secret scalar for the curve.
"""
@abc.abstractmethod
def sign(
self,
data: utils.Buffer,
signature_algorithm: EllipticCurveSignatureAlgorithm,
) -> bytes:
"""
Signs the data
"""
@abc.abstractmethod
def private_numbers(self) -> EllipticCurvePrivateNumbers:
"""
Returns an EllipticCurvePrivateNumbers.
"""
@abc.abstractmethod
def private_bytes(
self,
encoding: _serialization.Encoding,
format: _serialization.PrivateFormat,
encryption_algorithm: _serialization.KeySerializationEncryption,
) -> bytes:
"""
Returns the key serialized as bytes.
"""
@abc.abstractmethod
def __copy__(self) -> EllipticCurvePrivateKey:
"""
Returns a copy.
"""
@abc.abstractmethod
def __deepcopy__(self, memo: dict) -> EllipticCurvePrivateKey:
"""
Returns a deep copy.
"""
EllipticCurvePrivateKeyWithSerialization = EllipticCurvePrivateKey
EllipticCurvePrivateKey.register(rust_openssl.ec.ECPrivateKey)
| EllipticCurvePrivateKey |
python | jazzband__django-model-utils | tests/models.py | {
"start": 2415,
"end": 2644
} | class ____(InheritanceManagerTestParent):
parent_ptr = models.OneToOneField(
InheritanceManagerTestParent, related_name='manual_onetoone',
parent_link=True, on_delete=models.CASCADE)
| InheritanceManagerTestChild3 |
python | astropy__astropy | astropy/coordinates/transformations/affine.py | {
"start": 12590,
"end": 14186
} | class ____(BaseAffineTransform):
"""
A coordinate transformation specified as a function that yields a
3 x 3 cartesian transformation matrix.
This is similar to, but distinct from StaticMatrixTransform, in that the
matrix for this class might depend on frame attributes.
Parameters
----------
matrix_func : callable
A callable that has the signature ``matrix_func(fromcoord, toframe)`` and
returns a 3 x 3 matrix that converts ``fromcoord`` in a cartesian
representation to the new coordinate system.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
The priority if this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``matrix_func`` is not callable
"""
def __init__(self, matrix_func, fromsys, tosys, priority=1, register_graph=None):
if not callable(matrix_func):
raise TypeError("matrix_func is not callable")
self.matrix_func = matrix_func
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def _affine_params(self, fromcoord, toframe):
return self.matrix_func(fromcoord, toframe), None
| DynamicMatrixTransform |
python | doocs__leetcode | lcof2/剑指 Offer II 103. 最少的硬币数目/Solution.py | {
"start": 0,
"end": 314
} | class ____:
def coinChange(self, coins: List[int], amount: int) -> int:
dp = [amount + 1] * (amount + 1)
dp[0] = 0
for coin in coins:
for j in range(coin, amount + 1):
dp[j] = min(dp[j], dp[j - coin] + 1)
return -1 if dp[-1] > amount else dp[-1]
| Solution |
python | ray-project__ray | python/ray/util/tpu.py | {
"start": 1477,
"end": 9154
} | class ____:
"""
A handle to a placement group reservation for a TPU slice.
The following definitions are added for clarity:
- Accelerator type: A string describing the accelerator type and version (e.g. TPU-V2, TPU-V6E).
- Accelerator version: The accelerator generation only (e.g. v6e, v5p, v5litepod).
- Pod type: The TPU accelerator version and the number of chips in a topology. (e.g. v6e-128, v5p-8).
- Accelerator topology: The physical topology representing the structure (e.g. 2x2x2, 16x16).
Args:
topology: The TPU topology string (e.g. "2x2x2").
accelerator_version: The TPU accelerator generation (e.g. "v6e", "v5p", "v4").
strategy: PlacementGroup parameter. The strategy to create the placement group. Currently default to "SPREAD"
- "PACK": Packs Bundles into as few nodes as possible.
- "SPREAD": Places Bundles across distinct nodes as even as possible.
- "STRICT_PACK": Packs Bundles into one node. The group is
not allowed to span multiple nodes.
- "STRICT_SPREAD": Packs Bundles across distinct nodes.
lifetime: PlacementGroup parameter. Either `None`, which defaults to the placement group
will fate share with its creator and will be deleted once its
creator is dead, or "detached", which means the placement group
will live as a global object independent of the creator.
num_slices: Number of TPU slices in the SlicePlacementGroup. Defaults to 1 when unspecified.
Examples:
.. testcode:: python
:skipif: True
import ray
from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy
from ray.util.tpu import SlicePlacementGroup
slice_handle = SlicePlacementGroup(topology="4x4", accelerator_version="v6e")
slice_pg = slice_handle.placement_group
ray.get(slice_pg.ready(), timeout=10)
@ray.remote(num_cpus=0, resources={'TPU': 4})
def spmd_task(world, rank):
print(f"Current TPU is rank {rank} of {world}")
tasks = [
spmd_task.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=slice_pg,
)
).remote(world=4, rank=i)
for i in range(slice_handle.num_workers)
]
"""
def __init__(
self,
topology: str,
accelerator_version: str,
# below are args related to PG
strategy: str = "SPREAD",
name: str = "",
lifetime: Optional[str] = None,
# default
num_slices=1,
):
self._topology = topology.strip().lower()
self._accelerator_version = accelerator_version.strip().lower()
self._num_slices = num_slices
self._validate_tpu_config()
# Reserve a TPU slice of the provided accelerator version and topology.
self._placement_group = self._reserve_slice(
strategy,
name,
lifetime,
)
def _accelerator_version_check(self, accelerator_version: str):
if accelerator_version not in VALID_TPU_TYPES:
raise ValueError(
f"Invalid accelerator version: {accelerator_version}. Must be one of: {VALID_TPU_TYPES}"
)
def _validate_tpu_config(self):
# Should validate topology and generation values, calculate and
# set self._num_workers, and self._chips_per_host, and return a
# ValueError if invalid.
self._accelerator_version_check(self.accelerator_version)
if not TPUAcceleratorManager.is_valid_tpu_accelerator_topology(
tpu_accelerator_version=self.accelerator_version,
tpu_topology=self._topology,
):
raise ValueError(
f"Invalid accelerator topology: '{self._topology}' for "
f"accelerator version: '{self.accelerator_version}'"
)
total_chips = 1
for value in self._topology.strip().lower().split("x"):
total_chips *= int(value)
self._chips_per_host = get_chips_per_host(
self._topology, self.accelerator_version
)
self._num_workers_per_slice = total_chips // self._chips_per_host
self._num_workers = self._num_workers_per_slice * self._num_slices
def _reserve_slice(
self,
strategy: str = "SPREAD",
name: str = "",
lifetime: Optional[str] = None,
) -> PlacementGroup:
"""Performs the two-step scheduling to reserve a TPU slice."""
bundle_label_selector = []
bundles = []
# Construct accelerator format for reserve_tpu_slice. e.g. From "v6e" to "TPU-V6E", "v5p" to "TPU-V5P".
accelerator_type = "TPU-" + self.accelerator_version.upper()
for _ in range(self.num_slices):
# Reserving a slice is done through constructing num_workers bundles, each with a label selector for
# the unique name of an available TPU slice.
slice_name = reserve_tpu_slice(self._topology, accelerator_type)
bundle_label_selector += [
{ray._raylet.RAY_NODE_TPU_SLICE_NAME_KEY: slice_name}
] * self._num_workers_per_slice
bundles += [{"TPU": self._chips_per_host}] * self._num_workers_per_slice
pg = placement_group(
bundles=bundles,
strategy=strategy,
name=name,
lifetime=lifetime,
bundle_label_selector=bundle_label_selector,
)
return pg
@property
def placement_group(self) -> PlacementGroup:
"""The underlying PlacementGroup object."""
return self._placement_group
@property
def chips_per_host(self) -> int:
"""The number of chips per host for this TPU slice."""
# This is the same value as resources per worker for TPU.
return self._chips_per_host
@property
def num_workers(self) -> int:
"""The total number of hosts in the SlicePlacementGroup."""
return self._num_workers
@property
def topology(self) -> str:
"""The physical topology of the TPU slice."""
return self._topology
@property
def accelerator_version(self) -> str:
"""The TPU accelerator type of the slice."""
return self._accelerator_version
@property
def num_slices(self) -> int:
"""The number of TPU slices this SlicePlacementGroup spans."""
return self._num_slices
@PublicAPI(stability="alpha")
@client_mode_wrap
def slice_placement_group(
topology: str,
accelerator_version: str,
num_slices: int = 1,
**kwargs,
) -> SlicePlacementGroup:
"""Asynchronously creates a PlacementGroup for a TPU slice.
A slice placement group reserves num_slices TPU slice(s) and creates a placement
group for scheduling tasks.
Args:
topology: The desired TPU pod topology (e.g. "4x4", "2x8").
accelerator_version: The TPU accelerator generation, (e.g. "V4", "V5P", "V6E").
num_slices: The number of tpu slices within the placement group
**kwargs: Additional arguments for the placement group, such as 'name', 'lifetime', or 'strategy'.
Returns:
The handle for the created SlicePlacementGroup.
"""
return SlicePlacementGroup(
topology=topology,
accelerator_version=accelerator_version,
num_slices=num_slices,
**kwargs,
)
| SlicePlacementGroup |
python | astropy__astropy | astropy/coordinates/tests/test_transformations.py | {
"start": 7489,
"end": 21467
} | class ____:
rep = r.CartesianRepresentation(np.arange(3) * u.pc)
dif = r.CartesianDifferential(*np.arange(3, 6) * u.pc / u.Myr)
rep0 = r.CartesianRepresentation(np.zeros(3) * u.pc)
@classmethod
def both(cls, coo, fr):
# exchange x <-> z and offset
M = np.array([[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])
return M, cls.rep.with_differentials(cls.dif)
@classmethod
def just_matrix(cls, coo, fr):
# exchange x <-> z and offset
M = np.array([[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])
return M, None
@classmethod
def no_matrix(cls, coo, fr):
return None, cls.rep.with_differentials(cls.dif)
@classmethod
def no_pos(cls, coo, fr):
return None, cls.rep0.with_differentials(cls.dif)
@classmethod
def no_vel(cls, coo, fr):
return None, cls.rep
@pytest.mark.parametrize(
"transfunc",
[
transfunc.both,
transfunc.no_matrix,
transfunc.no_pos,
transfunc.no_vel,
transfunc.just_matrix,
],
)
@pytest.mark.parametrize(
"rep",
(
CARTESIAN_POS,
CARTESIAN_POS_AND_VEL,
CARTESIAN_POS_AND_VEL.represent_as(
r.CylindricalRepresentation, r.CylindricalDifferential
),
),
)
def test_affine_transform_succeed(transfunc, rep):
c = TCoo1(rep)
# compute expected output
M, offset = transfunc(c, TCoo2)
expected_rep = rep.to_cartesian().with_differentials(
{
k: diff.represent_as(r.CartesianDifferential, rep)
for k, diff in rep.differentials.items()
}
)
if M is not None:
expected_rep = expected_rep.transform(M)
expected_pos = expected_rep.without_differentials()
if offset is not None:
expected_pos += offset.without_differentials()
expected_vel = None
if c.data.differentials:
expected_vel = expected_rep.differentials["s"]
if offset and offset.differentials:
expected_vel += offset.differentials["s"]
# register and do the transformation and check against expected
trans = AffineTransform(transfunc, TCoo1, TCoo2)
trans.register(frame_transform_graph)
c2 = c.transform_to(TCoo2())
assert quantity_allclose(
c2.data.to_cartesian().xyz, expected_pos.to_cartesian().xyz
)
if expected_vel is not None:
diff = c2.data.differentials["s"].to_cartesian(base=c2.data)
assert quantity_allclose(diff.xyz, expected_vel.d_xyz)
trans.unregister(frame_transform_graph)
# these should fail
def transfunc_invalid_matrix(coo, fr):
return np.eye(4), None
# Leaving this open in case we want to add more functions to check for failures
@pytest.mark.parametrize("transfunc", [transfunc_invalid_matrix])
def test_affine_transform_fail(transfunc):
c = TCoo1(CARTESIAN_POS_AND_VEL)
# register and do the transformation and check against expected
trans = AffineTransform(transfunc, TCoo1, TCoo2)
trans.register(frame_transform_graph)
with pytest.raises(ValueError):
c.transform_to(TCoo2())
trans.unregister(frame_transform_graph)
def test_too_many_differentials():
dif2 = r.CartesianDifferential(*np.arange(3, 6) * u.pc / u.Myr**2)
rep = CARTESIAN_POS_AND_VEL.with_differentials(dif2)
with pytest.raises(ValueError):
c = TCoo1(rep)
# register and do the transformation and check against expected
trans = AffineTransform(transfunc.both, TCoo1, TCoo2)
trans.register(frame_transform_graph)
# Check that if frame somehow gets through to transformation, multiple
# differentials are caught
c = TCoo1(rep.without_differentials())
c._data = c._data.with_differentials({"s": CARTESIAN_VEL, "s2": dif2})
with pytest.raises(ValueError):
c.transform_to(TCoo2())
trans.unregister(frame_transform_graph)
# A matrix transform of a unit spherical with differentials should work
@pytest.mark.parametrize(
"rep",
(
UNIT_SPHERICAL_POS.with_differentials(SPHERICAL_COS_LAT_VEL),
r.UnitSphericalRepresentation(
UNIT_SPHERICAL_POS, differentials={"s": RADIAL_VEL}
),
SPHERICAL_POS.with_differentials(RADIAL_VEL),
),
)
def test_unit_spherical_with_differentials(rep):
c = TCoo1(rep)
# register and do the transformation and check against expected
trans = AffineTransform(transfunc.just_matrix, TCoo1, TCoo2)
trans.register(frame_transform_graph)
c2 = c.transform_to(TCoo2())
assert "s" in rep.differentials
assert isinstance(c2.data.differentials["s"], type(rep.differentials["s"]))
if isinstance(rep.differentials["s"], r.RadialDifferential):
assert c2.data.differentials["s"] is rep.differentials["s"]
trans.unregister(frame_transform_graph)
# should fail if we have to do offsets
trans = AffineTransform(transfunc.both, TCoo1, TCoo2)
trans.register(frame_transform_graph)
with pytest.raises(TypeError):
c.transform_to(TCoo2())
trans.unregister(frame_transform_graph)
def test_vel_transformation_obstime_err():
# TODO: replace after a final decision on PR #6280
diff = r.CartesianDifferential([0.1, 0.2, 0.3] * u.km / u.s)
rep = r.CartesianRepresentation([1, 2, 3] * u.au, differentials=diff)
aaf = AltAz(obstime="J2010", location=_GREENWICH)
aaf2 = AltAz(obstime=aaf.obstime + 3 * u.day, location=_GREENWICH)
aaf3 = AltAz(obstime=aaf.obstime + np.arange(3) * u.day, location=_GREENWICH)
aaf4 = AltAz(obstime=aaf.obstime, location=_GREENWICH)
aa = aaf.realize_frame(rep)
with pytest.raises(NotImplementedError) as exc:
aa.transform_to(aaf2)
assert "cannot transform" in exc.value.args[0]
with pytest.raises(NotImplementedError) as exc:
aa.transform_to(aaf3)
assert "cannot transform" in exc.value.args[0]
aa.transform_to(aaf4)
aa.transform_to(ICRS())
def test_function_transform_with_differentials():
def tfun(c, f):
return f.__class__(ra=c.ra, dec=c.dec)
_ = FunctionTransform(tfun, TCoo3, TCoo2, register_graph=frame_transform_graph)
t3 = TCoo3(
ra=1 * u.deg,
dec=2 * u.deg,
pm_ra_cosdec=1 * u.marcsec / u.yr,
pm_dec=1 * u.marcsec / u.yr,
)
with pytest.warns(AstropyWarning, match=r".*they have been dropped.*") as w:
t3.transform_to(TCoo2())
assert len(w) == 1
def test_frame_override_component_with_attribute():
"""
It was previously possible to define a frame with an attribute with the
same name as a component. We don't want to allow this!
"""
class BorkedFrame(BaseCoordinateFrame):
ra = Attribute(default=150)
dec = Attribute(default=150)
def trans_func(coo1, f):
pass
trans = FunctionTransform(trans_func, BorkedFrame, ICRS)
with pytest.raises(ValueError) as exc:
trans.register(frame_transform_graph)
assert (
"BorkedFrame" in exc.value.args[0]
and "'ra'" in exc.value.args[0]
and "'dec'" in exc.value.args[0]
)
def test_static_matrix_combine_paths():
"""
Check that combined staticmatrixtransform matrices provide the same
transformation as using an intermediate transformation.
This is somewhat of a regression test for #7706
"""
class AFrame(BaseCoordinateFrame):
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
t1 = StaticMatrixTransform(rotation_matrix(30.0 * u.deg, "z"), ICRS, AFrame)
t1.register(frame_transform_graph)
t2 = StaticMatrixTransform(rotation_matrix(30.0 * u.deg, "z").T, AFrame, ICRS)
t2.register(frame_transform_graph)
class BFrame(BaseCoordinateFrame):
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
t3 = StaticMatrixTransform(rotation_matrix(30.0 * u.deg, "x"), ICRS, BFrame)
t3.register(frame_transform_graph)
t4 = StaticMatrixTransform(rotation_matrix(30.0 * u.deg, "x").T, BFrame, ICRS)
t4.register(frame_transform_graph)
c = Galactic(123 * u.deg, 45 * u.deg)
c_direct = c.transform_to(BFrame())
c_through_A = c.transform_to(AFrame()).transform_to(BFrame())
c_through_ICRS = c.transform_to(ICRS()).transform_to(BFrame())
assert quantity_allclose(c_direct.lon, c_through_A.lon)
assert quantity_allclose(c_direct.lat, c_through_A.lat)
assert quantity_allclose(c_direct.lon, c_through_ICRS.lon)
assert quantity_allclose(c_direct.lat, c_through_ICRS.lat)
for t_ in [t1, t2, t3, t4]:
t_.unregister(frame_transform_graph)
def test_multiple_aliases():
# Define a frame with multiple aliases
class MultipleAliasesFrame(BaseCoordinateFrame):
name = ["alias_1", "alias_2"]
default_representation = r.SphericalRepresentation
def tfun(c, f):
return f.__class__(lon=c.lon, lat=c.lat)
# Register a transform
graph = TransformGraph()
_ = FunctionTransform(
tfun, MultipleAliasesFrame, MultipleAliasesFrame, register_graph=graph
)
# Test that both aliases have been added to the transform graph
assert graph.lookup_name("alias_1") == MultipleAliasesFrame
assert graph.lookup_name("alias_2") == MultipleAliasesFrame
# Test that both aliases appear in the graphviz DOT format output
dotstr = graph.to_dot_graph()
assert "`alias_1`\\n`alias_2`" in dotstr
def test_remove_transform_and_unregister():
def tfun(c, f):
f.__class__(ra=c.ra, dec=c.dec)
# Register transforms
graph = TransformGraph()
ftrans1 = FunctionTransform(tfun, TCoo1, TCoo1, register_graph=graph)
ftrans2 = FunctionTransform(tfun, TCoo2, TCoo2, register_graph=graph)
_ = FunctionTransform(tfun, TCoo1, TCoo2, register_graph=graph)
# Confirm that the frames are part of the graph
assert TCoo1 in graph.frame_set
assert TCoo2 in graph.frame_set
# Use all three ways to remove a transform
# Remove the only transform with TCoo2 as the "from" frame
ftrans2.unregister(graph)
# TCoo2 should still be part of the graph because it is the "to" frame of a transform
assert TCoo2 in graph.frame_set
# Remove the remaining transform that involves TCoo2
graph.remove_transform(TCoo1, TCoo2, None)
# Now TCoo2 should not be part of the graph
assert TCoo2 not in graph.frame_set
# Remove the remaining transform that involves TCoo1
graph.remove_transform(None, None, ftrans1)
# Now TCoo1 should not be part of the graph
assert TCoo1 not in graph.frame_set
def test_remove_transform_errors():
def tfun(c, f):
return f.__class__(ra=c.ra, dec=c.dec)
graph = TransformGraph()
_ = FunctionTransform(tfun, TCoo1, TCoo1, register_graph=graph)
# Test bad calls to remove_transform
with pytest.raises(ValueError):
graph.remove_transform(None, TCoo1, None)
with pytest.raises(ValueError):
graph.remove_transform(TCoo1, None, None)
with pytest.raises(ValueError):
graph.remove_transform(None, None, None)
with pytest.raises(ValueError):
graph.remove_transform(None, None, 1)
with pytest.raises(ValueError):
graph.remove_transform(TCoo1, TCoo1, 1)
def test_impose_finite_difference_dt():
class H1(HCRS):
pass
class H2(HCRS):
pass
class H3(HCRS):
pass
graph = TransformGraph()
tfun = lambda c, f: type(f)(ra=c.ra, dec=c.dec)
# Set up a number of transforms with different time steps
old_dt = 1 * u.min
transform1 = FunctionTransformWithFiniteDifference(
tfun, H1, H1, register_graph=graph, finite_difference_dt=old_dt
)
transform2 = FunctionTransformWithFiniteDifference(
tfun, H2, H2, register_graph=graph, finite_difference_dt=old_dt * 2
)
transform3 = FunctionTransformWithFiniteDifference(
tfun, H2, H3, register_graph=graph, finite_difference_dt=old_dt * 3
)
# Check that all of the transforms have the same new time step
new_dt = 1 * u.yr
with graph.impose_finite_difference_dt(new_dt):
assert transform1.finite_difference_dt == new_dt
assert transform2.finite_difference_dt == new_dt
assert transform3.finite_difference_dt == new_dt
# Check that all of the original time steps have been restored
assert transform1.finite_difference_dt == old_dt
assert transform2.finite_difference_dt == old_dt * 2
assert transform3.finite_difference_dt == old_dt * 3
@pytest.mark.parametrize(
"first,second,check",
(
([ROT_30, None], [ROT_45, None], [ROT_75, None]),
([ROT_30, None], [ROT_45, OFFSET_Z], [ROT_75, OFFSET_Z]),
([ROT_30, OFFSET_123], [None, OFFSET_456], [ROT_30, OFFSET_579]),
([None, OFFSET_123], [None, OFFSET_456], [None, OFFSET_579]),
([ROT_30, OFFSET_X], [None, None], [ROT_30, OFFSET_X]),
([None, None], [ROT_45, OFFSET_Z], [ROT_45, OFFSET_Z]),
([None, None], [None, None], [None, None]),
(
[ROT_30, OFFSET_X],
[ROT_45, None],
[ROT_75, r.CartesianRepresentation([1 / SQRT_2, -1 / SQRT_2, 0])],
),
(
[ROT_30, OFFSET_X],
[ROT_45, OFFSET_Z],
[ROT_75, r.CartesianRepresentation([1 / SQRT_2, -1 / SQRT_2, 1])],
),
(
[None, OFFSET_123],
[ROT_45, OFFSET_456],
[ROT_45, r.CartesianRepresentation([3 / SQRT_2 + 4, 1 / SQRT_2 + 5, 9])],
),
),
)
def test_combine_affine_params(first, second, check):
result = _combine_affine_params(first, second)
if check[0] is None:
assert result[0] is None
else:
assert_allclose(result[0], check[0])
if check[1] is None:
assert result[1] is None
else:
assert_allclose(result[1].xyz, check[1].xyz)
| transfunc |
python | ray-project__ray | rllib/policy/tests/test_compute_log_likelihoods.py | {
"start": 3774,
"end": 5094
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
ray.init()
@classmethod
def tearDownClass(cls) -> None:
ray.shutdown()
def test_ppo_cont(self):
"""Tests PPO's (cont. actions) compute_log_likelihoods method."""
config = (
ppo.PPOConfig()
.api_stack(
enable_env_runner_and_connector_v2=False,
enable_rl_module_and_learner=False,
)
.training(
model={
"fcnet_hiddens": [10],
"fcnet_activation": "linear",
}
)
.debugging(seed=42)
)
prev_a = np.array([0.0])
do_test_log_likelihood(ppo.PPO, config, prev_a, continuous=True)
def test_ppo_discr(self):
"""Tests PPO's (discr. actions) compute_log_likelihoods method."""
config = ppo.PPOConfig()
config.api_stack(
enable_env_runner_and_connector_v2=False,
enable_rl_module_and_learner=False,
)
config.debugging(seed=42)
prev_a = np.array(0)
do_test_log_likelihood(ppo.PPO, config, prev_a)
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| TestComputeLogLikelihood |
python | networkx__networkx | networkx/classes/graph.py | {
"start": 1492,
"end": 2326
} | class ____:
"""Data Descriptor class for _node that resets ``nodes`` cached_property when needed
This assumes that the ``cached_property`` ``G.node`` should be reset whenever
``G._node`` is set to a new value.
This object sits on a class and ensures that any instance of that
class clears its cached property "nodes" whenever the underlying
instance attribute "_node" is set to a new object. It only affects
the set process of the obj._adj attribute. All get/del operations
act as they normally would.
For info on Data Descriptors see: https://docs.python.org/3/howto/descriptor.html
"""
def __set__(self, obj, value):
od = obj.__dict__
od["_node"] = value
# reset cached properties
if "nodes" in od:
del od["nodes"]
| _CachedPropertyResetterNode |
python | google__pytype | pytype/tools/analyze_project/config_test.py | {
"start": 3813,
"end": 6066
} | class ____(unittest.TestCase):
"""Test config.generate_sample_config_or_die."""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.parser = parse_args.make_parser()
def test_bad_location(self):
with self.assertRaises(SystemExit):
config.generate_sample_config_or_die(
file_utils.replace_separator('/does/not/exist/sample.cfg'),
self.parser.pytype_single_args)
def test_existing_file(self):
with test_utils.Tempdir() as d:
f = d.create_file('sample.cfg')
with self.assertRaises(SystemExit):
config.generate_sample_config_or_die(f, self.parser.pytype_single_args)
def test_generate(self):
conf = config.FileConfig()
with test_utils.Tempdir() as d:
f = path_utils.join(d.path, 'sample.cfg')
config.generate_sample_config_or_die(f, self.parser.pytype_single_args)
# Test that we've generated a valid config and spot-check a pytype-all
# and a pytype-single argument.
conf.read_from_file(f)
with file_utils.cd(d.path):
expected_pythonpath = [
path_utils.realpath(p)
for p in config.ITEMS['pythonpath'].sample.split(os.pathsep)
]
expected_protocols = config._PYTYPE_SINGLE_ITEMS['protocols'].sample
self.assertEqual(conf.pythonpath, expected_pythonpath)
self.assertEqual(conf.protocols, expected_protocols)
self.assertEqual(conf.python_version,
'{}.{}'.format(*sys.version_info[:2]))
def test_read(self):
with test_utils.Tempdir() as d:
f = path_utils.join(d.path, 'test.cfg')
config.generate_sample_config_or_die(f, self.parser.pytype_single_args)
conf = config.read_config_file_or_die(f)
# Smoke test and spot check for string conversion and postprocessing
self.parser.convert_strings(conf)
self.parser.postprocess(conf)
self.assertIsInstance(conf.report_errors, bool)
def test_keep_going_file_default(self):
conf = config.FileConfig()
with test_utils.Tempdir() as d:
f = path_utils.join(d.path, 'sample.cfg')
config.generate_sample_config_or_die(f, self.parser.pytype_single_args)
conf.read_from_file(f)
self.assertIsInstance(conf.keep_going, bool)
| TestGenerateConfig |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.