language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | more-itertools__more-itertools | tests/test_recipes.py | {
"start": 31208,
"end": 31964
} | class ____(TestCase):
def test_basic(self):
for coefficients, x, expected in [
([1, -4, -17, 60], 2, 18),
([1, -4, -17, 60], 2.5, 8.125),
([1, -4, -17, 60], Fraction(2, 3), Fraction(1274, 27)),
([1, -4, -17, 60], Decimal('1.75'), Decimal('23.359375')),
([], 2, 0),
([], 2.5, 0.0),
([], Fraction(2, 3), Fraction(0, 1)),
([], Decimal('1.75'), Decimal('0.00')),
([11], 7, 11),
([11, 2], 7, 79),
]:
with self.subTest(x=x):
actual = mi.polynomial_eval(coefficients, x)
self.assertEqual(actual, expected)
self.assertEqual(type(actual), type(x))
| PolynomialEvalTests |
python | pypa__pip | tests/unit/test_configuration.py | {
"start": 7240,
"end": 10245
} | class ____(ConfigurationMixin):
# Tests for methods to that modify the state of a Configuration
def test_no_specific_given_modification(self) -> None:
self.configuration.load()
with pytest.raises(ConfigurationError):
self.configuration.set_value("test.hello", "10")
def test_site_modification(self) -> None:
self.configuration.load_only = kinds.SITE
self.configuration.load()
# Mock out the method
mymock = MagicMock(spec=self.configuration._mark_as_modified)
# https://github.com/python/mypy/issues/2427
self.configuration._mark_as_modified = mymock # type: ignore[method-assign]
self.configuration.set_value("test.hello", "10")
# get the path to site config file
assert mymock.call_count == 1
assert mymock.call_args[0][0] == (get_configuration_files()[kinds.SITE][0])
def test_user_modification(self) -> None:
# get the path to local config file
self.configuration.load_only = kinds.USER
self.configuration.load()
# Mock out the method
mymock = MagicMock(spec=self.configuration._mark_as_modified)
# https://github.com/python/mypy/issues/2427
self.configuration._mark_as_modified = mymock # type: ignore[method-assign]
self.configuration.set_value("test.hello", "10")
# get the path to user config file
assert mymock.call_count == 1
assert mymock.call_args[0][0] == (
# Use new config file
get_configuration_files()[kinds.USER][1]
)
def test_global_modification(self) -> None:
# get the path to local config file
self.configuration.load_only = kinds.GLOBAL
self.configuration.load()
# Mock out the method
mymock = MagicMock(spec=self.configuration._mark_as_modified)
# https://github.com/python/mypy/issues/2427
self.configuration._mark_as_modified = mymock # type: ignore[method-assign]
self.configuration.set_value("test.hello", "10")
# get the path to user config file
assert mymock.call_count == 1
assert mymock.call_args[0][0] == (get_configuration_files()[kinds.GLOBAL][-1])
def test_normalization(self) -> None:
# underscores and dashes can be used interchangeably.
# internally, underscores get converted into dashes before reading/writing file
self.configuration.load_only = kinds.GLOBAL
self.configuration.load()
self.configuration.set_value("global.index_url", "example.org")
assert self.configuration.get_value("global.index_url") == "example.org"
assert self.configuration.get_value("global.index-url") == "example.org"
self.configuration.unset_value("global.index-url")
pat = r"^No such key - global\.index-url$"
with pytest.raises(ConfigurationError, match=pat):
self.configuration.get_value("global.index-url")
| TestConfigurationModification |
python | getsentry__sentry | src/sentry/grouping/fingerprinting/utils.py | {
"start": 637,
"end": 715
} | class ____(TypedDict):
type: str | None
value: str | None
| _ExceptionInfo |
python | walkccc__LeetCode | solutions/3190. Find Minimum Operations to Make All Elements Divisible by Three/3190.py | {
"start": 0,
"end": 116
} | class ____:
def minimumOperations(self, nums: list[int]) -> int:
return sum(num % 3 != 0 for num in nums)
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/functions.py | {
"start": 60234,
"end": 60393
} | class ____(AnsiFunction[datetime.datetime]):
"""The localtimestamp() SQL function."""
type = sqltypes.DateTime()
inherit_cache = True
| localtimestamp |
python | pytorch__pytorch | torch/optim/_adafactor.py | {
"start": 383,
"end": 28633
} | class ____(Optimizer):
def __init__(
self,
params: ParamsT,
lr: Union[float, Tensor] = 1e-2,
beta2_decay: float = -0.8,
eps: tuple[Optional[float], float] = (None, 1e-3),
d: float = 1.0,
weight_decay: float = 0.0,
*,
foreach: Optional[bool] = None,
maximize: bool = False,
) -> None:
if isinstance(lr, Tensor) and lr.numel() != 1:
raise ValueError("Tensor lr must be 1-element")
if not 0.0 <= lr:
raise ValueError(f"Learning rate should be >= 0 but is: {lr}")
if not 0.0 >= beta2_decay:
raise ValueError(f"beta2_decay should be <= 0 but is: {beta2_decay}")
if eps[0] is not None and not 0.0 <= eps[0]:
raise ValueError(f"epsilon1 should be >= 0 but is: {eps[0]}")
if not 0.0 <= eps[1]:
raise ValueError(f"epsilon2 should be >= 0 but is: {eps[1]}")
if not 1.0 <= d:
raise ValueError(f"Clipping threshold d should be >= 1 but is: {d}")
if not 0.0 <= weight_decay:
raise ValueError(f"weight_decay should be >= 0 but is: {weight_decay}")
defaults = {
"lr": lr,
"beta2_decay": beta2_decay,
"eps": eps,
"d": d,
"weight_decay": weight_decay,
"foreach": foreach,
"maximize": maximize,
}
super().__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
group.setdefault("foreach", None)
for p in group["params"]:
p_state = self.state.get(p, [])
if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
step_val = float(p_state["step"])
p_state["step"] = torch.tensor(step_val, dtype=_get_scalar_dtype())
def _init_group(
self,
group,
params_with_grad,
grads,
row_vars,
col_vars,
variances,
state_steps,
) -> bool:
for p in group["params"]:
if p.grad is None:
continue
if torch.is_complex(p):
raise RuntimeError("Adafactor does not support complex parameters")
if p.grad.is_sparse:
raise RuntimeError("Adafactor does not support sparse gradients")
params_with_grad.append(p)
grads.append(p.grad)
state = self.state[p]
# State initialization
if len(state) == 0:
# note(crcrpar): Deliberately host `step` on CPU if both capturable and fused are off.
# This is because kernel launches are costly on CUDA and XLA.
state["step"] = torch.tensor(0.0, dtype=_get_scalar_dtype())
if p.grad.dim() > 1:
row_shape = list(p.grad.shape)
row_shape[-1] = 1
# Row factor of variance, NOT the same shape as grads (will be reduced along last dim)
state["row_var"] = p.grad.new_zeros(row_shape)
col_shape = list(p.grad.shape)
col_shape[-2] = 1
# Col factor of variance, NOT the same shape as grads (will be reduced along penultimate dim)
state["col_var"] = p.grad.new_zeros(col_shape)
else:
state["variance"] = torch.zeros_like(
p.grad, memory_format=torch.preserve_format
)
row_vars.append(state.get("row_var", None))
col_vars.append(state.get("col_var", None))
variances.append(state.get("variance", None))
state_steps.append(state["step"])
return False # has_complex
@torch.no_grad()
def step(self, closure=None):
r"""Perform a single optimization step.
Args:
closure (Callable, optional): A closure that reevaluates the model
and returns the loss.
"""
self._cuda_graph_capture_health_check()
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad: list[Tensor] = []
grads: list[Tensor] = []
row_vars: list[Optional[Tensor]] = []
col_vars: list[Optional[Tensor]] = []
variances: list[Optional[Tensor]] = []
state_steps: list[Tensor] = []
eps1, eps2 = group["eps"]
has_complex = self._init_group(
group,
params_with_grad,
grads,
row_vars,
col_vars,
variances,
state_steps,
)
adafactor(
params_with_grad,
grads,
row_vars,
col_vars,
variances,
state_steps,
d=group["d"],
lr=group["lr"],
beta2_decay=group["beta2_decay"],
weight_decay=group["weight_decay"],
eps1=eps1,
eps2=eps2,
foreach=group["foreach"],
maximize=group["maximize"],
grad_scale=getattr(self, "grad_scale", None),
found_inf=getattr(self, "found_inf", None),
has_complex=has_complex,
)
return loss
Adafactor.__doc__ = (
r"""Implements Adafactor algorithm.
.. math::
\begin{aligned}
&\rule{110mm}{0.4pt} \\
&\textbf{input} : \gamma \text{(lr)}, \: \tau
\text{(}\beta_2\text{ decay)}, \: \theta_0 \text{(params)}, \: f(\theta) \text{(objective)}, \\
&\hspace{15mm} \: \epsilon_1, \epsilon_2 \text{ (epsilons)}, \: d \text{(clipping threshold)}, \\
&\hspace{15mm} \: \lambda \text{(weight decay)},
\: \textit{maximize} \\
&\textbf{initialize} : \: R_0 \leftarrow 0 \text{ (second moment row factor)}, \\
&\hspace{23mm} \: C_0 \leftarrow 0 \text{ (second moment col factor)}, \\
&\hspace{23mm} \: \widehat{V}_0 \leftarrow 0 \text{ (second moment for vectors)} \\[-1.ex]
&\rule{110mm}{0.4pt} \\
&\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
&\hspace{5mm}\textbf{if} \: \textit{maximize}: \\
&\hspace{10mm}G_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\
&\hspace{5mm}\textbf{else} \\
&\hspace{10mm}G_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
&\hspace{5mm}\widehat{\beta}_{2_t} \leftarrow 1 - t^{\tau} \\
&\hspace{5mm}\rho_t \leftarrow min(lr, \frac{1}{\sqrt{t}}) \\
&\hspace{5mm}\alpha_t \leftarrow max(\epsilon_2,
\text{RMS}(\theta_{t-1}))\rho_t \\
&\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1} \\
&\hspace{5mm}\textbf{if} \: \text{dim}(G_t) > 1: \\
&\hspace{10mm}R_t \leftarrow \widehat{\beta}_{2_t}R_{t-1}+
(1-\widehat{\beta}_{2_t})(G_t \odot G_t) \cdot 1_m \\
&\hspace{10mm}C_t \leftarrow \widehat{\beta}_{2_t}C_{t-1}+
(1-\widehat{\beta}_{2_t}) 1^\top_n \cdot (G_t \odot G_t) \\
&\hspace{10mm}\widehat{V}_t \leftarrow
\frac{R_t \cdot C_t}{max(1^\top_n \cdot R_t, \epsilon_1)} \\
&\hspace{5mm}\textbf{else} \\
&\hspace{10mm}\widehat{V}_t \leftarrow \widehat{\beta}_{2_t}\widehat{V}_{t-1}+
(1-\widehat{\beta}_{2_t}) \cdot (G_t \odot G_t) \\
&\hspace{5mm}U_t \leftarrow
\frac{G_t}{max(\sqrt{\widehat{V}_t}, \epsilon_1)} \\
&\hspace{5mm}\widehat{U}_t \leftarrow \frac{U_t}{max(1, \frac{\text{RMS}(U_t)}{d})} \\
&\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \alpha_t \widehat{U}_t \\
&\rule{110mm}{0.4pt} \\[-1.ex]
&\bf{return} \: \theta_t \\[-1.ex]
&\rule{110mm}{0.4pt} \\[-1.ex]
\end{aligned}
For further details regarding the algorithm we refer to `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`_.
"""
+ rf"""
Args:
{_params_doc}
lr (float, Tensor, optional): unlike other optimizers, Adafactor does not require a
learning rate, and Noam Shazeer and Mitchell Stern do not use lr at all.
Deviating from the paper, this implementation uses lr for applying weight
decay and as the maximum value for relative step size rho_t. Note that in
the paper, a constant of 0.01 is used as the maximum value for relative
step size, and so we set 0.01 as the default value. (default: 1e-2)
beta2_decay (float, optional): the decay rate of beta2. beta2 standardly refers
to the coefficient used for computing the running average of the gradient
squared. (default: -0.8)
eps (Tuple[float, float], optional): epsilon1 is the term added to the denominator
of the update calculation to improve numerical stability. This use of epsilon1
deviates from the algorithm written in the paper! See note below for more details.
epsilon2 is the term used to avoid having too small a weight update when applying
parameter scaling. (default: (None, 1e-3))
d (float, optional): the clipping threshold, used to avoid larger-than-desired
updates.
weight_decay (float, optional): weight decay coefficient (default: 1e-2)
foreach (bool, optional): whether foreach implementation of optimizer is used. Note
that the foreach implementation uses ~ sizeof(params) more peak memory than the
for-loop version due to the intermediates being a tensorlist vs just one tensor.
As Adafactor is commonly used when memory is prohibitive, Adafactor will default
to the slower single tensor for-loop implementation unless this flag is explicitly
True. This behavior is contrary to other optimizers, which will attempt defaulting
to foreach on CUDA for faster runtime. (default: None)
{_maximize_doc}"""
+ r"""
.. Note::
The implementation of Adafactor subtly differs from Noam Shazeer and Mitchell Stern
and implementations in some other frameworks with its use of learning rate and
:math:`\epsilon_1`.
Regarding the learning rate hyperparameter: Noam Shazeer and Mitchell Stern do not
use lr at all, as the stated algorithm uses :math:`\rho_t` and update clipping to
affect the step size.
This implementation allows `lr` to influence the maximum value for :math:`\rho_t`:
.. math::
\begin{aligned}
&\hspace{5mm}\rho_t \leftarrow min(lr, \frac{1}{\sqrt{t}})
\end{aligned}
This differs from Noam Shazeer and Mitchell Stern, who use a constant of 0.01 as
the maximum value of :math:`\rho_t`
.. math::
\begin{aligned}
&\hspace{5mm}\rho_t \leftarrow min(0.01, \frac{1}{\sqrt{t}})
\end{aligned}
Noam Shazeer and Mitchell Stern do not enforce an opinion on how weight decay should
be computed, and so we use the learning rate as a coefficient for decoupled weight
decay, similar to what is suggested in `Decoupled Weight Decay Regularization`_.
Regarding the use of :math:`\epsilon_1`: The implementation attempts to replicate the
presumed intention of Noam Shazeer and Mitchell Stern to use :math:`\epsilon_1` as
a stabilizing term when the squared gradient becomes small.
This stabilization can be written as
.. math::
\begin{aligned}
&\hspace{5mm}R_t \leftarrow \widehat{\beta}_{2_t}R_{t-1}+
(1-\widehat{\beta}_{2_t})(G_t \odot G_t + 1_n \cdot 1^\top_m) \cdot 1_m \\
&\hspace{5mm}C_t \leftarrow \widehat{\beta}_{2_t}C_{t-1}+
(1-\widehat{\beta}_{2_t}) 1^\top_n \cdot (G_t \odot G_t + 1_n \cdot 1^\top_m) \\
&\hspace{5mm}\widehat{V}_t \leftarrow
\frac{R_t \cdot C_t}{max(1^\top_n \cdot R_t, \epsilon_1)} \\
&\hspace{5mm}U_t \leftarrow \frac{G_t}{max(\sqrt{\widehat{V}_t}, \epsilon_1)} \\
\end{aligned}
where the row and column factors of gradient squared :math:`R_t` and :math:`C_t`
are left alone, and we apply :math:`\epsilon_1` at the final calculation of
the variance estimate :math:`\widehat{V}_t` and for the update :math:`U_t`.
This is in contrast to Noam Shazeer and Mitchell Stern and other frameworks which
apply :math:`\epsilon_1` to both row and column factors of the squared gradient, but
not in the calculations after:
.. math::
\begin{aligned}
&\hspace{5mm}R_t \leftarrow \widehat{\beta}_{2_t}R_{t-1}+
(1-\widehat{\beta}_{2_t})(G_t \odot G_t + \epsilon_1 1_n \cdot 1^\top_m) \cdot 1_m \\
&\hspace{5mm}C_t \leftarrow \widehat{\beta}_{2_t}C_{t-1}+
(1-\widehat{\beta}_{2_t}) 1^\top_n \cdot (G_t \odot G_t + \epsilon_1 1_n \cdot 1^\top_m) \\
&\hspace{5mm}\widehat{V}_t \leftarrow \frac{R_t \cdot C_t}{1^\top_n \cdot R_t} \\
&\hspace{5mm}U_t \leftarrow \frac{G_t}{\sqrt{\widehat{V}_t}} \\
\end{aligned}
You may note that Noam Shazeer and Mitchell Stern describe using the sum of squared gradients,
while this implementation uses the mean instead. This choice is mathematically equivalent and
allows for greater numerical stability for large sums.
.. _Adafactor\: Adaptive Learning Rates with Sublinear Memory Cost:
https://arxiv.org/pdf/1804.04235
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
"""
)
def _single_tensor_adafactor(
params: list[Tensor],
grads: list[Tensor],
# If grad is 1-dimensional (aka a vector), there is no factorization necessary
# so row_var and col_var will be None while variance will be filled.
# Contrarily, for a grad with multiple dimensions, we will factor along the last
# 2 dimensions, and so row_var and col_var will be filled and variance will be None.
row_vars: list[Optional[Tensor]],
col_vars: list[Optional[Tensor]],
variances: list[Optional[Tensor]],
state_steps: list[Tensor],
grad_scale: Optional[Tensor],
found_inf: Optional[Tensor],
*,
d: float,
lr: Union[Tensor, float],
beta2_decay: float,
weight_decay: float,
eps1: Optional[float],
eps2: float,
maximize: bool,
has_complex: bool,
) -> None:
if grad_scale is not None or found_inf is not None:
raise AssertionError("Grad scaling should occur outside of optimizer.step()")
if torch.jit.is_scripting():
# this assert is due to JIT being dumb and not realizing that the ops below
# have overloads to handle both float and Tensor lrs, so we just assert it's
# a float since most people using JIT are using floats
if not isinstance(lr, float):
raise AssertionError(f"Expected lr to be a float, but got {type(lr)}")
else:
lr = _to_scalar(lr)
for i, param in enumerate(params):
grad = grads[i] if not maximize else -grads[i]
step_t = state_steps[i]
row_var = row_vars[i]
col_var = col_vars[i]
variance = variances[i]
if eps1 is None:
eps1 = torch.finfo(param.dtype).eps
# update step
step_t += 1
step_float = step_t.item()
one_minus_beta2_t = step_float**beta2_decay
rho_t = min(lr, 1 / (step_float**0.5))
alpha = max(eps2, param.norm(2).item() / (param.numel() ** 0.5)) * rho_t
# Perform stepweight decay
if weight_decay != 0:
param.mul_(1 - lr * weight_decay)
if grad.dim() > 1:
if row_var is None or col_var is None:
raise AssertionError(
"row_var and col_var should be defined when grad is multidimensional"
)
# same as (g * g).mean(dim=-1) w/o materializing an intermediate size g
row_mean = (
torch.norm(grad, dim=-1, keepdim=True).square_().div_(grad.size(-1))
)
row_var.lerp_(row_mean, one_minus_beta2_t)
# same as (g * g).mean(dim=-2) w/o materializing an intermediate size g
col_mean = (
torch.norm(grad, dim=-2, keepdim=True).square_().div_(grad.size(-2))
)
col_var.lerp_(col_mean, one_minus_beta2_t)
var_estimate = row_var @ col_var
var_estimate.div_(row_var.mean(dim=-2, keepdim=True).clamp_(min=eps1))
else:
if variance is None:
raise AssertionError("variance should be defined when grad is a vector")
grad_squared = grad * grad
variance.lerp_(grad_squared, one_minus_beta2_t)
# avoid writing into variance during update
var_estimate = variance.clone()
# square the eps1 as we sqrt after to keep eps1's magnitude
update = var_estimate.clamp_(min=eps1 * eps1).rsqrt_()
update.mul_(grad)
denom = max(1.0, update.norm(2).item() / ((update.numel() ** 0.5) * d))
param.add_(update, alpha=-alpha / denom)
def _group_tensors_by_device_dtype_and_is_multidim(
tensorlists: TensorListList,
) -> dict[
tuple[Optional[torch.device], Optional[torch.dtype], bool],
list[list[Optional[Tensor]]],
]:
"""Groups tensors by device, dtype, AND multidimensionality -- whether the tensor
has multiple dims or just one dim (is a vector). This allows the foreach impl of
Adafactor to assume that every group of params will either be factored or not."""
grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(tensorlists)
ultra_grouped_tensors: dict[
tuple[Optional[torch.device], Optional[torch.dtype], bool],
list[list[Optional[Tensor]]],
] = {}
for (device, dtype), (tensorlists, _) in grouped_tensors.items():
matrix_key = (device, dtype, True)
vector_key = (device, dtype, False)
# assumes grad is the second tensorlist
for j, tensor in enumerate(tensorlists[1]):
if tensor is None:
raise AssertionError("grad should not be None")
if tensor.dim() > 1:
if matrix_key not in ultra_grouped_tensors:
ultra_grouped_tensors[matrix_key] = [[] for _ in tensorlists]
for i in range(len(tensorlists)):
ultra_grouped_tensors[matrix_key][i].append(tensorlists[i][j])
else:
if vector_key not in ultra_grouped_tensors:
ultra_grouped_tensors[vector_key] = [[] for _ in tensorlists]
for i in range(len(tensorlists)):
ultra_grouped_tensors[vector_key][i].append(tensorlists[i][j])
return ultra_grouped_tensors
def _multi_tensor_adafactor(
params: list[Tensor],
grads: list[Tensor],
# If grad is 1-dimensional (aka a vector), there is no factorization necessary
# so row_var and col_var will be None while variance will be filled.
# Contrarily, for a grad with multiple dimensions, we will factor along the last
# 2 dimensions, and so row_var and col_var will be filled and variance will be None.
row_vars: list[Optional[Tensor]],
col_vars: list[Optional[Tensor]],
variances: list[Optional[Tensor]],
state_steps: list[Tensor],
grad_scale: Optional[Tensor],
found_inf: Optional[Tensor],
*,
d: float,
lr: Union[Tensor, float],
beta2_decay: float,
weight_decay: float,
eps1: Optional[float],
eps2: float,
maximize: bool,
has_complex: bool,
) -> None:
if len(params) == 0:
return
if grad_scale is not None or found_inf is not None:
raise AssertionError("Grad scaling should occur outside of optimizer.step()")
lr = _to_scalar(lr)
grouped_tensors = _group_tensors_by_device_dtype_and_is_multidim(
[params, grads, row_vars, col_vars, variances, state_steps] # type: ignore[list-item]
)
for (_, dtype, is_multidim), (
(
device_params_,
device_grads_,
device_row_vars_,
device_col_vars_,
device_variances_,
device_state_steps_,
)
) in grouped_tensors.items():
device_params = cast(list[Tensor], device_params_)
device_grads = cast(list[Tensor], device_grads_)
device_state_steps = cast(list[Tensor], device_state_steps_)
if eps1 is None:
if dtype is None:
raise AssertionError(
"dtype is needed to compute eps1 when eps1 is unset"
)
eps1 = torch.finfo(dtype).eps
if TYPE_CHECKING:
assert device_state_steps[0] is not None
if maximize:
device_grads = torch._foreach_neg(device_grads) # type: ignore[assignment]
# Update steps
# If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
# and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
# wrapped it once now. The alpha is required to assure we go to the right overload.
if not torch.compiler.is_compiling() and device_state_steps[0].is_cpu:
torch._foreach_add_(
device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
)
else:
torch._foreach_add_(device_state_steps, 1.0)
one_minus_beta2_ts = []
beta2_ts = []
rho_ts = []
for s in device_state_steps:
one_minus_beta2_ts.append(s.item() ** beta2_decay)
beta2_ts.append(1 - s.item() ** beta2_decay)
rho_ts.append(min(lr, 1 / (s.item() ** 0.5)))
alphas = [
max(eps2, p.norm(2).item() / (p.numel() ** 0.5)) * r
for p, r in zip(device_params, rho_ts, strict=True)
]
# Perform stepweight decay
if weight_decay != 0:
torch._foreach_mul_(device_params, 1 - lr * weight_decay)
if is_multidim:
device_row_vars = cast(list[Tensor], device_row_vars_)
device_col_vars = cast(list[Tensor], device_col_vars_)
if device_row_vars[0] is None or device_col_vars[0] is None:
raise AssertionError(
"row_var and col_var should be defined when grad is multidimensional"
)
# same as (g * g).mean(dim=-1) w/o materializing an intermediate size g
row_means = [
torch.norm(grad, dim=-1, keepdim=True) for grad in device_grads
]
torch._foreach_mul_(row_means, row_means)
torch._foreach_div_(row_means, [grad.size(-1) for grad in device_grads])
torch._foreach_lerp_(device_row_vars, row_means, one_minus_beta2_ts)
del row_means
# same as (g * g).mean(dim=-2) w/o materializing an intermediate size g
col_means = [
torch.norm(grad, dim=-2, keepdim=True) for grad in device_grads
]
torch._foreach_mul_(col_means, col_means)
torch._foreach_div_(col_means, [grad.size(-2) for grad in device_grads])
torch._foreach_lerp_(device_col_vars, col_means, one_minus_beta2_ts)
del col_means
var_estimates = [
row_var @ col_var
for row_var, col_var in zip(
device_row_vars, device_col_vars, strict=True
)
]
row_var_means = [
row_var.mean(dim=-2, keepdim=True) for row_var in device_row_vars
]
torch._foreach_clamp_min_(row_var_means, eps1)
torch._foreach_div_(var_estimates, row_var_means)
del row_var_means
else:
device_variances = cast(list[Tensor], device_variances_)
if device_variances[0] is None:
raise AssertionError("variance should be defined when grad is a vector")
grads_squared = torch._foreach_mul(device_grads, device_grads)
torch._foreach_lerp_(device_variances, grads_squared, one_minus_beta2_ts)
del grads_squared
# avoid writing into variance during update
var_estimates = [v.clone() for v in device_variances]
# square the eps1 as we sqrt after to keep eps1's magnitude
torch._foreach_clamp_min_(var_estimates, eps1 * eps1)
torch._foreach_rsqrt_(var_estimates)
torch._foreach_mul_(var_estimates, device_grads)
updates = var_estimates
alphas = [
-a / (max(1.0, update.norm(2).item() / ((update.numel() ** 0.5) * d)))
for a, update in zip(alphas, updates, strict=True)
]
torch._foreach_mul_(updates, alphas)
torch._foreach_add_(device_params, updates)
@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adafactor)
def adafactor(
params: list[Tensor],
grads: list[Tensor],
row_vars: list[Optional[Tensor]],
col_vars: list[Optional[Tensor]],
variances: list[Optional[Tensor]],
state_steps: list[Tensor],
# kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
# setting this as kwarg for now as functional API is compiled by torch/distributed/optim
foreach: Optional[bool] = None,
grad_scale: Optional[Tensor] = None,
found_inf: Optional[Tensor] = None,
has_complex: bool = False,
*,
d: float,
lr: Union[float, Tensor],
beta2_decay: float,
weight_decay: float,
eps1: float,
eps2: float,
maximize: bool,
) -> None:
r"""Functional API that performs Adafactor algorithm computation.
See :class:`~torch.optim.Adafactor` for details.
"""
if not torch.compiler.is_compiling() and not all(
isinstance(t, torch.Tensor) for t in state_steps
):
raise RuntimeError(
"`state_steps` argument must contain a list of singleton tensors"
)
if foreach:
func = _multi_tensor_adafactor
else:
func = _single_tensor_adafactor
func(
params,
grads,
row_vars,
col_vars,
variances,
state_steps,
d=d,
lr=lr,
beta2_decay=beta2_decay,
weight_decay=weight_decay,
eps1=eps1,
eps2=eps2,
maximize=maximize,
grad_scale=grad_scale,
found_inf=found_inf,
has_complex=has_complex,
)
| Adafactor |
python | marshmallow-code__marshmallow | examples/inflection_example.py | {
"start": 242,
"end": 610
} | class ____(Schema):
"""Schema that uses camel-case for its external representation
and snake-case for its internal representation.
"""
def on_bind_field(self, field_name, field_obj):
field_obj.data_key = camelcase(field_obj.data_key or field_name)
# -----------------------------------------------------------------------------
| CamelCaseSchema |
python | RaRe-Technologies__gensim | gensim/test/test_fasttext.py | {
"start": 52000,
"end": 54376
} | class ____(unittest.TestCase):
"""Loosely based on the test described here:
https://github.com/RaRe-Technologies/gensim/issues/2059#issuecomment-432300777
With a broken hash, vectors for non-ASCII keywords don't match when loaded
from a native model.
"""
def setUp(self):
#
# ./fasttext skipgram -minCount 0 -bucket 100 -input crime-and-punishment.txt -output crime-and-punishment -dim 5 # noqa: E501
#
self.model = gensim.models.fasttext.load_facebook_model(datapath('crime-and-punishment.bin'))
with utils.open(datapath('crime-and-punishment.vec'), 'r', encoding='utf-8') as fin:
self.expected = dict(load_vec(fin))
def test_ascii(self):
word = u'landlady'
expected = self.expected[word]
actual = self.model.wv[word]
self.assertTrue(np.allclose(expected, actual, atol=1e-5))
def test_unicode(self):
word = u'хозяйка'
expected = self.expected[word]
actual = self.model.wv[word]
self.assertTrue(np.allclose(expected, actual, atol=1e-5))
def test_out_of_vocab(self):
longword = u'rechtsschutzversicherungsgesellschaften' # many ngrams
expected = {
u'steamtrain': np.array([0.031988, 0.022966, 0.059483, 0.094547, 0.062693]),
u'паровоз': np.array([-0.0033987, 0.056236, 0.036073, 0.094008, 0.00085222]),
longword: np.array([-0.012889, 0.029756, 0.018020, 0.099077, 0.041939]),
}
actual = {w: self.model.wv[w] for w in expected}
self.assertTrue(np.allclose(expected[u'steamtrain'], actual[u'steamtrain'], atol=1e-5))
self.assertTrue(np.allclose(expected[u'паровоз'], actual[u'паровоз'], atol=1e-5))
self.assertTrue(np.allclose(expected[longword], actual[longword], atol=1e-5))
def hash_main(alg):
"""Generate hash values for test from standard input."""
hashmap = {
'cy_bytes': ft_hash_bytes,
}
try:
fun = hashmap[alg]
except KeyError:
raise KeyError('invalid alg: %r expected one of %r' % (alg, sorted(hashmap)))
for line in sys.stdin:
if 'bytes' in alg:
words = line.encode('utf-8').rstrip().split(b' ')
else:
words = line.rstrip().split(' ')
for word in words:
print('u%r: %r,' % (word, fun(word)))
| FTHashResultsTest |
python | numpy__numpy | numpy/_core/tests/test_cpu_features.py | {
"start": 13782,
"end": 15128
} | class ____(AbstractTest):
features = [
"SVE", "NEON", "ASIMD", "FPHP", "ASIMDHP", "ASIMDDP", "ASIMDFHM"
]
features_groups = {
"NEON_FP16": ["NEON", "HALF"],
"NEON_VFPV4": ["NEON", "VFPV4"],
}
def load_flags(self):
self.load_flags_cpuinfo("Features")
arch = self.get_cpuinfo_item("CPU architecture")
# in case of mounting virtual filesystem of aarch64 kernel without linux32
is_rootfs_v8 = (
not re.match(r"^armv[0-9]+l$", machine) and
(int('0' + next(iter(arch))) > 7 if arch else 0)
)
if re.match(r"^(aarch64|AARCH64)", machine) or is_rootfs_v8:
self.features_map = {
"NEON": "ASIMD", "HALF": "ASIMD", "VFPV4": "ASIMD"
}
else:
self.features_map = {
# ELF auxiliary vector and /proc/cpuinfo on Linux kernel(armv8 aarch32)
# doesn't provide information about ASIMD, so we assume that ASIMD is supported
# if the kernel reports any one of the following ARM8 features.
"ASIMD": ("AES", "SHA1", "SHA2", "PMULL", "CRC32")
}
is_loongarch = re.match(r"^(loongarch)", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_loongarch, reason="Only for Linux and LoongArch")
| Test_ARM_Features |
python | getsentry__sentry | src/sentry/utils/retries.py | {
"start": 274,
"end": 743
} | class ____(Exception):
def __init__(self, message, exception):
super().__init__(message)
self.message = message
self.exception = exception
def __reduce__(self):
return RetryException, (self.message, self.exception)
def __str__(self) -> str:
return force_bytes(self.message, errors="replace")
def __repr__(self) -> str:
return f"<{type(self).__name__}: {self.message!r}>"
T = TypeVar("T")
| RetryException |
python | pypa__hatch | src/hatch/utils/shells.py | {
"start": 583,
"end": 5243
} | class ____:
def __init__(self, environment: EnvironmentInterface) -> None:
self.environment = environment
def enter_cmd(self, path: str, args: Iterable[str], exe_dir: Path) -> None: # noqa: ARG002
self.environment.platform.exit_with_command([path or "cmd", "/k", str(exe_dir / "activate.bat")])
def enter_powershell(self, path: str, args: Iterable[str], exe_dir: Path) -> None: # noqa: ARG002
self.environment.platform.exit_with_command([
path or "powershell",
"-executionpolicy",
"bypass",
"-NoExit",
"-NoLogo",
"-File",
str(exe_dir / "activate.ps1"),
])
def enter_pwsh(self, path: str, args: Iterable[str], exe_dir: Path) -> None:
self.enter_powershell(path or "pwsh", args, exe_dir)
def enter_xonsh(self, path: str, args: Iterable[str], exe_dir: Path) -> None:
if self.environment.platform.windows:
with self.environment:
self.environment.platform.exit_with_command([
path or "xonsh",
*(args or ["-i"]),
"-D",
f"VIRTUAL_ENV={exe_dir.parent.name}",
])
else:
self.spawn_linux_shell(
path or "xonsh",
[*(args or ["-i"]), "-D", f"VIRTUAL_ENV={exe_dir.parent.name}"],
# Just in case pyenv works with xonsh, supersede it.
callback=lambda terminal: terminal.sendline(f"$PATH.insert(0, {str(exe_dir)!r})"),
)
def enter_bash(self, path: str, args: Iterable[str], exe_dir: Path) -> None:
if self.environment.platform.windows:
self.environment.platform.exit_with_command([
path or "bash",
"--init-file",
exe_dir / "activate",
*(args or ["-i"]),
])
else:
self.spawn_linux_shell(path or "bash", args or ["-i"], script=exe_dir / "activate")
def enter_fish(self, path: str, args: Iterable[str], exe_dir: Path) -> None:
self.spawn_linux_shell(path or "fish", args or ["-i"], script=exe_dir / "activate.fish")
def enter_zsh(self, path: str, args: Iterable[str], exe_dir: Path) -> None:
self.spawn_linux_shell(path or "zsh", args or ["-i"], script=exe_dir / "activate")
def enter_ash(self, path: str, args: Iterable[str], exe_dir: Path) -> None:
self.spawn_linux_shell(path or "ash", args or ["-i"], script=exe_dir / "activate")
def enter_nu(self, path: str, args: Iterable[str], exe_dir: Path) -> None: # noqa: ARG002
executable = path or "nu"
activation_script = exe_dir / "activate.nu"
self.environment.platform.exit_with_command([executable, "-e", f"overlay use {str(activation_script)!r}"])
def enter_tcsh(self, path: str, args: Iterable[str], exe_dir: Path) -> None:
self.spawn_linux_shell(path or "tcsh", args or ["-i"], script=exe_dir / "activate.csh")
def enter_csh(self, path: str, args: Iterable[str], exe_dir: Path) -> None:
self.spawn_linux_shell(path or "csh", args or ["-i"], script=exe_dir / "activate.csh")
if sys.platform == "win32":
def spawn_linux_shell(
self,
path: str,
args: Iterable[str] | None = None,
*,
script: Path | None = None,
callback: Callable | None = None,
) -> None:
raise NotImplementedError
else:
def spawn_linux_shell(
self,
path: str,
args: Iterable[str] | None = None,
*,
script: Path | None = None,
callback: Callable | None = None,
) -> None:
import shutil
import signal
import pexpect
columns, lines = shutil.get_terminal_size()
# pexpect only accepts lists
terminal = pexpect.spawn(path, args=list(args or ()), dimensions=(lines, columns))
def sigwinch_passthrough(sig: int, data: FrameType | None) -> None: # noqa: ARG001
new_columns, new_lines = shutil.get_terminal_size()
terminal.setwinsize(new_lines, new_columns)
signal.signal(signal.SIGWINCH, sigwinch_passthrough)
if script is not None:
terminal.sendline(f'source "{script}"')
if callback is not None:
callback(terminal)
terminal.interact(escape_character=None)
terminal.close()
self.environment.platform.exit_with_code(terminal.exitstatus)
| ShellManager |
python | django__django | tests/cache/tests.py | {
"start": 52904,
"end": 57334
} | class ____(BaseCacheTests, TestCase):
def setUp(self):
super().setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches["prefix"]._cache = cache._cache
caches["prefix"]._expire_info = cache._expire_info
caches["v2"]._cache = cache._cache
caches["v2"]._expire_info = cache._expire_info
caches["custom_key"]._cache = cache._cache
caches["custom_key"]._expire_info = cache._expire_info
caches["custom_key2"]._cache = cache._cache
caches["custom_key2"]._expire_info = cache._expire_info
@override_settings(
CACHES={
"default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"},
"other": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "other",
},
}
)
def test_multiple_caches(self):
"Multiple locmem caches are isolated"
cache.set("value", 42)
self.assertEqual(caches["default"].get("value"), 42)
self.assertIsNone(caches["other"].get("value"))
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set("set", bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
self.assertIs(cache.add("add", bad_obj), True)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
def test_incr_decr_timeout(self):
"""
incr/decr does not modify expiry time (matches memcached behavior)
"""
key = "value"
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
self.assertEqual(cache.incr(key), 2)
self.assertEqual(expire, cache._expire_info[_key])
self.assertEqual(cache.decr(key), 1)
self.assertEqual(expire, cache._expire_info[_key])
@retry()
@limit_locmem_entries
def test_lru_get(self):
"""get() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key)
cache.set(9, 9, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key)
for key in range(6, 9):
self.assertIsNone(cache.get(key))
self.assertEqual(cache.get(9), 9)
@limit_locmem_entries
def test_lru_set(self):
"""set() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(3, 9):
cache.set(key, key, timeout=None)
cache.set(9, 9, timeout=None)
for key in range(3, 10):
self.assertEqual(cache.get(key), key)
for key in range(3):
self.assertIsNone(cache.get(key))
@retry()
@limit_locmem_entries
def test_lru_incr(self):
"""incr() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(6):
self.assertEqual(cache.incr(key), key + 1)
cache.set(9, 9, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key + 1)
for key in range(6, 9):
self.assertIsNone(cache.get(key))
self.assertEqual(cache.get(9), 9)
# memcached and redis backends aren't guaranteed to be available.
# To check the backends, the test settings file will need to contain at least
# one cache backend setting that points at your cache server.
configured_caches = {}
for _cache_params in settings.CACHES.values():
configured_caches[_cache_params["BACKEND"]] = _cache_params
PyLibMCCache_params = configured_caches.get(
"django.core.cache.backends.memcached.PyLibMCCache"
)
PyMemcacheCache_params = configured_caches.get(
"django.core.cache.backends.memcached.PyMemcacheCache"
)
# The memcached backends don't support cull-related options like `MAX_ENTRIES`.
memcached_excluded_caches = {"cull", "zero_cull"}
RedisCache_params = configured_caches.get("django.core.cache.backends.redis.RedisCache")
# The redis backend does not support cull-related options like `MAX_ENTRIES`.
redis_excluded_caches = {"cull", "zero_cull"}
| LocMemCacheTests |
python | ray-project__ray | python/ray/tune/tests/test_tune_restore.py | {
"start": 17033,
"end": 17334
} | class ____(unittest.TestCase):
def testTuneRestore(self):
self.assertFalse(ray.is_initialized())
tune.run(MyTrainableClass, name="TestAutoInit", stop={"training_iteration": 1})
self.assertTrue(ray.is_initialized())
def tearDown(self):
ray.shutdown()
| AutoInitTest |
python | spack__spack | lib/spack/spack/vendor/macholib/mach_o.py | {
"start": 32033,
"end": 32209
} | class ____(Structure):
_fields_ = (("name", lc_str), ("header_addr", p_uint32))
def describe(self):
return {"header_addr": int(self.header_addr)}
| fvmfile_command |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_exec_order.py | {
"start": 964,
"end": 3275
} | class ____(torch.nn.Module):
"""
Model that supports two computation paths: `layer0` -> `layer1` and
`layer0` -> `layer2`. Notably, both `layer1` and `layer2` have 36 elements
when flattened, which means that their corresponding all-gathers and
reduce-scatters may be silently matched if we do not perform any checks.
"""
def __init__(self) -> None:
super().__init__()
self.layer0 = torch.nn.Linear(5, 6)
self.layer1 = torch.nn.Linear(6, 6, bias=False)
self.layer2 = torch.nn.Sequential(
torch.nn.Linear(6, 3, bias=False),
torch.nn.ReLU(),
torch.nn.Linear(3, 6, bias=False),
)
self.relu = torch.nn.ReLU()
self.use_alt_path = False
for param in self.layer2.parameters():
param.requires_grad = False
def forward(self, x):
# `layer0` -> `layer1` (normal)
# `layer0` -> `layer2` (alternate)
z = self.relu(self.layer0(x))
z = (
self.relu(self.layer2(z))
if self.use_alt_path
else self.relu(self.layer1(z))
)
return z
def get_input(self, device):
return (torch.randn((8, 5)).to(device),)
def get_loss(self, input, output):
return output.sum()
def run_backward(self, loss):
loss.backward()
def flip_path(self):
params_to_freeze = (
self.layer2.parameters() if self.use_alt_path else self.layer1.parameters()
)
params_to_unfreeze = (
self.layer1.parameters() if self.use_alt_path else self.layer2.parameters()
)
for param in params_to_freeze:
param.requires_grad = False
for param in params_to_unfreeze:
param.requires_grad = True
self.use_alt_path = not self.use_alt_path
@staticmethod
def wrap(sharding_strategy: ShardingStrategy, device):
model = Model()
model.layer1 = FSDP(
model.layer1, sharding_strategy=sharding_strategy, device_id=device
)
model.layer2 = FSDP(
model.layer2, sharding_strategy=sharding_strategy, device_id=device
)
fsdp_model = FSDP(model, sharding_strategy=sharding_strategy, device_id=device)
return fsdp_model.to(device)
| Model |
python | huggingface__transformers | src/transformers/models/olmo2/modular_olmo2.py | {
"start": 14609,
"end": 14764
} | class ____(OlmoForCausalLM):
pass
__all__ = [
"Olmo2Config",
"Olmo2ForCausalLM",
"Olmo2Model",
"Olmo2PreTrainedModel",
]
| Olmo2ForCausalLM |
python | PyCQA__pylint | tests/functional/r/regression_02/regression_5408.py | {
"start": 360,
"end": 642
} | class ____:
sub_class = MySubClass()
def get_unpatched_class(cls):
return cls
def get_unpatched(item):
lookup = get_unpatched_class if isinstance(item, type) else lambda item: None
return lookup(item)
_Child = get_unpatched(MyClass.sub_class.inner_class)
| MyClass |
python | mitmproxy__pdoc | test/testdata/demo_long.py | {
"start": 5759,
"end": 6398
} | class ____:
"""
This is an example for a dataclass.
As usual, you can link to individual properties: `DataDemo.a`.
"""
a: int
"""Again, we can document individual properties with docstrings."""
a2: Sequence[str]
# This property has a type annotation but is not documented.
a3 = "a3"
# This property has a default value but is not documented.
a4: str = "a4"
# This property has a type annotation and a default value but is not documented.
b: bool = field(repr=False, default=True)
"""This property is assigned to `dataclasses.field()`, which works just as well."""
@dataclass
| DataDemo |
python | getsentry__sentry | src/sentry/search/eap/types.py | {
"start": 516,
"end": 680
} | class ____:
functions: set[str] = field(default_factory=set)
attributes: set[str] = field(default_factory=set)
@dataclass(frozen=True, kw_only=True)
| FieldsACL |
python | walkccc__LeetCode | solutions/909. Snakes and Ladders/909.py | {
"start": 0,
"end": 699
} | class ____:
def snakesAndLadders(self, board: list[list[int]]) -> int:
n = len(board)
q = collections.deque([1])
seen = set()
arr = [0] * (1 + n * n) # 2D -> 1D
for i in range(n):
for j in range(n):
arr[(n - 1 - i) * n + (n - j if (n - i) % 2 == 0 else j + 1)] = board[i][j]
step = 1
while q:
for _ in range(len(q)):
curr = q.popleft()
for next in range(curr + 1, min(curr + 6, n * n) + 1):
dest = arr[next] if arr[next] > 0 else next
if dest == n * n:
return step
if dest in seen:
continue
q.append(dest)
seen.add(dest)
step += 1
return -1
| Solution |
python | jina-ai__jina | tests/unit/orchestrate/deployments/test_deployments.py | {
"start": 8644,
"end": 12492
} | class ____(Executor):
def __init__(self, runtime_args, *args, **kwargs):
super().__init__(*args, **kwargs)
self.shard_id = runtime_args['shard_id']
@requests
def foo(self, docs: DocumentArray, **kwargs):
docs.append(Document(text=str(self.shard_id)))
return docs
def test_pod_naming_with_shards():
args = set_deployment_parser().parse_args(
[
'--name',
'pod',
'--shards',
'2',
'--replicas',
'3',
]
)
with Deployment(args, include_gateway=False) as pod:
assert pod.head_pod.name == 'pod/head'
assert pod.shards[0].args[0].name == 'pod/shard-0/rep-0'
assert pod.shards[0].args[1].name == 'pod/shard-0/rep-1'
assert pod.shards[0].args[2].name == 'pod/shard-0/rep-2'
assert pod.shards[1].args[0].name == 'pod/shard-1/rep-0'
assert pod.shards[1].args[1].name == 'pod/shard-1/rep-1'
assert pod.shards[1].args[2].name == 'pod/shard-1/rep-2'
@pytest.mark.slow
def test_pod_activates_shards():
args_list = ['--replicas', '3']
args_list.extend(['--shards', '3'])
args = set_deployment_parser().parse_args(args_list)
args.uses = 'AppendShardExecutor'
args.polling = PollingType.ALL
with Deployment(args, include_gateway=False) as pod:
assert pod.num_pods == 3 * 3 + 1
response_texts = set()
# replicas are used in a round robin fashion, so sending 3 requests should hit each one time
response = send_request_sync(
_create_test_data_message(),
f'{pod.head_args.host}:{pod.head_args.port[0]}',
)
response_texts.update(response.response.docs.texts)
assert 4 == len(response.response.docs.texts)
assert 4 == len(response_texts)
assert all(text in response_texts for text in ['0', '1', '2', 'client'])
Deployment(args, include_gateway=False).start().close()
@pytest.mark.slow
@pytest.mark.skipif(
'GITHUB_WORKFLOW' in os.environ,
reason='for unknown reason, this test is flaky on Github action, '
'but locally it SHOULD work fine',
)
@pytest.mark.parametrize(
'protocol, uses',
[
('grpc', 'GRPCGateway'),
],
)
def test_gateway_pod(protocol, uses, graph_description):
args = set_gateway_parser().parse_args(
[
'--graph-description',
graph_description,
'--deployments-addresses',
'{"pod0": ["0.0.0.0:1234"]}',
'--protocol',
protocol,
]
)
with Deployment(args, include_gateway=False) as p:
assert len(p.all_args) == 1
assert p.all_args[0].uses == uses
Deployment(args, include_gateway=False).start().close()
def test_pod_naming_with_replica():
args = set_deployment_parser().parse_args(['--name', 'pod', '--replicas', '2'])
with Deployment(args, include_gateway=False) as bp:
assert bp.head_pod is None
assert bp.shards[0]._pods[0].name == 'pod/rep-0'
assert bp.shards[0]._pods[1].name == 'pod/rep-1'
def test_pod_args_remove_uses_ba():
args = set_deployment_parser().parse_args([])
with Deployment(args, include_gateway=False) as p:
assert p.num_pods == 1
args = set_deployment_parser().parse_args(
['--uses-before', __default_executor__, '--uses-after', __default_executor__]
)
with Deployment(args, include_gateway=False) as p:
assert p.num_pods == 1
args = set_deployment_parser().parse_args(
[
'--uses-before',
__default_executor__,
'--uses-after',
__default_executor__,
'--replicas',
'2',
]
)
with Deployment(args, include_gateway=False) as p:
assert p.num_pods == 2
| AppendShardExecutor |
python | ray-project__ray | python/ray/data/_internal/execution/operators/map_transformer.py | {
"start": 10582,
"end": 12349
} | class ____(MapTransformFn):
"""A batch-to-batch MapTransformFn."""
def __init__(
self,
batch_fn: MapTransformCallable[DataBatch, DataBatch],
*,
is_udf: bool = False,
batch_size: Optional[int] = None,
batch_format: Optional[BatchFormat] = None,
zero_copy_batch: bool = True,
output_block_size_option: Optional[OutputBlockSizeOption] = None,
):
super().__init__(
input_type=MapTransformFnDataType.Batch,
is_udf=is_udf,
output_block_size_option=output_block_size_option,
)
self._batch_size = batch_size
self._batch_format = batch_format
self._zero_copy_batch = zero_copy_batch
self._ensure_copy = not zero_copy_batch and batch_size is not None
self._batch_fn = batch_fn
def _pre_process(self, blocks: Iterable[Block]) -> Iterable[MapTransformFnData]:
# TODO make batch-udf zero-copy by default
ensure_copy = not self._zero_copy_batch and self._batch_size is not None
return batch_blocks(
blocks=iter(blocks),
stats=None,
batch_size=self._batch_size,
batch_format=self._batch_format,
ensure_copy=ensure_copy,
)
def _apply_transform(
self, ctx: TaskContext, batches: Iterable[MapTransformFnData]
) -> Iterable[MapTransformFnData]:
yield from self._batch_fn(batches, ctx)
def _post_process(self, results: Iterable[MapTransformFnData]) -> Iterable[Block]:
return self._shape_blocks(results)
def __repr__(self) -> str:
return f"BatchMapTransformFn({self._batch_fn=}, {self._batch_format=}, {self._batch_size=}, {self._zero_copy_batch=})"
| BatchMapTransformFn |
python | doocs__leetcode | solution/3000-3099/3070.Count Submatrices with Top-Left Element and Sum Less Than k/Solution.py | {
"start": 0,
"end": 385
} | class ____:
def countSubmatrices(self, grid: List[List[int]], k: int) -> int:
s = [[0] * (len(grid[0]) + 1) for _ in range(len(grid) + 1)]
ans = 0
for i, row in enumerate(grid, 1):
for j, x in enumerate(row, 1):
s[i][j] = s[i - 1][j] + s[i][j - 1] - s[i - 1][j - 1] + x
ans += s[i][j] <= k
return ans
| Solution |
python | redis__redis-py | redis/commands/search/field.py | {
"start": 55,
"end": 2123
} | class ____:
"""
A class representing a field in a document.
"""
NUMERIC = "NUMERIC"
TEXT = "TEXT"
WEIGHT = "WEIGHT"
GEO = "GEO"
TAG = "TAG"
VECTOR = "VECTOR"
SORTABLE = "SORTABLE"
NOINDEX = "NOINDEX"
AS = "AS"
GEOSHAPE = "GEOSHAPE"
INDEX_MISSING = "INDEXMISSING"
INDEX_EMPTY = "INDEXEMPTY"
def __init__(
self,
name: str,
args: List[str] = None,
sortable: bool = False,
no_index: bool = False,
index_missing: bool = False,
index_empty: bool = False,
as_name: str = None,
):
"""
Create a new field object.
Args:
name: The name of the field.
args:
sortable: If `True`, the field will be sortable.
no_index: If `True`, the field will not be indexed.
index_missing: If `True`, it will be possible to search for documents that
have this field missing.
index_empty: If `True`, it will be possible to search for documents that
have this field empty.
as_name: If provided, this alias will be used for the field.
"""
if args is None:
args = []
self.name = name
self.args = args
self.args_suffix = list()
self.as_name = as_name
if no_index:
self.args_suffix.append(Field.NOINDEX)
if index_missing:
self.args_suffix.append(Field.INDEX_MISSING)
if index_empty:
self.args_suffix.append(Field.INDEX_EMPTY)
if sortable:
self.args_suffix.append(Field.SORTABLE)
if no_index and not sortable:
raise ValueError("Non-Sortable non-Indexable fields are ignored")
def append_arg(self, value):
self.args.append(value)
def redis_args(self):
args = [self.name]
if self.as_name:
args += [self.AS, self.as_name]
args += self.args
args += self.args_suffix
return args
| Field |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/return_in_init.py | {
"start": 99,
"end": 187
} | class ____:
def __init__(self):
return 3
def gen(self):
return 5
| B |
python | pytorch__pytorch | test/inductor/test_cutedsl_template.py | {
"start": 1911,
"end": 16383
} | class ____(TestCase):
"""Test cases for CuteDSL template functionality."""
def test_gen_imports(self):
kernel = CuteDSLTemplateKernel(
kernel_name="test_kernel",
input_nodes=[],
output_node=None,
)
imports = kernel.gen_imports()
self.assertIn("import torch", imports)
self.assertIn("import cutlass", imports)
self.assertIn("import cutlass.cute as cute", imports)
self.assertIn("from cutlass.cute.runtime import from_dlpack", imports)
self.assertIsInstance(imports, str)
lines = imports.strip().split("\n")
self.assertEqual(len(lines), 7)
def test_render_includes_imports(self):
template_source = """@cute.kernel
def {{kernel_name}}_kernel():
pass
{{def_kernel("input", "output")}}
return output"""
mock_template = MagicMock()
mock_template.render = MagicMock(return_value=template_source)
kernel = CuteDSLTemplateKernel(
kernel_name="test_kernel",
input_nodes=[],
output_node=None,
)
result = kernel.render(mock_template)
self.assertIsInstance(result, PartialRender)
rendered_code = result._code
# The imports might have leading whitespace, so strip it
rendered_code_stripped = rendered_code.lstrip()
self.assertTrue(
rendered_code_stripped.startswith("import torch"),
f"Code should start with 'import torch', got: {rendered_code_stripped[:50]}",
)
self.assertIn("import cutlass", rendered_code)
self.assertIn("import cutlass.cute as cute", rendered_code)
self.assertIn("from cutlass.cute.runtime import from_dlpack", rendered_code)
self.assertIn("@cute.kernel", rendered_code)
def test_template_env_contains_hooks(self):
kernel = CuteDSLTemplateKernel(
kernel_name="test_kernel",
input_nodes=[],
output_node=None,
)
captured_env = {}
def mock_render(**kwargs):
captured_env.update(kwargs)
return "rendered"
mock_template = MagicMock()
mock_template.render = mock_render
kernel.render(mock_template)
self.assertIn("def_kernel", captured_env)
self.assertIn("kernel_name", captured_env)
self.assertTrue(callable(captured_env["def_kernel"]))
def test_multiple_templates_unique_names(self):
# Clean registry first
test_name = f"unique_test_{id(self)}"
if test_name in CuteDSLTemplate.all_templates:
del CuteDSLTemplate.all_templates[test_name]
_ = CuteDSLTemplate(
name=test_name,
source="template1",
)
with self.assertRaises(AssertionError):
_ = CuteDSLTemplate(
name=test_name,
source="template2",
)
def test_indented_buffer_usage(self):
kernel = CuteDSLTemplateKernel(
kernel_name="test_kernel",
input_nodes=[],
output_node=None,
)
imports = kernel.gen_imports()
lines = imports.strip().split("\n")
for line in lines:
if line:
self.assertFalse(
line.startswith(" "), f"Line should not be indented: '{line}'"
)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_cutedsl_add_e2e(self):
"""End-to-end test with CuteDSL template including code generation verification."""
from torch._inductor.ir import TensorBox
from torch._inductor.lowering import lowerings
from torch._inductor.utils import run_and_get_code
template = CuteDSLTemplate(
name="test_add_e2e",
source=CUTEDSL_ADD_TEMPLATE,
)
def cutedsl_add_lowering(a: TensorBox, b: TensorBox) -> TensorBox:
choices = []
error = template.maybe_append_choice(
choices,
input_nodes=[a, b],
layout=a.get_layout(),
THREADS_PER_BLOCK=256,
)
if error or not choices:
default_lowering = lowerings[torch.ops.aten.add.Tensor]
return default_lowering(a, b)
# Use the single choice directly (no autotuning)
return choices[0].output_node()
with patch.dict(lowerings, {torch.ops.aten.add.Tensor: cutedsl_add_lowering}):
# Test function
def test_add(x, y):
return x + y
device = "cuda"
x = torch.randn(128, 4, device=device, dtype=torch.float32)
y = torch.randn(128, 4, device=device, dtype=torch.float32)
# Compile and get generated code
compiled_fn = torch.compile(test_add, backend="inductor")
result, (code,) = run_and_get_code(compiled_fn, x, y)
# Verify CuteDSL code is present
self.assertIn(
"cute", code.lower(), "CuteDSL code should be in generated code"
)
# Verify parameter generation worked
self.assertIn(
"THREADS_PER_BLOCK", code, "Parameter should be in generated code"
)
# Verify correctness
expected = x + y
self.assertTrue(torch.allclose(result, expected, atol=1e-5))
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_cutedsl_add_e2e_autotune(self):
"""E2E test with multiple CuteDSL template variants for autotuning."""
from torch._inductor.ir import TensorBox
from torch._inductor.lowering import lowerings
from torch._inductor.select_algorithm import autotune_select_algorithm
template = CuteDSLTemplate(
name="test_add_autotune",
source=CUTEDSL_ADD_TEMPLATE,
)
def cutedsl_add_lowering(a: TensorBox, b: TensorBox) -> TensorBox:
choices = []
# Add multiple variants with different thread counts for autotuning
thread_variants = [128, 256, 512]
for threads in thread_variants:
error = template.maybe_append_choice(
choices,
input_nodes=[a, b],
layout=a.get_layout(),
THREADS_PER_BLOCK=threads,
)
if error:
# Skip this variant if it fails
continue
if not choices:
default_lowering = lowerings[torch.ops.aten.add.Tensor]
return default_lowering(a, b)
# Use autotuning to select the best variant
return autotune_select_algorithm(
"cutedsl_add_autotune",
choices,
[a, b],
a.get_layout(),
)
with patch.dict(lowerings, {torch.ops.aten.add.Tensor: cutedsl_add_lowering}):
# Test function
def test_add(x, y):
return x + y
device = "cuda"
x = torch.randn(128, 128, device=device, dtype=torch.float32)
y = torch.randn(128, 128, device=device, dtype=torch.float32)
# Compile and run
compiled_fn = torch.compile(test_add, backend="inductor")
result = compiled_fn(x, y)
# Verify correctness
expected = x + y
self.assertTrue(torch.allclose(result, expected, atol=1e-5))
def test_gen_defines(self):
"""Test that gen_defines correctly generates CuteDSL parameter definitions."""
kernel = CuteDSLTemplateKernel(
kernel_name="test_kernel",
input_nodes=[],
output_node=None,
)
# Test integer parameters
params = kernel.gen_defines(
THREADS_PER_BLOCK=256,
BLOCK_SIZE=128,
ENABLE_FEATURE=True,
)
assert_expected_inline(
params,
"""\
THREADS_PER_BLOCK: cutlass.Constexpr = 256
BLOCK_SIZE: cutlass.Constexpr = 128
ENABLE_FEATURE: cutlass.Constexpr = True
""",
)
params_float = kernel.gen_defines(SCALE_FACTOR=1.5)
assert_expected_inline(
params_float,
"""\
SCALE_FACTOR: cutlass.Constexpr = 1.5
""",
)
def test_template_aliasing(self):
"""Test that template variables are correctly aliased to function arguments."""
from torch._inductor.ir import Buffer
mock_input1 = MagicMock(spec=Buffer)
mock_input1.get_name.return_value = "buf_input1"
mock_input2 = MagicMock(spec=Buffer)
mock_input2.get_name.return_value = "buf_input2"
mock_output = MagicMock(spec=Buffer)
mock_output.get_name.return_value = "buf_output"
mock_graph = MockGraphHandler()
with V.set_graph_handler(mock_graph):
kernel = CuteDSLTemplateKernel(
kernel_name="test_aliasing",
input_nodes=[mock_input1, mock_input2],
output_node=mock_output,
)
def_kernel_hook = kernel.def_kernel("custom_a", "custom_b")
self.assertEqual(def_kernel_hook, "<DEF_KERNEL>")
self.assertIn("<DEF_KERNEL>", kernel.render_hooks)
hook_fn = kernel.render_hooks["<DEF_KERNEL>"]
generated_code = hook_fn()
# Check that the generated code contains the expected aliasing statements
self.assertIn("custom_a = arg_custom_a", generated_code)
self.assertIn("custom_b = arg_custom_b", generated_code)
def test_get_output_hook(self):
"""Test the get_output() template hook."""
from torch._inductor.ir import Buffer
mock_output = MagicMock(spec=Buffer)
mock_output.get_name.return_value = "buf_test_output"
mock_graph = MockGraphHandler()
with V.set_graph_handler(mock_graph):
kernel = CuteDSLTemplateKernel(
kernel_name="test_output",
input_nodes=[],
output_node=mock_output,
)
with self.assertRaises(ValueError):
# error if no output buffer
result = kernel.get_output()
kernel.args.output_buffers["buf_test_output"] = "arg_buf_test_output"
result = kernel.get_output()
self.assertEqual(result, "arg_buf_test_output")
def test_modification_subgraph(self):
"""Test the modification() method and subgraph processing."""
from torch._inductor.ir import Buffer
mock_subgraph1 = MagicMock(spec=Buffer)
mock_subgraph2 = MagicMock(spec=Buffer)
subgraphs = [mock_subgraph1, mock_subgraph2]
mock_output = MagicMock(spec=Buffer)
mock_output.get_name.return_value = "buf_output"
kernel = CuteDSLTemplateKernel(
kernel_name="test_modification",
input_nodes=[],
output_node=mock_output,
subgraphs=subgraphs,
)
result = kernel._get_subgraph(0)
self.assertEqual(result, mock_subgraph1)
result = kernel._get_subgraph(1)
self.assertEqual(result, mock_subgraph2)
with self.assertRaises(AssertionError):
kernel._get_subgraph(2)
def test_cutedsl_op_overrides(self):
"""Test the new CuteDSLOpOverrides class."""
import torch
from torch._inductor.codegen.common import CSEVariable
from torch._inductor.codegen.cutedsl.cutedsl_op_overrides import (
CuteDSLOpOverrides,
)
from torch.utils._sympy.value_ranges import ValueRanges
mock_cse_a = MagicMock(spec=CSEVariable)
mock_cse_a.__str__.return_value = "tensor_a"
mock_cse_a.dtype = torch.float32
mock_cse_a.bounds = ValueRanges.unknown()
mock_cse_b = MagicMock(spec=CSEVariable)
mock_cse_b.__str__.return_value = "tensor_b"
mock_cse_b.dtype = torch.float32
mock_cse_b.bounds = ValueRanges.unknown()
mock_graph = MockGraphHandler()
with V.set_graph_handler(mock_graph):
kernel = CuteDSLTemplateKernel(
kernel_name="test_ops",
input_nodes=[],
output_node=None,
)
with V.set_kernel_handler(kernel):
result = CuteDSLOpOverrides.add(mock_cse_a, mock_cse_b)
self.assertIsInstance(result, CSEVariable)
result = CuteDSLOpOverrides.mul(mock_cse_a, mock_cse_b)
self.assertIsInstance(result, CSEVariable)
result = CuteDSLOpOverrides.truediv(mock_cse_a, mock_cse_b)
self.assertIsInstance(result, CSEVariable)
result = CuteDSLOpOverrides.exp(mock_cse_a)
self.assertIsInstance(result, CSEVariable)
result = CuteDSLOpOverrides.sqrt(mock_cse_a)
self.assertIsInstance(result, CSEVariable)
with self.assertRaises(NotImplementedError):
result = CuteDSLOpOverrides.maximum(mock_cse_a, mock_cse_b)
result = CuteDSLOpOverrides.minimum(mock_cse_a, mock_cse_b)
scalar_result = CuteDSLOpOverrides._ensure_tensor_ssa("5.0", mock_cse_a)
self.assertEqual(scalar_result, "cute.full_like(tensor_a, 5.0)")
tensor_result = CuteDSLOpOverrides._ensure_tensor_ssa(mock_cse_a, mock_cse_b)
self.assertEqual(tensor_result, "tensor_a")
def test_cse_integration(self):
"""Test CSE (Common Subexpression Elimination) integration."""
from torch._inductor.codegen.common import CSE
mock_graph = MockGraphHandler()
with V.set_graph_handler(mock_graph):
kernel = CuteDSLTemplateKernel(
kernel_name="test_cse",
input_nodes=[],
output_node=None,
)
self.assertIsInstance(kernel.cse, CSE)
self.assertEqual(kernel.cse.name_prefix, "tmp")
with V.set_kernel_handler(kernel):
test_expr = "x"
var = kernel.cse.generate(kernel.body, test_expr, dtype=None)
self.assertTrue(str(var).startswith("tmp"))
if __name__ == "__main__":
from torch._inductor.test_case import run_tests
run_tests()
| TestCuteDSLTemplate |
python | ansible__ansible | lib/ansible/module_utils/facts/system/loadavg.py | {
"start": 256,
"end": 740
} | class ____(BaseFactCollector):
name = 'loadavg'
_fact_ids = set() # type: t.Set[str]
def collect(self, module=None, collected_facts=None):
facts = {}
try:
# (0.58, 0.82, 0.98)
loadavg = os.getloadavg()
facts['loadavg'] = {
'1m': loadavg[0],
'5m': loadavg[1],
'15m': loadavg[2]
}
except OSError:
pass
return facts
| LoadAvgFactCollector |
python | apache__airflow | airflow-e2e-tests/tests/airflow_e2e_tests/basic_tests/test_basic_dag_operations.py | {
"start": 946,
"end": 1833
} | class ____:
"""Test basic DAG functionality using the Airflow REST API."""
airflow_client = AirflowClient()
def test_dag_unpause(self):
self.airflow_client.un_pause_dag(
"example_xcom_test",
)
def test_xcom_value(self):
resp = self.airflow_client.trigger_dag(
"example_xcom_test", json={"logical_date": datetime.now(timezone.utc).isoformat()}
)
self.airflow_client.wait_for_dag_run(
dag_id="example_xcom_test",
run_id=resp["dag_run_id"],
)
xcom_value_resp = self.airflow_client.get_xcom_value(
dag_id="example_xcom_test",
task_id="bash_push",
key="manually_pushed_value",
run_id=resp["dag_run_id"],
)
assert xcom_value_resp["value"] == "manually_pushed_value", xcom_value_resp
| TestBasicDagFunctionality |
python | django__django | tests/apps/tests.py | {
"start": 1210,
"end": 15460
} | class ____(SimpleTestCase):
def test_singleton_main(self):
"""
Only one main registry can exist.
"""
with self.assertRaises(RuntimeError):
Apps(installed_apps=None)
def test_ready(self):
"""
Tests the ready property of the main registry.
"""
# The main app registry is always ready when the tests run.
self.assertIs(apps.ready, True)
# Non-main app registries are populated in __init__.
self.assertIs(Apps().ready, True)
# The condition is set when apps are ready
self.assertIs(apps.ready_event.is_set(), True)
self.assertIs(Apps().ready_event.is_set(), True)
def test_bad_app_config(self):
"""
Tests when INSTALLED_APPS contains an incorrect app config.
"""
msg = "'apps.apps.BadConfig' must supply a name attribute."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
with self.settings(INSTALLED_APPS=["apps.apps.BadConfig"]):
pass
def test_not_an_app_config(self):
"""
Tests when INSTALLED_APPS contains a class that isn't an app config.
"""
msg = "'apps.apps.NotAConfig' isn't a subclass of AppConfig."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
with self.settings(INSTALLED_APPS=["apps.apps.NotAConfig"]):
pass
def test_no_such_app(self):
"""
Tests when INSTALLED_APPS contains an app that doesn't exist, either
directly or via an app config.
"""
with self.assertRaises(ImportError):
with self.settings(INSTALLED_APPS=["there is no such app"]):
pass
msg = (
"Cannot import 'there is no such app'. Check that "
"'apps.apps.NoSuchApp.name' is correct."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
with self.settings(INSTALLED_APPS=["apps.apps.NoSuchApp"]):
pass
def test_no_such_app_config(self):
msg = "Module 'apps' does not contain a 'NoSuchConfig' class."
with self.assertRaisesMessage(ImportError, msg):
with self.settings(INSTALLED_APPS=["apps.NoSuchConfig"]):
pass
def test_no_such_app_config_with_choices(self):
msg = (
"Module 'apps.apps' does not contain a 'NoSuchConfig' class. "
"Choices are: 'BadConfig', 'ModelPKAppsConfig', 'MyAdmin', "
"'MyAuth', 'NoSuchApp', 'PlainAppsConfig', 'RelabeledAppsConfig'."
)
with self.assertRaisesMessage(ImportError, msg):
with self.settings(INSTALLED_APPS=["apps.apps.NoSuchConfig"]):
pass
def test_no_config_app(self):
"""Load an app that doesn't provide an AppConfig class."""
with self.settings(INSTALLED_APPS=["apps.no_config_app"]):
config = apps.get_app_config("no_config_app")
self.assertIsInstance(config, AppConfig)
def test_one_config_app(self):
"""Load an app that provides an AppConfig class."""
with self.settings(INSTALLED_APPS=["apps.one_config_app"]):
config = apps.get_app_config("one_config_app")
self.assertIsInstance(config, OneConfig)
def test_two_configs_app(self):
"""Load an app that provides two AppConfig classes."""
with self.settings(INSTALLED_APPS=["apps.two_configs_app"]):
config = apps.get_app_config("two_configs_app")
self.assertIsInstance(config, AppConfig)
def test_two_default_configs_app(self):
"""Load an app that provides two default AppConfig classes."""
msg = (
"'apps.two_default_configs_app.apps' declares more than one "
"default AppConfig: 'TwoConfig', 'TwoConfigBis'."
)
with self.assertRaisesMessage(RuntimeError, msg):
with self.settings(INSTALLED_APPS=["apps.two_default_configs_app"]):
pass
def test_two_configs_one_default_app(self):
"""
Load an app that provides two AppConfig classes, one being the default.
"""
with self.settings(INSTALLED_APPS=["apps.two_configs_one_default_app"]):
config = apps.get_app_config("two_configs_one_default_app")
self.assertIsInstance(config, TwoConfig)
@override_settings(INSTALLED_APPS=SOME_INSTALLED_APPS)
def test_get_app_configs(self):
"""
Tests apps.get_app_configs().
"""
app_configs = apps.get_app_configs()
self.assertEqual(
[app_config.name for app_config in app_configs], SOME_INSTALLED_APPS_NAMES
)
@override_settings(INSTALLED_APPS=SOME_INSTALLED_APPS)
def test_get_app_config(self):
"""
Tests apps.get_app_config().
"""
app_config = apps.get_app_config("admin")
self.assertEqual(app_config.name, "django.contrib.admin")
app_config = apps.get_app_config("staticfiles")
self.assertEqual(app_config.name, "django.contrib.staticfiles")
with self.assertRaises(LookupError):
apps.get_app_config("admindocs")
msg = "No installed app with label 'django.contrib.auth'. Did you mean 'myauth'"
with self.assertRaisesMessage(LookupError, msg):
apps.get_app_config("django.contrib.auth")
@override_settings(INSTALLED_APPS=SOME_INSTALLED_APPS)
def test_is_installed(self):
"""
Tests apps.is_installed().
"""
self.assertIs(apps.is_installed("django.contrib.admin"), True)
self.assertIs(apps.is_installed("django.contrib.auth"), True)
self.assertIs(apps.is_installed("django.contrib.staticfiles"), True)
self.assertIs(apps.is_installed("django.contrib.admindocs"), False)
@override_settings(INSTALLED_APPS=SOME_INSTALLED_APPS)
def test_get_model(self):
"""
Tests apps.get_model().
"""
self.assertEqual(apps.get_model("admin", "LogEntry"), LogEntry)
with self.assertRaises(LookupError):
apps.get_model("admin", "LogExit")
# App label is case-sensitive, Model name is case-insensitive.
self.assertEqual(apps.get_model("admin", "loGentrY"), LogEntry)
with self.assertRaises(LookupError):
apps.get_model("Admin", "LogEntry")
# A single argument is accepted.
self.assertEqual(apps.get_model("admin.LogEntry"), LogEntry)
with self.assertRaises(LookupError):
apps.get_model("admin.LogExit")
with self.assertRaises(ValueError):
apps.get_model("admin_LogEntry")
@override_settings(INSTALLED_APPS=SOME_INSTALLED_APPS)
def test_clear_cache(self):
# Set cache.
self.assertIsNone(apps.get_swappable_settings_name("admin.LogEntry"))
apps.get_models()
apps.clear_cache()
self.assertEqual(apps.get_swappable_settings_name.cache_info().currsize, 0)
self.assertEqual(apps.get_models.cache_info().currsize, 0)
@override_settings(INSTALLED_APPS=SOME_INSTALLED_APPS)
def test_cached_properties_cleared_after_cache_clear(self):
opts = apps.get_model("admin", "LogEntry")._meta
cached_properties = [
name
for name, attr in models.options.Options.__dict__.items()
if isinstance(attr, cached_property)
]
# Access each cached property to populate the cache.
for attr_name in cached_properties:
getattr(opts, attr_name)
self.assertIn(attr_name, opts.__dict__)
apps.clear_cache()
for attr_name in cached_properties:
with self.subTest(property=attr_name):
self.assertNotIn(attr_name, opts.__dict__)
@override_settings(INSTALLED_APPS=["apps.apps.RelabeledAppsConfig"])
def test_relabeling(self):
self.assertEqual(apps.get_app_config("relabeled").name, "apps")
def test_duplicate_labels(self):
with self.assertRaisesMessage(
ImproperlyConfigured, "Application labels aren't unique"
):
with self.settings(INSTALLED_APPS=["apps.apps.PlainAppsConfig", "apps"]):
pass
def test_duplicate_names(self):
with self.assertRaisesMessage(
ImproperlyConfigured, "Application names aren't unique"
):
with self.settings(
INSTALLED_APPS=["apps.apps.RelabeledAppsConfig", "apps"]
):
pass
def test_import_exception_is_not_masked(self):
"""
App discovery should preserve stack traces. Regression test for #22920.
"""
with self.assertRaisesMessage(ImportError, "Oops"):
with self.settings(INSTALLED_APPS=["import_error_package"]):
pass
def test_models_py(self):
"""
The models in the models.py file were loaded correctly.
"""
self.assertEqual(apps.get_model("apps", "TotallyNormal"), TotallyNormal)
with self.assertRaises(LookupError):
apps.get_model("apps", "SoAlternative")
with self.assertRaises(LookupError):
new_apps.get_model("apps", "TotallyNormal")
self.assertEqual(new_apps.get_model("apps", "SoAlternative"), SoAlternative)
def test_models_not_loaded(self):
"""
apps.get_models() raises an exception if apps.models_ready isn't True.
"""
apps.models_ready = False
try:
# The cache must be cleared to trigger the exception.
apps.get_models.cache_clear()
with self.assertRaisesMessage(
AppRegistryNotReady, "Models aren't loaded yet."
):
apps.get_models()
finally:
apps.models_ready = True
def test_dynamic_load(self):
"""
Makes a new model at runtime and ensures it goes into the right place.
"""
old_models = list(apps.get_app_config("apps").get_models())
# Construct a new model in a new app registry
body = {}
new_apps = Apps(["apps"])
meta_contents = {
"app_label": "apps",
"apps": new_apps,
}
meta = type("Meta", (), meta_contents)
body["Meta"] = meta
body["__module__"] = TotallyNormal.__module__
temp_model = type("SouthPonies", (models.Model,), body)
# Make sure it appeared in the right place!
self.assertEqual(list(apps.get_app_config("apps").get_models()), old_models)
with self.assertRaises(LookupError):
apps.get_model("apps", "SouthPonies")
self.assertEqual(new_apps.get_model("apps", "SouthPonies"), temp_model)
def test_model_clash(self):
"""
Test for behavior when two models clash in the app registry.
"""
new_apps = Apps(["apps"])
meta_contents = {
"app_label": "apps",
"apps": new_apps,
}
body = {}
body["Meta"] = type("Meta", (), meta_contents)
body["__module__"] = TotallyNormal.__module__
type("SouthPonies", (models.Model,), body)
# When __name__ and __module__ match we assume the module
# was reloaded and issue a warning. This use-case is
# useful for REPL. Refs #23621.
body = {}
body["Meta"] = type("Meta", (), meta_contents)
body["__module__"] = TotallyNormal.__module__
msg = (
"Model 'apps.southponies' was already registered. "
"Reloading models is not advised as it can lead to inconsistencies, "
"most notably with related models."
)
with self.assertRaisesMessage(RuntimeWarning, msg):
type("SouthPonies", (models.Model,), body)
# If it doesn't appear to be a reloaded module then we expect
# a RuntimeError.
body = {}
body["Meta"] = type("Meta", (), meta_contents)
body["__module__"] = TotallyNormal.__module__ + ".whatever"
with self.assertRaisesMessage(
RuntimeError, "Conflicting 'southponies' models in application 'apps':"
):
type("SouthPonies", (models.Model,), body)
def test_get_containing_app_config_apps_not_ready(self):
"""
apps.get_containing_app_config() should raise an exception if
apps.apps_ready isn't True.
"""
apps.apps_ready = False
try:
with self.assertRaisesMessage(
AppRegistryNotReady, "Apps aren't loaded yet"
):
apps.get_containing_app_config("foo")
finally:
apps.apps_ready = True
@isolate_apps("apps", kwarg_name="apps")
def test_lazy_model_operation(self, apps):
"""
Tests apps.lazy_model_operation().
"""
model_classes = []
initial_pending = set(apps._pending_operations)
def test_func(*models):
model_classes[:] = models
class LazyA(models.Model):
pass
# Test models appearing twice, and models appearing consecutively
model_keys = [
("apps", model_name)
for model_name in ["lazya", "lazyb", "lazyb", "lazyc", "lazya"]
]
apps.lazy_model_operation(test_func, *model_keys)
# LazyModelA shouldn't be waited on since it's already registered,
# and LazyModelC shouldn't be waited on until LazyModelB exists.
self.assertEqual(
set(apps._pending_operations) - initial_pending, {("apps", "lazyb")}
)
# Multiple operations can wait on the same model
apps.lazy_model_operation(test_func, ("apps", "lazyb"))
class LazyB(models.Model):
pass
self.assertEqual(model_classes, [LazyB])
# Now we are just waiting on LazyModelC.
self.assertEqual(
set(apps._pending_operations) - initial_pending, {("apps", "lazyc")}
)
class LazyC(models.Model):
pass
# Everything should be loaded - make sure the callback was executed
# properly.
self.assertEqual(model_classes, [LazyA, LazyB, LazyB, LazyC, LazyA])
| AppsTests |
python | streamlit__streamlit | lib/streamlit/components/lib/local_component_registry.py | {
"start": 1069,
"end": 3016
} | class ____(BaseComponentRegistry):
def __init__(self) -> None:
self._components: dict[str, BaseCustomComponent] = {}
self._lock = threading.Lock()
def __repr__(self) -> str:
return util.repr_(self)
def register_component(self, component: BaseCustomComponent) -> None:
"""Register a CustomComponent.
Parameters
----------
component : BaseCustomComponent
The component to register.
"""
# Validate the component's path
abspath = component.abspath
if abspath is not None and not os.path.isdir(abspath):
raise StreamlitAPIException(f"No such component directory: '{abspath}'")
with self._lock:
existing = self._components.get(component.name)
self._components[component.name] = component
if existing is not None and component != existing:
_LOGGER.warning(
"%s overriding previously-registered %s",
component,
existing,
)
_LOGGER.debug("Registered component %s", component)
def get_component_path(self, name: str) -> str | None:
"""Return the filesystem path for the component with the given name.
If no such component is registered, or if the component exists but is
being served from a URL, return None instead.
"""
component = self._components.get(name, None)
return component.abspath if component is not None else None
def get_module_name(self, name: str) -> str | None:
component = self._components.get(name, None)
return component.module_name if component is not None else None
def get_component(self, name: str) -> BaseCustomComponent | None:
return self._components.get(name, None)
def get_components(self) -> list[BaseCustomComponent]:
return list(self._components.values())
| LocalComponentRegistry |
python | ipython__ipython | IPython/core/splitinput.py | {
"start": 2822,
"end": 5006
} | class ____:
"""A single line of input and associated info.
Includes the following as properties:
line
The original, raw line
continue_prompt
Is this line a continuation in a sequence of multiline input?
pre
Any leading whitespace.
esc
The escape character(s) in pre or the empty string if there isn't one.
Note that '!!' and '??' are possible values for esc. Otherwise it will
always be a single character.
ifun
The 'function part', which is basically the maximal initial sequence
of valid python identifiers and the '.' character. This is what is
checked for alias and magic transformations, used for auto-calling,
etc. In contrast to Python identifiers, it may start with "%" and contain
"*".
the_rest
Everything else on the line.
raw_the_rest
the_rest without whitespace stripped.
"""
def __init__(self, line, continue_prompt=False):
self.line = line
self.continue_prompt = continue_prompt
self.pre, self.esc, self.ifun, self.raw_the_rest = split_user_input(line)
self.the_rest = self.raw_the_rest.lstrip()
self.pre_char = self.pre.strip()
if self.pre_char:
self.pre_whitespace = '' # No whitespace allowed before esc chars
else:
self.pre_whitespace = self.pre
def ofind(self, ip) -> OInfo:
"""Do a full, attribute-walking lookup of the ifun in the various
namespaces for the given IPython InteractiveShell instance.
Return a dict with keys: {found, obj, ospace, ismagic}
Note: can cause state changes because of calling getattr, but should
only be run if autocall is on and if the line hasn't matched any
other, less dangerous handlers.
Does cache the results of the call, so can be called multiple times
without worrying about *further* damaging state.
"""
return ip._ofind(self.ifun)
def __str__(self):
return "LineInfo [%s|%s|%s|%s]" %(self.pre, self.esc, self.ifun, self.the_rest)
def __repr__(self):
return "<" + str(self) + ">"
| LineInfo |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingIn1.py | {
"start": 3287,
"end": 3322
} | class ____(TypedDict):
x: str
| TD1 |
python | getsentry__sentry | tests/sentry/models/test_apitoken.py | {
"start": 791,
"end": 7449
} | class ____(TestCase):
def test_is_expired(self) -> None:
token = ApiToken(expires_at=None)
assert not token.is_expired()
token = ApiToken(expires_at=timezone.now() + timedelta(days=1))
assert not token.is_expired()
token = ApiToken(expires_at=timezone.now() - timedelta(days=1))
assert token.is_expired()
def test_get_scopes(self) -> None:
token = ApiToken(scopes=1)
assert token.get_scopes() == ["project:read"]
token = ApiToken(scopes=4, scope_list=["project:read"])
assert token.get_scopes() == ["project:read"]
token = ApiToken(scope_list=["project:read"])
assert token.get_scopes() == ["project:read"]
def test_enforces_scope_hierarchy(self) -> None:
user = self.create_user()
# Ensure hierarchy is enforced for all tokens
for scope in SENTRY_SCOPES:
token = ApiToken.objects.create(
user_id=user.id,
scope_list=[scope],
)
assert set(token.get_scopes()) == SENTRY_SCOPE_HIERARCHY_MAPPING[scope]
def test_organization_id_for_non_internal(self) -> None:
install = self.create_sentry_app_installation()
token = install.api_token
org_id = token.organization_id
with assume_test_silo_mode(SiloMode.REGION):
assert ApiTokenReplica.objects.get(apitoken_id=token.id).organization_id == org_id
with outbox_runner():
install.delete()
with assume_test_silo_mode(SiloMode.REGION):
assert ApiTokenReplica.objects.get(apitoken_id=token.id).organization_id is None
assert token.organization_id is None
def test_last_chars_are_set(self) -> None:
user = self.create_user()
token = ApiToken.objects.create(user_id=user.id)
assert token.token_last_characters == token.token[-4:]
def test_hash_exists_on_token(self) -> None:
user = self.create_user()
token = ApiToken.objects.create(user_id=user.id)
assert token.hashed_token is not None
assert token.hashed_refresh_token is not None
def test_hash_exists_on_user_token(self) -> None:
user = self.create_user()
token = ApiToken.objects.create(user_id=user.id, token_type=AuthTokenType.USER)
assert token.hashed_token is not None
assert len(token.hashed_token) == 64 # sha256 hash
assert token.hashed_refresh_token is None # user auth tokens don't have refresh tokens
def test_plaintext_values_only_available_immediately_after_create(self) -> None:
user = self.create_user()
token = ApiToken.objects.create(user_id=user.id)
assert token.plaintext_token is not None
assert token.plaintext_refresh_token is not None
# we accessed the tokens above when we asserted it was not None
# accessing them again should throw an exception
with pytest.raises(PlaintextSecretAlreadyRead):
_ = token.plaintext_token
with pytest.raises(PlaintextSecretAlreadyRead):
_ = token.plaintext_refresh_token
def test_error_when_accessing_refresh_token_on_user_token(self) -> None:
user = self.create_user()
token = ApiToken.objects.create(user_id=user.id, token_type=AuthTokenType.USER)
with pytest.raises(NotSupported):
assert token.plaintext_refresh_token is not None
def test_user_auth_token_refresh_raises_error(self) -> None:
user = self.create_user()
token = ApiToken.objects.create(user_id=user.id, token_type=AuthTokenType.USER)
with pytest.raises(NotSupported):
token.refresh()
def test_user_auth_token_sha256_hash(self) -> None:
user = self.create_user()
token = ApiToken.objects.create(user_id=user.id, token_type=AuthTokenType.USER)
expected_hash = hashlib.sha256(token.plaintext_token.encode()).hexdigest()
assert expected_hash == token.hashed_token
def test_hash_updated_when_calling_update(self) -> None:
user = self.create_user()
token = ApiToken.objects.create(user_id=user.id)
initial_expected_hash = hashlib.sha256(token.plaintext_token.encode()).hexdigest()
assert initial_expected_hash == token.hashed_token
new_token = "abc1234"
new_token_expected_hash = hashlib.sha256(new_token.encode()).hexdigest()
with assume_test_silo_mode(SiloMode.CONTROL):
with outbox_runner():
token.update(token=new_token)
token.refresh_from_db()
assert token.token_last_characters == "1234"
assert token.hashed_token == new_token_expected_hash
def test_default_string_serialization(self) -> None:
user = self.create_user()
token = ApiToken.objects.create(user_id=user.id)
assert f"{token} is cool" == f"token_id={token.id} is cool"
def test_replica_string_serialization(self) -> None:
user = self.create_user()
token = ApiToken.objects.create(user_id=user.id)
with assume_test_silo_mode(SiloMode.REGION):
replica = ApiTokenReplica.objects.get(apitoken_id=token.id)
assert (
f"{replica} is swug"
== f"replica_token_id={replica.id}, token_id={token.id} is swug"
)
def test_delete_token_removes_replica(self) -> None:
user = self.create_user()
with outbox_runner():
token = ApiToken.objects.create(user_id=user.id, token_type=AuthTokenType.USER)
token.save()
# Verify replica exists
with assume_test_silo_mode(SiloMode.REGION):
assert ApiTokenReplica.objects.filter(apitoken_id=token.id).exists()
# Delete token and verify replica is removed
with outbox_runner():
token.delete()
with assume_test_silo_mode(SiloMode.REGION):
assert not ApiTokenReplica.objects.filter(apitoken_id=token.id).exists()
@mock.patch(
"sentry.hybridcloud.services.replica.region_replica_service.delete_replicated_api_token"
)
def test_handle_async_deletion_called(self, mock_delete_replica: mock.MagicMock) -> None:
user = self.create_user()
token = ApiToken.objects.create(user_id=user.id, token_type=AuthTokenType.USER)
token_id = token.id
# Delete token and verify handle_async_deletion was called
with outbox_runner():
token.delete()
mock_delete_replica.assert_called_once_with(
apitoken_id=token_id,
region_name=mock.ANY,
)
@control_silo_test
| ApiTokenTest |
python | sympy__sympy | sympy/printing/tests/test_pycode.py | {
"start": 11335,
"end": 19525
} | class ____(Expr):
def _numpycode(self, printer):
return 'numpy'
def _mpmathcode(self, printer):
return 'mpmath'
def test_printmethod():
obj = CustomPrintedObject()
assert NumPyPrinter().doprint(obj) == 'numpy'
assert MpmathPrinter().doprint(obj) == 'mpmath'
def test_codegen_ast_nodes():
assert pycode(none) == 'None'
def test_issue_14283():
prntr = PythonCodePrinter()
assert prntr.doprint(zoo) == "math.nan"
assert prntr.doprint(-oo) == "float('-inf')"
def test_NumPyPrinter_print_seq():
n = NumPyPrinter()
assert n._print_seq(range(2)) == '(0, 1,)'
def test_issue_16535_16536():
from sympy.functions.special.gamma_functions import (lowergamma, uppergamma)
a = symbols('a')
expr1 = lowergamma(a, x)
expr2 = uppergamma(a, x)
prntr = SciPyPrinter()
assert prntr.doprint(expr1) == 'scipy.special.gamma(a)*scipy.special.gammainc(a, x)'
assert prntr.doprint(expr2) == 'scipy.special.gamma(a)*scipy.special.gammaincc(a, x)'
p_numpy = NumPyPrinter()
p_pycode = PythonCodePrinter({'strict': False})
for expr in [expr1, expr2]:
with raises(NotImplementedError):
p_numpy.doprint(expr1)
assert "Not supported" in p_pycode.doprint(expr)
def test_Integral():
from sympy.functions.elementary.exponential import exp
from sympy.integrals.integrals import Integral
single = Integral(exp(-x), (x, 0, oo))
double = Integral(x**2*exp(x*y), (x, -z, z), (y, 0, z))
indefinite = Integral(x**2, x)
evaluateat = Integral(x**2, (x, 1))
prntr = SciPyPrinter()
assert prntr.doprint(single) == 'scipy.integrate.quad(lambda x: numpy.exp(-x), 0, numpy.inf)[0]'
assert prntr.doprint(double) == 'scipy.integrate.nquad(lambda x, y: x**2*numpy.exp(x*y), ((-z, z), (0, z)))[0]'
raises(NotImplementedError, lambda: prntr.doprint(indefinite))
raises(NotImplementedError, lambda: prntr.doprint(evaluateat))
prntr = MpmathPrinter()
assert prntr.doprint(single) == 'mpmath.quad(lambda x: mpmath.exp(-x), (0, mpmath.inf))'
assert prntr.doprint(double) == 'mpmath.quad(lambda x, y: x**2*mpmath.exp(x*y), (-z, z), (0, z))'
raises(NotImplementedError, lambda: prntr.doprint(indefinite))
raises(NotImplementedError, lambda: prntr.doprint(evaluateat))
def test_fresnel_integrals():
from sympy.functions.special.error_functions import (fresnelc, fresnels)
expr1 = fresnelc(x)
expr2 = fresnels(x)
prntr = SciPyPrinter()
assert prntr.doprint(expr1) == 'scipy.special.fresnel(x)[1]'
assert prntr.doprint(expr2) == 'scipy.special.fresnel(x)[0]'
p_numpy = NumPyPrinter()
p_pycode = PythonCodePrinter()
p_mpmath = MpmathPrinter()
for expr in [expr1, expr2]:
with raises(NotImplementedError):
p_numpy.doprint(expr)
with raises(NotImplementedError):
p_pycode.doprint(expr)
assert p_mpmath.doprint(expr1) == 'mpmath.fresnelc(x)'
assert p_mpmath.doprint(expr2) == 'mpmath.fresnels(x)'
def test_beta():
from sympy.functions.special.beta_functions import beta
expr = beta(x, y)
prntr = SciPyPrinter()
assert prntr.doprint(expr) == 'scipy.special.beta(x, y)'
prntr = NumPyPrinter()
assert prntr.doprint(expr) == '(math.gamma(x)*math.gamma(y)/math.gamma(x + y))'
prntr = PythonCodePrinter()
assert prntr.doprint(expr) == '(math.gamma(x)*math.gamma(y)/math.gamma(x + y))'
prntr = PythonCodePrinter({'allow_unknown_functions': True})
assert prntr.doprint(expr) == '(math.gamma(x)*math.gamma(y)/math.gamma(x + y))'
prntr = MpmathPrinter()
assert prntr.doprint(expr) == 'mpmath.beta(x, y)'
def test_airy():
from sympy.functions.special.bessel import (airyai, airybi)
expr1 = airyai(x)
expr2 = airybi(x)
prntr = SciPyPrinter()
assert prntr.doprint(expr1) == 'scipy.special.airy(x)[0]'
assert prntr.doprint(expr2) == 'scipy.special.airy(x)[2]'
prntr = NumPyPrinter({'strict': False})
assert "Not supported" in prntr.doprint(expr1)
assert "Not supported" in prntr.doprint(expr2)
prntr = PythonCodePrinter({'strict': False})
assert "Not supported" in prntr.doprint(expr1)
assert "Not supported" in prntr.doprint(expr2)
def test_airy_prime():
from sympy.functions.special.bessel import (airyaiprime, airybiprime)
expr1 = airyaiprime(x)
expr2 = airybiprime(x)
prntr = SciPyPrinter()
assert prntr.doprint(expr1) == 'scipy.special.airy(x)[1]'
assert prntr.doprint(expr2) == 'scipy.special.airy(x)[3]'
prntr = NumPyPrinter({'strict': False})
assert "Not supported" in prntr.doprint(expr1)
assert "Not supported" in prntr.doprint(expr2)
prntr = PythonCodePrinter({'strict': False})
assert "Not supported" in prntr.doprint(expr1)
assert "Not supported" in prntr.doprint(expr2)
def test_numerical_accuracy_functions():
prntr = SciPyPrinter()
assert prntr.doprint(expm1(x)) == 'numpy.expm1(x)'
assert prntr.doprint(log1p(x)) == 'numpy.log1p(x)'
assert prntr.doprint(cosm1(x)) == 'scipy.special.cosm1(x)'
def test_array_printer():
A = ArraySymbol('A', (4,4,6,6,6))
I = IndexedBase('I')
i,j,k = Idx('i', (0,1)), Idx('j', (2,3)), Idx('k', (4,5))
prntr = NumPyPrinter()
assert prntr.doprint(ZeroArray(5)) == 'numpy.zeros((5,))'
assert prntr.doprint(OneArray(5)) == 'numpy.ones((5,))'
assert prntr.doprint(ArrayContraction(A, [2,3])) == 'numpy.einsum("abccd->abd", A)'
assert prntr.doprint(I) == 'I'
assert prntr.doprint(ArrayDiagonal(A, [2,3,4])) == 'numpy.einsum("abccc->abc", A)'
assert prntr.doprint(ArrayDiagonal(A, [0,1], [2,3])) == 'numpy.einsum("aabbc->cab", A)'
assert prntr.doprint(ArrayContraction(A, [2], [3])) == 'numpy.einsum("abcde->abe", A)'
assert prntr.doprint(Assignment(I[i,j,k], I[i,j,k])) == 'I = I'
prntr = TensorflowPrinter()
assert prntr.doprint(ZeroArray(5)) == 'tensorflow.zeros((5,))'
assert prntr.doprint(OneArray(5)) == 'tensorflow.ones((5,))'
assert prntr.doprint(ArrayContraction(A, [2,3])) == 'tensorflow.linalg.einsum("abccd->abd", A)'
assert prntr.doprint(I) == 'I'
assert prntr.doprint(ArrayDiagonal(A, [2,3,4])) == 'tensorflow.linalg.einsum("abccc->abc", A)'
assert prntr.doprint(ArrayDiagonal(A, [0,1], [2,3])) == 'tensorflow.linalg.einsum("aabbc->cab", A)'
assert prntr.doprint(ArrayContraction(A, [2], [3])) == 'tensorflow.linalg.einsum("abcde->abe", A)'
assert prntr.doprint(Assignment(I[i,j,k], I[i,j,k])) == 'I = I'
def test_custom_Derivative_methods():
class MyPrinter(SciPyPrinter):
def _print_Derivative_cosm1(self, args, seq_orders):
arg, = args
order, = seq_orders
return 'my_custom_cosm1(%s, deriv_order=%d)' % (self._print(arg), order)
def _print_Derivative_atan2(self, args, seq_orders):
arg1, arg2 = args
ord1, ord2 = seq_orders
return 'my_custom_atan2(%s, %s, deriv1=%d, deriv2=%d)' % (
self._print(arg1), self._print(arg2), ord1, ord2
)
p = MyPrinter()
cosm1_1 = cosm1(x).diff(x, evaluate=False)
assert p.doprint(cosm1_1) == 'my_custom_cosm1(x, deriv_order=1)'
atan2_2_3 = atan2(x, y).diff(x, 2, y, 3, evaluate=False)
assert p.doprint(atan2_2_3) == 'my_custom_atan2(x, y, deriv1=2, deriv2=3)'
try:
p.doprint(expm1(x).diff(x, evaluate=False))
except PrintMethodNotImplementedError as e:
assert '_print_Derivative_expm1' in repr(e)
else:
assert False # should have thrown
try:
p.doprint(Derivative(cosm1(x**2),x))
except ValueError as e:
assert '_print_Derivative(' in repr(e)
else:
assert False # should have thrown
def test_piecewise_assign_to():
x, a, b, c = symbols('x a b c')
pyprinter = PythonCodePrinter()
symprinter = SymPyPrinter()
expr = Piecewise((a + b, c), (0, True))
pyprint = pyprinter.doprint(expr, assign_to=x)
symprint = symprinter.doprint(expr, assign_to=x)
assert pyprint == 'x = ((a + b) if c else (0))'
assert symprint == 'x = ((a + b) if c else (0))'
| CustomPrintedObject |
python | scrapy__scrapy | tests/test_spidermiddleware.py | {
"start": 11338,
"end": 11458
} | class ____:
def process_spider_output(self, response, result):
return
| ProcessSpiderOutputNonIterableMiddleware |
python | allegroai__clearml | clearml/backend_api/services/v2_9/workers.py | {
"start": 58040,
"end": 60187
} | class ____(Response):
"""
Response of workers.get_metric_keys endpoint.
:param categories: List of unique metric categories found in the statistics of
the requested workers.
:type categories: Sequence[MetricsCategory]
"""
_service = "workers"
_action = "get_metric_keys"
_version = "2.9"
_schema = {
"definitions": {
"metrics_category": {
"properties": {
"metric_keys": {
"description": "The names of the metrics in the category.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"name": {
"description": "Name of the metrics category.",
"type": ["string", "null"],
},
},
"type": "object",
}
},
"properties": {
"categories": {
"description": "List of unique metric categories found in the statistics of the requested workers.",
"items": {"$ref": "#/definitions/metrics_category"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, categories: Optional[List[Any]] = None, **kwargs: Any) -> None:
super(GetMetricKeysResponse, self).__init__(**kwargs)
self.categories = categories
@schema_property("categories")
def categories(self) -> Optional[List[Any]]:
return self._property_categories
@categories.setter
def categories(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_categories = None
return
self.assert_isinstance(value, "categories", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [MetricsCategory.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "categories", MetricsCategory, is_array=True)
self._property_categories = value
| GetMetricKeysResponse |
python | aio-libs__aiohttp | aiohttp/connector.py | {
"start": 60107,
"end": 62645
} | class ____(BaseConnector):
"""Named pipe connector.
Only supported by the proactor event loop.
See also: https://docs.python.org/3/library/asyncio-eventloop.html
path - Windows named pipe path.
keepalive_timeout - (optional) Keep-alive timeout.
force_close - Set to True to force close and do reconnect
after each request (and between redirects).
limit - The total number of simultaneous connections.
limit_per_host - Number of simultaneous connections to one host.
loop - Optional event loop.
"""
allowed_protocol_schema_set = HIGH_LEVEL_SCHEMA_SET | frozenset({"npipe"})
def __init__(
self,
path: str,
force_close: bool = False,
keepalive_timeout: _SENTINEL | float | None = sentinel,
limit: int = 100,
limit_per_host: int = 0,
) -> None:
super().__init__(
force_close=force_close,
keepalive_timeout=keepalive_timeout,
limit=limit,
limit_per_host=limit_per_host,
)
if not isinstance(
self._loop,
asyncio.ProactorEventLoop, # type: ignore[attr-defined]
):
raise RuntimeError(
"Named Pipes only available in proactor loop under windows"
)
self._path = path
@property
def path(self) -> str:
"""Path to the named pipe."""
return self._path
async def _create_connection(
self, req: ClientRequest, traces: list["Trace"], timeout: "ClientTimeout"
) -> ResponseHandler:
try:
async with ceil_timeout(
timeout.sock_connect, ceil_threshold=timeout.ceil_threshold
):
_, proto = await self._loop.create_pipe_connection( # type: ignore[attr-defined]
self._factory, self._path
)
# the drain is required so that the connection_made is called
# and transport is set otherwise it is not set before the
# `assert conn.transport is not None`
# in client.py's _request method
await asyncio.sleep(0)
# other option is to manually set transport like
# `proto.transport = trans`
except OSError as exc:
if exc.errno is None and isinstance(exc, asyncio.TimeoutError):
raise
raise ClientConnectorError(req.connection_key, exc) from exc
return cast(ResponseHandler, proto)
| NamedPipeConnector |
python | networkx__networkx | networkx/algorithms/tests/test_reciprocity.py | {
"start": 39,
"end": 1296
} | class ____:
# test overall reciprocity by passing whole graph
def test_reciprocity_digraph(self):
DG = nx.DiGraph([(1, 2), (2, 1)])
reciprocity = nx.reciprocity(DG)
assert reciprocity == 1.0
# test empty graph's overall reciprocity which will throw an error
def test_overall_reciprocity_empty_graph(self):
with pytest.raises(nx.NetworkXError):
DG = nx.DiGraph()
nx.overall_reciprocity(DG)
# test for reciprocity for a list of nodes
def test_reciprocity_graph_nodes(self):
DG = nx.DiGraph([(1, 2), (2, 3), (3, 2)])
reciprocity = nx.reciprocity(DG, [1, 2])
expected_reciprocity = {1: 0.0, 2: 0.6666666666666666}
assert reciprocity == expected_reciprocity
# test for reciprocity for a single node
def test_reciprocity_graph_node(self):
DG = nx.DiGraph([(1, 2), (2, 3), (3, 2)])
reciprocity = nx.reciprocity(DG, 2)
assert reciprocity == 0.6666666666666666
# test for reciprocity for an isolated node
def test_reciprocity_graph_isolated_nodes(self):
with pytest.raises(nx.NetworkXError):
DG = nx.DiGraph([(1, 2)])
DG.add_node(4)
nx.reciprocity(DG, 4)
| TestReciprocity |
python | openai__openai-python | src/openai/resources/completions.py | {
"start": 29536,
"end": 58207
} | class ____(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncCompletionsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncCompletionsWithStreamingResponse(self)
@overload
async def create(
self,
*,
model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None],
best_of: Optional[int] | Omit = omit,
echo: Optional[bool] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
logit_bias: Optional[Dict[str, int]] | Omit = omit,
logprobs: Optional[int] | Omit = omit,
max_tokens: Optional[int] | Omit = omit,
n: Optional[int] | Omit = omit,
presence_penalty: Optional[float] | Omit = omit,
seed: Optional[int] | Omit = omit,
stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
stream: Optional[Literal[False]] | Omit = omit,
stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
suffix: Optional[str] | Omit = omit,
temperature: Optional[float] | Omit = omit,
top_p: Optional[float] | Omit = omit,
user: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Completion:
"""
Creates a completion for the provided prompt and parameters.
Args:
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
[Model overview](https://platform.openai.com/docs/models) for descriptions of
them.
prompt: The prompt(s) to generate completions for, encoded as a string, array of
strings, array of tokens, or array of token arrays.
Note that <|endoftext|> is the document separator that the model sees during
training, so if a prompt is not specified the model will generate as if from the
beginning of a new document.
best_of: Generates `best_of` completions server-side and returns the "best" (the one with
the highest log probability per token). Results cannot be streamed.
When used with `n`, `best_of` controls the number of candidate completions and
`n` specifies how many to return – `best_of` must be greater than `n`.
**Note:** Because this parameter generates many completions, it can quickly
consume your token quota. Use carefully and ensure that you have reasonable
settings for `max_tokens` and `stop`.
echo: Echo back the prompt in addition to the completion
frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
[See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
logit_bias: Modify the likelihood of specified tokens appearing in the completion.
Accepts a JSON object that maps tokens (specified by their token ID in the GPT
tokenizer) to an associated bias value from -100 to 100. You can use this
[tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
Mathematically, the bias is added to the logits generated by the model prior to
sampling. The exact effect will vary per model, but values between -1 and 1
should decrease or increase likelihood of selection; values like -100 or 100
should result in a ban or exclusive selection of the relevant token.
As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
from being generated.
logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
well the chosen tokens. For example, if `logprobs` is 5, the API will return a
list of the 5 most likely tokens. The API will always return the `logprob` of
the sampled token, so there may be up to `logprobs+1` elements in the response.
The maximum value for `logprobs` is 5.
max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
completion.
The token count of your prompt plus `max_tokens` cannot exceed the model's
context length.
[Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
for counting tokens.
n: How many completions to generate for each prompt.
**Note:** Because this parameter generates many completions, it can quickly
consume your token quota. Use carefully and ensure that you have reasonable
settings for `max_tokens` and `stop`.
presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
whether they appear in the text so far, increasing the model's likelihood to
talk about new topics.
[See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
seed: If specified, our system will make a best effort to sample deterministically,
such that repeated requests with the same `seed` and parameters should return
the same result.
Determinism is not guaranteed, and you should refer to the `system_fingerprint`
response parameter to monitor changes in the backend.
stop: Not supported with latest reasoning models `o3` and `o4-mini`.
Up to 4 sequences where the API will stop generating further tokens. The
returned text will not contain the stop sequence.
stream: Whether to stream back partial progress. If set, tokens will be sent as
data-only
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
as they become available, with the stream terminated by a `data: [DONE]`
message.
[Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
stream_options: Options for streaming response. Only set this when you set `stream: true`.
suffix: The suffix that comes after a completion of inserted text.
This parameter is only supported for `gpt-3.5-turbo-instruct`.
temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
make the output more random, while lower values like 0.2 will make it more
focused and deterministic.
We generally recommend altering this or `top_p` but not both.
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
means only the tokens comprising the top 10% probability mass are considered.
We generally recommend altering this or `temperature` but not both.
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@overload
async def create(
self,
*,
model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None],
stream: Literal[True],
best_of: Optional[int] | Omit = omit,
echo: Optional[bool] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
logit_bias: Optional[Dict[str, int]] | Omit = omit,
logprobs: Optional[int] | Omit = omit,
max_tokens: Optional[int] | Omit = omit,
n: Optional[int] | Omit = omit,
presence_penalty: Optional[float] | Omit = omit,
seed: Optional[int] | Omit = omit,
stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
suffix: Optional[str] | Omit = omit,
temperature: Optional[float] | Omit = omit,
top_p: Optional[float] | Omit = omit,
user: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncStream[Completion]:
"""
Creates a completion for the provided prompt and parameters.
Args:
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
[Model overview](https://platform.openai.com/docs/models) for descriptions of
them.
prompt: The prompt(s) to generate completions for, encoded as a string, array of
strings, array of tokens, or array of token arrays.
Note that <|endoftext|> is the document separator that the model sees during
training, so if a prompt is not specified the model will generate as if from the
beginning of a new document.
stream: Whether to stream back partial progress. If set, tokens will be sent as
data-only
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
as they become available, with the stream terminated by a `data: [DONE]`
message.
[Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
best_of: Generates `best_of` completions server-side and returns the "best" (the one with
the highest log probability per token). Results cannot be streamed.
When used with `n`, `best_of` controls the number of candidate completions and
`n` specifies how many to return – `best_of` must be greater than `n`.
**Note:** Because this parameter generates many completions, it can quickly
consume your token quota. Use carefully and ensure that you have reasonable
settings for `max_tokens` and `stop`.
echo: Echo back the prompt in addition to the completion
frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
[See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
logit_bias: Modify the likelihood of specified tokens appearing in the completion.
Accepts a JSON object that maps tokens (specified by their token ID in the GPT
tokenizer) to an associated bias value from -100 to 100. You can use this
[tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
Mathematically, the bias is added to the logits generated by the model prior to
sampling. The exact effect will vary per model, but values between -1 and 1
should decrease or increase likelihood of selection; values like -100 or 100
should result in a ban or exclusive selection of the relevant token.
As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
from being generated.
logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
well the chosen tokens. For example, if `logprobs` is 5, the API will return a
list of the 5 most likely tokens. The API will always return the `logprob` of
the sampled token, so there may be up to `logprobs+1` elements in the response.
The maximum value for `logprobs` is 5.
max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
completion.
The token count of your prompt plus `max_tokens` cannot exceed the model's
context length.
[Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
for counting tokens.
n: How many completions to generate for each prompt.
**Note:** Because this parameter generates many completions, it can quickly
consume your token quota. Use carefully and ensure that you have reasonable
settings for `max_tokens` and `stop`.
presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
whether they appear in the text so far, increasing the model's likelihood to
talk about new topics.
[See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
seed: If specified, our system will make a best effort to sample deterministically,
such that repeated requests with the same `seed` and parameters should return
the same result.
Determinism is not guaranteed, and you should refer to the `system_fingerprint`
response parameter to monitor changes in the backend.
stop: Not supported with latest reasoning models `o3` and `o4-mini`.
Up to 4 sequences where the API will stop generating further tokens. The
returned text will not contain the stop sequence.
stream_options: Options for streaming response. Only set this when you set `stream: true`.
suffix: The suffix that comes after a completion of inserted text.
This parameter is only supported for `gpt-3.5-turbo-instruct`.
temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
make the output more random, while lower values like 0.2 will make it more
focused and deterministic.
We generally recommend altering this or `top_p` but not both.
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
means only the tokens comprising the top 10% probability mass are considered.
We generally recommend altering this or `temperature` but not both.
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@overload
async def create(
self,
*,
model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None],
stream: bool,
best_of: Optional[int] | Omit = omit,
echo: Optional[bool] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
logit_bias: Optional[Dict[str, int]] | Omit = omit,
logprobs: Optional[int] | Omit = omit,
max_tokens: Optional[int] | Omit = omit,
n: Optional[int] | Omit = omit,
presence_penalty: Optional[float] | Omit = omit,
seed: Optional[int] | Omit = omit,
stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
suffix: Optional[str] | Omit = omit,
temperature: Optional[float] | Omit = omit,
top_p: Optional[float] | Omit = omit,
user: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Completion | AsyncStream[Completion]:
"""
Creates a completion for the provided prompt and parameters.
Args:
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
[Model overview](https://platform.openai.com/docs/models) for descriptions of
them.
prompt: The prompt(s) to generate completions for, encoded as a string, array of
strings, array of tokens, or array of token arrays.
Note that <|endoftext|> is the document separator that the model sees during
training, so if a prompt is not specified the model will generate as if from the
beginning of a new document.
stream: Whether to stream back partial progress. If set, tokens will be sent as
data-only
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
as they become available, with the stream terminated by a `data: [DONE]`
message.
[Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
best_of: Generates `best_of` completions server-side and returns the "best" (the one with
the highest log probability per token). Results cannot be streamed.
When used with `n`, `best_of` controls the number of candidate completions and
`n` specifies how many to return – `best_of` must be greater than `n`.
**Note:** Because this parameter generates many completions, it can quickly
consume your token quota. Use carefully and ensure that you have reasonable
settings for `max_tokens` and `stop`.
echo: Echo back the prompt in addition to the completion
frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
[See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
logit_bias: Modify the likelihood of specified tokens appearing in the completion.
Accepts a JSON object that maps tokens (specified by their token ID in the GPT
tokenizer) to an associated bias value from -100 to 100. You can use this
[tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
Mathematically, the bias is added to the logits generated by the model prior to
sampling. The exact effect will vary per model, but values between -1 and 1
should decrease or increase likelihood of selection; values like -100 or 100
should result in a ban or exclusive selection of the relevant token.
As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
from being generated.
logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
well the chosen tokens. For example, if `logprobs` is 5, the API will return a
list of the 5 most likely tokens. The API will always return the `logprob` of
the sampled token, so there may be up to `logprobs+1` elements in the response.
The maximum value for `logprobs` is 5.
max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
completion.
The token count of your prompt plus `max_tokens` cannot exceed the model's
context length.
[Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
for counting tokens.
n: How many completions to generate for each prompt.
**Note:** Because this parameter generates many completions, it can quickly
consume your token quota. Use carefully and ensure that you have reasonable
settings for `max_tokens` and `stop`.
presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
whether they appear in the text so far, increasing the model's likelihood to
talk about new topics.
[See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
seed: If specified, our system will make a best effort to sample deterministically,
such that repeated requests with the same `seed` and parameters should return
the same result.
Determinism is not guaranteed, and you should refer to the `system_fingerprint`
response parameter to monitor changes in the backend.
stop: Not supported with latest reasoning models `o3` and `o4-mini`.
Up to 4 sequences where the API will stop generating further tokens. The
returned text will not contain the stop sequence.
stream_options: Options for streaming response. Only set this when you set `stream: true`.
suffix: The suffix that comes after a completion of inserted text.
This parameter is only supported for `gpt-3.5-turbo-instruct`.
temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
make the output more random, while lower values like 0.2 will make it more
focused and deterministic.
We generally recommend altering this or `top_p` but not both.
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
means only the tokens comprising the top 10% probability mass are considered.
We generally recommend altering this or `temperature` but not both.
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@required_args(["model", "prompt"], ["model", "prompt", "stream"])
async def create(
self,
*,
model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None],
best_of: Optional[int] | Omit = omit,
echo: Optional[bool] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
logit_bias: Optional[Dict[str, int]] | Omit = omit,
logprobs: Optional[int] | Omit = omit,
max_tokens: Optional[int] | Omit = omit,
n: Optional[int] | Omit = omit,
presence_penalty: Optional[float] | Omit = omit,
seed: Optional[int] | Omit = omit,
stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
suffix: Optional[str] | Omit = omit,
temperature: Optional[float] | Omit = omit,
top_p: Optional[float] | Omit = omit,
user: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Completion | AsyncStream[Completion]:
return await self._post(
"/completions",
body=await async_maybe_transform(
{
"model": model,
"prompt": prompt,
"best_of": best_of,
"echo": echo,
"frequency_penalty": frequency_penalty,
"logit_bias": logit_bias,
"logprobs": logprobs,
"max_tokens": max_tokens,
"n": n,
"presence_penalty": presence_penalty,
"seed": seed,
"stop": stop,
"stream": stream,
"stream_options": stream_options,
"suffix": suffix,
"temperature": temperature,
"top_p": top_p,
"user": user,
},
completion_create_params.CompletionCreateParamsStreaming
if stream
else completion_create_params.CompletionCreateParamsNonStreaming,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Completion,
stream=stream or False,
stream_cls=AsyncStream[Completion],
)
| AsyncCompletions |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 36732,
"end": 38987
} | class ____(TypedDict, total=False):
type: Required[Literal['time']]
strict: bool
le: time
ge: time
lt: time
gt: time
tz_constraint: Union[Literal['aware', 'naive'], int]
microseconds_precision: Literal['truncate', 'error']
ref: str
metadata: dict[str, Any]
serialization: SerSchema
def time_schema(
*,
strict: bool | None = None,
le: time | None = None,
ge: time | None = None,
lt: time | None = None,
gt: time | None = None,
tz_constraint: Literal['aware', 'naive'] | int | None = None,
microseconds_precision: Literal['truncate', 'error'] = 'truncate',
ref: str | None = None,
metadata: dict[str, Any] | None = None,
serialization: SerSchema | None = None,
) -> TimeSchema:
"""
Returns a schema that matches a time value, e.g.:
```py
from datetime import time
from pydantic_core import SchemaValidator, core_schema
schema = core_schema.time_schema(le=time(12, 0, 0), ge=time(6, 0, 0))
v = SchemaValidator(schema)
assert v.validate_python(time(9, 0, 0)) == time(9, 0, 0)
```
Args:
strict: Whether the value should be a time or a value that can be converted to a time
le: The value must be less than or equal to this time
ge: The value must be greater than or equal to this time
lt: The value must be strictly less than this time
gt: The value must be strictly greater than this time
tz_constraint: The value must be timezone aware or naive, or an int to indicate required tz offset
microseconds_precision: The behavior when seconds have more than 6 digits or microseconds is too large
ref: optional unique identifier of the schema, used to reference the schema in other places
metadata: Any other information you want to include with the schema, not used by pydantic-core
serialization: Custom serialization schema
"""
return _dict_not_none(
type='time',
strict=strict,
le=le,
ge=ge,
lt=lt,
gt=gt,
tz_constraint=tz_constraint,
microseconds_precision=microseconds_precision,
ref=ref,
metadata=metadata,
serialization=serialization,
)
| TimeSchema |
python | spack__spack | lib/spack/spack/compilers/adaptor.py | {
"start": 6583,
"end": 7150
} | class ____(lang.DeprecatedProperty):
def __init__(self) -> None:
super().__init__(name="compiler")
def factory(self, instance, owner) -> CompilerAdaptor:
spec = instance.spec
if not spec.concrete:
raise ValueError("Can only get a compiler for a concrete package.")
compilers = {}
for language in Languages:
deps = spec.dependencies(virtuals=[language.value])
if deps:
compilers[language] = deps[0]
return CompilerAdaptor(instance, compilers)
| DeprecatedCompiler |
python | doocs__leetcode | solution/2500-2599/2536.Increment Submatrices by One/Solution.py | {
"start": 0,
"end": 716
} | class ____:
def rangeAddQueries(self, n: int, queries: List[List[int]]) -> List[List[int]]:
mat = [[0] * n for _ in range(n)]
for x1, y1, x2, y2 in queries:
mat[x1][y1] += 1
if x2 + 1 < n:
mat[x2 + 1][y1] -= 1
if y2 + 1 < n:
mat[x1][y2 + 1] -= 1
if x2 + 1 < n and y2 + 1 < n:
mat[x2 + 1][y2 + 1] += 1
for i in range(n):
for j in range(n):
if i:
mat[i][j] += mat[i - 1][j]
if j:
mat[i][j] += mat[i][j - 1]
if i and j:
mat[i][j] -= mat[i - 1][j - 1]
return mat
| Solution |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/skill_create_params.py | {
"start": 381,
"end": 978
} | class ____(TypedDict, total=False):
display_title: Optional[str]
"""Display title for the skill.
This is a human-readable label that is not included in the prompt sent to the
model.
"""
files: Optional[SequenceNotStr[FileTypes]]
"""Files to upload for the skill.
All files must be in the same top-level directory and must include a SKILL.md
file at the root of that directory.
"""
betas: Annotated[List[AnthropicBetaParam], PropertyInfo(alias="anthropic-beta")]
"""Optional header to specify the beta version(s) you want to use."""
| SkillCreateParams |
python | openai__gym | gym/vector/utils/misc.py | {
"start": 117,
"end": 1587
} | class ____:
"""Wrapper that uses cloudpickle to pickle and unpickle the result."""
def __init__(self, fn: callable):
"""Cloudpickle wrapper for a function."""
self.fn = fn
def __getstate__(self):
"""Get the state using `cloudpickle.dumps(self.fn)`."""
import cloudpickle
return cloudpickle.dumps(self.fn)
def __setstate__(self, ob):
"""Sets the state with obs."""
import pickle
self.fn = pickle.loads(ob)
def __call__(self):
"""Calls the function `self.fn` with no arguments."""
return self.fn()
@contextlib.contextmanager
def clear_mpi_env_vars():
"""Clears the MPI of environment variables.
`from mpi4py import MPI` will call `MPI_Init` by default.
If the child process has MPI environment variables, MPI will think that the child process
is an MPI process just like the parent and do bad things such as hang.
This context manager is a hacky way to clear those environment variables
temporarily such as when we are starting multiprocessing Processes.
Yields:
Yields for the context manager
"""
removed_environment = {}
for k, v in list(os.environ.items()):
for prefix in ["OMPI_", "PMI_"]:
if k.startswith(prefix):
removed_environment[k] = v
del os.environ[k]
try:
yield
finally:
os.environ.update(removed_environment)
| CloudpickleWrapper |
python | PrefectHQ__prefect | tests/cli/test_typer_utils.py | {
"start": 129,
"end": 2194
} | class ____:
singular_subcommand = PrefectTyper(name="singular-subcommand")
pluralized_subcommand = PrefectTyper(name="pluralized-subcommand")
app.add_typer(singular_subcommand)
app.add_typer(pluralized_subcommand, aliases=["pluralized-subcommands"])
def test_pluralized_subcommands_have_multiple_valid_invocations(self):
invoke_and_assert(["pluralized-subcommand", "--help"], expected_code=0)
invoke_and_assert(["pluralized-subcommands", "--help"], expected_code=0)
def test_unpluralized_subcommands_have_one_invocation(self):
invoke_and_assert(["singular-subcommand", "--help"], expected_code=0)
invoke_and_assert(["singular-subcommands", "--help"], expected_code=2)
app.add_typer(self.singular_subcommand, aliases=["singular-subcommands"])
invoke_and_assert(["singular-subcommands", "--help"], expected_code=0)
def test_registering_a_command_is_propogated_to_parents(self):
@self.pluralized_subcommand.command()
def exists():
print("hello")
invoke_and_assert(
["pluralized-subcommand", "exists"],
expected_output_contains="hello",
expected_code=0,
)
invoke_and_assert(
["pluralized-subcommands", "exists"],
expected_output_contains="hello",
expected_code=0,
)
def test_command_with_alias(self):
# Add a command with an alias
@self.pluralized_subcommand.command(aliases=["test-cmd-alias"])
def test_cmd():
print("Test Command Executed")
# Test invoking with the original command name
invoke_and_assert(
["pluralized-subcommand", "test-cmd"],
expected_output_contains="Test Command Executed",
expected_code=0,
)
# Test invoking with the alias
invoke_and_assert(
["pluralized-subcommands", "test-cmd-alias"],
expected_output_contains="Test Command Executed",
expected_code=0,
)
| TestPrefectTyper |
python | pandas-dev__pandas | pandas/tests/indexes/period/methods/test_factorize.py | {
"start": 82,
"end": 1425
} | class ____:
def test_factorize_period(self):
idx1 = PeriodIndex(
["2014-01", "2014-01", "2014-02", "2014-02", "2014-03", "2014-03"],
freq="M",
)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = PeriodIndex(["2014-01", "2014-02", "2014-03"], freq="M")
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
def test_factorize_period_nonmonotonic(self):
idx2 = PeriodIndex(
["2014-03", "2014-03", "2014-02", "2014-01", "2014-03", "2014-01"],
freq="M",
)
exp_idx = PeriodIndex(["2014-01", "2014-02", "2014-03"], freq="M")
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp)
arr, idx = idx2.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)
exp_idx = PeriodIndex(["2014-03", "2014-02", "2014-01"], freq="M")
arr, idx = idx2.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
| TestFactorize |
python | catalyst-team__catalyst | catalyst/contrib/datasets/cifar.py | {
"start": 366,
"end": 1565
} | class ____(object):
def __init__(
self,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> None:
self.transform = transform
self.target_transform = target_transform
def __call__(self, input: Any, target: Any) -> Tuple[Any, Any]:
if self.transform is not None:
input = self.transform(input)
if self.target_transform is not None:
target = self.target_transform(target)
return input, target
def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:
lines = transform.__repr__().splitlines()
return ["{}{}".format(head, lines[0])] + [
"{}{}".format(" " * len(head), line) for line in lines[1:]
]
def __repr__(self) -> str:
body = [self.__class__.__name__]
if self.transform is not None:
body += self._format_transform_repr(self.transform, "Transform: ")
if self.target_transform is not None:
body += self._format_transform_repr(
self.target_transform, "Target transform: "
)
return "\n".join(body)
| StandardTransform |
python | psf__requests | tests/test_requests.py | {
"start": 1886,
"end": 81009
} | class ____:
digest_auth_algo = ("MD5", "SHA-256", "SHA-512")
def test_entry_points(self):
requests.session
requests.session().get
requests.session().head
requests.get
requests.head
requests.put
requests.patch
requests.post
# Not really an entry point, but people rely on it.
from requests.packages.urllib3.poolmanager import PoolManager # noqa:F401
@pytest.mark.parametrize(
"exception, url",
(
(MissingSchema, "hiwpefhipowhefopw"),
(InvalidSchema, "localhost:3128"),
(InvalidSchema, "localhost.localdomain:3128/"),
(InvalidSchema, "10.122.1.1:3128/"),
(InvalidURL, "http://"),
(InvalidURL, "http://*example.com"),
(InvalidURL, "http://.example.com"),
),
)
def test_invalid_url(self, exception, url):
with pytest.raises(exception):
requests.get(url)
def test_basic_building(self):
req = requests.Request()
req.url = "http://kennethreitz.org/"
req.data = {"life": "42"}
pr = req.prepare()
assert pr.url == req.url
assert pr.body == "life=42"
@pytest.mark.parametrize("method", ("GET", "HEAD"))
def test_no_content_length(self, httpbin, method):
req = requests.Request(method, httpbin(method.lower())).prepare()
assert "Content-Length" not in req.headers
@pytest.mark.parametrize("method", ("POST", "PUT", "PATCH", "OPTIONS"))
def test_no_body_content_length(self, httpbin, method):
req = requests.Request(method, httpbin(method.lower())).prepare()
assert req.headers["Content-Length"] == "0"
@pytest.mark.parametrize("method", ("POST", "PUT", "PATCH", "OPTIONS"))
def test_empty_content_length(self, httpbin, method):
req = requests.Request(method, httpbin(method.lower()), data="").prepare()
assert req.headers["Content-Length"] == "0"
def test_override_content_length(self, httpbin):
headers = {"Content-Length": "not zero"}
r = requests.Request("POST", httpbin("post"), headers=headers).prepare()
assert "Content-Length" in r.headers
assert r.headers["Content-Length"] == "not zero"
def test_path_is_not_double_encoded(self):
request = requests.Request("GET", "http://0.0.0.0/get/test case").prepare()
assert request.path_url == "/get/test%20case"
@pytest.mark.parametrize(
"url, expected",
(
(
"http://example.com/path#fragment",
"http://example.com/path?a=b#fragment",
),
(
"http://example.com/path?key=value#fragment",
"http://example.com/path?key=value&a=b#fragment",
),
),
)
def test_params_are_added_before_fragment(self, url, expected):
request = requests.Request("GET", url, params={"a": "b"}).prepare()
assert request.url == expected
def test_params_original_order_is_preserved_by_default(self):
param_ordered_dict = collections.OrderedDict(
(("z", 1), ("a", 1), ("k", 1), ("d", 1))
)
session = requests.Session()
request = requests.Request(
"GET", "http://example.com/", params=param_ordered_dict
)
prep = session.prepare_request(request)
assert prep.url == "http://example.com/?z=1&a=1&k=1&d=1"
def test_params_bytes_are_encoded(self):
request = requests.Request(
"GET", "http://example.com", params=b"test=foo"
).prepare()
assert request.url == "http://example.com/?test=foo"
def test_binary_put(self):
request = requests.Request(
"PUT", "http://example.com", data="ööö".encode()
).prepare()
assert isinstance(request.body, bytes)
def test_whitespaces_are_removed_from_url(self):
# Test for issue #3696
request = requests.Request("GET", " http://example.com").prepare()
assert request.url == "http://example.com/"
@pytest.mark.parametrize("scheme", ("http://", "HTTP://", "hTTp://", "HttP://"))
def test_mixed_case_scheme_acceptable(self, httpbin, scheme):
s = requests.Session()
s.proxies = getproxies()
parts = urlparse(httpbin("get"))
url = scheme + parts.netloc + parts.path
r = requests.Request("GET", url)
r = s.send(r.prepare())
assert r.status_code == 200, f"failed for scheme {scheme}"
def test_HTTP_200_OK_GET_ALTERNATIVE(self, httpbin):
r = requests.Request("GET", httpbin("get"))
s = requests.Session()
s.proxies = getproxies()
r = s.send(r.prepare())
assert r.status_code == 200
def test_HTTP_302_ALLOW_REDIRECT_GET(self, httpbin):
r = requests.get(httpbin("redirect", "1"))
assert r.status_code == 200
assert r.history[0].status_code == 302
assert r.history[0].is_redirect
def test_HTTP_307_ALLOW_REDIRECT_POST(self, httpbin):
r = requests.post(
httpbin("redirect-to"),
data="test",
params={"url": "post", "status_code": 307},
)
assert r.status_code == 200
assert r.history[0].status_code == 307
assert r.history[0].is_redirect
assert r.json()["data"] == "test"
def test_HTTP_307_ALLOW_REDIRECT_POST_WITH_SEEKABLE(self, httpbin):
byte_str = b"test"
r = requests.post(
httpbin("redirect-to"),
data=io.BytesIO(byte_str),
params={"url": "post", "status_code": 307},
)
assert r.status_code == 200
assert r.history[0].status_code == 307
assert r.history[0].is_redirect
assert r.json()["data"] == byte_str.decode("utf-8")
def test_HTTP_302_TOO_MANY_REDIRECTS(self, httpbin):
try:
requests.get(httpbin("relative-redirect", "50"))
except TooManyRedirects as e:
url = httpbin("relative-redirect", "20")
assert e.request.url == url
assert e.response.url == url
assert len(e.response.history) == 30
else:
pytest.fail("Expected redirect to raise TooManyRedirects but it did not")
def test_HTTP_302_TOO_MANY_REDIRECTS_WITH_PARAMS(self, httpbin):
s = requests.session()
s.max_redirects = 5
try:
s.get(httpbin("relative-redirect", "50"))
except TooManyRedirects as e:
url = httpbin("relative-redirect", "45")
assert e.request.url == url
assert e.response.url == url
assert len(e.response.history) == 5
else:
pytest.fail(
"Expected custom max number of redirects to be respected but was not"
)
def test_http_301_changes_post_to_get(self, httpbin):
r = requests.post(httpbin("status", "301"))
assert r.status_code == 200
assert r.request.method == "GET"
assert r.history[0].status_code == 301
assert r.history[0].is_redirect
def test_http_301_doesnt_change_head_to_get(self, httpbin):
r = requests.head(httpbin("status", "301"), allow_redirects=True)
print(r.content)
assert r.status_code == 200
assert r.request.method == "HEAD"
assert r.history[0].status_code == 301
assert r.history[0].is_redirect
def test_http_302_changes_post_to_get(self, httpbin):
r = requests.post(httpbin("status", "302"))
assert r.status_code == 200
assert r.request.method == "GET"
assert r.history[0].status_code == 302
assert r.history[0].is_redirect
def test_http_302_doesnt_change_head_to_get(self, httpbin):
r = requests.head(httpbin("status", "302"), allow_redirects=True)
assert r.status_code == 200
assert r.request.method == "HEAD"
assert r.history[0].status_code == 302
assert r.history[0].is_redirect
def test_http_303_changes_post_to_get(self, httpbin):
r = requests.post(httpbin("status", "303"))
assert r.status_code == 200
assert r.request.method == "GET"
assert r.history[0].status_code == 303
assert r.history[0].is_redirect
def test_http_303_doesnt_change_head_to_get(self, httpbin):
r = requests.head(httpbin("status", "303"), allow_redirects=True)
assert r.status_code == 200
assert r.request.method == "HEAD"
assert r.history[0].status_code == 303
assert r.history[0].is_redirect
def test_header_and_body_removal_on_redirect(self, httpbin):
purged_headers = ("Content-Length", "Content-Type")
ses = requests.Session()
req = requests.Request("POST", httpbin("post"), data={"test": "data"})
prep = ses.prepare_request(req)
resp = ses.send(prep)
# Mimic a redirect response
resp.status_code = 302
resp.headers["location"] = "get"
# Run request through resolve_redirects
next_resp = next(ses.resolve_redirects(resp, prep))
assert next_resp.request.body is None
for header in purged_headers:
assert header not in next_resp.request.headers
def test_transfer_enc_removal_on_redirect(self, httpbin):
purged_headers = ("Transfer-Encoding", "Content-Type")
ses = requests.Session()
req = requests.Request("POST", httpbin("post"), data=(b"x" for x in range(1)))
prep = ses.prepare_request(req)
assert "Transfer-Encoding" in prep.headers
# Create Response to avoid https://github.com/kevin1024/pytest-httpbin/issues/33
resp = requests.Response()
resp.raw = io.BytesIO(b"the content")
resp.request = prep
setattr(resp.raw, "release_conn", lambda *args: args)
# Mimic a redirect response
resp.status_code = 302
resp.headers["location"] = httpbin("get")
# Run request through resolve_redirect
next_resp = next(ses.resolve_redirects(resp, prep))
assert next_resp.request.body is None
for header in purged_headers:
assert header not in next_resp.request.headers
def test_fragment_maintained_on_redirect(self, httpbin):
fragment = "#view=edit&token=hunter2"
r = requests.get(httpbin("redirect-to?url=get") + fragment)
assert len(r.history) > 0
assert r.history[0].request.url == httpbin("redirect-to?url=get") + fragment
assert r.url == httpbin("get") + fragment
def test_HTTP_200_OK_GET_WITH_PARAMS(self, httpbin):
heads = {"User-agent": "Mozilla/5.0"}
r = requests.get(httpbin("user-agent"), headers=heads)
assert heads["User-agent"] in r.text
assert r.status_code == 200
def test_HTTP_200_OK_GET_WITH_MIXED_PARAMS(self, httpbin):
heads = {"User-agent": "Mozilla/5.0"}
r = requests.get(
httpbin("get") + "?test=true", params={"q": "test"}, headers=heads
)
assert r.status_code == 200
def test_set_cookie_on_301(self, httpbin):
s = requests.session()
url = httpbin("cookies/set?foo=bar")
s.get(url)
assert s.cookies["foo"] == "bar"
def test_cookie_sent_on_redirect(self, httpbin):
s = requests.session()
s.get(httpbin("cookies/set?foo=bar"))
r = s.get(httpbin("redirect/1")) # redirects to httpbin('get')
assert "Cookie" in r.json()["headers"]
def test_cookie_removed_on_expire(self, httpbin):
s = requests.session()
s.get(httpbin("cookies/set?foo=bar"))
assert s.cookies["foo"] == "bar"
s.get(
httpbin("response-headers"),
params={"Set-Cookie": "foo=deleted; expires=Thu, 01-Jan-1970 00:00:01 GMT"},
)
assert "foo" not in s.cookies
def test_cookie_quote_wrapped(self, httpbin):
s = requests.session()
s.get(httpbin('cookies/set?foo="bar:baz"'))
assert s.cookies["foo"] == '"bar:baz"'
def test_cookie_persists_via_api(self, httpbin):
s = requests.session()
r = s.get(httpbin("redirect/1"), cookies={"foo": "bar"})
assert "foo" in r.request.headers["Cookie"]
assert "foo" in r.history[0].request.headers["Cookie"]
def test_request_cookie_overrides_session_cookie(self, httpbin):
s = requests.session()
s.cookies["foo"] = "bar"
r = s.get(httpbin("cookies"), cookies={"foo": "baz"})
assert r.json()["cookies"]["foo"] == "baz"
# Session cookie should not be modified
assert s.cookies["foo"] == "bar"
def test_request_cookies_not_persisted(self, httpbin):
s = requests.session()
s.get(httpbin("cookies"), cookies={"foo": "baz"})
# Sending a request with cookies should not add cookies to the session
assert not s.cookies
def test_generic_cookiejar_works(self, httpbin):
cj = cookielib.CookieJar()
cookiejar_from_dict({"foo": "bar"}, cj)
s = requests.session()
s.cookies = cj
r = s.get(httpbin("cookies"))
# Make sure the cookie was sent
assert r.json()["cookies"]["foo"] == "bar"
# Make sure the session cj is still the custom one
assert s.cookies is cj
def test_param_cookiejar_works(self, httpbin):
cj = cookielib.CookieJar()
cookiejar_from_dict({"foo": "bar"}, cj)
s = requests.session()
r = s.get(httpbin("cookies"), cookies=cj)
# Make sure the cookie was sent
assert r.json()["cookies"]["foo"] == "bar"
def test_cookielib_cookiejar_on_redirect(self, httpbin):
"""Tests resolve_redirect doesn't fail when merging cookies
with non-RequestsCookieJar cookiejar.
See GH #3579
"""
cj = cookiejar_from_dict({"foo": "bar"}, cookielib.CookieJar())
s = requests.Session()
s.cookies = cookiejar_from_dict({"cookie": "tasty"})
# Prepare request without using Session
req = requests.Request("GET", httpbin("headers"), cookies=cj)
prep_req = req.prepare()
# Send request and simulate redirect
resp = s.send(prep_req)
resp.status_code = 302
resp.headers["location"] = httpbin("get")
redirects = s.resolve_redirects(resp, prep_req)
resp = next(redirects)
# Verify CookieJar isn't being converted to RequestsCookieJar
assert isinstance(prep_req._cookies, cookielib.CookieJar)
assert isinstance(resp.request._cookies, cookielib.CookieJar)
assert not isinstance(resp.request._cookies, requests.cookies.RequestsCookieJar)
cookies = {}
for c in resp.request._cookies:
cookies[c.name] = c.value
assert cookies["foo"] == "bar"
assert cookies["cookie"] == "tasty"
def test_requests_in_history_are_not_overridden(self, httpbin):
resp = requests.get(httpbin("redirect/3"))
urls = [r.url for r in resp.history]
req_urls = [r.request.url for r in resp.history]
assert urls == req_urls
def test_history_is_always_a_list(self, httpbin):
"""Show that even with redirects, Response.history is always a list."""
resp = requests.get(httpbin("get"))
assert isinstance(resp.history, list)
resp = requests.get(httpbin("redirect/1"))
assert isinstance(resp.history, list)
assert not isinstance(resp.history, tuple)
def test_headers_on_session_with_None_are_not_sent(self, httpbin):
"""Do not send headers in Session.headers with None values."""
ses = requests.Session()
ses.headers["Accept-Encoding"] = None
req = requests.Request("GET", httpbin("get"))
prep = ses.prepare_request(req)
assert "Accept-Encoding" not in prep.headers
def test_headers_preserve_order(self, httpbin):
"""Preserve order when headers provided as OrderedDict."""
ses = requests.Session()
ses.headers = collections.OrderedDict()
ses.headers["Accept-Encoding"] = "identity"
ses.headers["First"] = "1"
ses.headers["Second"] = "2"
headers = collections.OrderedDict([("Third", "3"), ("Fourth", "4")])
headers["Fifth"] = "5"
headers["Second"] = "222"
req = requests.Request("GET", httpbin("get"), headers=headers)
prep = ses.prepare_request(req)
items = list(prep.headers.items())
assert items[0] == ("Accept-Encoding", "identity")
assert items[1] == ("First", "1")
assert items[2] == ("Second", "222")
assert items[3] == ("Third", "3")
assert items[4] == ("Fourth", "4")
assert items[5] == ("Fifth", "5")
@pytest.mark.parametrize("key", ("User-agent", "user-agent"))
def test_user_agent_transfers(self, httpbin, key):
heads = {key: "Mozilla/5.0 (github.com/psf/requests)"}
r = requests.get(httpbin("user-agent"), headers=heads)
assert heads[key] in r.text
def test_HTTP_200_OK_HEAD(self, httpbin):
r = requests.head(httpbin("get"))
assert r.status_code == 200
def test_HTTP_200_OK_PUT(self, httpbin):
r = requests.put(httpbin("put"))
assert r.status_code == 200
def test_BASICAUTH_TUPLE_HTTP_200_OK_GET(self, httpbin):
auth = ("user", "pass")
url = httpbin("basic-auth", "user", "pass")
r = requests.get(url, auth=auth)
assert r.status_code == 200
r = requests.get(url)
assert r.status_code == 401
s = requests.session()
s.auth = auth
r = s.get(url)
assert r.status_code == 200
@pytest.mark.parametrize(
"username, password",
(
("user", "pass"),
("имя".encode(), "пароль".encode()),
(42, 42),
(None, None),
),
)
def test_set_basicauth(self, httpbin, username, password):
auth = (username, password)
url = httpbin("get")
r = requests.Request("GET", url, auth=auth)
p = r.prepare()
assert p.headers["Authorization"] == _basic_auth_str(username, password)
def test_basicauth_encodes_byte_strings(self):
"""Ensure b'test' formats as the byte string "test" rather
than the unicode string "b'test'" in Python 3.
"""
auth = (b"\xc5\xafsername", b"test\xc6\xb6")
r = requests.Request("GET", "http://localhost", auth=auth)
p = r.prepare()
assert p.headers["Authorization"] == "Basic xa9zZXJuYW1lOnRlc3TGtg=="
@pytest.mark.parametrize(
"url, exception",
(
# Connecting to an unknown domain should raise a ConnectionError
("http://doesnotexist.google.com", ConnectionError),
# Connecting to an invalid port should raise a ConnectionError
("http://localhost:1", ConnectionError),
# Inputing a URL that cannot be parsed should raise an InvalidURL error
("http://fe80::5054:ff:fe5a:fc0", InvalidURL),
),
)
def test_errors(self, url, exception):
with pytest.raises(exception):
requests.get(url, timeout=1)
def test_proxy_error(self):
# any proxy related error (address resolution, no route to host, etc) should result in a ProxyError
with pytest.raises(ProxyError):
requests.get(
"http://localhost:1", proxies={"http": "non-resolvable-address"}
)
def test_proxy_error_on_bad_url(self, httpbin, httpbin_secure):
with pytest.raises(InvalidProxyURL):
requests.get(httpbin_secure(), proxies={"https": "http:/badproxyurl:3128"})
with pytest.raises(InvalidProxyURL):
requests.get(httpbin(), proxies={"http": "http://:8080"})
with pytest.raises(InvalidProxyURL):
requests.get(httpbin_secure(), proxies={"https": "https://"})
with pytest.raises(InvalidProxyURL):
requests.get(httpbin(), proxies={"http": "http:///example.com:8080"})
def test_respect_proxy_env_on_send_self_prepared_request(self, httpbin):
with override_environ(http_proxy=INVALID_PROXY):
with pytest.raises(ProxyError):
session = requests.Session()
request = requests.Request("GET", httpbin())
session.send(request.prepare())
def test_respect_proxy_env_on_send_session_prepared_request(self, httpbin):
with override_environ(http_proxy=INVALID_PROXY):
with pytest.raises(ProxyError):
session = requests.Session()
request = requests.Request("GET", httpbin())
prepared = session.prepare_request(request)
session.send(prepared)
def test_respect_proxy_env_on_send_with_redirects(self, httpbin):
with override_environ(http_proxy=INVALID_PROXY):
with pytest.raises(ProxyError):
session = requests.Session()
url = httpbin("redirect/1")
print(url)
request = requests.Request("GET", url)
session.send(request.prepare())
def test_respect_proxy_env_on_get(self, httpbin):
with override_environ(http_proxy=INVALID_PROXY):
with pytest.raises(ProxyError):
session = requests.Session()
session.get(httpbin())
def test_respect_proxy_env_on_request(self, httpbin):
with override_environ(http_proxy=INVALID_PROXY):
with pytest.raises(ProxyError):
session = requests.Session()
session.request(method="GET", url=httpbin())
def test_proxy_authorization_preserved_on_request(self, httpbin):
proxy_auth_value = "Bearer XXX"
session = requests.Session()
session.headers.update({"Proxy-Authorization": proxy_auth_value})
resp = session.request(method="GET", url=httpbin("get"))
sent_headers = resp.json().get("headers", {})
assert sent_headers.get("Proxy-Authorization") == proxy_auth_value
@pytest.mark.parametrize(
"url,has_proxy_auth",
(
("http://example.com", True),
("https://example.com", False),
),
)
def test_proxy_authorization_not_appended_to_https_request(
self, url, has_proxy_auth
):
session = requests.Session()
proxies = {
"http": "http://test:pass@localhost:8080",
"https": "http://test:pass@localhost:8090",
}
req = requests.Request("GET", url)
prep = req.prepare()
session.rebuild_proxies(prep, proxies)
assert ("Proxy-Authorization" in prep.headers) is has_proxy_auth
def test_basicauth_with_netrc(self, httpbin):
auth = ("user", "pass")
wrong_auth = ("wronguser", "wrongpass")
url = httpbin("basic-auth", "user", "pass")
old_auth = requests.sessions.get_netrc_auth
try:
def get_netrc_auth_mock(url):
return auth
requests.sessions.get_netrc_auth = get_netrc_auth_mock
# Should use netrc and work.
r = requests.get(url)
assert r.status_code == 200
# Given auth should override and fail.
r = requests.get(url, auth=wrong_auth)
assert r.status_code == 401
s = requests.session()
# Should use netrc and work.
r = s.get(url)
assert r.status_code == 200
# Given auth should override and fail.
s.auth = wrong_auth
r = s.get(url)
assert r.status_code == 401
finally:
requests.sessions.get_netrc_auth = old_auth
def test_basicauth_with_netrc_leak(self, httpbin):
url1 = httpbin("basic-auth", "user", "pass")
url = url1[len("http://") :]
domain = url.split(":")[0]
url = f"http://example.com:@{url}"
netrc_file = ""
with tempfile.NamedTemporaryFile(mode="w", delete=False) as fp:
fp.write("machine example.com\n")
fp.write("login wronguser\n")
fp.write("password wrongpass\n")
fp.write(f"machine {domain}\n")
fp.write("login user\n")
fp.write("password pass\n")
fp.close()
netrc_file = fp.name
old_netrc = os.environ.get("NETRC", "")
os.environ["NETRC"] = netrc_file
try:
# Should use netrc
# Make sure that we don't use the example.com credentails
# for the request
r = requests.get(url)
assert r.status_code == 200
finally:
os.environ["NETRC"] = old_netrc
os.unlink(netrc_file)
def test_DIGEST_HTTP_200_OK_GET(self, httpbin):
for authtype in self.digest_auth_algo:
auth = HTTPDigestAuth("user", "pass")
url = httpbin("digest-auth", "auth", "user", "pass", authtype, "never")
r = requests.get(url, auth=auth)
assert r.status_code == 200
r = requests.get(url)
assert r.status_code == 401
print(r.headers["WWW-Authenticate"])
s = requests.session()
s.auth = HTTPDigestAuth("user", "pass")
r = s.get(url)
assert r.status_code == 200
def test_DIGEST_AUTH_RETURNS_COOKIE(self, httpbin):
for authtype in self.digest_auth_algo:
url = httpbin("digest-auth", "auth", "user", "pass", authtype)
auth = HTTPDigestAuth("user", "pass")
r = requests.get(url)
assert r.cookies["fake"] == "fake_value"
r = requests.get(url, auth=auth)
assert r.status_code == 200
def test_DIGEST_AUTH_SETS_SESSION_COOKIES(self, httpbin):
for authtype in self.digest_auth_algo:
url = httpbin("digest-auth", "auth", "user", "pass", authtype)
auth = HTTPDigestAuth("user", "pass")
s = requests.Session()
s.get(url, auth=auth)
assert s.cookies["fake"] == "fake_value"
def test_DIGEST_STREAM(self, httpbin):
for authtype in self.digest_auth_algo:
auth = HTTPDigestAuth("user", "pass")
url = httpbin("digest-auth", "auth", "user", "pass", authtype)
r = requests.get(url, auth=auth, stream=True)
assert r.raw.read() != b""
r = requests.get(url, auth=auth, stream=False)
assert r.raw.read() == b""
def test_DIGESTAUTH_WRONG_HTTP_401_GET(self, httpbin):
for authtype in self.digest_auth_algo:
auth = HTTPDigestAuth("user", "wrongpass")
url = httpbin("digest-auth", "auth", "user", "pass", authtype)
r = requests.get(url, auth=auth)
assert r.status_code == 401
r = requests.get(url)
assert r.status_code == 401
s = requests.session()
s.auth = auth
r = s.get(url)
assert r.status_code == 401
def test_DIGESTAUTH_QUOTES_QOP_VALUE(self, httpbin):
for authtype in self.digest_auth_algo:
auth = HTTPDigestAuth("user", "pass")
url = httpbin("digest-auth", "auth", "user", "pass", authtype)
r = requests.get(url, auth=auth)
assert '"auth"' in r.request.headers["Authorization"]
def test_POSTBIN_GET_POST_FILES(self, httpbin):
url = httpbin("post")
requests.post(url).raise_for_status()
post1 = requests.post(url, data={"some": "data"})
assert post1.status_code == 200
with open("requirements-dev.txt") as f:
post2 = requests.post(url, files={"some": f})
assert post2.status_code == 200
post4 = requests.post(url, data='[{"some": "json"}]')
assert post4.status_code == 200
with pytest.raises(ValueError):
requests.post(url, files=["bad file data"])
def test_invalid_files_input(self, httpbin):
url = httpbin("post")
post = requests.post(url, files={"random-file-1": None, "random-file-2": 1})
assert b'name="random-file-1"' not in post.request.body
assert b'name="random-file-2"' in post.request.body
def test_POSTBIN_SEEKED_OBJECT_WITH_NO_ITER(self, httpbin):
class TestStream:
def __init__(self, data):
self.data = data.encode()
self.length = len(self.data)
self.index = 0
def __len__(self):
return self.length
def read(self, size=None):
if size:
ret = self.data[self.index : self.index + size]
self.index += size
else:
ret = self.data[self.index :]
self.index = self.length
return ret
def tell(self):
return self.index
def seek(self, offset, where=0):
if where == 0:
self.index = offset
elif where == 1:
self.index += offset
elif where == 2:
self.index = self.length + offset
test = TestStream("test")
post1 = requests.post(httpbin("post"), data=test)
assert post1.status_code == 200
assert post1.json()["data"] == "test"
test = TestStream("test")
test.seek(2)
post2 = requests.post(httpbin("post"), data=test)
assert post2.status_code == 200
assert post2.json()["data"] == "st"
def test_POSTBIN_GET_POST_FILES_WITH_DATA(self, httpbin):
url = httpbin("post")
requests.post(url).raise_for_status()
post1 = requests.post(url, data={"some": "data"})
assert post1.status_code == 200
with open("requirements-dev.txt") as f:
post2 = requests.post(url, data={"some": "data"}, files={"some": f})
assert post2.status_code == 200
post4 = requests.post(url, data='[{"some": "json"}]')
assert post4.status_code == 200
with pytest.raises(ValueError):
requests.post(url, files=["bad file data"])
def test_post_with_custom_mapping(self, httpbin):
class CustomMapping(MutableMapping):
def __init__(self, *args, **kwargs):
self.data = dict(*args, **kwargs)
def __delitem__(self, key):
del self.data[key]
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
data = CustomMapping({"some": "data"})
url = httpbin("post")
found_json = requests.post(url, data=data).json().get("form")
assert found_json == {"some": "data"}
def test_conflicting_post_params(self, httpbin):
url = httpbin("post")
with open("requirements-dev.txt") as f:
with pytest.raises(ValueError):
requests.post(url, data='[{"some": "data"}]', files={"some": f})
def test_request_ok_set(self, httpbin):
r = requests.get(httpbin("status", "404"))
assert not r.ok
def test_status_raising(self, httpbin):
r = requests.get(httpbin("status", "404"))
with pytest.raises(requests.exceptions.HTTPError):
r.raise_for_status()
r = requests.get(httpbin("status", "500"))
assert not r.ok
def test_decompress_gzip(self, httpbin):
r = requests.get(httpbin("gzip"))
r.content.decode("ascii")
@pytest.mark.parametrize(
"url, params",
(
("/get", {"foo": "føø"}),
("/get", {"føø": "føø"}),
("/get", {"føø": "føø"}),
("/get", {"foo": "foo"}),
("ø", {"foo": "foo"}),
),
)
def test_unicode_get(self, httpbin, url, params):
requests.get(httpbin(url), params=params)
def test_unicode_header_name(self, httpbin):
requests.put(
httpbin("put"),
headers={"Content-Type": "application/octet-stream"},
data="\xff",
) # compat.str is unicode.
def test_pyopenssl_redirect(self, httpbin_secure, httpbin_ca_bundle):
requests.get(httpbin_secure("status", "301"), verify=httpbin_ca_bundle)
def test_invalid_ca_certificate_path(self, httpbin_secure):
INVALID_PATH = "/garbage"
with pytest.raises(IOError) as e:
requests.get(httpbin_secure(), verify=INVALID_PATH)
assert str(
e.value
) == "Could not find a suitable TLS CA certificate bundle, invalid path: {}".format(
INVALID_PATH
)
def test_invalid_ssl_certificate_files(self, httpbin_secure):
INVALID_PATH = "/garbage"
with pytest.raises(IOError) as e:
requests.get(httpbin_secure(), cert=INVALID_PATH)
assert str(
e.value
) == "Could not find the TLS certificate file, invalid path: {}".format(
INVALID_PATH
)
with pytest.raises(IOError) as e:
requests.get(httpbin_secure(), cert=(".", INVALID_PATH))
assert str(e.value) == (
f"Could not find the TLS key file, invalid path: {INVALID_PATH}"
)
@pytest.mark.parametrize(
"env, expected",
(
({}, True),
({"REQUESTS_CA_BUNDLE": "/some/path"}, "/some/path"),
({"REQUESTS_CA_BUNDLE": ""}, True),
({"CURL_CA_BUNDLE": "/some/path"}, "/some/path"),
({"CURL_CA_BUNDLE": ""}, True),
({"REQUESTS_CA_BUNDLE": "", "CURL_CA_BUNDLE": ""}, True),
(
{
"REQUESTS_CA_BUNDLE": "/some/path",
"CURL_CA_BUNDLE": "/curl/path",
},
"/some/path",
),
(
{
"REQUESTS_CA_BUNDLE": "",
"CURL_CA_BUNDLE": "/curl/path",
},
"/curl/path",
),
),
)
def test_env_cert_bundles(self, httpbin, env, expected):
s = requests.Session()
with mock.patch("os.environ", env):
settings = s.merge_environment_settings(
url=httpbin("get"), proxies={}, stream=False, verify=True, cert=None
)
assert settings["verify"] == expected
def test_http_with_certificate(self, httpbin):
r = requests.get(httpbin(), cert=".")
assert r.status_code == 200
@pytest.mark.skipif(
SNIMissingWarning is None,
reason="urllib3 2.0 removed that warning and errors out instead",
)
def test_https_warnings(self, nosan_server):
"""warnings are emitted with requests.get"""
host, port, ca_bundle = nosan_server
if HAS_MODERN_SSL or HAS_PYOPENSSL:
warnings_expected = ("SubjectAltNameWarning",)
else:
warnings_expected = (
"SNIMissingWarning",
"InsecurePlatformWarning",
"SubjectAltNameWarning",
)
with pytest.warns() as warning_records:
warnings.simplefilter("always")
requests.get(f"https://localhost:{port}/", verify=ca_bundle)
warning_records = [
item
for item in warning_records
if item.category.__name__ != "ResourceWarning"
]
warnings_category = tuple(item.category.__name__ for item in warning_records)
assert warnings_category == warnings_expected
def test_certificate_failure(self, httpbin_secure):
"""
When underlying SSL problems occur, an SSLError is raised.
"""
with pytest.raises(RequestsSSLError):
# Our local httpbin does not have a trusted CA, so this call will
# fail if we use our default trust bundle.
requests.get(httpbin_secure("status", "200"))
def test_urlencoded_get_query_multivalued_param(self, httpbin):
r = requests.get(httpbin("get"), params={"test": ["foo", "baz"]})
assert r.status_code == 200
assert r.url == httpbin("get?test=foo&test=baz")
def test_form_encoded_post_query_multivalued_element(self, httpbin):
r = requests.Request(
method="POST", url=httpbin("post"), data=dict(test=["foo", "baz"])
)
prep = r.prepare()
assert prep.body == "test=foo&test=baz"
def test_different_encodings_dont_break_post(self, httpbin):
with open(__file__, "rb") as f:
r = requests.post(
httpbin("post"),
data={"stuff": json.dumps({"a": 123})},
params={"blah": "asdf1234"},
files={"file": ("test_requests.py", f)},
)
assert r.status_code == 200
@pytest.mark.parametrize(
"data",
(
{"stuff": "ëlïxr"},
{"stuff": "ëlïxr".encode()},
{"stuff": "elixr"},
{"stuff": b"elixr"},
),
)
def test_unicode_multipart_post(self, httpbin, data):
with open(__file__, "rb") as f:
r = requests.post(
httpbin("post"),
data=data,
files={"file": ("test_requests.py", f)},
)
assert r.status_code == 200
def test_unicode_multipart_post_fieldnames(self, httpbin):
filename = os.path.splitext(__file__)[0] + ".py"
with open(filename, "rb") as f:
r = requests.Request(
method="POST",
url=httpbin("post"),
data={b"stuff": "elixr"},
files={"file": ("test_requests.py", f)},
)
prep = r.prepare()
assert b'name="stuff"' in prep.body
assert b"name=\"b'stuff'\"" not in prep.body
def test_unicode_method_name(self, httpbin):
with open(__file__, "rb") as f:
files = {"file": f}
r = requests.request(
method="POST",
url=httpbin("post"),
files=files,
)
assert r.status_code == 200
def test_unicode_method_name_with_request_object(self, httpbin):
s = requests.Session()
with open(__file__, "rb") as f:
files = {"file": f}
req = requests.Request("POST", httpbin("post"), files=files)
prep = s.prepare_request(req)
assert isinstance(prep.method, builtin_str)
assert prep.method == "POST"
resp = s.send(prep)
assert resp.status_code == 200
def test_non_prepared_request_error(self):
s = requests.Session()
req = requests.Request("POST", "/")
with pytest.raises(ValueError) as e:
s.send(req)
assert str(e.value) == "You can only send PreparedRequests."
def test_custom_content_type(self, httpbin):
with open(__file__, "rb") as f1:
with open(__file__, "rb") as f2:
data = {"stuff": json.dumps({"a": 123})}
files = {
"file1": ("test_requests.py", f1),
"file2": ("test_requests", f2, "text/py-content-type"),
}
r = requests.post(httpbin("post"), data=data, files=files)
assert r.status_code == 200
assert b"text/py-content-type" in r.request.body
def test_hook_receives_request_arguments(self, httpbin):
def hook(resp, **kwargs):
assert resp is not None
assert kwargs != {}
s = requests.Session()
r = requests.Request("GET", httpbin(), hooks={"response": hook})
prep = s.prepare_request(r)
s.send(prep)
def test_session_hooks_are_used_with_no_request_hooks(self, httpbin):
def hook(*args, **kwargs):
pass
s = requests.Session()
s.hooks["response"].append(hook)
r = requests.Request("GET", httpbin())
prep = s.prepare_request(r)
assert prep.hooks["response"] != []
assert prep.hooks["response"] == [hook]
def test_session_hooks_are_overridden_by_request_hooks(self, httpbin):
def hook1(*args, **kwargs):
pass
def hook2(*args, **kwargs):
pass
assert hook1 is not hook2
s = requests.Session()
s.hooks["response"].append(hook2)
r = requests.Request("GET", httpbin(), hooks={"response": [hook1]})
prep = s.prepare_request(r)
assert prep.hooks["response"] == [hook1]
def test_prepared_request_hook(self, httpbin):
def hook(resp, **kwargs):
resp.hook_working = True
return resp
req = requests.Request("GET", httpbin(), hooks={"response": hook})
prep = req.prepare()
s = requests.Session()
s.proxies = getproxies()
resp = s.send(prep)
assert hasattr(resp, "hook_working")
def test_prepared_from_session(self, httpbin):
class DummyAuth(requests.auth.AuthBase):
def __call__(self, r):
r.headers["Dummy-Auth-Test"] = "dummy-auth-test-ok"
return r
req = requests.Request("GET", httpbin("headers"))
assert not req.auth
s = requests.Session()
s.auth = DummyAuth()
prep = s.prepare_request(req)
resp = s.send(prep)
assert resp.json()["headers"]["Dummy-Auth-Test"] == "dummy-auth-test-ok"
def test_prepare_request_with_bytestring_url(self):
req = requests.Request("GET", b"https://httpbin.org/")
s = requests.Session()
prep = s.prepare_request(req)
assert prep.url == "https://httpbin.org/"
def test_request_with_bytestring_host(self, httpbin):
s = requests.Session()
resp = s.request(
"GET",
httpbin("cookies/set?cookie=value"),
allow_redirects=False,
headers={"Host": b"httpbin.org"},
)
assert resp.cookies.get("cookie") == "value"
    def test_links(self):
        """Response.links parses the RFC 5988 ``Link`` header into a dict
        keyed by each link's ``rel`` value."""
        r = requests.Response()
        # Representative GitHub API response headers, including a two-link
        # pagination Link header.
        r.headers = {
            "cache-control": "public, max-age=60, s-maxage=60",
            "connection": "keep-alive",
            "content-encoding": "gzip",
            "content-type": "application/json; charset=utf-8",
            "date": "Sat, 26 Jan 2013 16:47:56 GMT",
            "etag": '"6ff6a73c0e446c1f61614769e3ceb778"',
            "last-modified": "Sat, 26 Jan 2013 16:22:39 GMT",
            "link": (
                "<https://api.github.com/users/kennethreitz/repos?"
                'page=2&per_page=10>; rel="next", <https://api.github.'
                "com/users/kennethreitz/repos?page=7&per_page=10>; "
                ' rel="last"'
            ),
            "server": "GitHub.com",
            "status": "200 OK",
            "vary": "Accept",
            "x-content-type-options": "nosniff",
            "x-github-media-type": "github.beta",
            "x-ratelimit-limit": "60",
            "x-ratelimit-remaining": "57",
        }
        assert r.links["next"]["rel"] == "next"
def test_cookie_parameters(self):
key = "some_cookie"
value = "some_value"
secure = True
domain = "test.com"
rest = {"HttpOnly": True}
jar = requests.cookies.RequestsCookieJar()
jar.set(key, value, secure=secure, domain=domain, rest=rest)
assert len(jar) == 1
assert "some_cookie" in jar
cookie = list(jar)[0]
assert cookie.secure == secure
assert cookie.domain == domain
assert cookie._rest["HttpOnly"] == rest["HttpOnly"]
def test_cookie_as_dict_keeps_len(self):
key = "some_cookie"
value = "some_value"
key1 = "some_cookie1"
value1 = "some_value1"
jar = requests.cookies.RequestsCookieJar()
jar.set(key, value)
jar.set(key1, value1)
d1 = dict(jar)
d2 = dict(jar.iteritems())
d3 = dict(jar.items())
assert len(jar) == 2
assert len(d1) == 2
assert len(d2) == 2
assert len(d3) == 2
def test_cookie_as_dict_keeps_items(self):
key = "some_cookie"
value = "some_value"
key1 = "some_cookie1"
value1 = "some_value1"
jar = requests.cookies.RequestsCookieJar()
jar.set(key, value)
jar.set(key1, value1)
d1 = dict(jar)
d2 = dict(jar.iteritems())
d3 = dict(jar.items())
assert d1["some_cookie"] == "some_value"
assert d2["some_cookie"] == "some_value"
assert d3["some_cookie1"] == "some_value1"
def test_cookie_as_dict_keys(self):
key = "some_cookie"
value = "some_value"
key1 = "some_cookie1"
value1 = "some_value1"
jar = requests.cookies.RequestsCookieJar()
jar.set(key, value)
jar.set(key1, value1)
keys = jar.keys()
assert keys == list(keys)
# make sure one can use keys multiple times
assert list(keys) == list(keys)
def test_cookie_as_dict_values(self):
key = "some_cookie"
value = "some_value"
key1 = "some_cookie1"
value1 = "some_value1"
jar = requests.cookies.RequestsCookieJar()
jar.set(key, value)
jar.set(key1, value1)
values = jar.values()
assert values == list(values)
# make sure one can use values multiple times
assert list(values) == list(values)
def test_cookie_as_dict_items(self):
key = "some_cookie"
value = "some_value"
key1 = "some_cookie1"
value1 = "some_value1"
jar = requests.cookies.RequestsCookieJar()
jar.set(key, value)
jar.set(key1, value1)
items = jar.items()
assert items == list(items)
# make sure one can use items multiple times
assert list(items) == list(items)
    def test_cookie_duplicate_names_different_domains(self):
        """Same-named cookies may coexist when scoped to different domains;
        an unscoped get() is then ambiguous and raises CookieConflictError."""
        key = "some_cookie"
        value = "some_value"
        domain1 = "test1.com"
        domain2 = "test2.com"
        jar = requests.cookies.RequestsCookieJar()
        jar.set(key, value, domain=domain1)
        jar.set(key, value, domain=domain2)
        assert key in jar
        items = jar.items()
        assert len(items) == 2

        # Verify that CookieConflictError is raised if domain is not specified
        with pytest.raises(requests.cookies.CookieConflictError):
            jar.get(key)

        # Verify that CookieConflictError is not raised if domain is specified
        cookie = jar.get(key, domain=domain1)
        assert cookie == value
def test_cookie_duplicate_names_raises_cookie_conflict_error(self):
key = "some_cookie"
value = "some_value"
path = "some_path"
jar = requests.cookies.RequestsCookieJar()
jar.set(key, value, path=path)
jar.set(key, value)
with pytest.raises(requests.cookies.CookieConflictError):
jar.get(key)
def test_cookie_policy_copy(self):
class MyCookiePolicy(cookielib.DefaultCookiePolicy):
pass
jar = requests.cookies.RequestsCookieJar()
jar.set_policy(MyCookiePolicy())
assert isinstance(jar.copy().get_policy(), MyCookiePolicy)
def test_time_elapsed_blank(self, httpbin):
r = requests.get(httpbin("get"))
td = r.elapsed
total_seconds = (
td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6
) / 10**6
assert total_seconds > 0.0
def test_empty_response_has_content_none(self):
r = requests.Response()
assert r.content is None
def test_response_is_iterable(self):
r = requests.Response()
io = StringIO.StringIO("abc")
read_ = io.read
def read_mock(amt, decode_content=None):
return read_(amt)
setattr(io, "read", read_mock)
r.raw = io
assert next(iter(r))
io.close()
def test_response_decode_unicode(self):
"""When called with decode_unicode, Response.iter_content should always
return unicode.
"""
r = requests.Response()
r._content_consumed = True
r._content = b"the content"
r.encoding = "ascii"
chunks = r.iter_content(decode_unicode=True)
assert all(isinstance(chunk, str) for chunk in chunks)
# also for streaming
r = requests.Response()
r.raw = io.BytesIO(b"the content")
r.encoding = "ascii"
chunks = r.iter_content(decode_unicode=True)
assert all(isinstance(chunk, str) for chunk in chunks)
def test_response_reason_unicode(self):
# check for unicode HTTP status
r = requests.Response()
r.url = "unicode URL"
r.reason = "Komponenttia ei löydy".encode()
r.status_code = 404
r.encoding = None
assert not r.ok # old behaviour - crashes here
def test_response_reason_unicode_fallback(self):
# check raise_status falls back to ISO-8859-1
r = requests.Response()
r.url = "some url"
reason = "Komponenttia ei löydy"
r.reason = reason.encode("latin-1")
r.status_code = 500
r.encoding = None
with pytest.raises(requests.exceptions.HTTPError) as e:
r.raise_for_status()
assert reason in e.value.args[0]
def test_response_chunk_size_type(self):
"""Ensure that chunk_size is passed as None or an integer, otherwise
raise a TypeError.
"""
r = requests.Response()
r.raw = io.BytesIO(b"the content")
chunks = r.iter_content(1)
assert all(len(chunk) == 1 for chunk in chunks)
r = requests.Response()
r.raw = io.BytesIO(b"the content")
chunks = r.iter_content(None)
assert list(chunks) == [b"the content"]
r = requests.Response()
r.raw = io.BytesIO(b"the content")
with pytest.raises(TypeError):
chunks = r.iter_content("1024")
@pytest.mark.parametrize(
"exception, args, expected",
(
(urllib3.exceptions.ProtocolError, tuple(), ChunkedEncodingError),
(urllib3.exceptions.DecodeError, tuple(), ContentDecodingError),
(urllib3.exceptions.ReadTimeoutError, (None, "", ""), ConnectionError),
(urllib3.exceptions.SSLError, tuple(), RequestsSSLError),
),
)
def test_iter_content_wraps_exceptions(self, httpbin, exception, args, expected):
r = requests.Response()
r.raw = mock.Mock()
# ReadTimeoutError can't be initialized by mock
# so we'll manually create the instance with args
r.raw.stream.side_effect = exception(*args)
with pytest.raises(expected):
next(r.iter_content(1024))
def test_request_and_response_are_pickleable(self, httpbin):
r = requests.get(httpbin("get"))
# verify we can pickle the original request
assert pickle.loads(pickle.dumps(r.request))
# verify we can pickle the response and that we have access to
# the original request.
pr = pickle.loads(pickle.dumps(r))
assert r.request.url == pr.request.url
assert r.request.headers == pr.request.headers
def test_prepared_request_is_pickleable(self, httpbin):
p = requests.Request("GET", httpbin("get")).prepare()
# Verify PreparedRequest can be pickled and unpickled
r = pickle.loads(pickle.dumps(p))
assert r.url == p.url
assert r.headers == p.headers
assert r.body == p.body
# Verify unpickled PreparedRequest sends properly
s = requests.Session()
resp = s.send(r)
assert resp.status_code == 200
def test_prepared_request_with_file_is_pickleable(self, httpbin):
with open(__file__, "rb") as f:
r = requests.Request("POST", httpbin("post"), files={"file": f})
p = r.prepare()
# Verify PreparedRequest can be pickled and unpickled
r = pickle.loads(pickle.dumps(p))
assert r.url == p.url
assert r.headers == p.headers
assert r.body == p.body
# Verify unpickled PreparedRequest sends properly
s = requests.Session()
resp = s.send(r)
assert resp.status_code == 200
def test_prepared_request_with_hook_is_pickleable(self, httpbin):
r = requests.Request("GET", httpbin("get"), hooks=default_hooks())
p = r.prepare()
# Verify PreparedRequest can be pickled
r = pickle.loads(pickle.dumps(p))
assert r.url == p.url
assert r.headers == p.headers
assert r.body == p.body
assert r.hooks == p.hooks
# Verify unpickled PreparedRequest sends properly
s = requests.Session()
resp = s.send(r)
assert resp.status_code == 200
def test_cannot_send_unprepared_requests(self, httpbin):
r = requests.Request(url=httpbin())
with pytest.raises(ValueError):
requests.Session().send(r)
def test_http_error(self):
error = requests.exceptions.HTTPError()
assert not error.response
response = requests.Response()
error = requests.exceptions.HTTPError(response=response)
assert error.response == response
error = requests.exceptions.HTTPError("message", response=response)
assert str(error) == "message"
assert error.response == response
def test_session_pickling(self, httpbin):
r = requests.Request("GET", httpbin("get"))
s = requests.Session()
s = pickle.loads(pickle.dumps(s))
s.proxies = getproxies()
r = s.send(r.prepare())
assert r.status_code == 200
def test_fixes_1329(self, httpbin):
"""Ensure that header updates are done case-insensitively."""
s = requests.Session()
s.headers.update({"ACCEPT": "BOGUS"})
s.headers.update({"accept": "application/json"})
r = s.get(httpbin("get"))
headers = r.request.headers
assert headers["accept"] == "application/json"
assert headers["Accept"] == "application/json"
assert headers["ACCEPT"] == "application/json"
def test_uppercase_scheme_redirect(self, httpbin):
parts = urlparse(httpbin("html"))
url = "HTTP://" + parts.netloc + parts.path
r = requests.get(httpbin("redirect-to"), params={"url": url})
assert r.status_code == 200
assert r.url.lower() == url.lower()
def test_transport_adapter_ordering(self):
s = requests.Session()
order = ["https://", "http://"]
assert order == list(s.adapters)
s.mount("http://git", HTTPAdapter())
s.mount("http://github", HTTPAdapter())
s.mount("http://github.com", HTTPAdapter())
s.mount("http://github.com/about/", HTTPAdapter())
order = [
"http://github.com/about/",
"http://github.com",
"http://github",
"http://git",
"https://",
"http://",
]
assert order == list(s.adapters)
s.mount("http://gittip", HTTPAdapter())
s.mount("http://gittip.com", HTTPAdapter())
s.mount("http://gittip.com/about/", HTTPAdapter())
order = [
"http://github.com/about/",
"http://gittip.com/about/",
"http://github.com",
"http://gittip.com",
"http://github",
"http://gittip",
"http://git",
"https://",
"http://",
]
assert order == list(s.adapters)
s2 = requests.Session()
s2.adapters = {"http://": HTTPAdapter()}
s2.mount("https://", HTTPAdapter())
assert "http://" in s2.adapters
assert "https://" in s2.adapters
def test_session_get_adapter_prefix_matching(self):
prefix = "https://example.com"
more_specific_prefix = prefix + "/some/path"
url_matching_only_prefix = prefix + "/another/path"
url_matching_more_specific_prefix = more_specific_prefix + "/longer/path"
url_not_matching_prefix = "https://another.example.com/"
s = requests.Session()
prefix_adapter = HTTPAdapter()
more_specific_prefix_adapter = HTTPAdapter()
s.mount(prefix, prefix_adapter)
s.mount(more_specific_prefix, more_specific_prefix_adapter)
assert s.get_adapter(url_matching_only_prefix) is prefix_adapter
assert (
s.get_adapter(url_matching_more_specific_prefix)
is more_specific_prefix_adapter
)
assert s.get_adapter(url_not_matching_prefix) not in (
prefix_adapter,
more_specific_prefix_adapter,
)
def test_session_get_adapter_prefix_matching_mixed_case(self):
mixed_case_prefix = "hTtPs://eXamPle.CoM/MixEd_CAse_PREfix"
url_matching_prefix = mixed_case_prefix + "/full_url"
s = requests.Session()
my_adapter = HTTPAdapter()
s.mount(mixed_case_prefix, my_adapter)
assert s.get_adapter(url_matching_prefix) is my_adapter
def test_session_get_adapter_prefix_matching_is_case_insensitive(self):
mixed_case_prefix = "hTtPs://eXamPle.CoM/MixEd_CAse_PREfix"
url_matching_prefix_with_different_case = (
"HtTpS://exaMPLe.cOm/MiXeD_caSE_preFIX/another_url"
)
s = requests.Session()
my_adapter = HTTPAdapter()
s.mount(mixed_case_prefix, my_adapter)
assert s.get_adapter(url_matching_prefix_with_different_case) is my_adapter
def test_session_get_adapter_prefix_with_trailing_slash(self):
# from issue #6935
prefix = "https://example.com/" # trailing slash
url_matching_prefix = "https://example.com/some/path"
url_not_matching_prefix = "https://example.com.other.com/some/path"
s = requests.Session()
adapter = HTTPAdapter()
s.mount(prefix, adapter)
assert s.get_adapter(url_matching_prefix) is adapter
assert s.get_adapter(url_not_matching_prefix) is not adapter
def test_session_get_adapter_prefix_without_trailing_slash(self):
# from issue #6935
prefix = "https://example.com" # no trailing slash
url_matching_prefix = "https://example.com/some/path"
url_extended_hostname = "https://example.com.other.com/some/path"
s = requests.Session()
adapter = HTTPAdapter()
s.mount(prefix, adapter)
assert s.get_adapter(url_matching_prefix) is adapter
assert s.get_adapter(url_extended_hostname) is adapter
def test_header_remove_is_case_insensitive(self, httpbin):
# From issue #1321
s = requests.Session()
s.headers["foo"] = "bar"
r = s.get(httpbin("get"), headers={"FOO": None})
assert "foo" not in r.request.headers
def test_params_are_merged_case_sensitive(self, httpbin):
s = requests.Session()
s.params["foo"] = "bar"
r = s.get(httpbin("get"), params={"FOO": "bar"})
assert r.json()["args"] == {"foo": "bar", "FOO": "bar"}
def test_long_authinfo_in_url(self):
url = "http://{}:{}@{}:9000/path?query#frag".format(
"E8A3BE87-9E3F-4620-8858-95478E385B5B",
"EA770032-DA4D-4D84-8CE9-29C6D910BF1E",
"exactly-------------sixty-----------three------------characters",
)
r = requests.Request("GET", url).prepare()
assert r.url == url
def test_header_keys_are_native(self, httpbin):
headers = {"unicode": "blah", b"byte": "blah"}
r = requests.Request("GET", httpbin("get"), headers=headers)
p = r.prepare()
# This is testing that they are builtin strings. A bit weird, but there
# we go.
assert "unicode" in p.headers.keys()
assert "byte" in p.headers.keys()
def test_header_validation(self, httpbin):
"""Ensure prepare_headers regex isn't flagging valid header contents."""
valid_headers = {
"foo": "bar baz qux",
"bar": b"fbbq",
"baz": "",
"qux": "1",
}
r = requests.get(httpbin("get"), headers=valid_headers)
for key in valid_headers.keys():
assert valid_headers[key] == r.request.headers[key]
@pytest.mark.parametrize(
"invalid_header, key",
(
({"foo": 3}, "foo"),
({"bar": {"foo": "bar"}}, "bar"),
({"baz": ["foo", "bar"]}, "baz"),
),
)
def test_header_value_not_str(self, httpbin, invalid_header, key):
"""Ensure the header value is of type string or bytes as
per discussion in GH issue #3386
"""
with pytest.raises(InvalidHeader) as excinfo:
requests.get(httpbin("get"), headers=invalid_header)
assert key in str(excinfo.value)
@pytest.mark.parametrize(
"invalid_header",
(
{"foo": "bar\r\nbaz: qux"},
{"foo": "bar\n\rbaz: qux"},
{"foo": "bar\nbaz: qux"},
{"foo": "bar\rbaz: qux"},
{"fo\ro": "bar"},
{"fo\r\no": "bar"},
{"fo\n\ro": "bar"},
{"fo\no": "bar"},
),
)
def test_header_no_return_chars(self, httpbin, invalid_header):
"""Ensure that a header containing return character sequences raise an
exception. Otherwise, multiple headers are created from single string.
"""
with pytest.raises(InvalidHeader):
requests.get(httpbin("get"), headers=invalid_header)
@pytest.mark.parametrize(
"invalid_header",
(
{" foo": "bar"},
{"\tfoo": "bar"},
{" foo": "bar"},
{"foo": " bar"},
{"foo": " bar"},
{"foo": "\tbar"},
{" ": "bar"},
),
)
def test_header_no_leading_space(self, httpbin, invalid_header):
"""Ensure headers containing leading whitespace raise
InvalidHeader Error before sending.
"""
with pytest.raises(InvalidHeader):
requests.get(httpbin("get"), headers=invalid_header)
def test_header_with_subclass_types(self, httpbin):
"""If the subclasses does not behave *exactly* like
the base bytes/str classes, this is not supported.
This test is for backwards compatibility.
"""
class MyString(str):
pass
class MyBytes(bytes):
pass
r_str = requests.get(httpbin("get"), headers={MyString("x-custom"): "myheader"})
assert r_str.request.headers["x-custom"] == "myheader"
r_bytes = requests.get(
httpbin("get"), headers={MyBytes(b"x-custom"): b"myheader"}
)
assert r_bytes.request.headers["x-custom"] == b"myheader"
r_mixed = requests.get(
httpbin("get"), headers={MyString("x-custom"): MyBytes(b"myheader")}
)
assert r_mixed.request.headers["x-custom"] == b"myheader"
@pytest.mark.parametrize("files", ("foo", b"foo", bytearray(b"foo")))
def test_can_send_objects_with_files(self, httpbin, files):
data = {"a": "this is a string"}
files = {"b": files}
r = requests.Request("POST", httpbin("post"), data=data, files=files)
p = r.prepare()
assert "multipart/form-data" in p.headers["Content-Type"]
def test_can_send_file_object_with_non_string_filename(self, httpbin):
f = io.BytesIO()
f.name = 2
r = requests.Request("POST", httpbin("post"), files={"f": f})
p = r.prepare()
assert "multipart/form-data" in p.headers["Content-Type"]
def test_autoset_header_values_are_native(self, httpbin):
data = "this is a string"
length = "16"
req = requests.Request("POST", httpbin("post"), data=data)
p = req.prepare()
assert p.headers["Content-Length"] == length
def test_nonhttp_schemes_dont_check_URLs(self):
test_urls = (
"data:image/gif;base64,R0lGODlhAQABAHAAACH5BAUAAAAALAAAAAABAAEAAAICRAEAOw==",
"file:///etc/passwd",
"magnet:?xt=urn:btih:be08f00302bc2d1d3cfa3af02024fa647a271431",
)
for test_url in test_urls:
req = requests.Request("GET", test_url)
preq = req.prepare()
assert test_url == preq.url
def test_auth_is_stripped_on_http_downgrade(
self, httpbin, httpbin_secure, httpbin_ca_bundle
):
r = requests.get(
httpbin_secure("redirect-to"),
params={"url": httpbin("get")},
auth=("user", "pass"),
verify=httpbin_ca_bundle,
)
assert r.history[0].request.headers["Authorization"]
assert "Authorization" not in r.request.headers
def test_auth_is_retained_for_redirect_on_host(self, httpbin):
r = requests.get(httpbin("redirect/1"), auth=("user", "pass"))
h1 = r.history[0].request.headers["Authorization"]
h2 = r.request.headers["Authorization"]
assert h1 == h2
def test_should_strip_auth_host_change(self):
s = requests.Session()
assert s.should_strip_auth(
"http://example.com/foo", "http://another.example.com/"
)
def test_should_strip_auth_http_downgrade(self):
s = requests.Session()
assert s.should_strip_auth("https://example.com/foo", "http://example.com/bar")
def test_should_strip_auth_https_upgrade(self):
s = requests.Session()
assert not s.should_strip_auth(
"http://example.com/foo", "https://example.com/bar"
)
assert not s.should_strip_auth(
"http://example.com:80/foo", "https://example.com/bar"
)
assert not s.should_strip_auth(
"http://example.com/foo", "https://example.com:443/bar"
)
# Non-standard ports should trigger stripping
assert s.should_strip_auth(
"http://example.com:8080/foo", "https://example.com/bar"
)
assert s.should_strip_auth(
"http://example.com/foo", "https://example.com:8443/bar"
)
def test_should_strip_auth_port_change(self):
s = requests.Session()
assert s.should_strip_auth(
"http://example.com:1234/foo", "https://example.com:4321/bar"
)
@pytest.mark.parametrize(
"old_uri, new_uri",
(
("https://example.com:443/foo", "https://example.com/bar"),
("http://example.com:80/foo", "http://example.com/bar"),
("https://example.com/foo", "https://example.com:443/bar"),
("http://example.com/foo", "http://example.com:80/bar"),
),
)
def test_should_strip_auth_default_port(self, old_uri, new_uri):
s = requests.Session()
assert not s.should_strip_auth(old_uri, new_uri)
def test_manual_redirect_with_partial_body_read(self, httpbin):
s = requests.Session()
r1 = s.get(httpbin("redirect/2"), allow_redirects=False, stream=True)
assert r1.is_redirect
rg = s.resolve_redirects(r1, r1.request, stream=True)
# read only the first eight bytes of the response body,
# then follow the redirect
r1.iter_content(8)
r2 = next(rg)
assert r2.is_redirect
# read all of the response via iter_content,
# then follow the redirect
for _ in r2.iter_content():
pass
r3 = next(rg)
assert not r3.is_redirect
def test_prepare_body_position_non_stream(self):
data = b"the data"
prep = requests.Request("GET", "http://example.com", data=data).prepare()
assert prep._body_position is None
def test_rewind_body(self):
data = io.BytesIO(b"the data")
prep = requests.Request("GET", "http://example.com", data=data).prepare()
assert prep._body_position == 0
assert prep.body.read() == b"the data"
# the data has all been read
assert prep.body.read() == b""
# rewind it back
requests.utils.rewind_body(prep)
assert prep.body.read() == b"the data"
def test_rewind_partially_read_body(self):
data = io.BytesIO(b"the data")
data.read(4) # read some data
prep = requests.Request("GET", "http://example.com", data=data).prepare()
assert prep._body_position == 4
assert prep.body.read() == b"data"
# the data has all been read
assert prep.body.read() == b""
# rewind it back
requests.utils.rewind_body(prep)
assert prep.body.read() == b"data"
def test_rewind_body_no_seek(self):
class BadFileObj:
def __init__(self, data):
self.data = data
def tell(self):
return 0
def __iter__(self):
return
data = BadFileObj("the data")
prep = requests.Request("GET", "http://example.com", data=data).prepare()
assert prep._body_position == 0
with pytest.raises(UnrewindableBodyError) as e:
requests.utils.rewind_body(prep)
assert "Unable to rewind request body" in str(e)
def test_rewind_body_failed_seek(self):
class BadFileObj:
def __init__(self, data):
self.data = data
def tell(self):
return 0
def seek(self, pos, whence=0):
raise OSError()
def __iter__(self):
return
data = BadFileObj("the data")
prep = requests.Request("GET", "http://example.com", data=data).prepare()
assert prep._body_position == 0
with pytest.raises(UnrewindableBodyError) as e:
requests.utils.rewind_body(prep)
assert "error occurred when rewinding request body" in str(e)
def test_rewind_body_failed_tell(self):
class BadFileObj:
def __init__(self, data):
self.data = data
def tell(self):
raise OSError()
def __iter__(self):
return
data = BadFileObj("the data")
prep = requests.Request("GET", "http://example.com", data=data).prepare()
assert prep._body_position is not None
with pytest.raises(UnrewindableBodyError) as e:
requests.utils.rewind_body(prep)
assert "Unable to rewind request body" in str(e)
def _patch_adapter_gzipped_redirect(self, session, url):
adapter = session.get_adapter(url=url)
org_build_response = adapter.build_response
self._patched_response = False
def build_response(*args, **kwargs):
resp = org_build_response(*args, **kwargs)
if not self._patched_response:
resp.raw.headers["content-encoding"] = "gzip"
self._patched_response = True
return resp
adapter.build_response = build_response
def test_redirect_with_wrong_gzipped_header(self, httpbin):
s = requests.Session()
url = httpbin("redirect/1")
self._patch_adapter_gzipped_redirect(s, url)
s.get(url)
@pytest.mark.parametrize(
"username, password, auth_str",
(
("test", "test", "Basic dGVzdDp0ZXN0"),
(
"имя".encode(),
"пароль".encode(),
"Basic 0LjQvNGPOtC/0LDRgNC+0LvRjA==",
),
),
)
def test_basic_auth_str_is_always_native(self, username, password, auth_str):
s = _basic_auth_str(username, password)
assert isinstance(s, builtin_str)
assert s == auth_str
def test_requests_history_is_saved(self, httpbin):
r = requests.get(httpbin("redirect/5"))
total = r.history[-1].history
i = 0
for item in r.history:
assert item.history == total[0:i]
i += 1
def test_json_param_post_content_type_works(self, httpbin):
r = requests.post(httpbin("post"), json={"life": 42})
assert r.status_code == 200
assert "application/json" in r.request.headers["Content-Type"]
assert {"life": 42} == r.json()["json"]
def test_json_param_post_should_not_override_data_param(self, httpbin):
r = requests.Request(
method="POST",
url=httpbin("post"),
data={"stuff": "elixr"},
json={"music": "flute"},
)
prep = r.prepare()
assert "stuff=elixr" == prep.body
def test_response_iter_lines(self, httpbin):
r = requests.get(httpbin("stream/4"), stream=True)
assert r.status_code == 200
it = r.iter_lines()
next(it)
assert len(list(it)) == 3
def test_response_context_manager(self, httpbin):
with requests.get(httpbin("stream/4"), stream=True) as response:
assert isinstance(response, requests.Response)
assert response.raw.closed
def test_unconsumed_session_response_closes_connection(self, httpbin):
s = requests.session()
with contextlib.closing(s.get(httpbin("stream/4"), stream=True)) as response:
pass
assert response._content_consumed is False
assert response.raw.closed
@pytest.mark.xfail
def test_response_iter_lines_reentrant(self, httpbin):
"""Response.iter_lines() is not reentrant safe"""
r = requests.get(httpbin("stream/4"), stream=True)
assert r.status_code == 200
next(r.iter_lines())
assert len(list(r.iter_lines())) == 3
def test_session_close_proxy_clear(self):
proxies = {
"one": mock.Mock(),
"two": mock.Mock(),
}
session = requests.Session()
with mock.patch.dict(session.adapters["http://"].proxy_manager, proxies):
session.close()
proxies["one"].clear.assert_called_once_with()
proxies["two"].clear.assert_called_once_with()
def test_proxy_auth(self):
adapter = HTTPAdapter()
headers = adapter.proxy_headers("http://user:pass@httpbin.org")
assert headers == {"Proxy-Authorization": "Basic dXNlcjpwYXNz"}
def test_proxy_auth_empty_pass(self):
adapter = HTTPAdapter()
headers = adapter.proxy_headers("http://user:@httpbin.org")
assert headers == {"Proxy-Authorization": "Basic dXNlcjo="}
def test_response_json_when_content_is_None(self, httpbin):
r = requests.get(httpbin("/status/204"))
# Make sure r.content is None
r.status_code = 0
r._content = False
r._content_consumed = False
assert r.content is None
with pytest.raises(ValueError):
r.json()
def test_response_without_release_conn(self):
"""Test `close` call for non-urllib3-like raw objects.
Should work when `release_conn` attr doesn't exist on `response.raw`.
"""
resp = requests.Response()
resp.raw = StringIO.StringIO("test")
assert not resp.raw.closed
resp.close()
assert resp.raw.closed
def test_empty_stream_with_auth_does_not_set_content_length_header(self, httpbin):
"""Ensure that a byte stream with size 0 will not set both a Content-Length
and Transfer-Encoding header.
"""
auth = ("user", "pass")
url = httpbin("post")
file_obj = io.BytesIO(b"")
r = requests.Request("POST", url, auth=auth, data=file_obj)
prepared_request = r.prepare()
assert "Transfer-Encoding" in prepared_request.headers
assert "Content-Length" not in prepared_request.headers
def test_stream_with_auth_does_not_set_transfer_encoding_header(self, httpbin):
"""Ensure that a byte stream with size > 0 will not set both a Content-Length
and Transfer-Encoding header.
"""
auth = ("user", "pass")
url = httpbin("post")
file_obj = io.BytesIO(b"test data")
r = requests.Request("POST", url, auth=auth, data=file_obj)
prepared_request = r.prepare()
assert "Transfer-Encoding" not in prepared_request.headers
assert "Content-Length" in prepared_request.headers
def test_chunked_upload_does_not_set_content_length_header(self, httpbin):
"""Ensure that requests with a generator body stream using
Transfer-Encoding: chunked, not a Content-Length header.
"""
data = (i for i in [b"a", b"b", b"c"])
url = httpbin("post")
r = requests.Request("POST", url, data=data)
prepared_request = r.prepare()
assert "Transfer-Encoding" in prepared_request.headers
assert "Content-Length" not in prepared_request.headers
def test_custom_redirect_mixin(self, httpbin):
"""Tests a custom mixin to overwrite ``get_redirect_target``.
Ensures a subclassed ``requests.Session`` can handle a certain type of
malformed redirect responses.
1. original request receives a proper response: 302 redirect
2. following the redirect, a malformed response is given:
status code = HTTP 200
location = alternate url
3. the custom session catches the edge case and follows the redirect
"""
url_final = httpbin("html")
querystring_malformed = urlencode({"location": url_final})
url_redirect_malformed = httpbin("response-headers?%s" % querystring_malformed)
querystring_redirect = urlencode({"url": url_redirect_malformed})
url_redirect = httpbin("redirect-to?%s" % querystring_redirect)
urls_test = [
url_redirect,
url_redirect_malformed,
url_final,
]
class CustomRedirectSession(requests.Session):
def get_redirect_target(self, resp):
# default behavior
if resp.is_redirect:
return resp.headers["location"]
# edge case - check to see if 'location' is in headers anyways
location = resp.headers.get("location")
if location and (location != resp.url):
return location
return None
session = CustomRedirectSession()
r = session.get(urls_test[0])
assert len(r.history) == 2
assert r.status_code == 200
assert r.history[0].status_code == 302
assert r.history[0].is_redirect
assert r.history[1].status_code == 200
assert not r.history[1].is_redirect
assert r.url == urls_test[2]
| TestRequests |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_memorystore.py | {
"start": 42549,
"end": 46467
} | class ____(GoogleCloudBaseOperator):
"""
Export Redis instance data into a Redis RDB format file in Cloud Storage.
In next step, deletes this instance.
Redis will continue serving during this operation.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreExportAndDeleteInstanceOperator`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Redis instance in the customer project.
:param output_config: Required. Specify data to be exported.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.redis_v1.types.OutputConfig`
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"instance",
"output_config",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
*,
location: str,
instance: str,
output_config: dict | OutputConfig,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.instance = instance
self.output_config = output_config
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudMemorystoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
hook.export_instance(
location=self.location,
instance=self.instance,
output_config=self.output_config,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.delete_instance(
location=self.location,
instance=self.instance,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
| CloudMemorystoreExportAndDeleteInstanceOperator |
python | huggingface__transformers | examples/modular-transformers/modeling_test_detr.py | {
"start": 21800,
"end": 27371
} | class ____(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper.
Here, we add position embeddings to the queries and keys (as explained in the Deformable DETR paper).
"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
return tensor if position_embeddings is None else tensor + position_embeddings
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_embeddings: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
batch_size, target_len, embed_dim = hidden_states.size()
# add position embeddings to the hidden states before projecting to queries and keys
if position_embeddings is not None:
hidden_states_original = hidden_states
hidden_states = self.with_pos_embed(hidden_states, position_embeddings)
# get queries, keys and values
query_states = self.q_proj(hidden_states) * self.scaling
key_states = self._shape(self.k_proj(hidden_states), -1, batch_size)
value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size)
proj_shape = (batch_size * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
source_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len):
raise ValueError(
f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is"
f" {attn_weights.size()}"
)
# expand attention_mask
if attention_mask is not None:
# [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len]
attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
if attention_mask is not None:
if attention_mask.size() != (batch_size, 1, target_len, source_len):
raise ValueError(
f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is"
f" {attention_mask.size()}"
)
if attention_mask.dtype == torch.bool:
attention_mask = torch.zeros_like(attention_mask, dtype=attn_weights.dtype).masked_fill_(
attention_mask, -torch.inf
)
attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask
attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len)
attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (
batch_size * self.num_heads,
target_len,
self.head_dim,
):
raise ValueError(
f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(batch_size, target_len, embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped
| TestDetrMultiheadAttention |
python | pytransitions__transitions | transitions/extensions/asyncio.py | {
"start": 3942,
"end": 6718
} | class ____(Transition):
"""Representation of an asynchronous transition managed by a ``AsyncMachine`` instance."""
condition_cls = AsyncCondition
async def _eval_conditions(self, event_data):
res = await event_data.machine.await_all([partial(cond.check, event_data) for cond in self.conditions])
if not all(res):
_LOGGER.debug("%sTransition condition failed: Transition halted.", event_data.machine.name)
return False
return True
async def execute(self, event_data):
"""Executes the transition.
Args:
event_data (EventData): An instance of class EventData.
Returns: boolean indicating whether or not the transition was
successfully executed (True if successful, False if not).
"""
_LOGGER.debug("%sInitiating transition from state %s to state %s...",
event_data.machine.name, self.source, self.dest)
await event_data.machine.callbacks(self.prepare, event_data)
_LOGGER.debug("%sExecuted callbacks before conditions.", event_data.machine.name)
if not await self._eval_conditions(event_data):
return False
machine = event_data.machine
# cancel running tasks since the transition will happen
await machine.cancel_running_transitions(event_data.model)
await event_data.machine.callbacks(event_data.machine.before_state_change, event_data)
await event_data.machine.callbacks(self.before, event_data)
_LOGGER.debug("%sExecuted callback before transition.", event_data.machine.name)
if self.dest is not None: # if self.dest is None this is an internal transition with no actual state change
await self._change_state(event_data)
await event_data.machine.callbacks(self.after, event_data)
await event_data.machine.callbacks(event_data.machine.after_state_change, event_data)
_LOGGER.debug("%sExecuted callback after transition.", event_data.machine.name)
return True
async def _change_state(self, event_data):
if hasattr(event_data.machine, "model_graphs"):
graph = event_data.machine.model_graphs[id(event_data.model)]
graph.reset_styling()
graph.set_previous_transition(self.source, self.dest)
await event_data.machine.get_state(self.source).exit(event_data)
event_data.machine.set_state(self.dest, event_data.model)
event_data.update(getattr(event_data.model, event_data.machine.model_attribute))
dest = event_data.machine.get_state(self.dest)
await dest.enter(event_data)
if dest.final:
await event_data.machine.callbacks(event_data.machine.on_final, event_data)
| AsyncTransition |
python | allegroai__clearml | clearml/backend_api/services/v2_13/models.py | {
"start": 3898,
"end": 18489
} | class ____(NonStrictDataModel):
"""
:param id: Model id
:type id: str
:param name: Model name
:type name: str
:param user: Associated user id
:type user: str
:param company: Company id
:type company: str
:param created: Model creation time
:type created: datetime.datetime
:param task: Task ID of task in which the model was created
:type task: str
:param parent: Parent model ID
:type parent: str
:param project: Associated project ID
:type project: str
:param comment: Model comment
:type comment: str
:param tags: User-defined tags
:type tags: Sequence[str]
:param system_tags: System tags. This field is reserved for system use, please
don't use it.
:type system_tags: Sequence[str]
:param framework: Framework on which the model is based. Should be identical to
the framework of the task which created the model
:type framework: str
:param design: Json object representing the model design. Should be identical
to the network design of the task which created the model
:type design: dict
:param labels: Json object representing the ids of the labels in the model. The
keys are the layers' names and the values are the ids.
:type labels: dict
:param uri: URI for the model, pointing to the destination storage.
:type uri: str
:param ready: Indication if the model is final and can be used by other tasks
:type ready: bool
:param ui_cache: UI cache for this model
:type ui_cache: dict
"""
_schema = {
"definitions": {
"metadata_item": {
"properties": {
"key": {
"description": "The key uniquely identifying the metadata item inside the given entity",
"type": "string",
},
"type": {
"description": "The type of the metadata item",
"type": "string",
},
"value": {
"description": "The value stored in the metadata item",
"type": "string",
},
},
"type": "object",
}
},
"properties": {
"comment": {"description": "Model comment", "type": ["string", "null"]},
"company": {"description": "Company id", "type": ["string", "null"]},
"created": {
"description": "Model creation time",
"format": "date-time",
"type": ["string", "null"],
},
"design": {
"additionalProperties": True,
"description": "Json object representing the model design. Should be identical to the network design of the task which created the model",
"type": ["object", "null"],
},
"framework": {
"description": "Framework on which the model is based. Should be identical to the framework of the task which created the model",
"type": ["string", "null"],
},
"id": {"description": "Model id", "type": ["string", "null"]},
"labels": {
"additionalProperties": {"type": "integer"},
"description": "Json object representing the ids of the labels in the model. The keys are the layers' names and the values are the ids.",
"type": ["object", "null"],
},
"last_update": {
"description": "Model last update time",
"format": "date-time",
"type": ["string", "null"],
},
"name": {"description": "Model name", "type": ["string", "null"]},
"parent": {"description": "Parent model ID", "type": ["string", "null"]},
"project": {
"description": "Associated project ID",
"type": ["string", "null"],
},
"ready": {
"description": "Indication if the model is final and can be used by other tasks",
"type": ["boolean", "null"],
},
"system_tags": {
"description": "System tags. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"task": {
"description": "Task ID of task in which the model was created",
"type": ["string", "null"],
},
"ui_cache": {
"additionalProperties": True,
"description": "UI cache for this model",
"type": ["object", "null"],
},
"uri": {
"description": "URI for the model, pointing to the destination storage.",
"type": ["string", "null"],
},
"user": {"description": "Associated user id", "type": ["string", "null"]},
"metadata": {
"type": "array",
"items": {"$ref": "#/definitions/metadata_item"},
"description": "Model metadata",
},
},
"type": "object",
}
def __init__(
self,
id: Optional[str] = None,
name: Optional[str] = None,
user: Optional[str] = None,
company: Optional[str] = None,
created: Optional[str] = None,
task: Optional[str] = None,
parent: Optional[str] = None,
project: Optional[str] = None,
comment: Optional[str] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
framework: Optional[str] = None,
design: Optional[dict] = None,
labels: Optional[dict] = None,
uri: Optional[str] = None,
ready: Optional[bool] = None,
ui_cache: Optional[dict] = None,
metadata: Optional[List[Any]] = None,
last_update: Optional[str] = None,
**kwargs: Any
) -> None:
super(Model, self).__init__(**kwargs)
self.id = id
self.name = name
self.user = user
self.company = company
self.created = created
self.task = task
self.parent = parent
self.project = project
self.comment = comment
self.tags = tags
self.system_tags = system_tags
self.framework = framework
self.design = design
self.labels = labels
self.uri = uri
self.ready = ready
self.ui_cache = ui_cache
self.metadata = metadata
self.last_update = last_update
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("user")
def user(self) -> Optional[str]:
return self._property_user
@user.setter
def user(self, value: Optional[str]) -> None:
if value is None:
self._property_user = None
return
self.assert_isinstance(value, "user", six.string_types)
self._property_user = value
@schema_property("company")
def company(self) -> Optional[str]:
return self._property_company
@company.setter
def company(self, value: Optional[str]) -> None:
if value is None:
self._property_company = None
return
self.assert_isinstance(value, "company", six.string_types)
self._property_company = value
@schema_property("created")
def created(self) -> Optional[str]:
return self._property_created
@created.setter
def created(self, value: Optional[str]) -> None:
if value is None:
self._property_created = None
return
self.assert_isinstance(value, "created", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_created = value
@schema_property("task")
def task(self) -> Optional[str]:
return self._property_task
@task.setter
def task(self, value: Optional[str]) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("parent")
def parent(self) -> Optional[str]:
return self._property_parent
@parent.setter
def parent(self, value: Optional[str]) -> None:
if value is None:
self._property_parent = None
return
self.assert_isinstance(value, "parent", six.string_types)
self._property_parent = value
@schema_property("project")
def project(self) -> Optional[str]:
return self._property_project
@project.setter
def project(self, value: Optional[str]) -> None:
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
@schema_property("comment")
def comment(self) -> Optional[str]:
return self._property_comment
@comment.setter
def comment(self, value: Optional[str]) -> None:
if value is None:
self._property_comment = None
return
self.assert_isinstance(value, "comment", six.string_types)
self._property_comment = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("framework")
def framework(self) -> Optional[str]:
return self._property_framework
@framework.setter
def framework(self, value: Optional[str]) -> None:
if value is None:
self._property_framework = None
return
self.assert_isinstance(value, "framework", six.string_types)
self._property_framework = value
@schema_property("design")
def design(self) -> Optional[dict]:
return self._property_design
@design.setter
def design(self, value: Optional[dict]) -> None:
if value is None:
self._property_design = None
return
self.assert_isinstance(value, "design", (dict,))
self._property_design = value
@schema_property("labels")
def labels(self) -> Optional[dict]:
return self._property_labels
@labels.setter
def labels(self, value: Optional[dict]) -> None:
if value is None:
self._property_labels = None
return
self.assert_isinstance(value, "labels", (dict,))
self._property_labels = value
@schema_property("uri")
def uri(self) -> Optional[str]:
return self._property_uri
@uri.setter
def uri(self, value: Optional[str]) -> None:
if value is None:
self._property_uri = None
return
self.assert_isinstance(value, "uri", six.string_types)
self._property_uri = value
@schema_property("ready")
def ready(self) -> Optional[bool]:
return self._property_ready
@ready.setter
def ready(self, value: Optional[bool]) -> None:
if value is None:
self._property_ready = None
return
self.assert_isinstance(value, "ready", (bool,))
self._property_ready = value
@schema_property("ui_cache")
def ui_cache(self) -> Optional[dict]:
return self._property_ui_cache
@ui_cache.setter
def ui_cache(self, value: Optional[dict]) -> None:
if value is None:
self._property_ui_cache = None
return
self.assert_isinstance(value, "ui_cache", (dict,))
self._property_ui_cache = value
@schema_property("metadata")
def metadata(self) -> Optional[List[Any]]:
return self._property_metadata
@metadata.setter
def metadata(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_metadata = None
return
self.assert_isinstance(value, "metadata", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [MetadataItem.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "metadata", MetadataItem, is_array=True)
self._property_metadata = value
@schema_property("last_update")
def last_update(self) -> Optional[str]:
return self._property_last_update
@last_update.setter
def last_update(self, value: Optional[str]) -> None:
if value is None:
self._property_last_update = None
return
self.assert_isinstance(value, "last_update", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_last_update = value
| Model |
python | networkx__networkx | networkx/classes/tests/test_reportviews.py | {
"start": 11896,
"end": 12757
} | class ____(TestOutEdgeDataView):
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9, create_using=nx.DiGraph())
cls.eview = nx.reportviews.InEdgeView
def test_repr(self):
ev = self.eview(self.G)(data=True)
rep = (
"InEdgeDataView([(0, 1, {}), (1, 2, {}), "
+ "(2, 3, {}), (3, 4, {}), "
+ "(4, 5, {}), (5, 6, {}), "
+ "(6, 7, {}), (7, 8, {})])"
)
assert repr(ev) == rep
def test_contains_with_nbunch(self):
evr = self.eview(self.G)
ev = evr(nbunch=[0, 2])
assert (0, 1) not in ev
assert (1, 2) in ev
assert (2, 3) not in ev
assert (3, 4) not in ev
assert (4, 5) not in ev
assert (5, 6) not in ev
assert (7, 8) not in ev
assert (8, 9) not in ev
| TestInEdgeDataView |
python | pytorch__pytorch | torch/_inductor/autotune_process.py | {
"start": 1466,
"end": 7024
} | class ____:
"""
Class to launch and interact with a benchmarking subprocess.
"""
@staticmethod
def process_main(read_pipe: IO[bytes], write_pipe: IO[bytes]) -> None:
"""
Entry point for the child process.
"""
autotuning_log.debug(
"Started autotune subprocess %s. Visible devices: %s",
os.getpid(),
os.environ.get(CUDA_VISIBLE_DEVICES),
)
def workloop():
while True:
job, extra_env = TuningProcess.recv(read_pipe)
if job is None:
# None is a sentinel for the child to shut down
break
try:
if extra_env:
os.environ.update(extra_env)
result = job()
except Exception as e:
result = e
TuningProcess.send(result, write_pipe)
try:
workloop()
except EOFError:
# The parent closed the pipe
pass
@staticmethod
def send(
obj: Any, write_pipe: IO[bytes], extra_env: dict[str, str] | None = None
) -> None:
pickle.dump((obj, extra_env), write_pipe)
write_pipe.flush()
@staticmethod
def recv(read_pipe: IO[bytes]) -> Any:
return pickle.load(read_pipe)
def __init__(self, device: Optional[int]):
self.device = device
self.start()
def start(self):
"""
Start the benchmarking subprocess.
"""
entry = os.path.join(os.path.dirname(__file__), "__autotune_main__.py")
subproc_read_fd, write_fd = os.pipe()
read_fd, subproc_write_fd = os.pipe()
self.write_pipe = os.fdopen(write_fd, "wb")
self.read_pipe = os.fdopen(read_fd, "rb")
self.selector = selectors.DefaultSelector()
self.selector.register(self.read_pipe, selectors.EVENT_READ)
cmd = [
sys.executable,
entry,
f"--parent={os.getpid()}",
f"--read-fd={str(subproc_read_fd)}",
f"--write-fd={str(subproc_write_fd)}",
]
env = {
**python_subprocess_env(),
# We shouldn't be using the Triton async compile subprocess pool,
# but as a precaution set the env var that disables its creation.
"TORCH_WARM_POOL": "0",
# Some internal usages need a modified LD_LIBRARY_PATH.
"LD_LIBRARY_PATH": get_ld_library_path(),
# This will cause the subprocs to profile using the profiler.
"TORCHINDUCTOR_PROFILE_WITH_DO_BENCH_USING_PROFILING": "1"
if config.profile_bandwidth_with_do_bench_using_profiling
else "0",
}
if self.device is not None:
env[CUDA_VISIBLE_DEVICES] = str(self.device)
self.process = subprocess.Popen(
cmd,
env=env,
pass_fds=(subproc_read_fd, subproc_write_fd),
)
os.close(subproc_read_fd)
os.close(subproc_write_fd)
self.running = True
def alive(self) -> bool:
"""
True if the subprocess is still running.
"""
return self.running and self.process.poll() is None
def put(self, req: Any, extra_env: dict[str, str] | None = None) -> None:
"""
Push a work item to the child process.
"""
if not self.alive():
self.start()
TuningProcess.send(req, self.write_pipe, extra_env=extra_env)
def get(self, timeout: float = 120.0) -> Any:
"""
Get a response from the child process. Raises TimeoutError on timeout;
raises EOFError if the subprocess crashes.
"""
try:
if not self.selector.select(timeout):
raise TimeoutError(f"Timeout in autotune subprocess {self.process.pid}")
result, _ = TuningProcess.recv(self.read_pipe)
except TimeoutError:
self.kill()
raise
except EOFError:
# The subprocess crashed
self.close()
raise
except Exception:
autotuning_log.exception(
"Unexpected exception in autotune subprocess %s", self.process.pid
)
self.kill()
raise
if isinstance(result, Exception):
raise result
return result
def shutdown(self, wait: bool = True) -> None:
"""
Signal the child process to shut down gracefully.
"""
if self.alive():
TuningProcess.send(None, self.write_pipe)
if wait:
self.wait()
def wait(self) -> None:
"""
Wait for the child process to exit.
"""
if self.alive():
self.process.wait()
self.close()
def close(self) -> None:
"""
Close resources.
"""
self.selector.close()
self.read_pipe.close()
self.write_pipe.close()
self.running = False
def kill(self) -> None:
"""
Send a SIGKILL to the child process.
"""
if self.alive():
autotuning_log.error(
"Sending SIGKILL to autotune subprocess %d",
self.process.pid,
)
self.process.kill()
self.close()
def restart(self) -> None:
"""
Gracefully restarts the child process.
"""
self.shutdown(wait=True)
self.start()
| TuningProcess |
python | fluentpython__example-code | attic/objects/cards_format.py | {
"start": 1262,
"end": 2081
} | class ____(Enum):
spades = '\u2660' # U+2660 ♠ BLACK SPADE SUIT
diamonds = '\u2662' # U+2662 ♢ WHITE DIAMOND SUIT
clubs = '\u2663' # U+2663 ♣ BLACK CLUB SUIT
hearts = '\u2661' # U+2661 ♡ WHITE HEART SUIT
def format_p(self):
return chr(0x2660 + self.value)
def format_s(self):
return self.name
def format_S(self):
return self.name.capitalize()
def __bytes__(self):
return bytes([self.value])
def __format__(self, format_spec):
use_spec = 's' if format_spec == '' else format_spec
format_method = getattr(self, 'format_' + use_spec, None)
if format_method:
return format_method()
msg = "Invalid format spec {!r} for object of type 'Suite'"
raise ValueError(msg.format(format_spec))
| Suite |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zendesk-support/unit_tests/integrations/test_groups.py | {
"start": 587,
"end": 2692
} | class ____(TestCase):
@property
def _config(self):
return (
ConfigBuilder()
.with_basic_auth_credentials("user@example.com", "password")
.with_subdomain("d3v-airbyte")
.with_start_date(ab_datetime_now().subtract(timedelta(weeks=104)))
.build()
)
@staticmethod
def get_authenticator(config):
return ApiTokenAuthenticator(email=config["credentials"]["email"], password=config["credentials"]["api_token"])
@HttpMocker()
def test_given_incoming_state_semi_incremental_groups_does_not_emit_earlier_record(self, http_mocker):
"""
Perform a semi-incremental sync where records that came before the current state are not included in the set
of records emitted
"""
api_token_authenticator = self.get_authenticator(self._config)
given_groups_with_later_records(
http_mocker,
string_to_datetime(self._config["start_date"]),
timedelta(weeks=12),
api_token_authenticator,
)
output = read_stream("groups", SyncMode.full_refresh, self._config)
assert len(output.records) == 2
@HttpMocker()
def test_given_incoming_state_semi_incremental_groups_does_not_emit_earlier_record(self, http_mocker):
"""
Perform a semi-incremental sync where records that came before the current state are not included in the set
of records emitted
"""
api_token_authenticator = self.get_authenticator(self._config)
given_groups_with_later_records(
http_mocker,
string_to_datetime(self._config["start_date"]),
timedelta(weeks=12),
api_token_authenticator,
)
state_value = {"updated_at": datetime_to_string(ab_datetime_now().subtract(timedelta(weeks=102)))}
state = StateBuilder().with_stream_state("groups", state_value).build()
output = read_stream("groups", SyncMode.full_refresh, self._config, state=state)
assert len(output.records) == 1
| TestGroupsStreamFullRefresh |
python | giampaolo__psutil | tests/test_memleaks.py | {
"start": 10610,
"end": 11052
} | class ____(MemoryLeakTestCase):
def test_cmdline_peb_true(self):
self.execute(lambda: cext.proc_cmdline(os.getpid(), use_peb=True))
def test_cmdline_peb_false(self):
self.execute(lambda: cext.proc_cmdline(os.getpid(), use_peb=False))
# ===================================================================
# system APIs
# ===================================================================
| TestProcessDualImplementation |
python | huggingface__transformers | tests/models/phi3/test_modeling_phi3.py | {
"start": 3114,
"end": 25419
} | class ____(unittest.TestCase):
def test_model_phi3_mini_4k_instruct_logits(self):
input_ids = {
"input_ids": torch.tensor(
[[1212, 318, 281, 1672, 2643, 290, 428, 318, 257, 1332]], dtype=torch.long, device=torch_device
)
}
model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-4k-instruct").to(torch_device)
model.eval()
output = model(**input_ids).logits
EXPECTED_OUTPUT = torch.tensor(
[
[8.9005, 8.5380, 12.0361, 9.1562, 7.4068, 10.2581, 7.8991, 7.2447,7.0626, 7.5760, 7.8315, 9.4076, 16.1104, 20.1290, 7.7500, 7.1947, 6.1550, 7.0563, 8.5344, 8.7248, 7.1359, 7.8237, 7.6817, 7.6395, 7.7924, 6.9702, 6.9097, 8.7074, 9.5768, 8.1145],
[18.7090, 18.5701, 19.3660, 21.5171, 17.5042, 17.8716, 16.3554, 17.4617, 18.1623, 16.5641, 17.7547, 18.0193, 23.8355, 29.4481, 16.3864, 16.0560, 16.1543, 18.5507, 18.1343, 17.3883, 17.7422, 17.3012, 16.7657, 17.6874,17.9067, 16.8301, 16.2719, 18.3709, 19.0318, 16.7315],
]
).to(torch_device) # fmt: skip
torch.testing.assert_close(EXPECTED_OUTPUT, output[0, :2, :30], rtol=1e-4, atol=1e-4)
def test_phi3_mini_4k_instruct_generation(self):
model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-4k-instruct")
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-4k-instruct")
messages = [
{
"role": "system",
"content": "You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.",
},
{"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"},
]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
outputs = model.generate(inputs, max_new_tokens=32)
output_text = tokenizer.batch_decode(outputs)
EXPECTED_OUTPUT = [
"<|system|> You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.<|end|><|user|> Can you provide ways to eat combinations of bananas and dragonfruits?<|end|><|assistant|> Certainly! Bananas and dragonfruits can be combined in various delicious ways. Here are some ideas for incorporating these fruits into your"
]
self.assertListEqual(output_text, EXPECTED_OUTPUT)
def test_phi3_mini_4k_instruct_with_static_cache(self):
model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-4k-instruct")
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-4k-instruct")
messages = [
{
"role": "system",
"content": "You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.",
},
{"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"},
]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
response_tokens = Phi3MiniWithStaticCache.generate(model, inputs, 64)
output_text = tokenizer.batch_decode(torch.tensor([response_tokens], dtype=torch.long, device=torch_device))
EXPECTED_OUTPUT = [
"<|system|> You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.<|end|><|user|> Can you provide ways to eat combinations of bananas and dragonfruits?<|end|><|assistant|> Certainly! Bananas and dragonfruits can be combined in various delicious ways. Here are some"
]
self.assertListEqual(output_text, EXPECTED_OUTPUT)
def test_model_phi3_mini_128k_instruct_logits(self):
input_ids = {
"input_ids": torch.tensor(
[[1212, 318, 281, 1672, 2643, 290, 428, 318, 257, 1332]], dtype=torch.long, device=torch_device
)
}
model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-128k-instruct").to(torch_device)
model.eval()
output = model(**input_ids).logits
EXPECTED_OUTPUT = torch.tensor(
[
[10.6076, 10.6499, 14.0601, 19.8499, 15.1787, 19.3717, 19.9782, 17.0394, 15.7875, 18.1403, 19.2748, 12.6627, 20.2804, 24.5362, 18.8105, 15.3394, 12.1219, 15.9941, 19.0679, 16.4936, 17.0505, 16.8738, 17.3090, 16.6572, 16.8754, 16.6912, 15.1627, 18.8721, 19.6017, 18.5513],
[16.2141, 18.7298, 17.4216, 21.9312, 17.7606, 17.6177, 16.7766, 17.9859, 18.4132, 17.4505, 18.6385, 18.5396, 23.6260, 28.7443, 16.1817, 15.5148, 16.0035, 18.6652, 18.3087, 17.2960, 17.8223, 17.7776, 16.8686, 17.4093, 17.8037, 17.2544, 16.7231, 18.6195, 19.6784, 16.6647],
]
).to(torch_device) # fmt: skip
torch.testing.assert_close(EXPECTED_OUTPUT, output[0, :2, :30], rtol=1e-4, atol=1e-4)
def test_phi3_mini_128k_instruct_generation(self):
model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-128k-instruct")
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-128k-instruct")
messages = [
{
"role": "system",
"content": "You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.",
},
{"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"},
]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
outputs = model.generate(inputs, max_new_tokens=32)
output_text = tokenizer.batch_decode(outputs)
EXPECTED_OUTPUT = [
"<|system|> You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.<|end|><|user|> Can you provide ways to eat combinations of bananas and dragonfruits?<|end|><|assistant|> Certainly! Bananas and dragonfruits can be combined in various delicious and nutritious ways. Here are some creative and healthy"
]
self.assertListEqual(output_text, EXPECTED_OUTPUT)
def test_phi3_mini_128k_instruct_with_static_cache(self):
model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-128k-instruct")
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-128k-instruct")
messages = [
{
"role": "system",
"content": "You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.",
},
{"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"},
]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
response_tokens = Phi3MiniWithStaticCache.generate(model, inputs, 64)
output_text = tokenizer.batch_decode(torch.tensor([response_tokens], dtype=torch.long, device=torch_device))
EXPECTED_OUTPUT = [
"<|system|> You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.<|end|><|user|> Can you provide ways to eat combinations of bananas and dragonfruits?<|end|><|assistant|> Certainly! Bananas and dragonfruits can be combined in various delicious and nutritious ways"
]
self.assertListEqual(output_text, EXPECTED_OUTPUT)
def test_phi3_mini_4k_sliding_window(self):
"""
This tests that Phi3 doesn't deteriorate in quality for long context generations. Since Phi3 has
sliding window attention, the test is tailored so that (context + max_new_tokens > sliding_window).
See #33586 for more
"""
model = Phi3ForCausalLM.from_pretrained(
"microsoft/Phi-3-mini-4k-instruct", device_map=torch_device, dtype=torch.bfloat16
)
tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
input_text = """
<|user|>
Tell me about Paris, France.<|end|>
<|assistant|>
Paris, the capital city of France, is renowned for its rich history, iconic landmarks, and vibrant culture. Known as "The City of Light," Paris is situated in the north-central part of the country along the Seine River.
Here are some key aspects of Paris:
1. Landmarks: Paris is home to numerous famous landmarks, including the Eiffel Tower, the Louvre Museum, Notre-Dame Cathedral, and the Champs-Élysées. The Eiffel Tower, built in 1889, is an iconic symbol of Paris and attracts millions of tourists each year. The Louvre Museum, the world's largest art museum, houses thousands of works of art, including the Mona Lisa and the Venus de Milo.
2. History: Paris has a rich history dating back to the 3rd century BC, when it was founded by a Celtic tribe called the Parisii. Over the centuries, the city has been influenced by various cultures, including the Romans, the Franks, and the Normans. The French Revolution in the late 18th century marked a significant turning point in Paris's history, leading to the establishment of the modern French Republic.
3. Culture: Paris is a global center for art, fashion, gastronomy, and culture. The city is home to numerous museums, including the Centre Pompidou, Musée d'Orsay, and Musée Rodin. Paris is also known for its fashion industry, with many famous designers having their origins in the city. The city's cuisine is also highly regarded, with a focus on fresh ingredients, and a wide variety of dishes, including French classics like coq au vin, boeuf bourguignon, and crêpes.
4. Architecture: Parisian architecture is characterized by its diverse styles, ranging from Gothic and Romanesque to Art Nouveau and Art Deco. The city's famous Haussmannian buildings, designed by Baron Haussmann in the mid-19th century, are known for their uniform facades, wrought-iron balconies, and large windows.
5. Transportation: Paris has an extensive public transportation system, including the Paris Métro, RER (suburban trains), and buses. The city's iconic yellow taxis are also a popular mode of transportation.
6. Language: The official language of Paris is French, and the city's residents are known for their charm and politeness.
7. Festivals and Events: Paris hosts numerous festivals and events throughout the year, including the annual Bastille Day celebrations, the Paris Fashion Week, and the famous annual New Year's Eve fireworks on the Eiffel Tower.
8. Geography: Paris is located in the north-central part of France, with the Seine River running through the city. The city's geography is characterized by rolling hills and picturesque parks, such as the Bois de Boulogne and the Jardin des Tuileries.
9. Population: As of 2021, Paris has an estimated population of around 2.2 million residents, with the metropolitan area housing over 12 million people.
In summary, Paris is a city steeped in history, culture, and art, with a unique blend of architectural styles and a vibrant atmosphere that continues to captivate millions of visitors each year.<|end|>
<|user|>
Please give me a list of 5 architectural landmarks in Paris, France.<|end|>
<|assistant|>
1. Eiffel Tower: Designed by Gustave Eiffel and completed in 1889, the Eiffel Tower is an iconic symbol of Paris and France. Standing at 324 meters tall, it was the tallest man-made structure in the world until the completion of the Chrysler Building in New York in 1930. The Eiffel Tower is made of wrought iron and offers visitors stunning views of the city from its three levels.
2. Notre-Dame Cathedral: Located on the Île de la Cité, Notre-Dame Cathedral is a masterpiece of French Gothic architecture. Construction began in the 12th century and continued for over 200 years, with the cathedral's completion in the 14th century. The cathedral is famous for its intricate facade, stained-glass windows, and the iconic gargoyles and chimeras.
3. Louvre Museum: Originally built as a fortress in the 12th century, the Louvre Museum is now the world's largest art museum and a historic monument in Paris. The museum's most famous landmark is the iconic glass pyramid entrance, designed by architect I. M. Pei in the 1980s. The Louvre houses over 380,000 works of art, including the Mona Lisa and the Venus de Milo.
4. Sacré-Cœur Basilica: The Sacré-Cœur Basilica, also known as the Basilique du Sacré-Cœur, is a Roman Catholic church and minor basilica located at the summit of the butte Montmartre, the highest point in Paris. The basilica was designed by Paul Abadie and dedicated in 1914. Its white domes and lavender-colored travertine stone make it a distinctive landmark in the Paris skyline.
5. Arc de Triomphe: The Arc de Triomphe is a monumental structure located at the western end of the Champs-Élysées. Commissioned by Napoleon in 1806, the Arc was designed by Jean-François-Thérèse Chalgrin and completed in 1836. The monument honors those who fought and died for France during the French Revolutionary and Napoleonic Wars. The Arc features sculptural reliefs and inscriptions, and its façade is adorned with the names of 357 generals and 660 soldiers.
These five architectural landmarks showcase the diverse styles and historical periods of Paris, from Gothic to Neoclassical, and from the 19th to the 20th centuries. Each landmark has its unique features and contributes to the city's rich architectural heritage.<|end|>
<|user|>
Please give me a list of 10 famous items displayed in the Louvre Museum. Thanks!<|end|>
<|assistant|>
1. Mona Lisa: The Mona Lisa, painted by Leonardo da Vinci in the early 16th century, is arguably the most famous painting in the world. The portrait is known for its enigmatic smile and masterful use of sfumato, a technique that creates a soft, hazy effect.
2. Venus de Milo: This ancient Greek statue, believed to have been created around 130-100 BC, is a masterpiece of Hellenistic sculpture. The Venus de Milo is renowned for its graceful beauty and the mystery surrounding its missing arms.
3. Winged Victory of Samothrace: This Hellenistic sculpture, dating back to the 2nd century BC, depicts the Greek goddess Nike, the personification of victory. The sculpture is celebrated for its dynamic movement and intricate details.
4. Liberty Leading the People: This iconic painting by Eugène Delacroix, created in 1830, commemorates the July Revolution in France. The artwork depicts a woman personifying Liberty leading a group of revolutionaries over the bodies of the fallen.
5. The Wedding at Cana: A 1516 painting by Veronese, The Wedding at Cana is a large-scale work that depicts the biblical story of Jesus turning water into wine at a wedding feast. The painting is known for its vibrant colors and intricate details.
6. The Raft of the Medusa: This 1819 painting by Théodore Géricault is a powerful depiction of the aftermath of the shipwreck of the French frigate Méduse. The painting is famous for its dramatic composition and emotional intensity.
7. The Coronation of Napoleon: This 1805 painting by Jacques-Louis David portrays the coronation of Napoleon Bonaparte as Emperor of the French. The artwork is a masterpiece of neoclassical style and captures the pomp and ceremony of the event.
8. The Death of Socrates: A 1914 painting by Pablo Picasso, The Death of Socrates depicts the moment when the ancient Greek philosopher Socrates drinks the poison hemlock, as ordered by the Athenian government. The painting is a powerful expression of the existential themes associated with Socrates' death.
9. The Turkish Bath: A 1887 painting by Puvis de Chavannes, The Turkish Bath is a large-scale work that depicts a scene of relaxation and leisity in a traditional Turkish bath. The painting is known for its subdued color palette and serene atmosphere.
10. The Wedding at Cana: A 1901 painting by Giovanni Boldini, The Wedding at Cana is a lively depiction of a festive Italian wedding. The painting is characterized by Boldini's signature flamboyant style and his ability to capture the essence of the subjects with a sense of movement and energy.
These 10 famous items in the Louvre Museum showcase a diverse range of artistic styles and periods, from ancient Greek and Roman sculptures to Renaissance and 19th-century paintings. The collection at the Louvre is one of the world's largest and most important museums, housing over 800,000 works of art, and these famous items are just a small selection of the museum's vast collection.<|end|>
<|user|>
Please give me a list of 10 famous French dishes.<|end|>
<|assistant|>
"""
inputs = tokenizer(input_text, return_tensors="pt").to(device=torch_device)
outputs = model.generate(**inputs, max_new_tokens=100)
output_text = tokenizer.batch_decode(outputs[:, inputs.input_ids.shape[1] :], skip_special_tokens=True)
EXPECTED_OUTPUT = [
'1. Coq au Vin: Coq au Vin is a classic French dish that translates to "rooster in wine." The dish consists of chicken braised with wine, lardons, mushrooms, and garlic. It is a hearty and flavorful dish that is often served with potatoes or rice.\n\n 2. Boeuf Bourguignon: Boeuf Bourguignon is a traditional French beef stew that'
]
self.assertListEqual(output_text, EXPECTED_OUTPUT)
@pytest.mark.torch_export_test
@slow
def test_export_static_cache(self):
from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_4
if not is_torch_greater_or_equal_than_2_4:
self.skipTest(reason="This test requires torch >= 2.4 to run.")
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
from transformers.integrations.executorch import (
TorchExportableModuleWithStaticCache,
)
model_id = "microsoft/Phi-4-mini-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id, pad_token="</s>", padding_side="right")
expected_text_completions = Expectations(
{
("xpu", None): ["You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user. A 45-year-old patient with a 10-year history of type 2 diabetes mellitus, who is currently on metformin and a SGLT2 inhibitor, presents with a 2-year history"],
("rocm", (9, 5)): ["You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user. A 45-year-old patient with a 10-year history of type 2 diabetes mellitus presents with a 2-year history of progressive, non-healing, and painful, 2.5 cm"],
("cuda", None): ["You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user. A 45-year-old patient with a 10-year history of type 2 diabetes mellitus, who is currently on metformin and a SGLT2 inhibitor, presents with a 2-year history"],
}
) # fmt: skip
EXPECTED_TEXT_COMPLETION = expected_text_completions.get_expectation()
max_generation_length = tokenizer(EXPECTED_TEXT_COMPLETION, return_tensors="pt", padding=True)[
"input_ids"
].shape[-1]
# Load config
config = AutoConfig.from_pretrained(model_id)
# NOTE: To make the model exportable we need to set the rope scaling to default to avoid hitting
# the data-dependent control flow in _longrope_frequency_update. Alternatively, we can rewrite
# that function to avoid the data-dependent control flow.
if hasattr(config, "rope_parameters") and config.rope_parameters is not None:
config.rope_parameters["type"] = "default"
# Load model
device = "cpu" # TODO (joao / export experts): should be on `torch_device`, but causes GPU OOM
dtype = torch.bfloat16
cache_implementation = "static"
attn_implementation = "sdpa"
batch_size = 1
model = AutoModelForCausalLM.from_pretrained(
model_id,
config=config,
device_map=device,
dtype=dtype,
attn_implementation=attn_implementation,
generation_config=GenerationConfig(
use_cache=True,
cache_implementation=cache_implementation,
max_length=max_generation_length,
cache_config={
"batch_size": batch_size,
"max_cache_len": max_generation_length,
},
),
)
prompt = [
"You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user."
]
prompt_tokens = tokenizer(prompt, return_tensors="pt", padding=True).to(model.device)
prompt_token_ids = prompt_tokens["input_ids"]
max_new_tokens = max_generation_length - prompt_token_ids.shape[-1]
# Static Cache + export
from transformers.integrations.executorch import TorchExportableModuleForDecoderOnlyLM
exportable_module = TorchExportableModuleForDecoderOnlyLM(model)
exported_program = exportable_module.export(
input_ids=torch.tensor([[1]], dtype=torch.long, device=model.device),
cache_position=torch.tensor([0], dtype=torch.long, device=model.device),
)
ep_generated_ids = TorchExportableModuleWithStaticCache.generate(
exported_program=exported_program, prompt_token_ids=prompt_token_ids, max_new_tokens=max_new_tokens
)
ep_generated_text = tokenizer.batch_decode(ep_generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, ep_generated_text)
| Phi3IntegrationTest |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 520905,
"end": 522638
} | class ____(Response):
"""
Response of tasks.stopped endpoint.
:param updated: Number of tasks updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
"""
_service = "tasks"
_action = "stopped"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"updated": {
"description": "Number of tasks updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(self, updated=None, fields=None, **kwargs):
super(StoppedResponse, self).__init__(**kwargs)
self.updated = updated
self.fields = fields
@schema_property("updated")
def updated(self):
return self._property_updated
@updated.setter
def updated(self, value):
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self):
return self._property_fields
@fields.setter
def fields(self, value):
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
| StoppedResponse |
python | pypa__pip | src/pip/_vendor/pkg_resources/__init__.py | {
"start": 64048,
"end": 64940
} | class ____(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path) -> bool:
return os.path.exists(path)
def _isdir(self, path) -> bool:
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(self, manager: object, resource_name: str):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path) -> bytes:
with open(path, 'rb') as stream:
return stream.read()
@classmethod
def _register(cls):
loader_names = (
'SourceFileLoader',
'SourcelessFileLoader',
)
for name in loader_names:
loader_cls = getattr(importlib.machinery, name, type(None))
register_loader_type(loader_cls, cls)
DefaultProvider._register()
| DefaultProvider |
python | PrefectHQ__prefect | src/prefect/server/orchestration/core_policy.py | {
"start": 51747,
"end": 54385
} | class ____(TaskRunOrchestrationRule):
"""
We do not allow tasks to leave terminal states if:
- The task is completed and has a persisted result
- The task is going to CANCELLING / PAUSED / CRASHED
We reset the run count when a task leaves a terminal state for a non-terminal state
which resets task run retries; this is particularly relevant for flow run retries.
"""
FROM_STATES: set[states.StateType | None] = TERMINAL_STATES # pyright: ignore[reportAssignmentType] technically TERMINAL_STATES doesn't contain None
TO_STATES: set[states.StateType | None] = ALL_ORCHESTRATION_STATES
async def before_transition(
self,
initial_state: states.State[Any] | None,
proposed_state: states.State[Any] | None,
context: OrchestrationContext[orm_models.TaskRun, core.TaskRunPolicy],
) -> None:
if initial_state is None or proposed_state is None:
return
self.original_run_count: int = context.run.run_count
# Do not allow runs to be marked as crashed, paused, or cancelling if already terminal
if proposed_state.type in {
StateType.CANCELLING,
StateType.PAUSED,
StateType.CRASHED,
}:
await self.abort_transition(f"Run is already {initial_state.type.value}.")
return
# Only allow departure from a happily completed state if the result is not persisted
if (
initial_state.is_completed()
and initial_state.data
and initial_state.data.get("type") != "unpersisted"
):
await self.reject_transition(None, "This run is already completed.")
return
if not proposed_state.is_final():
# Reset run count to reset retries
context.run.run_count = 0
# Change the name of the state to retrying if its a flow run retry
if proposed_state.is_running() and context.run.flow_run_id is not None:
self.flow_run: orm_models.FlowRun | None = await context.flow_run()
if self.flow_run is not None:
flow_retrying = context.run.flow_run_run_count < self.flow_run.run_count
if flow_retrying:
await self.rename_state("Retrying")
async def cleanup(
self,
initial_state: states.State[Any] | None,
validated_state: states.State[Any] | None,
context: OrchestrationContext[orm_models.TaskRun, core.TaskRunPolicy],
) -> None:
# reset run count
context.run.run_count = self.original_run_count
| HandleTaskTerminalStateTransitions |
python | getsentry__sentry | src/sentry/logging/handlers.py | {
"start": 4344,
"end": 5048
} | class ____(logging.Filter):
"""
A logging filter that allows log records where the message
contains given substring(s).
contains -- a string or list of strings to match
"""
def __init__(self, contains):
if not isinstance(contains, list):
contains = [contains]
if not all(isinstance(c, str) for c in contains):
raise TypeError("'contains' must be a string or list of strings")
self.contains = contains
def filter(self, record):
message = record.getMessage()
return any(c in message for c in self.contains)
whitespace_re = re.compile(r"\s+")
metrics_badchars_re = re.compile("[^a-z0-9_.]")
| MessageContainsFilter |
python | langchain-ai__langchain | libs/partners/prompty/tests/unit_tests/fake_callback_handler.py | {
"start": 2956,
"end": 6171
} | class ____(BaseCallbackHandler, BaseFakeCallbackHandlerMixin):
"""Fake callback handler for testing."""
def __init__(self) -> None:
super().__init__()
self.input_prompts = []
@property
def ignore_llm(self) -> bool:
"""Whether to ignore LLM callbacks."""
return self.ignore_llm_
@property
def ignore_chain(self) -> bool:
"""Whether to ignore chain callbacks."""
return self.ignore_chain_
@property
def ignore_agent(self) -> bool:
"""Whether to ignore agent callbacks."""
return self.ignore_agent_
@property
def ignore_retriever(self) -> bool:
"""Whether to ignore retriever callbacks."""
return self.ignore_retriever_
def on_llm_start(
self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any
) -> Any:
self.input_prompts = prompts
self.on_llm_start_common()
def on_llm_new_token(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_llm_new_token_common()
def on_llm_end(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_llm_end_common()
def on_llm_error(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_llm_error_common()
def on_retry(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_retry_common()
def on_chain_start(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_chain_start_common()
def on_chain_end(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_chain_end_common()
def on_chain_error(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_chain_error_common()
def on_tool_start(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_tool_start_common()
def on_tool_end(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_tool_end_common()
def on_tool_error(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_tool_error_common()
def on_agent_action(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_agent_action_common()
def on_agent_finish(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_agent_finish_common()
def on_text(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_text_common()
def on_retriever_start(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_retriever_start_common()
def on_retriever_end(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_retriever_end_common()
def on_retriever_error(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_retriever_error_common()
# Overriding since BaseModel has __deepcopy__ method as well
def __deepcopy__(self, memo: dict) -> "FakeCallbackHandler": # type: ignore
return self
| FakeCallbackHandler |
python | doocs__leetcode | lcp/LCP 51. 烹饪料理/Solution.py | {
"start": 0,
"end": 696
} | class ____:
def perfectMenu(
self,
materials: List[int],
cookbooks: List[List[int]],
attribute: List[List[int]],
limit: int,
) -> int:
n = len(cookbooks)
ans = -1
for mask in range(1 << n):
a = b = 0
cnt = [0] * 5
for i in range(n):
if mask >> i & 1:
x, y = attribute[i]
a += x
b += y
for j, v in enumerate(cookbooks[i]):
cnt[j] += v
if b >= limit and ans < a and all(c <= d for c, d in zip(cnt, materials)):
ans = a
return ans
| Solution |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/validators/actions/test_webhook.py | {
"start": 386,
"end": 3499
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.webhooks_plugin = plugins.get(WebHooksPlugin.slug)
self.webhooks_plugin.enable(self.project)
# non notification plugin
self.trello_plugin = plugins.get(TrelloPlugin.slug)
self.trello_plugin.enable(self.project)
self.alertable_sentry_app = self.create_sentry_app(
organization=self.organization,
name="Test Application 1",
is_alertable=True,
)
self.create_sentry_app_installation(
slug=self.alertable_sentry_app.slug, organization=self.organization
)
self.non_alertable_sentry_app = self.create_sentry_app(
organization=self.organization,
name="Test Application 2",
is_alertable=False,
)
self.create_sentry_app_installation(
slug=self.non_alertable_sentry_app.slug, organization=self.organization
)
self.valid_data = {
"type": Action.Type.WEBHOOK,
"config": {"targetIdentifier": self.alertable_sentry_app.slug},
"data": {},
}
def test_validate__sentry_app(self) -> None:
validator = BaseActionValidator(
data=self.valid_data,
context={"organization": self.organization},
)
result = validator.is_valid()
assert result is True
validator.save()
def test_validate__invalid_sentry_app(self) -> None:
validator = BaseActionValidator(
data={
**self.valid_data,
"config": {"targetIdentifier": self.non_alertable_sentry_app.slug},
},
context={"organization": self.organization},
)
result = validator.is_valid()
assert result is False
assert validator.errors == {
"service": [
ErrorDetail(
string=f"Select a valid choice. {self.non_alertable_sentry_app.slug} is not one of the available choices.",
code="invalid",
)
]
}
def test_validate__plugin(self) -> None:
validator = BaseActionValidator(
data={**self.valid_data, "config": {"targetIdentifier": self.webhooks_plugin.slug}},
context={"organization": self.organization},
)
result = validator.is_valid()
assert result is True
validator.save()
def test_validate__invalid_plugin(self) -> None:
validator = BaseActionValidator(
data={**self.valid_data, "config": {"targetIdentifier": self.trello_plugin.slug}},
context={"organization": self.organization},
)
result = validator.is_valid()
assert result is False
assert validator.errors == {
"service": [
ErrorDetail(
string=f"Select a valid choice. {self.trello_plugin.slug} is not one of the available choices.",
code="invalid",
)
]
}
| TestWebhookActionValidator |
python | jazzband__django-oauth-toolkit | oauth2_provider/migrations/0007_application_post_logout_redirect_uris.py | {
"start": 92,
"end": 498
} | class ____(migrations.Migration):
dependencies = [
("oauth2_provider", "0006_alter_application_client_secret"),
]
operations = [
migrations.AddField(
model_name="application",
name="post_logout_redirect_uris",
field=models.TextField(blank=True, help_text="Allowed Post Logout URIs list, space separated", default=""),
),
]
| Migration |
python | bokeh__bokeh | src/bokeh/core/property/any.py | {
"start": 2347,
"end": 3001
} | class ____(Any):
""" Accept all values and force reference discovery. """
@property
def has_ref(self) -> bool:
return True
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| AnyRef |
python | mlflow__mlflow | tests/pyfunc/test_pyfunc_model_with_type_hints.py | {
"start": 9417,
"end": 16973
} | class ____(pydantic.BaseModel):
custom_field: dict[str, list[str]]
messages: list[Message]
optional_int: Optional[int] = None # noqa: UP045
int_or_none: int | None = None
@pytest.mark.parametrize(
("type_hint", "result_type", "input_example"),
[
# scalars
# bytes and datetime are not supported in spark_udf
(list[int], None, [1, 2, 3]),
(list[str], None, ["a", "b", "c"]),
(list[bool], None, [True, False, True]),
(list[float], None, [1.23, 2.34, 3.45]),
# lists
(list[list[str]], ArrayType(StringType()), [["a", "b"], ["c", "d"]]),
# dictionaries
(
list[dict[str, str]],
MapType(StringType(), StringType()),
[{"a": "b"}, {"c": "d"}],
),
(
list[dict[str, list[str]]],
MapType(StringType(), ArrayType(StringType())),
[{"a": ["b"]}, {"c": ["d"]}],
),
# Union type is not supported because fields in the same column of spark DataFrame
# must be of same type
# Any type is not supported yet
# (list[Any], Schema([ColSpec(type=AnyType())]), ["a", "b", "c"]),
# Pydantic Models
(
list[CustomExample3],
StructType(
[
StructField("custom_field", MapType(StringType(), ArrayType(StringType()))),
StructField(
"messages",
ArrayType(
StructType(
[
StructField("role", StringType(), False),
StructField("content", StringType(), False),
]
)
),
),
StructField("optional_int", IntegerType()),
StructField("int_or_none", IntegerType()),
]
),
[
{
"custom_field": {"a": ["a", "b", "c"]},
"messages": [
{"role": "admin", "content": "hello"},
{"role": "user", "content": "hi"},
],
"optional_int": 123,
"int_or_none": 456,
},
{
"custom_field": {"a": ["a", "b", "c"]},
"messages": [
{"role": "admin", "content": "hello"},
],
"optional_int": None,
"int_or_none": None,
},
],
),
],
)
def test_spark_udf(spark, type_hint, result_type, input_example):
class Model(mlflow.pyfunc.PythonModel):
def predict(self, model_input: type_hint) -> type_hint:
return model_input
with mlflow.start_run():
model_info = mlflow.pyfunc.log_model(
name="model", python_model=Model(), input_example=input_example
)
# test spark_udf
udf = mlflow.pyfunc.spark_udf(spark, model_info.model_uri, result_type=result_type)
# the spark dataframe must put the input data in a single column
if result_type is None:
# rely on spark to auto-infer schema
df = spark.createDataFrame(pd.DataFrame({"input": input_example}))
else:
schema = StructType([StructField("input", result_type)])
df = spark.createDataFrame(pd.DataFrame({"input": input_example}), schema=schema)
df = df.withColumn("response", udf("input"))
pdf = df.toPandas()
assert [
x.asDict(recursive=True) if isinstance(x, Row) else x for x in pdf["response"].tolist()
] == input_example
def test_pyfunc_model_with_no_op_type_hint_pass_signature_works():
def predict(model_input: pd.DataFrame) -> pd.DataFrame:
return model_input
input_example = pd.DataFrame({"a": [1]})
signature = infer_signature(input_example, predict(input_example))
with mlflow.start_run():
model_info = mlflow.pyfunc.log_model(
name="test_model",
python_model=predict,
input_example=input_example,
signature=signature,
)
assert model_info.signature.inputs == Schema([ColSpec(type=DataType.long, name="a")])
pyfunc = mlflow.pyfunc.load_model(model_info.model_uri)
pd.testing.assert_frame_equal(pyfunc.predict(input_example), input_example)
class Model(mlflow.pyfunc.PythonModel):
def predict(self, model_input: pd.DataFrame, params=None) -> pd.DataFrame:
return model_input
with mlflow.start_run():
model_info = mlflow.pyfunc.log_model(
name="test_model",
python_model=Model(),
input_example=input_example,
)
assert model_info.signature.inputs == Schema([ColSpec(type=DataType.long, name="a")])
pyfunc = mlflow.pyfunc.load_model(model_info.model_uri)
pd.testing.assert_frame_equal(pyfunc.predict(input_example), input_example)
def test_pyfunc_model_infer_signature_from_type_hints_errors(recwarn):
    """Logging proceeds but warns when the input example, the output, or the
    type hints themselves cannot be validated/used for signature inference."""
    # NOTE(review): `recwarn` (pytest fixture) is not referenced in the body —
    # presumably requested only to capture warnings; confirm it is still needed.

    def predict(model_input: list[int]) -> int:
        return model_input

    # Input example whose element type contradicts the hint -> warning.
    with mlflow.start_run():
        with mock.patch("mlflow.models.signature._logger.warning") as mock_warning:
            mlflow.pyfunc.log_model(
                name="test_model", python_model=predict, input_example=["string"]
            )
            assert (
                "Input example is not compatible with the type hint of the `predict` function."
                in mock_warning.call_args[0][0]
            )

    def predict(model_input: list[int]) -> str:
        # Returns the input list, which does not match the declared `str` output.
        return model_input

    output_hints = _extract_type_hints(predict, 0).output
    # Output failing validation against its hint -> warning; output schema
    # falls back to AnyType while the input schema is still inferred.
    with mlflow.start_run():
        with mock.patch("mlflow.models.signature._logger.warning") as mock_warning:
            model_info = mlflow.pyfunc.log_model(
                name="test_model", python_model=predict, input_example=[123]
            )
            assert (
                f"Failed to validate output `[123]` against type hint `{output_hints}`"
                in mock_warning.call_args[0][0]
            )
        assert model_info.signature.inputs == Schema([ColSpec(type=DataType.long)])
        assert model_info.signature.outputs == Schema([ColSpec(AnyType())])

    class Model(mlflow.pyfunc.PythonModel):
        def predict(self, model_input: pd.DataFrame, params=None) -> pd.DataFrame:
            return model_input

    # No usable hints and no input example -> signature cannot be inferred.
    with mlflow.start_run():
        with mock.patch("mlflow.pyfunc._logger.warning") as mock_warning:
            mlflow.pyfunc.log_model(name="test_model", python_model=Model())
        assert (
            "cannot be used to infer model signature and input example is not provided, "
            "model signature cannot be inferred."
        ) in mock_warning.call_args[0][0]

    # Empty DataFrame example -> inference from the example fails with a warning.
    with mlflow.start_run():
        with mock.patch("mlflow.pyfunc._logger.warning") as mock_warning:
            mlflow.pyfunc.log_model(
                name="test_model", python_model=Model(), input_example=pd.DataFrame()
            )
        assert "Failed to infer model signature from input example" in mock_warning.call_args[0][0]
def save_model_file_for_code_based_logging(type_hint, tmp_path, model_type, extra_def=""):
if model_type == "callable":
model_def = f"""
def predict(model_input: {type_hint}) -> {type_hint}:
return model_input
set_model(predict)
"""
elif model_type == "python_model":
model_def = f"""
| CustomExample3 |
python | spack__spack | lib/spack/spack/util/gcs.py | {
"start": 5053,
"end": 7464
class GCSBlob:
    """GCS Blob object

    Wraps some blob methods for spack functionality
    """

    def __init__(self, url, client=None):
        """Create a blob handle from a ``gs://`` URL.

        Args:
            url: parsed URL (e.g. from ``urllib.parse.urlparse``); the scheme
                must be ``gs``.
            client: optional GCS storage client; a default client is created
                when omitted.

        Raises:
            ValueError: if the URL scheme is not ``gs``.
        """
        self.url = url
        if url.scheme != "gs":
            raise ValueError(
                "Can not create GCS blob connection with scheme: {SCHEME}".format(
                    SCHEME=url.scheme
                )
            )

        self.client = client or gcs_client()
        self.bucket = GCSBucket(url)
        # Blob names inside a bucket are relative: strip the leading "/".
        self.blob_path = self.url.path.lstrip("/")

        tty.debug("New GCSBlob")
        tty.debug(" blob_path = {0}".format(self.blob_path))

        # Eagerly create the bucket so later uploads cannot fail on a
        # missing bucket.
        if not self.bucket.exists():
            tty.warn("The bucket {0} does not exist, it will be created".format(self.bucket.name))
            self.bucket.create()

    def get(self):
        # Returns the blob object (or None if it does not exist — behavior of
        # the underlying bucket.get_blob; confirm against GCSBucket).
        return self.bucket.get_blob(self.blob_path)

    def exists(self):
        """Return True if the blob exists in the bucket, False otherwise."""
        from google.cloud.exceptions import NotFound

        try:
            blob = self.bucket.blob(self.blob_path)
            exists = blob.exists()
        except NotFound:
            return False

        return exists

    def delete_blob(self):
        """Delete this blob; a missing blob is reported as an error, not raised."""
        from google.cloud.exceptions import NotFound

        try:
            blob = self.bucket.blob(self.blob_path)
            blob.delete()
        except NotFound as ex:
            tty.error("{0}, Could not delete gcs blob {1}".format(ex, self.blob_path))

    def upload_to_blob(self, local_file_path):
        """Upload the local file at *local_file_path* to this blob."""
        blob = self.bucket.blob(self.blob_path)
        blob.upload_from_filename(local_file_path)

    def get_blob_byte_stream(self):
        """Return a binary read stream over the blob's contents."""
        return self.bucket.get_blob(self.blob_path).open(mode="rb")

    def get_blob_headers(self):
        """Return HTTP-style headers describing the blob's stored metadata."""
        blob = self.bucket.get_blob(self.blob_path)

        headers = {
            "Content-type": blob.content_type,
            "Content-encoding": blob.content_encoding,
            "Content-language": blob.content_language,
            "MD5Hash": blob.md5_hash,
        }

        return headers
def gcs_open(req, *args, **kwargs):
    """Open a reader stream to a blob object on GCS"""
    url = urllib.parse.urlparse(req.get_full_url())
    blob = GCSBlob(url)

    # A missing blob maps onto the urllib error model.
    if not blob.exists():
        raise URLError("GCS blob {0} does not exist".format(blob.blob_path))

    return urllib.response.addinfourl(
        blob.get_blob_byte_stream(), blob.get_blob_headers(), url
    )
| GCSBlob |
python | astropy__astropy | astropy/extern/ply/lex.py | {
"start": 3883,
"end": 22447
class Lexer:
    """Table-driven lexing engine (vendored PLY).

    Per-state tables live in the ``lexstate*`` dictionaries; ``begin()`` caches
    the active state's tables into ``lexre``/``lexignore``/``lexerrorf``/
    ``lexeoff``, which ``token()`` reads on every call.
    """

    def __init__(self):
        self.lexre = None             # Master regular expression. This is a list of
                                      # tuples (re, findex) where re is a compiled
                                      # regular expression and findex is a list
                                      # mapping regex group numbers to rules
        self.lexretext = None         # Current regular expression strings
        self.lexstatere = {}          # Dictionary mapping lexer states to master regexs
        self.lexstateretext = {}      # Dictionary mapping lexer states to regex strings
        self.lexstaterenames = {}     # Dictionary mapping lexer states to symbol names
        self.lexstate = 'INITIAL'     # Current lexer state
        self.lexstatestack = []       # Stack of lexer states
        self.lexstateinfo = None      # State information
        self.lexstateignore = {}      # Dictionary of ignored characters for each state
        self.lexstateerrorf = {}      # Dictionary of error functions for each state
        self.lexstateeoff = {}        # Dictionary of eof functions for each state
        self.lexreflags = 0           # Optional re compile flags
        self.lexdata = None           # Actual input data (as a string)
        self.lexpos = 0               # Current position in input text
        self.lexlen = 0               # Length of the input text
        self.lexerrorf = None         # Error rule (if any)
        self.lexeoff = None           # EOF rule (if any)
        self.lextokens = None         # List of valid tokens
        self.lexignore = ''           # Ignored characters
        self.lexliterals = ''         # Literal characters that can be passed through
        self.lexmodule = None         # Module
        self.lineno = 1               # Current line number
        self.lexoptimize = False      # Optimized mode

    def clone(self, object=None):
        """Return a shallow copy of this lexer; when *object* is given, rebind
        all rule/error methods in the copied tables onto that object."""
        c = copy.copy(self)

        # If the object parameter has been supplied, it means we are attaching the
        # lexer to a new object.  In this case, we have to rebind all methods in
        # the lexstatere and lexstateerrorf tables.

        if object:
            newtab = {}
            for key, ritem in self.lexstatere.items():
                newre = []
                for cre, findex in ritem:
                    newfindex = []
                    for f in findex:
                        if not f or not f[0]:
                            # Entry has no bound function — keep as-is.
                            newfindex.append(f)
                            continue
                        newfindex.append((getattr(object, f[0].__name__), f[1]))
                    newre.append((cre, newfindex))
                newtab[key] = newre
            c.lexstatere = newtab
            c.lexstateerrorf = {}
            for key, ef in self.lexstateerrorf.items():
                c.lexstateerrorf[key] = getattr(object, ef.__name__)
            c.lexmodule = object
        return c

    # ------------------------------------------------------------
    # writetab() - Write lexer information to a table file
    # ------------------------------------------------------------
    def writetab(self, lextab, outputdir=''):
        """Serialize the lexer tables to ``<outputdir>/<lextab>.py`` so a later
        run can rebuild the lexer without re-analyzing the rules."""
        if isinstance(lextab, types.ModuleType):
            raise IOError("Won't overwrite existing lextab module")
        basetabmodule = lextab.split('.')[-1]
        filename = os.path.join(outputdir, basetabmodule) + '.py'
        with open(filename, 'w') as tf:
            tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
            tf.write('_tabversion = %s\n' % repr(__tabversion__))
            tf.write('_lextokens = set(%s)\n' % repr(tuple(sorted(self.lextokens))))
            tf.write('_lexreflags = %s\n' % repr(int(self.lexreflags)))
            tf.write('_lexliterals = %s\n' % repr(self.lexliterals))
            tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))

            # Rewrite the lexstatere table, replacing function objects with function names
            tabre = {}
            for statename, lre in self.lexstatere.items():
                titem = []
                for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]):
                    titem.append((retext, _funcs_to_names(func, renames)))
                tabre[statename] = titem

            tf.write('_lexstatere = %s\n' % repr(tabre))
            tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore))

            taberr = {}
            for statename, ef in self.lexstateerrorf.items():
                taberr[statename] = ef.__name__ if ef else None
            tf.write('_lexstateerrorf = %s\n' % repr(taberr))

            tabeof = {}
            for statename, ef in self.lexstateeoff.items():
                tabeof[statename] = ef.__name__ if ef else None
            tf.write('_lexstateeoff = %s\n' % repr(tabeof))

    # ------------------------------------------------------------
    # readtab() - Read lexer information from a tab file
    # ------------------------------------------------------------
    def readtab(self, tabfile, fdict):
        """Rebuild the lexer tables from a previously written table module,
        resolving saved function names through *fdict*."""
        if isinstance(tabfile, types.ModuleType):
            lextab = tabfile
        else:
            exec('import %s' % tabfile)
            lextab = sys.modules[tabfile]

        if getattr(lextab, '_tabversion', '0.0') != __tabversion__:
            raise ImportError('Inconsistent PLY version')

        self.lextokens = lextab._lextokens
        self.lexreflags = lextab._lexreflags
        self.lexliterals = lextab._lexliterals
        self.lextokens_all = self.lextokens | set(self.lexliterals)
        self.lexstateinfo = lextab._lexstateinfo
        self.lexstateignore = lextab._lexstateignore
        self.lexstatere = {}
        self.lexstateretext = {}
        for statename, lre in lextab._lexstatere.items():
            titem = []
            # NOTE: txtitem is never populated here, so lexstateretext for a
            # table-loaded lexer stays empty (matches upstream PLY behavior).
            txtitem = []
            for pat, func_name in lre:
                titem.append((re.compile(pat, lextab._lexreflags), _names_to_funcs(func_name, fdict)))

            self.lexstatere[statename] = titem
            self.lexstateretext[statename] = txtitem

        self.lexstateerrorf = {}
        for statename, ef in lextab._lexstateerrorf.items():
            self.lexstateerrorf[statename] = fdict[ef]

        self.lexstateeoff = {}
        for statename, ef in lextab._lexstateeoff.items():
            self.lexstateeoff[statename] = fdict[ef]

        self.begin('INITIAL')

    # ------------------------------------------------------------
    # input() - Push a new string into the lexer
    # ------------------------------------------------------------
    def input(self, s):
        """Set *s* as the text to tokenize and reset the scan position."""
        # Pull off the first character to see if s looks like a string
        c = s[:1]
        if not isinstance(c, StringTypes):
            raise ValueError('Expected a string')
        self.lexdata = s
        self.lexpos = 0
        self.lexlen = len(s)

    # ------------------------------------------------------------
    # begin() - Changes the lexing state
    # ------------------------------------------------------------
    def begin(self, state):
        """Switch to *state*, caching its tables in the active-state slots."""
        if state not in self.lexstatere:
            raise ValueError('Undefined state')
        self.lexre = self.lexstatere[state]
        self.lexretext = self.lexstateretext[state]
        self.lexignore = self.lexstateignore.get(state, '')
        self.lexerrorf = self.lexstateerrorf.get(state, None)
        self.lexeoff = self.lexstateeoff.get(state, None)
        self.lexstate = state

    # ------------------------------------------------------------
    # push_state() - Changes the lexing state and saves old on stack
    # ------------------------------------------------------------
    def push_state(self, state):
        self.lexstatestack.append(self.lexstate)
        self.begin(state)

    # ------------------------------------------------------------
    # pop_state() - Restores the previous state
    # ------------------------------------------------------------
    def pop_state(self):
        self.begin(self.lexstatestack.pop())

    # ------------------------------------------------------------
    # current_state() - Returns the current lexing state
    # ------------------------------------------------------------
    def current_state(self):
        return self.lexstate

    # ------------------------------------------------------------
    # skip() - Skip ahead n characters
    # ------------------------------------------------------------
    def skip(self, n):
        self.lexpos += n

    # ------------------------------------------------------------
    # opttoken() - Return the next token from the Lexer
    #
    # Note: This function has been carefully implemented to be as fast
    # as possible.  Don't make changes unless you really know what
    # you are doing
    # ------------------------------------------------------------
    def token(self):
        """Return the next LexToken, or None at end of input (when no EOF
        rule is installed)."""
        # Make local copies of frequently referenced attributes
        lexpos = self.lexpos
        lexlen = self.lexlen
        lexignore = self.lexignore
        lexdata = self.lexdata

        while lexpos < lexlen:
            # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
            if lexdata[lexpos] in lexignore:
                lexpos += 1
                continue

            # Look for a regular expression match
            for lexre, lexindexfunc in self.lexre:
                m = lexre.match(lexdata, lexpos)
                if not m:
                    continue

                # Create a token for return
                tok = LexToken()
                tok.value = m.group()
                tok.lineno = self.lineno
                tok.lexpos = lexpos

                i = m.lastindex
                func, tok.type = lexindexfunc[i]

                if not func:
                    # If no token type was set, it's an ignored token
                    if tok.type:
                        self.lexpos = m.end()
                        return tok
                    else:
                        lexpos = m.end()
                        break

                lexpos = m.end()

                # If token is processed by a function, call it

                tok.lexer = self      # Set additional attributes useful in token rules
                self.lexmatch = m
                self.lexpos = lexpos

                newtok = func(tok)

                # Every function must return a token, if nothing, we just move to next token
                if not newtok:
                    lexpos = self.lexpos          # This is here in case user has updated lexpos.
                    lexignore = self.lexignore    # This is here in case there was a state change
                    break

                # Verify type of the token.  If not in the token map, raise an error
                if not self.lexoptimize:
                    if newtok.type not in self.lextokens_all:
                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                            func.__code__.co_filename, func.__code__.co_firstlineno,
                            func.__name__, newtok.type), lexdata[lexpos:])

                return newtok
            else:
                # No match, see if in literals
                if lexdata[lexpos] in self.lexliterals:
                    tok = LexToken()
                    tok.value = lexdata[lexpos]
                    tok.lineno = self.lineno
                    tok.type = tok.value
                    tok.lexpos = lexpos
                    self.lexpos = lexpos + 1
                    return tok

                # No match. Call t_error() if defined.
                if self.lexerrorf:
                    tok = LexToken()
                    tok.value = self.lexdata[lexpos:]
                    tok.lineno = self.lineno
                    tok.type = 'error'
                    tok.lexer = self
                    tok.lexpos = lexpos
                    self.lexpos = lexpos
                    newtok = self.lexerrorf(tok)
                    if lexpos == self.lexpos:
                        # Error method didn't change text position at all. This is an error.
                        raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                    lexpos = self.lexpos
                    if not newtok:
                        continue
                    return newtok

                self.lexpos = lexpos
                raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])

        if self.lexeoff:
            tok = LexToken()
            tok.type = 'eof'
            tok.value = ''
            tok.lineno = self.lineno
            tok.lexpos = lexpos
            tok.lexer = self
            self.lexpos = lexpos
            newtok = self.lexeoff(tok)
            return newtok

        self.lexpos = lexpos + 1
        if self.lexdata is None:
            raise RuntimeError('No input string given with input()')
        return None

    # Iterator interface
    def __iter__(self):
        return self

    def next(self):
        t = self.token()
        if t is None:
            raise StopIteration
        return t

    __next__ = next
# -----------------------------------------------------------------------------
# ==== Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# _get_regex(func)
#
# Returns the regular expression assigned to a function either as a doc string
# or as a .regex attribute attached by the @TOKEN decorator.
# -----------------------------------------------------------------------------
def _get_regex(func):
return getattr(func, 'regex', func.__doc__)
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    """Return the symbol table visible *levels* frames up the call stack:
    that frame's globals, overlaid with its locals when they differ."""
    frame = sys._getframe(levels)
    symbols = dict(frame.f_globals)
    if frame.f_locals != frame.f_globals:
        symbols.update(frame.f_locals)
    return symbols
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist, namelist):
result = []
for f, name in zip(funclist, namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist, fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]], n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist, reflags, ldict, toknames):
if not relist:
return []
regex = '|'.join(relist)
try:
lexre = re.compile(regex, reflags)
# Build the index to function map for the matching engine
lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
lexindexnames = lexindexfunc[:]
for f, i in lexre.groupindex.items():
handle = ldict.get(f, None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle, toknames[f])
lexindexnames[i] = f
elif handle is not None:
lexindexnames[i] = f
if f.find('ignore_') > 0:
lexindexfunc[i] = (None, None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre, lexindexfunc)], [regex], [lexindexnames]
except Exception:
m = int(len(relist)/2)
if m == 0:
m = 1
llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
return (llist+rlist), (lre+rre), (lnames+rnames)
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s, names):
parts = s.split('_')
for i, part in enumerate(parts[1:], 1):
if part not in names and part != 'ANY':
break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = '_'.join(parts[i:])
return (states, tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
| Lexer |
python | tensorflow__tensorflow | tensorflow/python/training/momentum_test.py | {
"start": 1373,
"end": 27888
} | class ____(test.TestCase):
def _update_nesterov_momentum_numpy(self, var, accum, g, lr, momentum):
var = var + accum * lr * momentum
accum = accum * momentum + g
var = var - lr * accum
var = var - accum * lr * momentum
return var, accum
  def doTestBasic(self, use_resource=False, use_callable_params=False):
    """Run two momentum steps over ref/resource variables and check slots and
    parameter values against hand-computed results (lr=2.0, momentum=0.9)."""
    for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
      if use_resource:
        var0 = resource_variable_ops.ResourceVariable([1.0, 2.0],
                                                      dtype=dtype,
                                                      name="var0_%d" % i)
        var1 = resource_variable_ops.ResourceVariable([3.0, 4.0],
                                                      dtype=dtype,
                                                      name="var1_%d" % i)
      else:
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([3.0, 4.0], dtype=dtype)
      grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
      grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
      learning_rate = lambda: 2.0
      momentum = lambda: 0.9
      if not use_callable_params:
        learning_rate = learning_rate()
        momentum = momentum()
      mom_opt = momentum_lib.MomentumOptimizer(
          learning_rate=learning_rate, momentum=momentum)
      mom_update = mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))

      if not context.executing_eagerly():
        self.evaluate(variables.global_variables_initializer())
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 4.0], self.evaluate(var1))

      # Check we have slots
      self.assertEqual(["momentum"], mom_opt.get_slot_names())
      slot0 = mom_opt.get_slot(var0, "momentum")
      self.assertEqual(slot0.get_shape(), var0.get_shape())
      slot1 = mom_opt.get_slot(var1, "momentum")
      self.assertEqual(slot1.get_shape(), var1.get_shape())
      if not context.executing_eagerly():
        # Slot variables must not be trainable.
        self.assertFalse(slot0 in variables.trainable_variables())
        self.assertFalse(slot1 in variables.trainable_variables())

      # Step 1: the momentum accumulators where 0. So we should see a normal
      # update: v -= grad * learning_rate
      if not context.executing_eagerly():
        # In eager mode apply_gradients above already performed step 1.
        self.evaluate(mom_update)

      # Check that the momentum accumulators have been updated.
      self.assertAllCloseAccordingToType(
          np.array([0.1, 0.1]), self.evaluate(slot0))
      self.assertAllCloseAccordingToType(
          np.array([0.01, 0.01]), self.evaluate(slot1))
      # Check that the parameters have been updated.
      self.assertAllCloseAccordingToType(
          np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), self.evaluate(var0))
      self.assertAllCloseAccordingToType(
          np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
          self.evaluate(var1))
      # Step 2: the momentum accumulators contain the previous update.
      if context.executing_eagerly():
        mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      else:
        self.evaluate(mom_update)
      # Check that the momentum accumulators have been updated.
      self.assertAllCloseAccordingToType(
          np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
          self.evaluate(slot0))
      self.assertAllCloseAccordingToType(
          np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
          self.evaluate(slot1))
      # Check that the parameters have been updated.
      self.assertAllCloseAccordingToType(
          np.array([
              1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
              2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
          ]), self.evaluate(var0))
      self.assertAllCloseAccordingToType(
          np.array([
              2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
              3.98 - ((0.9 * 0.01 + 0.01) * 2.0)
          ]), self.evaluate(var1))
  def testBasic(self):
    """Basic two-step momentum update with reference (non-resource) variables."""
    with self.cached_session():
      self.doTestBasic(use_resource=False)
  @test_util.run_in_graph_and_eager_modes
  def testResourceBasic(self):
    """Basic two-step momentum update with resource variables (graph + eager)."""
    self.doTestBasic(use_resource=True)
  def testBasicCallableParams(self):
    """learning_rate/momentum given as callables must work in eager mode."""
    with context.eager_mode():
      self.doTestBasic(use_resource=True, use_callable_params=True)
def testVariablesAcrossGraphs(self):
optimizer = momentum_lib.MomentumOptimizer(0.01, 0.5)
with ops.Graph().as_default():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0],
dtype=dtypes.float32,
name="var0")
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0],
dtype=dtypes.float32,
name="var1")
loss = math_ops.reduce_sum(var0 + var1)
optimizer.minimize(loss)
optimizer_variables = optimizer.variables()
self.assertStartsWith(optimizer_variables[0].name, "var0")
self.assertStartsWith(optimizer_variables[1].name, "var1")
self.assertEqual(2, len(optimizer_variables))
with ops.Graph().as_default():
var2 = resource_variable_ops.ResourceVariable([1.0, 2.0],
dtype=dtypes.float32,
name="var2")
var3 = resource_variable_ops.ResourceVariable([3.0, 4.0],
dtype=dtypes.float32,
name="var3")
loss = math_ops.reduce_sum(var2 + var3)
optimizer.minimize(loss)
optimizer_variables = optimizer.variables()
self.assertStartsWith(optimizer_variables[0].name, "var2")
self.assertStartsWith(optimizer_variables[1].name, "var3")
self.assertEqual(2, len(optimizer_variables))
  def testNesterovMomentum(self):
    """Four Nesterov steps on a quadratic cost must match the NumPy reference."""
    for dtype in [dtypes.float32, dtypes.float64]:
      # train.MomentumOptimizer is V1 only API.
      with ops.Graph().as_default(), self.cached_session():
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([3.0, 4.0], dtype=dtype)
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        # Gradients are d(cost)/d(var0) = 10*var0 and d(cost)/d(var1) = 3.
        cost = 5 * var0 * var0 + 3 * var1
        global_step = variables.Variable(
            array_ops.zeros([], dtypes.int64), name="global_step")
        mom_op = momentum_lib.MomentumOptimizer(
            learning_rate=2.0, momentum=0.9, use_nesterov=True)
        opt_op = mom_op.minimize(cost, global_step, [var0, var1])
        self.evaluate(variables.global_variables_initializer())
        for t in range(1, 5):
          opt_op.run()
          var0_np, accum0_np = self._update_nesterov_momentum_numpy(
              var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
          var1_np, accum1_np = self._update_nesterov_momentum_numpy(
              var1_np, accum1_np, 3, 2.0, 0.9)
          self.assertAllClose(var0_np, self.evaluate(var0))
          self.assertAllClose(var1_np, self.evaluate(var1))
  def testSparseNesterovMomentum(self):
    """Nesterov updates fed as IndexedSlices must match the dense NumPy
    reference (the slices cover every index, so results are identical)."""
    for dtype in [dtypes.float32, dtypes.float64]:
      # train.MomentumOptimizer is V1 only API.
      with ops.Graph().as_default(), self.cached_session():
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        # First pass: record the gradient sequence the optimizer will see.
        grads = []
        for t in range(1, 5):
          grads.append(var0_np * 10)
          var0_np, accum0_np = self._update_nesterov_momentum_numpy(
              var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
          var1_np, accum1_np = self._update_nesterov_momentum_numpy(
              var1_np, accum1_np, 3, 2.0, 0.9)
        # Reset the reference state before replaying through the optimizer.
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        var0 = variables.Variable(var0_np)
        var1 = variables.Variable(var1_np)
        mom_op = momentum_lib.MomentumOptimizer(
            learning_rate=2.0, momentum=0.9, use_nesterov=True)
        x_feed = array_ops.placeholder(dtype)
        # Sparse gradient covering indices [0, 1] of a dense shape of [2].
        y_feed = indexed_slices.IndexedSlices(x_feed,
                                              constant_op.constant([0, 1]),
                                              constant_op.constant([2]))
        grads_and_vars = [(y_feed, var0),
                          (constant_op.constant([3.0, 3.0], dtype=dtype), var1)]
        opt_update = mom_op.apply_gradients(grads_and_vars)
        self.evaluate(variables.global_variables_initializer())
        for t in range(1, 5):
          opt_update.run(feed_dict={x_feed: grads[t - 1]})
          var0_np, accum0_np = self._update_nesterov_momentum_numpy(
              var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
          var1_np, accum1_np = self._update_nesterov_momentum_numpy(
              var1_np, accum1_np, 3, 2.0, 0.9)
          self.assertAllClose(var0_np, self.evaluate(var0))
          self.assertAllClose(var1_np, self.evaluate(var1))
  @test_util.run_in_graph_and_eager_modes
  def testMinimizeSparseResourceVariable(self):
    """minimize() through an embedding lookup (sparse gradient path) on a
    resource variable produces the expected single-step result."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      # This test invokes the ResourceSparseApplyMomentum operation, which
      # did not have a registered GPU kernel as of April 2018. With graph
      # execution, the placement algorithm notices this and automatically
      # places the variable in CPU (host) memory. With eager execution,
      # the variable would be placed in GPU memory if available, which
      # would then conflict with the future invocation of the
      # ResourceSparseApplyMomentum operation.
      # To work around this discrepancy, for now we force the variable
      # to be placed on CPU.
      with ops.device("/cpu:0"):
        var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)

      # pylint: disable=cell-var-from-loop
      def loss():
        x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
        pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
        return pred * pred
      # pylint: enable=cell-var-from-loop

      opt = momentum_lib.MomentumOptimizer(learning_rate=1.0, momentum=0.0)
      sgd_op = opt.minimize(loss)
      self.evaluate(variables.global_variables_initializer())
      # Run 1 step of sgd
      self.evaluate(sgd_op)
      # Validate updated params
      self.assertAllCloseAccordingToType([[-111, -138]], self.evaluate(var0))
  @test_util.run_in_graph_and_eager_modes
  def testMinimizeWith2DIndicesForEmbeddingLookup(self):
    """Embedding lookup with 2-D indices must only update the looked-up row."""
    # This test invokes the ResourceSparseApplyMomentum operation, which
    # did not have a registered GPU kernel as of April 2018. With graph
    # execution, the placement algorithm notices this and automatically
    # places the variable in CPU (host) memory. With eager execution,
    # the variable would be placed in GPU memory if available, which
    # would then conflict with the future invocation of the
    # ResourceSparseApplyMomentum operation.
    # To work around this discrepancy, for now we force the variable
    # to be placed on CPU.
    with ops.device("/cpu:0"):
      var0 = resource_variable_ops.ResourceVariable(array_ops.ones([2, 2]))

    def loss():
      return math_ops.reduce_sum(embedding_ops.embedding_lookup(var0, [[1]]))

    opt = momentum_lib.MomentumOptimizer(learning_rate=1.0, momentum=0.0)
    sgd_op = opt.minimize(loss)
    self.evaluate(variables.global_variables_initializer())
    self.evaluate(sgd_op)
    # Row 0 untouched; row 1 decremented by lr * grad (= 1.0).
    self.assertAllCloseAccordingToType([[1, 1], [0, 0]], self.evaluate(var0))
  def testTensorLearningRateAndMomentum(self):
    """Same two-step check as doTestBasic, but with learning_rate and momentum
    supplied as constant tensors rather than Python floats."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      # train.MomentumOptimizer is V1 only API.
      with ops.Graph().as_default(), self.cached_session():
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        mom_opt = momentum_lib.MomentumOptimizer(
            learning_rate=constant_op.constant(2.0),
            momentum=constant_op.constant(0.9))
        mom_update = mom_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        self.evaluate(variables.global_variables_initializer())
        # Check we have slots
        self.assertEqual(["momentum"], mom_opt.get_slot_names())
        slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        self.assertFalse(slot0 in variables.trainable_variables())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
        self.assertFalse(slot1 in variables.trainable_variables())

        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Step 1: the momentum accumulators where 0. So we should see a normal
        # update: v -= grad * learning_rate
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(
            np.array([0.1, 0.1]), self.evaluate(slot0))
        self.assertAllCloseAccordingToType(
            np.array([0.01, 0.01]), self.evaluate(slot1))
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(
            np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]),
            self.evaluate(var0))
        self.assertAllCloseAccordingToType(
            np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
            self.evaluate(var1))
        # Step 2: the momentum accumulators contain the previous update.
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
            self.evaluate(slot0))
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
            self.evaluate(slot1))
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(
            np.array([
                1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
                2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
            ]), self.evaluate(var0))
        self.assertAllCloseAccordingToType(
            np.array([
                2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
                3.98 - ((0.9 * 0.01 + 0.01) * 2.0)
            ]), self.evaluate(var1))
def _dbParamsMom01(self):
"""Return dist-belief momentum values.
Return values been generated from the dist-belief momentum unittest,
running with a learning rate of 0.1 and a momentum of 0.1.
These values record how a parameter vector of size 10, initialized with 0.0,
gets updated with 10 consecutive momentum steps. It uses random gradients.
Returns:
db_grad: The gradients to apply
db_out: The parameters after the momentum update.
"""
db_grad = [[]] * 10
db_out = [[]] * 10
# pylint: disable=line-too-long
db_grad[0] = [
0.00096264342, 0.17914793, 0.93945462, 0.41396621, 0.53037018,
0.93197989, 0.78648776, 0.50036013, 0.55345792, 0.96722615
]
db_out[0] = [
-9.6264346e-05, -0.017914793, -0.093945466, -0.041396622, -0.053037018,
-0.093197994, -0.078648776, -0.050036013, -0.055345792, -0.096722618
]
db_grad[1] = [
0.17075552, 0.88821375, 0.20873757, 0.25236958, 0.57578111, 0.15312378,
0.5513742, 0.94687688, 0.16012503, 0.22159521
]
db_out[1] = [
-0.017181443, -0.10852765, -0.12421377, -0.070773244, -0.11591884,
-0.11783017, -0.14165108, -0.14972731, -0.076892875, -0.1285544
]
db_grad[2] = [
0.35077485, 0.47304362, 0.44412705, 0.44368884, 0.078527533, 0.81223965,
0.31168157, 0.43203235, 0.16792089, 0.24644311
]
db_out[2] = [
-0.053967446, -0.1648933, -0.1716533, -0.1180798, -0.13005978,
-0.20151734, -0.17911947, -0.20289968, -0.095839672, -0.15638189
]
db_grad[3] = [
0.9694621, 0.75035888, 0.28171822, 0.83813518, 0.53807181, 0.3728098,
0.81454384, 0.03848977, 0.89759839, 0.93665648
]
db_out[3] = [
-0.15459226, -0.24556576, -0.20456907, -0.20662397, -0.18528105,
-0.24716705, -0.2643207, -0.21206589, -0.18749419, -0.2528303
]
db_grad[4] = [
0.38578293, 0.8536852, 0.88722926, 0.66276771, 0.13678469, 0.94036359,
0.69107032, 0.81897682, 0.5433259, 0.67860287
]
db_out[4] = [
-0.20323303, -0.33900154, -0.29658359, -0.28175515, -0.20448165,
-0.34576839, -0.34194785, -0.29488021, -0.25099224, -0.33033544
]
db_grad[5] = [
0.27885768, 0.76100707, 0.24625534, 0.81354135, 0.18959245, 0.48038563,
0.84163809, 0.41172323, 0.83259648, 0.44941229
]
db_out[5] = [
-0.23598288, -0.42444581, -0.33041057, -0.3706224, -0.22536094,
-0.40366709, -0.43387437, -0.34433398, -0.34060168, -0.38302717
]
db_grad[6] = [
0.27233034, 0.056316052, 0.5039115, 0.24105175, 0.35697976, 0.75913221,
0.73577434, 0.16014607, 0.57500273, 0.071136251
]
db_out[6] = [
-0.26649091, -0.43862185, -0.38418442, -0.40361428, -0.26314685,
-0.48537019, -0.51664448, -0.36529395, -0.40706289, -0.39540997
]
db_grad[7] = [
0.58697265, 0.2494842, 0.08106143, 0.39954534, 0.15892942, 0.12683646,
0.74053431, 0.16033, 0.66625422, 0.73515922
]
db_out[7] = [
-0.32823896, -0.46498787, -0.39766794, -0.446868, -0.28281838,
-0.50622416, -0.59897494, -0.38342294, -0.48033443, -0.47016418
]
db_grad[8] = [
0.8215279, 0.41994119, 0.95172721, 0.68000203, 0.79439718, 0.43384039,
0.55561525, 0.22567581, 0.93331909, 0.29438227
]
db_out[8] = [
-0.41656655, -0.50961858, -0.49418902, -0.51919359, -0.36422527,
-0.55169362, -0.6627695, -0.40780342, -0.58099347, -0.50707781
]
db_grad[9] = [
0.68297005, 0.67758518, 0.1748755, 0.13266537, 0.70697063, 0.055731893,
0.68593478, 0.50580865, 0.12602448, 0.093537711
]
db_out[9] = [
-0.49369633, -0.58184016, -0.52132869, -0.5396927, -0.44306302,
-0.56181377, -0.73774242, -0.46082234, -0.60366184, -0.52012295
]
# pylint: enable=line-too-long
return db_grad, db_out
def testLikeDistBeliefMom01(self):
# train.MomentumOptimizer is V1 only API.
with ops.Graph().as_default(), self.cached_session():
db_grad, db_out = self._dbParamsMom01()
num_samples = len(db_grad)
var0 = variables.Variable([0.0] * num_samples)
grads0 = constant_op.constant([0.0] * num_samples)
mom_opt = momentum_lib.MomentumOptimizer(learning_rate=0.1, momentum=0.1)
mom_update = mom_opt.apply_gradients(zip([grads0], [var0]))
self.evaluate(variables.global_variables_initializer())
for i in range(num_samples):
mom_update.run(feed_dict={grads0: db_grad[i]})
self.assertAllClose(np.array(db_out[i]), self.evaluate(var0))
def testSparse(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
# train.MomentumOptimizer is V1 only API.
with ops.Graph().as_default(), self.cached_session():
var0 = variables.Variable(array_ops.zeros([4, 2], dtype=dtype))
var1 = variables.Variable(constant_op.constant(1.0, dtype, [4, 2]))
grads0 = indexed_slices.IndexedSlices(
constant_op.constant([[.1, .1]], dtype=dtype),
constant_op.constant([1]), constant_op.constant([4, 2]))
grads1 = indexed_slices.IndexedSlices(
constant_op.constant([[.01, .01], [.01, .01]], dtype=dtype),
constant_op.constant([2, 3]), constant_op.constant([4, 2]))
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9)
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEqual(slot0.get_shape(), var0.get_shape())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEqual(slot1.get_shape(), var1.get_shape())
# Fetch params to validate initial values
self.assertAllClose([0, 0], self.evaluate(var0)[0])
self.assertAllClose([0, 0], self.evaluate(var0)[1])
self.assertAllClose([1, 1], self.evaluate(var1)[2])
# Step 1: the momentum accumulators are 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([0, 0]),
self.evaluate(slot0)[0])
self.assertAllCloseAccordingToType(
np.array([.1, .1]),
self.evaluate(slot0)[1])
self.assertAllCloseAccordingToType(
np.array([.01, .01]),
self.evaluate(slot1)[2])
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([0, 0]),
self.evaluate(var0)[0])
self.assertAllCloseAccordingToType(
np.array([-(0.1 * 2.0), -(0.1 * 2.0)]),
self.evaluate(var0)[1])
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.01 * 2.0), 1.0 - (0.01 * 2.0)]),
self.evaluate(var1)[2])
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllClose(np.array([0, 0]), self.evaluate(slot0)[0])
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
self.evaluate(slot0)[1])
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
self.evaluate(slot1)[2])
# Check that the parameters have been updated.
self.assertAllClose(np.array([0, 0]), self.evaluate(var0)[0])
self.assertAllCloseAccordingToType(
np.array([
-(0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
-(0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]),
self.evaluate(var0)[1])
self.assertAllCloseAccordingToType(
np.array([
0.98 - ((0.9 * 0.01 + 0.01) * 2.0),
0.98 - ((0.9 * 0.01 + 0.01) * 2.0)
]),
self.evaluate(var1)[2])
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
# train.MomentumOptimizer is V1 only API.
with ops.Graph().as_default(), self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9)
mom_update1 = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
mom_update2 = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEqual(slot0.get_shape(), var0.get_shape())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEqual(slot1.get_shape(), var1.get_shape())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Step 1: the momentum accumulators where 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update1.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([0.1, 0.1]), self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([0.01, 0.01]), self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
self.evaluate(var1))
# Step 2: the second momentum accumulators contain the previous update.
mom_update2.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
3.98 - ((0.9 * 0.01 + 0.01) * 2.0)
]), self.evaluate(var1))
if __name__ == "__main__":
test.main()
| MomentumOptimizerTest |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI045.py | {
"start": 86,
"end": 140
} | class ____:
def __iter__(self):
...
| NoReturn |
python | pytorch__pytorch | test/inductor/test_snode_runtime.py | {
"start": 1450,
"end": 2480
} | class ____(InductorTestCase):
device = DEVICE
"""
Helper methods to compare runtime estimate against 0. Since this estimate is hardware dependent,
stronger comparisons may fail depending on the host's specs.
atol/rtol must be provided explicitly with each call, since precision/rel_tol overrides are not always utilized
"""
def setUp(self):
super().setUp()
# These tests check metrics.node_runtimes and we don't save / restore
# those in the FX graph cache.
self._test_snode_stack = contextlib.ExitStack()
self._test_snode_stack.enter_context(
config.patch({"fx_graph_remote_cache": False})
)
def tearDown(self):
self._test_snode_stack.close()
super().tearDown()
def assertZero(self, x: float):
assert isinstance(x, float)
super().assertEqual(x, 0.0, atol=0, rtol=0)
def assertNotZero(self, x):
assert isinstance(x, float)
super().assertNotEqual(x, 0.0, atol=0, rtol=0)
| TestCase |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/psycopg.py | {
"start": 23948,
"end": 25030
} | class ____(PGDialect_psycopg):
is_async = True
supports_statement_cache = True
@classmethod
def import_dbapi(cls):
import psycopg
from psycopg.pq import ExecStatus
return PsycopgAdaptDBAPI(psycopg, ExecStatus)
def _type_info_fetch(self, connection, name):
from psycopg.types import TypeInfo
adapted = connection.connection
return await_(TypeInfo.fetch(adapted.driver_connection, name))
def _do_isolation_level(self, connection, autocommit, isolation_level):
connection.set_autocommit(autocommit)
connection.set_isolation_level(isolation_level)
def _do_autocommit(self, connection, value):
connection.set_autocommit(value)
def set_readonly(self, connection, value):
connection.set_read_only(value)
def set_deferrable(self, connection, value):
connection.set_deferrable(value)
def get_driver_connection(self, connection):
return connection._connection
dialect = PGDialect_psycopg
dialect_async = PGDialectAsync_psycopg
| PGDialectAsync_psycopg |
python | google__jax | jax/_src/prng.py | {
"start": 4949,
"end": 12675
} | class ____(Array):
"""An array of PRNG keys backed by an RNG implementation.
This class lifts the definition of a PRNG, provided in the form of a
``PRNGImpl``, into an array-like pytree class. Instances of this
class behave like an array whose base elements are keys, hiding the
fact that keys are typically arrays (of ``uint32`` dtype) themselves.
PRNGKeyArrays are also restricted relative to JAX arrays in that
they do not expose arithmetic operations. They instead expose
wrapper methods around the PRNG implementation functions (``split``,
``random_bits``, ``fold_in``).
"""
# TODO(jakevdp): potentially add tolist(), tobytes(),
# device_buffer, device_buffers, __cuda_interface__()
_impl: PRNGImpl
_base_array: Array
_consumed: bool | np.ndarray # Used in jax.experimental.key_reuse.
_source_info: None | source_info_util.SourceInfo = None
def __init__(self, impl, key_data: Any):
assert not isinstance(key_data, core.Tracer)
_check_prng_key_data(impl, key_data)
self._impl = impl
self._consumed = False # TODO(jakevdp): default to True here?
if isinstance(key_data, (np.ndarray, literals.TypedNdArray)):
aval = core.get_aval(key_data)
device = pxla.get_default_device()
key_data = pxla.batched_device_put(
aval, SingleDeviceSharding(device), [np.asarray(key_data)], [device],
committed=False)
self._base_array = key_data
def _replace_with(self, value: PRNGKeyArray):
self._base_array._replace_with(value._base_array)
def block_until_ready(self):
_ = self._base_array.block_until_ready()
return self
def copy_to_host_async(self):
_ = self._base_array.copy_to_host_async()
@property
def aval(self):
vma = self._base_array.aval.vma
return keys_shaped_array(self._impl, self.shape, self.sharding, vma)
@property
def shape(self):
return base_arr_shape_to_keys_shape(self._impl, self._base_array.shape)
@property
def size(self):
return math.prod(self.shape)
@property
def ndim(self):
return len(self.shape)
@property
def dtype(self):
return KeyTy(self._impl)
@property
def nbytes(self):
return self.itemsize * self.size
@property
def itemsize(self):
return self.dtype.itemsize
_device = property(op.attrgetter('_base_array._device'))
_committed = property(op.attrgetter('_base_array._committed'))
device = property(op.attrgetter('_base_array.device'))
devices = property(op.attrgetter('_base_array.devices')) # type: ignore[assignment]
is_fully_addressable = property(op.attrgetter('_base_array.is_fully_addressable')) # type: ignore[assignment]
is_fully_replicated = property(op.attrgetter('_base_array.is_fully_replicated')) # type: ignore[assignment]
delete = property(op.attrgetter('_base_array.delete')) # type: ignore[assignment]
is_deleted = property(op.attrgetter('_base_array.is_deleted')) # type: ignore[assignment]
on_device_size_in_bytes = property(op.attrgetter('_base_array.on_device_size_in_bytes')) # type: ignore[assignment]
unsafe_buffer_pointer = property(op.attrgetter('_base_array.unsafe_buffer_pointer')) # type: ignore[assignment]
def addressable_data(self, index: int) -> PRNGKeyArray:
return PRNGKeyArray(self._impl, self._base_array.addressable_data(index))
@property
def addressable_shards(self) -> list[Shard]:
return [
type(s)(
device=s._device,
sharding=s._sharding,
global_shape=s._global_shape,
data=PRNGKeyArray(self._impl, s._data),
)
for s in self._base_array.addressable_shards
]
@property
def global_shards(self) -> list[Shard]:
return [
type(s)(
device=s._device,
sharding=s._sharding,
global_shape=s._global_shape,
data=PRNGKeyArray(self._impl, s._data),
)
for s in self._base_array.global_shards
]
@property
def sharding(self):
return logical_sharding(self.shape, self.dtype, self._base_array.sharding)
@property
def committed(self):
return self._base_array.committed
def _is_scalar(self):
base_ndim = len(self._impl.key_shape)
return self._base_array.ndim == base_ndim
def __len__(self):
if self._is_scalar():
raise TypeError('len() of unsized object')
return len(self._base_array)
def __iter__(self) -> Iterator[PRNGKeyArray]:
if self._is_scalar():
raise TypeError('iteration over a 0-d key array')
# TODO(frostig): we may want to avoid iteration by slicing because
# a very common use of iteration is `k1, k2 = split(key)`, and
# slicing/indexing may be trickier to track for linearity checking
# purposes. Maybe we can:
# * introduce an unpack primitive+traceable (also allow direct use)
# * unpack upfront into shape[0] many keyarray slices
# * return iter over these unpacked slices
# Whatever we do, we'll want to do it by overriding
# ShapedArray._iter when the element type is KeyTy...
return (PRNGKeyArray(self._impl, k) for k in iter(self._base_array))
def __repr__(self):
return (f'Array({self.shape}, dtype={self.dtype.name}) overlaying:\n'
f'{self._base_array}')
def pprint(self):
pp_keys = pp.text('shape = ') + pp.text(str(self.shape))
pp_impl = pp.text('impl = ') + self._impl.pprint()
return str(pp.group(
pp.text('PRNGKeyArray:') +
pp.nest(2, pp.brk() + pp_keys + pp.brk() + pp_impl)))
def copy(self):
out = self.__class__(self._impl, self._base_array.copy())
out._consumed = self._consumed # TODO(jakevdp): is this correct?
return out
__hash__ = None # type: ignore[assignment]
__array_priority__ = 100
def __array__(self, dtype: np.dtype | None = None, copy: bool | None = None) -> np.ndarray:
raise TypeError("JAX array with PRNGKey dtype cannot be converted to a NumPy array."
" Use jax.random.key_data(arr) if you wish to extract the underlying"
" integer array.")
# Overwritten immediately below
@property
def at(self) -> _IndexUpdateHelper: assert False # type: ignore[override]
@property
def T(self) -> PRNGKeyArray: assert False
def __getitem__(self, _) -> PRNGKeyArray: assert False
def flatten(self, *_, **__) -> PRNGKeyArray: assert False
def ravel(self, *_, **__) -> PRNGKeyArray: assert False
def reshape(self, *_, **__) -> PRNGKeyArray: assert False
def squeeze(self, *_, **__) -> PRNGKeyArray: assert False
def swapaxes(self, *_, **__) -> PRNGKeyArray: assert False
def take(self, *_, **__) -> PRNGKeyArray: assert False
def transpose(self, *_, **__) -> PRNGKeyArray: assert False
_set_array_base_attributes(PRNGKeyArray, include=[
*(f"__{op}__" for op in _array_operators),
'at', 'flatten', 'ravel', 'reshape',
'squeeze', 'swapaxes', 'take', 'transpose', 'T'])
def prngkeyarray_flatten(x):
return (x._base_array,), x._impl
def prngkeyarray_unflatten(impl, children):
base_array, = children
return PRNGKeyArray(impl, base_array)
tree_util.dispatch_registry.register_node(
PRNGKeyArray, prngkeyarray_flatten, prngkeyarray_unflatten)
# TODO(frostig): remove, rerouting callers directly to random_seed
def seed_with_impl(impl: PRNGImpl, seed: int | typing.ArrayLike) -> PRNGKeyArray:
return random_seed(seed, impl=impl)
def keys_shaped_array(impl, shape, sharding, vma):
aval = core.ShapedArray(shape, KeyTy(impl))
return core.update_aval_with_sharding(aval, sharding, vma=vma)
def base_arr_shape_to_keys_shape(impl, base_arr_shape):
base_ndim = len(impl.key_shape)
return base_arr_shape[:-base_ndim]
| PRNGKeyArray |
python | pytorch__pytorch | torch/testing/_internal/distributed/distributed_test.py | {
"start": 10104,
"end": 10732
} | class ____(nn.Module):
"""
A module containing an embedding with different dimension or different # of
parameters depending on the rank.
"""
def __init__(self, rank, diff_num_params=False):
super().__init__()
embedding_dim = 500 if diff_num_params or rank == 0 else 50
self.embedding = nn.Embedding(num_embeddings=10, embedding_dim=embedding_dim)
self.lin = nn.Linear(embedding_dim, 1)
if diff_num_params:
self.lin2 = nn.Linear(1, 1, bias=False)
def forward(self, x):
x = self.embedding(x)
return self.lin(x)
| EmbeddingNetDifferentParams |
python | pyqtgraph__pyqtgraph | pyqtgraph/examples/relativity/relativity.py | {
"start": 25007,
"end": 28029
} | class ____(pg.ItemGroup):
def __init__(self, clock):
pg.ItemGroup.__init__(self)
self.size = clock.size
self.item = QtWidgets.QGraphicsEllipseItem(QtCore.QRectF(0, 0, self.size, self.size))
tr = QtGui.QTransform.fromTranslate(-self.size*0.5, -self.size*0.5)
self.item.setTransform(tr)
self.item.setPen(pg.mkPen(100,100,100))
self.item.setBrush(clock.brush)
self.hand = QtWidgets.QGraphicsLineItem(0, 0, 0, self.size*0.5)
self.hand.setPen(pg.mkPen('w'))
self.hand.setZValue(10)
self.flare = QtWidgets.QGraphicsPolygonItem(QtGui.QPolygonF([
QtCore.QPointF(0, -self.size*0.25),
QtCore.QPointF(0, self.size*0.25),
QtCore.QPointF(self.size*1.5, 0),
QtCore.QPointF(0, -self.size*0.25),
]))
self.flare.setPen(pg.mkPen('y'))
self.flare.setBrush(pg.mkBrush(255,150,0))
self.flare.setZValue(-10)
self.addItem(self.hand)
self.addItem(self.item)
self.addItem(self.flare)
self.clock = clock
self.i = 1
self._spaceline = None
def spaceline(self):
if self._spaceline is None:
self._spaceline = pg.InfiniteLine()
self._spaceline.setPen(self.clock.pen)
return self._spaceline
def stepTo(self, t):
data = self.clock.refData
while self.i < len(data)-1 and data['t'][self.i] < t:
self.i += 1
while self.i > 1 and data['t'][self.i-1] >= t:
self.i -= 1
self.setPos(data['x'][self.i], self.clock.y0)
t = data['pt'][self.i]
self.hand.setRotation(-0.25 * t * 360.)
v = data['v'][self.i]
gam = (1.0 - v**2)**0.5
self.setTransform(QtGui.QTransform.fromScale(gam, 1.0))
f = data['f'][self.i]
tr = QtGui.QTransform()
if f < 0:
tr.translate(self.size*0.4, 0)
else:
tr.translate(-self.size*0.4, 0)
tr.scale(-f * (0.5+np.random.random()*0.1), 1.0)
self.flare.setTransform(tr)
if self._spaceline is not None:
self._spaceline.setPos(pg.Point(data['x'][self.i], data['t'][self.i]))
self._spaceline.setAngle(data['v'][self.i] * 45.)
def reset(self):
self.i = 1
#class Spaceline(pg.InfiniteLine):
#def __init__(self, sim, frame):
#self.sim = sim
#self.frame = frame
#pg.InfiniteLine.__init__(self)
#self.setPen(sim.clocks[frame].pen)
#def stepTo(self, t):
#self.setAngle(0)
#pass
if __name__ == '__main__':
app = pg.mkQApp()
#import pyqtgraph.console
#cw = pyqtgraph.console.ConsoleWidget()
#cw.show()
#cw.catchNextException()
win = RelativityGUI()
win.setWindowTitle("Relativity!")
win.show()
win.resize(1100,700)
pg.exec()
| ClockItem |
python | jmcnamara__XlsxWriter | xlsxwriter/test/table/test_table04.py | {
"start": 481,
"end": 1956
} | class ____(unittest.TestCase):
"""
Test assembling a complete Table file.
"""
def test_assemble_xml_file(self):
"""Test writing a table"""
self.maxDiff = None
worksheet = Worksheet()
worksheet.worksheet_meta = WorksheetMeta()
worksheet.str_table = SharedStringTable()
worksheet.add_table("C3:F13", {"autofilter": False})
worksheet._prepare_tables(1, {})
fh = StringIO()
table = Table()
table._set_filehandle(fh)
table._set_properties(worksheet.tables[0])
table._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<table xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" id="1" name="Table1" displayName="Table1" ref="C3:F13" totalsRowShown="0">
<tableColumns count="4">
<tableColumn id="1" name="Column1"/>
<tableColumn id="2" name="Column2"/>
<tableColumn id="3" name="Column3"/>
<tableColumn id="4" name="Column4"/>
</tableColumns>
<tableStyleInfo name="TableStyleMedium9" showFirstColumn="0" showLastColumn="0" showRowStripes="1" showColumnStripes="0"/>
</table>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleTable |
python | huggingface__transformers | src/transformers/models/longcat_flash/modeling_longcat_flash.py | {
"start": 24013,
"end": 25118
} | class ____(PreTrainedModel):
config: LongcatFlashConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["LongcatFlashDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = False
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": LongcatFlashDecoderLayer,
"attentions": LongcatFlashMLA,
}
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, LongcatFlashTopkRouter):
init.normal_(module.classifier.weight, mean=0.0, std=self.config.initializer_range)
if isinstance(module, LongcatFlashExperts):
if module.gate_up_proj is not None:
init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
@auto_docstring
| LongcatFlashPreTrainedModel |
python | kamyu104__LeetCode-Solutions | Python/booking-concert-tickets-in-groups.py | {
"start": 1508,
"end": 2949
} | class ____(object):
def __init__(self, n, m):
"""
:type n: int
:type m: int
"""
self.__st = SegmentTree(n,
build_fn=lambda _: [m]*2,
query_fn=lambda x, y: y if x is None else x if y is None else [max(x[0], y[0]), x[1]+y[1]])
self.__m = m
self.__i = 0
def gather(self, k, maxRow):
"""
:type k: int
:type maxRow: int
:rtype: List[int]
"""
i = 1
if k > self.__st.tree[i][0]:
return []
while i < self.__st.base:
i = 2*i+int(self.__st.tree[2*i][0] < k)
if i-self.__st.base > maxRow:
return []
cnt = self.__st.tree[i][0]
c = self.__m-cnt
i -= self.__st.base
self.__st.update(i, [cnt-k]*2)
return [i, c]
def scatter(self, k, maxRow):
"""
:type k: int
:type maxRow: int
:rtype: bool
"""
cnt = self.__st.query(self.__i, maxRow)
if not cnt or cnt[1] < k:
return False
for i in xrange(self.__i, maxRow+1):
cnt = self.__st.tree[self.__st.base+i][1]
c = min(cnt, k)
cnt -= c
if not cnt:
self.__i += 1
self.__st.update(i, [cnt]*2)
k -= c
if not k:
break
return True
| BookMyShow |
python | huggingface__transformers | src/transformers/models/sew_d/modeling_sew_d.py | {
"start": 62835,
"end": 67790
} | class ____(SEWDPreTrainedModel):
def __init__(self, config):
super().__init__(config)
if hasattr(config, "add_adapter") and config.add_adapter:
raise ValueError(
"Sequence classification does not support the use of SEWD adapters (config.add_adapter=True)"
)
self.sew_d = SEWDModel(config)
num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
if config.use_weighted_layer_sum:
self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
def freeze_feature_encoder(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
"""
self.sew_d.feature_extractor._freeze_parameters()
def freeze_base_model(self):
"""
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
"""
for param in self.sew_d.parameters():
param.requires_grad = False
@auto_docstring
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
) -> Union[tuple, SequenceClassifierOutput]:
r"""
input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library
(`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion
into a tensor of type `torch.FloatTensor`. See [`SEWDProcessor.__call__`] for details.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
outputs = self.sew_d(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if self.config.use_weighted_layer_sum:
hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
hidden_states = torch.stack(hidden_states, dim=1)
norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
else:
hidden_states = outputs[0]
hidden_states = self.projector(hidden_states)
if attention_mask is None:
pooled_output = hidden_states.mean(dim=1)
else:
padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
expand_padding_mask = padding_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
hidden_states[~expand_padding_mask] = 0.0
pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = ["SEWDForCTC", "SEWDForSequenceClassification", "SEWDModel", "SEWDPreTrainedModel"]
| SEWDForSequenceClassification |
python | coleifer__peewee | tests/sql.py | {
"start": 82935,
"end": 83384
} | class ____(BaseTestCase):
def test_parentheses_functions(self):
expr = (User.c.income + 100)
expr2 = expr * expr
query = User.select(fn.sum(expr), fn.avg(expr2))
self.assertSQL(query, (
'SELECT sum("t1"."income" + ?), '
'avg(("t1"."income" + ?) * ("t1"."income" + ?)) '
'FROM "users" AS "t1"'), [100, 100, 100])
#Person = Table('person', ['id', 'name', 'dob'])
| TestExpressionSQL |
python | google__jax | tests/array_api_test.py | {
"start": 3581,
"end": 4187
} | class ____(absltest.TestCase):
"""Smoke test for the array API."""
def test_main_namespace(self):
self.assertContainsSubset(MAIN_NAMESPACE, names(ARRAY_API_NAMESPACE))
def test_linalg_namespace(self):
self.assertContainsSubset(LINALG_NAMESPACE, names(ARRAY_API_NAMESPACE.linalg))
def test_fft_namespace(self):
self.assertContainsSubset(FFT_NAMESPACE, names(ARRAY_API_NAMESPACE.fft))
def test_array_namespace_method(self):
x = ARRAY_API_NAMESPACE.arange(20)
self.assertIsInstance(x, jax.Array)
self.assertIs(x.__array_namespace__(), ARRAY_API_NAMESPACE)
| ArrayAPISmokeTest |
python | django__django | tests/backends/base/test_base.py | {
"start": 6200,
"end": 10775
} | class ____(TestCase):
@staticmethod
def call_execute(connection, params=None):
ret_val = "1" if params is None else "%s"
sql = "SELECT " + ret_val + connection.features.bare_select_suffix
with connection.cursor() as cursor:
cursor.execute(sql, params)
def call_executemany(self, connection, params=None):
# executemany() must use an update query. Make sure it does nothing
# by putting a false condition in the WHERE clause.
sql = "DELETE FROM {} WHERE 0=1 AND 0=%s".format(Square._meta.db_table)
if params is None:
params = [(i,) for i in range(3)]
with connection.cursor() as cursor:
cursor.executemany(sql, params)
@staticmethod
def mock_wrapper():
return MagicMock(side_effect=lambda execute, *args: execute(*args))
def test_wrapper_invoked(self):
wrapper = self.mock_wrapper()
with connection.execute_wrapper(wrapper):
self.call_execute(connection)
self.assertTrue(wrapper.called)
(_, sql, params, many, context), _ = wrapper.call_args
self.assertIn("SELECT", sql)
self.assertIsNone(params)
self.assertIs(many, False)
self.assertEqual(context["connection"], connection)
def test_wrapper_invoked_many(self):
wrapper = self.mock_wrapper()
with connection.execute_wrapper(wrapper):
self.call_executemany(connection)
self.assertTrue(wrapper.called)
(_, sql, param_list, many, context), _ = wrapper.call_args
self.assertIn("DELETE", sql)
self.assertIsInstance(param_list, (list, tuple))
self.assertIs(many, True)
self.assertEqual(context["connection"], connection)
def test_database_queried(self):
wrapper = self.mock_wrapper()
with connection.execute_wrapper(wrapper):
with connection.cursor() as cursor:
sql = "SELECT 17" + connection.features.bare_select_suffix
cursor.execute(sql)
seventeen = cursor.fetchall()
self.assertEqual(list(seventeen), [(17,)])
self.call_executemany(connection)
def test_nested_wrapper_invoked(self):
outer_wrapper = self.mock_wrapper()
inner_wrapper = self.mock_wrapper()
with (
connection.execute_wrapper(outer_wrapper),
connection.execute_wrapper(inner_wrapper),
):
self.call_execute(connection)
self.assertEqual(inner_wrapper.call_count, 1)
self.call_executemany(connection)
self.assertEqual(inner_wrapper.call_count, 2)
def test_outer_wrapper_blocks(self):
def blocker(*args):
pass
wrapper = self.mock_wrapper()
c = connection # This alias shortens the next line.
with (
c.execute_wrapper(wrapper),
c.execute_wrapper(blocker),
c.execute_wrapper(wrapper),
):
with c.cursor() as cursor:
cursor.execute("The database never sees this")
self.assertEqual(wrapper.call_count, 1)
cursor.executemany("The database never sees this %s", [("either",)])
self.assertEqual(wrapper.call_count, 2)
def test_wrapper_gets_sql(self):
wrapper = self.mock_wrapper()
sql = "SELECT 'aloha'" + connection.features.bare_select_suffix
with connection.execute_wrapper(wrapper), connection.cursor() as cursor:
cursor.execute(sql)
(_, reported_sql, _, _, _), _ = wrapper.call_args
self.assertEqual(reported_sql, sql)
def test_wrapper_connection_specific(self):
wrapper = self.mock_wrapper()
with connections["other"].execute_wrapper(wrapper):
self.assertEqual(connections["other"].execute_wrappers, [wrapper])
self.call_execute(connection)
self.assertFalse(wrapper.called)
self.assertEqual(connection.execute_wrappers, [])
self.assertEqual(connections["other"].execute_wrappers, [])
def test_wrapper_debug(self):
def wrap_with_comment(execute, sql, params, many, context):
return execute(f"/* My comment */ {sql}", params, many, context)
with CaptureQueriesContext(connection) as ctx:
with connection.execute_wrapper(wrap_with_comment):
list(Person.objects.all())
last_query = ctx.captured_queries[-1]["sql"]
self.assertTrue(last_query.startswith("/* My comment */"))
| ExecuteWrapperTests |
python | protocolbuffers__protobuf | python/google/protobuf/internal/containers.py | {
"start": 19978,
"end": 20667
} | class ____:
"""A parsed unknown field."""
# Disallows assignment to other attributes.
__slots__ = ['_field_number', '_wire_type', '_data']
def __init__(self, field_number, wire_type, data):
self._field_number = field_number
self._wire_type = wire_type
self._data = data
return
def __lt__(self, other):
# pylint: disable=protected-access
return self._field_number < other._field_number
def __eq__(self, other):
if self is other:
return True
# pylint: disable=protected-access
return (self._field_number == other._field_number and
self._wire_type == other._wire_type and
self._data == other._data)
| _UnknownField |
python | huggingface__transformers | src/transformers/models/clvp/modeling_clvp.py | {
"start": 6244,
"end": 7124
} | class ____(ModelOutput):
r"""
embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
The embeddings obtained by applying the projection layer to the pooler_output.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
The hidden state of the last layer of the model.
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
Pooled output of the `last_hidden_state`.
"""
embeds: Optional[torch.FloatTensor] = None
last_hidden_state: Optional[torch.FloatTensor] = None
pooler_output: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring
| ClvpEncoderOutput |
python | huggingface__transformers | src/transformers/models/bitnet/modular_bitnet.py | {
"start": 4054,
"end": 5781
} | class ____(LlamaForCausalLM):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = None
_pp_plan = None
def forward(
self,
**super_kwargs,
) -> CausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, transformers.,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, transformers., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, BitNetForCausalLM
>>> model = BitNetForCausalLM.from_pretrained("microsoft/bitnet-b1.58-2B-4T")
>>> tokenizer = AutoTokenizer.from_pretrained("microsoft/bitnet-b1.58-2B-4T")
>>> prompt = f'<|begin_of_text|>User: Hey, are you conscious? Can you talk to me?<|eot_id|>Assistant: '
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=100)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"User: Hey, are you conscious? Can you talk to me?Assistant: No, I'm not conscious. I'm an artificial intelligence designed to assist with information and tasks. How can I help you today?"
```"""
return super().forward(**super_kwargs)
__all__ = [
"BitNetForCausalLM",
"BitNetModel",
"BitNetPreTrainedModel", # noqa: F822
]
| BitNetForCausalLM |
python | huggingface__transformers | tests/models/dinat/test_modeling_dinat.py | {
"start": 7092,
"end": 11955
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
DinatModel,
DinatForImageClassification,
DinatBackbone,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{"image-feature-extraction": DinatModel, "image-classification": DinatForImageClassification}
if is_torch_available()
else {}
)
test_resize_embeddings = False
test_torch_exportable = True
def setUp(self):
self.model_tester = DinatModelTester(self)
self.config_tester = ConfigTester(
self, config_class=DinatConfig, embed_dim=37, common_properties=["patch_size", "num_channels"]
)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
def test_backbone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip(reason="Dinat does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Dinat does not use feedforward chunking")
def test_feed_forward_chunking(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_attention_outputs(self):
self.skipTest(reason="Dinat's attention operation is handled entirely by NATTEN.")
def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
# Dinat has a different seq_length
patch_size = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
height = image_size[0] // patch_size[0]
width = image_size[1] // patch_size[1]
self.assertListEqual(
list(hidden_states[0].shape[-3:]),
[height, width, self.model_tester.embed_dim],
)
if model_class.__name__ != "DinatBackbone":
reshaped_hidden_states = outputs.reshaped_hidden_states
self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
reshaped_hidden_states = (
reshaped_hidden_states[0].view(batch_size, num_channels, height, width).permute(0, 2, 3, 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-3:]),
[height, width, self.model_tester.embed_dim],
)
def test_hidden_states_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
image_size = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
@slow
def test_model_from_pretrained(self):
model_name = "shi-labs/dinat-mini-in1k-224"
model = DinatModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_natten
@require_vision
@require_torch
| DinatModelTest |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_dataflow.py | {
"start": 65548,
"end": 69595
} | class ____:
@pytest.mark.parametrize(
"log",
[
pytest.param(APACHE_BEAM_V_2_14_0_JAVA_SDK_LOG, id="apache-beam-2.14.0-JDK"),
pytest.param(APACHE_BEAM_V_2_22_0_JAVA_SDK_LOG, id="apache-beam-2.22.0-JDK"),
pytest.param(APACHE_BEAM_V_2_58_1_JAVA_SDK_LOG, id="apache-beam-2.58.1-JDK"),
pytest.param(
CLOUD_COMPOSER_CLOUD_LOGGING_APACHE_BEAM_V_2_56_0_JAVA_SDK_LOG,
id="cloud-composer-cloud-logging-apache-beam-2.56.0-JDK",
),
pytest.param(APACHE_BEAM_V_2_14_0_PYTHON_SDK_LOG, id="apache-beam-2.14.0-Python"),
pytest.param(APACHE_BEAM_V_2_22_0_PYTHON_SDK_LOG, id="apache-beam-2.22.0-Python"),
],
)
def test_data_flow_valid_job_id(self, log):
echos = ";".join(f"echo {shlex.quote(line)}" for line in log.splitlines())
cmd = ["bash", "-c", echos]
found_job_id = None
def callback(job_id):
nonlocal found_job_id
found_job_id = job_id
mock_log = MagicMock()
run_beam_command(
cmd=cmd,
process_line_callback=process_line_and_extract_dataflow_job_id_callback(callback),
log=mock_log,
)
assert found_job_id == TEST_JOB_ID
def test_data_flow_missing_job_id(self):
cmd = ["echo", "unit testing"]
found_job_id = None
def callback(job_id):
nonlocal found_job_id
found_job_id = job_id
log = MagicMock()
run_beam_command(
cmd=cmd,
process_line_callback=process_line_and_extract_dataflow_job_id_callback(callback),
log=log,
)
assert found_job_id is None
@mock.patch("subprocess.Popen")
@mock.patch("select.select")
def test_dataflow_wait_for_done_logging(self, mock_select, mock_popen, caplog):
logger_name = "fake-dataflow-wait-for-done-logger"
fake_logger = logging.getLogger(logger_name)
fake_logger.setLevel(logging.INFO)
cmd = ["fake", "cmd"]
mock_proc = MagicMock(name="FakeProc")
fake_stderr_fd = MagicMock(name="FakeStderr")
fake_stdout_fd = MagicMock(name="FakeStdout")
mock_proc.stderr = fake_stderr_fd
mock_proc.stdout = fake_stdout_fd
fake_stderr_fd.readline.side_effect = [
b"dataflow-stderr-1",
b"dataflow-stderr-2",
StopIteration,
b"dataflow-stderr-3",
StopIteration,
b"dataflow-other-stderr",
]
fake_stdout_fd.readline.side_effect = [b"dataflow-stdout", StopIteration]
mock_select.side_effect = [
([fake_stderr_fd], None, None),
(None, None, None),
([fake_stderr_fd], None, None),
]
mock_proc.poll.side_effect = [None, True]
mock_proc.returncode = 1
mock_popen.return_value = mock_proc
caplog.clear()
caplog.set_level(logging.INFO)
with pytest.raises(AirflowException, match="Apache Beam process failed with return code 1"):
run_beam_command(cmd=cmd, log=fake_logger)
mock_popen.assert_called_once_with(
cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, cwd=None
)
info_messages = [rt[2] for rt in caplog.record_tuples if rt[0] == logger_name and rt[1] == 20]
assert "Running command: fake cmd" in info_messages
assert "dataflow-stdout" in info_messages
warn_messages = [rt[2] for rt in caplog.record_tuples if rt[0] == logger_name and rt[1] == 30]
assert "dataflow-stderr-1" in warn_messages
assert "dataflow-stderr-2" in warn_messages
assert "dataflow-stderr-3" in warn_messages
assert "dataflow-other-stderr" in warn_messages
@pytest.fixture
def make_mock_awaitable():
def func(mock_obj, return_value):
f = Future()
f.set_result(return_value)
mock_obj.return_value = f
return func
| TestDataflow |
python | Netflix__metaflow | metaflow/exception.py | {
"start": 249,
"end": 1275
} | class ____(Exception):
def __init__(self, exc=None):
if exc is not None:
self.exception = str(exc)
self.type = "%s.%s" % (exc.__class__.__module__, exc.__class__.__name__)
if sys.exc_info()[0] is None:
self.stacktrace = None
else:
self.stacktrace = traceback.format_exc()
# Base Exception defines its own __reduce__ and __setstate__
# which don't work nicely with derived exceptions. We override
# the magic methods related to pickle to get desired behavior.
def __reduce__(self):
return MetaflowExceptionWrapper, (None,), self.__dict__
def __getstate__(self):
return self.__dict__
def __setstate__(self, state):
self.__dict__ = state
def __repr__(self):
return str(self)
def __str__(self):
if self.stacktrace:
return self.stacktrace
else:
return "[no stacktrace]\n%s: %s" % (self.type, self.exception)
| MetaflowExceptionWrapper |
python | getsentry__sentry | tests/sentry/rules/processing/test_delayed_processing.py | {
"start": 21393,
"end": 25499
} | class ____(TestCase):
def setUp(self) -> None:
self.organization = self.create_organization()
self.project = self.create_project()
self.environment = self.create_environment()
self.rule1: Rule = self.create_project_rule(
project=self.project,
condition_data=[TEST_RULE_SLOW_CONDITION],
environment_id=self.environment.id,
)
self.group1: Group = self.create_group(self.project)
self.group2: Group = self.create_group(self.project)
self.condition_group_results: dict[UniqueConditionQuery, dict[int, int | float]] = {
UniqueConditionQuery(
cls_id=TEST_RULE_SLOW_CONDITION["id"],
interval=TEST_RULE_SLOW_CONDITION["interval"],
environment_id=self.environment.id,
): {self.group1.id: 2, self.group2.id: 1}
}
self.rules_to_slow_conditions: DefaultDict[Rule, list[EventFrequencyConditionData]] = (
defaultdict(list)
)
self.rules_to_slow_conditions[self.rule1].append(TEST_RULE_SLOW_CONDITION)
self.rules_to_groups: DefaultDict[int, set[int]] = defaultdict(set)
self.rules_to_groups[self.rule1.id].add(self.group1.id)
self.rules_to_groups[self.rule1.id].add(self.group2.id)
# Mock passes_comparison function
self.patcher = patch("sentry.rules.processing.delayed_processing.passes_comparison")
self.mock_passes_comparison = self.patcher.start()
def tearDown(self) -> None:
self.patcher.stop()
def test_comparison(self) -> None:
self.mock_passes_comparison.return_value = True
result = get_rules_to_fire(
self.condition_group_results,
self.rules_to_slow_conditions,
self.rules_to_groups,
self.project.id,
)
assert result[self.rule1] == {self.group1.id, self.group2.id}
def test_comparison_fail_all(self) -> None:
self.mock_passes_comparison.return_value = False
result = get_rules_to_fire(
self.condition_group_results,
self.rules_to_slow_conditions,
self.rules_to_groups,
self.project.id,
)
assert self.rule1 not in result
def test_comparison_any(self) -> None:
self.rule1.data["action_match"] = "any"
self.mock_passes_comparison.return_value = True
result = get_rules_to_fire(
self.condition_group_results,
self.rules_to_slow_conditions,
self.rules_to_groups,
self.project.id,
)
assert result[self.rule1] == {self.group1.id, self.group2.id}
def test_comparison_any_fail(self) -> None:
self.rule1.data["action_match"] = "any"
self.mock_passes_comparison.return_value = False
result = get_rules_to_fire(
self.condition_group_results,
self.rules_to_slow_conditions,
self.rules_to_groups,
self.project.id,
)
assert self.rule1 not in result
def test_empty_input(self) -> None:
result = get_rules_to_fire({}, defaultdict(list), defaultdict(set), self.project.id)
assert len(result) == 0
@patch("sentry.rules.processing.delayed_processing.passes_comparison", return_value=True)
def test_multiple_rules_and_groups(self, mock_passes: MagicMock) -> None:
rule2 = self.create_project_rule(
project=self.project,
condition_data=[TEST_RULE_SLOW_CONDITION],
environment_id=self.environment.id,
)
self.rules_to_slow_conditions[rule2].append(TEST_RULE_SLOW_CONDITION)
self.rules_to_groups[rule2.id].add(self.group2.id)
result = get_rules_to_fire(
self.condition_group_results,
self.rules_to_slow_conditions,
self.rules_to_groups,
self.project.id,
)
assert len(result) == 2
assert result[self.rule1] == {self.group1.id, self.group2.id}
assert result[rule2] == {self.group2.id}
| GetRulesToFireTest |
python | pytorch__pytorch | test/quantization/core/test_quantized_module.py | {
"start": 79703,
"end": 89331
} | class ____(QuantizationTestCase):
def _quant_dequant_weight(self, weight, weight_qparams):
qscheme = weight_qparams["qscheme"]
scale = weight_qparams["scale"]
zero_point = weight_qparams["zero_point"]
dtype = weight_qparams["dtype"]
if qscheme == torch.per_tensor_affine:
weight = torch.quantize_per_tensor(weight, scale, zero_point, dtype)
else:
# per channel affine
axis = weight_qparams["axis"]
weight = torch.quantize_per_channel(weight, scale, zero_point, axis, dtype)
weight = weight.dequantize()
return weight
# TODO: add tests for conv and linear
def test_rnn_cell(self):
""" Checks the rnn cell reference quantized modules has correct numerics
This includes LSTMCell, GRUCell, RNNCell
"""
batch = 7
input_size = 3
hidden_size = 7
bias = True
x = torch.rand(batch, input_size)
h = torch.rand(batch, hidden_size)
cell_dict = {'LSTMCell': torch.nn.LSTMCell,
'GRUCell': torch.nn.GRUCell,
'RNNTanh': torch.nn.RNNCell,
'RNNReLU': torch.nn.RNNCell
}
state = {'LSTMCell': (h, h),
'GRUCell': h,
'RNNTanh': h,
'RNNReLU': h}
qfn_dict = {'LSTMCell': nnqr.LSTMCell,
'GRUCell': nnqr.GRUCell,
'RNNTanh': nnqr.RNNCell,
'RNNReLU': nnqr.RNNCell}
for rnn_type in cell_dict:
kwargs = {'input_size': input_size, 'hidden_size': hidden_size, 'bias': bias}
if rnn_type == 'RNNReLU':
kwargs['nonlinearity'] = "relu"
elif rnn_type == 'RNNTanh':
kwargs['nonlinearity'] = "tanh"
fp_cell = cell_dict[rnn_type](**kwargs)
# initialize ref rnn cell module
weight_qparams = {
'qscheme': torch.per_tensor_affine,
'dtype': torch.quint8,
'scale': 2.0,
'zero_point': 5
}
weight_qparams_dict = {
"weight_ih": weight_qparams,
"weight_hh": weight_qparams,
"is_decomposed": False,
}
ref_kwargs = kwargs.copy()
ref_kwargs["weight_qparams_dict"] = weight_qparams_dict
ref_cell = qfn_dict[rnn_type](**ref_kwargs)
# reassign the weights from fp32 rnn cell modulea
ref_cell.weight_ih = fp_cell.weight_ih
ref_cell.weight_hh = fp_cell.weight_hh
ref_cell.bias_ih = fp_cell.bias_ih
ref_cell.bias_hh = fp_cell.bias_hh
ref_res = ref_cell(x, state[rnn_type])
# change the weight of fp_res, we first want to run a quantie and
# dequantize on the weight
fp_cell.weight_ih = torch.nn.Parameter(self._quant_dequant_weight(fp_cell.weight_ih, weight_qparams_dict["weight_ih"]))
fp_cell.weight_hh = torch.nn.Parameter(self._quant_dequant_weight(fp_cell.weight_hh, weight_qparams_dict["weight_hh"]))
fp_res = fp_cell(x, state[rnn_type])
self.assertEqual(ref_res[0], fp_res[0], msg="RNNCell module API failed")
self.assertEqual(ref_res[1], fp_res[1], msg="RNNCell module API failed")
def test_rnn(self):
""" Checks the rnn reference quantized modules has correct numerics
This includes LSTM
"""
seq_len = 4
batch = 2
input_size = 3
hidden_size = 7
num_layers = 2
bias = True
for bidirectional in [True, False]:
x = torch.randn(seq_len, batch, input_size)
h = torch.randn(num_layers * (bidirectional + 1), batch, hidden_size)
c = torch.randn(num_layers * (bidirectional + 1), batch, hidden_size)
fp32_rnn = torch.nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=False,
dropout=0.0,
bidirectional=bidirectional)
# initialize ref rnn module
weight_qparams = {
"qscheme": torch.per_tensor_affine,
"dtype": torch.qint8,
"scale": 2.0,
"zero_point": 5
}
weight_qparams_dict = {key: weight_qparams for key in fp32_rnn._flat_weights_names if key.startswith("weight")}
weight_qparams_dict["is_decomposed"] = False
ref_rnn = nnqr.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=False,
dropout=0.0,
bidirectional=bidirectional,
weight_qparams_dict=weight_qparams_dict)
for wn in fp32_rnn._flat_weights_names:
setattr(ref_rnn, wn, copy.deepcopy(getattr(fp32_rnn, wn)))
ref_rnn._flat_weights = copy.deepcopy(fp32_rnn._flat_weights)
# quantize and dequantize the weights for fp32_rnn module
flat_weights = []
for wn in fp32_rnn._flat_weights_names:
if wn.startswith("weight"):
weight = self._quant_dequant_weight(getattr(fp32_rnn, wn), weight_qparams)
else:
weight = getattr(fp32_rnn, wn)
flat_weights.append(weight)
fp32_rnn._flat_weights = flat_weights
fp32_res = fp32_rnn(x, (h, c))
ref_res = ref_rnn(x, (h, c))
self.assertEqual(fp32_res, ref_res)
def test_sparse(self):
""" Embedding and EmbeddingBag
"""
num_embeddings = 10
embedding_dim = 3
# embedding input
ex = torch.LongTensor([[1, 2, 4, 5], [4, 3, 2, 9]])
# embedding bag input
ebx = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long)
offsets = torch.tensor([0, 4], dtype=torch.long)
fp_to_ref = {
nn.Embedding: (nnqr.Embedding, (ex,)),
nn.EmbeddingBag: (nnqr.EmbeddingBag, (ebx, offsets)),
}
per_tensor_weight_qparams = {
"qscheme": torch.per_tensor_affine,
"dtype": torch.quint8,
"scale": 2.0,
"zero_point": 5,
"is_decomposed": False,
}
per_channel_weight_qparams = {
"qscheme": torch.per_channel_affine,
"dtype": torch.quint8,
"scale": torch.randn(10),
"zero_point": torch.randint(0, 255, (10,)),
"axis": 0,
"is_decomposed": False,
}
per_channel_weight_qparams_quint4x2 = {
"qscheme": torch.per_channel_affine_float_qparams,
"dtype": torch.quint4x2,
"scale": torch.randn(10),
"zero_point": torch.randint(0, 255, (10,)),
"axis": 0,
"is_decomposed": False,
}
weight_qparams_options = [
per_tensor_weight_qparams,
per_channel_weight_qparams,
per_channel_weight_qparams_quint4x2,
]
for fp_cls, weight_qparams in itertools.product([nn.Embedding, nn.EmbeddingBag], weight_qparams_options):
# TODO: torch.quint4x2 not supported in quantize_per_channel, need to add support
if weight_qparams["dtype"] == torch.quint4x2:
continue
ref_cls, args = fp_to_ref[fp_cls]
fp32_embedding = fp_cls(num_embeddings, embedding_dim)
ref_embedding = ref_cls(num_embeddings, embedding_dim, weight_qparams=weight_qparams)
ref_embedding.weight = fp32_embedding.weight
# quantize and dequantize the weight for fp32 module
fp32_embedding.weight = torch.nn.Parameter(self._quant_dequant_weight(fp32_embedding.weight, weight_qparams))
fp32_res = fp32_embedding(*args)
ref_res = ref_embedding(*args)
self.assertEqual(fp32_res, ref_res)
def test_linear_decomposed_weight_custom_qmin_qmax(self):
"""Verify that reference Linear respects custom qmin/qmax for weight
"""
linear_fp32 = torch.nn.Linear(2, 2)
qconfig = torch.ao.quantization.default_symmetric_qnnpack_qconfig
w_obs = qconfig.weight()
self.assertTrue(w_obs.quant_min == -127)
self.assertTrue(w_obs.quant_max == 127)
w_obs(linear_fp32.weight)
weight_qparams = torch.ao.quantization.utils.get_qparam_dict(w_obs)
weight_qparams["is_decomposed"] = True
linear_ref = nnqr.Linear.from_float(linear_fp32, weight_qparams)
linear_ref_traced = torch.fx.symbolic_trace(linear_ref)
# verify that the qmin/qmax arguments for weight q/dq are correctly
# taken from the observer
found = 0
for n in linear_ref_traced.graph.nodes:
if n.op != 'call_function':
continue
if n.target in (
torch.ops.quantized_decomposed.quantize_per_tensor,
torch.ops.quantized_decomposed.dequantize_per_tensor,
):
_0, _1, _2, qmin, qmax, _5 = n.args
self.assertTrue(qmin == -127)
self.assertTrue(qmax == 127)
found += 1
self.assertTrue(found == 2)
if __name__ == "__main__":
raise_on_run_directly("test/test_quantization.py")
| TestReferenceQuantizedModule |
python | doocs__leetcode | solution/3200-3299/3242.Design Neighbor Sum Service/Solution.py | {
"start": 0,
"end": 897
} | class ____:
def __init__(self, grid: List[List[int]]):
self.grid = grid
self.d = {}
self.dirs = ((-1, 0, 1, 0, -1), (-1, 1, 1, -1, -1))
for i, row in enumerate(grid):
for j, x in enumerate(row):
self.d[x] = (i, j)
def adjacentSum(self, value: int) -> int:
return self.cal(value, 0)
def cal(self, value: int, k: int):
i, j = self.d[value]
s = 0
for a, b in pairwise(self.dirs[k]):
x, y = i + a, j + b
if 0 <= x < len(self.grid) and 0 <= y < len(self.grid[0]):
s += self.grid[x][y]
return s
def diagonalSum(self, value: int) -> int:
return self.cal(value, 1)
# Your NeighborSum object will be instantiated and called as such:
# obj = NeighborSum(grid)
# param_1 = obj.adjacentSum(value)
# param_2 = obj.diagonalSum(value)
| NeighborSum |
python | pytorch__pytorch | test/dynamo/test_exceptions.py | {
"start": 574,
"end": 671
} | class ____(type):
def __instancecheck__(cls, instance):
return True
| CustomExceptionMeta |
python | langchain-ai__langchain | libs/partners/openai/langchain_openai/llms/base.py | {
"start": 26799,
"end": 30726
} | class ____(BaseOpenAI):
"""OpenAI completion model integration.
Setup:
Install `langchain-openai` and set environment variable `OPENAI_API_KEY`.
```bash
pip install -U langchain-openai
export OPENAI_API_KEY="your-api-key"
```
Key init args — completion params:
model:
Name of OpenAI model to use.
temperature:
Sampling temperature.
max_tokens:
Max number of tokens to generate.
logprobs:
Whether to return logprobs.
stream_options:
Configure streaming outputs, like whether to return token usage when
streaming (`{"include_usage": True}`).
Key init args — client params:
timeout:
Timeout for requests.
max_retries:
Max number of retries.
api_key:
OpenAI API key. If not passed in will be read from env var `OPENAI_API_KEY`.
base_url:
Base URL for API requests. Only specify if using a proxy or service
emulator.
organization:
OpenAI organization ID. If not passed in will be read from env
var `OPENAI_ORG_ID`.
See full list of supported init args and their descriptions in the params section.
Instantiate:
```python
from langchain_openai import OpenAI
model = OpenAI(
model="gpt-3.5-turbo-instruct",
temperature=0,
max_retries=2,
# api_key="...",
# base_url="...",
# organization="...",
# other params...
)
```
Invoke:
```python
input_text = "The meaning of life is "
model.invoke(input_text)
```
```txt
"a philosophical question that has been debated by thinkers and scholars for centuries."
```
Stream:
```python
for chunk in model.stream(input_text):
print(chunk, end="|")
```
```txt
a| philosophical| question| that| has| been| debated| by| thinkers| and| scholars| for| centuries|.
```
```python
"".join(model.stream(input_text))
```
```txt
"a philosophical question that has been debated by thinkers and scholars for centuries."
```
Async:
```python
await model.ainvoke(input_text)
# stream:
# async for chunk in (await model.astream(input_text)):
# print(chunk)
# batch:
# await model.abatch([input_text])
```
```txt
"a philosophical question that has been debated by thinkers and scholars for centuries."
```
""" # noqa: E501
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
Returns:
`["langchain", "llms", "openai"]`
"""
return ["langchain", "llms", "openai"]
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by LangChain."""
return True
@property
def _invocation_params(self) -> dict[str, Any]:
return {"model": self.model_name, **super()._invocation_params}
@property
def lc_secrets(self) -> dict[str, str]:
"""Mapping of secret keys to environment variables."""
return {"openai_api_key": "OPENAI_API_KEY"}
@property
def lc_attributes(self) -> dict[str, Any]:
"""LangChain attributes for this class."""
attributes: dict[str, Any] = {}
if self.openai_api_base:
attributes["openai_api_base"] = self.openai_api_base
if self.openai_organization:
attributes["openai_organization"] = self.openai_organization
if self.openai_proxy:
attributes["openai_proxy"] = self.openai_proxy
return attributes
| OpenAI |
python | huggingface__transformers | tests/models/dit/test_modeling_dit.py | {
"start": 992,
"end": 2038
} | class ____(unittest.TestCase):
@slow
def test_for_image_classification(self):
image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
model.to(torch_device)
from datasets import load_dataset
dataset = load_dataset("nielsr/rvlcdip-demo")
image = dataset["train"][0]["image"].convert("RGB")
inputs = image_processor(image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
logits = outputs.logits
expected_shape = torch.Size((1, 16))
self.assertEqual(logits.shape, expected_shape)
expected_slice = torch.tensor(
[-0.4158, -0.4092, -0.4347],
device=torch_device,
dtype=torch.float,
)
torch.testing.assert_close(logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
| DiTIntegrationTest |
python | agronholm__apscheduler | src/apscheduler/serializers/pickle.py | {
"start": 208,
"end": 1119
} | class ____(Serializer):
"""
Uses the :mod:`pickle` module to (de)serialize objects.
As this serialization method is native to Python, it is able to serialize a wide
range of types, at the expense of being insecure. Do **not** use this serializer
unless you can fully trust the entire system to not have maliciously injected data.
Such data can be made to call arbitrary functions with arbitrary arguments on
unpickling.
:param protocol: the pickle protocol number to use
"""
protocol: int = 4
def serialize(self, obj: object) -> bytes:
try:
return dumps(obj, self.protocol)
except Exception as exc:
raise SerializationError from exc
def deserialize(self, serialized: bytes):
try:
return loads(serialized)
except Exception as exc:
raise DeserializationError from exc
| PickleSerializer |
python | ansible__ansible | test/lib/ansible_test/_internal/host_profiles.py | {
"start": 3837,
"end": 5379
} | class ____:
"""Simple representation of an Ansible inventory."""
host_groups: dict[str, dict[str, dict[str, t.Union[str, int]]]]
extra_groups: t.Optional[dict[str, list[str]]] = None
@staticmethod
def create_single_host(name: str, variables: dict[str, t.Union[str, int]]) -> Inventory:
"""Return an inventory instance created from the given hostname and variables."""
return Inventory(host_groups=dict(all={name: variables}))
def write(self, args: CommonConfig, path: str) -> None:
"""Write the given inventory to the specified path on disk."""
inventory_data: dict[str, dict[str, dict[str, dict[str, object]]]] = dict()
for group, hosts in self.host_groups.items():
group_data = inventory_data.setdefault(group, dict())
hosts_data = group_data.setdefault('hosts', dict())
for host, variables in hosts.items():
host_entry = hosts_data.setdefault(host, dict())
host_entry.update(variables)
for group, children in (self.extra_groups or {}).items():
group_data = inventory_data.setdefault(group, dict())
group_children = group_data.setdefault('children', dict())
for child in children:
group_children[child] = dict()
inventory_text = json.dumps(inventory_data, indent=4)
if not args.explain:
write_text_file(path, inventory_text + '\n')
display.info(f'>>> Inventory\n{inventory_text}', verbosity=3)
| Inventory |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/unit_tests/test_limit_reducing_error_handler.py | {
"start": 928,
"end": 3868
} | class ____:
def test_orders_stream_500_error_handling(self, requests_mock):
# Mock the events endpoint to prevent NoMockAddress error
requests_mock.get(
"https://test-shop.myshopify.com/admin/api/2025-01/events.json?filter=Order&verb=destroy",
[{"status_code": 200, "json": {"events": []}}],
)
# Simulate initial URL with 500 errors, then success with pagination
requests_mock.get(
"https://test-shop.myshopify.com/admin/api/2025-01/orders.json?limit=250&status=any",
[
{"status_code": 500}, # Initial request fails
{"status_code": 500}, # Retry with 250 fails again
{
"status_code": 200,
"json": ORDERS_PAGE_1,
"headers": {
"Link": '<https://test-shop.myshopify.com/admin/api/2025-01/orders.json?limit=250&page_info=page1>; rel="next"'
},
},
],
)
# Response for reduced limit
requests_mock.get(
"https://test-shop.myshopify.com/admin/api/2025-01/orders.json?limit=125&status=any",
[
{
"status_code": 200,
"json": ORDERS_PAGE_1,
"headers": {
"Link": '<https://test-shop.myshopify.com/admin/api/2025-01/orders.json?limit=125&page_info=page1>; rel="next"'
},
}
],
)
# Paginated responses
requests_mock.get(
"https://test-shop.myshopify.com/admin/api/2025-01/orders.json?limit=250&page_info=page1",
[
{
"status_code": 200,
"json": ORDERS_PAGE_2,
"headers": {
"Link": '<https://test-shop.myshopify.com/admin/api/2025-01/orders.json?limit=250&page_info=page2>; rel="next"'
},
}
],
)
requests_mock.get(
"https://test-shop.myshopify.com/admin/api/2025-01/orders.json?limit=250&page_info=page2",
[{"status_code": 200, "json": ORDERS_PAGE_3, "headers": {}}], # No next page
)
# Configure the stream
config = {"shop": "test-shop", "authenticator": None}
stream = Orders(config)
# Read records
records = list(stream.read_records(sync_mode="full_refresh"))
# Assertions
assert len(records) == 375 # Total orders: 125 + 125 + 125
assert records[0]["id"] == 1
assert records[-1]["id"] == 375
# Assert that a request with the reduced limit was actually made
assert any(
"limit=125" in req.url for req in requests_mock.request_history
), "No request was made with the reduced limit (limit=125)"
| TestOrdersLimitReducingErrorHandler |
python | ray-project__ray | python/ray/serve/_private/proxy_request_response.py | {
"start": 497,
"end": 1126
} | class ____(ABC):
"""Base ProxyRequest class to use in the common interface among proxies"""
@property
@abstractmethod
def request_type(self) -> str:
raise NotImplementedError
@property
@abstractmethod
def method(self) -> str:
raise NotImplementedError
@property
@abstractmethod
def route_path(self) -> str:
raise NotImplementedError
@property
@abstractmethod
def is_route_request(self) -> bool:
raise NotImplementedError
@property
@abstractmethod
def is_health_request(self) -> bool:
raise NotImplementedError
| ProxyRequest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.