language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | prabhupant__python-ds | data_structures/bst/linked_list_to_bst.py | {
"start": 137,
"end": 547
} | class ____():
def __init__(self, val, next):
self.val = val
self.next = None
def linked_list_to_bst(head):
if not head:
return None
curr = head
n = 0
while curr:
n += 1
curr = curr.next
return ll_to_bst_recur(head, n)
def ll_to_bst_recur(head, n):
if n <= 0:
return None
# TODO: Fix me!
# left = ll_to_bst_recur(
| LLNode |
python | sphinx-doc__sphinx | sphinx/deprecation.py | {
"start": 176,
"end": 2777
} | class ____(PendingDeprecationWarning):
pass
RemovedInNextVersionWarning = RemovedInSphinx10Warning
# By default, all Sphinx deprecation warnings will be emitted.
# To avoid this, set the environment variable: PYTHONWARNINGS=
if 'PYTHONWARNINGS' not in os.environ:
warnings.filterwarnings('default', category=RemovedInNextVersionWarning)
def _deprecation_warning(
module: str,
attribute: str,
canonical_name: str = '',
*,
remove: tuple[int, int],
raises: bool = False,
) -> None:
"""Helper function for module-level deprecations using ``__getattr__``.
:param module: The module containing a deprecated object.
:param attribute: The name of the deprecated object.
:param canonical_name: Optional fully-qualified name for its replacement.
:param remove: Target version for removal.
:param raises: Indicate whether to raise an exception instead of a warning.
When *raises* is ``True``, an :exc:`AttributeError` is raised instead
of emitting a warning so that it is easy to locate deprecated objects
in tests that could suppress deprecation warnings.
Usage::
# deprecated name -> (object to return, canonical path or empty string, removal version)
_DEPRECATED_OBJECTS = {
'deprecated_name': (
object_to_return,
'fully_qualified_replacement_name',
(10, 0),
),
}
def __getattr__(name: str) -> Any:
if name not in _DEPRECATED_OBJECTS:
msg = f'module {__name__!r} has no attribute {name!r}'
raise AttributeError(msg)
from sphinx.deprecation import _deprecation_warning
deprecated_object, canonical_name, remove = _DEPRECATED_OBJECTS[name]
_deprecation_warning(__name__, name, canonical_name, remove=remove)
return deprecated_object
"""
if remove == (10, 0):
warning_class: type[Warning] = RemovedInSphinx10Warning
elif remove == (11, 0):
warning_class = RemovedInSphinx11Warning
else:
msg = f'removal version {remove!r} is invalid!'
raise RuntimeError(msg)
qualname = f'{module}.{attribute}'
if canonical_name:
message = (
f'The alias {qualname!r} is deprecated, use {canonical_name!r} instead.'
)
else:
message = f'{qualname!r} is deprecated.'
if raises:
raise AttributeError(message)
message = f'{message} Check CHANGES for Sphinx API modifications.'
warnings.warn(message, warning_class, stacklevel=3)
| RemovedInSphinx11Warning |
python | PrefectHQ__prefect | src/prefect/settings/models/server/ui.py | {
"start": 220,
"end": 2069
} | class ____(PrefectBaseSettings):
model_config: ClassVar[SettingsConfigDict] = build_settings_config(("server", "ui"))
enabled: bool = Field(
default=True,
description="Whether or not to serve the Prefect UI.",
validation_alias=AliasChoices(
AliasPath("enabled"),
"prefect_server_ui_enabled",
"prefect_ui_enabled",
),
)
api_url: Optional[str] = Field(
default=None,
description="The connection url for communication from the UI to the API. Defaults to `PREFECT_API_URL` if set. Otherwise, the default URL is generated from `PREFECT_SERVER_API_HOST` and `PREFECT_SERVER_API_PORT`.",
validation_alias=AliasChoices(
AliasPath("api_url"),
"prefect_server_ui_api_url",
"prefect_ui_api_url",
),
)
serve_base: str = Field(
default="/",
description="The base URL path to serve the Prefect UI from.",
validation_alias=AliasChoices(
AliasPath("serve_base"),
"prefect_server_ui_serve_base",
"prefect_ui_serve_base",
),
)
static_directory: Optional[str] = Field(
default=None,
description="The directory to serve static files from. This should be used when running into permissions issues when attempting to serve the UI from the default directory (for example when running in a Docker container).",
validation_alias=AliasChoices(
AliasPath("static_directory"),
"prefect_server_ui_static_directory",
"prefect_ui_static_directory",
),
)
show_promotional_content: bool = Field(
default=True,
description="Whether or not to display promotional content in the UI, including upgrade prompts and marketing banners.",
)
| ServerUISettings |
python | pydata__xarray | xarray/tests/test_plot.py | {
"start": 67714,
"end": 70515
} | class ____(Common2dMixin, PlotTestCase):
plotfunc = staticmethod(xplt.contour)
# matplotlib cmap.colors gives an rgbA ndarray
# when seaborn is used, instead we get an rgb tuple
@staticmethod
def _color_as_tuple(c: Any) -> tuple[Any, Any, Any]:
return c[0], c[1], c[2]
def test_colors(self) -> None:
# with single color, we don't want rgb array
artist = self.plotmethod(colors="k")
assert artist.cmap.colors[0] == "k"
# 2 colors, will repeat every other tick:
artist = self.plotmethod(colors=["k", "b"])
assert artist.cmap.colors[:2] == ["k", "b"]
# 4 colors, will repeat every 4th tick:
artist = self.darray.plot.contour(
levels=[-0.5, 0.0, 0.5, 1.0], colors=["k", "r", "w", "b"]
)
assert artist.cmap.colors[:5] == ["k", "r", "w", "b"] # type: ignore[attr-defined,unused-ignore]
# the last color is now under "over"
assert self._color_as_tuple(artist.cmap.get_over()) == (0.0, 0.0, 1.0)
def test_colors_np_levels(self) -> None:
# https://github.com/pydata/xarray/issues/3284
levels = np.array([-0.5, 0.0, 0.5, 1.0])
artist = self.darray.plot.contour(levels=levels, colors=["k", "r", "w", "b"])
cmap = artist.cmap
assert isinstance(cmap, mpl.colors.ListedColormap)
assert artist.cmap.colors[:5] == ["k", "r", "w", "b"] # type: ignore[attr-defined,unused-ignore]
# the last color is now under "over"
assert self._color_as_tuple(cmap.get_over()) == (0.0, 0.0, 1.0)
def test_cmap_and_color_both(self) -> None:
with pytest.raises(ValueError):
self.plotmethod(colors="k", cmap="RdBu")
def list_of_colors_in_cmap_raises_error(self) -> None:
with pytest.raises(ValueError, match=r"list of colors"):
self.plotmethod(cmap=["k", "b"])
@pytest.mark.slow
def test_2d_coord_names(self) -> None:
self.plotmethod(x="x2d", y="y2d")
# make sure labels came out ok
ax = plt.gca()
assert "x2d" == ax.get_xlabel()
assert "y2d" == ax.get_ylabel()
def test_single_level(self) -> None:
# this used to raise an error, but not anymore since
# add_colorbar defaults to false
self.plotmethod(levels=[0.1])
self.plotmethod(levels=1)
def test_colormap_norm(self) -> None:
# Using a norm should plot a nice colorbar and look consistent with pcolormesh.
norm = mpl.colors.LogNorm(0.1, 1e1)
with pytest.warns(UserWarning):
artist = self.plotmethod(norm=norm, add_colorbar=True)
actual = artist.colorbar.locator()
expected = np.array([0.01, 0.1, 1.0, 10.0])
np.testing.assert_allclose(actual, expected)
| TestContour |
python | django__django | django/db/models/functions/math.py | {
"start": 2428,
"end": 2781
} | class ____(NumericOutputFieldMixin, Transform):
function = "DEGREES"
lookup_name = "degrees"
def as_oracle(self, compiler, connection, **extra_context):
return super().as_sql(
compiler,
connection,
template="((%%(expressions)s) * 180 / %s)" % math.pi,
**extra_context,
)
| Degrees |
python | pytorch__pytorch | torch/utils/_sympy/numbers.py | {
"start": 6060,
"end": 11495
} | class ____(Number, metaclass=Singleton):
"""Negative integer infinite quantity.
NegativeInfinity is a singleton, and can be accessed
by ``S.NegativeInfinity``.
See Also
========
IntInfinity
"""
# Ensure we get dispatched to before plain numbers
_op_priority = 100.0
is_integer = True
is_extended_real = True
is_commutative = True
is_comparable = True
is_extended_negative = True
is_number = True
is_prime = False
__slots__ = ()
def __new__(cls):
return AtomicExpr.__new__(cls)
def _eval_subs(self, old, new):
if self == old:
return new
def _sympystr(self, printer) -> str:
return "-int_oo"
"""
def _eval_evalf(self, prec=None):
return Float('-inf')
def evalf(self, prec=None, **options):
return self._eval_evalf(prec)
"""
@_sympifyit("other", NotImplemented)
def __add__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other is S.Infinity:
return S.Infinity
if other in (S.IntInfinity, S.NaN):
return S.NaN
return self
return Number.__add__(self, other)
__radd__ = __add__
@_sympifyit("other", NotImplemented)
def __sub__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other is S.NegativeInfinity:
return S.Infinity
if other in (S.NegativeIntInfinity, S.NaN):
return S.NaN
return self
return Number.__sub__(self, other)
@_sympifyit("other", NotImplemented)
def __rsub__(self, other):
return (-self).__add__(other)
@_sympifyit("other", NotImplemented)
def __mul__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other.is_zero or other is S.NaN:
return S.NaN
if other.is_extended_positive:
return self
return S.IntInfinity
return Number.__mul__(self, other)
__rmul__ = __mul__
@_sympifyit("other", NotImplemented)
def __truediv__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other in (
S.Infinity,
S.IntInfinity,
S.NegativeInfinity,
S.NegativeIntInfinity,
S.NaN,
):
return S.NaN
if other.is_extended_nonnegative:
return self
return S.Infinity # truediv returns float
return Number.__truediv__(self, other)
def __abs__(self):
return S.IntInfinity
def __neg__(self):
return S.IntInfinity
def _eval_power(self, expt):
if expt.is_number:
if expt in (
S.NaN,
S.Infinity,
S.NegativeInfinity,
S.IntInfinity,
S.NegativeIntInfinity,
):
return S.NaN
if isinstance(expt, sympy.Integer) and expt.is_extended_positive:
if expt.is_odd:
return S.NegativeIntInfinity
else:
return S.IntInfinity
inf_part = S.IntInfinity**expt
s_part = S.NegativeOne**expt
if inf_part == 0 and s_part.is_finite:
return inf_part
if (
inf_part is S.ComplexInfinity
and s_part.is_finite
and not s_part.is_zero
):
return S.ComplexInfinity
return s_part * inf_part
def _as_mpf_val(self, prec):
return mlib.fninf
def __hash__(self):
return super().__hash__()
def __eq__(self, other):
return other is S.NegativeIntInfinity
def __ne__(self, other):
return other is not S.NegativeIntInfinity
def __gt__(self, other):
if other is S.NegativeInfinity:
return sympy.true # -sympy.oo < -int_oo
elif other is S.NegativeIntInfinity:
return sympy.false # consistency with sympy.oo
else:
return sympy.false
def __ge__(self, other):
if other is S.NegativeInfinity:
return sympy.true # -sympy.oo < -int_oo
elif other is S.NegativeIntInfinity:
return sympy.true # consistency with sympy.oo
else:
return sympy.false
def __lt__(self, other):
if other is S.NegativeInfinity:
return sympy.false # -sympy.oo < -int_oo
elif other is S.NegativeIntInfinity:
return sympy.false # consistency with sympy.oo
else:
return sympy.true
def __le__(self, other):
if other is S.NegativeInfinity:
return sympy.false # -sympy.oo < -int_oo
elif other is S.NegativeIntInfinity:
return sympy.true # consistency with sympy.oo
else:
return sympy.true
@_sympifyit("other", NotImplemented)
def __mod__(self, other):
if not isinstance(other, Expr):
return NotImplemented
return S.NaN
__rmod__ = __mod__
def floor(self):
return self
def ceiling(self):
return self
def as_powers_dict(self):
return {S.NegativeOne: 1, S.IntInfinity: 1}
| NegativeIntInfinity |
python | kamyu104__LeetCode-Solutions | Python/construct-binary-tree-from-string.py | {
"start": 153,
"end": 832
} | class ____(object):
def str2tree(self, s):
"""
:type s: str
:rtype: TreeNode
"""
def str2treeHelper(s, i):
start = i
if s[i] == '-': i += 1
while i < len(s) and s[i].isdigit(): i += 1
node = TreeNode(int(s[start:i]))
if i < len(s) and s[i] == '(':
i += 1
node.left, i = str2treeHelper(s, i)
i += 1
if i < len(s) and s[i] == '(':
i += 1
node.right, i = str2treeHelper(s, i)
i += 1
return node, i
return str2treeHelper(s, 0)[0] if s else None
| Solution |
python | optuna__optuna | optuna/_callbacks.py | {
"start": 247,
"end": 2023
} | class ____:
"""Set a maximum number of trials before ending the study.
While the ``n_trials`` argument of :meth:`optuna.study.Study.optimize` sets the number of
trials that will be run, you may want to continue running until you have a certain number of
successfully completed trials or stop the study when you have a certain number of trials that
fail. This ``MaxTrialsCallback`` class allows you to set a maximum number of trials for a
particular :class:`~optuna.trial.TrialState` before stopping the study.
Example:
.. testcode::
import optuna
from optuna.study import MaxTrialsCallback
from optuna.trial import TrialState
def objective(trial):
x = trial.suggest_float("x", -1, 1)
return x**2
study = optuna.create_study()
study.optimize(
objective,
callbacks=[MaxTrialsCallback(10, states=(TrialState.COMPLETE,))],
)
Args:
n_trials:
The max number of trials. Must be set to an integer.
states:
Tuple of the :class:`~optuna.trial.TrialState` to be counted
towards the max trials limit. Default value is ``(TrialState.COMPLETE,)``.
If :obj:`None`, count all states.
"""
def __init__(
self, n_trials: int, states: Container[TrialState] | None = (TrialState.COMPLETE,)
) -> None:
self._n_trials = n_trials
self._states = states
def __call__(self, study: Study, trial: FrozenTrial) -> None:
trials = study.get_trials(deepcopy=False, states=self._states)
n_complete = len(trials)
if n_complete >= self._n_trials:
study.stop()
| MaxTrialsCallback |
python | weaviate__weaviate-python-client | weaviate/backup/backup.py | {
"start": 1883,
"end": 1998
} | class ____(_BackupConfigBase):
"""Options to configure the backup when restoring a backup."""
| BackupConfigRestore |
python | pytorch__pytorch | torch/_higher_order_ops/cond.py | {
"start": 10471,
"end": 27676
} | class ____(torch.autograd.Function):
@staticmethod
# pyrefly: ignore [bad-override]
def forward(
ctx,
pred,
true_fn,
false_fn,
*operands,
):
ctx._pred = pred
ctx._true_bw_fn = create_bw_fn(
true_fn,
operands,
)
ctx._false_bw_fn = create_bw_fn(
false_fn,
operands,
)
# We snapshot the dispatch keys in forward for materializing the
# the bw_graph in backward.
ctx._fw_include_key_set = torch._C._dispatch_tls_local_include_set()
ctx._fw_exclude_key_set = torch._C._dispatch_tls_local_exclude_set()
save_tensors_and_symints_for_backward(ctx, operands)
with torch._C._AutoDispatchBelowAutograd():
return cond_op(pred, true_fn, false_fn, operands)
@staticmethod
def backward(ctx, *flat_grads):
operands = saved_tensors_and_symints(ctx)
args = operands + flat_grads
# TODO: we need to materialize the bw graphs because dynamo is unable to
# trace through the joint function when torch.compile torch.autograd.grad.
grads_tensor_masks = []
def create_fn_remove_none(fn):
@functools.wraps(fn)
def wrapped(*args):
nonlocal grads_tensor_masks
true_outputs = fn(*args)
grads_tensor_masks = [
bool(isinstance(out, torch.Tensor)) for out in true_outputs
]
return filter_with_masks(true_outputs, grads_tensor_masks)
return wrapped
true_bw_gm = materialize_as_graph(
create_fn_remove_none(ctx._true_bw_fn),
args,
ctx._fw_include_key_set,
ctx._fw_exclude_key_set,
force_enable_grad=True,
)
false_bw_gm = materialize_as_graph(
create_fn_remove_none(ctx._false_bw_fn),
args,
ctx._fw_include_key_set,
ctx._fw_exclude_key_set,
force_enable_grad=True,
)
grads = cond_op(
ctx._pred,
true_bw_gm,
false_bw_gm,
args,
)
return None, None, None, *fill_none_with_masks(grads, grads_tensor_masks)
# Note:
# As long as one of the tensors in pred or operands requires grad,
# all the output would require grad with backward fn set to be the CondAutogradOp.
# This is consistent with autograd.Function's semantic.
@cond_op.py_autograd_impl
def cond_autograd(pred, true_fn, false_fn, operands):
return CondAutogradOp.apply(
pred,
true_fn,
false_fn,
*operands,
)
@cond_op.py_impl(ProxyTorchDispatchMode)
def inner(mode, pred, true_fn, false_fn, operands):
return trace_cond(mode, cond_op, pred, true_fn, false_fn, operands)
@cond_op.py_impl(FakeTensorMode)
def cond_fake_tensor_mode(mode, pred, true_fn, false_fn, operands):
# Ignore here, because if you've gotten here but you're not manually
# tracing the inner graphs, that means that you intend to reuse the graph
# directly. Which means the old unbacked symbol bindings are appropriate.
# This strategy will not work if unbacked symbols can escape.
ignore_fresh_unbacked = contextlib.nullcontext()
if mode.shape_env:
ignore_fresh_unbacked = mode.shape_env.ignore_fresh_unbacked_symbols()
with mode, ignore_fresh_unbacked:
flat_true_outs, true_out_spec = pytree.tree_flatten(true_fn(*operands))
flat_false_outs, false_out_spec = pytree.tree_flatten(false_fn(*operands))
if true_out_spec != false_out_spec:
raise RuntimeError(
"Unmatched output spec from torch.cond branches: "
f"true branch tree_spec {true_out_spec} vs false branch tree_spec {false_out_spec}."
)
merged_outs = []
for true_out, false_out in zip(flat_true_outs, flat_false_outs):
merged_outs.append(_merge_output(true_out, false_out, mode))
return pytree.tree_unflatten(merged_outs, true_out_spec)
def check_tensor_meta_match(
t1: torch.Tensor, t2: torch.Tensor, attr_names: tuple[str, ...], msg_prefix: str
) -> None:
def _get_attr_maybe_call(t: torch.Tensor, attr_name: str) -> Any:
attr = getattr(t, attr_name)
if callable(attr):
return attr()
return attr
for attr_name in attr_names:
lattr = _get_attr_maybe_call(t1, attr_name)
rattr = _get_attr_maybe_call(t2, attr_name)
torch._check(
lattr == rattr,
lambda: f"{msg_prefix} expected same {attr_name} but got {lattr} and {rattr}.",
)
def _merge_output(
a: Optional[Union[torch.Tensor, int]],
b: Optional[Union[torch.Tensor, int]],
mode: FakeTensorMode,
):
from torch.fx.experimental.symbolic_shapes import (
has_free_unbacked_symbols,
SymIntEqByExpr,
)
if a is None or b is None:
assert a is None and b is None, (a, b)
return None
def min_max(s0, s1):
def _bound(s0, lower_bound: bool):
if isinstance(s0, int):
return s0
r = mode.shape_env.var_to_range.get( # type: ignore[union-attr]
s0.node.expr,
torch.utils._sympy.value_ranges.ValueRanges.unknown(),
)
return r.lower if lower_bound else r.upper
return min(_bound(s0, True), _bound(s1, True)), max(
_bound(s0, False), _bound(s1, False)
)
if type(a) is int and type(b) is int:
if a == b:
return a
assert mode.shape_env is not None
merged_out = mode.shape_env.create_unbacked_symint()
mode.shape_env.constrain_symbol_range(merged_out.node.expr, *min_max(a, b))
return merged_out
assert type(a) is FakeTensor and type(b) is FakeTensor, (a, type(a), b, type(b))
# Note: we don't check size, stride because
# they'll be merged with unbacked symints if they differ.
_meta_to_check = {
"dtype",
"device",
"layout",
"dim",
"is_quantized",
"is_conj",
"is_sparse",
"storage_offset",
}
check_tensor_meta_match(
a,
b,
tuple(_meta_to_check),
msg_prefix="When merging two branches' output in torch.cond, ",
)
# NYI
assert not a.is_quantized and not b.is_quantized
assert not a.is_sparse and not b.is_sparse
assert not a.is_conj() and not b.is_conj()
"""
Step 1: create unbacked symints for sizes that are different
along the same axis. For example:
a.size is [s0, 4, s0, 5, 4, 5]
b.size is [s1, 4, s2, 8, 4, 7]
merged_size will be [u0, 4, u1, u2, 4, u3], where
u0 has range [min(s0, s1), max(s0, s1)]
u1 has range [min(s0, s2), max(s0, s2)]
u2 has range [5, 8]
u3 has range [5, 7]
"""
merged_size: list[Union[int, torch.SymInt]] = []
def _has_unbacked_symbols(s: Union[int, torch.SymInt]) -> bool:
if isinstance(s, int):
return False
else:
return has_free_unbacked_symbols(s.node.expr)
for s0, s1 in zip(a.size(), b.size()):
# If there are unbacked symbols leaked out of true_branch or false_branch
# we need to merge them with a new unbacked symbol and track in parent graph.
if (
not _has_unbacked_symbols(s0)
and not _has_unbacked_symbols(s1)
and SymIntEqByExpr(s0) == SymIntEqByExpr(s1)
):
merged_size.append(s0)
else:
assert mode.shape_env is not None
new_size = mode.shape_env.create_unbacked_symint()
mode.shape_env.constrain_symbol_range(new_size.node.expr, *min_max(s0, s1))
merged_size.append(new_size)
"""
This follows the logic in symbolic_shapes._compute_symbolic_stride
Step 2: Since tensor stride is an accumulative multiplication of the sizes, which is a permutated
(due to view ops) non-descending sequence.
Case 1: No size is 1. In this case, strides have unique values.
For example, suppose we have a tensor with:
size [3, 4, 3, 5, 4, 5],
stride (1200, 300, 1, 12, 3, 60),
merged_size [u0, u1, u2, u3, u4, u5].
We visit the strides in ascending order: 1, 3, 12, 60, 300, 1200. In each step, we check whether
the current stride is bounded or not and bound next stride by setting.
stride_expr[next_stride] = current_stride_expr * current_size_expr
1st round:
current_stride is 1, current_size is 3, so next_stride is 1 * 3 = 3,
current_stride_expr is set to 1, current_size_expr is u2, so stride_expr[3] is therefore 1 * u2 = u2
2nd round:
current_stride is 3, current_size is 4, so next_stride is 3 * 4 = 12,
current_stride_expr is stride_expr[3] i.e. u2, current_size_expr is u4, so stride_expr[12] = u2 * u4
...
Case 2: At least one dimension has size 1, which can produce duplicates in strides.
In this case, theoretically, we cannot uniquely determine the expr of strides because
the accessing stride_expr with same key in different order causes the final stride expression
to be different.
Suppose we have:
size: (3, 1)
stride: (1, 1)
merged_size: (u0, u1)
The stride expr could either be (u1, 1) or (1, u0) depending on whether we start with u1 or u0.
For this reason, we try to break tie by sorting via descending index so we always get (u1, 1).
Note that backend might optimize the strides anyway so this is usually not a problem as long
as two branches matches. See relevant discussions in https://github.com/pytorch/pytorch/issues/142024.
Case 3: Dim has 0 stride. 0 stride doesn't participate in the accumulative multiplication of
sizes. So they're always treated as constant even if their corresponding size is turned into unbacked symint.
Suppose we have:
size: (3, 3)
stride: (0, 1)
merged_size: (u0, u1)
The merged stride would be (0, 1)
"""
def _bound_stride(
a_ex_size: torch.Size,
b_ex_size: torch.Size,
a_ex_stride: tuple[int, ...],
b_ex_stride: tuple[int, ...],
merged_size: list[Union[int, torch.SymInt]],
) -> list[Union[int, torch.SymInt]]:
from torch._inductor.ir import get_stride_order
a_sorted_stride_idx = get_stride_order(a_ex_stride, mode.shape_env)
b_sorted_stride_idx = get_stride_order(b_ex_stride, mode.shape_env)
a_stride_li: list[Optional[tuple[Union[int, torch.SymInt], int]]] = [
None
] * len(a_ex_stride)
b_stride_li: list[Optional[tuple[Union[int, torch.SymInt], int]]] = [
None
] * len(b_ex_stride)
for i, idx in enumerate(a_sorted_stride_idx):
a_stride_li[idx] = (a_ex_stride[i], -i)
for i, idx in enumerate(b_sorted_stride_idx):
b_stride_li[idx] = (b_ex_stride[i], -i)
for a_pair, b_pair in zip(a_stride_li, b_stride_li):
assert a_pair is not None and b_pair is not None
_, a_idx = a_pair
_, b_idx = b_pair
if a_idx != b_idx:
raise RuntimeError(
f"The sorted order of strides of the two branches' output doesn't match."
f"this indicates the contiguousness of the two branches are different. "
f"True branch has stride {a_ex_stride} but false branch has stride {b_ex_stride}."
f"Consider using contiguous() to make the two branches have the same contiguousness."
)
def _maybe_expr(s: Union[int, torch.SymInt]):
if isinstance(s, int):
return s
return s.node.expr
a_stride_expr: dict[Any, Union[int, torch.SymInt]] = {}
b_stride_expr: dict[Any, Union[int, torch.SymInt]] = {}
merged_strides: list[Union[int, torch.SymInt]] = [None] * len(a_ex_stride) # type: ignore[list-item]
for a_pair, b_pair in zip(a_stride_li, b_stride_li):
assert a_pair is not None and b_pair is not None
a_val, neg_i = a_pair
b_val, _ = b_pair
i = -neg_i
if a_val == 0:
assert b_val == 0, (a_val, b_val)
merged_strides[i] = 0
continue
if _maybe_expr(a_val) in a_stride_expr:
a_expr = a_stride_expr[_maybe_expr(a_val)]
assert b_stride_expr[_maybe_expr(b_val)] == a_expr, (
f"a_stride_expr:{a_stride_expr}, b_stride_expr:{b_stride_expr}"
)
merged_strides[i] = a_expr
else:
if a_val == 1:
assert b_val == 1
a_stride_expr[_maybe_expr(a_val)] = 1
b_stride_expr[_maybe_expr(b_val)] = 1
merged_strides[i] = 1
else:
# If we cannot find the expr of a_val in a_stride_expr, it means
# the strides is not a simple accumulative multiplication of sizes.
# In this case, we cannot determine the expr of strides from the new
# shapes so we error out and hint users to call contiguous().
raise RuntimeError(
f"It seems one of cond's output stride is not a simple accumulative multiplication of sizes. "
f"This could be because cond returns a slice of a tensor, which is not dense in memory. "
f"True branch has size {a_ex_size}, stride {a_ex_stride} and false branch has size {b_ex_size} "
f"stride {b_ex_stride}. Hint: can call t.contiguous(). "
)
nxt_merged_stride_expr = merged_strides[i] * merged_size[i]
a_stride_expr[_maybe_expr(a_val * a_ex_size[i])] = nxt_merged_stride_expr
b_stride_expr[_maybe_expr(b_val * b_ex_size[i])] = nxt_merged_stride_expr
return merged_strides
merged_stride: list[Union[int, torch.SymInt]] = _bound_stride(
a.size(), b.size(), a.stride(), b.stride(), merged_size
)
with mode:
return torch.empty_strided(
merged_size, merged_stride, dtype=a.dtype, device=a.device
)
@cond_op.py_functionalize_impl
def cond_func(ctx, pred, true_fn, false_fn, inputs):
from torch._higher_order_ops.utils import _check_alias_and_mutation
unwrapped_inputs = ctx.unwrap_tensors(inputs)
unwrapped_pred = ctx.unwrap_tensors(pred)
with ctx.redispatch_to_next():
functional_true = ctx.functionalize(_maybe_run_with_interpreter(true_fn))
functional_false = ctx.functionalize(_maybe_run_with_interpreter(false_fn))
pre_dispatch = hasattr(ctx, "mode") and ctx.mode.pre_dispatch
for branch, branch_name in [(true_fn, "cond_true"), (false_fn, "cond_false")]:
_check_alias_and_mutation(
branch, unwrapped_inputs, branch_name, pre_dispatch
)
cond_return = cond_op(
unwrapped_pred, functional_true, functional_false, unwrapped_inputs
)
return ctx.wrap_tensors(cond_return)
@cond_op.py_impl(torch._C._functorch.TransformType.Vmap)
def cond_batch_rule(interpreter, pred, true_fn, false_fn, inputs):
assert isinstance(inputs, (list, tuple)), (
"Cond inputs must be a list or tuple of tensors"
)
assert all(isinstance(i, torch.Tensor) for i in inputs), (
"Cond inputs must be a list of tensors"
)
pred_is_batched = isinstance(pred, torch.Tensor) and is_batchedtensor(pred)
pred_ = get_unwrapped(pred) if pred_is_batched else pred
# unbatched tensors are not vmapped
tensors, in_dims = zip(
*[
(get_unwrapped(t), maybe_get_bdim(t)) if is_batchedtensor(t) else (t, None)
for t in inputs
]
)
if pred_is_batched:
# prepend "pred" and vmap everything
tensors = (pred_,) + tensors
in_dims = (0,) + in_dims
def fn(p, *args):
t = true_fn(*args)
f = false_fn(*args)
return torch.where(p, t[0], f[0])
with interpreter.lower():
result = torch.vmap(fn, in_dims=in_dims)(*tensors)
else:
# predicate is known at this stage and it is a boolean expression or a
# tensor with one element.
true_fn = torch.vmap(true_fn, in_dims=in_dims)
false_fn = torch.vmap(false_fn, in_dims=in_dims)
with interpreter.lower():
result = cond_op(pred, true_fn, false_fn, tensors)
if not isinstance(result, tuple):
result = (result,)
lvl = interpreter.level()
return tuple(_add_batch_dim(r, 0, lvl) for r in result)
| CondAutogradOp |
python | walkccc__LeetCode | solutions/1064. Fixed Point/1064.py | {
"start": 0,
"end": 388
} | class ____:
def fixedPoint(self, arr: list[int]) -> int:
l = 0
r = len(arr) - 1
# Since arr[i] is strictly increasing, arr[i] - i will also be increasing.
# Therefore, binary search `arr` for the first arr[i] - i = 0.
while l < r:
m = (l + r) // 2
if arr[m] - m >= 0:
r = m
else:
l = m + 1
return l if arr[l] == l else -1
| Solution |
python | doocs__leetcode | solution/1500-1599/1577.Number of Ways Where Square of Number Is Equal to Product of Two Numbers/Solution2.py | {
"start": 0,
"end": 535
} | class ____:
def numTriplets(self, nums1: List[int], nums2: List[int]) -> int:
def cal(nums: List[int], cnt: Counter) -> int:
ans = 0
for x in nums:
for y, v1 in cnt.items():
z = x * x // y
if y * z == x * x:
v2 = cnt[z]
ans += v1 * (v2 - int(y == z))
return ans // 2
cnt1 = Counter(nums1)
cnt2 = Counter(nums2)
return cal(nums1, cnt2) + cal(nums2, cnt1)
| Solution |
python | pypa__warehouse | warehouse/oidc/forms/activestate.py | {
"start": 751,
"end": 3496
} | class ____(TypedDict):
data: dict[str, Any]
errors: list[dict[str, Any]]
def _no_double_dashes(_form: wtforms.Form, field: wtforms.Field) -> None:
if _DOUBLE_DASHES.search(field.data):
raise wtforms.validators.ValidationError(
_("Double dashes are not allowed in the name")
)
def _no_leading_or_trailing_dashes(_form: wtforms.Form, field: wtforms.Field) -> None:
if field.data.startswith("-") or field.data.endswith("-"):
raise wtforms.validators.ValidationError(
_("Leading or trailing dashes are not allowed in the name")
)
def _activestate_gql_api_call(
query: str,
variables: dict[str, str],
response_handler: Callable[[GqlResponse], Any],
) -> Any:
try:
response = requests.post(
_ACTIVESTATE_GRAPHQL_API_URL,
json={
"query": query,
"variables": variables,
},
timeout=5,
)
if response.status_code == 404:
sentry_sdk.capture_message(
f"Unexpected {response.status_code} error "
f"from ActiveState API: {response.content!r}"
)
raise wtforms.validators.ValidationError(
_("Unexpected error from ActiveState. Try again in a few minutes")
)
elif response.status_code >= 400:
sentry_sdk.capture_message(
f"Unexpected {response.status_code} error "
f"from ActiveState API: {response.content!r}"
)
raise wtforms.validators.ValidationError(
_("Unexpected error from ActiveState. Try again")
)
except (requests.Timeout, requests.ConnectionError):
sentry_sdk.capture_message("Connection error from ActiveState API")
raise wtforms.validators.ValidationError(
_("Unexpected error from ActiveState. Try again in a few minutes")
)
# Graphql reports it's errors within the body of the 200 response
try:
response_json = response.json()
errors = response_json.get("errors")
if errors:
sentry_sdk.capture_message(
f"Unexpected error from ActiveState API: {errors}"
)
raise wtforms.validators.ValidationError(
_("Unexpected error from ActiveState. Try again")
)
return response_handler(response_json)
except requests.exceptions.JSONDecodeError:
sentry_sdk.capture_message(
f"Unexpected error from ActiveState API: {response.content!r}"
)
raise wtforms.validators.ValidationError(
_("Unexpected error from ActiveState. Try again")
)
| GqlResponse |
python | PyCQA__bandit | tests/unit/core/test_config.py | {
"start": 245,
"end": 727
} | class ____(fixtures.Fixture):
def __init__(self, contents=None, suffix=".yaml"):
super().__init__()
self.contents = contents
self.suffix = suffix
def setUp(self):
super().setUp()
with tempfile.NamedTemporaryFile(
suffix=self.suffix, mode="wt", delete=False
) as f:
if self.contents:
f.write(self.contents)
self.addCleanup(os.unlink, f.name)
self.name = f.name
| TempFile |
python | numpy__numpy | numpy/lib/tests/test_function_base.py | {
"start": 103574,
"end": 108052
} | class ____:
def test_simple(self):
y = np.bincount(np.arange(4))
assert_array_equal(y, np.ones(4))
def test_simple2(self):
y = np.bincount(np.array([1, 5, 2, 4, 1]))
assert_array_equal(y, np.array([0, 2, 1, 0, 1, 1]))
def test_simple_weight(self):
x = np.arange(4)
w = np.array([0.2, 0.3, 0.5, 0.1])
y = np.bincount(x, w)
assert_array_equal(y, w)
def test_simple_weight2(self):
x = np.array([1, 2, 4, 5, 2])
w = np.array([0.2, 0.3, 0.5, 0.1, 0.2])
y = np.bincount(x, w)
assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1]))
def test_with_minlength(self):
x = np.array([0, 1, 0, 1, 1])
y = np.bincount(x, minlength=3)
assert_array_equal(y, np.array([2, 3, 0]))
x = []
y = np.bincount(x, minlength=0)
assert_array_equal(y, np.array([]))
def test_with_minlength_smaller_than_maxvalue(self):
x = np.array([0, 1, 1, 2, 2, 3, 3])
y = np.bincount(x, minlength=2)
assert_array_equal(y, np.array([1, 2, 2, 2]))
y = np.bincount(x, minlength=0)
assert_array_equal(y, np.array([1, 2, 2, 2]))
def test_with_minlength_and_weights(self):
x = np.array([1, 2, 4, 5, 2])
w = np.array([0.2, 0.3, 0.5, 0.1, 0.2])
y = np.bincount(x, w, 8)
assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1, 0, 0]))
def test_empty(self):
x = np.array([], dtype=int)
y = np.bincount(x)
assert_array_equal(x, y)
def test_empty_with_minlength(self):
x = np.array([], dtype=int)
y = np.bincount(x, minlength=5)
assert_array_equal(y, np.zeros(5, dtype=int))
@pytest.mark.parametrize('minlength', [0, 3])
def test_empty_list(self, minlength):
assert_array_equal(np.bincount([], minlength=minlength),
np.zeros(minlength, dtype=int))
def test_with_incorrect_minlength(self):
x = np.array([], dtype=int)
assert_raises_regex(TypeError,
"'str' object cannot be interpreted",
lambda: np.bincount(x, minlength="foobar"))
assert_raises_regex(ValueError,
"must not be negative",
lambda: np.bincount(x, minlength=-1))
x = np.arange(5)
assert_raises_regex(TypeError,
"'str' object cannot be interpreted",
lambda: np.bincount(x, minlength="foobar"))
assert_raises_regex(ValueError,
"must not be negative",
lambda: np.bincount(x, minlength=-1))
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_dtype_reference_leaks(self):
# gh-6805
intp_refcount = sys.getrefcount(np.dtype(np.intp))
double_refcount = sys.getrefcount(np.dtype(np.double))
for j in range(10):
np.bincount([1, 2, 3])
assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount)
assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount)
for j in range(10):
np.bincount([1, 2, 3], [4, 5, 6])
assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount)
assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount)
@pytest.mark.parametrize("vals", [[[2, 2]], 2])
def test_error_not_1d(self, vals):
# Test that values has to be 1-D (both as array and nested list)
vals_arr = np.asarray(vals)
with assert_raises(ValueError):
np.bincount(vals_arr)
with assert_raises(ValueError):
np.bincount(vals)
@pytest.mark.parametrize("dt", np.typecodes["AllInteger"])
def test_gh_28354(self, dt):
a = np.array([0, 1, 1, 3, 2, 1, 7], dtype=dt)
actual = np.bincount(a)
expected = [1, 3, 1, 1, 0, 0, 0, 1]
assert_array_equal(actual, expected)
def test_contiguous_handling(self):
# check for absence of hard crash
np.bincount(np.arange(10000)[::2])
def test_gh_28354_array_like(self):
class A:
def __array__(self):
return np.array([0, 1, 1, 3, 2, 1, 7], dtype=np.uint64)
a = A()
actual = np.bincount(a)
expected = [1, 3, 1, 1, 0, 0, 0, 1]
assert_array_equal(actual, expected)
| TestBincount |
python | pandas-dev__pandas | pandas/tests/arithmetic/test_numeric.py | {
"start": 40952,
"end": 44326
} | class ____:
# TODO: add more dtypes
@pytest.mark.parametrize("holder", [Index, RangeIndex, Series])
@pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64])
def test_ufunc_compat(self, holder, dtype):
box = Series if holder is Series else Index
if holder is RangeIndex:
if dtype != np.int64:
pytest.skip(f"dtype {dtype} not relevant for RangeIndex")
idx = RangeIndex(0, 5, name="foo")
else:
idx = holder(np.arange(5, dtype=dtype), name="foo")
result = np.sin(idx)
expected = box(np.sin(np.arange(5, dtype=dtype)), name="foo")
tm.assert_equal(result, expected)
# TODO: add more dtypes
@pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64])
def test_ufunc_coercions(self, index_or_series, dtype):
idx = index_or_series([1, 2, 3, 4, 5], dtype=dtype, name="x")
box = index_or_series
result = np.sqrt(idx)
assert result.dtype == "f8" and isinstance(result, box)
exp = Index(np.sqrt(np.array([1, 2, 3, 4, 5], dtype=np.float64)), name="x")
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
result = np.divide(idx, 2.0)
assert result.dtype == "f8" and isinstance(result, box)
exp = Index([0.5, 1.0, 1.5, 2.0, 2.5], dtype=np.float64, name="x")
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
# _evaluate_numeric_binop
result = idx + 2.0
assert result.dtype == "f8" and isinstance(result, box)
exp = Index([3.0, 4.0, 5.0, 6.0, 7.0], dtype=np.float64, name="x")
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
result = idx - 2.0
assert result.dtype == "f8" and isinstance(result, box)
exp = Index([-1.0, 0.0, 1.0, 2.0, 3.0], dtype=np.float64, name="x")
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
result = idx * 1.0
assert result.dtype == "f8" and isinstance(result, box)
exp = Index([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float64, name="x")
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
result = idx / 2.0
assert result.dtype == "f8" and isinstance(result, box)
exp = Index([0.5, 1.0, 1.5, 2.0, 2.5], dtype=np.float64, name="x")
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
# TODO: add more dtypes
@pytest.mark.parametrize("holder", [Index, Series])
@pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64])
def test_ufunc_multiple_return_values(self, holder, dtype):
obj = holder([1, 2, 3], dtype=dtype, name="x")
box = Series if holder is Series else Index
result = np.modf(obj)
assert isinstance(result, tuple)
exp1 = Index([0.0, 0.0, 0.0], dtype=np.float64, name="x")
exp2 = Index([1.0, 2.0, 3.0], dtype=np.float64, name="x")
tm.assert_equal(result[0], tm.box_expected(exp1, box))
tm.assert_equal(result[1], tm.box_expected(exp2, box))
def test_ufunc_at(self):
s = Series([0, 1, 2], index=[1, 2, 3], name="x")
np.add.at(s, [0, 2], 10)
expected = Series([10, 1, 12], index=[1, 2, 3], name="x")
tm.assert_series_equal(s, expected)
| TestUFuncCompat |
python | huggingface__transformers | src/transformers/models/ernie4_5/modeling_ernie4_5.py | {
"start": 1997,
"end": 5006
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: Ernie4_5Config, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[Ernie4_5Config] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
# keeping it in full precision
return cos, sin
| Ernie4_5RotaryEmbedding |
python | neetcode-gh__leetcode | python/2101-detonate-the-maximum-bombs.py | {
"start": 0,
"end": 867
} | class ____:
def maximumDetonation(self, bombs: List[List[int]]) -> int:
n = len(bombs)
graph = [[] for _ in range(n)]
for i in range(n):
for j in range(n):
if i != j:
x1, y1, r1 = bombs[i]
x2, y2, _ = bombs[j]
dst = sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
if dst <= r1:
graph[i].append(j)
def dfs(node, vis):
vis[node] = True
count = 1
for nbh in graph[node]:
if not vis[nbh]:
count += dfs(nbh, vis)
return count
detonated = 0
for i in range(n):
visited = [False] * n
detonated = max(detonated, dfs(i, visited))
return detonated | Solution |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/linkedin_oauth2/tests.py | {
"start": 459,
"end": 14125
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = LinkedInOAuth2Provider.id
def get_mocked_response(self):
return [
MockedResponse(
HTTPStatus.OK,
"""
{}
""",
),
MockedResponse(
HTTPStatus.OK,
"""
{
"profilePicture": {
"displayImage": "urn:li:digitalmediaAsset:12345abcdefgh-12abcd"
},
"id": "1234567",
"lastName": {
"preferredLocale": {
"language": "en",
"country": "US"
},
"localized": {
"en_US": "Penners"
}
},
"firstName": {
"preferredLocale": {
"language": "en",
"country": "US"
},
"localized": {
"en_US": "Raymond"
}
}
}
""",
),
]
def get_expected_to_str(self):
return "Raymond Penners"
def test_data_to_str(self):
data = {
"emailAddress": "john@doe.org",
"firstName": "John",
"id": "a1b2c3d4e",
"lastName": "Doe",
"pictureUrl": "https://media.licdn.com/mpr/foo",
"pictureUrls": {
"_total": 1,
"values": ["https://media.licdn.com/foo"],
},
"publicProfileUrl": "https://www.linkedin.com/in/johndoe",
}
acc = SocialAccount(extra_data=data, provider="linkedin_oauth2")
self.assertEqual(acc.get_provider_account().to_str(), "john@doe.org")
def test_get_avatar_url_no_picture_setting(self):
extra_data = """
{
"profilePicture": {
"displayImage": "urn:li:digitalmediaAsset:12345abcdefgh-12abcd"
},
"id": "1234567",
"lastName": {
"preferredLocale": {
"language": "en",
"country": "US"
},
"localized": {
"en_US": "Penners"
}
},
"firstName": {
"preferredLocale": {
"language": "en",
"country": "US"
},
"localized": {
"en_US": "Raymond"
}
}
}
"""
acc = SocialAccount(
extra_data=loads(extra_data),
provider="linkedin_oauth2",
)
self.assertIsNone(acc.get_avatar_url())
@override_settings(
SOCIALACCOUNT_PROVIDERS={
"linkedin_oauth2": {
"PROFILE_FIELDS": [
"id",
"firstName",
"lastName",
"profilePicture(displayImage~:playableStreams)",
],
"PROFILEPICTURE": {
"display_size_w_h": (400, 400.0),
},
},
}
)
def test_get_avatar_url_with_setting(self):
extra_data = """
{
"profilePicture": {
"displayImage": "urn:li:digitalmediaAsset:12345abcdefgh-12abcd"
},
"id": "1234567",
"lastName": {
"preferredLocale": {
"language": "en",
"country": "US"
},
"localized": {
"en_US": "Penners"
}
},
"firstName": {
"preferredLocale": {
"language": "en",
"country": "US"
},
"localized": {
"en_US": "Raymond"
}
}
}
"""
acc = SocialAccount(
extra_data=loads(extra_data),
provider="linkedin_oauth2",
)
self.assertIsNone(acc.get_avatar_url())
@override_settings(
SOCIALACCOUNT_PROVIDERS={
"linkedin_oauth2": {
"PROFILE_FIELDS": [
"id",
"firstName",
"lastName",
"profilePicture(displayImage~:playableStreams)",
],
"PROFILEPICTURE": {
"display_size_w_h": (100, 100.0),
},
},
}
)
def test_get_avatar_url_with_picture(self):
extra_data = """
{
"profilePicture": {
"displayImage": "urn:li:digitalmediaAsset:12345abcdefgh-12abcd"
},
"id": "1234567",
"lastName": {
"preferredLocale": {
"language": "en",
"country": "US"
},
"localized": {
"en_US": "Penners"
}
},
"firstName": {
"preferredLocale": {
"language": "en",
"country": "US"
},
"localized": {
"en_US": "Raymond"
}
},
"profilePicture": {
"displayImage~": {
"elements": [
{
"authorizationMethod": "PUBLIC",
"data": {
"com.linkedin.digitalmedia.mediaartifact.StillImage": {
"storageSize": {
"height": 100,
"width": 100
},
"storageAspectRatio": {
"heightAspect": 1.0,
"formatted": "1.00:1.00",
"widthAspect": 1.0
},
"displaySize": {
"height": 100.0,
"width": 100.0,
"uom": "PX"
},
"rawCodecSpec": {
"name": "jpeg",
"type": "image"
},
"displayAspectRatio": {
"heightAspect": 1.0,
"formatted": "1.00:1.00",
"widthAspect": 1.0
},
"mediaType": "image/jpeg"
}
},
"artifact": "urn:li:digitalmediaMediaArtifact:avatar",
"identifiers": [
{
"identifierExpiresInSeconds": 4,
"file": "urn:li:digitalmediaFile:this-is-the-link",
"index": 0,
"identifier": "this-is-the-link",
"mediaType": "image/jpeg",
"identifierType": "EXTERNAL_URL"
}
]
}
]
}
}
}
"""
acc = SocialAccount(
extra_data=loads(extra_data),
provider="linkedin_oauth2",
)
self.assertEqual("this-is-the-link", acc.get_avatar_url())
@override_settings(
SOCIALACCOUNT_PROVIDERS={
"linkedin_oauth2": {
"PROFILE_FIELDS": [
"id",
"firstName",
"lastName",
"profilePicture(displayImage~:playableStreams)",
],
"PROFILEPICTURE": {
"display_size_w_h": (400, 400.0),
},
},
}
)
def test_get_avatar_url_size_mismatch(self):
extra_data = """
{
"profilePicture": {
"displayImage": "urn:li:digitalmediaAsset:12345abcdefgh-12abcd"
},
"id": "1234567",
"lastName": {
"preferredLocale": {
"language": "en",
"country": "US"
},
"localized": {
"en_US": "Penners"
}
},
"firstName": {
"preferredLocale": {
"language": "en",
"country": "US"
},
"localized": {
"en_US": "Raymond"
}
},
"profilePicture": {
"displayImage~": {
"elements": [
{
"authorizationMethod": "PUBLIC",
"data": {
"com.linkedin.digitalmedia.mediaartifact.StillImage": {
"storageSize": {
"height": 100,
"width": 100
},
"storageAspectRatio": {
"heightAspect": 1.0,
"formatted": "1.00:1.00",
"widthAspect": 1.0
},
"displaySize": {
"height": 100.0,
"width": 100.0,
"uom": "PX"
},
"rawCodecSpec": {
"name": "jpeg",
"type": "image"
},
"displayAspectRatio": {
"heightAspect": 1.0,
"formatted": "1.00:1.00",
"widthAspect": 1.0
},
"mediaType": "image/jpeg"
}
},
"artifact": "urn:li:digitalmediaMediaArtifact:avatar",
"identifiers": [
{
"identifierExpiresInSeconds": 4,
"file": "urn:li:digitalmediaFile:this-is-the-link",
"index": 0,
"identifier": "this-is-the-link",
"mediaType": "image/jpeg",
"identifierType": "EXTERNAL_URL"
}
]
}
]
}
}
}
"""
acc = SocialAccount(
extra_data=loads(extra_data),
provider="linkedin_oauth2",
)
self.assertIsNone(acc.get_avatar_url())
@override_settings(
SOCIALACCOUNT_PROVIDERS={
"linkedin_oauth2": {
"PROFILE_FIELDS": [
"id",
"firstName",
"lastName",
"profilePicture(displayImage~:playableStreams)",
],
"PROFILEPICTURE": {
"display_size_w_h": (400, 400.0),
},
},
}
)
def test_get_avatar_url_auth_mismatch(self):
extra_data = """
{
"profilePicture": {
"displayImage": "urn:li:digitalmediaAsset:12345abcdefgh-12abcd"
},
"id": "1234567",
"lastName": {
"preferredLocale": {
"language": "en",
"country": "US"
},
"localized": {
"en_US": "Penners"
}
},
"firstName": {
"preferredLocale": {
"language": "en",
"country": "US"
},
"localized": {
"en_US": "Raymond"
}
},
"profilePicture": {
"displayImage~": {
"elements": [
{
"authorizationMethod": "PRIVATE",
"data": {
"com.linkedin.digitalmedia.mediaartifact.StillImage": {
"storageSize": {
"height": 100,
"width": 100
},
"storageAspectRatio": {
"heightAspect": 1.0,
"formatted": "1.00:1.00",
"widthAspect": 1.0
},
"displaySize": {
"height": 100.0,
"width": 100.0,
"uom": "PX"
},
"rawCodecSpec": {
"name": "jpeg",
"type": "image"
},
"displayAspectRatio": {
"heightAspect": 1.0,
"formatted": "1.00:1.00",
"widthAspect": 1.0
},
"mediaType": "image/jpeg"
}
},
"artifact": "urn:li:digitalmediaMediaArtifact:avatar",
"identifiers": [
{
"identifierExpiresInSeconds": 4,
"file": "urn:li:digitalmediaFile:this-is-the-link",
"index": 0,
"identifier": "this-is-the-link",
"mediaType": "image/jpeg",
"identifierType": "EXTERNAL_URL"
}
]
}
]
}
}
}
"""
acc = SocialAccount(
extra_data=loads(extra_data),
provider="linkedin_oauth2",
)
self.assertIsNone(acc.get_avatar_url())
@override_settings(
SOCIALACCOUNT_PROVIDERS={
"linkedin_oauth2": {
"PROFILE_FIELDS": [
"id",
"firstName",
"lastName",
"profilePicture(displayImage~:playableStreams)",
],
"PROFILEPICTURE": {
"display_size_w_h": (100, 100),
},
},
}
)
def test_get_avatar_url_float_vs_int(self):
extra_data = """
{
"profilePicture": {
"displayImage": "urn:li:digitalmediaAsset:12345abcdefgh-12abcd"
},
"id": "1234567",
"lastName": {
"preferredLocale": {
"language": "en",
"country": "US"
},
"localized": {
"en_US": "Penners"
}
},
"firstName": {
"preferredLocale": {
"language": "en",
"country": "US"
},
"localized": {
"en_US": "Raymond"
}
},
"profilePicture": {
"displayImage~": {
"elements": [
{
"authorizationMethod": "PUBLIC",
"data": {
"com.linkedin.digitalmedia.mediaartifact.StillImage": {
"storageSize": {
"height": 100,
"width": 100
},
"storageAspectRatio": {
"heightAspect": 1.0,
"formatted": "1.00:1.00",
"widthAspect": 1.0
},
"displaySize": {
"height": 100.0,
"width": 100.0,
"uom": "PX"
},
"rawCodecSpec": {
"name": "jpeg",
"type": "image"
},
"displayAspectRatio": {
"heightAspect": 1.0,
"formatted": "1.00:1.00",
"widthAspect": 1.0
},
"mediaType": "image/jpeg"
}
},
"artifact": "urn:li:digitalmediaMediaArtifact:avatar",
"identifiers": [
{
"identifierExpiresInSeconds": 4,
"file": "urn:li:digitalmediaFile:this-is-the-link",
"index": 0,
"identifier": "this-is-the-link",
"mediaType": "image/jpeg",
"identifierType": "EXTERNAL_URL"
}
]
}
]
}
}
}
"""
acc = SocialAccount(
extra_data=loads(extra_data),
provider="linkedin_oauth2",
)
self.assertEqual("this-is-the-link", acc.get_avatar_url())
def test_id_missing(self):
extra_data = """
{
"profilePicture": {
"displayImage": "urn:li:digitalmediaAsset:12345abcdefgh-12abcd"
},
"Id": "1234567"
}
"""
self.assertRaises(
ProviderException, self.provider.extract_uid, loads(extra_data)
)
| LinkedInOAuth2Tests |
python | getsentry__sentry | tests/sentry/integrations/github/test_client.py | {
"start": 19493,
"end": 29806
} | class ____(TestCase):
jwt = "my_cool_jwt"
access_token = "access_token"
def setUp(self) -> None:
self.integration = self.create_integration(
organization=self.organization,
provider="github",
name="github-test",
external_id="github:1",
metadata={"access_token": None, "expires_at": None, "permissions": None},
status=ObjectStatus.ACTIVE,
)
self.installation = self.integration.get_installation(organization_id=self.organization.id)
self.gh_client = self.installation.get_client()
self.installation_id = self.gh_client._get_installation_id()
self.expires_at = (datetime.today() + timedelta(weeks=2)).isoformat()[:19] + "Z"
responses.add(
method=responses.POST,
url=f"https://api.github.com/app/installations/{self.installation_id}/access_tokens",
json={
"token": self.access_token,
"expires_at": self.expires_at,
"permissions": {
"administration": "read",
"contents": "read",
"issues": "write",
"metadata": "read",
"pull_requests": "read",
},
"repository_selection": "all",
},
match=[matchers.header_matcher({"Authorization": f"Bearer {self.jwt}"})],
status=200,
)
project = self.create_project(organization=self.organization)
self.repo = self.create_repo(
project=project,
name="Test-Organization/foo",
provider="integrations:github",
integration_id=self.integration.id,
url="https://github.com/Test-Organization/foo",
)
@responses.activate
@mock.patch("sentry.integrations.github.client.get_jwt", return_value=jwt)
def test__refresh_access_token(self, mock_jwt) -> None:
assert self.integration.metadata == {
"access_token": None,
"expires_at": None,
"permissions": None,
}
self.gh_client._refresh_access_token()
assert mock_jwt.called
self.integration.refresh_from_db()
assert self.integration.metadata == {
"access_token": self.access_token,
"expires_at": self.expires_at.rstrip("Z"),
"permissions": {
"administration": "read",
"contents": "read",
"issues": "write",
"metadata": "read",
"pull_requests": "read",
},
}
@responses.activate
@mock.patch("sentry.integrations.github.client.get_jwt", return_value=jwt)
def test__get_token(self, mock_jwt) -> None:
access_token_request = Request(
url=f"{self.gh_client.base_url}/repos/test-repo/issues"
).prepare()
jwt_request = Request(
url=f"{self.gh_client.base_url}/app/installations/{self.installation_id}"
).prepare()
with mock.patch(
"sentry.integrations.github.client.GithubProxyClient._refresh_access_token",
wraps=self.gh_client._refresh_access_token,
) as mock_refresh_token:
# Regular API requests should use access tokens
token = self.gh_client._get_token(prepared_request=access_token_request)
self.integration.refresh_from_db()
assert mock_jwt.call_count == 1
assert mock_refresh_token.call_count == 1
assert token == self.access_token == self.integration.metadata["access_token"]
# If the access token isn't expired, don't refresh it with an API call
mock_refresh_token.reset_mock()
mock_jwt.reset_mock()
token = self.gh_client._get_token(prepared_request=access_token_request)
assert mock_refresh_token.call_count == 0
assert mock_jwt.call_count == 0
assert token == self.access_token == self.integration.metadata["access_token"]
# Meta, app-installation requests should use jwts
token = self.gh_client._get_token(prepared_request=jwt_request)
assert mock_jwt.call_count == 1
assert mock_refresh_token.call_count == 0
assert token == self.jwt
@responses.activate
@mock.patch("sentry.integrations.github.client.get_jwt", return_value=jwt)
def test_get_access_token(self, _) -> None:
self.gh_client.integration.metadata["access_token"] = "access_token_1"
self.gh_client.integration.metadata["expires_at"] = "3000-01-01T00:00:00Z"
self.gh_client.integration.metadata["permissions"] = {
"administration": "read",
"contents": "read",
"issues": "write",
"metadata": "read",
"pull_requests": "read",
}
assert self.gh_client.get_access_token() == {
"access_token": "access_token_1",
"permissions": {
"administration": "read",
"contents": "read",
"issues": "write",
"metadata": "read",
"pull_requests": "read",
},
}
@responses.activate
@mock.patch("sentry.integrations.github.client.GithubProxyClient._get_token", return_value=None)
def test_authorize_request_invalid(self, mock_get_invalid_token) -> None:
request = Request(url=f"{self.gh_client.base_url}/repos/test-repo/issues").prepare()
self.gh_client.integration = None
self.gh_client.authorize_request(prepared_request=request)
assert "Authorization" not in request.headers
self.gh_client.integration = self.integration
self.gh_client.authorize_request(prepared_request=request)
assert mock_get_invalid_token.called
assert "Authorization" not in request.headers
@responses.activate
@mock.patch("sentry.integrations.github.client.get_jwt", return_value=jwt)
def test_authorize_request_valid(self, mock_jwt) -> None:
access_token_request = Request(
url=f"{self.gh_client.base_url}/repos/test-repo/issues"
).prepare()
jwt_request = Request(
url=f"{self.gh_client.base_url}/app/installations/{self.installation_id}"
).prepare()
# First request should refresh the token and add headers
self.gh_client.authorize_request(prepared_request=access_token_request)
assert mock_jwt.called
assert access_token_request.headers["Accept"] == "application/vnd.github+json"
assert self.access_token in access_token_request.headers["Authorization"]
mock_jwt.reset_mock()
access_token_request.headers.clear()
# Following requests should just add headers
self.gh_client.authorize_request(prepared_request=access_token_request)
assert not mock_jwt.called
assert access_token_request.headers["Accept"] == "application/vnd.github+json"
assert self.access_token in access_token_request.headers["Authorization"]
# JWT-authorized requests should be identified by request path
self.gh_client.authorize_request(prepared_request=jwt_request)
assert mock_jwt.called
assert jwt_request.headers["Accept"] == "application/vnd.github+json"
assert jwt_request.headers["Authorization"] == f"Bearer {self.jwt}"
@responses.activate
@mock.patch(
"sentry.integrations.github.client.GithubProxyClient._get_token", return_value=access_token
)
def test_integration_proxy_is_active(self, mock_get_token) -> None:
class GithubProxyTestClient(GitHubApiClient):
_use_proxy_url_for_tests = True
def assert_proxy_request(self, request, is_proxy=True):
assert (PROXY_BASE_PATH in request.url) == is_proxy
assert (PROXY_OI_HEADER in request.headers) == is_proxy
assert (PROXY_SIGNATURE_HEADER in request.headers) == is_proxy
# The following GitHub headers don't appear in proxied requests
assert ("Authorization" in request.headers) != is_proxy
assert ("Accept" in request.headers) != is_proxy
if is_proxy:
assert request.headers[PROXY_OI_HEADER] is not None
expected_proxy_path = "repos/test-repo/issues/123"
control_proxy_responses = add_control_silo_proxy_response(
method=responses.GET,
path=expected_proxy_path,
json={"ok": True},
status=200,
)
github_responses = responses.add(
method=responses.GET,
url=re.compile(rf"\S+{expected_proxy_path}$"),
json={"ok": True},
status=200,
)
with override_settings(SILO_MODE=SiloMode.MONOLITH):
client = GithubProxyTestClient(integration=self.integration)
client.get_issue("test-repo", "123")
request = responses.calls[0].request
assert github_responses.call_count == 1
assert "/repos/test-repo/issues" in request.url
assert client.base_url in request.url
client.assert_proxy_request(request, is_proxy=False)
responses.calls.reset()
with override_settings(SILO_MODE=SiloMode.CONTROL):
client = GithubProxyTestClient(integration=self.integration)
client.get_issue("test-repo", "123")
request = responses.calls[0].request
assert github_responses.call_count == 2
assert "/repos/test-repo/issues" in request.url
assert client.base_url in request.url
client.assert_proxy_request(request, is_proxy=False)
responses.calls.reset()
assert control_proxy_responses.call_count == 0
with override_settings(SILO_MODE=SiloMode.REGION):
client = GithubProxyTestClient(integration=self.integration)
client.get_issue("test-repo", "123")
request = responses.calls[0].request
assert control_proxy_responses.call_count == 1
assert client.base_url not in request.url
client.assert_proxy_request(request, is_proxy=True)
| GithubProxyClientTest |
python | django__django | tests/admin_filters/tests.py | {
"start": 4948,
"end": 5693
} | class ____(SimpleListFilter):
title = "Department Ownership"
parameter_name = "department_ownership"
def lookups(self, request, model_admin):
return [
("DEV_OWNED", "Owned by Dev Department"),
("OTHER", "Other"),
]
def queryset(self, request, queryset):
queryset = queryset.annotate(
owned_book_count=models.Count(
"employee__department",
filter=models.Q(employee__department__code="DEV"),
),
)
if self.value() == "DEV_OWNED":
return queryset.filter(owned_book_count__gt=0)
elif self.value() == "OTHER":
return queryset.filter(owned_book_count=0)
| DepartmentOwnershipListFilter |
python | facelessuser__pymdown-extensions | pymdownx/blocks/caption.py | {
"start": 2824,
"end": 7709
} | class ____(Treeprocessor):
"""Caption tree processor."""
def __init__(self, md, types, config):
"""Initialize."""
super().__init__(md)
self.auto = config['auto']
self.prepend = config['prepend']
self.type = ''
self.auto_level = max(0, config['auto_level'])
self.fig_types = types
def run(self, doc):
"""Update caption IDs and prefixes."""
parent_map = {c: p for p in doc.iter() for c in p}
last = dict.fromkeys(self.fig_types, 0)
counters = {k: [0] for k in self.fig_types}
fig_type = last_type = self.type
figs = []
fig_num = ''
# Calculate the depth and iteration at that depth of the given figure.
for el in doc.iter():
fig_num = ''
stack = -1
if el.tag == 'figure':
fig_type = last_type
prepend = False
skip = False
# Find caption appended or prepended
if '__figure_prepend' in el.attrib:
prepend = True
del el.attrib['__figure_prepend']
# Determine figure type
if '__figure_type' in el.attrib:
fig_type = el.attrib['__figure_type']
figs.append(el)
# See if we have an unknown type or the type has no prefix template.
if fig_type not in self.fig_types or not self.fig_types[fig_type]:
continue
else:
# Found a figure that was not generated by this plugin.
continue
# Handle a specified relative nesting depth
if '__figure_level' in el.attrib:
stack += int(el.attrib['__figure_level']) + 1
if self.auto_level and stack >= self.auto_level:
continue
else:
stack += 1
current = el
while True:
parent = parent_map.get(current, None)
# No more parents
if parent is None:
break
# Check if parent element is a figure of the current type
if parent.tag == 'figure' and parent.attrib['__figure_type'] == fig_type:
# See if position in stack is manually specified
level = '__figure_level' in parent.attrib
if level:
stack += int(parent.attrib['__figure_level']) + 1
else:
stack += 1
if level:
el.attrib['__figure_level'] = str(stack + 1)
# Ensure position in stack is not deeper than the specified level
if self.auto_level and stack >= self.auto_level:
skip = True
break
current = parent
if skip:
# Parent has been skipped so all children are also skipped
continue
# Found an appropriate figure at an acceptable depth
if stack > -1:
# Handle a manual number
if '__figure_num' in el.attrib:
fig_num = [int(x) for x in el.attrib['__figure_num'].split('.')]
del el.attrib['__figure_num']
new_stack = len(fig_num) - 1
el.attrib['__figure_level'] = new_stack - stack
stack = new_stack
# Increment counter
l = last[fig_type]
counter = counters[fig_type]
if stack > l:
counter.extend([1] * (stack - l))
elif stack == l:
counter[stack] += 1
else:
del counter[stack + 1:]
counter[-1] += 1
last[fig_type] = stack
last_type = fig_type
# Determine if manual number is not smaller than existing figure numbers at that depth
if fig_num and fig_num > counter:
counter[:] = fig_num[:]
# Apply prefix and ID
update_tag(
el,
fig_type,
'.'.join(str(x) for x in counter[:stack + 1]),
self.fig_types.get(fig_type, ''),
prepend,
self.md
)
# Clean up attributes
for fig in figs:
del fig.attrib['__figure_type']
if '__figure_level' in fig.attrib:
del fig.attrib['__figure_level']
| CaptionTreeprocessor |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/transfers/sftp_to_s3.py | {
"start": 1205,
"end": 4230
} | class ____(BaseOperator):
"""
Transfer files from an SFTP server to Amazon S3.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SFTPToS3Operator`
:param sftp_conn_id: The sftp connection id. The name or identifier for
establishing a connection to the SFTP server.
:param sftp_path: The sftp remote path. This is the specified file path
for downloading the file from the SFTP server.
:param s3_conn_id: The s3 connection id. The name or identifier for
establishing a connection to S3
:param s3_bucket: The targeted s3 bucket. This is the S3 bucket to where
the file is uploaded.
:param s3_key: The targeted s3 key. This is the specified path for
uploading the file to S3.
:param use_temp_file: If True, copies file first to local,
if False streams file from SFTP to S3.
:param fail_on_file_not_exist: If True, operator fails when file does not exist,
if False, operator will not fail and skips transfer. Default is True.
"""
template_fields: Sequence[str] = ("s3_key", "sftp_path", "s3_bucket")
def __init__(
self,
*,
s3_bucket: str,
s3_key: str,
sftp_path: str,
sftp_conn_id: str = "ssh_default",
s3_conn_id: str = "aws_default",
use_temp_file: bool = True,
fail_on_file_not_exist: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.sftp_conn_id = sftp_conn_id
self.sftp_path = sftp_path
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.s3_conn_id = s3_conn_id
self.use_temp_file = use_temp_file
self.fail_on_file_not_exist = fail_on_file_not_exist
@staticmethod
def get_s3_key(s3_key: str) -> str:
"""Parse the correct format for S3 keys regardless of how the S3 url is passed."""
parsed_s3_key = urlsplit(s3_key)
return parsed_s3_key.path.lstrip("/")
def execute(self, context: Context) -> None:
self.s3_key = self.get_s3_key(self.s3_key)
ssh_hook = SSHHook(ssh_conn_id=self.sftp_conn_id)
s3_hook = S3Hook(self.s3_conn_id)
sftp_client = ssh_hook.get_conn().open_sftp()
try:
sftp_client.stat(self.sftp_path)
except FileNotFoundError:
if self.fail_on_file_not_exist:
raise
self.log.info("File %s not found on SFTP server. Skipping transfer.", self.sftp_path)
return
if self.use_temp_file:
with NamedTemporaryFile("w") as f:
sftp_client.get(self.sftp_path, f.name)
s3_hook.load_file(filename=f.name, key=self.s3_key, bucket_name=self.s3_bucket, replace=True)
else:
with sftp_client.file(self.sftp_path, mode="rb") as data:
s3_hook.get_conn().upload_fileobj(data, self.s3_bucket, self.s3_key, Callback=self.log.info)
| SFTPToS3Operator |
python | tensorflow__tensorflow | tensorflow/python/profiler/pprof_profiler_test.py | {
"start": 1156,
"end": 5133
} | class ____(test.TestCase):
def testDataEmpty(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
graph = test.mock.MagicMock()
graph.get_operations.return_value = []
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEqual(0, len(profiles))
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEqual(0, len(profile_files))
def testRunMetadataEmpty(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
graph = test.mock.MagicMock()
op1 = test.mock.MagicMock()
op1.name = 'Add/123'
op1.traceback = [('a/b/file1', 10, 'some_var')]
op1.type = 'add'
graph.get_operations.return_value = [op1]
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEqual(0, len(profiles))
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEqual(0, len(profile_files))
def testValidProfile(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
node1 = step_stats_pb2.NodeExecStats(
node_name='Add/123',
op_start_rel_micros=3,
op_end_rel_micros=5,
all_end_rel_micros=4)
run_metadata = config_pb2.RunMetadata()
device1 = run_metadata.step_stats.dev_stats.add()
device1.device = 'deviceA'
device1.node_stats.extend([node1])
graph = test.mock.MagicMock()
op1 = test.mock.MagicMock()
op1.name = 'Add/123'
op1.traceback = [
('a/b/file1', 10, 'apply_op', 'abc'), ('a/c/file2', 12, 'my_op', 'def')]
op1.type = 'add'
graph.get_operations.return_value = [op1]
expected_proto = """sample_type {
type: 5
unit: 5
}
sample_type {
type: 6
unit: 7
}
sample_type {
type: 8
unit: 7
}
sample {
value: 1
value: 4
value: 2
label {
key: 1
str: 2
}
label {
key: 3
str: 4
}
}
string_table: ""
string_table: "node_name"
string_table: "Add/123"
string_table: "op_type"
string_table: "add"
string_table: "count"
string_table: "all_time"
string_table: "nanoseconds"
string_table: "op_time"
string_table: "Device 1 of 1: deviceA"
comment: 9
"""
# Test with protos
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEqual(1, len(profiles))
self.assertTrue('deviceA' in profiles)
self.assertEqual(expected_proto, str(profiles['deviceA']))
# Test with files
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEqual(1, len(profile_files))
with gzip.open(profile_files[0]) as profile_file:
profile_contents = profile_file.read()
profile = profile_pb2.Profile()
profile.ParseFromString(profile_contents)
self.assertEqual(expected_proto, str(profile))
@test_util.run_v1_only('b/120545219')
def testProfileWithWhileLoop(self):
options = config_pb2.RunOptions()
options.trace_level = config_pb2.RunOptions.FULL_TRACE
run_metadata = config_pb2.RunMetadata()
num_iters = 5
with self.cached_session() as sess:
i = constant_op.constant(0)
c = lambda i: math_ops.less(i, num_iters)
b = lambda i: math_ops.add(i, 1)
r = while_loop.while_loop(c, b, [i])
sess.run(r, options=options, run_metadata=run_metadata)
profiles = pprof_profiler.get_profiles(sess.graph, run_metadata)
self.assertEqual(1, len(profiles))
profile = next(iter(profiles.values()))
add_samples = [] # Samples for the while/Add node
for sample in profile.sample:
if profile.string_table[sample.label[0].str] == 'while/Add':
add_samples.append(sample)
# Values for same nodes are aggregated.
self.assertEqual(1, len(add_samples))
# Value of "count" should be equal to number of iterations.
self.assertEqual(num_iters, add_samples[0].value[0])
if __name__ == '__main__':
test.main()
| PprofProfilerTest |
python | plotly__plotly.py | plotly/graph_objs/parcoords/_dimension.py | {
"start": 233,
"end": 18396
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "parcoords"
_path_str = "parcoords.dimension"
_valid_props = {
"constraintrange",
"label",
"multiselect",
"name",
"range",
"templateitemname",
"tickformat",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"values",
"valuessrc",
"visible",
}
@property
def constraintrange(self):
"""
The domain range to which the filter on the dimension is
constrained. Must be an array of `[fromValue, toValue]` with
`fromValue <= toValue`, or if `multiselect` is not disabled,
you may give an array of arrays, where each inner array is
`[fromValue, toValue]`.
The 'constraintrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'constraintrange[0]' property accepts values of any type
(1) The 'constraintrange[1]' property accepts values of any type
* a 2D list where:
(0) The 'constraintrange[i][0]' property accepts values of any type
(1) The 'constraintrange[i][1]' property accepts values of any type
Returns
-------
list
"""
return self["constraintrange"]
@constraintrange.setter
def constraintrange(self, val):
self["constraintrange"] = val
@property
def label(self):
"""
The shown name of the dimension.
The 'label' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["label"]
@label.setter
def label(self, val):
self["label"] = val
@property
def multiselect(self):
"""
Do we allow multiple selection ranges or just a single range?
The 'multiselect' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["multiselect"]
@multiselect.setter
def multiselect(self, val):
self["multiselect"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def range(self):
"""
The domain range that represents the full, shown axis extent.
Defaults to the `values` extent. Must be an array of
`[fromValue, toValue]` with finite numbers as elements.
The 'range' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'range[0]' property is a number and may be specified as:
- An int or float
(1) The 'range[1]' property is a number and may be specified as:
- An int or float
Returns
-------
list
"""
return self["range"]
@range.setter
def range(self, val):
self["range"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ticktext`.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `tickvals`.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
@property
def values(self):
"""
Dimension values. `values[n]` represents the value of the `n`th
point in the dataset, therefore the `values` vector for all
dimensions must be the same (longer vectors will be truncated).
Each value must be a finite number.
The 'values' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["values"]
@values.setter
def values(self, val):
self["values"] = val
@property
def valuessrc(self):
"""
Sets the source reference on Chart Studio Cloud for `values`.
The 'valuessrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["valuessrc"]
@valuessrc.setter
def valuessrc(self, val):
self["valuessrc"] = val
@property
def visible(self):
"""
Shows the dimension when set to `true` (the default). Hides the
dimension for `false`.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def _prop_descriptions(self):
return """\
constraintrange
The domain range to which the filter on the dimension
is constrained. Must be an array of `[fromValue,
toValue]` with `fromValue <= toValue`, or if
`multiselect` is not disabled, you may give an array of
arrays, where each inner array is `[fromValue,
toValue]`.
label
The shown name of the dimension.
multiselect
Do we allow multiple selection ranges or just a single
range?
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
range
The domain range that represents the full, shown axis
extent. Defaults to the `values` extent. Must be an
array of `[fromValue, toValue]` with finite numbers as
elements.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
ticktext
Sets the text displayed at the ticks position via
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
values
Dimension values. `values[n]` represents the value of
the `n`th point in the dataset, therefore the `values`
vector for all dimensions must be the same (longer
vectors will be truncated). Each value must be a finite
number.
valuessrc
Sets the source reference on Chart Studio Cloud for
`values`.
visible
Shows the dimension when set to `true` (the default).
Hides the dimension for `false`.
"""
def __init__(
self,
arg=None,
constraintrange=None,
label=None,
multiselect=None,
name=None,
range=None,
templateitemname=None,
tickformat=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
values=None,
valuessrc=None,
visible=None,
**kwargs,
):
"""
Construct a new Dimension object
The dimensions (variables) of the parallel coordinates chart.
2..60 dimensions are supported.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.parcoords.Dimension`
constraintrange
The domain range to which the filter on the dimension
is constrained. Must be an array of `[fromValue,
toValue]` with `fromValue <= toValue`, or if
`multiselect` is not disabled, you may give an array of
arrays, where each inner array is `[fromValue,
toValue]`.
label
The shown name of the dimension.
multiselect
Do we allow multiple selection ranges or just a single
range?
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
range
The domain range that represents the full, shown axis
extent. Defaults to the `values` extent. Must be an
array of `[fromValue, toValue]` with finite numbers as
elements.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
ticktext
Sets the text displayed at the ticks position via
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
values
Dimension values. `values[n]` represents the value of
the `n`th point in the dataset, therefore the `values`
vector for all dimensions must be the same (longer
vectors will be truncated). Each value must be a finite
number.
valuessrc
Sets the source reference on Chart Studio Cloud for
`values`.
visible
Shows the dimension when set to `true` (the default).
Hides the dimension for `false`.
Returns
-------
Dimension
"""
super().__init__("dimensions")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.parcoords.Dimension
constructor must be a dict or
an instance of :class:`plotly.graph_objs.parcoords.Dimension`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("constraintrange", arg, constraintrange)
self._set_property("label", arg, label)
self._set_property("multiselect", arg, multiselect)
self._set_property("name", arg, name)
self._set_property("range", arg, range)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("tickformat", arg, tickformat)
self._set_property("ticktext", arg, ticktext)
self._set_property("ticktextsrc", arg, ticktextsrc)
self._set_property("tickvals", arg, tickvals)
self._set_property("tickvalssrc", arg, tickvalssrc)
self._set_property("values", arg, values)
self._set_property("valuessrc", arg, valuessrc)
self._set_property("visible", arg, visible)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Dimension |
python | sympy__sympy | sympy/geometry/plane.py | {
"start": 764,
"end": 26799
} | class ____(GeometryEntity):
"""
A plane is a flat, two-dimensional surface. A plane is the two-dimensional
analogue of a point (zero-dimensions), a line (one-dimension) and a solid
(three-dimensions). A plane can generally be constructed by two types of
inputs. They are:
- three non-collinear points
- a point and the plane's normal vector
Attributes
==========
p1
normal_vector
Examples
========
>>> from sympy import Plane, Point3D
>>> Plane(Point3D(1, 1, 1), Point3D(2, 3, 4), Point3D(2, 2, 2))
Plane(Point3D(1, 1, 1), (-1, 2, -1))
>>> Plane((1, 1, 1), (2, 3, 4), (2, 2, 2))
Plane(Point3D(1, 1, 1), (-1, 2, -1))
>>> Plane(Point3D(1, 1, 1), normal_vector=(1,4,7))
Plane(Point3D(1, 1, 1), (1, 4, 7))
"""
def __new__(cls, p1, a=None, b=None, **kwargs):
p1 = Point3D(p1, dim=3)
if a and b:
p2 = Point(a, dim=3)
p3 = Point(b, dim=3)
if Point3D.are_collinear(p1, p2, p3):
raise ValueError('Enter three non-collinear points')
a = p1.direction_ratio(p2)
b = p1.direction_ratio(p3)
normal_vector = tuple(Matrix(a).cross(Matrix(b)))
else:
a = kwargs.pop('normal_vector', a)
evaluate = kwargs.get('evaluate', True)
if is_sequence(a) and len(a) == 3:
normal_vector = Point3D(a).args if evaluate else a
else:
raise ValueError(filldedent('''
Either provide 3 3D points or a point with a
normal vector expressed as a sequence of length 3'''))
if all(coord.is_zero for coord in normal_vector):
raise ValueError('Normal vector cannot be zero vector')
return GeometryEntity.__new__(cls, p1, normal_vector, **kwargs)
def __contains__(self, o):
k = self.equation(x, y, z)
if isinstance(o, (LinearEntity, LinearEntity3D)):
d = Point3D(o.arbitrary_point(t))
e = k.subs([(x, d.x), (y, d.y), (z, d.z)])
return e.equals(0)
try:
o = Point(o, dim=3, strict=True)
d = k.xreplace(dict(zip((x, y, z), o.args)))
return d.equals(0)
except TypeError:
return False
def _eval_evalf(self, prec=15, **options):
pt, tup = self.args
dps = prec_to_dps(prec)
pt = pt.evalf(n=dps, **options)
tup = tuple([i.evalf(n=dps, **options) for i in tup])
return self.func(pt, normal_vector=tup, evaluate=False)
def angle_between(self, o):
"""Angle between the plane and other geometric entity.
Parameters
==========
LinearEntity3D, Plane.
Returns
=======
angle : angle in radians
Notes
=====
This method accepts only 3D entities as it's parameter, but if you want
to calculate the angle between a 2D entity and a plane you should
first convert to a 3D entity by projecting onto a desired plane and
then proceed to calculate the angle.
Examples
========
>>> from sympy import Point3D, Line3D, Plane
>>> a = Plane(Point3D(1, 2, 2), normal_vector=(1, 2, 3))
>>> b = Line3D(Point3D(1, 3, 4), Point3D(2, 2, 2))
>>> a.angle_between(b)
-asin(sqrt(21)/6)
"""
if isinstance(o, LinearEntity3D):
a = Matrix(self.normal_vector)
b = Matrix(o.direction_ratio)
c = a.dot(b)
d = sqrt(sum(i**2 for i in self.normal_vector))
e = sqrt(sum(i**2 for i in o.direction_ratio))
return asin(c/(d*e))
if isinstance(o, Plane):
a = Matrix(self.normal_vector)
b = Matrix(o.normal_vector)
c = a.dot(b)
d = sqrt(sum(i**2 for i in self.normal_vector))
e = sqrt(sum(i**2 for i in o.normal_vector))
return acos(c/(d*e))
def arbitrary_point(self, u=None, v=None):
""" Returns an arbitrary point on the Plane. If given two
parameters, the point ranges over the entire plane. If given 1
or no parameters, returns a point with one parameter which,
when varying from 0 to 2*pi, moves the point in a circle of
radius 1 about p1 of the Plane.
Examples
========
>>> from sympy import Plane, Ray
>>> from sympy.abc import u, v, t, r
>>> p = Plane((1, 1, 1), normal_vector=(1, 0, 0))
>>> p.arbitrary_point(u, v)
Point3D(1, u + 1, v + 1)
>>> p.arbitrary_point(t)
Point3D(1, cos(t) + 1, sin(t) + 1)
While arbitrary values of u and v can move the point anywhere in
the plane, the single-parameter point can be used to construct a
ray whose arbitrary point can be located at angle t and radius
r from p.p1:
>>> Ray(p.p1, _).arbitrary_point(r)
Point3D(1, r*cos(t) + 1, r*sin(t) + 1)
Returns
=======
Point3D
"""
circle = v is None
if circle:
u = _symbol(u or 't', real=True)
else:
u = _symbol(u or 'u', real=True)
v = _symbol(v or 'v', real=True)
x, y, z = self.normal_vector
a, b, c = self.p1.args
# x1, y1, z1 is a nonzero vector parallel to the plane
if x.is_zero and y.is_zero:
x1, y1, z1 = S.One, S.Zero, S.Zero
else:
x1, y1, z1 = -y, x, S.Zero
# x2, y2, z2 is also parallel to the plane, and orthogonal to x1, y1, z1
x2, y2, z2 = tuple(Matrix((x, y, z)).cross(Matrix((x1, y1, z1))))
if circle:
x1, y1, z1 = (w/sqrt(x1**2 + y1**2 + z1**2) for w in (x1, y1, z1))
x2, y2, z2 = (w/sqrt(x2**2 + y2**2 + z2**2) for w in (x2, y2, z2))
p = Point3D(a + x1*cos(u) + x2*sin(u), \
b + y1*cos(u) + y2*sin(u), \
c + z1*cos(u) + z2*sin(u))
else:
p = Point3D(a + x1*u + x2*v, b + y1*u + y2*v, c + z1*u + z2*v)
return p
@staticmethod
def are_concurrent(*planes):
"""Is a sequence of Planes concurrent?
Two or more Planes are concurrent if their intersections
are a common line.
Parameters
==========
planes: list
Returns
=======
Boolean
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(5, 0, 0), normal_vector=(1, -1, 1))
>>> b = Plane(Point3D(0, -2, 0), normal_vector=(3, 1, 1))
>>> c = Plane(Point3D(0, -1, 0), normal_vector=(5, -1, 9))
>>> Plane.are_concurrent(a, b)
True
>>> Plane.are_concurrent(a, b, c)
False
"""
planes = list(uniq(planes))
for i in planes:
if not isinstance(i, Plane):
raise ValueError('All objects should be Planes but got %s' % i.func)
if len(planes) < 2:
return False
planes = list(planes)
first = planes.pop(0)
sol = first.intersection(planes[0])
if sol == []:
return False
else:
line = sol[0]
for i in planes[1:]:
l = first.intersection(i)
if not l or l[0] not in line:
return False
return True
def distance(self, o):
"""Distance between the plane and another geometric entity.
Parameters
==========
Point3D, LinearEntity3D, Plane.
Returns
=======
distance
Notes
=====
This method accepts only 3D entities as it's parameter, but if you want
to calculate the distance between a 2D entity and a plane you should
first convert to a 3D entity by projecting onto a desired plane and
then proceed to calculate the distance.
Examples
========
>>> from sympy import Point3D, Line3D, Plane
>>> a = Plane(Point3D(1, 1, 1), normal_vector=(1, 1, 1))
>>> b = Point3D(1, 2, 3)
>>> a.distance(b)
sqrt(3)
>>> c = Line3D(Point3D(2, 3, 1), Point3D(1, 2, 2))
>>> a.distance(c)
0
"""
if self.intersection(o) != []:
return S.Zero
if isinstance(o, (Segment3D, Ray3D)):
a, b = o.p1, o.p2
pi, = self.intersection(Line3D(a, b))
if pi in o:
return self.distance(pi)
elif a in Segment3D(pi, b):
return self.distance(a)
else:
assert isinstance(o, Segment3D) is True
return self.distance(b)
# following code handles `Point3D`, `LinearEntity3D`, `Plane`
a = o if isinstance(o, Point3D) else o.p1
n = Point3D(self.normal_vector).unit
d = (a - self.p1).dot(n)
return abs(d)
def equals(self, o):
"""
Returns True if self and o are the same mathematical entities.
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(1, 2, 3), normal_vector=(1, 1, 1))
>>> b = Plane(Point3D(1, 2, 3), normal_vector=(2, 2, 2))
>>> c = Plane(Point3D(1, 2, 3), normal_vector=(-1, 4, 6))
>>> a.equals(a)
True
>>> a.equals(b)
True
>>> a.equals(c)
False
"""
if isinstance(o, Plane):
a = self.equation()
b = o.equation()
return cancel(a/b).is_constant()
else:
return False
def equation(self, x=None, y=None, z=None):
"""The equation of the Plane.
Examples
========
>>> from sympy import Point3D, Plane
>>> a = Plane(Point3D(1, 1, 2), Point3D(2, 4, 7), Point3D(3, 5, 1))
>>> a.equation()
-23*x + 11*y - 2*z + 16
>>> a = Plane(Point3D(1, 4, 2), normal_vector=(6, 6, 6))
>>> a.equation()
6*x + 6*y + 6*z - 42
"""
x, y, z = [i if i else Symbol(j, real=True) for i, j in zip((x, y, z), 'xyz')]
a = Point3D(x, y, z)
b = self.p1.direction_ratio(a)
c = self.normal_vector
return (sum(i*j for i, j in zip(b, c)))
def intersection(self, o):
""" The intersection with other geometrical entity.
Parameters
==========
Point, Point3D, LinearEntity, LinearEntity3D, Plane
Returns
=======
List
Examples
========
>>> from sympy import Point3D, Line3D, Plane
>>> a = Plane(Point3D(1, 2, 3), normal_vector=(1, 1, 1))
>>> b = Point3D(1, 2, 3)
>>> a.intersection(b)
[Point3D(1, 2, 3)]
>>> c = Line3D(Point3D(1, 4, 7), Point3D(2, 2, 2))
>>> a.intersection(c)
[Point3D(2, 2, 2)]
>>> d = Plane(Point3D(6, 0, 0), normal_vector=(2, -5, 3))
>>> e = Plane(Point3D(2, 0, 0), normal_vector=(3, 4, -3))
>>> d.intersection(e)
[Line3D(Point3D(78/23, -24/23, 0), Point3D(147/23, 321/23, 23))]
"""
if not isinstance(o, GeometryEntity):
o = Point(o, dim=3)
if isinstance(o, Point):
if o in self:
return [o]
else:
return []
if isinstance(o, (LinearEntity, LinearEntity3D)):
# recast to 3D
p1, p2 = o.p1, o.p2
if isinstance(o, Segment):
o = Segment3D(p1, p2)
elif isinstance(o, Ray):
o = Ray3D(p1, p2)
elif isinstance(o, Line):
o = Line3D(p1, p2)
else:
raise ValueError('unhandled linear entity: %s' % o.func)
if o in self:
return [o]
else:
a = Point3D(o.arbitrary_point(t))
p1, n = self.p1, Point3D(self.normal_vector)
# TODO: Replace solve with solveset, when this line is tested
c = solve((a - p1).dot(n), t)
if not c:
return []
else:
c = [i for i in c if i.is_real is not False]
if len(c) > 1:
c = [i for i in c if i.is_real]
if len(c) != 1:
raise Undecidable("not sure which point is real")
p = a.subs(t, c[0])
if p not in o:
return [] # e.g. a segment might not intersect a plane
return [p]
if isinstance(o, Plane):
if self.equals(o):
return [self]
if self.is_parallel(o):
return []
else:
x, y, z = map(Dummy, 'xyz')
a, b = Matrix([self.normal_vector]), Matrix([o.normal_vector])
c = list(a.cross(b))
d = self.equation(x, y, z)
e = o.equation(x, y, z)
result = list(linsolve([d, e], x, y, z))[0]
for i in (x, y, z): result = result.subs(i, 0)
return [Line3D(Point3D(result), direction_ratio=c)]
def is_coplanar(self, o):
""" Returns True if `o` is coplanar with self, else False.
Examples
========
>>> from sympy import Plane
>>> o = (0, 0, 0)
>>> p = Plane(o, (1, 1, 1))
>>> p2 = Plane(o, (2, 2, 2))
>>> p == p2
False
>>> p.is_coplanar(p2)
True
"""
if isinstance(o, Plane):
return not cancel(self.equation(x, y, z)/o.equation(x, y, z)).has(x, y, z)
if isinstance(o, Point3D):
return o in self
elif isinstance(o, LinearEntity3D):
return all(i in self for i in self)
elif isinstance(o, GeometryEntity): # XXX should only be handling 2D objects now
return all(i == 0 for i in self.normal_vector[:2])
def is_parallel(self, l):
"""Is the given geometric entity parallel to the plane?
Parameters
==========
LinearEntity3D or Plane
Returns
=======
Boolean
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(1,4,6), normal_vector=(2, 4, 6))
>>> b = Plane(Point3D(3,1,3), normal_vector=(4, 8, 12))
>>> a.is_parallel(b)
True
"""
if isinstance(l, LinearEntity3D):
a = l.direction_ratio
b = self.normal_vector
return sum(i*j for i, j in zip(a, b)) == 0
if isinstance(l, Plane):
a = Matrix(l.normal_vector)
b = Matrix(self.normal_vector)
return bool(a.cross(b).is_zero_matrix)
def is_perpendicular(self, l):
"""Is the given geometric entity perpendicualar to the given plane?
Parameters
==========
LinearEntity3D or Plane
Returns
=======
Boolean
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(1,4,6), normal_vector=(2, 4, 6))
>>> b = Plane(Point3D(2, 2, 2), normal_vector=(-1, 2, -1))
>>> a.is_perpendicular(b)
True
"""
if isinstance(l, LinearEntity3D):
a = Matrix(l.direction_ratio)
b = Matrix(self.normal_vector)
if a.cross(b).is_zero_matrix:
return True
else:
return False
elif isinstance(l, Plane):
a = Matrix(l.normal_vector)
b = Matrix(self.normal_vector)
if a.dot(b) == 0:
return True
else:
return False
else:
return False
@property
def normal_vector(self):
"""Normal vector of the given plane.
Examples
========
>>> from sympy import Point3D, Plane
>>> a = Plane(Point3D(1, 1, 1), Point3D(2, 3, 4), Point3D(2, 2, 2))
>>> a.normal_vector
(-1, 2, -1)
>>> a = Plane(Point3D(1, 1, 1), normal_vector=(1, 4, 7))
>>> a.normal_vector
(1, 4, 7)
"""
return self.args[1]
@property
def p1(self):
"""The only defining point of the plane. Others can be obtained from the
arbitrary_point method.
See Also
========
sympy.geometry.point.Point3D
Examples
========
>>> from sympy import Point3D, Plane
>>> a = Plane(Point3D(1, 1, 1), Point3D(2, 3, 4), Point3D(2, 2, 2))
>>> a.p1
Point3D(1, 1, 1)
"""
return self.args[0]
def parallel_plane(self, pt):
"""
Plane parallel to the given plane and passing through the point pt.
Parameters
==========
pt: Point3D
Returns
=======
Plane
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(1, 4, 6), normal_vector=(2, 4, 6))
>>> a.parallel_plane(Point3D(2, 3, 5))
Plane(Point3D(2, 3, 5), (2, 4, 6))
"""
a = self.normal_vector
return Plane(pt, normal_vector=a)
def perpendicular_line(self, pt):
"""A line perpendicular to the given plane.
Parameters
==========
pt: Point3D
Returns
=======
Line3D
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(1,4,6), normal_vector=(2, 4, 6))
>>> a.perpendicular_line(Point3D(9, 8, 7))
Line3D(Point3D(9, 8, 7), Point3D(11, 12, 13))
"""
a = self.normal_vector
return Line3D(pt, direction_ratio=a)
def perpendicular_plane(self, *pts):
"""
Return a perpendicular passing through the given points. If the
direction ratio between the points is the same as the Plane's normal
vector then, to select from the infinite number of possible planes,
a third point will be chosen on the z-axis (or the y-axis
if the normal vector is already parallel to the z-axis). If less than
two points are given they will be supplied as follows: if no point is
given then pt1 will be self.p1; if a second point is not given it will
be a point through pt1 on a line parallel to the z-axis (if the normal
is not already the z-axis, otherwise on the line parallel to the
y-axis).
Parameters
==========
pts: 0, 1 or 2 Point3D
Returns
=======
Plane
Examples
========
>>> from sympy import Plane, Point3D
>>> a, b = Point3D(0, 0, 0), Point3D(0, 1, 0)
>>> Z = (0, 0, 1)
>>> p = Plane(a, normal_vector=Z)
>>> p.perpendicular_plane(a, b)
Plane(Point3D(0, 0, 0), (1, 0, 0))
"""
if len(pts) > 2:
raise ValueError('No more than 2 pts should be provided.')
pts = list(pts)
if len(pts) == 0:
pts.append(self.p1)
if len(pts) == 1:
x, y, z = self.normal_vector
if x == y == 0:
dir = (0, 1, 0)
else:
dir = (0, 0, 1)
pts.append(pts[0] + Point3D(*dir))
p1, p2 = [Point(i, dim=3) for i in pts]
l = Line3D(p1, p2)
n = Line3D(p1, direction_ratio=self.normal_vector)
if l in n: # XXX should an error be raised instead?
# there are infinitely many perpendicular planes;
x, y, z = self.normal_vector
if x == y == 0:
# the z axis is the normal so pick a pt on the y-axis
p3 = Point3D(0, 1, 0) # case 1
else:
# else pick a pt on the z axis
p3 = Point3D(0, 0, 1) # case 2
# in case that point is already given, move it a bit
if p3 in l:
p3 *= 2 # case 3
else:
p3 = p1 + Point3D(*self.normal_vector) # case 4
return Plane(p1, p2, p3)
def projection_line(self, line):
"""Project the given line onto the plane through the normal plane
containing the line.
Parameters
==========
LinearEntity or LinearEntity3D
Returns
=======
Point3D, Line3D, Ray3D or Segment3D
Notes
=====
For the interaction between 2D and 3D lines(segments, rays), you should
convert the line to 3D by using this method. For example for finding the
intersection between a 2D and a 3D line, convert the 2D line to a 3D line
by projecting it on a required plane and then proceed to find the
intersection between those lines.
Examples
========
>>> from sympy import Plane, Line, Line3D, Point3D
>>> a = Plane(Point3D(1, 1, 1), normal_vector=(1, 1, 1))
>>> b = Line(Point3D(1, 1), Point3D(2, 2))
>>> a.projection_line(b)
Line3D(Point3D(4/3, 4/3, 1/3), Point3D(5/3, 5/3, -1/3))
>>> c = Line3D(Point3D(1, 1, 1), Point3D(2, 2, 2))
>>> a.projection_line(c)
Point3D(1, 1, 1)
"""
if not isinstance(line, (LinearEntity, LinearEntity3D)):
raise NotImplementedError('Enter a linear entity only')
a, b = self.projection(line.p1), self.projection(line.p2)
if a == b:
# projection does not imply intersection so for
# this case (line parallel to plane's normal) we
# return the projection point
return a
if isinstance(line, (Line, Line3D)):
return Line3D(a, b)
if isinstance(line, (Ray, Ray3D)):
return Ray3D(a, b)
if isinstance(line, (Segment, Segment3D)):
return Segment3D(a, b)
def projection(self, pt):
"""Project the given point onto the plane along the plane normal.
Parameters
==========
Point or Point3D
Returns
=======
Point3D
Examples
========
>>> from sympy import Plane, Point3D
>>> A = Plane(Point3D(1, 1, 2), normal_vector=(1, 1, 1))
The projection is along the normal vector direction, not the z
axis, so (1, 1) does not project to (1, 1, 2) on the plane A:
>>> b = Point3D(1, 1)
>>> A.projection(b)
Point3D(5/3, 5/3, 2/3)
>>> _ in A
True
But the point (1, 1, 2) projects to (1, 1) on the XY-plane:
>>> XY = Plane((0, 0, 0), (0, 0, 1))
>>> XY.projection((1, 1, 2))
Point3D(1, 1, 0)
"""
rv = Point(pt, dim=3)
if rv in self:
return rv
n = Point3D(self.normal_vector)
d = (rv - self.p1).dot(n) / n.dot(n)
return rv - d * n
def random_point(self, seed=None):
""" Returns a random point on the Plane.
Returns
=======
Point3D
Examples
========
>>> from sympy import Plane
>>> p = Plane((1, 0, 0), normal_vector=(0, 1, 0))
>>> r = p.random_point(seed=42) # seed value is optional
>>> r.n(3)
Point3D(2.29, 0, -1.35)
The random point can be moved to lie on the circle of radius
1 centered on p1:
>>> c = p.p1 + (r - p.p1).unit
>>> c.distance(p.p1).equals(1)
True
"""
if seed is not None:
rng = random.Random(seed)
else:
rng = random
params = {
x: 2*Rational(rng.gauss(0, 1)) - 1,
y: 2*Rational(rng.gauss(0, 1)) - 1}
return self.arbitrary_point(x, y).subs(params)
def parameter_value(self, other, u, v=None):
"""Return the parameter(s) corresponding to the given point.
Examples
========
>>> from sympy import pi, Plane
>>> from sympy.abc import t, u, v
>>> p = Plane((2, 0, 0), (0, 0, 1), (0, 1, 0))
By default, the parameter value returned defines a point
that is a distance of 1 from the Plane's p1 value and
in line with the given point:
>>> on_circle = p.arbitrary_point(t).subs(t, pi/4)
>>> on_circle.distance(p.p1)
1
>>> p.parameter_value(on_circle, t)
{t: pi/4}
Moving the point twice as far from p1 does not change
the parameter value:
>>> off_circle = p.p1 + (on_circle - p.p1)*2
>>> off_circle.distance(p.p1)
2
>>> p.parameter_value(off_circle, t)
{t: pi/4}
If the 2-value parameter is desired, supply the two
parameter symbols and a replacement dictionary will
be returned:
>>> p.parameter_value(on_circle, u, v)
{u: sqrt(10)/10, v: sqrt(10)/30}
>>> p.parameter_value(off_circle, u, v)
{u: sqrt(10)/5, v: sqrt(10)/15}
"""
if not isinstance(other, GeometryEntity):
other = Point(other, dim=self.ambient_dimension)
if not isinstance(other, Point):
raise ValueError("other must be a point")
if other == self.p1:
return other
if isinstance(u, Symbol) and v is None:
delta = self.arbitrary_point(u) - self.p1
eq = delta - (other - self.p1).unit
sol = solve(eq, u, dict=True)
elif isinstance(u, Symbol) and isinstance(v, Symbol):
pt = self.arbitrary_point(u, v)
sol = solve(pt - other, (u, v), dict=True)
else:
raise ValueError('expecting 1 or 2 symbols')
if not sol:
raise ValueError("Given point is not on %s" % func_name(self))
return sol[0] # {t: tval} or {u: uval, v: vval}
@property
def ambient_dimension(self):
return self.p1.ambient_dimension
| Plane |
python | kamyu104__LeetCode-Solutions | Python/change-minimum-characters-to-satisfy-one-of-three-conditions.py | {
"start": 54,
"end": 730
} | class ____(object):
def minCharacters(self, a, b):
"""
:type a: str
:type b: str
:rtype: int
"""
count1 = collections.Counter(ord(c)-ord('a') for c in a)
count2 = collections.Counter(ord(c)-ord('a') for c in b)
result = len(a) + len(b) - max((count1+count2).itervalues()) # condition 3
for i in xrange(26-1):
if i > 0:
count1[i] += count1[i-1]
count2[i] += count2[i-1]
result = min(result, len(a) - count1[i] + count2[i]) # condition 1
result = min(result, len(b) - count2[i] + count1[i]) # condition 2
return result
| Solution |
python | django__django | tests/custom_methods/models.py | {
"start": 149,
"end": 1184
} | class ____(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateField()
def __str__(self):
return self.headline
def was_published_today(self):
return self.pub_date == datetime.date.today()
def articles_from_same_day_1(self):
return Article.objects.filter(pub_date=self.pub_date).exclude(id=self.id)
def articles_from_same_day_2(self):
"""
Verbose version of get_articles_from_same_day_1, which does a custom
database query for the sake of demonstration.
"""
from django.db import connection
with connection.cursor() as cursor:
cursor.execute(
"""
SELECT id, headline, pub_date
FROM custom_methods_article
WHERE pub_date = %s
AND id != %s""",
[connection.ops.adapt_datefield_value(self.pub_date), self.id],
)
return [self.__class__(*row) for row in cursor.fetchall()]
| Article |
python | pyca__cryptography | tests/hazmat/primitives/test_hash_vectors.py | {
"start": 1908,
"end": 2264
} | class ____:
test_sha512 = generate_hash_test(
load_hash_vectors,
os.path.join("hashes", "SHA2"),
["SHA512LongMsg.rsp", "SHA512ShortMsg.rsp"],
hashes.SHA512(),
)
@pytest.mark.supported(
only_if=lambda backend: backend.hash_supported(hashes.SHA512_224()),
skip_message="Does not support SHA512/224",
)
| TestSHA512 |
python | cherrypy__cherrypy | cherrypy/tutorial/tut06_default_method.py | {
"start": 753,
"end": 2431
} | class ____:
"""The users app."""
@cherrypy.expose
def index(self):
"""Produce HTTP response body of the users app index URI."""
# Since this is just a stupid little example, we'll simply
# display a list of links to random, made-up users. In a real
# application, this could be generated from a database result set.
return """
<a href="./remi">Remi Delon</a><br/>
<a href="./hendrik">Hendrik Mans</a><br/>
<a href="./lorenzo">Lorenzo Lamas</a><br/>
"""
@cherrypy.expose
def default(self, user):
"""Produce HTTP response body of the users app fallback URI."""
# Here we react depending on the virtualPath -- the part of the
# path that could not be mapped to an object method. In a real
# application, we would probably do some database lookups here
# instead of the silly if/elif/else construct.
if user == 'remi':
out = 'Remi Delon, CherryPy lead developer'
elif user == 'hendrik':
out = 'Hendrik Mans, CherryPy co-developer & crazy German'
elif user == 'lorenzo':
out = 'Lorenzo Lamas, famous actor and singer!'
else:
out = 'Unknown user. :-('
return '%s (<a href="./">back</a>)' % out
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to HelloWorld().index().
cherrypy.quickstart(UsersPage(), config=tutconf)
| UsersPage |
python | numba__numba | numba/core/types/containers.py | {
"start": 6841,
"end": 6991
} | class ____(BaseContainerIterator):
"""
Type class for homogeneous tuple iterators.
"""
container_class = _HomogeneousTuple
| UniTupleIter |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/context.py | {
"start": 106312,
"end": 110489
} | class ____(_QueryEntity):
__slots__ = (
"_fetch_column",
"_row_processor",
"raw_column_index",
"translate_raw_column",
)
@classmethod
def _for_columns(
cls,
compile_state,
columns,
entities_collection,
raw_column_index,
is_current_entities,
parent_bundle=None,
):
for column in columns:
annotations = column._annotations
if "parententity" in annotations:
_entity = annotations["parententity"]
else:
_entity = sql_util.extract_first_column_annotation(
column, "parententity"
)
if _entity:
if "identity_token" in column._annotations:
_IdentityTokenEntity(
compile_state,
column,
entities_collection,
_entity,
raw_column_index,
is_current_entities,
parent_bundle=parent_bundle,
)
else:
_ORMColumnEntity(
compile_state,
column,
entities_collection,
_entity,
raw_column_index,
is_current_entities,
parent_bundle=parent_bundle,
)
else:
_RawColumnEntity(
compile_state,
column,
entities_collection,
raw_column_index,
is_current_entities,
parent_bundle=parent_bundle,
)
@property
def type(self):
return self.column.type
@property
def _non_hashable_value(self):
return not self.column.type.hashable
@property
def _null_column_type(self):
return self.column.type._isnull
def row_processor(self, context, result):
compile_state = context.compile_state
# the resulting callable is entirely cacheable so just return
# it if we already made one
if self._row_processor is not None:
getter, label_name, extra_entities = self._row_processor
if self.translate_raw_column:
extra_entities += (
context.query._raw_columns[self.raw_column_index],
)
return getter, label_name, extra_entities
# retrieve the column that would have been set up in
# setup_compile_state, to avoid doing redundant work
if self._fetch_column is not None:
column = self._fetch_column
else:
# fetch_column will be None when we are doing a from_statement
# and setup_compile_state may not have been called.
column = self.column
# previously, the RawColumnEntity didn't look for from_obj_alias
# however I can't think of a case where we would be here and
# we'd want to ignore it if this is the from_statement use case.
# it's not really a use case to have raw columns + from_statement
if compile_state._from_obj_alias:
column = compile_state._from_obj_alias.columns[column]
if column._annotations:
# annotated columns perform more slowly in compiler and
# result due to the __eq__() method, so use deannotated
column = column._deannotate()
if compile_state.compound_eager_adapter:
column = compile_state.compound_eager_adapter.columns[column]
getter = result._getter(column)
ret = getter, self._label_name, self._extra_entities
self._row_processor = ret
if self.translate_raw_column:
extra_entities = self._extra_entities + (
context.query._raw_columns[self.raw_column_index],
)
return getter, self._label_name, extra_entities
else:
return ret
| _ColumnEntity |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefaultClass3.py | {
"start": 2118,
"end": 2162
} | class ____[**P2 = P1, **P1 = ...]: ...
| ClassPB |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/types.py | {
"start": 2606,
"end": 3376
} | class ____(sqltypes.String):
"""Base for MySQL string types."""
def __init__(
self,
charset: Optional[str] = None,
collation: Optional[str] = None,
ascii: bool = False, # noqa
binary: bool = False,
unicode: bool = False,
national: bool = False,
**kw: Any,
):
self.charset = charset
# allow collate= or collation=
kw.setdefault("collation", kw.pop("collate", collation))
self.ascii = ascii
self.unicode = unicode
self.binary = binary
self.national = national
super().__init__(**kw)
def __repr__(self) -> str:
return util.generic_repr(
self, to_inspect=[_StringType, sqltypes.String]
)
| _StringType |
python | bokeh__bokeh | src/bokeh/events.py | {
"start": 12881,
"end": 13494
} | class ____(PlotEvent):
''' Announce the coordinates of a selection event on a plot.
Attributes:
geometry (dict) : a dictionary containing the coordinates of the
selection event.
final (bool) : whether the selection event is the last selection event
in the case of selections on every mousemove.
'''
event_name = "selectiongeometry"
def __init__(self, model: Plot | None, geometry: GeometryData | None = None, final: bool = True) -> None:
self.geometry = geometry
self.final = final
super().__init__(model=model)
| SelectionGeometry |
python | joke2k__faker | tests/providers/test_date_time.py | {
"start": 47903,
"end": 48380
} | class ____(unittest.TestCase):
"""Tests date_time in the vi_VN locale"""
def setUp(self):
self.fake = Faker("vi_VN")
Faker.seed(0)
def test_day(self):
day = self.fake.day_of_week()
assert isinstance(day, str)
assert day in ViVNProvider.DAY_NAMES.values()
def test_month(self):
month = self.fake.month_name()
assert isinstance(month, str)
assert month in ViVNProvider.MONTH_NAMES.values()
| TestViVn |
python | walkccc__LeetCode | solutions/2584. Split the Array to Make Coprime Products/2584.py | {
"start": 0,
"end": 1297
} | class ____:
def findValidSplit(self, nums: list[int]) -> int:
leftPrimeFactors = collections.Counter()
rightPrimeFactors = collections.Counter()
def getPrimeFactors(num: int) -> list[int]:
"""Gets the prime factors under sqrt(10^6)."""
primeFactors = []
for divisor in range(2, min(1000, num) + 1):
if num % divisor == 0:
primeFactors.append(divisor)
while num % divisor == 0:
num //= divisor
# Handle the case that `num` contains a prime factor > 1000.
if num > 1:
primeFactors.append(num)
return primeFactors
for num in nums:
for primeFactor in getPrimeFactors(num):
rightPrimeFactors[primeFactor] += 1
for i in range(len(nums) - 1):
for primeFactor in getPrimeFactors(nums[i]):
rightPrimeFactors[primeFactor] -= 1
if rightPrimeFactors[primeFactor] == 0:
# rightPrimeFactors[primeFactor] == 0, so no need to track
# leftPrimeFactors[primeFactor].
del rightPrimeFactors[primeFactor]
del leftPrimeFactors[primeFactor]
else:
# Otherwise, need to track leftPrimeFactors[primeFactor].
leftPrimeFactors[primeFactor] += 1
if not leftPrimeFactors:
return i
return -1
| Solution |
python | dagster-io__dagster | python_modules/libraries/dagster-azure/dagster_azure/blob/fake_blob_client.py | {
"start": 3032,
"end": 4863
} | class ____:
"""Stateful mock of an Blob blob client for testing."""
def __init__(self):
self.contents = None
self.lease = None
def start_copy_from_url(self, url):
self.contents = url
def get_blob_properties(self):
if self.contents is None:
raise ResourceNotFoundError("File does not exist!")
return {"lease": self.lease}
def upload_blob(self, contents, overwrite=False, lease=None):
if self.lease is not None:
if lease != self.lease:
raise Exception("Invalid lease!")
if self.contents is None or overwrite is True:
if isinstance(contents, str):
self.contents = contents.encode("utf8")
elif isinstance(contents, io.TextIOBase):
self.contents = contents.read().encode("utf8")
elif isinstance(contents, io.IOBase):
self.contents = contents.read()
elif isinstance(contents, bytes):
self.contents = contents
# Python 2 compatibility - no base class for `file` type
elif hasattr(contents, "read"):
self.contents = contents.read()
else:
self.contents = contents
@property
def url(self):
return ":memory:"
@contextmanager
def acquire_lease(self, lease_duration=-1):
if self.lease is None:
self.lease = random.randint(0, 2**9)
try:
yield self.lease
finally:
self.lease = None
else:
raise Exception("Lease already held")
def download_blob(self):
if self.contents is None:
raise ResourceNotFoundError("File does not exist!")
return FakeBlobDownloader(contents=self.contents)
| FakeBlobClient |
python | doocs__leetcode | lcci/08.11.Coin/Solution.py | {
"start": 0,
"end": 405
} | class ____:
def waysToChange(self, n: int) -> int:
mod = 10**9 + 7
coins = [25, 10, 5, 1]
f = [[0] * (n + 1) for _ in range(5)]
f[0][0] = 1
for i, c in enumerate(coins, 1):
for j in range(n + 1):
f[i][j] = f[i - 1][j]
if j >= c:
f[i][j] = (f[i][j] + f[i][j - c]) % mod
return f[-1][n]
| Solution |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/autograd_cache.py | {
"start": 10470,
"end": 14335
} | class ____(FxGraphHashDetails):
"""
Object to capture all the details for a dynamo graph module relevant to computing
a safe and stable cache key for AOTAutograd.
"""
def get_triton_source_codes_from_gm(
self,
gm: torch.fx.GraphModule,
):
assert has_triton_package(), "Triton is not available"
triton_kernels = []
for module in gm.modules():
if not isinstance(module, torch.fx.GraphModule):
continue
for node in module.graph.nodes:
if isinstance(node.target, torch._ops.OpOverloadPacket):
attrs = node.target._dir
for attr in attrs:
if custom_op := getattr(node.target, attr, None):
kernels = torch._library.triton.get_triton_kernels_for_op(
custom_op._name
)
triton_kernels.extend(kernels)
elif isinstance(node.target, torch._ops.OpOverload):
kernels = torch._library.triton.get_triton_kernels_for_op(
node.target._name
)
triton_kernels.extend(kernels)
triton_kernel_source_codes = []
from torch._inductor.codegen.wrapper import (
user_defined_triton_kernel_transitive_closure_source_code,
)
for kernel in triton_kernels:
from triton.runtime.autotuner import Autotuner
if isinstance(kernel, Autotuner):
# Grab the Inner JITFunction
kernel = kernel.fn
source_codes = user_defined_triton_kernel_transitive_closure_source_code(
kernel
)
triton_kernel_source_codes.append(source_codes)
return triton_kernel_source_codes
def __init__(
self,
gm: torch.fx.GraphModule,
example_inputs,
aot_config: AOTConfig,
fx_config: _CompileFxKwargs,
):
# FxGraphHashDetails contains all the keys related to inductor. Also includes some system info
self.aot_config = aot_config
self.grad_enabled = torch.is_grad_enabled()
self.disable_amp = torch._C._is_any_autocast_enabled()
self.deterministic_algorithms = torch.are_deterministic_algorithms_enabled()
self.autograd_config = config.save_config()
self.saved_tensors_hooks_fx_wrap_cache_hashes: tuple[list[str], list[str]] = (
[],
[],
)
if has_triton_package():
self.triton_kernel_source_codes = self.get_triton_source_codes_from_gm(gm)
if hasattr(gm, "saved_tensors_hooks_pack_0"):
def _add_wrapped_user_cache_hashes(_gm, _l):
for node in _gm.graph.nodes:
if node.meta and node.meta.get("is_wrapped", False):
_l.append(node.meta["user_cache_hash"])
_add_wrapped_user_cache_hashes(
gm.saved_tensors_hooks_pack_0,
self.saved_tensors_hooks_fx_wrap_cache_hashes[0],
)
_add_wrapped_user_cache_hashes(
gm.saved_tensors_hooks_unpack_0,
self.saved_tensors_hooks_fx_wrap_cache_hashes[1],
)
try:
# FXGraphCache has constraints on what can be pickled in its inductor
# config. Check that the gm is cacheable by inductor first,
# and if it raises an exception, also bypass on our end.
FxGraphCache._check_can_cache(gm)
super().__init__(gm, example_inputs, fx_config, [])
except BypassFxGraphCache as e:
# Sometimes inductor configs are unpickleable and can fail
raise BypassAOTAutogradCache(str(e)) from e
| AOTAutogradCacheDetails |
python | ansible__ansible | test/lib/ansible_test/_internal/cli/argparsing/__init__.py | {
"start": 2740,
"end": 4254
} | class ____(argparse.Action, metaclass=abc.ABCMeta):
"""Base class for actions that parse composite arguments."""
documentation_state: dict[t.Type[CompositeAction], DocumentationState] = {}
def __init__(
self,
*args,
**kwargs,
):
self.definition = self.create_parser()
self.documentation_state[type(self)] = documentation_state = DocumentationState()
self.definition.document(documentation_state)
kwargs.update(dest=self.definition.dest)
super().__init__(*args, **kwargs)
register_safe_action(type(self))
@abc.abstractmethod
def create_parser(self) -> NamespaceParser:
"""Return a namespace parser to parse the argument associated with this action."""
def __call__(
self,
parser,
namespace,
values,
option_string=None,
):
state = ParserState(mode=ParserMode.PARSE, namespaces=[namespace], remainder=values)
try:
self.definition.parse(state)
except ParserError as ex:
error = str(ex)
except CompletionError as ex:
error = ex.message
else:
return
if get_comp_type():
# FUTURE: It may be possible to enhance error handling by surfacing this error message during downstream completion.
return # ignore parse errors during completion to avoid breaking downstream completion
raise argparse.ArgumentError(self, error)
| CompositeAction |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dlp.py | {
"start": 11436,
"end": 12245
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_delete_job_trigger(self, mock_hook):
mock_hook.return_value.delete_job_trigger.return_value = mock.MagicMock()
operator = CloudDLPDeleteJobTriggerOperator(
job_trigger_id=TRIGGER_ID, project_id=PROJECT_ID, task_id="id"
)
operator.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.delete_job_trigger.assert_called_once_with(
job_trigger_id=TRIGGER_ID,
project_id=PROJECT_ID,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestCloudDLPDeleteJobTriggerOperator |
python | getsentry__sentry | src/sentry/integrations/github_enterprise/integration.py | {
"start": 12906,
"end": 14401
} | class ____:
def dispatch(self, request: HttpRequest, pipeline: IntegrationPipeline) -> HttpResponseBase:
if request.method == "POST":
form = InstallationForm(request.POST)
if form.is_valid():
form_data = form.cleaned_data
form_data["url"] = urlparse(form_data["url"]).netloc
if not form_data["public_link"]:
form_data["public_link"] = None
pipeline.bind_state("installation_data", form_data)
pipeline.bind_state(
"oauth_config_information",
{
"access_token_url": "https://{}/login/oauth/access_token".format(
form_data.get("url")
),
"authorize_url": "https://{}/login/oauth/authorize".format(
form_data.get("url")
),
"client_id": form_data.get("client_id"),
"client_secret": form_data.get("client_secret"),
"verify_ssl": form_data.get("verify_ssl"),
},
)
return pipeline.next_step()
else:
form = InstallationForm()
return render_to_response(
template="sentry/integrations/github-enterprise-config.html",
context={"form": form},
request=request,
)
| InstallationConfigView |
python | airbytehq__airbyte | airbyte-integrations/bases/connector-acceptance-test/connector_acceptance_test/base.py | {
"start": 185,
"end": 581
} | class ____:
@classmethod
def config_key(cls):
"""Name of the test in configuration file, used to override test inputs,"""
class_name = cls.__name__
if class_name.startswith("Test"):
class_name = class_name[len("Test") :]
return inflection.underscore(class_name)
MANDATORY_FOR_TEST_STRICTNESS_LEVELS = [Config.TestStrictnessLevel.high]
| BaseTest |
python | ansible__ansible | lib/ansible/inventory/host.py | {
"start": 1252,
"end": 4938
} | class ____:
"""A single ansible host."""
base_type = InventoryObjectType.HOST
# __slots__ = [ 'name', 'vars', 'groups' ]
def __eq__(self, other):
if not isinstance(other, Host):
return False
return self._uuid == other._uuid
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.name)
def __str__(self):
return self.get_name()
def __repr__(self):
return self.get_name()
def __init__(self, name: str, port: int | str | None = None, gen_uuid: bool = True) -> None:
name = helpers.remove_trust(name)
self.vars: dict[str, t.Any] = {}
self.groups: list[Group] = []
self._uuid: str | None = None
self.name: str = name
self.address: str = name
if port:
self.set_variable('ansible_port', int(port))
if gen_uuid:
self._uuid = get_unique_id()
self.implicit: bool = False
def get_name(self) -> str:
return self.name
def populate_ancestors(self, additions: c.Iterable[Group] | None = None) -> None:
# populate ancestors
if additions is None:
for group in self.groups:
self.add_group(group)
else:
for group in additions:
if group not in self.groups:
self.groups.append(group)
def add_group(self, group: Group) -> bool:
added = False
# populate ancestors first
for oldg in group.get_ancestors():
if oldg not in self.groups:
self.groups.append(oldg)
# actually add group
if group not in self.groups:
self.groups.append(group)
added = True
return added
def remove_group(self, group: Group) -> bool:
removed = False
if group in self.groups:
self.groups.remove(group)
removed = True
# remove exclusive ancestors, xcept all!
for oldg in group.get_ancestors():
if oldg.name != 'all':
for childg in self.groups:
if oldg in childg.get_ancestors():
break
else:
self.remove_group(oldg)
return removed
def set_variable(self, key: str, value: t.Any) -> None:
key = helpers.remove_trust(key)
try:
validate_variable_name(key)
except AnsibleError as ex:
Display().deprecated(msg=f'Accepting inventory variable with invalid name {key!r}.', version='2.23', help_text=ex._help_text, obj=ex.obj)
if key in self.vars and isinstance(self.vars[key], MutableMapping) and isinstance(value, Mapping):
self.vars = combine_vars(self.vars, {key: value})
else:
self.vars[key] = value
def get_groups(self) -> list[Group]:
return self.groups
def get_magic_vars(self) -> dict[str, t.Any]:
results: dict[str, t.Any] = dict(
inventory_hostname=self.name,
)
# FUTURE: these values should be dynamically calculated on access ala the rest of magic vars
if patterns['ipv4'].match(self.name) or patterns['ipv6'].match(self.name):
results['inventory_hostname_short'] = self.name
else:
results['inventory_hostname_short'] = self.name.split('.')[0]
results['group_names'] = sorted([g.name for g in self.get_groups() if g.name != 'all'])
return results
def get_vars(self) -> dict[str, t.Any]:
return combine_vars(self.vars, self.get_magic_vars())
| Host |
python | pytest-dev__pytest | src/_pytest/terminal.py | {
"start": 61369,
"end": 64432
} | class ____:
"""Terminal progress reporting plugin using OSC 9;4 ANSI sequences.
Emits OSC 9;4 sequences to indicate test progress to terminal
tabs/windows/etc.
Not all terminal emulators support this feature.
Ref: https://conemu.github.io/en/AnsiEscapeCodes.html#ConEmu_specific_OSC
"""
def __init__(self, tr: TerminalReporter) -> None:
self._tr = tr
self._session: Session | None = None
self._has_failures = False
def _emit_progress(
self,
state: Literal["remove", "normal", "error", "indeterminate", "paused"],
progress: int | None = None,
) -> None:
"""Emit OSC 9;4 sequence for indicating progress to the terminal.
:param state:
Progress state to set.
:param progress:
Progress value 0-100. Required for "normal", optional for "error"
and "paused", otherwise ignored.
"""
assert progress is None or 0 <= progress <= 100
# OSC 9;4 sequence: ESC ] 9 ; 4 ; state ; progress ST
# ST can be ESC \ or BEL. ESC \ seems better supported.
match state:
case "remove":
sequence = "\x1b]9;4;0;\x1b\\"
case "normal":
assert progress is not None
sequence = f"\x1b]9;4;1;{progress}\x1b\\"
case "error":
if progress is not None:
sequence = f"\x1b]9;4;2;{progress}\x1b\\"
else:
sequence = "\x1b]9;4;2;\x1b\\"
case "indeterminate":
sequence = "\x1b]9;4;3;\x1b\\"
case "paused":
if progress is not None:
sequence = f"\x1b]9;4;4;{progress}\x1b\\"
else:
sequence = "\x1b]9;4;4;\x1b\\"
self._tr.write_raw(sequence, flush=True)
@hookimpl
def pytest_sessionstart(self, session: Session) -> None:
self._session = session
# Show indeterminate progress during collection.
self._emit_progress("indeterminate")
@hookimpl
def pytest_collection_finish(self) -> None:
assert self._session is not None
if self._session.testscollected > 0:
# Switch from indeterminate to 0% progress.
self._emit_progress("normal", 0)
@hookimpl
def pytest_runtest_logreport(self, report: TestReport) -> None:
if report.failed:
self._has_failures = True
# Let's consider the "call" phase for progress.
if report.when != "call":
return
# Calculate and emit progress.
assert self._session is not None
collected = self._session.testscollected
if collected > 0:
reported = self._tr.reported_progress
progress = min(reported * 100 // collected, 100)
self._emit_progress("error" if self._has_failures else "normal", progress)
@hookimpl
def pytest_sessionfinish(self) -> None:
self._emit_progress("remove")
| TerminalProgressPlugin |
python | gabrielfalcao__HTTPretty | httpretty/core.py | {
"start": 65117,
"end": 69616
} | class ____(object):
"""`context-manager <https://docs.python.org/3/reference/datamodel.html#context-managers>`_ for enabling HTTPretty.
.. tip:: Also available under the alias :py:func:`httpretty.enabled`
.. testcode::
import json
import httpretty
httpretty.register_uri(httpretty.GET, 'https://httpbin.org/ip', body=json.dumps({'origin': '42.42.42.42'}))
with httpretty.enabled():
response = requests.get('https://httpbin.org/ip')
assert httpretty.latest_requests[-1].url == 'https://httpbin.org/ip'
assert response.json() == {'origin': '42.42.42.42'}
"""
def __init__(self, allow_net_connect=True, verbose=False):
self.allow_net_connect = allow_net_connect
self.verbose = verbose
def __enter__(self):
httpretty.reset()
httpretty.enable(allow_net_connect=self.allow_net_connect, verbose=self.verbose)
def __exit__(self, exc_type, exc_value, db):
httpretty.disable()
httpretty.reset()
def httprettified(test=None, allow_net_connect=True, verbose=False):
"""decorator for test functions
.. tip:: Also available under the alias :py:func:`httpretty.activate`
:param test: a callable
example usage with `nosetests <https://nose.readthedocs.io/en/latest/>`_
.. testcode::
import sure
from httpretty import httprettified
@httprettified
def test_using_nosetests():
httpretty.register_uri(
httpretty.GET,
'https://httpbin.org/ip'
)
response = requests.get('https://httpbin.org/ip')
response.json().should.equal({
"message": "HTTPretty :)"
})
example usage with `unittest module <https://docs.python.org/3/library/unittest.html>`_
.. testcode::
import unittest
from sure import expect
from httpretty import httprettified
@httprettified
class TestWithPyUnit(unittest.TestCase):
def test_httpbin(self):
httpretty.register_uri(httpretty.GET, 'https://httpbin.org/ip')
response = requests.get('https://httpbin.org/ip')
expect(response.json()).to.equal({
"message": "HTTPretty :)"
})
"""
def decorate_unittest_TestCase_setUp(klass):
# Prefer addCleanup (added in python 2.7), but fall back
# to using tearDown if it isn't available
use_addCleanup = hasattr(klass, 'addCleanup')
original_setUp = (klass.setUp
if hasattr(klass, 'setUp')
else None)
def new_setUp(self):
httpretty.reset()
httpretty.enable(allow_net_connect, verbose=verbose)
if use_addCleanup:
self.addCleanup(httpretty.disable)
if original_setUp:
original_setUp(self)
klass.setUp = new_setUp
if not use_addCleanup:
original_tearDown = (klass.setUp
if hasattr(klass, 'tearDown')
else None)
def new_tearDown(self):
httpretty.disable()
httpretty.reset()
if original_tearDown:
original_tearDown(self)
klass.tearDown = new_tearDown
return klass
def decorate_test_methods(klass):
for attr in dir(klass):
if not attr.startswith('test_'):
continue
attr_value = getattr(klass, attr)
if not hasattr(attr_value, "__call__"):
continue
setattr(klass, attr, decorate_callable(attr_value))
return klass
def is_unittest_TestCase(klass):
try:
import unittest
return issubclass(klass, unittest.TestCase)
except ImportError:
return False
def decorate_class(klass):
if is_unittest_TestCase(klass):
return decorate_unittest_TestCase_setUp(klass)
return decorate_test_methods(klass)
def decorate_callable(test):
@functools.wraps(test)
def wrapper(*args, **kw):
with httprettized(allow_net_connect):
return test(*args, **kw)
return wrapper
if isinstance(test, type):
return decorate_class(test)
elif callable(test):
return decorate_callable(test)
return decorate_callable
| httprettized |
python | numba__llvmlite | llvmlite/ir/instructions.py | {
"start": 17418,
"end": 18093
} | class ____(Instruction):
def __init__(self, parent, val, ptr, ordering, align):
    # A 'store atomic' produces no result value, hence VoidType.
    super(StoreAtomicInstr, self).__init__(parent, types.VoidType(),
                                           "store atomic", [val, ptr])
    self.ordering = ordering  # atomic memory ordering string
    self.align = align  # alignment in bytes, emitted in the IR
self.align = align
def descr(self, buf):
    """Append the textual IR for this 'store atomic' instruction to *buf*."""
    val, ptr = self.operands
    metadata = self._stringify_metadata(leading_comma=True)
    buf.append("store atomic %s %s, %s %s %s, align %s%s\n" % (
        val.type,
        val.get_reference(),
        ptr.type,
        ptr.get_reference(),
        self.ordering,
        self.align,
        metadata,
    ))
| StoreAtomicInstr |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/translate.py | {
"start": 2397,
"end": 6082
} | class ____(GoogleCloudBaseOperator):
"""
Translate a string or list of strings.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudTranslateTextOperator`
See https://cloud.google.com/translate/docs/translating-text
Execute method returns str or list.
This is a list of dictionaries for each queried value. Each
dictionary typically contains three keys (though not all will be present in all cases):
* ``detectedSourceLanguage``: The detected language (as an ISO 639-1 language code) of the text.
* ``translatedText``: The translation of the text into the target language.
* ``input``: The corresponding input value.
* ``model``: The model used to translate the text.
If only a single value is passed, then only a single
dictionary is set as the XCom return value.
:param values: String or list of strings to translate.
:param target_language: The language to translate results into. This is required by the API.
:param format_: (Optional) One of ``text`` or ``html``, to specify if the input text is plain text or HTML.
:param source_language: (Optional) The language of the text to be translated.
:param model: (Optional) The model used to translate the text, such as ``'base'`` or ``'nmt'``.
:param impersonation_chain: Optional service account to impersonate using short-term credentials, or
chained list of accounts required to get the access_token of the last account in the list, which
will be impersonated in the request. If set as a string, the account must grant the originating
account the Service Account Token Creator IAM role. If set as a sequence, the identities from
the list must grant Service Account Token Creator IAM role to the directly preceding identity,
with the first account from the list granting this role to the originating account (templated).
"""
# [START translate_template_fields]
template_fields: Sequence[str] = (
"values",
"target_language",
"format_",
"source_language",
"model",
"gcp_conn_id",
"impersonation_chain",
)
# [END translate_template_fields]
def __init__(
    self,
    *,
    values: list[str] | str,
    target_language: str,
    format_: str,
    source_language: str | None,
    model: str,
    gcp_conn_id: str = "google_cloud_default",
    impersonation_chain: str | Sequence[str] | None = None,
    **kwargs,
) -> None:
    # Parameters are documented in the class docstring. Most of them are
    # templated fields, so they are stored verbatim for later rendering.
    super().__init__(**kwargs)
    self.values = values
    self.target_language = target_language
    self.format_ = format_
    self.source_language = source_language
    self.model = model
    self.gcp_conn_id = gcp_conn_id
    self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
    """Translate ``self.values`` via :class:`CloudTranslateHook`.

    :param context: Airflow task context (unused directly).
    :return: the translation result(s) returned by the hook.
    :raises AirflowException: when the underlying API call raises ``ValueError``.
    """
    hook = CloudTranslateHook(
        gcp_conn_id=self.gcp_conn_id,
        impersonation_chain=self.impersonation_chain,
    )
    # Keep the try body limited to the call that can actually raise.
    try:
        translation = hook.translate(
            values=self.values,
            target_language=self.target_language,
            format_=self.format_,
            source_language=self.source_language,
            model=self.model,
        )
    except ValueError as e:
        self.log.error("An error has been thrown from translate method:")
        self.log.error(e)
        # Chain the original exception so the full traceback is preserved.
        raise AirflowException(e) from e
    self.log.debug("Translation %s", translation)
    return translation
| CloudTranslateTextOperator |
python | pytorch__pytorch | torch/_library/fake_class_registry.py | {
"start": 2867,
"end": 2994
} | class ____(Protocol):
@classmethod
def from_real(cls, real_obj: torch.ScriptObject):
    """Construct the fake object from a real ``torch.ScriptObject``.

    Protocol stub: implementers provide the actual conversion.
    """
    pass
| HasStaticMethodFromReal |
python | django__django | django/http/response.py | {
"start": 24970,
"end": 26220
} | class ____(HttpResponse):
"""
An HTTP response class that consumes data to be serialized to JSON.
:param data: Data to be dumped into json. By default only ``dict`` objects
are allowed to be passed due to a security flaw before ECMAScript 5. See
the ``safe`` parameter for more information.
:param encoder: Should be a json encoder class. Defaults to
``django.core.serializers.json.DjangoJSONEncoder``.
:param safe: Controls if only ``dict`` objects may be serialized. Defaults
to ``True``.
:param json_dumps_params: A dictionary of kwargs passed to json.dumps().
"""
def __init__(
    self,
    data,
    encoder=DjangoJSONEncoder,
    safe=True,
    json_dumps_params=None,
    **kwargs,
):
    """Serialize *data* to JSON and initialize the underlying HTTP response.

    When ``safe`` is true, only ``dict`` payloads are accepted.
    """
    if safe and not isinstance(data, dict):
        raise TypeError(
            "In order to allow non-dict objects to be serialized set the "
            "safe parameter to False."
        )
    kwargs.setdefault("content_type", "application/json")
    serialized = json.dumps(data, cls=encoder, **(json_dumps_params or {}))
    super().__init__(content=serialized, **kwargs)
| JsonResponse |
python | pytorch__pytorch | torch/_inductor/wrapper_benchmark.py | {
"start": 5256,
"end": 16431
} | class ____:
category: str  # kernel category label, e.g. "triton_pointwise" or "unknown"
key: str  # profiler event key (the kernel name)
self_device_time_ms: float  # self device time per benchmark run, in milliseconds
# the benchmark is run multiple times and we average the count across all the
# runs. It should be an integer but define a float just in case.
count: float
def parse_profile_event_list(
    benchmark_name: str,
    event_list: torch.autograd.profiler_util.EventList,
    wall_time_ms: float,
    nruns: int,
    device_name: str,
) -> None:
    """
    Parse and generate a report for an event_list.

    Groups device-side profiler events into kernel categories (by the
    "triton_*" name prefix), prints a per-category table, and emits one
    machine-gatherable "Output for tabulate:" summary line.
    """

    def get_self_device_time(
        ev: torch.autograd.profiler_util.EventList,
    ) -> float:
        """
        ev.self_device_time_total is in microsecond. Convert to millisecond.
        """
        return ev.self_device_time_total / 1000 / nruns  # type: ignore[attr-defined]

    # category name -> events observed for that category
    all_events: dict[str, list[ProfileEvent]] = defaultdict(list)

    def add_event(
        ev: torch.autograd.profiler_util.EventList,
        category: str,
    ) -> None:
        profile_ev = ProfileEvent(
            category=category,
            key=ev.key,  # type: ignore[attr-defined]
            self_device_time_ms=get_self_device_time(ev),
            count=ev.count / nruns,  # type: ignore[operator] # average across all runs
        )
        all_events[category].append(profile_ev)

    for ev in event_list:
        assert not ev.is_legacy, "Don't support the legacy profiler"
        if ev.device_type == DeviceType.CPU:
            # ignore the event on CPU side
            continue

        # Categorize by the kernel-name prefix Inductor assigns to Triton kernels.
        category = "unknown"
        if ev.key.startswith("triton_"):
            if ev.key.startswith("triton_poi"):
                category = "triton_pointwise"
            elif ev.key.startswith("triton_red"):
                category = "triton_reduction"
            elif ev.key.startswith("triton_per"):
                category = "triton_persistent_reduction"
            else:
                category = "triton_unknown"

        add_event(ev, category)

    def report_category(category: str, profile_events: list[ProfileEvent]) -> float:
        # Returns the summed self device time (ms) for this category.
        if not device_name:
            return 0.0

        from tabulate import tabulate

        profile_events.sort(key=lambda ev: ev.self_device_time_ms, reverse=True)

        rows = []
        total_time = 0.0
        print(f"\n == {category} category kernels == ")
        for ev in profile_events:
            total_time += ev.self_device_time_ms
            percent = f"{ev.self_device_time_ms / wall_time_ms * 100:.2f}%"
            rows.append([ev.key[:120], ev.self_device_time_ms, ev.count, percent])
        rows.append(
            ["Total", total_time, "", f"{total_time / wall_time_ms * 100:.2f}%"]
        )
        print(
            tabulate(
                rows,
                headers=[
                    "Kernel",
                    f"Self {device_name.upper()} TIME (ms)",
                    "Count",
                    "Percent",
                ],
            )
        )
        return total_time

    def report() -> None:
        category_list = [
            "triton_pointwise",
            "triton_reduction",
            "triton_persistent_reduction",
            "triton_unknown",
            "unknown",
        ]
        assert OrderedSet(all_events.keys()).issubset(OrderedSet(category_list)), (
            f"{list(all_events.keys())}"
        )

        per_category_wall_time = {}
        total_device_ms = 0.0
        for category in category_list:
            if category in all_events:
                _time = report_category(category, all_events[category])
                per_category_wall_time[category] = _time
                total_device_ms += _time

        device_busy_percent = f"{total_device_ms / wall_time_ms * 100:.2f}%"
        if device_name:
            print(
                f"\nPercent of time when {device_name.upper()} is busy: {device_busy_percent}"
            )
        else:
            print("No device detected")
        print(f"Total wall time {wall_time_ms:.3f} ms")

        # output such a line so we can gather such line from all compiled modules from all
        # benchmarks and tabulate it!
        # Columns: benchmark_name, pointwise_percent, reduction_percent, persistent_reduction_percent,
        # unknown_category_percent, device_busy_percent, wall_time_ms
        tabulate_line = f"Output for tabulate: {benchmark_name}"
        for category in category_list:
            percent = (
                f"{per_category_wall_time.get(category, 0.0) / wall_time_ms * 100:.2f}%"
            )
            tabulate_line += f", {percent}"
        tabulate_line += f", {device_busy_percent}, {wall_time_ms:.3f}ms"

        print(tabulate_line)

    report()
PROFILE_DIR = tempfile.gettempdir()
PROFILE_PATH = f"{PROFILE_DIR}/compiled_module_profile.json"
def perf_profile(
    wall_time_ms: float,
    times: int,
    repeat: int,
    benchmark_name: str,
    benchmark_compiled_module_fn: BenchmarkCallableType,
) -> None:
    """Profile the compiled module with torch.profiler.

    Exports a Chrome trace to PROFILE_PATH, prints the top events by self
    device time, and delegates to parse_profile_event_list for the
    per-category breakdown.
    """
    with torch.profiler.profile(record_shapes=True) as p:
        benchmark_compiled_module_fn(times=times, repeat=repeat)

    path = PROFILE_PATH
    p.export_chrome_trace(path)
    print(f"Profiling result for a compiled module of benchmark {benchmark_name}:")
    print(f"Chrome trace for the profile is written to {path}")
    event_list = p.key_averages(group_by_input_shape=True)
    print(event_list.table(sort_by="self_device_time_total", row_limit=10))
    # times * repeat is the total number of runs used to average per-run times.
    parse_profile_event_list(
        benchmark_name, event_list, wall_time_ms, times * repeat, p.use_device or ""
    )
def ncu_analyzer(
    benchmark_name: str,
    benchmark_compiled_module_fn: BenchmarkCallableType,
    args: argparse.Namespace,
) -> None:
    """Run NVIDIA Nsight Compute (ncu) on the compiled module.

    The compiled module is re-imported inside a child python process so ncu
    can wrap it; the report is exported to a timestamped .ncu-rep file in
    the system temp directory.
    """
    import inspect
    import os
    import subprocess

    kernel_regex = args.ncu_kernel_regex
    metrics = args.ncu_metrics

    # Locate the module defining the benchmark so the child process can
    # import it by path.
    module_file = inspect.getfile(benchmark_compiled_module_fn)
    module_dir = os.path.dirname(module_file)
    module_name = os.path.splitext(os.path.basename(module_file))[0]
    ncu_dir = tempfile.gettempdir()
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    ncu_output = os.path.join(ncu_dir, f"ncu_output_{timestamp}.ncu-rep")

    # One-liner executed by the child interpreter under ncu.
    python_cmd = (
        f"""import sys; sys.path.insert(0, '{module_dir}'); """
        f"""from {module_name} import benchmark_compiled_module; """
        """benchmark_compiled_module(times=1, repeat=1)"""
    )

    ncu_cmd = [
        "ncu",
        "--target-processes",
        "all",
        "--replay-mode",
        "kernel",
        "--kernel-name-base",
        "function",
        "--print-units",
        "base",
        "--import-source",
        "yes",
        "--force-overwrite",
        "--export",
        ncu_output,
    ]
    if kernel_regex:
        ncu_cmd.extend(["--kernel-name", f"regex:{kernel_regex}"])
    if metrics:
        ncu_cmd.extend(["--metrics", metrics])
    else:
        # No explicit metric list: collect ncu's full metric set.
        ncu_cmd.extend(["--set", "full"])
    ncu_cmd.extend(
        [
            "python",
            "-c",
            python_cmd,
        ]
    )
    try:
        subprocess.run(ncu_cmd, check=True)
        print(f"\nNCU profiling results for benchmark {benchmark_name}:")
        print(f"NCU report has been written to {ncu_output}")
    except subprocess.CalledProcessError as e:
        print(f"NCU profiling failed with error: {e}")
        return
def collect_memory_snapshot(
    benchmark_compiled_module_fn: BenchmarkCallableType,
) -> None:
    """Record a CUDA memory-history snapshot while running the compiled module.

    The pickled snapshot is written to the system temp directory.
    """
    assert torch.cuda.is_available()

    torch.cuda.memory._record_memory_history(max_entries=100000)
    benchmark_compiled_module_fn(times=10, repeat=1)  # run 10 times
    snapshot_path = f"{tempfile.gettempdir()}/memory_snapshot.pickle"
    torch.cuda.memory._dump_snapshot(snapshot_path)
    # enabled=None stops recording.
    torch.cuda.memory._record_memory_history(enabled=None)
    print(f"The collect memory snapshot has been written to {snapshot_path}")
# With AOTAutograd cache, we directly call the compiled module. So prevent
# Dynamo from reentering
@torch.compiler.disable  # type: ignore[misc]
def compiled_module_main(
    benchmark_name: str, benchmark_compiled_module_fn: BenchmarkCallableType
) -> None:
    """
    This is the function called in __main__ block of a compiled module.

    Parses CLI flags and dispatches to kernel benchmarking, torch.profiler
    profiling, ncu analysis, and/or CUDA memory snapshot collection.
    """
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--benchmark-kernels",
        "-k",
        action="store_true",
        help="Whether to benchmark each individual kernels",
    )
    parser.add_argument(
        "--benchmark-all-configs",
        "-c",
        action="store_true",
        help="Whether to benchmark each individual config for a kernel",
    )
    parser.add_argument(
        "--profile",
        "-p",
        action="store_true",
        help="Whether to profile the compiled module",
    )
    parser.add_argument(
        "--cuda-memory-snapshot",
        action="store_true",
        help="""
            Whether to collect CUDA memory snapshot. Refer to
            "https://pytorch.org/blog/understanding-gpu-memory-1/
            for details about how to visualize the collected snapshot
        """,
    )
    parser.add_argument(
        "--ncu",
        action="store_true",
        help="Whether to run ncu analysis",
    )
    parser.add_argument(
        "--ncu-kernel-regex",
        type=str,
        default=None,
        help=(
            "Filter kernels profiled by NCU using a regex (e.g., '^triton_.*'). "
            "Maps to '--kernel-name regex:<regex>'. "
            "If None, NCU will profile all kernels."
        ),
    )
    parser.add_argument(
        "--ncu-metrics",
        type=str,
        default=None,
        help=(
            "Comma-separated list of NCU metrics to collect (e.g., 'dram__bytes.sum.per_second'). "
            "If None, NCU will use '--set full'."
        ),
    )
    parser.add_argument(
        "--times",
        type=int,
        default=10,
        help="Number of times to run each benchmark iteration",
    )
    parser.add_argument(
        "--repeat",
        type=int,
        default=10,
        help="Number of repetitions of each benchmark run",
    )
    args = parser.parse_args()

    if args.benchmark_kernels:
        benchmark_all_kernels(benchmark_name, args.benchmark_all_configs)
    else:
        times = args.times
        repeat = args.repeat

        if torch.cuda.is_available():
            torch.cuda.reset_peak_memory_stats()

        # benchmark fn returns seconds; convert to milliseconds.
        wall_time_ms = benchmark_compiled_module_fn(times=times, repeat=repeat) * 1000

        if torch.cuda.is_available():
            peak_mem = torch.cuda.max_memory_allocated()
            print(f"Peak GPU memory usage {peak_mem / 1e6:.3f} MB")

        if torch.cuda.is_available() and args.cuda_memory_snapshot:
            collect_memory_snapshot(benchmark_compiled_module_fn)

        if args.profile:
            perf_profile(
                wall_time_ms,
                times,
                repeat,
                benchmark_name,
                benchmark_compiled_module_fn,
            )
        if args.ncu:
            ncu_analyzer(
                benchmark_name,
                benchmark_compiled_module_fn,
                args=args,
            )
| ProfileEvent |
python | openai__openai-python | src/openai/resources/conversations/items.py | {
"start": 1217,
"end": 11525
} | class ____(SyncAPIResource):
@cached_property
def with_raw_response(self) -> ItemsWithRawResponse:
    """
    This property can be used as a prefix for any HTTP method call to return
    the raw response object instead of the parsed content.

    For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
    """
    # cached_property: constructed once per resource instance.
    return ItemsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> ItemsWithStreamingResponse:
    """
    An alternative to `.with_raw_response` that doesn't eagerly read the response body.

    For more information, see https://www.github.com/openai/openai-python#with_streaming_response
    """
    # cached_property: constructed once per resource instance.
    return ItemsWithStreamingResponse(self)
def create(
    self,
    conversation_id: str,
    *,
    items: Iterable[ResponseInputItemParam],
    include: List[ResponseIncludable] | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ConversationItemList:
    """
    Create items in a conversation with the given ID.

    Args:
      items: The items to add to the conversation. You may add up to 20 items at a time.

      include: Additional fields to include in the response. See the `include` parameter for
          [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
          for more information.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    if not conversation_id:
        raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
    return self._post(
        f"/conversations/{conversation_id}/items",
        body=maybe_transform({"items": items}, item_create_params.ItemCreateParams),
        options=make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
            # `include` travels as a query parameter, not in the JSON body.
            query=maybe_transform({"include": include}, item_create_params.ItemCreateParams),
        ),
        cast_to=ConversationItemList,
    )
def retrieve(
    self,
    item_id: str,
    *,
    conversation_id: str,
    include: List[ResponseIncludable] | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ConversationItem:
    """
    Get a single item from a conversation with the given IDs.

    Args:
      include: Additional fields to include in the response. See the `include` parameter for
          [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
          for more information.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    if not conversation_id:
        raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
    if not item_id:
        raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
    # The outer cast gives the declared return type; the inner cast works
    # around union types not being accepted as cast_to arguments.
    return cast(
        ConversationItem,
        self._get(
            f"/conversations/{conversation_id}/items/{item_id}",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform({"include": include}, item_retrieve_params.ItemRetrieveParams),
            ),
            cast_to=cast(Any, ConversationItem),  # Union types cannot be passed in as arguments in the type system
        ),
    )
def list(
    self,
    conversation_id: str,
    *,
    after: str | Omit = omit,
    include: List[ResponseIncludable] | Omit = omit,
    limit: int | Omit = omit,
    order: Literal["asc", "desc"] | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncConversationCursorPage[ConversationItem]:
    """
    List all items for a conversation with the given ID.

    Args:
      after: An item ID to list items after, used in pagination.

      include: Specify additional output data to include in the model response. Currently
          supported values are:

          - `web_search_call.action.sources`: Include the sources of the web search tool
            call.
          - `code_interpreter_call.outputs`: Includes the outputs of python code execution
            in code interpreter tool call items.
          - `computer_call_output.output.image_url`: Include image urls from the computer
            call output.
          - `file_search_call.results`: Include the search results of the file search tool
            call.
          - `message.input_image.image_url`: Include image urls from the input message.
          - `message.output_text.logprobs`: Include logprobs with assistant messages.
          - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
            tokens in reasoning item outputs. This enables reasoning items to be used in
            multi-turn conversations when using the Responses API statelessly (like when
            the `store` parameter is set to `false`, or when an organization is enrolled
            in the zero data retention program).

      limit: A limit on the number of objects to be returned. Limit can range between 1 and
          100, and the default is 20.

      order: The order to return the input items in. Default is `desc`.

          - `asc`: Return the input items in ascending order.
          - `desc`: Return the input items in descending order.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    if not conversation_id:
        raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
    # Returns a cursor-paginated page object over ConversationItem values.
    return self._get_api_list(
        f"/conversations/{conversation_id}/items",
        page=SyncConversationCursorPage[ConversationItem],
        options=make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
            query=maybe_transform(
                {
                    "after": after,
                    "include": include,
                    "limit": limit,
                    "order": order,
                },
                item_list_params.ItemListParams,
            ),
        ),
        model=cast(Any, ConversationItem),  # Union types cannot be passed in as arguments in the type system
    )
def delete(
    self,
    item_id: str,
    *,
    conversation_id: str,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Conversation:
    """
    Delete an item from a conversation with the given IDs.

    Args:
      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    if not conversation_id:
        raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
    if not item_id:
        raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
    # Note: the endpoint returns the parent Conversation, not the deleted item.
    return self._delete(
        f"/conversations/{conversation_id}/items/{item_id}",
        options=make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        ),
        cast_to=Conversation,
    )
| Items |
python | python-poetry__poetry | src/poetry/installation/chooser.py | {
"start": 814,
"end": 13930
} | class ____:
"""
A Chooser chooses an appropriate release archive for packages.
"""
def __init__(
    self, pool: RepositoryPool, env: Env, config: Config | None = None
) -> None:
    # The two filter policies are parsed once from config; they decide
    # whether wheels/sdists may be selected per package name (or ":all:").
    self._pool = pool
    self._env = env
    self._config = config or Config.create()
    self._no_binary_policy: PackageFilterPolicy = PackageFilterPolicy(
        self._config.get("installer.no-binary", [])
    )
    self._only_binary_policy: PackageFilterPolicy = PackageFilterPolicy(
        self._config.get("installer.only-binary", [])
    )
def choose_for(self, package: Package) -> Link:
    """
    Return the url of the selected archive for a given package.

    Filters candidate links by binary/source policies and wheel
    compatibility, then picks the best remaining link via _sort_key.
    Raises the error built by _no_links_found_error when nothing survives.
    """
    links = []
    # these are used only for providing insightful errors to the user
    unsupported_wheels = set()
    links_seen = 0
    wheels_skipped = 0
    sdists_skipped = 0

    for link in self._get_links(package):
        links_seen += 1
        if link.is_wheel:
            if (
                # exact package name must reject wheel, even if `only-binary` includes it
                self._no_binary_policy.has_exact_package(package.name)
                # `:all:` reject wheel only if `only-binary` does not include it
                or (
                    not self._no_binary_policy.allows(package.name)
                    and not self._only_binary_policy.has_exact_package(package.name)
                )
            ):
                logger.debug(
                    "Skipping wheel for %s as requested in no binary policy for"
                    " package (%s)",
                    link.filename,
                    package.name,
                )
                wheels_skipped += 1
                continue

            if not Wheel(link.filename).is_supported_by_environment(self._env):
                logger.debug(
                    "Skipping wheel %s as this is not supported by the current"
                    " environment",
                    link.filename,
                )
                unsupported_wheels.add(link.filename)
                continue

        # Legacy/unsupported distribution formats are never candidates.
        if link.ext in {".egg", ".exe", ".msi", ".rpm", ".srpm"}:
            logger.debug("Skipping unsupported distribution %s", link.filename)
            continue

        if link.is_sdist and (
            # exact package name must reject sdist, even if `no-binary` includes it
            self._only_binary_policy.has_exact_package(package.name)
            # `:all:` reject sdist only if `no-binary` does not include it
            or (
                not self._only_binary_policy.allows(package.name)
                and not self._no_binary_policy.has_exact_package(package.name)
            )
        ):
            logger.debug(
                "Skipping source distribution for %s as requested in only binary policy for"
                " package (%s)",
                link.filename,
                package.name,
            )
            sdists_skipped += 1
            continue

        links.append(link)

    if not links:
        raise self._no_links_found_error(
            package, links_seen, wheels_skipped, sdists_skipped, unsupported_wheels
        )

    # Get the best link
    chosen = max(links, key=lambda link: self._sort_key(package, link))

    return chosen
def _no_links_found_error(
    self,
    package: Package,
    links_seen: int,
    wheels_skipped: int,
    sdists_skipped: int,
    unsupported_wheels: set[str],
) -> PoetryRuntimeError:
    """Build a detailed, user-facing error explaining why no link was chosen.

    The counters describe how each candidate was filtered out so the user
    can tell policy filtering apart from abi incompatibility.
    """
    messages = []
    info = (
        f"This is likely not a Poetry issue.\n\n"
        f" - {links_seen} candidate(s) were identified for the package\n"
    )
    if wheels_skipped > 0:
        info += f" - {wheels_skipped} wheel(s) were skipped due to your <c1>installer.no-binary</> policy\n"
    if sdists_skipped > 0:
        info += f" - {sdists_skipped} source distribution(s) were skipped due to your <c1>installer.only-binary</> policy\n"
    if unsupported_wheels:
        info += (
            f" - {len(unsupported_wheels)} wheel(s) were skipped as your project's environment does not support "
            f"the identified abi tags\n"
        )
    messages.append(ConsoleMessage(info.strip()))

    if unsupported_wheels:
        messages += [
            ConsoleMessage(
                "The following wheel(s) were skipped as the current project environment does not support them "
                "due to abi compatibility issues.",
                debug=True,
            ),
            ConsoleMessage("\n".join(unsupported_wheels), debug=True)
            .indent(" - ")
            .wrap("warning"),
            ConsoleMessage(
                "If you would like to see the supported tags in your project environment, you can execute "
                "the following command:\n\n"
                "    <c1>poetry debug tags</>",
                debug=True,
            ),
        ]

    source_hint = ""
    if package.source_type and package.source_reference:
        source_hint += f" ({package.source_reference})"

    messages.append(
        ConsoleMessage(
            f"Make sure the lockfile is up-to-date. You can try one of the following;\n\n"
            f"    1. <b>Regenerate lockfile: </><fg=yellow>poetry lock --no-cache --regenerate</>\n"
            f"    2. <b>Update package    : </><fg=yellow>poetry update --no-cache {package.name}</>\n\n"
            # FIXME: In the future, it would be better to suggest a more targeted
            # cache clear command for just the package in question. E.g.
            # `poetry cache clear {package.source_reference}:{package.name}:{package.version}`
            # but `package.source_reference` currently resolves to `None` because
            # repository names are case sensitive at the moment (`PyPI` vs `pypi`).
            f"If any of those solutions worked, you will have to clear your caches using (<c1>poetry cache clear --all</>).\n\n"
            f"If neither works, please first check to verify that the {package.name} has published wheels "
            f"available from your configured source{source_hint} that are compatible with your environment"
            f"- ie. operating system, architecture (x86_64, arm64 etc.), python interpreter."
        )
        .make_section("Solutions")
        .wrap("info")
    )

    return PoetryRuntimeError(
        reason=f"Unable to find installation candidates for {package}",
        messages=messages,
    )
def _get_links(self, package: Package) -> list[Link]:
    """Fetch candidate links for *package* and filter them by locked hashes.

    When the lock file records hashes, only links whose hash matches one of
    the locked values are returned; a complete mismatch raises a detailed
    PoetryRuntimeError.
    """
    if package.source_type:
        assert package.source_reference is not None
        repository = self._pool.repository(package.source_reference)
    elif not self._pool.has_repository("pypi"):
        repository = self._pool.repositories[0]
    else:
        repository = self._pool.repository("pypi")
    links = repository.find_links_for_package(package)

    locked_hashes = {f["hash"] for f in package.files}
    if not locked_hashes:
        # Nothing to verify against: accept all links.
        return links

    selected_links = []
    skipped = []
    locked_hash_names = {h.split(":")[0] for h in locked_hashes}
    for link in links:
        if not link.hashes:
            selected_links.append(link)
            continue

        link_hash: str | None = None
        if (candidates := locked_hash_names.intersection(link.hashes.keys())) and (
            hash_name := get_highest_priority_hash_type(candidates, link.filename)
        ):
            link_hash = f"{hash_name}:{link.hashes[hash_name]}"
        elif isinstance(repository, HTTPRepository):
            # No usable advertised hash: download and hash the file ourselves.
            link_hash = repository.calculate_sha256(link)

        if link_hash not in locked_hashes:
            skipped.append((link.filename, link_hash))
            logger.debug(
                "Skipping %s as %s checksum does not match expected value",
                link.filename,
                link_hash,
            )
            continue

        selected_links.append(link)

    if links and not selected_links:
        reason = f"Downloaded distributions for <b>{package.pretty_name} ({package.pretty_version})</> did not match any known checksums in your lock file."
        link_hashes = "\n".join(f" - {link}({h})" for link, h in skipped)
        known_hashes = "\n".join(f" - {h}" for h in locked_hashes)
        messages = [
            ConsoleMessage(
                "<options=bold>Causes:</>\n"
                " - invalid or corrupt cache either during locking or installation\n"
                " - network interruptions or errors causing corrupted downloads\n\n"
                "<b>Solutions:</>\n"
                " 1. Try running your command again using the <c1>--no-cache</> global option enabled.\n"
                " 2. Try regenerating your lock file using (<c1>poetry lock --no-cache --regenerate</>).\n\n"
                "If any of those solutions worked, you will have to clear your caches using (<c1>poetry cache clear --all CACHE_NAME</>)."
            ),
            ConsoleMessage(
                f"Poetry retrieved the following links:\n"
                f"{link_hashes}\n\n"
                f"The lockfile contained only the following hashes:\n"
                f"{known_hashes}",
                debug=True,
            ),
        ]
        raise PoetryRuntimeError(reason, messages)

    return selected_links
def _sort_key(
self, package: Package, link: Link
) -> tuple[int, int, int, Version, tuple[Any, ...], int]:
"""
Function to pass as the `key` argument to a call to sorted() to sort
InstallationCandidates by preference.
Returns a tuple such that tuples sorting as greater using Python's
default comparison operator are more preferred.
The preference is as follows:
First and foremost, candidates with allowed (matching) hashes are
always preferred over candidates without matching hashes. This is
because e.g. if the only candidate with an allowed hash is yanked,
we still want to use that candidate.
Second, excepting hash considerations, candidates that have been
yanked (in the sense of PEP 592) are always less preferred than
candidates that haven't been yanked. Then:
If not finding wheels, they are sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min(self._supported_tags)
3. source archives
If prefer_binary was set, then all wheels are sorted above sources.
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version, would have to be considered equal
"""
build_tag: tuple[Any, ...] = ()
binary_preference = 0
if link.is_wheel:
wheel = Wheel(link.filename)
if not wheel.is_supported_by_environment(self._env):
raise RuntimeError(
f"{wheel.filename} is not a supported wheel for this platform. It "
"can't be sorted."
)
# TODO: Binary preference
pri = -(wheel.get_minimum_supported_index(self._env.supported_tags) or 0)
if wheel.build_tag is not None:
match = re.match(r"^(\d+)(.*)$", wheel.build_tag)
if not match:
raise ValueError(f"Unable to parse build tag: {wheel.build_tag}")
build_tag_groups = match.groups()
build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
else: # sdist
support_num = len(self._env.supported_tags)
pri = -support_num
has_allowed_hash = int(self._is_link_hash_allowed_for_package(link, package))
yank_value = int(not link.yanked)
return (
has_allowed_hash,
yank_value,
binary_preference,
package.version,
build_tag,
pri,
)
def _is_link_hash_allowed_for_package(self, link: Link, package: Package) -> bool:
if not link.hashes:
return True
link_hashes = {f"{name}:{h}" for name, h in link.hashes.items()}
locked_hashes = {f["hash"] for f in package.files}
return bool(link_hashes & locked_hashes)
| Chooser |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 50626,
"end": 51910
} | class ____(rv_continuous):
r"""A cosine continuous random variable.
%(before_notes)s
Notes
-----
The cosine distribution is an approximation to the normal distribution.
The probability density function for `cosine` is:
.. math::
f(x) = \frac{1}{2\pi} (1+\cos(x))
for :math:`-\pi \le x \le \pi`.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return []
def _pdf(self, x):
# cosine.pdf(x) = 1/(2*pi) * (1+cos(x))
return 1.0/2/np.pi*(1+np.cos(x))
def _logpdf(self, x):
c = np.cos(x)
return xpx.apply_where(c != -1, c,
lambda c: np.log1p(c) - np.log(2*np.pi),
fill_value=-np.inf)
def _cdf(self, x):
return scu._cosine_cdf(x)
def _sf(self, x):
return scu._cosine_cdf(-x)
def _ppf(self, p):
return scu._cosine_invcdf(p)
def _isf(self, p):
return -scu._cosine_invcdf(p)
def _stats(self):
v = (np.pi * np.pi / 3.0) - 2.0
k = -6.0 * (np.pi**4 - 90) / (5.0 * (np.pi * np.pi - 6)**2)
return 0.0, v, 0.0, k
def _entropy(self):
return np.log(4*np.pi)-1.0
cosine = cosine_gen(a=-np.pi, b=np.pi, name='cosine')
| cosine_gen |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/supervised_learning/perceptron.py | {
"start": 491,
"end": 2679
} | class ____():
"""The Perceptron. One layer neural network classifier.
Parameters:
-----------
n_iterations: float
The number of training iterations the algorithm will tune the weights for.
activation_function: class
The activation that shall be used for each neuron.
Possible choices: Sigmoid, ExpLU, ReLU, LeakyReLU, SoftPlus, TanH
loss: class
The loss function used to assess the model's performance.
Possible choices: SquareLoss, CrossEntropy
learning_rate: float
The step length that will be used when updating the weights.
"""
def __init__(self, n_iterations=20000, activation_function=Sigmoid, loss=SquareLoss, learning_rate=0.01):
self.n_iterations = n_iterations
self.learning_rate = learning_rate
self.loss = loss()
self.activation_func = activation_function()
self.progressbar = progressbar.ProgressBar(widgets=bar_widgets)
def fit(self, X, y):
n_samples, n_features = np.shape(X)
_, n_outputs = np.shape(y)
# Initialize weights between [-1/sqrt(N), 1/sqrt(N)]
limit = 1 / math.sqrt(n_features)
self.W = np.random.uniform(-limit, limit, (n_features, n_outputs))
self.w0 = np.zeros((1, n_outputs))
for i in self.progressbar(range(self.n_iterations)):
# Calculate outputs
linear_output = X.dot(self.W) + self.w0
y_pred = self.activation_func(linear_output)
# Calculate the loss gradient w.r.t the input of the activation function
error_gradient = self.loss.gradient(y, y_pred) * self.activation_func.gradient(linear_output)
# Calculate the gradient of the loss with respect to each weight
grad_wrt_w = X.T.dot(error_gradient)
grad_wrt_w0 = np.sum(error_gradient, axis=0, keepdims=True)
# Update weights
self.W -= self.learning_rate * grad_wrt_w
self.w0 -= self.learning_rate * grad_wrt_w0
# Use the trained model to predict labels of X
def predict(self, X):
y_pred = self.activation_func(X.dot(self.W) + self.w0)
return y_pred
| Perceptron |
python | numba__numba | numba/cuda/tests/cudapy/test_overload.py | {
"start": 3892,
"end": 8251
} | class ____(CUDATestCase):
def check_overload(self, kernel, expected):
x = np.ones(1, dtype=np.int32)
cuda.jit(kernel)[1, 1](x)
self.assertEqual(x[0], expected)
def check_overload_cpu(self, kernel, expected):
x = np.ones(1, dtype=np.int32)
njit(kernel)(x)
self.assertEqual(x[0], expected)
def test_generic(self):
def kernel(x):
generic_func_1(x)
expected = GENERIC_FUNCTION_1
self.check_overload(kernel, expected)
def test_cuda(self):
def kernel(x):
cuda_func_1(x)
expected = CUDA_FUNCTION_1
self.check_overload(kernel, expected)
def test_generic_and_cuda(self):
def kernel(x):
generic_func_1(x)
cuda_func_1(x)
expected = GENERIC_FUNCTION_1 * CUDA_FUNCTION_1
self.check_overload(kernel, expected)
def test_call_two_generic_calls(self):
def kernel(x):
generic_func_1(x)
generic_func_2(x)
expected = GENERIC_FUNCTION_1 * GENERIC_FUNCTION_2
self.check_overload(kernel, expected)
def test_call_two_cuda_calls(self):
def kernel(x):
cuda_func_1(x)
cuda_func_2(x)
expected = CUDA_FUNCTION_1 * CUDA_FUNCTION_2
self.check_overload(kernel, expected)
def test_generic_calls_generic(self):
def kernel(x):
generic_calls_generic(x)
expected = GENERIC_CALLS_GENERIC * GENERIC_FUNCTION_1
self.check_overload(kernel, expected)
def test_generic_calls_cuda(self):
def kernel(x):
generic_calls_cuda(x)
expected = GENERIC_CALLS_CUDA * CUDA_FUNCTION_1
self.check_overload(kernel, expected)
def test_cuda_calls_generic(self):
def kernel(x):
cuda_calls_generic(x)
expected = CUDA_CALLS_GENERIC * GENERIC_FUNCTION_1
self.check_overload(kernel, expected)
def test_cuda_calls_cuda(self):
def kernel(x):
cuda_calls_cuda(x)
expected = CUDA_CALLS_CUDA * CUDA_FUNCTION_1
self.check_overload(kernel, expected)
def test_call_target_overloaded(self):
def kernel(x):
target_overloaded(x)
expected = CUDA_TARGET_OL
self.check_overload(kernel, expected)
def test_generic_calls_target_overloaded(self):
def kernel(x):
generic_calls_target_overloaded(x)
expected = GENERIC_CALLS_TARGET_OL * CUDA_TARGET_OL
self.check_overload(kernel, expected)
def test_cuda_calls_target_overloaded(self):
def kernel(x):
cuda_calls_target_overloaded(x)
expected = CUDA_CALLS_TARGET_OL * CUDA_TARGET_OL
self.check_overload(kernel, expected)
def test_target_overloaded_calls_target_overloaded(self):
def kernel(x):
target_overloaded_calls_target_overloaded(x)
# Check the CUDA overloads are used on CUDA
expected = CUDA_TARGET_OL_CALLS_TARGET_OL * CUDA_TARGET_OL
self.check_overload(kernel, expected)
# Also check that the CPU overloads are used on the CPU
expected = GENERIC_TARGET_OL_CALLS_TARGET_OL * GENERIC_TARGET_OL
self.check_overload_cpu(kernel, expected)
def test_overload_attribute_target(self):
MyDummy, MyDummyType = self.make_dummy_type()
mydummy_type = typeof(MyDummy())
@overload_attribute(MyDummyType, 'cuda_only', target='cuda')
def ov_dummy_cuda_attr(obj):
def imp(obj):
return 42
return imp
# Ensure that we cannot use the CUDA target-specific attribute on the
# CPU, and that an appropriate typing error is raised
with self.assertRaisesRegex(TypingError,
"Unknown attribute 'cuda_only'"):
@njit(types.int64(mydummy_type))
def illegal_target_attr_use(x):
return x.cuda_only
# Ensure that the CUDA target-specific attribute is usable and works
# correctly when the target is CUDA - note eager compilation via
# signature
@cuda.jit(types.void(types.int64[::1], mydummy_type))
def cuda_target_attr_use(res, dummy):
res[0] = dummy.cuda_only
if __name__ == '__main__':
unittest.main()
| TestOverload |
python | doocs__leetcode | solution/2300-2399/2334.Subarray With Elements Greater Than Varying Threshold/Solution2.py | {
"start": 0,
"end": 734
} | class ____:
def validSubarraySize(self, nums: List[int], threshold: int) -> int:
n = len(nums)
left = [-1] * n
right = [n] * n
stk = []
for i, v in enumerate(nums):
while stk and nums[stk[-1]] >= v:
stk.pop()
if stk:
left[i] = stk[-1]
stk.append(i)
stk = []
for i in range(n - 1, -1, -1):
while stk and nums[stk[-1]] >= nums[i]:
stk.pop()
if stk:
right[i] = stk[-1]
stk.append(i)
for i, v in enumerate(nums):
k = right[i] - left[i] - 1
if v > threshold // k:
return k
return -1
| Solution |
python | PyCQA__pylint | tests/functional/n/no/no_member.py | {
"start": 1054,
"end": 1245
} | class ____(Base):
label = "I exist!"
print(Derived.label)
# Regression test for https://github.com/pylint-dev/pylint/issues/5832
starter_path = Path(__file__).parents[3].resolve()
| Derived |
python | huggingface__transformers | src/transformers/models/perceiver/modeling_perceiver.py | {
"start": 47653,
"end": 52640
} | class ____(PerceiverPreTrainedModel):
def __init__(self, config):
super().__init__(config)
fourier_position_encoding_kwargs_preprocessor = {
"concat_pos": True,
"max_resolution": (224, 224),
"num_bands": 64,
"sine_only": False,
}
trainable_position_encoding_kwargs_decoder = {"num_channels": config.d_latents, "index_dims": 1}
self.num_labels = config.num_labels
self.perceiver = PerceiverModel(
config,
input_preprocessor=PerceiverImagePreprocessor(
config,
prep_type="pixels",
spatial_downsample=1,
fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor,
),
decoder=PerceiverClassificationDecoder(
config,
num_channels=config.d_latents,
trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder,
use_query_residual=True,
),
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
inputs: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
return_dict: Optional[bool] = None,
pixel_values: Optional[torch.Tensor] = None,
) -> Union[tuple, PerceiverClassifierOutput]:
r"""
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, PerceiverForImageClassificationFourier
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("deepmind/vision-perceiver-fourier")
>>> model = PerceiverForImageClassificationFourier.from_pretrained("deepmind/vision-perceiver-fourier")
>>> inputs = image_processor(images=image, return_tensors="pt").pixel_values
>>> outputs = model(inputs=inputs)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 1000]
>>> # model predicts one of the 1000 ImageNet classes
>>> predicted_class_idx = logits.argmax(-1).item()
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
Predicted class: tabby, tabby cat
```"""
if inputs is not None and pixel_values is not None:
raise ValueError("You cannot use both `inputs` and `pixel_values`")
elif inputs is None and pixel_values is not None:
inputs = pixel_values
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.perceiver(
inputs=inputs,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = outputs.logits if return_dict else outputs[0]
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return PerceiverClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
@auto_docstring(
custom_intro="""
Example use of Perceiver for image classification, for tasks such as ImageNet.
This model uses a 2D conv+maxpool preprocessing network. As shown in the paper, this model can achieve a top-1 accuracy
of 82.1 on ImageNet.
[`PerceiverForImageClassificationLearned`] uses [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`]
(with `prep_type="conv"`) to preprocess the input images, and
[`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation of
[`PerceiverModel`] into classification logits.
"""
)
| PerceiverForImageClassificationFourier |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_getnewargs/invalid_getnewargs_ex_returned.py | {
"start": 1415,
"end": 1627
} | class ____:
""" __getnewargs_ex__ returns tuple with incorrect arg length"""
def __getnewargs_ex__(self): # [invalid-getnewargs-ex-returned]
return (tuple(1), dict(x="y"), 1)
| SecondBadGetNewArgsEx |
python | pennersr__django-allauth | allauth/socialaccount/providers/google/views.py | {
"start": 2033,
"end": 3961
} | class ____(OAuth2Adapter):
provider_id = "google"
access_token_url = ACCESS_TOKEN_URL
authorize_url = AUTHORIZE_URL
id_token_issuer = ID_TOKEN_ISSUER
identity_url = IDENTITY_URL
fetch_userinfo = FETCH_USERINFO
def complete_login(self, request, app, token, response, **kwargs):
data = None
id_token = response.get("id_token")
if id_token:
data = self._decode_id_token(app, id_token)
if self.fetch_userinfo and "picture" not in data:
info = self._fetch_user_info(token.token)
picture = info.get("picture")
if picture:
data["picture"] = picture
else:
data = self._fetch_user_info(token.token)
login = self.get_provider().sociallogin_from_response(request, data)
return login
def _decode_id_token(self, app, id_token):
"""
If the token was received by direct communication protected by
TLS between this library and Google, we are allowed to skip checking the
token signature according to the OpenID Connect Core 1.0 specification.
https://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation
"""
verify_signature = not self.did_fetch_access_token
return _verify_and_decode(app, id_token, verify_signature=verify_signature)
def _fetch_user_info(self, access_token):
resp = (
get_adapter()
.get_requests_session()
.get(
self.identity_url,
headers={"Authorization": "Bearer {}".format(access_token)},
)
)
if not resp.ok:
raise OAuth2Error("Request to user info failed")
return resp.json()
oauth2_login = OAuth2LoginView.adapter_view(GoogleOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(GoogleOAuth2Adapter)
| GoogleOAuth2Adapter |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 116425,
"end": 117310
} | class ____(MaybeAlignPartitions):
_parameters = ["frame", "func", "meta", "kwargs"]
enforce_metadata = False
def __str__(self):
return f"UFunc({funcname(self.func)})"
@functools.cached_property
def args(self):
return self.operands[len(self._parameters) :]
@functools.cached_property
def _dfs(self):
return [df for df in self.args if isinstance(df, Expr)]
@functools.cached_property
def _meta(self):
if self.operand("meta") is not no_default:
return self.operand("meta")
return _get_meta_ufunc(self._dfs, self.args, self.func)
def _lower(self):
args = maybe_align_partitions(*self.args, divisions=self._divisions())
dfs = [x for x in args if isinstance(x, Expr) and x.ndim > 0]
return UFuncElemwise(dfs[0], self.func, self._meta, False, self.kwargs, *args)
| UFuncAlign |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_type_lookup.py | {
"start": 9179,
"end": 12048
} | class ____(dict[int, int]):
pass
def using_generic(instance: MyGeneric[T]) -> T:
return instance.arg
def using_concrete_generic(instance: MyGeneric[int]) -> int:
return instance.arg
def test_generic_origin_empty():
with pytest.raises(ResolutionFailed):
check_can_generate_examples(st.builds(using_generic))
def test_issue_2951_regression():
lines_strat = st.builds(Lines, lines=st.lists(st.text()))
prev_seq_int_repr = repr(st.from_type(Sequence[int]))
with temp_registered(Lines, lines_strat):
assert st.from_type(Lines) == lines_strat
# Now let's test that the strategy for ``Sequence[int]`` did not
# change just because we registered a strategy for ``Lines``:
assert repr(st.from_type(Sequence[int])) == prev_seq_int_repr
def test_issue_2951_regression_two_params():
map_strat = st.builds(SpecificDict, st.dictionaries(st.integers(), st.integers()))
expected = repr(st.from_type(dict[int, int]))
with temp_registered(SpecificDict, map_strat):
assert st.from_type(SpecificDict) == map_strat
assert expected == repr(st.from_type(dict[int, int]))
@pytest.mark.parametrize(
"generic",
(
Union[str, int],
str | int,
Sequence[Sequence[int]],
MyGeneric[str],
Callable[..., str],
Callable[[int], str],
),
ids=repr,
)
@pytest.mark.parametrize("strategy", [st.none(), lambda _: st.none()])
def test_generic_origin_with_type_args(generic, strategy):
with pytest.raises(InvalidArgument):
st.register_type_strategy(generic, strategy)
assert generic not in types._global_type_lookup
@pytest.mark.parametrize(
"generic",
(
Callable,
list,
Sequence,
# you can register types with all generic parameters
_List[T],
getattr(typing, "Sequence", None)[T], # pyupgrade workaround
list[T],
Sequence[T],
# User-defined generics should also work
MyGeneric,
MyGeneric[T],
),
)
def test_generic_origin_without_type_args(generic):
with temp_registered(generic, st.just("example")):
pass
@pytest.mark.parametrize(
"strat, type_",
[
(st.from_type, MyGeneric[T]),
(st.from_type, MyGeneric[int]),
(st.from_type, MyGeneric),
(st.builds, using_generic),
(st.builds, using_concrete_generic),
],
ids=get_pretty_function_description,
)
def test_generic_origin_from_type(strat, type_):
with temp_registered(MyGeneric, st.builds(MyGeneric)):
check_can_generate_examples(strat(type_))
def test_generic_origin_concrete_builds():
with temp_registered(MyGeneric, st.builds(MyGeneric, st.integers())):
assert_all_examples(
st.builds(using_generic), lambda example: isinstance(example, int)
)
| SpecificDict |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_trends.py | {
"start": 1314,
"end": 1930
} | class ____(TypedDict):
aggregate_range_1: SelectType
aggregate_range_2: SelectType
count_range_1: SelectType
count_range_2: SelectType
t_test: SelectType
trend_percentage: SelectType
trend_difference: SelectType
count_percentage: SelectType
# This is to flip conditions between trend types
CORRESPONDENCE_MAP = {
">": "<",
">=": "<=",
"<": ">",
"<=": ">=",
"=": "=",
"!=": "!=",
}
IMPROVED = "improved"
REGRESSION = "regression"
TREND_TYPES = [IMPROVED, REGRESSION]
# TODO: move this to the builder file and introduce a top-events version instead
| TrendColumns |
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_docs_tests/test_changed_validator.py | {
"start": 1846,
"end": 3022
} | class ____:
def test_has_errors(self):
result_with_errors = ValidationResult(
symbol_info=SymbolInfo(symbol_path="test.Class", file_path=Path("/test.py")),
errors=["Error 1", "Error 2"],
warnings=[],
)
result_without_errors = ValidationResult(
symbol_info=SymbolInfo(symbol_path="test.Class", file_path=Path("/test.py")),
errors=[],
warnings=["Warning 1"],
)
assert result_with_errors.has_errors() is True
assert result_without_errors.has_errors() is False
def test_has_warnings(self):
result_with_warnings = ValidationResult(
symbol_info=SymbolInfo(symbol_path="test.Class", file_path=Path("/test.py")),
errors=[],
warnings=["Warning 1"],
)
result_without_warnings = ValidationResult(
symbol_info=SymbolInfo(symbol_path="test.Class", file_path=Path("/test.py")),
errors=["Error 1"],
warnings=[],
)
assert result_with_warnings.has_warnings() is True
assert result_without_warnings.has_warnings() is False
| TestValidationResult |
python | wandb__wandb | tests/system_tests/test_launch/test_launch_kubernetes.py | {
"start": 8704,
"end": 12435
} | class ____:
def __init__(self, mock_api_client, pods):
# self.context = mock_api_client["context_name"]
self.pods = pods
self.namespaces = []
async def list_namespaced_pod(self, label_selector, namespace, **kwargs):
ret = []
k, v = label_selector.split("=")
if k == "job-name":
for pod in self.pods.items:
if pod.job_name == v:
ret.append(pod)
return MockPodList(ret)
async def create_namespace(self, body):
self.namespaces.append(body)
async def delete_namespace(self, name):
self.namespaces.remove(name)
def pods(job_name):
return MockPodList(
[
MockDict(
{
"metadata": MockDict(
{
"name": "pod1",
}
),
"job_name": job_name,
"log": "test log string",
}
)
]
)
def setup_mock_kubernetes_client(monkeypatch, jobs, pods, mock_job_base):
monkeypatch.setattr(
kubernetes_asyncio.client,
"BatchV1Api",
lambda api_client: MockBatchV1Api(api_client, jobs),
)
monkeypatch.setattr(
kubernetes_asyncio.client,
"CoreV1Api",
lambda api_client: MockCoreV1Api(api_client, pods),
)
monkeypatch.setattr(
kubernetes_asyncio.utils,
"create_from_dict",
lambda _, yaml_objects, namespace: mock_create_from_dict(
yaml_objects, jobs, mock_job_base
),
)
monkeypatch.setattr(
"wandb.sdk.launch.runner.kubernetes_monitor.LaunchKubernetesMonitor",
AsyncMock(),
)
monkeypatch.setattr(
"wandb.sdk.launch.runner.kubernetes_runner.LaunchKubernetesMonitor",
AsyncMock(),
)
async def _mock_get_context_and_client(*args, **kwargs):
return None, None
monkeypatch.setattr(
kubernetes_monitor,
"get_kube_context_and_api_client",
_mock_get_context_and_client,
)
async def mock_create_from_dict(jobd, jobs_dict, mock_status):
name = jobd["metadata"].get("name")
if not name:
name = jobd["metadata"]["generateName"] + "testname"
jobd["metadata"]["name"] = name
metadata = MockDict(jobd["metadata"])
metadata.labels = metadata.get("labels", {})
metadata.labels["job-name"] = name # assign name
job_spec = MockDict(jobd["spec"])
job_spec.backoff_limit = job_spec.get("backoffLimit", 6) # kube defaults
job_spec.completions = job_spec.get("completions", 1)
job_spec.parallelism = job_spec.get("parallelism", 1)
job_spec.suspend = job_spec.get("suspend", False)
pod_spec = MockDict(jobd["spec"]["template"]["spec"])
pod_spec.restart_policy = pod_spec.get("restartPolicy", "Never")
pod_spec.preemption_policy = pod_spec.get(
"preemptionPolicy", "PreemptLowerPriority"
)
pod_spec.node_name = pod_spec.get("nodeName", None)
pod_spec.node_selector = pod_spec.get("nodeSelector", {})
pod_spec.containers = pod_spec.get("containers")
for i, cont in enumerate(pod_spec.containers):
pod_spec.containers[i] = MockDict(cont)
job_spec.template = MockDict(
{
"spec": pod_spec,
}
)
mock_job = MockDict(
{
"status": mock_status,
"spec": job_spec,
"metadata": MockDict(metadata),
}
)
jobs_dict[name] = mock_job
return [mock_job]
| MockCoreV1Api |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/descriptors.py | {
"start": 14278,
"end": 15103
} | class ____:
"""Describes where an input from an AOTAutograd produced FX graph comes from"""
def expr(self) -> str:
raise NotImplementedError("Subclasses must implement expr()")
def is_param(self) -> bool:
"""True if this input is a parameter or derived from a parameter (e.g., subclass attr)"""
return False
def is_buffer(self) -> bool:
"""True if this input is a buffer or derived from a buffer (e.g., subclass attr)"""
return False
def is_tangent(self) -> bool:
"""True if this input is a tangent or derived from a tangent (e.g., subclass attr)"""
return False
# Note: Currently, our typing discipline for differentiable versus not is not
# very good, so feel free to rely on runtime tests instead.
@dataclasses.dataclass(frozen=True)
| AOTInput |
python | doocs__leetcode | solution/2200-2299/2208.Minimum Operations to Halve Array Sum/Solution.py | {
"start": 0,
"end": 318
} | class ____:
def halveArray(self, nums: List[int]) -> int:
s = sum(nums) / 2
pq = []
for x in nums:
heappush(pq, -x)
ans = 0
while s > 0:
t = -heappop(pq) / 2
s -= t
heappush(pq, -t)
ans += 1
return ans
| Solution |
python | tiangolo__fastapi | docs_src/body_nested_models/tutorial007_py39.py | {
"start": 170,
"end": 373
} | class ____(BaseModel):
name: str
description: Union[str, None] = None
price: float
tax: Union[float, None] = None
tags: set[str] = set()
images: Union[list[Image], None] = None
| Item |
python | readthedocs__readthedocs.org | readthedocs/projects/views/private.py | {
"start": 23759,
"end": 24450
} | class ____(ProjectNotificationsMixin, GenericView):
http_method_names = ["post"]
success_message = _("Notification deleted")
def post(self, request, *args, **kwargs):
project = self.get_project()
try:
project.emailhook_notifications.get(
email=request.POST.get("email"),
).delete()
except EmailHook.DoesNotExist:
try:
project.webhook_notifications.get(
url=request.POST.get("email"),
).delete()
except WebHook.DoesNotExist:
raise Http404
return HttpResponseRedirect(self.get_success_url())
| ProjectNotificationsDelete |
python | scikit-learn__scikit-learn | examples/applications/plot_out_of_core_classification.py | {
"start": 1657,
"end": 13490
} | class ____(HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding="latin-1"):
HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = "start_" + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = "end_" + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r"\s+", r" ", self.body)
self.docs.append(
{"title": self.title, "body": self.body, "topics": self.topics}
)
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = "https://kdd.ics.uci.edu/databases/reuters21578/reuters21578.tar.gz"
ARCHIVE_SHA256 = "3bae43c9b14e387f76a61b6d82bf98a4fb5d3ef99ef7e7075ff2ccbcf59f9d30"
ARCHIVE_FILENAME = "reuters21578.tar.gz"
if data_path is None:
data_path = Path(get_data_home()) / "reuters"
else:
data_path = Path(data_path)
if not data_path.exists():
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" % data_path)
data_path.mkdir(parents=True, exist_ok=True)
def progress(blocknum, bs, size):
total_sz_mb = "%.2f MB" % (size / 1e6)
current_sz_mb = "%.2f MB" % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
sys.stdout.write("\rdownloaded %s / %s" % (current_sz_mb, total_sz_mb))
archive_path = data_path / ARCHIVE_FILENAME
urlretrieve(DOWNLOAD_URL, filename=archive_path, reporthook=progress)
if _not_in_sphinx():
sys.stdout.write("\r")
# Check that the archive was not tampered:
assert sha256(archive_path.read_bytes()).hexdigest() == ARCHIVE_SHA256
print("untarring Reuters dataset...")
with tarfile.open(archive_path, "r:gz") as fp:
fp.extractall(data_path, filter="data")
print("done.")
parser = ReutersParser()
for filename in data_path.glob("*.sgm"):
for doc in parser.parse(open(filename, "rb")):
yield doc
# %%
# Main
# ----
#
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(
decode_error="ignore", n_features=2**18, alternate_sign=False
)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = "acq"
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
"SGD": SGDClassifier(max_iter=5),
"Perceptron": Perceptron(),
"NB Multinomial": MultinomialNB(alpha=0.01),
"Passive-Aggressive": SGDClassifier(
loss="hinge", penalty=None, learning_rate="pa1", eta0=1.0
),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [
("{title}\n\n{body}".format(**doc), pos_class in doc["topics"])
for doc in itertools.islice(doc_iter, size)
if doc["topics"]
]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {"n_test": 0, "n_test_pos": 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats["n_test"] += len(y_test)
test_stats["n_test_pos"] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
    """Format a one-line progress report for one classifier's stats dict."""
    elapsed = time.time() - stats["t0"]
    parts = [
        "%20s classifier : \t" % cls_name,
        "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats,
        # test_stats is the module-level dict describing the held-out set
        "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats,
        "accuracy: %(accuracy).3f " % stats,
        "in %.2fs (%5d docs/s)" % (elapsed, stats["n_train"] / elapsed),
    ]
    return "".join(parts)
# Per-classifier bookkeeping, updated in the main loop below and consumed by
# the plotting code at the end of the script.
cls_stats = {}
for cls_name in partial_fit_classifiers:
    stats = {
        "n_train": 0,  # training documents seen so far
        "n_train_pos": 0,  # positive training documents seen so far
        "accuracy": 0.0,  # latest accuracy on the held-out test set
        "accuracy_history": [(0, 0)],  # (accuracy, n_train) pairs
        "t0": time.time(),
        "runtime_history": [(0, 0)],  # (accuracy, cumulative runtime) pairs
        "total_fit_time": 0.0,  # seconds spent in partial_fit so far
    }
    cls_stats[cls_name] = stats
# Discard test set.
# NOTE(review): this consumes a *further* ``n_test_documents`` documents from
# the stream, in addition to the test set already extracted above -- confirm
# this extra skip is intentional.
get_minibatch(data_stream, n_test_documents)
# We will feed the classifiers with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Iterator yielding (X_text, y) mini-batches drawn from the already-created
# ``data_stream``.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
# Cumulative time spent hashing text into feature vectors; shared across all
# classifiers since they train on the same transformed mini-batches.
total_vect_time = 0.0
# Main loop : iterate on mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
    tick = time.time()
    X_train = vectorizer.transform(X_train_text)
    total_vect_time += time.time() - tick
    for cls_name, cls in partial_fit_classifiers.items():
        tick = time.time()
        # update estimator with examples in the current mini-batch
        cls.partial_fit(X_train, y_train, classes=all_classes)
        # accumulate test accuracy stats
        cls_stats[cls_name]["total_fit_time"] += time.time() - tick
        cls_stats[cls_name]["n_train"] += X_train.shape[0]
        cls_stats[cls_name]["n_train_pos"] += sum(y_train)
        tick = time.time()
        # score against the fixed held-out test set vectorized earlier
        cls_stats[cls_name]["accuracy"] = cls.score(X_test, y_test)
        cls_stats[cls_name]["prediction_time"] = time.time() - tick
        # history of (accuracy, #train docs), plotted at the end
        acc_history = (cls_stats[cls_name]["accuracy"], cls_stats[cls_name]["n_train"])
        cls_stats[cls_name]["accuracy_history"].append(acc_history)
        # history of (accuracy, cumulative vectorize+fit seconds)
        run_history = (
            cls_stats[cls_name]["accuracy"],
            total_vect_time + cls_stats[cls_name]["total_fit_time"],
        )
        cls_stats[cls_name]["runtime_history"].append(run_history)
        # report only every third mini-batch to keep the output readable
        if i % 3 == 0:
            print(progress(cls_name, cls_stats[cls_name]))
    # blank separator line after each reported round of classifiers
    if i % 3 == 0:
        print("\n")
# %%
# Plot results
# ------------
#
# The plot represents the learning curve of the classifier: the evolution
# of classification accuracy over the course of the mini-batches. Accuracy is
# measured on the first 1000 samples, held out as a validation set.
#
# To limit the memory consumption, we queue examples up to a fixed amount
# before feeding them to the learner.
def plot_accuracy(x, y, x_legend):
    """Draw accuracy (``y``) against ``x`` on the current matplotlib axes.

    ``x_legend`` names the x quantity in the title and axis label.
    """
    xs = np.array(x)
    ys = np.array(y)
    plt.xlabel("%s" % x_legend)
    plt.ylabel("Accuracy")
    plt.title("Classification accuracy as a function of %s" % x_legend)
    plt.grid(True)
    plt.plot(xs, ys)
rcParams["legend.fontsize"] = 10
# Sorted so legend entries line up with the sorted iteration order below.
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
    # Plot accuracy evolution with #examples
    accuracy, n_examples = zip(*stats["accuracy_history"])
    plot_accuracy(n_examples, accuracy, "training examples (#)")
    ax = plt.gca()
    ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc="best")
plt.figure()
for _, stats in sorted(cls_stats.items()):
    # Plot accuracy evolution with runtime
    accuracy, runtime = zip(*stats["runtime_history"])
    plot_accuracy(runtime, accuracy, "runtime (s)")
    ax = plt.gca()
    ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc="best")
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = [stats["total_fit_time"] for cls_name, stats in sorted(cls_stats.items())]
# Shared vectorization time gets its own bar alongside the classifiers.
cls_runtime.append(total_vect_time)
cls_names.append("Vectorization")  # NB: mutates cls_names; rebuilt later
bar_colors = ["b", "g", "r", "c", "m", "y"]  # also reused for the next chart
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5, color=bar_colors)
ax.set_xticks(np.linspace(0, len(cls_names) - 1, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
# Leave 20% headroom above the tallest bar for the autolabel annotations.
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel("runtime (s)")
ax.set_title("Training Times")
def autolabel(rectangles):
    """Annotate each bar with its height, centered just above the bar.

    Writes onto the module-level ``ax`` (the most recently created subplot).
    """
    for bar in rectangles:
        h = bar.get_height()
        x_center = bar.get_x() + bar.get_width() / 2.0
        ax.text(x_center, 1.05 * h, "%.4f" % h, ha="center", va="bottom")
# Rotate tick labels so the long classifier names do not overlap.
plt.setp(plt.xticks()[1], rotation=30)
autolabel(rectangles)
plt.tight_layout()
plt.show()
# Plot prediction times
plt.figure()
cls_runtime = []
# Rebuild cls_names from scratch (it was mutated by the previous chart).
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
    cls_runtime.append(stats["prediction_time"])
# parsing_time / vectorizing_time were measured once while preparing the
# held-out test set at the top of the script.
cls_runtime.append(parsing_time)
cls_names.append("Read/Parse\n+Feat.Extr.")
cls_runtime.append(vectorizing_time)
cls_names.append("Hashing\n+Vect.")
ax = plt.subplot(111)  # also the axes that autolabel() annotates below
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5, color=bar_colors)
ax.set_xticks(np.linspace(0, len(cls_names) - 1, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
# Headroom above the tallest bar for the autolabel annotations.
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel("runtime (s)")
ax.set_title("Prediction Times (%d instances)" % n_test_documents)
autolabel(rectangles)
plt.tight_layout()
plt.show()
| ReutersParser |
python | google__pytype | pytype/tests/test_typed_dict.py | {
"start": 108,
"end": 11080
} | class ____(test_base.BaseTest):
"""Tests for typing.TypedDict."""
def test_init(self):
err = self.CheckWithErrors("""
from typing_extensions import TypedDict
class A(TypedDict):
x: int
y: str
a = A(x=1, y='2')
b = A(x=1, y=2) # wrong-arg-types[e1]
c = A(x=1) # missing-parameter[e2]
d = A(y='1') # missing-parameter
e = A(1, '2') # missing-parameter
""")
self.assertErrorSequences(
err,
{
"e1": ["Expected", "(*, x, y: str)", "Actual", "(x, y: int)"],
"e2": ["Expected", "(*, x, y)", "Actual", "(x)"],
},
)
def test_key_error(self):
# TODO(b/63407497): Enabling --strict-parameter-checks leads to an extra
# wrong-arg-types error on line 8.
self.options.tweak(strict_parameter_checks=False)
err = self.CheckWithErrors("""
from typing_extensions import TypedDict
class A(TypedDict):
x: int
y: str
a = A(x=1, y="2")
a["z"] = 10 # typed-dict-error[e1]
a[10] = 10 # typed-dict-error[e2]
b = a["z"] # typed-dict-error
del a["z"] # typed-dict-error
""")
self.assertErrorSequences(
err,
{
"e1": ["TypedDict A", "key z"],
"e2": ["TypedDict A", "requires all keys", "strings"],
},
)
def test_value_error(self):
err = self.CheckWithErrors("""
from typing_extensions import TypedDict
class A(TypedDict):
x: int
y: str
a = A(x=1, y="2")
a["x"] = "10" # annotation-type-mismatch[e]
""")
self.assertErrorSequences(
err,
{
"e": [
"Type annotation",
"key x",
"TypedDict A",
"Annotation: int",
"Assignment: str",
]
},
)
def test_union_type(self):
err = self.CheckWithErrors("""
from typing_extensions import TypedDict
from typing import Union
class A(TypedDict):
x: Union[int, str]
y: Union[int, str]
a = A(x=1, y="2")
a["x"] = "10"
a["y"] = [] # annotation-type-mismatch[e]
""")
self.assertErrorSequences(
err,
{
"e": [
"Type annotation",
"key y",
"TypedDict A",
"Annotation: Union[int, str]",
"Assignment: list[nothing]",
]
},
)
def test_bad_base_class(self):
err = self.CheckWithErrors("""
from typing_extensions import TypedDict
class Foo: pass
class Bar(TypedDict, Foo): # base-class-error[e]
x: int
""")
self.assertErrorSequences(
err,
{"e": ["Invalid base class", "Foo", "TypedDict Bar", "cannot inherit"]},
)
def test_inheritance(self):
self.CheckWithErrors("""
from typing_extensions import TypedDict
class Foo(TypedDict):
x: int
class Bar(TypedDict):
y: str
class Baz(Foo, Bar):
z: bool
a = Baz(x=1, y='2', z=False)
a['x'] = 1
a['y'] = 2 # annotation-type-mismatch
a['z'] = True
a['w'] = True # typed-dict-error
""")
def test_inheritance_clash(self):
err = self.CheckWithErrors("""
from typing_extensions import TypedDict
class Foo(TypedDict):
x: int
class Bar(TypedDict):
y: str
class Baz(Foo, Bar): # base-class-error[e]
x: bool
""")
self.assertErrorSequences(err, {"e": ["Duplicate", "key x", "Foo", "Baz"]})
def test_annotation(self):
err = self.CheckWithErrors("""
from typing_extensions import TypedDict
class A(TypedDict):
x: int
y: str
a: A = {'x': '10', 'z': 20} # annotation-type-mismatch[e]
""")
self.assertErrorSequences(
err,
{
"e": [
"Annotation: A",
"extra keys",
"z",
"type errors",
"{'x': ...}",
"expected int",
"got str",
]
},
)
def test_annotated_global_var(self):
ty = self.Infer("""
from typing_extensions import TypedDict
class A(TypedDict):
x: int
a: A = {'x': 10}
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import TypedDict
class A(TypedDict):
x: int
a: A
""",
)
def test_annotated_local_var(self):
ty = self.Infer("""
from typing_extensions import TypedDict
class A(TypedDict):
x: int
def f():
a: A = {'x': 10}
return a
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import TypedDict
class A(TypedDict):
x: int
def f() -> A: ...
""",
)
def test_return_type(self):
err = self.CheckWithErrors("""
from typing_extensions import TypedDict
class A(TypedDict):
x: int
y: str
def f() -> A:
return {'x': '10', 'z': 20} # bad-return-type[e]
""")
self.assertErrorSequences(
err,
{
"e": [
"Expected: A",
"extra keys",
"z",
"type errors",
"{'x': ...}",
"expected int",
"got str",
]
},
)
def test_total_with_constructor(self):
self.CheckWithErrors("""
from typing_extensions import TypedDict
class Foo(TypedDict, total=True):
w: int
x: int
class Bar(TypedDict, total=False):
y: str
z: bool
class Baz(Foo, Bar):
a: int
a = Baz(w=1, x=1, y='2', z=False, a=2)
b = Baz(w=1, x=1, a=2)
c = Baz(w=1, x=1, y='2') # missing-parameter
d = Baz(w=1, x=1, a=2, b=3) # wrong-keyword-args
""")
def test_total_with_annotation(self):
err = self.CheckWithErrors("""
from typing_extensions import TypedDict
class Foo(TypedDict, total=True):
w: int
x: int
class Bar(TypedDict, total=False):
y: str
z: bool
class Baz(Foo, Bar):
a: int
a: Baz = {'w': 1, 'x': 1, 'y': '2', 'z': False, 'a': 2}
b: Baz = {'w': 1, 'x': 1, 'a': 2}
c: Baz = {'w': 1, 'y': '2', 'z': False, 'a': 2} # annotation-type-mismatch[e1]
d: Baz = {'w': 1, 'x': 1, 'y': '2', 'b': False, 'a': 2} # annotation-type-mismatch[e2]
""")
self.assertErrorSequences(
err,
{
"e1": ["missing keys", "x"],
"e2": ["extra keys", "b"],
},
)
def test_function_arg_matching(self):
err = self.CheckWithErrors("""
from typing_extensions import TypedDict
class A(TypedDict):
x: int
y: str
def f(a: A):
pass
a: A = {'x': 10, 'y': 'a'}
b = {'x': 10, 'y': 'a'}
c = {'x': 10}
f(a)
f(b)
f(c) # wrong-arg-types[e]
""")
self.assertErrorSequences(err, {"e": ["TypedDict", "missing keys", "y"]})
def test_function_arg_instantiation(self):
self.CheckWithErrors("""
from typing_extensions import TypedDict
class A(TypedDict):
x: int
y: str
def f(a: A):
a['z'] = 10 # typed-dict-error
""")
def test_function_arg_getitem(self):
self.CheckWithErrors("""
from typing import Union
from typing_extensions import TypedDict
class A(TypedDict):
x: int
y: Union[int, str]
def f(a: A) -> int:
assert_type(a['x'], int)
assert_type(a['y'], Union[int, str])
return a['z'] # typed-dict-error
""")
def test_output_type(self):
ty = self.Infer("""
from typing_extensions import TypedDict
class Foo(TypedDict):
x: int
y: str
def f(x: Foo) -> None:
pass
foo = Foo(x=1, y="2")
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import TypedDict
foo: Foo
class Foo(TypedDict):
x: int
y: str
def f(x: Foo) -> None: ...
""",
)
def test_instantiate(self):
self.Check("""
from typing_extensions import TypedDict
class Foo(TypedDict):
x: int
def f(x: Foo):
pass
x: Foo
f(x)
""")
def test_key_existence_check(self):
self.Check("""
from typing import Union
from typing_extensions import TypedDict
class Foo(TypedDict):
a: int
class Bar(TypedDict):
b: str
class Baz(TypedDict):
c: Union[Foo, Bar]
baz: Baz = {'c': {'a': 0}}
assert 'a' in baz['c']
print(baz['c']['a'])
""")
def test_get(self):
self.Check("""
from typing_extensions import TypedDict
class X(TypedDict):
a: int
b: str
def f(x: X):
assert_type(x.get('a'), int)
assert_type(x.get('c'), None)
assert_type(x.get('c', ''), str)
""")
def test_generic_holder(self):
self.Check("""
from dataclasses import dataclass
from typing import Generic, TypeVar
from typing_extensions import TypedDict
T = TypeVar('T')
class Animal(TypedDict):
name: str
@dataclass
class GenericHolder(Generic[T]):
a: T
def get(self) -> T:
return self.a
class AnimalHolder(GenericHolder[Animal]):
def get2(self) -> Animal:
return self.get()
""")
def test_match_mapping(self):
self.CheckWithErrors("""
from typing import Mapping
from typing_extensions import TypedDict
class A(TypedDict):
x: int
def f1(a: Mapping[str, int]):
pass
def f2(a: Mapping[int, str]):
pass
f1(A(x=0)) # ok
f2(A(x=0)) # wrong-arg-types
""")
def test_typed_dict_dataclass(self):
self.Check("""
import dataclasses
from typing_extensions import TypedDict
@dataclasses.dataclass
class A(TypedDict):
x: int
def f():
return A(x=0)
""")
def test_iterable_generic_class_and_recursive_type_interaction(self):
with self.DepTree([(
"foo.pyi",
"""
from typing import Any, Generic, Iterable, TypeVar, Union
_ShapeType = TypeVar('_ShapeType')
_DType = TypeVar('_DType')
class ndarray(Generic[_ShapeType, _DType]):
def __iter__(self) -> Any: ...
ArrayTree = Union[Iterable[ArrayTree], ndarray]
""",
)]):
self.Check("""
import foo
from typing_extensions import TypedDict
class TD(TypedDict):
x: foo.ArrayTree
def f() -> TD:
return __any_object__
""")
def test_duplicate_key(self):
self.CheckWithErrors("""
from typing_extensions import TypedDict
class TD(TypedDict): # invalid-annotation
x: int
x: str
""")
| TypedDictTest |
python | pypa__hatch | tests/project/test_frontend.py | {
"start": 7527,
"end": 9124
} | class ____:
@pytest.mark.parametrize(
("backend_pkg", "backend_api"),
[pytest.param(backend_pkg, backend_api, id=backend_pkg) for backend_pkg, backend_api in BACKENDS],
)
def test_standard(self, temp_dir, temp_dir_data, platform, global_application, backend_pkg, backend_api):
project_dir = temp_dir / "project"
project_dir.mkdir()
(project_dir / "pyproject.toml").write_text(
f"""\
[build-system]
requires = ["{backend_pkg}"]
build-backend = "{backend_api}"
[project]
name = "foo"
version = "9000.42"
description = "text"
"""
)
package_dir = project_dir / "foo"
package_dir.mkdir()
(package_dir / "__init__.py").touch()
project = Project(project_dir)
project.build_env = MockEnvironment(
temp_dir,
project.metadata,
"default",
project.config.envs["default"],
{},
temp_dir_data,
temp_dir_data,
platform,
0,
global_application,
)
output_dir = temp_dir / "output"
output_dir.mkdir()
script = project.build_frontend.scripts.build_sdist(output_dir=str(output_dir), project_root=str(project_dir))
platform.check_command([sys.executable, "-c", script])
work_dir = output_dir / "work"
output = json.loads((output_dir / "output.json").read_text())
sdist_path = work_dir / output["return_val"]
assert sdist_path.is_file()
assert sdist_path.name == "foo-9000.42.tar.gz"
| TestSourceDistribution |
python | openai__openai-python | src/openai/types/responses/parsed_response.py | {
"start": 2364,
"end": 3232
} | class ____(ResponseFunctionToolCall):
parsed_arguments: object = None
__api_exclude__ = {"parsed_arguments"}
ParsedResponseOutputItem: TypeAlias = Annotated[
Union[
ParsedResponseOutputMessage[ContentType],
ParsedResponseFunctionToolCall,
ResponseFileSearchToolCall,
ResponseFunctionWebSearch,
ResponseComputerToolCall,
ResponseReasoningItem,
McpCall,
McpApprovalRequest,
ImageGenerationCall,
LocalShellCall,
LocalShellCallAction,
McpListTools,
ResponseCodeInterpreterToolCall,
ResponseCustomToolCall,
ResponseFunctionShellToolCall,
ResponseFunctionShellToolCallOutput,
ResponseApplyPatchToolCall,
ResponseApplyPatchToolCallOutput,
],
PropertyInfo(discriminator="type"),
]
| ParsedResponseFunctionToolCall |
python | pytorch__pytorch | test/inductor/test_quantization.py | {
"start": 436,
"end": 727
} | class ____(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x1, x2):
relued = torch.relu(x1)
tanhed = torch.tanh(relued)
tensor = torch.matmul(
tanhed,
x2,
)
return tensor
| TargetCPModule |
python | pypa__pip | src/pip/_vendor/urllib3/packages/six.py | {
"start": 3105,
"end": 3598
} | class ____(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
| MovedModule |
python | pypa__pipenv | pipenv/patched/pip/_internal/cache.py | {
"start": 6652,
"end": 7419
} | class ____:
def __init__(
self,
link: Link,
persistent: bool,
):
self.link = link
self.persistent = persistent
self.origin: Optional[DirectUrl] = None
origin_direct_url_path = Path(self.link.file_path).parent / ORIGIN_JSON_NAME
if origin_direct_url_path.exists():
try:
self.origin = DirectUrl.from_json(
origin_direct_url_path.read_text(encoding="utf-8")
)
except Exception as e:
logger.warning(
"Ignoring invalid cache entry origin file %s for %s (%s)",
origin_direct_url_path,
link.filename,
e,
)
| CacheEntry |
python | django__django | tests/auth_tests/test_basic.py | {
"start": 412,
"end": 5615
} | class ____(TestCase):
def test_user(self):
"Users can be created and can set their password"
u = User.objects.create_user("testuser", "test@example.com", "testpw")
self.assertTrue(u.has_usable_password())
self.assertFalse(u.check_password("bad"))
self.assertTrue(u.check_password("testpw"))
# Check we can manually set an unusable password
u.set_unusable_password()
u.save()
self.assertFalse(u.check_password("testpw"))
self.assertFalse(u.has_usable_password())
u.set_password("testpw")
self.assertTrue(u.check_password("testpw"))
u.set_password(None)
self.assertFalse(u.has_usable_password())
# Check username getter
self.assertEqual(u.get_username(), "testuser")
# Check authentication/permissions
self.assertFalse(u.is_anonymous)
self.assertTrue(u.is_authenticated)
self.assertFalse(u.is_staff)
self.assertTrue(u.is_active)
self.assertFalse(u.is_superuser)
# Check API-based user creation with no password
u2 = User.objects.create_user("testuser2", "test2@example.com")
self.assertFalse(u2.has_usable_password())
async def test_acreate(self):
u = await User.objects.acreate_user("testuser", "test@example.com", "testpw")
self.assertTrue(u.has_usable_password())
self.assertFalse(await u.acheck_password("bad"))
self.assertTrue(await u.acheck_password("testpw"))
def test_unicode_username(self):
User.objects.create_user("jörg")
User.objects.create_user("Григорий")
# Two equivalent Unicode normalized usernames are duplicates.
omega_username = "iamtheΩ" # U+03A9 GREEK CAPITAL LETTER OMEGA
ohm_username = "iamtheΩ" # U+2126 OHM SIGN
User.objects.create_user(ohm_username)
with self.assertRaises(IntegrityError):
User.objects.create_user(omega_username)
def test_user_no_email(self):
"Users can be created without an email"
cases = [
{},
{"email": ""},
{"email": None},
]
for i, kwargs in enumerate(cases):
with self.subTest(**kwargs):
u = User.objects.create_user("testuser{}".format(i), **kwargs)
self.assertEqual(u.email, "")
def test_superuser(self):
"Check the creation and properties of a superuser"
super = User.objects.create_superuser("super", "super@example.com", "super")
self.assertTrue(super.is_superuser)
self.assertTrue(super.is_active)
self.assertTrue(super.is_staff)
async def test_asuperuser(self):
"Check the creation and properties of a superuser"
super = await User.objects.acreate_superuser(
"super", "super@example.com", "super"
)
self.assertTrue(super.is_superuser)
self.assertTrue(super.is_active)
self.assertTrue(super.is_staff)
def test_superuser_no_email_or_password(self):
cases = [
{},
{"email": ""},
{"email": None},
{"password": None},
]
for i, kwargs in enumerate(cases):
with self.subTest(**kwargs):
superuser = User.objects.create_superuser("super{}".format(i), **kwargs)
self.assertEqual(superuser.email, "")
self.assertFalse(superuser.has_usable_password())
def test_get_user_model(self):
"The current user model can be retrieved"
self.assertEqual(get_user_model(), User)
@override_settings(AUTH_USER_MODEL="auth_tests.CustomUser")
def test_swappable_user(self):
"The current user model can be swapped out for another"
self.assertEqual(get_user_model(), CustomUser)
with self.assertRaises(AttributeError):
User.objects.all()
@override_settings(AUTH_USER_MODEL="badsetting")
def test_swappable_user_bad_setting(self):
"""
The alternate user setting must point to something in the format
app.model
"""
msg = "AUTH_USER_MODEL must be of the form 'app_label.model_name'"
with self.assertRaisesMessage(ImproperlyConfigured, msg):
get_user_model()
@override_settings(AUTH_USER_MODEL="thismodel.doesntexist")
def test_swappable_user_nonexistent_model(self):
"The current user model must point to an installed model"
msg = (
"AUTH_USER_MODEL refers to model 'thismodel.doesntexist' "
"that has not been installed"
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
get_user_model()
def test_user_verbose_names_translatable(self):
"Default User model verbose names are translatable (#19945)"
with translation.override("en"):
self.assertEqual(User._meta.verbose_name, "user")
self.assertEqual(User._meta.verbose_name_plural, "users")
with translation.override("es"):
self.assertEqual(User._meta.verbose_name, "usuario")
self.assertEqual(User._meta.verbose_name_plural, "usuarios")
| BasicTestCase |
python | tornadoweb__tornado | tornado/web.py | {
"start": 4411,
"end": 79199
} | class ____:
"""Base class for HTTP request handlers.
Subclasses must define at least one of the methods defined in the
"Entry points" section below.
Applications should not construct `RequestHandler` objects
directly and subclasses should not override ``__init__`` (override
`~RequestHandler.initialize` instead).
"""
SUPPORTED_METHODS: Tuple[str, ...] = (
"GET",
"HEAD",
"POST",
"DELETE",
"PATCH",
"PUT",
"OPTIONS",
)
_template_loaders = {} # type: Dict[str, template.BaseLoader]
_template_loader_lock = threading.Lock()
_remove_control_chars_regex = re.compile(r"[\x00-\x08\x0e-\x1f]")
_stream_request_body = False
# Will be set in _execute.
_transforms = None # type: List[OutputTransform]
path_args = None # type: List[str]
path_kwargs = None # type: Dict[str, str]
def __init__(
self,
application: "Application",
request: httputil.HTTPServerRequest,
**kwargs: Any,
) -> None:
super().__init__()
self.application = application
self.request = request
self._headers_written = False
self._finished = False
self._auto_finish = True
self._prepared_future = None
self.ui = ObjectDict(
(n, self._ui_method(m)) for n, m in application.ui_methods.items()
)
# UIModules are available as both `modules` and `_tt_modules` in the
# template namespace. Historically only `modules` was available
# but could be clobbered by user additions to the namespace.
# The template {% module %} directive looks in `_tt_modules` to avoid
# possible conflicts.
self.ui["_tt_modules"] = _UIModuleNamespace(self, application.ui_modules)
self.ui["modules"] = self.ui["_tt_modules"]
self.clear()
assert self.request.connection is not None
# TODO: need to add set_close_callback to HTTPConnection interface
self.request.connection.set_close_callback( # type: ignore
self.on_connection_close
)
self.initialize(**kwargs) # type: ignore
def _initialize(self) -> None:
pass
initialize = _initialize # type: Callable[..., None]
"""Hook for subclass initialization. Called for each request.
A dictionary passed as the third argument of a ``URLSpec`` will be
supplied as keyword arguments to ``initialize()``.
Example::
class ProfileHandler(RequestHandler):
def initialize(self, database):
self.database = database
def get(self, username):
...
app = Application([
(r'/user/(.*)', ProfileHandler, dict(database=database)),
])
"""
@property
def settings(self) -> Dict[str, Any]:
"""An alias for `self.application.settings <Application.settings>`."""
return self.application.settings
def _unimplemented_method(self, *args: str, **kwargs: str) -> None:
raise HTTPError(405)
head = _unimplemented_method # type: Callable[..., Optional[Awaitable[None]]]
get = _unimplemented_method # type: Callable[..., Optional[Awaitable[None]]]
post = _unimplemented_method # type: Callable[..., Optional[Awaitable[None]]]
delete = _unimplemented_method # type: Callable[..., Optional[Awaitable[None]]]
patch = _unimplemented_method # type: Callable[..., Optional[Awaitable[None]]]
put = _unimplemented_method # type: Callable[..., Optional[Awaitable[None]]]
options = _unimplemented_method # type: Callable[..., Optional[Awaitable[None]]]
def prepare(self) -> Optional[Awaitable[None]]:
"""Called at the beginning of a request before `get`/`post`/etc.
Override this method to perform common initialization regardless
of the request method. There is no guarantee that ``prepare`` will
be called if an error occurs that is handled by the framework.
Asynchronous support: Use ``async def`` or decorate this method with
`.gen.coroutine` to make it asynchronous.
If this method returns an ``Awaitable`` execution will not proceed
until the ``Awaitable`` is done.
.. versionadded:: 3.1
Asynchronous support.
"""
pass
def on_finish(self) -> None:
"""Called after the end of a request.
Override this method to perform cleanup, logging, etc. This method is primarily intended as
a counterpart to `prepare`. However, there are a few error cases where ``on_finish`` may be
called when ``prepare`` has not. (These are considered bugs and may be fixed in the future,
but for now you may need to check to see if the initialization work done in ``prepare`` has
occurred)
``on_finish`` may not produce any output, as it is called after the response has been sent
to the client.
"""
pass
def on_connection_close(self) -> None:
"""Called in async handlers if the client closed the connection.
Override this to clean up resources associated with
long-lived connections. Note that this method is called only if
the connection was closed during asynchronous processing; if you
need to do cleanup after every request override `on_finish`
instead.
Proxies may keep a connection open for a time (perhaps
indefinitely) after the client has gone away, so this method
may not be called promptly after the end user closes their
connection.
"""
if _has_stream_request_body(self.__class__):
if not self.request._body_future.done():
self.request._body_future.set_exception(iostream.StreamClosedError())
self.request._body_future.exception()
def clear(self) -> None:
"""Resets all headers and content for this response."""
self._headers = httputil.HTTPHeaders(
{
"Server": "TornadoServer/%s" % tornado.version,
"Content-Type": "text/html; charset=UTF-8",
"Date": httputil.format_timestamp(time.time()),
}
)
self.set_default_headers()
self._write_buffer = [] # type: List[bytes]
self._status_code = 200
self._reason = httputil.responses[200]
def set_default_headers(self) -> None:
"""Override this to set HTTP headers at the beginning of the request.
For example, this is the place to set a custom ``Server`` header.
Note that setting such headers in the normal flow of request
processing may not do what you want, since headers may be reset
during error handling.
"""
pass
def set_status(self, status_code: int, reason: Optional[str] = None) -> None:
"""Sets the status code for our response.
:arg int status_code: Response status code.
:arg str reason: Human-readable reason phrase describing the status
code. If ``None``, it will be filled in from
`http.client.responses` or "Unknown".
.. versionchanged:: 5.0
No longer validates that the response code is in
`http.client.responses`.
"""
self._status_code = status_code
if reason is not None:
self._reason = escape.native_str(reason)
else:
self._reason = httputil.responses.get(status_code, "Unknown")
def get_status(self) -> int:
"""Returns the status code for our response."""
return self._status_code
def set_header(self, name: str, value: _HeaderTypes) -> None:
"""Sets the given response header name and value.
All header values are converted to strings (`datetime` objects
are formatted according to the HTTP specification for the
``Date`` header).
"""
self._headers[name] = self._convert_header_value(value)
def add_header(self, name: str, value: _HeaderTypes) -> None:
"""Adds the given response header and value.
Unlike `set_header`, `add_header` may be called multiple times
to return multiple values for the same header.
"""
self._headers.add(name, self._convert_header_value(value))
def clear_header(self, name: str) -> None:
"""Clears an outgoing header, undoing a previous `set_header` call.
Note that this method does not apply to multi-valued headers
set by `add_header`.
"""
if name in self._headers:
del self._headers[name]
# https://www.rfc-editor.org/rfc/rfc9110#name-field-values
_VALID_HEADER_CHARS = re.compile(r"[\x09\x20-\x7e\x80-\xff]*")
def _convert_header_value(self, value: _HeaderTypes) -> str:
# Convert the input value to a str. This type check is a bit
# subtle: The bytes case only executes on python 3, and the
# unicode case only executes on python 2, because the other
# cases are covered by the first match for str.
if isinstance(value, str):
retval = value
elif isinstance(value, bytes):
# Non-ascii characters in headers are not well supported,
# but if you pass bytes, use latin1 so they pass through as-is.
retval = value.decode("latin1")
elif isinstance(value, numbers.Integral):
# return immediately since we know the converted value will be safe
return str(value)
elif isinstance(value, datetime.datetime):
return httputil.format_timestamp(value)
else:
raise TypeError("Unsupported header value %r" % value)
# If \n is allowed into the header, it is possible to inject
# additional headers or split the request.
if RequestHandler._VALID_HEADER_CHARS.fullmatch(retval) is None:
raise ValueError("Unsafe header value %r", retval)
return retval
@overload
def get_argument(self, name: str, default: str, strip: bool = True) -> str:
pass
@overload
def get_argument( # noqa: F811
self, name: str, default: _ArgDefaultMarker = _ARG_DEFAULT, strip: bool = True
) -> str:
pass
@overload
def get_argument( # noqa: F811
self, name: str, default: None, strip: bool = True
) -> Optional[str]:
pass
def get_argument( # noqa: F811
self,
name: str,
default: Union[None, str, _ArgDefaultMarker] = _ARG_DEFAULT,
strip: bool = True,
) -> Optional[str]:
"""Returns the value of the argument with the given name.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the request more than once, we return the
last value.
This method searches both the query and body arguments.
"""
return self._get_argument(name, default, self.request.arguments, strip)
def get_arguments(self, name: str, strip: bool = True) -> List[str]:
"""Returns a list of the arguments with the given name.
If the argument is not present, returns an empty list.
This method searches both the query and body arguments.
"""
# Make sure `get_arguments` isn't accidentally being called with a
# positional argument that's assumed to be a default (like in
# `get_argument`.)
assert isinstance(strip, bool)
return self._get_arguments(name, self.request.arguments, strip)
    # Overloads mirror `get_argument`: the return type depends on whether
    # the caller supplies a str default, no default, or ``None``.
    @overload
    def get_body_argument(self, name: str, default: str, strip: bool = True) -> str:
        pass

    @overload
    def get_body_argument(  # noqa: F811
        self, name: str, default: _ArgDefaultMarker = _ARG_DEFAULT, strip: bool = True
    ) -> str:
        pass

    @overload
    def get_body_argument(  # noqa: F811
        self, name: str, default: None, strip: bool = True
    ) -> Optional[str]:
        pass

    def get_body_argument(  # noqa: F811
        self,
        name: str,
        default: Union[None, str, _ArgDefaultMarker] = _ARG_DEFAULT,
        strip: bool = True,
    ) -> Optional[str]:
        """Returns the value of the argument with the given name
        from the request body.

        If default is not provided, the argument is considered to be
        required, and we raise a `MissingArgumentError` if it is missing.

        If the argument appears in the url more than once, we return the
        last value.

        .. versionadded:: 3.2
        """
        # Same lookup as `get_argument`, restricted to body arguments only.
        return self._get_argument(name, default, self.request.body_arguments, strip)
    def get_body_arguments(self, name: str, strip: bool = True) -> List[str]:
        """Returns a list of the body arguments with the given name.

        If the argument is not present, returns an empty list.

        .. versionadded:: 3.2
        """
        # Restricted to arguments parsed from the request body.
        return self._get_arguments(name, self.request.body_arguments, strip)
    # Overloads mirror `get_argument`: the return type depends on whether
    # the caller supplies a str default, no default, or ``None``.
    @overload
    def get_query_argument(self, name: str, default: str, strip: bool = True) -> str:
        pass

    @overload
    def get_query_argument(  # noqa: F811
        self, name: str, default: _ArgDefaultMarker = _ARG_DEFAULT, strip: bool = True
    ) -> str:
        pass

    @overload
    def get_query_argument(  # noqa: F811
        self, name: str, default: None, strip: bool = True
    ) -> Optional[str]:
        pass

    def get_query_argument(  # noqa: F811
        self,
        name: str,
        default: Union[None, str, _ArgDefaultMarker] = _ARG_DEFAULT,
        strip: bool = True,
    ) -> Optional[str]:
        """Returns the value of the argument with the given name
        from the request query string.

        If default is not provided, the argument is considered to be
        required, and we raise a `MissingArgumentError` if it is missing.

        If the argument appears in the url more than once, we return the
        last value.

        .. versionadded:: 3.2
        """
        # Same lookup as `get_argument`, restricted to query-string arguments.
        return self._get_argument(name, default, self.request.query_arguments, strip)
    def get_query_arguments(self, name: str, strip: bool = True) -> List[str]:
        """Returns a list of the query arguments with the given name.

        If the argument is not present, returns an empty list.

        .. versionadded:: 3.2
        """
        # Restricted to arguments parsed from the URL query string.
        return self._get_arguments(name, self.request.query_arguments, strip)
def _get_argument(
self,
name: str,
default: Union[None, str, _ArgDefaultMarker],
source: Dict[str, List[bytes]],
strip: bool = True,
) -> Optional[str]:
args = self._get_arguments(name, source, strip=strip)
if not args:
if isinstance(default, _ArgDefaultMarker):
raise MissingArgumentError(name)
return default
return args[-1]
def _get_arguments(
self, name: str, source: Dict[str, List[bytes]], strip: bool = True
) -> List[str]:
values = []
for v in source.get(name, []):
s = self.decode_argument(v, name=name)
if isinstance(s, unicode_type):
# Get rid of any weird control chars (unless decoding gave
# us bytes, in which case leave it alone)
s = RequestHandler._remove_control_chars_regex.sub(" ", s)
if strip:
s = s.strip()
values.append(s)
return values
    def decode_argument(self, value: bytes, name: Optional[str] = None) -> str:
        """Decodes an argument from the request.

        The argument has been percent-decoded and is now a byte string.
        By default, this method decodes the argument as utf-8 and returns
        a unicode string, but this may be overridden in subclasses.

        This method is used as a filter for both `get_argument()` and for
        values extracted from the url and passed to `get()`/`post()`/etc.

        The name of the argument is provided if known, but may be None
        (e.g. for unnamed groups in the url regex).

        :raises HTTPError: 400 if the bytes are not valid UTF-8.
        """
        try:
            return _unicode(value)
        except UnicodeDecodeError:
            # Truncate the value to 40 bytes so a huge bad argument does not
            # bloat the error message.
            raise HTTPError(
                400, "Invalid unicode in {}: {!r}".format(name or "url", value[:40])
            )
    @property
    def cookies(self) -> Dict[str, http.cookies.Morsel]:
        """An alias for
        `self.request.cookies <.httputil.HTTPServerRequest.cookies>`.

        Contains only cookies sent by the client; cookies set during this
        request via `set_cookie` are not visible here.
        """
        return self.request.cookies
@overload
def get_cookie(self, name: str, default: str) -> str:
pass
@overload
def get_cookie(self, name: str, default: None = None) -> Optional[str]:
pass
def get_cookie(self, name: str, default: Optional[str] = None) -> Optional[str]:
"""Returns the value of the request cookie with the given name.
If the named cookie is not present, returns ``default``.
This method only returns cookies that were present in the request.
It does not see the outgoing cookies set by `set_cookie` in this
handler.
"""
if self.request.cookies is not None and name in self.request.cookies:
return self.request.cookies[name].value
return default
    def set_cookie(
        self,
        name: str,
        value: Union[str, bytes],
        domain: Optional[str] = None,
        expires: Optional[Union[float, Tuple, datetime.datetime]] = None,
        path: str = "/",
        expires_days: Optional[float] = None,
        # Keyword-only args start here for historical reasons.
        *,
        max_age: Optional[int] = None,
        httponly: bool = False,
        secure: bool = False,
        samesite: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Sets an outgoing cookie name/value with the given options.

        Newly-set cookies are not immediately visible via `get_cookie`;
        they are not present until the next request.

        Most arguments are passed directly to `http.cookies.Morsel` directly.
        See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie
        for more information.

        ``expires`` may be a numeric timestamp as returned by `time.time`,
        a time tuple as returned by `time.gmtime`, or a
        `datetime.datetime` object. ``expires_days`` is provided as a convenience
        to set an expiration time in days from today (if both are set, ``expires``
        is used).

        .. deprecated:: 6.3
           Keyword arguments are currently accepted case-insensitively.
           In Tornado 7.0 this will be changed to only accept lowercase
           arguments.
        """
        # The cookie library only accepts type str, in both python 2 and 3
        name = escape.native_str(name)
        value = escape.native_str(value)
        if re.search(r"[\x00-\x20]", name + value):
            # Don't let us accidentally inject bad stuff
            raise ValueError(f"Invalid cookie {name!r}: {value!r}")
        # Pending cookies accumulate in a SimpleCookie side object; they are
        # converted to Set-Cookie headers in flush().
        if not hasattr(self, "_new_cookie"):
            self._new_cookie = (
                http.cookies.SimpleCookie()
            )  # type: http.cookies.SimpleCookie
        if name in self._new_cookie:
            # Last set_cookie() for a name wins.
            del self._new_cookie[name]
        self._new_cookie[name] = value
        morsel = self._new_cookie[name]
        if domain:
            morsel["domain"] = domain
        if expires_days is not None and not expires:
            # expires_days is only a fallback; an explicit `expires` wins.
            expires = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
                days=expires_days
            )
        if expires:
            morsel["expires"] = httputil.format_timestamp(expires)
        if path:
            morsel["path"] = path
        if max_age:
            # Note change from _ to -.
            morsel["max-age"] = str(max_age)
        if httponly:
            # Note that SimpleCookie ignores the value here. The presence of an
            # httponly (or secure) key is treated as true.
            morsel["httponly"] = True
        if secure:
            morsel["secure"] = True
        if samesite:
            morsel["samesite"] = samesite
        if kwargs:
            # The setitem interface is case-insensitive, so continue to support
            # kwargs for backwards compatibility until we can remove deprecated
            # features.
            for k, v in kwargs.items():
                morsel[k] = v
            warnings.warn(
                f"Deprecated arguments to set_cookie: {set(kwargs.keys())} "
                "(should be lowercase)",
                DeprecationWarning,
            )
def clear_cookie(self, name: str, **kwargs: Any) -> None:
"""Deletes the cookie with the given name.
This method accepts the same arguments as `set_cookie`, except for
``expires`` and ``max_age``. Clearing a cookie requires the same
``domain`` and ``path`` arguments as when it was set. In some cases the
``samesite`` and ``secure`` arguments are also required to match. Other
arguments are ignored.
Similar to `set_cookie`, the effect of this method will not be
seen until the following request.
.. versionchanged:: 6.3
Now accepts all keyword arguments that ``set_cookie`` does.
The ``samesite`` and ``secure`` flags have recently become
required for clearing ``samesite="none"`` cookies.
"""
for excluded_arg in ["expires", "max_age"]:
if excluded_arg in kwargs:
raise TypeError(
f"clear_cookie() got an unexpected keyword argument '{excluded_arg}'"
)
expires = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(
days=365
)
self.set_cookie(name, value="", expires=expires, **kwargs)
    def clear_all_cookies(self, **kwargs: Any) -> None:
        """Attempt to delete all the cookies the user sent with this request.

        See `clear_cookie` for more information on keyword arguments. Due to
        limitations of the cookie protocol, it is impossible to determine on the
        server side which values are necessary for the ``domain``, ``path``,
        ``samesite``, or ``secure`` arguments, this method can only be
        successful if you consistently use the same values for these arguments
        when setting cookies.

        Similar to `set_cookie`, the effect of this method will not be seen
        until the following request.

        .. versionchanged:: 3.2

           Added the ``path`` and ``domain`` parameters.

        .. versionchanged:: 6.3

           Now accepts all keyword arguments that ``set_cookie`` does.

        .. deprecated:: 6.3

           The increasingly complex rules governing cookies have made it
           impossible for a ``clear_all_cookies`` method to work reliably
           since all we know about cookies are their names. Applications
           should generally use ``clear_cookie`` one at a time instead.
        """
        # Clears only cookies the client sent; the same kwargs are applied to
        # every one of them.
        for name in self.request.cookies:
            self.clear_cookie(name, **kwargs)
    def set_signed_cookie(
        self,
        name: str,
        value: Union[str, bytes],
        expires_days: Optional[float] = 30,
        version: Optional[int] = None,
        **kwargs: Any,
    ) -> None:
        """Signs and timestamps a cookie so it cannot be forged.

        You must specify the ``cookie_secret`` setting in your Application
        to use this method. It should be a long, random sequence of bytes
        to be used as the HMAC secret for the signature.

        To read a cookie set with this method, use `get_signed_cookie()`.

        Note that the ``expires_days`` parameter sets the lifetime of the
        cookie in the browser, but is independent of the ``max_age_days``
        parameter to `get_signed_cookie`.
        A value of None limits the lifetime to the current browser session.

        Secure cookies may contain arbitrary byte values, not just unicode
        strings (unlike regular cookies)

        Similar to `set_cookie`, the effect of this method will not be
        seen until the following request.

        .. versionchanged:: 3.2.1

           Added the ``version`` argument. Introduced cookie version 2
           and made it the default.

        .. versionchanged:: 6.3

           Renamed from ``set_secure_cookie`` to ``set_signed_cookie`` to
           avoid confusion with other uses of "secure" in cookie attributes
           and prefixes. The old name remains as an alias.
        """
        # Delegate signing to create_signed_value and delivery to set_cookie.
        self.set_cookie(
            name,
            self.create_signed_value(name, value, version=version),
            expires_days=expires_days,
            **kwargs,
        )

    # Backwards-compatible alias (pre-6.3 name).
    set_secure_cookie = set_signed_cookie
    def create_signed_value(
        self, name: str, value: Union[str, bytes], version: Optional[int] = None
    ) -> bytes:
        """Signs and timestamps a string so it cannot be forged.

        Normally used via set_signed_cookie, but provided as a separate
        method for non-cookie uses.  To decode a value not stored
        as a cookie use the optional value argument to get_signed_cookie.

        :raises Exception: if ``cookie_secret`` is a dict of keys but the
            ``key_version`` setting is not configured.

        .. versionchanged:: 3.2.1

           Added the ``version`` argument. Introduced cookie version 2
           and made it the default.
        """
        self.require_setting("cookie_secret", "secure cookies")
        secret = self.application.settings["cookie_secret"]
        key_version = None
        if isinstance(secret, dict):
            # A dict of secrets supports key rotation; key_version selects
            # which secret signs new values.
            if self.application.settings.get("key_version") is None:
                raise Exception("key_version setting must be used for secret_key dicts")
            key_version = self.application.settings["key_version"]

        return create_signed_value(
            secret, name, value, version=version, key_version=key_version
        )
    def get_signed_cookie(
        self,
        name: str,
        value: Optional[str] = None,
        max_age_days: float = 31,
        min_version: Optional[int] = None,
    ) -> Optional[bytes]:
        """Returns the given signed cookie if it validates, or None.

        The decoded cookie value is returned as a byte string (unlike
        `get_cookie`).

        Similar to `get_cookie`, this method only returns cookies that
        were present in the request. It does not see outgoing cookies set by
        `set_signed_cookie` in this handler.

        .. versionchanged:: 3.2.1

           Added the ``min_version`` argument.  Introduced cookie version 2;
           both versions 1 and 2 are accepted by default.

         .. versionchanged:: 6.3

           Renamed from ``get_secure_cookie`` to ``get_signed_cookie`` to
           avoid confusion with other uses of "secure" in cookie attributes
           and prefixes. The old name remains as an alias.
        """
        self.require_setting("cookie_secret", "secure cookies")
        # If no explicit value was supplied, read it from the request cookies.
        if value is None:
            value = self.get_cookie(name)
        return decode_signed_value(
            self.application.settings["cookie_secret"],
            name,
            value,
            max_age_days=max_age_days,
            min_version=min_version,
        )

    # Backwards-compatible alias (pre-6.3 name).
    get_secure_cookie = get_signed_cookie
    def get_signed_cookie_key_version(
        self, name: str, value: Optional[str] = None
    ) -> Optional[int]:
        """Returns the signing key version of the secure cookie.

        The version is returned as int.

        Returns ``None`` if the cookie is not present.

        .. versionchanged:: 6.3

           Renamed from ``get_secure_cookie_key_version`` to
           ``get_signed_cookie_key_version`` to avoid confusion with other
           uses of "secure" in cookie attributes and prefixes. The old name
           remains as an alias.
        """
        self.require_setting("cookie_secret", "secure cookies")
        if value is None:
            value = self.get_cookie(name)
        if value is None:
            return None

        return get_signature_key_version(value)

    # Backwards-compatible alias (pre-6.3 name).
    get_secure_cookie_key_version = get_signed_cookie_key_version
def redirect(
self, url: str, permanent: bool = False, status: Optional[int] = None
) -> None:
"""Sends a redirect to the given (optionally relative) URL.
If the ``status`` argument is specified, that value is used as the
HTTP status code; otherwise either 301 (permanent) or 302
(temporary) is chosen based on the ``permanent`` argument.
The default is 302 (temporary).
"""
if self._headers_written:
raise Exception("Cannot redirect after headers have been written")
if status is None:
status = 301 if permanent else 302
else:
assert isinstance(status, int) and 300 <= status <= 399
self.set_status(status)
self.set_header("Location", utf8(url))
self.finish()
    def write(self, chunk: Union[str, bytes, dict]) -> None:
        """Writes the given chunk to the output buffer.

        To write the output to the network, use the `flush()` method below.

        If the given chunk is a dictionary, we write it as JSON and set
        the Content-Type of the response to be ``application/json``.
        (if you want to send JSON as a different ``Content-Type``, call
        ``set_header`` *after* calling ``write()``).

        Note that lists are not converted to JSON because of a potential
        cross-site security vulnerability. All JSON output should be
        wrapped in a dictionary. More details at
        http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and
        https://github.com/facebook/tornado/issues/1009

        :raises RuntimeError: if called after `finish`.
        :raises TypeError: for unsupported chunk types (including lists).
        """
        if self._finished:
            raise RuntimeError("Cannot write() after finish()")
        if not isinstance(chunk, (bytes, unicode_type, dict)):
            message = "write() only accepts bytes, unicode, and dict objects"
            if isinstance(chunk, list):
                message += (
                    ". Lists not accepted for security reasons; see "
                    + "http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write"  # noqa: E501
                )
            raise TypeError(message)
        if isinstance(chunk, dict):
            chunk = escape.json_encode(chunk)
            self.set_header("Content-Type", "application/json; charset=UTF-8")
        # Output is buffered as UTF-8 bytes until flush().
        chunk = utf8(chunk)
        self._write_buffer.append(chunk)
    def render(self, template_name: str, **kwargs: Any) -> "Future[None]":
        """Renders the template with the given arguments as the response.

        ``render()`` calls ``finish()``, so no other output methods can be called
        after it.

        Returns a `.Future` with the same semantics as the one returned by `finish`.
        Awaiting this `.Future` is optional.

        .. versionchanged:: 5.1

           Now returns a `.Future` instead of ``None``.
        """
        if self._finished:
            raise RuntimeError("Cannot render() after finish()")
        html = self.render_string(template_name, **kwargs)

        # Insert the additional JS and CSS added by the modules on the page
        js_embed = []
        js_files = []
        css_embed = []
        css_files = []
        html_heads = []
        html_bodies = []
        # Collect each UI module's contributions; files may be a single
        # string/bytes or an iterable of them.
        for module in getattr(self, "_active_modules", {}).values():
            embed_part = module.embedded_javascript()
            if embed_part:
                js_embed.append(utf8(embed_part))
            file_part = module.javascript_files()
            if file_part:
                if isinstance(file_part, (unicode_type, bytes)):
                    js_files.append(_unicode(file_part))
                else:
                    js_files.extend(file_part)
            embed_part = module.embedded_css()
            if embed_part:
                css_embed.append(utf8(embed_part))
            file_part = module.css_files()
            if file_part:
                if isinstance(file_part, (unicode_type, bytes)):
                    css_files.append(_unicode(file_part))
                else:
                    css_files.extend(file_part)
            head_part = module.html_head()
            if head_part:
                html_heads.append(utf8(head_part))
            body_part = module.html_body()
            if body_part:
                html_bodies.append(utf8(body_part))

        # Splice the collected fragments into the rendered page:
        # scripts go just before </body>, styles and heads just before
        # </head>.  rindex/index raise ValueError if the tag is absent.
        if js_files:
            # Maintain order of JavaScript files given by modules
            js = self.render_linked_js(js_files)
            sloc = html.rindex(b"</body>")
            html = html[:sloc] + utf8(js) + b"\n" + html[sloc:]
        if js_embed:
            js_bytes = self.render_embed_js(js_embed)
            sloc = html.rindex(b"</body>")
            html = html[:sloc] + js_bytes + b"\n" + html[sloc:]
        if css_files:
            css = self.render_linked_css(css_files)
            hloc = html.index(b"</head>")
            html = html[:hloc] + utf8(css) + b"\n" + html[hloc:]
        if css_embed:
            css_bytes = self.render_embed_css(css_embed)
            hloc = html.index(b"</head>")
            html = html[:hloc] + css_bytes + b"\n" + html[hloc:]
        if html_heads:
            hloc = html.index(b"</head>")
            html = html[:hloc] + b"".join(html_heads) + b"\n" + html[hloc:]
        if html_bodies:
            hloc = html.index(b"</body>")
            html = html[:hloc] + b"".join(html_bodies) + b"\n" + html[hloc:]
        return self.finish(html)
def render_linked_js(self, js_files: Iterable[str]) -> str:
"""Default method used to render the final js links for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
paths = []
unique_paths = set() # type: Set[str]
for path in js_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
return "".join(
'<script src="'
+ escape.xhtml_escape(p)
+ '" type="text/javascript"></script>'
for p in paths
)
def render_embed_js(self, js_embed: Iterable[bytes]) -> bytes:
"""Default method used to render the final embedded js for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
return (
b'<script type="text/javascript">\n//<![CDATA[\n'
+ b"\n".join(js_embed)
+ b"\n//]]>\n</script>"
)
def render_linked_css(self, css_files: Iterable[str]) -> str:
"""Default method used to render the final css links for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
paths = []
unique_paths = set() # type: Set[str]
for path in css_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
return "".join(
'<link href="' + escape.xhtml_escape(p) + '" '
'type="text/css" rel="stylesheet"/>'
for p in paths
)
def render_embed_css(self, css_embed: Iterable[bytes]) -> bytes:
"""Default method used to render the final embedded css for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
return b'<style type="text/css">\n' + b"\n".join(css_embed) + b"\n</style>"
    def render_string(self, template_name: str, **kwargs: Any) -> bytes:
        """Generate the given template with the given arguments.

        We return the generated byte string (in utf8). To generate and
        write a template as a response, use render() above.
        """
        # If no template_path is specified, use the path of the calling file
        template_path = self.get_template_path()
        if not template_path:
            # Walk up the stack past frames belonging to this module to find
            # the caller's file, and use its directory.
            frame = sys._getframe(0)
            web_file = frame.f_code.co_filename
            while frame.f_code.co_filename == web_file and frame.f_back is not None:
                frame = frame.f_back
            assert frame.f_code.co_filename is not None
            template_path = os.path.dirname(frame.f_code.co_filename)
        # Loaders are cached per path at the class level; guard with a lock
        # since handlers may run on multiple threads.
        with RequestHandler._template_loader_lock:
            if template_path not in RequestHandler._template_loaders:
                loader = self.create_template_loader(template_path)
                RequestHandler._template_loaders[template_path] = loader
            else:
                loader = RequestHandler._template_loaders[template_path]
        t = loader.load(template_name)
        namespace = self.get_template_namespace()
        namespace.update(kwargs)
        return t.generate(**namespace)
def get_template_namespace(self) -> Dict[str, Any]:
"""Returns a dictionary to be used as the default template namespace.
May be overridden by subclasses to add or modify values.
The results of this method will be combined with additional
defaults in the `tornado.template` module and keyword arguments
to `render` or `render_string`.
"""
namespace = dict(
handler=self,
request=self.request,
current_user=self.current_user,
locale=self.locale,
_=self.locale.translate,
pgettext=self.locale.pgettext,
static_url=self.static_url,
xsrf_form_html=self.xsrf_form_html,
reverse_url=self.reverse_url,
)
namespace.update(self.ui)
return namespace
def create_template_loader(self, template_path: str) -> template.BaseLoader:
"""Returns a new template loader for the given path.
May be overridden by subclasses. By default returns a
directory-based loader on the given path, using the
``autoescape`` and ``template_whitespace`` application
settings. If a ``template_loader`` application setting is
supplied, uses that instead.
"""
settings = self.application.settings
if "template_loader" in settings:
return settings["template_loader"]
kwargs = {}
if "autoescape" in settings:
# autoescape=None means "no escaping", so we have to be sure
# to only pass this kwarg if the user asked for it.
kwargs["autoescape"] = settings["autoescape"]
if "template_whitespace" in settings:
kwargs["whitespace"] = settings["template_whitespace"]
return template.Loader(template_path, **kwargs)
    def flush(self, include_footers: bool = False) -> "Future[None]":
        """Flushes the current output buffer to the network.

        Returns a `.Future` that resolves when the data has been written
        to the connection.

        .. versionchanged:: 4.0
           Now returns a `.Future` if no callback is given.

        .. versionchanged:: 6.0

           The ``callback`` argument was removed.
        """
        assert self.request.connection is not None
        chunk = b"".join(self._write_buffer)
        self._write_buffer = []
        if not self._headers_written:
            # First flush: run the first-chunk transforms (which may alter
            # status/headers) and send headers along with this chunk.
            self._headers_written = True
            for transform in self._transforms:
                assert chunk is not None
                (
                    self._status_code,
                    self._headers,
                    chunk,
                ) = transform.transform_first_chunk(
                    self._status_code, self._headers, chunk, include_footers
                )
            # Ignore the chunk and only write the headers for HEAD requests
            if self.request.method == "HEAD":
                chunk = b""

            # Finalize the cookie headers (which have been stored in a side
            # object so an outgoing cookie could be overwritten before it
            # is sent).
            if hasattr(self, "_new_cookie"):
                for cookie in self._new_cookie.values():
                    self.add_header("Set-Cookie", cookie.OutputString(None))

            start_line = httputil.ResponseStartLine("", self._status_code, self._reason)
            return self.request.connection.write_headers(
                start_line, self._headers, chunk
            )
        else:
            # Subsequent flushes: headers already sent; only chunk transforms
            # apply.
            for transform in self._transforms:
                chunk = transform.transform_chunk(chunk, include_footers)
            # Ignore the chunk and only write the headers for HEAD requests
            if self.request.method != "HEAD":
                return self.request.connection.write(chunk)
            else:
                # Nothing to send for HEAD; return an already-resolved Future.
                future = Future()  # type: Future[None]
                future.set_result(None)
                return future
    def finish(self, chunk: Optional[Union[str, bytes, dict]] = None) -> "Future[None]":
        """Finishes this response, ending the HTTP request.

        Passing a ``chunk`` to ``finish()`` is equivalent to passing that
        chunk to ``write()`` and then calling ``finish()`` with no arguments.

        Returns a `.Future` which may optionally be awaited to track the sending
        of the response to the client. This `.Future` resolves when all the response
        data has been sent, and raises an error if the connection is closed before all
        data can be sent.

        :raises RuntimeError: if called more than once.

        .. versionchanged:: 5.1

           Now returns a `.Future` instead of ``None``.
        """
        if self._finished:
            raise RuntimeError("finish() called twice")

        if chunk is not None:
            self.write(chunk)

        # Automatically support ETags and add the Content-Length header if
        # we have not flushed any content yet.
        if not self._headers_written:
            if (
                self._status_code == 200
                and self.request.method in ("GET", "HEAD")
                and "Etag" not in self._headers
            ):
                self.set_etag_header()
                # A matching If-None-Match turns the response into a bodyless
                # 304.
                if self.check_etag_header():
                    self._write_buffer = []
                    self.set_status(304)
            if self._status_code in (204, 304) or (100 <= self._status_code < 200):
                assert not self._write_buffer, (
                    "Cannot send body with %s" % self._status_code
                )
                self._clear_representation_headers()
            elif "Content-Length" not in self._headers:
                content_length = sum(len(part) for part in self._write_buffer)
                self.set_header("Content-Length", content_length)

        assert self.request.connection is not None
        # Now that the request is finished, clear the callback we
        # set on the HTTPConnection (which would otherwise prevent the
        # garbage collection of the RequestHandler when there
        # are keepalive connections)
        self.request.connection.set_close_callback(None)  # type: ignore

        future = self.flush(include_footers=True)
        self.request.connection.finish()
        self._log()
        self._finished = True
        self.on_finish()
        self._break_cycles()
        return future
    def detach(self) -> iostream.IOStream:
        """Take control of the underlying stream.

        Returns the underlying `.IOStream` object and stops all
        further HTTP processing. Intended for implementing protocols
        like websockets that tunnel over an HTTP handshake.

        This method is only supported when HTTP/1.1 is used.

        .. versionadded:: 5.1
        """
        # Mark finished so the normal response machinery does not run.
        self._finished = True
        # TODO: add detach to HTTPConnection?
        return self.request.connection.detach()  # type: ignore
    def _break_cycles(self) -> None:
        # Break up a reference cycle between this handler and the
        # _ui_module closures to allow for faster GC on CPython.
        # Called from finish(); self.ui is unusable afterwards.
        self.ui = None  # type: ignore
    def send_error(self, status_code: int = 500, **kwargs: Any) -> None:
        """Sends the given HTTP error code to the browser.

        If `flush()` has already been called, it is not possible to send
        an error, so this method will simply terminate the response.
        If output has been written but not yet flushed, it will be discarded
        and replaced with the error page.

        Override `write_error()` to customize the error page that is returned.
        Additional keyword arguments are passed through to `write_error`.
        """
        if self._headers_written:
            gen_log.error("Cannot send error response after headers written")
            if not self._finished:
                # If we get an error between writing headers and finishing,
                # we are unlikely to be able to finish due to a
                # Content-Length mismatch. Try anyway to release the
                # socket.
                try:
                    self.finish()
                except Exception:
                    gen_log.error("Failed to flush partial response", exc_info=True)
            return
        # Discard any buffered output and reset headers before rendering the
        # error page.
        self.clear()

        # Prefer a reason from an HTTPError in exc_info over the kwarg.
        reason = kwargs.get("reason")
        if "exc_info" in kwargs:
            exception = kwargs["exc_info"][1]
            if isinstance(exception, HTTPError) and exception.reason:
                reason = exception.reason
        self.set_status(status_code, reason=reason)
        try:
            self.write_error(status_code, **kwargs)
        except Exception:
            app_log.error("Uncaught exception in write_error", exc_info=True)
        if not self._finished:
            self.finish()
    def write_error(self, status_code: int, **kwargs: Any) -> None:
        """Override to implement custom error pages.

        ``write_error`` may call `write`, `render`, `set_header`, etc
        to produce output as usual.

        If this error was caused by an uncaught exception (including
        HTTPError), an ``exc_info`` triple will be available as
        ``kwargs["exc_info"]``.  Note that this exception may not be
        the "current" exception for purposes of methods like
        ``sys.exc_info()`` or ``traceback.format_exc``.
        """
        if self.settings.get("serve_traceback") and "exc_info" in kwargs:
            # in debug mode, try to send a traceback
            self.set_header("Content-Type", "text/plain")
            for line in traceback.format_exception(*kwargs["exc_info"]):
                self.write(line)
            self.finish()
        else:
            # Minimal HTML error page using the status code and reason phrase.
            self.finish(
                "<html><title>%(code)d: %(message)s</title>"
                "<body>%(code)d: %(message)s</body></html>"
                % {"code": status_code, "message": self._reason}
            )
    @property
    def locale(self) -> tornado.locale.Locale:
        """The locale for the current session.

        Determined by either `get_user_locale`, which you can override to
        set the locale based on, e.g., a user preference stored in a
        database, or `get_browser_locale`, which uses the ``Accept-Language``
        header.

        .. versionchanged: 4.1
           Added a property setter.
        """
        # Computed lazily and cached in _locale for the rest of the request.
        if not hasattr(self, "_locale"):
            loc = self.get_user_locale()
            if loc is not None:
                self._locale = loc
            else:
                self._locale = self.get_browser_locale()
                assert self._locale
        return self._locale

    @locale.setter
    def locale(self, value: tornado.locale.Locale) -> None:
        self._locale = value
    def get_user_locale(self) -> Optional[tornado.locale.Locale]:
        """Override to determine the locale from the authenticated user.

        If None is returned, we fall back to `get_browser_locale()`.

        This method should return a `tornado.locale.Locale` object,
        most likely obtained via a call like ``tornado.locale.get("en")``
        """
        # Default: no user preference; caller falls back to the browser.
        return None
def get_browser_locale(self, default: str = "en_US") -> tornado.locale.Locale:
"""Determines the user's locale from ``Accept-Language`` header.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
"""
if "Accept-Language" in self.request.headers:
languages = self.request.headers["Accept-Language"].split(",")
locales = []
for language in languages:
parts = language.strip().split(";")
if len(parts) > 1 and parts[1].strip().startswith("q="):
try:
score = float(parts[1].strip()[2:])
if score < 0:
raise ValueError()
except (ValueError, TypeError):
score = 0.0
else:
score = 1.0
if score > 0:
locales.append((parts[0], score))
if locales:
locales.sort(key=lambda pair: pair[1], reverse=True)
codes = [loc[0] for loc in locales]
return locale.get(*codes)
return locale.get(default)
    @property
    def current_user(self) -> Any:
        """The authenticated user for this request.

        This is set in one of two ways:

        * A subclass may override `get_current_user()`, which will be called
          automatically the first time ``self.current_user`` is accessed.
          `get_current_user()` will only be called once per request,
          and is cached for future access::

              def get_current_user(self):
                  user_cookie = self.get_signed_cookie("user")
                  if user_cookie:
                      return json.loads(user_cookie)
                  return None

        * It may be set as a normal variable, typically from an overridden
          `prepare()`::

              @gen.coroutine
              def prepare(self):
                  user_id_cookie = self.get_signed_cookie("user_id")
                  if user_id_cookie:
                      self.current_user = yield load_user(user_id_cookie)

        Note that `prepare()` may be a coroutine while `get_current_user()`
        may not, so the latter form is necessary if loading the user requires
        asynchronous operations.

        The user object may be any type of the application's choosing.
        """
        # Lazily computed and cached; a falsy cached value (e.g. None) is
        # still cached and not recomputed.
        if not hasattr(self, "_current_user"):
            self._current_user = self.get_current_user()
        return self._current_user

    @current_user.setter
    def current_user(self, value: Any) -> None:
        self._current_user = value
    def get_current_user(self) -> Any:
        """Override to determine the current user from, e.g., a cookie.

        This method may not be a coroutine.
        """
        # Default: no authentication; `current_user` will be None.
        return None
    def get_login_url(self) -> str:
        """Override to customize the login URL based on the request.

        By default, we use the ``login_url`` application setting.

        Raises if the ``login_url`` setting is missing (via `require_setting`).
        """
        self.require_setting("login_url", "@tornado.web.authenticated")
        return self.application.settings["login_url"]
    def get_template_path(self) -> Optional[str]:
        """Override to customize template path for each handler.

        By default, we use the ``template_path`` application setting.
        Return None to load templates relative to the calling file.
        """
        # May be None; render_string() then falls back to the caller's
        # directory.
        return self.application.settings.get("template_path")
@property
def xsrf_token(self) -> bytes:
    """The XSRF-prevention token for the current user/session.

    To prevent cross-site request forgery, we set an '_xsrf' cookie
    and include the same '_xsrf' value as an argument with all POST
    requests. If the two do not match, we reject the form submission
    as a potential forgery.

    See http://en.wikipedia.org/wiki/Cross-site_request_forgery

    This property is of type `bytes`, but it contains only ASCII
    characters. If a character string is required, there is no
    need to base64-encode it; just decode the byte string as
    UTF-8.

    .. versionchanged:: 3.2.2
       The xsrf token will now be have a random mask applied in every
       request, which makes it safe to include the token in pages
       that are compressed. See http://breachattack.com for more
       information on the issue fixed by this change. Old (version 1)
       cookies will be converted to version 2 when this method is called
       unless the ``xsrf_cookie_version`` `Application` setting is
       set to 1.

    .. versionchanged:: 4.3
       The ``xsrf_cookie_kwargs`` `Application` setting may be
       used to supply additional cookie options (which will be
       passed directly to `set_cookie`). For example,
       ``xsrf_cookie_kwargs=dict(httponly=True, secure=True)``
       will set the ``secure`` and ``httponly`` flags on the
       ``_xsrf`` cookie.
    """
    if not hasattr(self, "_xsrf_token"):
        # _get_raw_xsrf_token reads an existing cookie or generates a new
        # random token; this property only decides the output encoding.
        version, token, timestamp = self._get_raw_xsrf_token()
        output_version = self.settings.get("xsrf_cookie_version", 2)
        cookie_kwargs = self.settings.get("xsrf_cookie_kwargs", {})
        if output_version == 1:
            # Legacy format: bare hex of the raw token bytes.
            self._xsrf_token = binascii.b2a_hex(token)
        elif output_version == 2:
            # Version 2 XORs the token with a fresh 4-byte mask on every
            # request so the emitted value differs each time (BREACH
            # mitigation); layout is
            # b"2|<hex mask>|<hex masked token>|<timestamp>".
            mask = os.urandom(4)
            self._xsrf_token = b"|".join(
                [
                    b"2",
                    binascii.b2a_hex(mask),
                    binascii.b2a_hex(_websocket_mask(mask, token)),
                    utf8(str(int(timestamp))),
                ]
            )
        else:
            # NOTE(review): logging-style args — "%d" is never interpolated
            # into this message; consider "% output_version" instead.
            raise ValueError("unknown xsrf cookie version %d", output_version)
        if version is None:
            # No valid cookie was present, so persist the new token.
            if self.current_user and "expires_days" not in cookie_kwargs:
                cookie_kwargs["expires_days"] = 30
            cookie_name = self.settings.get("xsrf_cookie_name", "_xsrf")
            self.set_cookie(cookie_name, self._xsrf_token, **cookie_kwargs)
    return self._xsrf_token
def _get_raw_xsrf_token(self) -> Tuple[Optional[int], bytes, float]:
    """Read or generate the xsrf token in its raw form.

    The raw_xsrf_token is a tuple containing:

    * version: the version of the cookie from which this token was read,
      or None if we generated a new token in this request.
    * token: the raw token data; random (non-ascii) bytes.
    * timestamp: the time this token was generated (will not be accurate
      for version 1 cookies)
    """
    if not hasattr(self, "_raw_xsrf_token"):
        cookie_name = self.settings.get("xsrf_cookie_name", "_xsrf")
        cookie = self.get_cookie(cookie_name)
        version: Optional[int] = None
        token: Optional[bytes] = None
        timestamp: Optional[float] = None
        if cookie:
            version, token, timestamp = self._decode_xsrf_token(cookie)
        if token is None:
            # Missing or undecodable cookie: mint a brand-new random token.
            version = None
            token = os.urandom(16)
            timestamp = time.time()
        assert token is not None
        assert timestamp is not None
        # Cache so repeated reads within one request see the same token.
        self._raw_xsrf_token = (version, token, timestamp)
    return self._raw_xsrf_token
def _decode_xsrf_token(
    self, cookie: str
) -> Tuple[Optional[int], Optional[bytes], Optional[float]]:
    """Convert a cookie string into the tuple form returned by
    _get_raw_xsrf_token.
    """
    try:
        # _signed_value_version_re matches an explicit "N|..." version
        # prefix; cookies without one are treated as legacy version 1.
        m = _signed_value_version_re.match(utf8(cookie))
        if m:
            version = int(m.group(1))
            if version == 2:
                # Layout: "2|<hex mask>|<hex masked token>|<timestamp>".
                _, mask_str, masked_token, timestamp_str = cookie.split("|")
                mask = binascii.a2b_hex(utf8(mask_str))
                # Undo the per-request XOR mask to recover the raw token.
                token = _websocket_mask(mask, binascii.a2b_hex(utf8(masked_token)))
                timestamp = int(timestamp_str)
                return version, token, timestamp
            else:
                # Treat unknown versions as not present instead of failing.
                # (The raise is caught by the blanket handler below.)
                raise Exception("Unknown xsrf cookie version")
        else:
            version = 1
            try:
                token = binascii.a2b_hex(utf8(cookie))
            except (binascii.Error, TypeError):
                # Not valid hex; fall back to the raw cookie bytes.
                token = utf8(cookie)
            # We don't have a usable timestamp in older versions.
            timestamp = int(time.time())
            return (version, token, timestamp)
    except Exception:
        # Catch exceptions and return nothing instead of failing; the
        # caller then generates a fresh token.
        gen_log.debug("Uncaught exception in _decode_xsrf_token", exc_info=True)
        return None, None, None
def check_xsrf_cookie(self) -> None:
    """Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument.

    To prevent cross-site request forgery, we set an ``_xsrf``
    cookie and include the same value as a non-cookie
    field with all ``POST`` requests. If the two do not match, we
    reject the form submission as a potential forgery.

    The ``_xsrf`` value may be set as either a form field named ``_xsrf``
    or in a custom HTTP header named ``X-XSRFToken`` or ``X-CSRFToken``
    (the latter is accepted for compatibility with Django).

    See http://en.wikipedia.org/wiki/Cross-site_request_forgery

    .. versionchanged:: 3.2.2
       Added support for cookie version 2. Both versions 1 and 2 are
       supported.
    """
    # Prior to release 1.1.1, this check was ignored if the HTTP header
    # ``X-Requested-With: XMLHTTPRequest`` was present. This exception
    # has been shown to be insecure and has been removed. For more
    # information please see
    # http://www.djangoproject.com/weblog/2011/feb/08/security/
    # http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails

    # The token may arrive as a form field or in either of two headers.
    input_token = (
        self.get_argument("_xsrf", None)
        or self.request.headers.get("X-Xsrftoken")
        or self.request.headers.get("X-Csrftoken")
    )
    if not input_token:
        raise HTTPError(403, "'_xsrf' argument missing from POST")
    # Decode both sides down to raw token bytes so version-1 and
    # version-2 encodings compare correctly against each other.
    _, token, _ = self._decode_xsrf_token(input_token)
    _, expected_token, _ = self._get_raw_xsrf_token()
    if not token:
        raise HTTPError(403, "'_xsrf' argument has invalid format")
    # hmac.compare_digest is constant-time, avoiding timing side channels.
    if not hmac.compare_digest(utf8(token), utf8(expected_token)):
        raise HTTPError(403, "XSRF cookie does not match POST argument")
def xsrf_form_html(self) -> str:
    """An HTML ``<input/>`` element to be included with all POST forms.

    It defines the ``_xsrf`` input value, which we check on all POST
    requests to prevent cross-site request forgery. If you have set
    the ``xsrf_cookies`` application setting, you must include this
    HTML within all of your HTML forms.

    In a template, this method should be called with ``{% module
    xsrf_form_html() %}``

    See `check_xsrf_cookie()` above for more information.
    """
    # Escape the token so it is safe inside an HTML attribute value.
    escaped_token = escape.xhtml_escape(self.xsrf_token)
    return '<input type="hidden" name="_xsrf" value="' + escaped_token + '"/>'
def static_url(
    self, path: str, include_host: Optional[bool] = None, **kwargs: Any
) -> str:
    """Returns a static URL for the given relative static file path.

    This method requires you set the ``static_path`` setting in your
    application (which specifies the root directory of your static
    files).

    This method returns a versioned url (by default appending
    ``?v=<signature>``), which allows the static files to be
    cached indefinitely. This can be disabled by passing
    ``include_version=False`` (in the default implementation;
    other static file implementations are not required to support
    this, but they may support other options).

    By default this method returns URLs relative to the current
    host, but if ``include_host`` is true the URL returned will be
    absolute. If this handler has an ``include_host`` attribute,
    that value will be used as the default for all `static_url`
    calls that do not pass ``include_host`` as a keyword argument.
    """
    self.require_setting("static_path", "static_url")
    # The application may substitute its own static handler class; fall
    # back to the stock StaticFileHandler otherwise.
    handler_class = self.settings.get("static_handler_class", StaticFileHandler)
    if include_host is None:
        include_host = getattr(self, "include_host", False)

    base = ""
    if include_host:
        base = self.request.protocol + "://" + self.request.host
    return base + handler_class.make_static_url(self.settings, path, **kwargs)
def require_setting(self, name: str, feature: str = "this feature") -> None:
    """Raises an exception if the given app setting is not defined."""
    # Guard clause: a truthy setting value means the feature is usable.
    if self.application.settings.get(name):
        return
    raise Exception(
        "You must define the '%s' setting in your "
        "application to use %s" % (name, feature)
    )
def reverse_url(self, name: str, *args: Any) -> str:
    """Alias for `Application.reverse_url`."""
    # Convenience so handlers and templates can reverse named URLs
    # without reaching through self.application explicitly.
    return self.application.reverse_url(name, *args)
def compute_etag(self) -> Optional[str]:
    """Computes the etag header to be used for this request.

    By default uses a hash of the content written so far.

    May be overridden to provide custom etag implementations,
    or may return None to disable tornado's default etag support.
    """
    # Hash every buffered output chunk; wrapping in quotes yields a
    # strong entity-tag.
    digest = hashlib.sha1()
    for chunk in self._write_buffer:
        digest.update(chunk)
    return '"%s"' % digest.hexdigest()
def set_etag_header(self) -> None:
    """Sets the response's Etag header using ``self.compute_etag()``.

    Note: no header will be set if ``compute_etag()`` returns ``None``.

    This method is called automatically when the request is finished.
    """
    computed = self.compute_etag()
    # A None etag means the handler opted out of etag support.
    if computed is not None:
        self.set_header("Etag", computed)
def check_etag_header(self) -> bool:
    """Checks the ``Etag`` header against requests's ``If-None-Match``.

    Returns ``True`` if the request's Etag matches and a 304 should be
    returned. For example::

        self.set_etag_header()
        if self.check_etag_header():
            self.set_status(304)
            return

    This method is called automatically when the request is finished,
    but may be called earlier for applications that override
    `compute_etag` and want to do an early check for ``If-None-Match``
    before completing the request. The ``Etag`` header should be set
    (perhaps with `set_etag_header`) before calling this method.
    """
    computed_etag = utf8(self._headers.get("Etag", ""))
    # Find all weak and strong etag values from If-None-Match header
    # because RFC 7232 allows multiple etag values in a single header.
    etags = re.findall(
        rb'\*|(?:W/)?"[^"]*"', utf8(self.request.headers.get("If-None-Match", ""))
    )
    if not computed_etag or not etags:
        return False

    match = False
    if etags[0] == b"*":
        # "*" matches any current representation of the resource.
        match = True
    else:
        # Use a weak comparison when comparing entity-tags.
        def val(x: bytes) -> bytes:
            # Strip the weak-validator prefix so W/"x" compares equal to "x".
            return x[2:] if x.startswith(b"W/") else x

        for etag in etags:
            if val(etag) == val(computed_etag):
                match = True
                break
    return match
async def _execute(
    self, transforms: List["OutputTransform"], *args: bytes, **kwargs: bytes
) -> None:
    """Executes this request with the given output transforms.

    Drives the full handler lifecycle: body parsing (non-streaming mode),
    path-argument decoding, XSRF checking, ``prepare()``, dispatch to the
    per-verb method, and auto-finish. All exceptions are funneled into
    ``_handle_request_exception``.
    """
    self._transforms = transforms
    try:
        if self.request.method not in self.SUPPORTED_METHODS:
            raise HTTPError(405)

        # If we're not in stream_request_body mode, this is the place where we parse the body.
        if not _has_stream_request_body(self.__class__):
            try:
                self.request._parse_body()
            except httputil.HTTPInputError as e:
                raise HTTPError(400, "Invalid body: %s" % e) from e

        # Path captures arrive as bytes; decode them before handing them
        # to user code.
        self.path_args = [self.decode_argument(arg) for arg in args]
        self.path_kwargs = {
            k: self.decode_argument(v, name=k) for (k, v) in kwargs.items()
        }
        # If XSRF cookies are turned on, reject form submissions without
        # the proper cookie
        if self.request.method not in (
            "GET",
            "HEAD",
            "OPTIONS",
        ) and self.application.settings.get("xsrf_cookies"):
            self.check_xsrf_cookie()

        result = self.prepare()
        if result is not None:
            # prepare() may be a coroutine; wait for it before continuing.
            result = await result  # type: ignore
        if self._prepared_future is not None:
            # Tell the Application we've finished with prepare()
            # and are ready for the body to arrive.
            future_set_result_unless_cancelled(self._prepared_future, None)
        if self._finished:
            return

        if _has_stream_request_body(self.__class__):
            # In streaming mode request.body is a Future that signals
            # the body has been completely received. The Future has no
            # result; the data has been passed to self.data_received
            # instead.
            try:
                await self.request._body_future
            except iostream.StreamClosedError:
                return

        # Dispatch to the handler method named after the HTTP verb
        # (get/post/put/...).
        method = getattr(self, self.request.method.lower())
        result = method(*self.path_args, **self.path_kwargs)
        if result is not None:
            result = await result
        if self._auto_finish and not self._finished:
            self.finish()
    except Exception as e:
        try:
            self._handle_request_exception(e)
        except Exception:
            app_log.error("Exception in exception handler", exc_info=True)
        finally:
            # Unset result to avoid circular references
            result = None
        if self._prepared_future is not None and not self._prepared_future.done():
            # In case we failed before setting _prepared_future, do it
            # now (to unblock the HTTP server). Note that this is not
            # in a finally block to avoid GC issues prior to Python 3.4.
            self._prepared_future.set_result(None)
def data_received(self, chunk: bytes) -> Optional[Awaitable[None]]:
    """Implement this method to handle streamed request data.

    Requires the `.stream_request_body` decorator.

    May be a coroutine for flow control.
    """
    # Intentionally abstract: subclasses decorated with
    # @stream_request_body must override this.
    raise NotImplementedError()
def _log(self) -> None:
    """Logs the current request.

    Sort of deprecated since this functionality was moved to the
    Application, but left in place for the benefit of existing apps
    that have overridden this method.
    """
    # Delegate to the Application so request logging can be customized
    # app-wide in one place.
    self.application.log_request(self)
def _request_summary(self) -> str:
    """Return a one-line ``"METHOD uri (remote_ip)"`` summary of the
    request, used in log and error messages."""
    return "{} {} ({})".format(
        self.request.method,
        self.request.uri,
        self.request.remote_ip,
    )
def _handle_request_exception(self, e: BaseException) -> None:
    """Central exception handler invoked by ``_execute``.

    `Finish` terminates the request cleanly without logging; any other
    exception is logged and converted into an error response (its own
    status code for `HTTPError`, 500 otherwise).
    """
    if isinstance(e, Finish):
        # Not an error; just finish the request without logging.
        if not self._finished:
            self.finish(*e.args)
        return
    try:
        self.log_exception(*sys.exc_info())
    except Exception:
        # An error here should still get a best-effort send_error()
        # to avoid leaking the connection.
        app_log.error("Error in exception logger", exc_info=True)
    if self._finished:
        # Extra errors after the request has been finished should
        # be logged, but there is no reason to continue to try and
        # send a response.
        return
    if isinstance(e, HTTPError):
        self.send_error(e.status_code, exc_info=sys.exc_info())
    else:
        # Unexpected failure: report a generic server error.
        self.send_error(500, exc_info=sys.exc_info())
def log_exception(
    self,
    typ: "Optional[Type[BaseException]]",
    value: Optional[BaseException],
    tb: Optional[TracebackType],
) -> None:
    """Override to customize logging of uncaught exceptions.

    By default logs instances of `HTTPError` as warnings without
    stack traces (on the ``tornado.general`` logger), and all
    other exceptions as errors with stack traces (on the
    ``tornado.application`` logger).

    .. versionadded:: 3.1
    """
    if isinstance(value, HTTPError):
        # Expected application errors: warn without a traceback, and only
        # when the HTTPError actually carries a log message.
        log_message = value.get_message()
        if log_message:
            format = "%d %s: %s"
            args = [value.status_code, self._request_summary(), log_message]
            gen_log.warning(format, *args)
    else:
        # Unexpected errors: full traceback at error level.
        app_log.error(
            "Uncaught exception %s\n%r",
            self._request_summary(),
            self.request,
            exc_info=(typ, value, tb),  # type: ignore
        )
def _ui_module(self, name: str, module: Type["UIModule"]) -> Callable[..., str]:
    """Return a render callable for UI module *name*.

    The module class is instantiated at most once per handler and cached
    in ``self._active_modules`` so repeated renders reuse the instance.
    """
    def render(*args, **kwargs) -> str:  # type: ignore
        if not hasattr(self, "_active_modules"):
            self._active_modules = {}  # type: Dict[str, UIModule]
        if name not in self._active_modules:
            self._active_modules[name] = module(self)
        rendered = self._active_modules[name].render(*args, **kwargs)
        # Normalize to text so templates always receive str.
        return _unicode(rendered)

    return render
def _ui_method(self, method: Callable[..., str]) -> Callable[..., str]:
    """Bind *self* as the first argument so templates can call the UI
    method without passing the handler explicitly."""
    return lambda *args, **kwargs: method(self, *args, **kwargs)
def _clear_representation_headers(self) -> None:
    # 304 responses should not contain representation metadata
    # headers (defined in
    # https://tools.ietf.org/html/rfc7231#section-3.1)
    # not explicitly allowed by
    # https://tools.ietf.org/html/rfc7232#section-4.1
    for header_name in ("Content-Encoding", "Content-Language", "Content-Type"):
        self.clear_header(header_name)
_RequestHandlerType = TypeVar("_RequestHandlerType", bound=RequestHandler)


def stream_request_body(cls: Type[_RequestHandlerType]) -> Type[_RequestHandlerType]:
    """Apply to `RequestHandler` subclasses to enable streaming body support.

    This decorator implies the following changes:

    * `.HTTPServerRequest.body` is undefined, and body arguments will not
      be included in `RequestHandler.get_argument`.
    * `RequestHandler.prepare` is called when the request headers have been
      read instead of after the entire body has been read.
    * The subclass must define a method ``data_received(self, data):``, which
      will be called zero or more times as data is available. Note that
      if the request has an empty body, ``data_received`` may not be called.
    * ``prepare`` and ``data_received`` may return Futures (such as via
      ``@gen.coroutine``, in which case the next method will not be called
      until those futures have completed.
    * The regular HTTP method (``post``, ``put``, etc) will be called after
      the entire body has been read.

    See the `file receiver demo <https://github.com/tornadoweb/tornado/tree/stable/demos/file_upload/>`_
    for example usage.
    """  # noqa: E501
    if not issubclass(cls, RequestHandler):
        # Interpolate with %; the previous logging-style call
        # TypeError("... %r", cls) left "%r" literal in the message.
        raise TypeError("expected subclass of RequestHandler, got %r" % cls)
    # Mark the class; _has_stream_request_body reads this flag.
    cls._stream_request_body = True
    return cls
def _has_stream_request_body(cls: Type[RequestHandler]) -> bool:
    """Return True if *cls* was decorated with `stream_request_body`."""
    if not issubclass(cls, RequestHandler):
        # Interpolate with %; the previous logging-style call
        # TypeError("... %r", cls) left "%r" literal in the message.
        raise TypeError("expected subclass of RequestHandler, got %r" % cls)
    return cls._stream_request_body
def removeslash(
method: Callable[..., Optional[Awaitable[None]]],
) -> Callable[..., Optional[Awaitable[None]]]:
"""Use this decorator to remove trailing slashes from the request path.
For example, a request to ``/foo/`` would redirect to ``/foo`` with this
decorator. Your request handler mapping should use a regular expression
like ``r'/foo/*'`` in conjunction with using the decorator.
"""
@functools.wraps(method)
def wrapper( # type: ignore
self: RequestHandler, *args, **kwargs
) -> Optional[Awaitable[None]]:
if self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD"):
uri = self.request.path.rstrip("/")
if uri: # don't try to redirect '/' to ''
if self.request.query:
uri += "?" + self.request.query
self.redirect(uri, permanent=True)
return None
else:
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
def addslash(
method: Callable[..., Optional[Awaitable[None]]],
) -> Callable[..., Optional[Awaitable[None]]]:
"""Use this decorator to add a missing trailing slash to the request path.
For example, a request to ``/foo`` would redirect to ``/foo/`` with this
decorator. Your request handler mapping should use a regular expression
like ``r'/foo/?'`` in conjunction with using the decorator.
"""
@functools.wraps(method)
def wrapper( # type: ignore
self: RequestHandler, *args, **kwargs
) -> Optional[Awaitable[None]]:
if not self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD"):
uri = self.request.path + "/"
if self.request.query:
uri += "?" + self.request.query
self.redirect(uri, permanent=True)
return None
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
| RequestHandler |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1051599,
"end": 1051983
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("UserStatus", graphql_name="node")
"""The item at the end of the edge."""
| UserStatusEdge |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-aws-datalake/destination_aws_datalake/stream_writer.py | {
"start": 682,
"end": 1120
} | class ____(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Decimal):
return str(obj)
if isinstance(obj, (pd.Timestamp, datetime)):
# all timestamps and datetimes are converted to UTC
return obj.strftime("%Y-%m-%dT%H:%M:%SZ")
if isinstance(obj, date):
return obj.strftime("%Y-%m-%d")
return super(DictEncoder, self).default(obj)
| DictEncoder |
python | davidhalter__jedi | test/completion/classes.py | {
"start": 6883,
"end": 7327
} | class ____():
def __init__(self):
self.__var = 1
#? int()
self.__var
#? ['__var']
self.__var
def __private_func(self):
return 1
#? int()
__private_func()
def wrap_private(self):
return self.__private_func()
#? []
PrivateVar().__var
#?
PrivateVar().__var
#? []
PrivateVar().__private_func
#? []
PrivateVar.__private_func
#? int()
PrivateVar().wrap_private()
| PrivateVar |
python | getsentry__sentry-python | sentry_sdk/integrations/chalice.py | {
"start": 900,
"end": 3408
} | class ____(ChaliceEventSourceHandler): # type: ignore
def __call__(self, event, context):
# type: (Any, Any) -> Any
client = sentry_sdk.get_client()
with sentry_sdk.isolation_scope() as scope:
with capture_internal_exceptions():
configured_time = context.get_remaining_time_in_millis()
scope.add_event_processor(
_make_request_event_processor(event, context, configured_time)
)
try:
return ChaliceEventSourceHandler.__call__(self, event, context)
except Exception:
exc_info = sys.exc_info()
event, hint = event_from_exception(
exc_info,
client_options=client.options,
mechanism={"type": "chalice", "handled": False},
)
sentry_sdk.capture_event(event, hint=hint)
client.flush()
reraise(*exc_info)
def _get_view_function_response(app, view_function, function_args):
# type: (Any, F, Any) -> F
@wraps(view_function)
def wrapped_view_function(**function_args):
# type: (**Any) -> Any
client = sentry_sdk.get_client()
with sentry_sdk.isolation_scope() as scope:
with capture_internal_exceptions():
configured_time = app.lambda_context.get_remaining_time_in_millis()
scope.set_transaction_name(
app.lambda_context.function_name,
source=TransactionSource.COMPONENT,
)
scope.add_event_processor(
_make_request_event_processor(
app.current_request.to_dict(),
app.lambda_context,
configured_time,
)
)
try:
return view_function(**function_args)
except Exception as exc:
if isinstance(exc, ChaliceViewError):
raise
exc_info = sys.exc_info()
event, hint = event_from_exception(
exc_info,
client_options=client.options,
mechanism={"type": "chalice", "handled": False},
)
sentry_sdk.capture_event(event, hint=hint)
client.flush()
raise
return wrapped_view_function # type: ignore
| EventSourceHandler |
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 27554,
"end": 28069
} | class ____(torch.nn.Module):
# Sequential module(self.layer) contains three duplicated ReLU module.
def __init__(self) -> None:
super().__init__()
self.relu = torch.nn.ReLU()
self.layer = torch.nn.Sequential(
torch.nn.Linear(10, 20),
self.relu,
torch.nn.Linear(20, 20),
self.relu,
torch.nn.Linear(20, 10),
self.relu,
)
def forward(self, x):
return self.layer(x)
| SequentialWithDuplicatedModule |
python | huggingface__transformers | tests/quantization/bnb/test_4bit.py | {
"start": 2798,
"end": 4128
} | class ____(unittest.TestCase):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
model_name = "bigscience/bloom-1b7"
# Constant values
EXPECTED_RELATIVE_DIFFERENCE = (
2.109659552692574 # This was obtained on a RTX Titan so the number might slightly change
)
input_text = "Hello my name is"
EXPECTED_OUTPUTS = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
EXPECTED_OUTPUTS.add("Hello my name is John and I am 25 years old.")
EXPECTED_OUTPUTS.add("Hello my name is John and I am a student at the University of")
# Expected values on Intel XPU and NV A100
EXPECTED_OUTPUTS.add("Hello my name is Alina. I have been working as a professional")
MAX_NEW_TOKENS = 10
def setUp(self):
# Models and tokenizer
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
@apply_skip_if_not_implemented
| Base4bitTest |
python | sphinx-doc__sphinx | tests/test_search.py | {
"start": 1377,
"end": 16371
} | class ____:
def __init__(
self, name: str, data: Iterable[tuple[str, str, str, str, str, int]]
) -> None:
self.name = name
self.data = data
self.object_types: dict[str, ObjType] = {}
def get_objects(self) -> Iterable[tuple[str, str, str, str, str, int]]:
return self.data
def load_searchindex(path: Path) -> Any:
searchindex = path.read_text(encoding='utf8')
assert searchindex.startswith('Search.setIndex(')
assert searchindex.endswith(')')
return json.loads(searchindex[16:-1])
def is_registered_term(index: Any, keyword: str) -> bool:
return index['terms'].get(keyword, []) != []
FILE_CONTENTS = """\
section_title
=============
.. test that comments are not indexed: boson
another_title
=============
test that non-comments are indexed: fermion
"""
@pytest.mark.sphinx('html', testroot='ext-viewcode')
def test_objects_are_escaped(app: SphinxTestApp) -> None:
app.build(force_all=True)
index = load_searchindex(app.outdir / 'searchindex.js')
for item in index.get('objects').get(''):
if item[-1] == 'n::Array<T, d>': # n::Array<T,d> is escaped
break
else:
raise AssertionError(index.get('objects').get(''))
@pytest.mark.sphinx('html', testroot='search')
def test_meta_keys_are_handled_for_language_en(app: SphinxTestApp) -> None:
app.build(force_all=True)
searchindex = load_searchindex(app.outdir / 'searchindex.js')
assert not is_registered_term(searchindex, 'thisnoteith')
assert is_registered_term(searchindex, 'thisonetoo')
assert is_registered_term(searchindex, 'findthiskey')
assert is_registered_term(searchindex, 'thistoo')
assert not is_registered_term(searchindex, 'onlygerman')
assert is_registered_term(searchindex, 'notgerman')
assert not is_registered_term(searchindex, 'onlytoogerman')
@pytest.mark.sphinx(
'html',
testroot='search',
confoverrides={'html_search_language': 'de'},
freshenv=True,
)
def test_meta_keys_are_handled_for_language_de(app: SphinxTestApp) -> None:
app.build(force_all=True)
searchindex = load_searchindex(app.outdir / 'searchindex.js')
assert not is_registered_term(searchindex, 'thisnoteith')
assert is_registered_term(searchindex, 'thisonetoo')
assert not is_registered_term(searchindex, 'findthiskey')
assert not is_registered_term(searchindex, 'thistoo')
assert is_registered_term(searchindex, 'onlygerman')
assert not is_registered_term(searchindex, 'notgerman')
assert is_registered_term(searchindex, 'onlytoogerman')
@pytest.mark.sphinx('html', testroot='search')
def test_stemmer_does_not_remove_short_words(app: SphinxTestApp) -> None:
app.build(force_all=True)
searchindex = (app.outdir / 'searchindex.js').read_text(encoding='utf8')
assert 'bat' in searchindex
@pytest.mark.sphinx('html', testroot='search')
def test_stemmer(app: SphinxTestApp) -> None:
app.build(force_all=True)
searchindex = load_searchindex(app.outdir / 'searchindex.js')
print(searchindex)
assert is_registered_term(searchindex, 'findthisstemmedkey')
assert is_registered_term(searchindex, 'intern')
@pytest.mark.sphinx('html', testroot='search')
def test_term_in_heading_and_section(app: SphinxTestApp) -> None:
app.build(force_all=True)
searchindex = (app.outdir / 'searchindex.js').read_text(encoding='utf8')
# if search term is in the title of one doc and in the text of another
# both documents should be a hit in the search index as a title,
# respectively text hit
assert '"textinhead":2' in searchindex
assert '"textinhead":0' in searchindex
@pytest.mark.sphinx('html', testroot='search')
def test_term_in_raw_directive(app: SphinxTestApp) -> None:
app.build(force_all=True)
searchindex = load_searchindex(app.outdir / 'searchindex.js')
assert not is_registered_term(searchindex, 'raw')
assert is_registered_term(searchindex, 'rawword')
assert not is_registered_term(searchindex, 'latex_keyword')
def test_IndexBuilder():
settings = frontend.get_default_settings(rst.Parser)
parser = rst.Parser()
domain1 = DummyDomain(
'dummy1',
[
('objname1', 'objdispname1', 'objtype1', 'docname1_1', '#anchor', 1),
('objname2', 'objdispname2', 'objtype2', 'docname1_2', '', -1),
],
)
domain2 = DummyDomain(
'dummy2',
[
('objname1', 'objdispname1', 'objtype1', 'docname2_1', '#anchor', 1),
('objname2', 'objdispname2', 'objtype2', 'docname2_2', '', -1),
],
)
env = DummyEnvironment('1.0', DummyDomainsContainer(dummy1=domain1, dummy2=domain2))
doc = utils.new_document('test data', settings)
doc['file'] = 'dummy'
parser.parse(FILE_CONTENTS, doc)
# feed
index = IndexBuilder(env, 'en', {}, '')
index.feed('docname1_1', 'filename1_1', 'title1_1', doc)
index.feed('docname1_2', 'filename1_2', 'title1_2', doc)
index.feed('docname2_2', 'filename2_2', 'title2_2', doc)
index.feed('docname2_1', 'filename2_1', 'title2_1', doc)
assert index._titles == {
'docname1_1': 'title1_1',
'docname1_2': 'title1_2',
'docname2_1': 'title2_1',
'docname2_2': 'title2_2',
}
assert index._filenames == {
'docname1_1': 'filename1_1',
'docname1_2': 'filename1_2',
'docname2_1': 'filename2_1',
'docname2_2': 'filename2_2',
}
# note: element iteration order (sort order) is important when the index
# is frozen (serialized) during build -- however, the _mapping-related
# dictionaries below may be iterated in arbitrary order by Python at
# runtime.
assert index._mapping == {
'fermion': {'docname1_1', 'docname1_2', 'docname2_1', 'docname2_2'},
'comment': {'docname1_1', 'docname1_2', 'docname2_1', 'docname2_2'},
'non': {'docname1_1', 'docname1_2', 'docname2_1', 'docname2_2'},
'index': {'docname1_1', 'docname1_2', 'docname2_1', 'docname2_2'},
'test': {'docname1_1', 'docname1_2', 'docname2_1', 'docname2_2'},
}
assert index._title_mapping == {
'another_titl': {'docname1_1', 'docname1_2', 'docname2_1', 'docname2_2'},
'section_titl': {'docname1_1', 'docname1_2', 'docname2_1', 'docname2_2'},
}
assert index._objtypes == {}
assert index._objnames == {}
# freeze
assert index.freeze() == {
'docnames': ('docname1_1', 'docname1_2', 'docname2_1', 'docname2_2'),
'envversion': '1.0',
'filenames': ['filename1_1', 'filename1_2', 'filename2_1', 'filename2_2'],
'objects': {
'': [
(0, 0, 1, '#anchor', 'objdispname1'),
(2, 1, 1, '#anchor', 'objdispname1'),
]
},
'objnames': {
0: ('dummy1', 'objtype1', 'objtype1'),
1: ('dummy2', 'objtype1', 'objtype1'),
},
'objtypes': {0: 'dummy1:objtype1', 1: 'dummy2:objtype1'},
'terms': {
'comment': [0, 1, 2, 3],
'fermion': [0, 1, 2, 3],
'index': [0, 1, 2, 3],
'non': [0, 1, 2, 3],
'test': [0, 1, 2, 3],
},
'titles': ('title1_1', 'title1_2', 'title2_1', 'title2_2'),
'titleterms': {
'another_titl': [0, 1, 2, 3],
'section_titl': [0, 1, 2, 3],
},
'alltitles': {
'another_title': [
(0, 'another-title'),
(1, 'another-title'),
(2, 'another-title'),
(3, 'another-title'),
],
'section_title': [(0, None), (1, None), (2, None), (3, None)],
},
'indexentries': {},
}
assert index._objtypes == {('dummy1', 'objtype1'): 0, ('dummy2', 'objtype1'): 1}
assert index._objnames == {
0: ('dummy1', 'objtype1', 'objtype1'),
1: ('dummy2', 'objtype1', 'objtype1'),
}
env = DummyEnvironment('1.0', DummyDomainsContainer(dummy1=domain1, dummy2=domain2))
# dump / load
stream = BytesIO()
index.dump(stream, 'pickle')
stream.seek(0)
index2 = IndexBuilder(env, 'en', {}, '')
index2.load(stream, 'pickle')
assert index2._titles == index._titles
assert index2._filenames == index._filenames
assert index2._mapping == index._mapping
assert index2._title_mapping == index._title_mapping
assert index2._objtypes == {}
assert index2._objnames == {}
# freeze after load
assert index2.freeze() == index.freeze()
assert index2._objtypes == index._objtypes
assert index2._objnames == index._objnames
# prune
index.prune(['docname1_2', 'docname2_2'])
assert index._titles == {'docname1_2': 'title1_2', 'docname2_2': 'title2_2'}
assert index._filenames == {
'docname1_2': 'filename1_2',
'docname2_2': 'filename2_2',
}
assert index._mapping == {
'fermion': {'docname1_2', 'docname2_2'},
'comment': {'docname1_2', 'docname2_2'},
'non': {'docname1_2', 'docname2_2'},
'index': {'docname1_2', 'docname2_2'},
'test': {'docname1_2', 'docname2_2'},
}
assert index._title_mapping == {
'another_titl': {'docname1_2', 'docname2_2'},
'section_titl': {'docname1_2', 'docname2_2'},
}
assert index._objtypes == {('dummy1', 'objtype1'): 0, ('dummy2', 'objtype1'): 1}
assert index._objnames == {
0: ('dummy1', 'objtype1', 'objtype1'),
1: ('dummy2', 'objtype1', 'objtype1'),
}
# freeze after prune
assert index.freeze() == {
'docnames': ('docname1_2', 'docname2_2'),
'envversion': '1.0',
'filenames': ['filename1_2', 'filename2_2'],
'objects': {},
'objnames': {
0: ('dummy1', 'objtype1', 'objtype1'),
1: ('dummy2', 'objtype1', 'objtype1'),
},
'objtypes': {0: 'dummy1:objtype1', 1: 'dummy2:objtype1'},
'terms': {
'comment': [0, 1],
'fermion': [0, 1],
'index': [0, 1],
'non': [0, 1],
'test': [0, 1],
},
'titles': ('title1_2', 'title2_2'),
'titleterms': {
'another_titl': [0, 1],
'section_titl': [0, 1],
},
'alltitles': {
'another_title': [(0, 'another-title'), (1, 'another-title')],
'section_title': [(0, None), (1, None)],
},
'indexentries': {},
}
assert index._objtypes == {('dummy1', 'objtype1'): 0, ('dummy2', 'objtype1'): 1}
assert index._objnames == {
0: ('dummy1', 'objtype1', 'objtype1'),
1: ('dummy2', 'objtype1', 'objtype1'),
}
def test_IndexBuilder_lookup():
env = DummyEnvironment('1.0', {})
# zh
index = IndexBuilder(env, 'zh', {}, '')
assert index.lang.lang == 'zh'
# zh_CN
index = IndexBuilder(env, 'zh_CN', {}, '')
assert index.lang.lang == 'zh'
@pytest.mark.sphinx(
'html',
testroot='search',
confoverrides={'html_search_language': 'zh'},
srcdir='search_zh',
)
def test_search_index_gen_zh(app: SphinxTestApp) -> None:
app.build(force_all=True)
index = load_searchindex(app.outdir / 'searchindex.js')
assert 'chinesetest ' not in index['terms']
assert 'chinesetest' in index['terms']
assert 'chinesetesttwo' in index['terms']
assert 'cas' in index['terms']
@pytest.mark.sphinx(
'html',
testroot='search',
freshenv=True,
)
def test_nosearch(app: SphinxTestApp) -> None:
app.build()
index = load_searchindex(app.outdir / 'searchindex.js')
assert index['docnames'] == ['index', 'nosearch', 'tocitem']
# latex is in 'nosearch.rst', and nowhere else
assert 'latex' not in index['terms']
# cat is in 'index.rst' but is marked with the 'no-search' class
assert 'cat' not in index['terms']
# bat is indexed from 'index.rst' and 'tocitem.rst' (document IDs 0, 2), and
# not from 'nosearch.rst' (document ID 1)
assert 'bat' in index['terms']
assert index['terms']['bat'] == [0, 2]
@pytest.mark.sphinx(
'html',
testroot='search',
parallel=3,
freshenv=True,
)
def test_parallel(app: SphinxTestApp) -> None:
app.build()
index = load_searchindex(app.outdir / 'searchindex.js')
assert index['docnames'] == ['index', 'nosearch', 'tocitem']
@pytest.mark.sphinx('html', testroot='search')
def test_search_index_is_deterministic(app: SphinxTestApp) -> None:
app.build(force_all=True)
index = load_searchindex(app.outdir / 'searchindex.js')
# Pretty print the index. Only shown by pytest on failure.
print(f'searchindex.js contents:\n\n{json.dumps(index, indent=2)}')
assert_is_sorted(index, '')
def is_title_tuple_type(item: list[int | str]) -> bool:
"""In the search index, titles inside .alltitles are stored as a tuple of
(document_idx, title_anchor). Tuples are represented as lists in JSON,
but their contents must not be sorted. We cannot sort them anyway, as
document_idx is an int and title_anchor is a str.
"""
return len(item) == 2 and isinstance(item[0], int) and isinstance(item[1], str)
def assert_is_sorted(
item: dict[str, str] | list[int | str] | int | str, path: str
) -> None:
lists_not_to_sort = {
# Each element of .titles is related to the element of .docnames in the same position.
# The ordering is deterministic because .docnames is sorted.
'.titles',
# Each element of .filenames is related to the element of .docnames in the same position.
# The ordering is deterministic because .docnames is sorted.
'.filenames',
}
err_path = path or '<root>'
if isinstance(item, dict):
assert list(item.keys()) == sorted(item.keys()), f'{err_path} is not sorted'
for key, value in item.items():
assert_is_sorted(value, f'{path}.{key}')
elif isinstance(item, list):
if not is_title_tuple_type(item) and path not in lists_not_to_sort:
# sort nulls last; http://stackoverflow.com/questions/19868767/
assert item == sorted(item, key=lambda x: (x is None, x)), (
f'{err_path} is not sorted'
)
for i, child in enumerate(item):
assert_is_sorted(child, f'{path}[{i}]')
@pytest.mark.parametrize('directory', JAVASCRIPT_TEST_ROOTS, ids=lambda p: p.name)
def test_check_js_search_indexes(make_app, sphinx_test_tempdir, directory):
app = make_app(
'html',
srcdir=directory,
builddir=sphinx_test_tempdir / directory.name,
)
app.build()
fresh_searchindex = app.outdir / 'searchindex.js'
existing_searchindex = (
TESTS_ROOT / 'js' / 'fixtures' / directory.name / 'searchindex.js'
)
msg = (
f'Search index fixture {existing_searchindex} does not match regenerated copy.'
)
assert fresh_searchindex.read_bytes() == existing_searchindex.read_bytes(), msg
| DummyDomain |
python | langchain-ai__langchain | libs/core/langchain_core/output_parsers/openai_functions.py | {
"start": 6182,
"end": 9949
} | class ____(OutputFunctionsParser):
"""Parse an output as a Pydantic object.
This parser is used to parse the output of a chat model that uses OpenAI function
format to invoke functions.
The parser extracts the function call invocation and matches them to the Pydantic
schema provided.
An exception will be raised if the function call does not match the provided schema.
Example:
```python
message = AIMessage(
content="This is a test message",
additional_kwargs={
"function_call": {
"name": "cookie",
"arguments": json.dumps({"name": "value", "age": 10}),
}
},
)
chat_generation = ChatGeneration(message=message)
class Cookie(BaseModel):
name: str
age: int
class Dog(BaseModel):
species: str
# Full output
parser = PydanticOutputFunctionsParser(
pydantic_schema={"cookie": Cookie, "dog": Dog}
)
result = parser.parse_result([chat_generation])
```
"""
pydantic_schema: type[BaseModel] | dict[str, type[BaseModel]]
"""The Pydantic schema to parse the output with.
If multiple schemas are provided, then the function name will be used to
determine which schema to use.
"""
@model_validator(mode="before")
@classmethod
def validate_schema(cls, values: dict) -> Any:
"""Validate the Pydantic schema.
Args:
values: The values to validate.
Returns:
The validated values.
Raises:
ValueError: If the schema is not a Pydantic schema.
"""
schema = values["pydantic_schema"]
if "args_only" not in values:
values["args_only"] = (
isinstance(schema, type)
and not isinstance(schema, GenericAlias)
and issubclass(schema, BaseModel)
)
elif values["args_only"] and isinstance(schema, dict):
msg = (
"If multiple pydantic schemas are provided then args_only should be"
" False."
)
raise ValueError(msg)
return values
@override
def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
"""Parse the result of an LLM call to a JSON object.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
Raises:
ValueError: If the Pydantic schema is not valid.
Returns:
The parsed JSON object.
"""
result_ = super().parse_result(result)
if self.args_only:
if hasattr(self.pydantic_schema, "model_validate_json"):
pydantic_args = self.pydantic_schema.model_validate_json(result_)
else:
pydantic_args = self.pydantic_schema.parse_raw(result_) # type: ignore[attr-defined]
else:
fn_name = result_["name"]
args = result_["arguments"]
if isinstance(self.pydantic_schema, dict):
pydantic_schema = self.pydantic_schema[fn_name]
else:
pydantic_schema = self.pydantic_schema
if issubclass(pydantic_schema, BaseModel):
pydantic_args = pydantic_schema.model_validate_json(args)
elif issubclass(pydantic_schema, BaseModelV1):
pydantic_args = pydantic_schema.parse_raw(args)
else:
msg = f"Unsupported Pydantic schema: {pydantic_schema}"
raise ValueError(msg)
return pydantic_args
| PydanticOutputFunctionsParser |
python | scipy__scipy | benchmarks/benchmarks/signal_filtering.py | {
"start": 1387,
"end": 2101
} | class ____(Benchmark):
timeout = 100
timer = timeit.default_timer
param_names = ['n_samples', 'threads']
params = [
[1e3, 10e3],
[1, 2, 4]
]
def setup(self, n_samples, threads):
self.filt = butter(8, 8e-6, "lowpass", output="sos")
self.data = np.arange(int(n_samples) * 3000).reshape(int(n_samples), 3000)
self.chunks = np.array_split(self.data, threads)
def time_sosfilt(self, n_samples, threads):
with ThreadPoolExecutor(max_workers=threads) as pool:
futures = []
for i in range(threads):
futures.append(pool.submit(sosfilt, self.filt, self.chunks[i]))
wait(futures)
| ParallelSosfilt |
python | mlflow__mlflow | mlflow/cli/genai_eval_utils.py | {
"start": 1470,
"end": 9160
} | class ____:
"""Container for formatted table data."""
headers: list[str]
rows: list[list[Cell]]
def _format_assessment_cell(assessment: Assessment | None) -> Cell:
"""
Format a single assessment cell for table display.
Args:
assessment: Assessment object with result, rationale, and error fields
Returns:
Cell object with formatted value and assessment metadata
"""
if not assessment:
return Cell(value=NA_VALUE)
if assessment.error:
display_value = f"error: {assessment.error}"
elif assessment.result is not None and assessment.rationale:
display_value = f"value: {assessment.result}, rationale: {assessment.rationale}"
elif assessment.result is not None:
display_value = f"value: {assessment.result}"
elif assessment.rationale:
display_value = f"rationale: {assessment.rationale}"
else:
display_value = NA_VALUE
return Cell(value=display_value, assessment=assessment)
def resolve_scorers(scorer_names: list[str], experiment_id: str) -> list[Scorer]:
"""
Resolve scorer names to scorer objects.
Checks built-in scorers first, then registered scorers.
Supports both class names (e.g., "RelevanceToQuery") and snake_case
scorer names (e.g., "relevance_to_query").
Args:
scorer_names: List of scorer names to resolve
experiment_id: Experiment ID for looking up registered scorers
Returns:
List of resolved scorer objects
Raises:
click.UsageError: If a scorer is not found or no valid scorers specified
"""
resolved_scorers = []
builtin_scorers = get_all_scorers()
# Build map with both class name and snake_case name for lookup
builtin_scorer_map = {}
for scorer in builtin_scorers:
# Map by class name (e.g., "RelevanceToQuery")
builtin_scorer_map[scorer.__class__.__name__] = scorer
# Map by scorer.name (snake_case, e.g., "relevance_to_query")
if scorer.name is not None:
builtin_scorer_map[scorer.name] = scorer
for scorer_name in scorer_names:
if scorer_name in builtin_scorer_map:
resolved_scorers.append(builtin_scorer_map[scorer_name])
else:
# Try to get it as a registered scorer
try:
registered_scorer = get_scorer(name=scorer_name, experiment_id=experiment_id)
resolved_scorers.append(registered_scorer)
except MlflowException as e:
error_message = str(e)
if "not found" in error_message.lower():
available_builtin = ", ".join(
sorted({scorer.__class__.__name__ for scorer in builtin_scorers})
)
raise click.UsageError(
f"Could not identify Scorer '{scorer_name}'. "
f"Only built-in or registered scorers can be resolved. "
f"Available built-in scorers: {available_builtin}. "
f"To use a custom scorer, register it first in experiment {experiment_id} "
f"using the register_scorer() API."
)
else:
raise click.UsageError(
f"An error occurred when retrieving information for Scorer "
f"`{scorer_name}`: {error_message}"
)
if not resolved_scorers:
raise click.UsageError("No valid scorers specified")
return resolved_scorers
def extract_assessments_from_results(
results_df: pd.DataFrame, evaluation_run_id: str
) -> list[EvalResult]:
"""
Extract assessments from evaluation results DataFrame.
The evaluate() function returns results with a DataFrame that contains
an 'assessments' column. Each row has a list of assessment dictionaries
with metadata including AssessmentMetadataKey.SOURCE_RUN_ID that we use to
filter assessments from this specific evaluation run.
Args:
results_df: DataFrame from evaluate() results containing assessments column
evaluation_run_id: The MLflow run ID from the evaluation that generated the assessments
Returns:
List of EvalResult objects with trace_id and assessments
"""
output_data = []
for _, row in results_df.iterrows():
trace_id = row.get("trace_id", "unknown")
assessments_list = []
for assessment_dict in row.get("assessments", []):
# Only consider assessments from the evaluation run
metadata = assessment_dict.get("metadata", {})
source_run_id = metadata.get(AssessmentMetadataKey.SOURCE_RUN_ID)
if source_run_id != evaluation_run_id:
continue
assessment_name = assessment_dict.get("assessment_name")
assessment_result = None
assessment_rationale = None
assessment_error = None
if (feedback := assessment_dict.get("feedback")) and isinstance(feedback, dict):
assessment_result = feedback.get("value")
if rationale := assessment_dict.get("rationale"):
assessment_rationale = rationale
if error := assessment_dict.get("error"):
assessment_error = str(error)
assessments_list.append(
Assessment(
name=assessment_name,
result=assessment_result,
rationale=assessment_rationale,
error=assessment_error,
)
)
# If no assessments were found for this trace, add error markers
if not assessments_list:
assessments_list.append(
Assessment(
name=NA_VALUE,
result=None,
rationale=None,
error="No assessments found on trace",
)
)
output_data.append(EvalResult(trace_id=trace_id, assessments=assessments_list))
return output_data
def format_table_output(output_data: list[EvalResult]) -> TableOutput:
"""
Format evaluation results as table data.
Args:
output_data: List of EvalResult objects with assessments
Returns:
TableOutput dataclass containing headers and rows
"""
# Extract unique assessment names from output_data to use as column headers
# Note: assessment name can be None, so we filter it out
assessment_names_set = set()
for trace_result in output_data:
for assessment in trace_result.assessments:
if assessment.name and assessment.name != NA_VALUE:
assessment_names_set.add(assessment.name)
# Sort for consistent ordering
assessment_names = sorted(assessment_names_set)
headers = ["trace_id"] + assessment_names
table_data = []
for trace_result in output_data:
# Create Cell for trace_id column
row = [Cell(value=trace_result.trace_id)]
# Build a map of assessment name -> assessment for this trace
assessment_map = {
assessment.name: assessment
for assessment in trace_result.assessments
if assessment.name and assessment.name != NA_VALUE
}
# For each assessment name in headers, get the corresponding assessment
for assessment_name in assessment_names:
cell_content = _format_assessment_cell(assessment_map.get(assessment_name))
row.append(cell_content)
table_data.append(row)
return TableOutput(headers=headers, rows=table_data)
| TableOutput |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/slots2.py | {
"start": 490,
"end": 706
} | class ____:
__slots__ = ("values",)
# This should not generate an error because class variables
# in a dataclass are replaced by instance variables.
values: list[int] = field(default_factory=list)
| Slots3 |
python | joke2k__faker | faker/providers/ssn/en_PH/__init__.py | {
"start": 31,
"end": 2638
} | class ____(BaseProvider):
"""
Provider for Philippine IDs that are related to social security
There is no unified social security program in the Philippines. Instead, the Philippines has a messy collection of
social programs and IDs that, when put together, serves as an analogue of other countries' social security program.
The government agencies responsible for these programs have relatively poor/outdated information and documentation
on their respective websites, so the sources section include third party "unofficial" information.
- Social Security System (SSS) - Social insurance program for workers in private, professional, and informal sectors
- Government Service Insurance System (GSIS) - Social insurance program for government employees
- Home Development Mutual Fund (popularly known as Pag-IBIG) - Socialized financial assistance and loaning program
- Philippine Health Insurance Corporation (PhilHealth) - Social insurance program for health care
- Unified Multi-Purpose ID (UMID) - Identity card with common reference number (CRN) that serves as a link to
the four previous programs and was planned to supersede the previous IDs, but
its future is now uncertain because of the upcoming national ID system
Sources:
- https://www.sss.gov.ph/sss/DownloadContent?fileName=SSSForms_UMID_Application.pdf
- https://www.gsis.gov.ph/active-members/benefits/ecard-plus/
- https://www.pagibigfund.gov.ph/DLForms/providentrelated/PFF039_MembersDataForm_V07.pdf
- https://filipiknow.net/is-umid-and-sss-id-the-same/
- https://filipiknow.net/philhealth-number/
- https://en.wikipedia.org/wiki/Unified_Multi-Purpose_ID
"""
sss_formats = ("##-#######-#",)
gsis_formats = ("###########",)
philhealth_formats = ("##-#########-#",)
pagibig_formats = ("####-####-####",)
umid_formats = ("####-#######-#",)
def sss(self) -> str:
return self.numerify(self.random_element(self.sss_formats))
def gsis(self) -> str:
return self.numerify(self.random_element(self.gsis_formats))
def pagibig(self) -> str:
return self.numerify(self.random_element(self.pagibig_formats))
def philhealth(self) -> str:
return self.numerify(self.random_element(self.philhealth_formats))
def umid(self) -> str:
return self.numerify(self.random_element(self.umid_formats))
def ssn(self) -> str:
# Use UMID as SSN in the interim till its deprecation
return self.umid()
| Provider |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 829801,
"end": 830187
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("PackageFile", graphql_name="node")
"""The item at the end of the edge."""
| PackageFileEdge |
python | py-pdf__pypdf | make_release.py | {
"start": 315,
"end": 10011
} | class ____:
"""Capture the data of a git commit."""
commit_hash: str
prefix: str
message: str
author: str
author_login: str
def main(changelog_path: str) -> None:
"""
Create a changelog.
Args:
changelog_path: The location of the CHANGELOG file
"""
changelog = get_changelog(changelog_path)
git_tag = get_most_recent_git_tag()
changes, changes_with_author = get_formatted_changes(git_tag)
if changes == "":
print("No changes")
return
new_version = version_bump(git_tag)
new_version = get_version_interactive(new_version, changes)
adjust_version_py(new_version)
today = datetime.now(tz=timezone.utc)
header = f"## Version {new_version}, {today:%Y-%m-%d}\n"
url = f"https://github.com/{GH_ORG}/{GH_PROJECT}/compare/{git_tag}...{new_version}"
trailer = f"\n[Full Changelog]({url})\n\n"
new_entry = header + changes + trailer
print(new_entry)
write_commit_msg_file(new_version, changes_with_author + trailer)
# write_release_msg_file(new_version, changes_with_author + trailer, today)
# Make the script idempotent by checking if the new entry is already in the changelog
if new_entry in changelog:
print("Changelog is already up-to-date!")
return
new_changelog = "# CHANGELOG\n\n" + new_entry + strip_header(changelog)
write_changelog(new_changelog, changelog_path)
print_instructions(new_version)
def print_instructions(new_version: str) -> None:
"""Print release instructions."""
print("=" * 80)
print(f"☑ {VERSION_FILE_PATH} was adjusted to '{new_version}'")
print(f"☑ {CHANGELOG_FILE_PATH} was adjusted")
print()
print("Now run:")
print(" git commit -eF RELEASE_COMMIT_MSG.md")
print(" git push")
def adjust_version_py(version: str) -> None:
"""Adjust the __version__ string."""
with open(VERSION_FILE_PATH, "w") as fp:
fp.write(f'__version__ = "{version}"\n')
def get_version_interactive(new_version: str, changes: str) -> str:
"""Get the new __version__ interactively."""
from rich.prompt import Prompt # noqa: PLC0415
print("The changes are:")
print(changes)
orig = new_version
new_version = Prompt.ask("New semantic version", default=orig)
while not is_semantic_version(new_version):
new_version = Prompt.ask(
"That was not a semantic version. Please enter a semantic version",
default=orig,
)
return new_version
def is_semantic_version(version: str) -> bool:
"""Check if the given version is a semantic version."""
# This doesn't cover the edge-cases like pre-releases
if version.count(".") != 2:
return False
try:
return bool([int(part) for part in version.split(".")])
except Exception:
return False
def write_commit_msg_file(new_version: str, commit_changes: str) -> None:
"""
Write a file that can be used as a commit message.
Like this:
git commit -eF RELEASE_COMMIT_MSG.md && git push
"""
with open("RELEASE_COMMIT_MSG.md", "w") as fp:
fp.write(f"REL: {new_version}\n\n")
fp.write("## What's new\n")
fp.write(commit_changes)
def write_release_msg_file(
new_version: str, commit_changes: str, today: datetime
) -> None:
"""
Write a file that can be used as a git tag message.
Like this:
git tag -eF RELEASE_TAG_MSG.md && git push
"""
with open("RELEASE_TAG_MSG.md", "w") as fp:
fp.write(f"Version {new_version}, {today:%Y-%m-%d}\n\n")
fp.write("## What's new\n")
fp.write(commit_changes)
def strip_header(md: str) -> str:
"""Remove the 'CHANGELOG' header."""
return md.removeprefix("# CHANGELOG").lstrip()
def version_bump(git_tag: str) -> str:
"""
Increase the patch version of the git tag by one.
Args:
git_tag: Old version tag
Returns:
The new version where the patch version is bumped.
"""
# just assume a patch version change
major, minor, patch = git_tag.split(".")
return f"{major}.{minor}.{int(patch) + 1}"
def get_changelog(changelog_path: str) -> str:
"""
Read the changelog.
Args:
changelog_path: Path to the CHANGELOG file
Returns:
Data of the CHANGELOG
"""
with open(changelog_path, encoding="utf-8") as fh:
return fh.read()
def write_changelog(new_changelog: str, changelog_path: str) -> None:
"""
Write the changelog.
Args:
new_changelog: Contents of the new CHANGELOG
changelog_path: Path where the CHANGELOG file is
"""
with open(changelog_path, "w", encoding="utf-8") as fh:
fh.write(new_changelog)
def get_formatted_changes(git_tag: str) -> tuple[str, str]:
"""
Format the changes done since the last tag.
Args:
git_tag: the reference tag
Returns:
Changes done since git_tag
"""
commits = get_git_commits_since_tag(git_tag)
# Group by prefix
grouped = {}
for commit in commits:
if commit.prefix not in grouped:
grouped[commit.prefix] = []
grouped[commit.prefix].append(
{"msg": commit.message, "author": commit.author_login}
)
# Order prefixes
order = [
"SEC",
"DEP",
"ENH",
"PI",
"BUG",
"ROB",
"DOC",
"DEV",
"CI",
"MAINT",
"TST",
"STY",
]
abbrev2long = {
"SEC": "Security",
"DEP": "Deprecations",
"ENH": "New Features",
"BUG": "Bug Fixes",
"ROB": "Robustness",
"DOC": "Documentation",
"DEV": "Developer Experience",
"CI": "Continuous Integration",
"MAINT": "Maintenance",
"TST": "Testing",
"STY": "Code Style",
"PI": "Performance Improvements",
}
# Create output
output = ""
output_with_user = ""
for prefix in order:
if prefix not in grouped:
continue
tmp = f"\n### {abbrev2long[prefix]} ({prefix})\n" # header
output += tmp
output_with_user += tmp
for commit in grouped[prefix]:
output += f"- {commit['msg']}\n"
output_with_user += f"- {commit['msg']} by @{commit['author']}\n"
del grouped[prefix]
if grouped:
output += "\n### Other\n"
output_with_user += "\n### Other\n"
for prefix, commits in grouped.items():
for commit in commits:
output += f"- {prefix}: {commit['msg']}\n"
output_with_user += (
f"- {prefix}: {commit['msg']} by @{commit['author']}\n"
)
return output, output_with_user
def get_most_recent_git_tag() -> str:
"""
Get the git tag most recently created.
Returns:
Most recently created git tag.
"""
return subprocess.check_output(
["git", "describe", "--tag", "--abbrev=0"], stderr=subprocess.STDOUT, text=True
).strip()
def get_author_mapping(line_count: int) -> dict[str, str]:
"""
Get the authors for each commit.
Args:
line_count: Number of lines from Git log output. Used for determining how
many commits to fetch.
Returns:
A mapping of long commit hashes to author login handles.
"""
per_page = min(line_count, 100)
page = 1
mapping: dict[str, str] = {}
for _ in range(0, line_count, per_page):
with urllib.request.urlopen(
f"https://api.github.com/repos/{GH_ORG}/{GH_PROJECT}/commits?per_page={per_page}&page={page}"
) as response:
commits = json.loads(response.read())
page += 1
for commit in commits:
mapping[commit["sha"]] = commit["author"]["login"]
return mapping
def get_git_commits_since_tag(git_tag: str) -> list[Change]:
"""
Get all commits since the last tag.
Args:
git_tag: Reference tag from which the changes to the current commit are
fetched.
Returns:
List of all changes since git_tag.
"""
commits = (
subprocess.check_output(
[
"git",
"--no-pager",
"log",
f"{git_tag}..HEAD",
'--pretty=format:"%H:::%s:::%aN"',
],
stderr=subprocess.STDOUT,
)
.decode("UTF-8")
.strip()
)
lines = commits.splitlines()
authors = get_author_mapping(len(lines))
return [parse_commit_line(line, authors) for line in lines if line != ""]
def parse_commit_line(line: str, authors: dict[str, str]) -> Change:
"""
Parse the first line of a git commit message.
Args:
line: The first line of a git commit message.
Returns:
The parsed Change object
Raises:
ValueError: The commit line is not well-structured
"""
parts = line.strip().strip('"\\').split(":::")
if len(parts) != 3:
raise ValueError(f"Invalid commit line: '{line}'")
commit_hash, rest, author = parts
if ":" in rest:
prefix, message = rest.split(": ", 1)
else:
prefix = ""
message = rest
# Standardize
message = message.strip()
commit_hash = commit_hash.strip()
author_login = authors[commit_hash]
prefix = prefix.strip()
if prefix == "DOCS":
prefix = "DOC"
return Change(
commit_hash=commit_hash,
prefix=prefix,
message=message,
author=author,
author_login=author_login,
)
if __name__ == "__main__":
main(CHANGELOG_FILE_PATH)
| Change |
python | numba__numba | numba/core/ir.py | {
"start": 33834,
"end": 37148
} | class ____(EqualityCheckMixin):
"""
Attributes
-----------
- parent: Scope
Parent scope
- localvars: VarMap
Scope-local variable map
- loc: Loc
Start of scope location
"""
def __init__(self, parent, loc):
assert parent is None or isinstance(parent, Scope)
assert isinstance(loc, Loc)
self.parent = parent
self.localvars = VarMap()
self.loc = loc
self.redefined = defaultdict(int)
self.var_redefinitions = defaultdict(set)
def define(self, name, loc):
"""
Define a variable
"""
v = Var(scope=self, name=name, loc=loc)
self.localvars.define(v.name, v)
return v
def get(self, name):
"""
Refer to a variable. Returns the latest version.
"""
if name in self.redefined:
name = "%s.%d" % (name, self.redefined[name])
return self.get_exact(name)
def get_exact(self, name):
"""
Refer to a variable. The returned variable has the exact
name (exact variable version).
"""
try:
return self.localvars.get(name)
except NotDefinedError:
if self.has_parent:
return self.parent.get(name)
else:
raise
def get_or_define(self, name, loc):
if name in self.redefined:
name = "%s.%d" % (name, self.redefined[name])
if name not in self.localvars:
return self.define(name, loc)
else:
return self.localvars.get(name)
def redefine(self, name, loc, rename=True):
"""
Redefine if the name is already defined
"""
if name not in self.localvars:
return self.define(name, loc)
elif not rename:
# Must use the same name if the variable is a cellvar, which
# means it could be captured in a closure.
return self.localvars.get(name)
else:
while True:
ct = self.redefined[name]
self.redefined[name] = ct + 1
newname = "%s.%d" % (name, ct + 1)
try:
res = self.define(newname, loc)
except RedefinedError:
continue
else:
self.var_redefinitions[name].add(newname)
return res
def get_versions_of(self, name):
"""
Gets all known versions of a given name
"""
vers = set()
def walk(thename):
redefs = self.var_redefinitions.get(thename, None)
if redefs:
for v in redefs:
vers.add(v)
walk(v)
walk(name)
return vers
def make_temp(self, loc):
n = len(self.localvars)
v = Var(scope=self, name='$%d' % n, loc=loc)
self.localvars.define(v.name, v)
return v
@property
def has_parent(self):
return self.parent is not None
def __repr__(self):
return "Scope(has_parent=%r, num_vars=%d, %s)" % (self.has_parent,
len(self.localvars),
self.loc)
| Scope |
python | openai__openai-python | src/openai/resources/beta/chatkit/sessions.py | {
"start": 5857,
"end": 10527
} | class ____(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncSessionsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncSessionsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncSessionsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncSessionsWithStreamingResponse(self)
async def create(
self,
*,
user: str,
workflow: ChatSessionWorkflowParam,
chatkit_configuration: ChatSessionChatKitConfigurationParam | Omit = omit,
expires_after: ChatSessionExpiresAfterParam | Omit = omit,
rate_limits: ChatSessionRateLimitsParam | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ChatSession:
"""
Create a ChatKit session
Args:
user: A free-form string that identifies your end user; ensures this Session can
access other objects that have the same `user` scope.
workflow: Workflow that powers the session.
chatkit_configuration: Optional overrides for ChatKit runtime configuration features
expires_after: Optional override for session expiration timing in seconds from creation.
Defaults to 10 minutes.
rate_limits: Optional override for per-minute request limits. When omitted, defaults to 10.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
return await self._post(
"/chatkit/sessions",
body=await async_maybe_transform(
{
"user": user,
"workflow": workflow,
"chatkit_configuration": chatkit_configuration,
"expires_after": expires_after,
"rate_limits": rate_limits,
},
session_create_params.SessionCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ChatSession,
)
async def cancel(
self,
session_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ChatSession:
"""
Cancel a ChatKit session
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not session_id:
raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}")
extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
return await self._post(
f"/chatkit/sessions/{session_id}/cancel",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ChatSession,
)
| AsyncSessions |
python | ApeWorX__ape | src/ape_ethereum/multicall/handlers.py | {
"start": 4590,
"end": 8176
} | class ____(BaseMulticall):
"""
Create a sequence of calls to execute at once using ``eth_call`` via the Multicall3 contract.
Usage example::
from ape_ethereum import multicall
call = multicall.Call()
call.add(contract.myMethod, *call_args)
call.add(contract.myMethod, *call_args)
... # Add as many calls as desired
call.add(contract.myMethod, *call_args)
a, b, ..., z = call() # Performs multicall
# or, using a builder pattern:
call = multicall.Call()
.add(contract.myMethod, *call_args)
.add(contract.myMethod, *call_args)
... # Add as many calls as desired
.add(contract.myMethod, *call_args)
a, b, ..., z = call() # Performs multicall
"""
def __init__(
self,
address: "AddressType" = MULTICALL3_ADDRESS,
supported_chains: Optional[list[int]] = None,
) -> None:
super().__init__(address=address, supported_chains=supported_chains)
self.abis: list[MethodABI] = []
self._result: Union[None, list[tuple[bool, HexBytes]]] = None
@property
def handler(self) -> ContractCallHandler: # type: ignore[override]
return super().handler.call # NOTE: all Multicall3 methods are mutable calls by default
def add(self, call: ContractMethodHandler, *args, **kwargs):
if "value" in kwargs:
raise InvalidOption("value")
super().add(call, *args, **kwargs)
self.abis.append(_select_method_abi(call.abis, args))
return self
@property
def returnData(self) -> list["HexBytes"]:
# NOTE: this property is kept camelCase to align with the raw EVM struct
result = self._result # Declare for typing reasons.
return [res.returnData if res.success else None for res in result] # type: ignore
def _decode_results(self) -> Iterator[Any]:
for abi, data in zip(self.abis, self.returnData):
if data is None:
# The call failed.
yield data
continue
try:
result = self.provider.network.ecosystem.decode_returndata(abi, data)
except DecodingError as err:
logger.error(err)
yield data # Yield the raw data
continue
if isinstance(result, (list, tuple)) and len(result) == 1:
yield result[0]
else:
yield result
def __call__(self, **call_kwargs) -> Iterator[Any]:
"""
Perform the Multicall call. This call will trigger again every time the ``Call`` object
is called.
Raises:
:class:`~ape_ethereum.multicall.exceptions.UnsupportedChainError`:
If there is not an instance of Multicall3 deployed
on the current chain at the expected address.
Args:
**call_kwargs: the kwargs to pass through to the call handler.
Returns:
Iterator[Any]: the sequence of values produced by performing each call stored
by this instance.
"""
self._result = self.handler(self.calls, **call_kwargs)
return self._decode_results()
def as_transaction(self, **txn_kwargs) -> "TransactionAPI":
"""
Encode the Multicall transaction as a ``TransactionAPI`` object, but do not execute it.
Returns:
:class:`~ape.api.transactions.TransactionAPI`
"""
return self.handler.as_transaction(self.calls, **txn_kwargs)
| Call |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_health_checks.py | {
"start": 4742,
"end": 4907
} | class ____(RuleBasedStateMachine):
_ = rule()(lambda self: None)
@initialize()
def r(self):
return "any non-None value"
| ReturningInitializeMachine |
python | numpy__numpy | numpy/lib/tests/test_function_base.py | {
"start": 161553,
"end": 172740
} | class ____:
def test_basic(self):
a0 = np.array(1)
a1 = np.arange(2)
a2 = np.arange(6).reshape(2, 3)
assert_equal(np.median(a0), 1)
assert_allclose(np.median(a1), 0.5)
assert_allclose(np.median(a2), 2.5)
assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5])
assert_equal(np.median(a2, axis=1), [1, 4])
assert_allclose(np.median(a2, axis=None), 2.5)
a = np.array([0.0444502, 0.0463301, 0.141249, 0.0606775])
assert_almost_equal((a[1] + a[3]) / 2., np.median(a))
a = np.array([0.0463301, 0.0444502, 0.141249])
assert_equal(a[0], np.median(a))
a = np.array([0.0444502, 0.141249, 0.0463301])
assert_equal(a[-1], np.median(a))
# check array scalar result
assert_equal(np.median(a).ndim, 0)
a[1] = np.nan
assert_equal(np.median(a).ndim, 0)
def test_axis_keyword(self):
a3 = np.array([[2, 3],
[0, 1],
[6, 7],
[4, 5]])
for a in [a3, np.random.randint(0, 100, size=(2, 3, 4))]:
orig = a.copy()
np.median(a, axis=None)
for ax in range(a.ndim):
np.median(a, axis=ax)
assert_array_equal(a, orig)
assert_allclose(np.median(a3, axis=0), [3, 4])
assert_allclose(np.median(a3.T, axis=1), [3, 4])
assert_allclose(np.median(a3), 3.5)
assert_allclose(np.median(a3, axis=None), 3.5)
assert_allclose(np.median(a3.T), 3.5)
def test_overwrite_keyword(self):
a3 = np.array([[2, 3],
[0, 1],
[6, 7],
[4, 5]])
a0 = np.array(1)
a1 = np.arange(2)
a2 = np.arange(6).reshape(2, 3)
assert_allclose(np.median(a0.copy(), overwrite_input=True), 1)
assert_allclose(np.median(a1.copy(), overwrite_input=True), 0.5)
assert_allclose(np.median(a2.copy(), overwrite_input=True), 2.5)
assert_allclose(
np.median(a2.copy(), overwrite_input=True, axis=0), [1.5, 2.5, 3.5])
assert_allclose(
np.median(a2.copy(), overwrite_input=True, axis=1), [1, 4])
assert_allclose(
np.median(a2.copy(), overwrite_input=True, axis=None), 2.5)
assert_allclose(
np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4])
assert_allclose(
np.median(a3.T.copy(), overwrite_input=True, axis=1), [3, 4])
a4 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5))
np.random.shuffle(a4.ravel())
assert_allclose(np.median(a4, axis=None),
np.median(a4.copy(), axis=None, overwrite_input=True))
assert_allclose(np.median(a4, axis=0),
np.median(a4.copy(), axis=0, overwrite_input=True))
assert_allclose(np.median(a4, axis=1),
np.median(a4.copy(), axis=1, overwrite_input=True))
assert_allclose(np.median(a4, axis=2),
np.median(a4.copy(), axis=2, overwrite_input=True))
def test_array_like(self):
x = [1, 2, 3]
assert_almost_equal(np.median(x), 2)
x2 = [x]
assert_almost_equal(np.median(x2), 2)
assert_allclose(np.median(x2, axis=0), x)
def test_subclass(self):
# gh-3846
class MySubClass(np.ndarray):
def __new__(cls, input_array, info=None):
obj = np.asarray(input_array).view(cls)
obj.info = info
return obj
def mean(self, axis=None, dtype=None, out=None):
return -7
a = MySubClass([1, 2, 3])
assert_equal(np.median(a), -7)
@pytest.mark.parametrize('arr',
([1., 2., 3.], [1., np.nan, 3.], np.nan, 0.))
def test_subclass2(self, arr):
"""Check that we return subclasses, even if a NaN scalar."""
class MySubclass(np.ndarray):
pass
m = np.median(np.array(arr).view(MySubclass))
assert isinstance(m, MySubclass)
def test_out(self):
o = np.zeros((4,))
d = np.ones((3, 4))
assert_equal(np.median(d, 0, out=o), o)
o = np.zeros((3,))
assert_equal(np.median(d, 1, out=o), o)
o = np.zeros(())
assert_equal(np.median(d, out=o), o)
def test_out_nan(self):
with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', '', RuntimeWarning)
o = np.zeros((4,))
d = np.ones((3, 4))
d[2, 1] = np.nan
assert_equal(np.median(d, 0, out=o), o)
o = np.zeros((3,))
assert_equal(np.median(d, 1, out=o), o)
o = np.zeros(())
assert_equal(np.median(d, out=o), o)
def test_nan_behavior(self):
a = np.arange(24, dtype=float)
a[2] = np.nan
assert_equal(np.median(a), np.nan)
assert_equal(np.median(a, axis=0), np.nan)
a = np.arange(24, dtype=float).reshape(2, 3, 4)
a[1, 2, 3] = np.nan
a[1, 1, 2] = np.nan
# no axis
assert_equal(np.median(a), np.nan)
assert_equal(np.median(a).ndim, 0)
# axis0
b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 0)
b[2, 3] = np.nan
b[1, 2] = np.nan
assert_equal(np.median(a, 0), b)
# axis1
b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 1)
b[1, 3] = np.nan
b[1, 2] = np.nan
assert_equal(np.median(a, 1), b)
# axis02
b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), (0, 2))
b[1] = np.nan
b[2] = np.nan
assert_equal(np.median(a, (0, 2)), b)
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work correctly")
def test_empty(self):
# mean(empty array) emits two warnings: empty slice and divide by 0
a = np.array([], dtype=float)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.median(a), np.nan)
assert_(w[0].category is RuntimeWarning)
assert_equal(len(w), 2)
# multiple dimensions
a = np.array([], dtype=float, ndmin=3)
# no axis
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.median(a), np.nan)
assert_(w[0].category is RuntimeWarning)
# axis 0 and 1
b = np.array([], dtype=float, ndmin=2)
assert_equal(np.median(a, axis=0), b)
assert_equal(np.median(a, axis=1), b)
# axis 2
b = np.array(np.nan, dtype=float, ndmin=2)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.median(a, axis=2), b)
assert_(w[0].category is RuntimeWarning)
def test_object(self):
o = np.arange(7.)
assert_(type(np.median(o.astype(object))), float)
o[2] = np.nan
assert_(type(np.median(o.astype(object))), float)
def test_extended_axis(self):
o = np.random.normal(size=(71, 23))
x = np.dstack([o] * 10)
assert_equal(np.median(x, axis=(0, 1)), np.median(o))
x = np.moveaxis(x, -1, 0)
assert_equal(np.median(x, axis=(-2, -1)), np.median(o))
x = x.swapaxes(0, 1).copy()
assert_equal(np.median(x, axis=(0, -1)), np.median(o))
assert_equal(np.median(x, axis=(0, 1, 2)), np.median(x, axis=None))
assert_equal(np.median(x, axis=(0, )), np.median(x, axis=0))
assert_equal(np.median(x, axis=(-1, )), np.median(x, axis=-1))
d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11))
np.random.shuffle(d.ravel())
assert_equal(np.median(d, axis=(0, 1, 2))[0],
np.median(d[:, :, :, 0].flatten()))
assert_equal(np.median(d, axis=(0, 1, 3))[1],
np.median(d[:, :, 1, :].flatten()))
assert_equal(np.median(d, axis=(3, 1, -4))[2],
np.median(d[:, :, 2, :].flatten()))
assert_equal(np.median(d, axis=(3, 1, 2))[2],
np.median(d[2, :, :, :].flatten()))
assert_equal(np.median(d, axis=(3, 2))[2, 1],
np.median(d[2, 1, :, :].flatten()))
assert_equal(np.median(d, axis=(1, -2))[2, 1],
np.median(d[2, :, :, 1].flatten()))
assert_equal(np.median(d, axis=(1, 3))[2, 2],
np.median(d[2, :, 2, :].flatten()))
def test_extended_axis_invalid(self):
d = np.ones((3, 5, 7, 11))
assert_raises(AxisError, np.median, d, axis=-5)
assert_raises(AxisError, np.median, d, axis=(0, -5))
assert_raises(AxisError, np.median, d, axis=4)
assert_raises(AxisError, np.median, d, axis=(0, 4))
assert_raises(ValueError, np.median, d, axis=(1, 1))
def test_keepdims(self):
d = np.ones((3, 5, 7, 11))
assert_equal(np.median(d, axis=None, keepdims=True).shape,
(1, 1, 1, 1))
assert_equal(np.median(d, axis=(0, 1), keepdims=True).shape,
(1, 1, 7, 11))
assert_equal(np.median(d, axis=(0, 3), keepdims=True).shape,
(1, 5, 7, 1))
assert_equal(np.median(d, axis=(1,), keepdims=True).shape,
(3, 1, 7, 11))
assert_equal(np.median(d, axis=(0, 1, 2, 3), keepdims=True).shape,
(1, 1, 1, 1))
assert_equal(np.median(d, axis=(0, 1, 3), keepdims=True).shape,
(1, 1, 7, 1))
@pytest.mark.parametrize(
argnames='axis',
argvalues=[
None,
1,
(1, ),
(0, 1),
(-3, -1),
]
)
def test_keepdims_out(self, axis):
d = np.ones((3, 5, 7, 11))
if axis is None:
shape_out = (1,) * d.ndim
else:
axis_norm = normalize_axis_tuple(axis, d.ndim)
shape_out = tuple(
1 if i in axis_norm else d.shape[i] for i in range(d.ndim))
out = np.empty(shape_out)
result = np.median(d, axis=axis, keepdims=True, out=out)
assert result is out
assert_equal(result.shape, shape_out)
@pytest.mark.parametrize("dtype", ["m8[s]"])
@pytest.mark.parametrize("pos", [0, 23, 10])
def test_nat_behavior(self, dtype, pos):
# TODO: Median does not support Datetime, due to `mean`.
# NaT and NaN should behave the same, do basic tests for NaT.
a = np.arange(0, 24, dtype=dtype)
a[pos] = "NaT"
res = np.median(a)
assert res.dtype == dtype
assert np.isnat(res)
res = np.percentile(a, [30, 60])
assert res.dtype == dtype
assert np.isnat(res).all()
a = np.arange(0, 24 * 3, dtype=dtype).reshape(-1, 3)
a[pos, 1] = "NaT"
res = np.median(a, axis=0)
assert_array_equal(np.isnat(res), [False, True, False])
| TestMedian |
python | openai__openai-python | src/openai/types/beta/chatkit/chatkit_response_output_text.py | {
"start": 825,
"end": 999
} | class ____(BaseModel):
type: Literal["url"]
"""Type discriminator that is always `url`."""
url: str
"""URL referenced by the annotation."""
| AnnotationURLSource |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.