language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | sympy__sympy | sympy/codegen/ast.py | {
"start": 14990,
"end": 16185
} | class ____(AssignmentBase):
"""
Represents variable assignment for code generation.
Parameters
==========
lhs : Expr
SymPy object representing the lhs of the expression. These should be
singular objects, such as one would use in writing code. Notable types
include Symbol, MatrixSymbol, MatrixElement, and Indexed. Types that
subclass these types are also supported.
rhs : Expr
SymPy object representing the rhs of the expression. This can be any
type, provided its shape corresponds to that of the lhs. For example,
a Matrix type can be assigned to MatrixSymbol, but not to Symbol, as
the dimensions will not align.
Examples
========
>>> from sympy import symbols, MatrixSymbol, Matrix
>>> from sympy.codegen.ast import Assignment
>>> x, y, z = symbols('x, y, z')
>>> Assignment(x, y)
Assignment(x, y)
>>> Assignment(x, 0)
Assignment(x, 0)
>>> A = MatrixSymbol('A', 1, 3)
>>> mat = Matrix([x, y, z]).T
>>> Assignment(A, mat)
Assignment(A, Matrix([[x, y, z]]))
>>> Assignment(A[0, 1], x)
Assignment(A[0, 1], x)
"""
op = ':='
| Assignment |
python | takluyver__flit | flit_core/flit_core/config.py | {
"start": 597,
"end": 1580
} | class ____(ValueError):
pass
metadata_list_fields = {
'classifiers',
'requires',
'dev-requires'
}
pep621_allowed_fields = {
'name',
'version',
'description',
'readme',
'requires-python',
'license',
'license-files',
'authors',
'maintainers',
'keywords',
'classifiers',
'urls',
'scripts',
'gui-scripts',
'entry-points',
'dependencies',
'optional-dependencies',
'dynamic',
'import-names', # PEP 794
'import-namespaces'
}
allowed_dynamic_fields = {
'version',
'description',
'import-names',
'import-namespaces'
}
default_license_files_globs = ['COPYING*', 'LICEN[CS]E*', 'NOTICE*', 'AUTHORS*']
license_files_allowed_chars = re.compile(r'^[\w\-\.\/\*\?\[\]]+$')
def read_flit_config(path):
"""Read and check the `pyproject.toml` file with data about the package.
"""
d = tomllib.loads(path.read_text('utf-8'))
return prep_toml_config(d, path)
| ConfigError |
python | getsentry__sentry | tests/sentry/releases/endpoints/test_project_release_details.py | {
"start": 9630,
"end": 11177
} | class ____(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
self.commits = [{"id": "a" * 40}, {"id": "b" * 40}]
self.ref = "master"
self.url = "https://example.com"
self.dateReleased = "1000-10-10T06:06"
def test_simple(self) -> None:
serializer = ReleaseSerializer(
data={
"ref": self.ref,
"url": self.url,
"dateReleased": self.dateReleased,
"commits": self.commits,
}
)
assert serializer.is_valid()
assert set(serializer.fields.keys()) == {"ref", "url", "dateReleased", "commits", "status"}
result = serializer.validated_data
assert result["ref"] == self.ref
assert result["url"] == self.url
assert result["dateReleased"] == datetime(1000, 10, 10, 6, 6, tzinfo=UTC)
assert result["commits"] == self.commits
def test_fields_not_required(self) -> None:
serializer = ReleaseSerializer(data={})
assert serializer.is_valid()
def test_do_not_allow_null_commits(self) -> None:
serializer = ReleaseSerializer(data={"commits": None})
assert not serializer.is_valid()
def test_ref_limited_by_max_version_length(self) -> None:
serializer = ReleaseSerializer(data={"ref": "a" * MAX_VERSION_LENGTH})
assert serializer.is_valid()
serializer = ReleaseSerializer(data={"ref": "a" * (MAX_VERSION_LENGTH + 1)})
assert not serializer.is_valid()
| ReleaseSerializerTest |
python | pytorch__pytorch | torch/_inductor/codegen/halide.py | {
"start": 6985,
"end": 16245
} | class ____(OpOverrides):
@staticmethod
def to_dtype(
x,
dtype: torch.dtype,
src_dtype: Optional[torch.dtype] = None,
use_compute_types=True,
):
if dtype == torch.bool:
return f"({x} != 0)"
return f"hl.cast({halide_type(dtype)}, {x})"
@staticmethod
def to_dtype_bitcast(x, dtype: torch.dtype, src_dtype: torch.dtype):
if src_dtype in (torch.float16, torch.bfloat16):
x = f"hl.cast({halide_type(src_dtype)}, {x})" # body compute is upcast to fp32
line = f"hl.reinterpret({halide_type(dtype)}, {x})"
if dtype in (torch.float16, torch.bfloat16):
line = f"hl.cast(hl.Float(32), {line})"
return line
@classmethod
def constant(cls, value, dtype):
return cls.to_dtype(halide_constant(value), dtype)
@staticmethod
def abs(x):
return f"hl.abs({x})"
@staticmethod
def exp(x):
if not hasattr(x, "name"):
return f"hl.exp({x})"
return f"hl.fast_exp(hl.cast(hl.Float(32), {x})) if {x.name}.type().bits() <= 32 else hl.exp({x})"
@staticmethod
def sqrt(x):
return f"hl.sqrt({x})"
@staticmethod
def minimum(a, b):
# return f"hl.min({a}, {b})" <== handles nan wrong
if not hasattr(a, "name"):
return f"hl.min({a}, {b})"
b = f"hl.cast({a.name}.type(), {b})"
return f"hl.select(({a}<{b})|hl.is_nan({a}), {a}, {b}) if {a.name}.type().is_float() else hl.min({a}, {b})"
@staticmethod
def maximum(a, b):
# return f"hl.max({a}, {b})" <== handles nan wrong
if not hasattr(a, "name"):
return f"hl.max({a}, {b})"
b = f"hl.cast({a.name}.type(), {b})"
return f"hl.select(({a}>{b})|hl.is_nan({a}), {a}, {b}) if {a.name}.type().is_float() else hl.max({a}, {b})"
@staticmethod
def where(a, b, c):
if hasattr(b, "name"):
c = f"hl.cast({b.name}.type(), {c})"
return f"hl.select({a}, {b}, {c})"
@staticmethod
def cos(x):
return f"hl.cos({x})"
@staticmethod
def sin(x):
return f"hl.sin({x})"
@staticmethod
def lgamma(x):
raise Unsupported("lgamma")
@staticmethod
def erf(x):
return f"hl.erf({x})"
@staticmethod
def cosh(x):
return f"hl.cosh({x})"
@staticmethod
def sinh(x):
return f"hl.sinh({x})"
@staticmethod
def acos(x):
return f"hl.acos({x})"
@staticmethod
def acosh(x):
return f"hl.acosh({x})"
@staticmethod
def asin(x):
return f"hl.asin({x})"
@staticmethod
def asinh(x):
return f"hl.asinh({x})"
@staticmethod
def atan2(x, y):
return f"hl.atan2({x}, {y})"
@staticmethod
def atan(x):
return f"hl.atan({x})"
@staticmethod
def atanh(x):
return f"hl.atanh({x})"
@staticmethod
def copysign(x, y):
raise Unsupported("copysign")
@staticmethod
def erfinv(x):
raise Unsupported("erfinv")
@staticmethod
def hypot(x, y):
return f"hl.hypot({x}, {y})"
@staticmethod
def nextafter(x, y):
raise Unsupported("nextafter")
@staticmethod
def logical_and(a, b):
return f"{a} & {b}"
@staticmethod
def logical_not(a):
return f"{a} == 0"
@staticmethod
def logical_or(a, b):
return f"{a} | {b}"
@staticmethod
def logical_xor(a, b):
return f"({a} ^ {b})"
@staticmethod
def bitwise_and(a, b):
return f"{a} & {b}"
@staticmethod
def bitwise_not(a):
return f"~{a}"
@staticmethod
def bitwise_or(a, b):
return f"{a} | {b}"
@staticmethod
def bitwise_xor(a, b):
return f"{a} ^ {b}"
@staticmethod
def bitwise_left_shift(a, b):
return f"{a} << {b}"
@staticmethod
def bitwise_right_shift(a, b):
return f"{a} >> {b}"
@staticmethod
def rand(seed, offset):
return f"halide_helpers.rand({seed}, {offset})"
@staticmethod
def randn(seed, offset):
return f"halide_helpers.randn({seed}, {offset})"
@staticmethod
def randint64(seed, offset, low, high):
return f"halide_helpers.randint64({seed}, {offset}, {low}, {high})"
@staticmethod
def load_seed(name, offset):
return f"{ops.load(name, 0)} + {V.kernel.args.seed_offset('load_seed_offset', offset)}"
@staticmethod
def rsqrt(x):
# return f"hl.fast_inverse_sqrt({x})" <== accuracy issues
return f"1./hl.sqrt({x})"
@staticmethod
def tan(x):
return f"hl.tan({x})"
@staticmethod
def tanh(x):
return f"hl.tanh({x})"
@staticmethod
def signbit(x):
return f"(hl.reinterpret(hl.UInt(32), hl.cast(hl.Float(32), {x})) >> 31) != 0"
@staticmethod
def fmod(a, b):
# TODO(jansel): find a better way to do this, builtin % has wrong sign
return f"{a} - hl.trunc({a}/{b})*{b}"
@staticmethod
def pow(a, b):
return f"hl.pow({a}, {b})" # hl.fast_pow fails accuracy
@staticmethod
def log(x):
return f"hl.log({x})" # hl.fast_log fails accuracy
@staticmethod
def log2(x):
raise NotImplementedError("log2")
@staticmethod
def isinf(x):
# workaround https://github.com/halide/Halide/issues/8309
return f"hl.is_inf(hl.cast(hl.Float(32), {x}))"
@staticmethod
def isnan(x):
# workaround https://github.com/halide/Halide/issues/8309
return f"hl.is_nan(hl.cast(hl.Float(32), {x}))"
@staticmethod
def round(x):
return f"hl.round({x})"
@staticmethod
def floor(x):
return f"hl.floor({x})"
@staticmethod
def int_truediv(a, b):
return f"({a}) / ({b} + hl.f32(0))"
@staticmethod
def floordiv(a, b):
# TODO(jansel): find a better ways to do this, the select-based trick from triton.py didn't work
return (
f"hl.floor(hl.cast(hl.Float(max(32, {a.name}.type().bits())), {a}) / {b})"
)
@classmethod
def sign(cls, x):
left = ops.to_dtype(ops.lt("0", x), torch.int8)
right = ops.to_dtype(ops.lt(x, "0"), torch.int8)
sub = ops.sub(left, right)
return f"hl.cast({x.name}.type(), {sub})"
@staticmethod
def trunc(x):
return f"hl.trunc({x})"
@staticmethod
def truncdiv(a, b):
# this causes crashes with floating point exception, see test_div_zero_dim_cpu
# return f"hl.div_round_to_zero({a}, {b})"
return (
f"hl.trunc(hl.cast(hl.Float(max(32, {a.name}.type().bits())), {a}) / {b})"
)
@staticmethod
def ceil(x):
return f"hl.ceil({x})"
@staticmethod
def relu(x):
return f"hl.max({x}, 0)"
@classmethod
def index_expr(cls, expr, dtype):
index = V.kernel.prepare_indexing(expr)
var = V.kernel.genfunc(
V.kernel.index_to_str(index),
V.kernel.used_dims_from_index(index),
bounds=get_bounds_index_expr(expr),
)
if dtype not in (torch.int32, torch.int64):
return ops.to_dtype(var, dtype)
return var
@classmethod
def indirect_indexing(cls, index_var, size, check=True, wrap_neg=True):
# TODO(jansel): Halide only supports 32-bit indexing, we should error on overflow
index_var = ops.to_dtype(index_var, torch.int32)
index_var = ops.halide_clamp(index_var, size, check)
index_var.indirect_indexing_size = size
return sympy_index_symbol(str(index_var))
@classmethod
def halide_clamp(cls, value, size, check):
end = V.kernel.kexpr(V.kernel.rename_indexing(size) - 1)
if not isinstance(size, (int, sympy.Integer)):
end = f"hl.cast({value.name}.type(), {end})"
# Skip unsafe_promise_clamped to workaround: https://github.com/halide/Halide/issues/8261#issuecomment-2148835692
# return f"hl.unsafe_promise_clamped({value}, 0, {end})"
return f"hl.clamp({value}, 0, {end})"
@staticmethod
def masked(mask, body, other):
with V.kernel.mask_loads(mask, other) as new_mask:
result = body()
if result.bounds.is_bool:
other = bool(other)
# Take dtype from result to prevent accidental promotion
other = V.kernel.genfunc(
f"hl.cast({result.name}.type(), {halide_constant(other)})",
[],
bounds=ValueRanges.wrap(other),
shape=result.shape,
)
# TODO(jansel): look into removing the where in the same places triton does
return ops.where(new_mask, result, other)
@staticmethod
def frexp(x):
raise NotImplementedError("frexp")
@staticmethod
def device_assert_async(cond, msg):
raise NotImplementedError("device_assert_async")
@staticmethod
# pyrefly: ignore [bad-override]
def partial_accumulate(
name: str,
reduction_type: str,
value: CSEVariable,
extra_meta: dict[str, Any],
) -> None:
raise NotImplementedError
HalideOverrides._initialize_pointwise_overrides("halide")
| HalideOverrides |
python | getsentry__sentry | src/sentry/core/endpoints/scim/utils.py | {
"start": 4598,
"end": 4875
} | class ____(OrganizationSCIMPermission):
scope_map = {
"GET": ["team:read", "team:write", "team:admin"],
"POST": ["team:write", "team:admin"],
"PATCH": ["team:write", "team:admin"],
"DELETE": ["team:admin"],
}
| OrganizationSCIMTeamPermission |
python | pandas-dev__pandas | pandas/tests/tseries/offsets/test_offsets.py | {
"start": 20596,
"end": 26683
} | class ____:
def setup_method(self):
_offset_map.clear()
def test_repr(self):
repr(DateOffset())
repr(DateOffset(2))
repr(2 * DateOffset())
repr(2 * DateOffset(months=2))
def test_mul(self):
assert DateOffset(2) == 2 * DateOffset(1)
assert DateOffset(2) == DateOffset(1) * 2
@pytest.mark.parametrize("kwd", sorted(liboffsets._relativedelta_kwds))
def test_constructor(self, kwd, request):
if kwd == "millisecond":
request.applymarker(
pytest.mark.xfail(
raises=NotImplementedError,
reason="Constructing DateOffset object with `millisecond` is not "
"yet supported.",
)
)
offset = DateOffset(**{kwd: 2})
assert offset.kwds == {kwd: 2}
assert getattr(offset, kwd) == 2
def test_default_constructor(self, dt):
assert (dt + DateOffset(2)) == datetime(2008, 1, 4)
def test_copy(self):
assert DateOffset(months=2).copy() == DateOffset(months=2)
assert DateOffset(milliseconds=1).copy() == DateOffset(milliseconds=1)
@pytest.mark.parametrize(
"arithmatic_offset_type, expected",
zip(
_ARITHMETIC_DATE_OFFSET,
[
"2009-01-02",
"2008-02-02",
"2008-01-09",
"2008-01-03",
"2008-01-02 01:00:00",
"2008-01-02 00:01:00",
"2008-01-02 00:00:01",
"2008-01-02 00:00:00.001000000",
"2008-01-02 00:00:00.000001000",
],
strict=True,
),
)
def test_add(self, arithmatic_offset_type, expected, dt):
assert DateOffset(**{arithmatic_offset_type: 1}) + dt == Timestamp(expected)
assert dt + DateOffset(**{arithmatic_offset_type: 1}) == Timestamp(expected)
@pytest.mark.parametrize(
"arithmatic_offset_type, expected",
zip(
_ARITHMETIC_DATE_OFFSET,
[
"2007-01-02",
"2007-12-02",
"2007-12-26",
"2008-01-01",
"2008-01-01 23:00:00",
"2008-01-01 23:59:00",
"2008-01-01 23:59:59",
"2008-01-01 23:59:59.999000000",
"2008-01-01 23:59:59.999999000",
],
strict=True,
),
)
def test_sub(self, arithmatic_offset_type, expected, dt):
assert dt - DateOffset(**{arithmatic_offset_type: 1}) == Timestamp(expected)
with pytest.raises(TypeError, match="Cannot subtract datetime from offset"):
DateOffset(**{arithmatic_offset_type: 1}) - dt
@pytest.mark.parametrize(
"arithmatic_offset_type, n, expected",
zip(
_ARITHMETIC_DATE_OFFSET,
range(1, 10),
[
"2009-01-02",
"2008-03-02",
"2008-01-23",
"2008-01-06",
"2008-01-02 05:00:00",
"2008-01-02 00:06:00",
"2008-01-02 00:00:07",
"2008-01-02 00:00:00.008000000",
"2008-01-02 00:00:00.000009000",
],
strict=True,
),
)
def test_mul_add(self, arithmatic_offset_type, n, expected, dt):
assert DateOffset(**{arithmatic_offset_type: 1}) * n + dt == Timestamp(expected)
assert n * DateOffset(**{arithmatic_offset_type: 1}) + dt == Timestamp(expected)
assert dt + DateOffset(**{arithmatic_offset_type: 1}) * n == Timestamp(expected)
assert dt + n * DateOffset(**{arithmatic_offset_type: 1}) == Timestamp(expected)
@pytest.mark.parametrize(
"arithmatic_offset_type, n, expected",
zip(
_ARITHMETIC_DATE_OFFSET,
range(1, 10),
[
"2007-01-02",
"2007-11-02",
"2007-12-12",
"2007-12-29",
"2008-01-01 19:00:00",
"2008-01-01 23:54:00",
"2008-01-01 23:59:53",
"2008-01-01 23:59:59.992000000",
"2008-01-01 23:59:59.999991000",
],
strict=True,
),
)
def test_mul_sub(self, arithmatic_offset_type, n, expected, dt):
assert dt - DateOffset(**{arithmatic_offset_type: 1}) * n == Timestamp(expected)
assert dt - n * DateOffset(**{arithmatic_offset_type: 1}) == Timestamp(expected)
def test_leap_year(self):
d = datetime(2008, 1, 31)
assert (d + DateOffset(months=1)) == datetime(2008, 2, 29)
def test_eq(self):
offset1 = DateOffset(days=1)
offset2 = DateOffset(days=365)
assert offset1 != offset2
assert DateOffset(milliseconds=3) != DateOffset(milliseconds=7)
@pytest.mark.parametrize(
"offset_kwargs, expected_arg",
[
({"microseconds": 1, "milliseconds": 1}, "2022-01-01 00:00:00.001001"),
({"seconds": 1, "milliseconds": 1}, "2022-01-01 00:00:01.001"),
({"minutes": 1, "milliseconds": 1}, "2022-01-01 00:01:00.001"),
({"hours": 1, "milliseconds": 1}, "2022-01-01 01:00:00.001"),
({"days": 1, "milliseconds": 1}, "2022-01-02 00:00:00.001"),
({"weeks": 1, "milliseconds": 1}, "2022-01-08 00:00:00.001"),
({"months": 1, "milliseconds": 1}, "2022-02-01 00:00:00.001"),
({"years": 1, "milliseconds": 1}, "2023-01-01 00:00:00.001"),
],
)
def test_milliseconds_combination(self, offset_kwargs, expected_arg):
# GH 49897
offset = DateOffset(**offset_kwargs)
ts = Timestamp("2022-01-01")
result = ts + offset
expected = Timestamp(expected_arg)
assert result == expected
def test_offset_invalid_arguments(self):
msg = "^Invalid argument/s or bad combination of arguments"
with pytest.raises(ValueError, match=msg):
DateOffset(picoseconds=1)
| TestDateOffset |
python | joblib__joblib | joblib/externals/loky/backend/context.py | {
"start": 13200,
"end": 14280
} | class ____(LokyContext):
"""Extra context with LokyProcess, which does load the main module
This context is used for compatibility in the case ``cloudpickle`` is not
present on the running system. This permits to load functions defined in
the ``main`` module, using proper safeguards. The declaration of the
``executor`` should be protected by ``if __name__ == "__main__":`` and the
functions and variable used from main should be out of this block.
This mimics the default behavior of multiprocessing under Windows and the
behavior of the ``spawn`` start method on a posix system.
For more details, see the end of the following section of python doc
https://docs.python.org/3/library/multiprocessing.html#multiprocessing-programming
"""
_name = "loky_init_main"
Process = LokyInitMainProcess
# Register loky context so it works with multiprocessing.get_context
ctx_loky = LokyContext()
mp.context._concrete_contexts["loky"] = ctx_loky
mp.context._concrete_contexts["loky_init_main"] = LokyInitMainContext()
| LokyInitMainContext |
python | ray-project__ray | python/ray/autoscaler/_private/prom_metrics.py | {
"start": 30,
"end": 11817
} | class ____:
"""Mock metric class to be used in case of prometheus_client import error."""
def set(self, *args, **kwargs):
pass
def observe(self, *args, **kwargs):
pass
def inc(self, *args, **kwargs):
pass
def labels(self, *args, **kwargs):
return self
def clear(self):
pass
try:
from prometheus_client import CollectorRegistry, Counter, Gauge, Histogram
# The metrics in this class should be kept in sync with
# python/ray/tests/test_metrics_agent.py
class AutoscalerPrometheusMetrics:
def __init__(
self, session_name: str = None, registry: Optional[CollectorRegistry] = None
):
self.registry: CollectorRegistry = registry or CollectorRegistry(
auto_describe=True
)
self._session_name = session_name
# Buckets: 5 seconds, 10 seconds, 20 seconds, 30 seconds,
# 45 seconds, 1 minute, 1.5 minutes, 2 minutes,
# 3 minutes, 4 minutes, 5 minutes, 6 minutes,
# 8 minutes, 10 minutes, 12 minutes, 15 minutes
# 20 minutes, 25 minutes, 30 minutes
# used for both worker launch time and worker update time
histogram_buckets = [
5,
10,
20,
30,
45,
60,
90,
120,
180,
240,
300,
360,
480,
600,
720,
900,
1200,
1500,
1800,
]
# Buckets: .01 seconds to 1000 seconds.
# Used for autoscaler update time.
update_time_buckets = [0.01, 0.1, 1, 10, 100, 1000]
self.worker_create_node_time: Histogram = Histogram(
"worker_create_node_time_seconds",
"Worker launch time. This is the time it takes for a call to "
"a node provider's create_node method to return. Note that "
"when nodes are launched in batches, the launch time for that "
"batch will be observed once for *each* node in that batch. "
"For example, if 8 nodes are launched in 3 minutes, a launch "
"time of 3 minutes will be observed 8 times.",
labelnames=("SessionName",),
unit="seconds",
namespace="autoscaler",
registry=self.registry,
buckets=histogram_buckets,
).labels(SessionName=session_name)
self.worker_update_time: Histogram = Histogram(
"worker_update_time_seconds",
"Worker update time. This is the time between when an updater "
"thread begins executing and when it exits successfully. This "
"metric only observes times for successful updates.",
labelnames=("SessionName",),
unit="seconds",
namespace="autoscaler",
registry=self.registry,
buckets=histogram_buckets,
).labels(SessionName=session_name)
self.update_time: Histogram = Histogram(
"update_time",
"Autoscaler update time. This is the time for an autoscaler "
"update iteration to complete.",
labelnames=("SessionName",),
unit="seconds",
namespace="autoscaler",
registry=self.registry,
buckets=update_time_buckets,
).labels(SessionName=session_name)
self.pending_nodes: Gauge = Gauge(
"pending_nodes",
"Number of nodes pending to be started.",
labelnames=(
"NodeType",
"SessionName",
),
unit="nodes",
namespace="autoscaler",
registry=self.registry,
)
self.active_nodes: Gauge = Gauge(
"active_nodes",
"Number of nodes in the cluster.",
labelnames=(
"NodeType",
"SessionName",
),
unit="nodes",
namespace="autoscaler",
registry=self.registry,
)
self.recently_failed_nodes = Gauge(
"recently_failed_nodes",
"The number of recently failed nodes. This count could reset "
"at undefined times.",
labelnames=(
"NodeType",
"SessionName",
),
unit="nodes",
namespace="autoscaler",
registry=self.registry,
)
self.started_nodes: Counter = Counter(
"started_nodes",
"Number of nodes started.",
labelnames=("SessionName",),
unit="nodes",
namespace="autoscaler",
registry=self.registry,
).labels(SessionName=session_name)
self.stopped_nodes: Counter = Counter(
"stopped_nodes",
"Number of nodes stopped.",
labelnames=("SessionName",),
unit="nodes",
namespace="autoscaler",
registry=self.registry,
).labels(SessionName=session_name)
self.updating_nodes: Gauge = Gauge(
"updating_nodes",
"Number of nodes in the process of updating.",
labelnames=("SessionName",),
unit="nodes",
namespace="autoscaler",
registry=self.registry,
).labels(SessionName=session_name)
self.recovering_nodes: Gauge = Gauge(
"recovering_nodes",
"Number of nodes in the process of recovering.",
labelnames=("SessionName",),
unit="nodes",
namespace="autoscaler",
registry=self.registry,
).labels(SessionName=session_name)
self.running_workers: Gauge = Gauge(
"running_workers",
"Number of worker nodes running.",
labelnames=("SessionName",),
unit="nodes",
namespace="autoscaler",
registry=self.registry,
).labels(SessionName=session_name)
self.failed_create_nodes: Counter = Counter(
"failed_create_nodes",
"Number of nodes that failed to be created due to an "
"exception in the node provider's create_node method.",
labelnames=("SessionName",),
unit="nodes",
namespace="autoscaler",
registry=self.registry,
).labels(SessionName=session_name)
self.failed_updates: Counter = Counter(
"failed_updates",
"Number of failed worker node updates.",
labelnames=("SessionName",),
unit="updates",
namespace="autoscaler",
registry=self.registry,
).labels(SessionName=session_name)
self.successful_updates: Counter = Counter(
"successful_updates",
"Number of succesfful worker node updates.",
labelnames=("SessionName",),
unit="updates",
namespace="autoscaler",
registry=self.registry,
).labels(SessionName=session_name)
self.failed_recoveries: Counter = Counter(
"failed_recoveries",
"Number of failed node recoveries.",
labelnames=("SessionName",),
unit="recoveries",
namespace="autoscaler",
registry=self.registry,
).labels(SessionName=session_name)
self.successful_recoveries: Counter = Counter(
"successful_recoveries",
"Number of successful node recoveries.",
labelnames=("SessionName",),
unit="recoveries",
namespace="autoscaler",
registry=self.registry,
).labels(SessionName=session_name)
self.update_loop_exceptions: Counter = Counter(
"update_loop_exceptions",
"Number of exceptions raised in the update loop of the autoscaler.",
labelnames=("SessionName",),
unit="exceptions",
namespace="autoscaler",
registry=self.registry,
).labels(SessionName=session_name)
self.node_launch_exceptions: Counter = Counter(
"node_launch_exceptions",
"Number of exceptions raised while launching nodes.",
labelnames=("SessionName",),
unit="exceptions",
namespace="autoscaler",
registry=self.registry,
).labels(SessionName=session_name)
self.reset_exceptions: Counter = Counter(
"reset_exceptions",
"Number of exceptions raised while resetting the autoscaler.",
labelnames=("SessionName",),
unit="exceptions",
namespace="autoscaler",
registry=self.registry,
).labels(SessionName=session_name)
self.config_validation_exceptions: Counter = Counter(
"config_validation_exceptions",
"Number of exceptions raised while validating the config "
"during a reset.",
labelnames=("SessionName",),
unit="exceptions",
namespace="autoscaler",
registry=self.registry,
).labels(SessionName=session_name)
self.drain_node_exceptions: Counter = Counter(
"drain_node_exceptions",
"Number of exceptions raised when making a DrainNode rpc"
"prior to node termination.",
labelnames=("SessionName",),
unit="exceptions",
namespace="autoscaler",
registry=self.registry,
).labels(SessionName=session_name)
# This represents the autoscaler's view of essentially
# `ray.cluster_resources()`, it may be slightly different from the
# core metric from an eventual consistency perspective.
self.cluster_resources: Gauge = Gauge(
"cluster_resources",
"Total logical resources in the cluster.",
labelnames=("resource", "SessionName"),
unit="resources",
namespace="autoscaler",
registry=self.registry,
)
# This represents the pending launches + nodes being set up for the
# autoscaler.
self.pending_resources: Gauge = Gauge(
"pending_resources",
"Pending logical resources in the cluster.",
labelnames=("resource", "SessionName"),
unit="resources",
namespace="autoscaler",
registry=self.registry,
)
@property
def session_name(self):
return self._session_name
except ImportError:
class AutoscalerPrometheusMetrics(object):
def __init__(self, session_name: str = None):
pass
def __getattr__(self, attr):
return NullMetric()
| NullMetric |
python | pypa__warehouse | warehouse/admin/views/organizations.py | {
"start": 1627,
"end": 2081
} | class ____(wtforms.Form):
username = wtforms.StringField(
validators=[
wtforms.validators.InputRequired(message="Specify username"),
]
)
role_name = wtforms.SelectField(
choices=[(role.value, role.value) for role in OrganizationRoleType],
coerce=OrganizationRoleType,
validators=[
wtforms.validators.InputRequired(message="Select a role"),
],
)
| AddOrganizationRoleForm |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/functional_utils.py | {
"start": 14217,
"end": 15696
} | class ____:
"""
This should be equal whenever has_same_metadata would return True
"""
size: tuple[SymIntEqByExpr, ...]
layout: torch.layout
is_sparse: bool
# these are empty when is_sparse
stride: tuple[SymIntEqByExpr, ...] | None
storage_offset: SymIntEqByExpr | None
is_conj: bool
is_neg: bool
@staticmethod
def make(t):
is_sparse = is_sparse_any(t)
return MetadataKey(
size=tuple(SymIntEqByExpr(s) for s in t.size()),
layout=t.layout,
is_sparse=is_sparse,
stride=None if is_sparse else tuple(SymIntEqByExpr(s) for s in t.stride()),
storage_offset=None if is_sparse else SymIntEqByExpr(t.storage_offset()),
is_conj=t.is_conj(),
is_neg=t.is_neg(),
)
# ViewMeta sequence wrapper for equality comparisons.
#
# Even though we can compare each ViewMeta instance, we compare the resulting
# tensor metadata, instead. That's because the creation of synthetic bases + the
# re-generation of input views might end-up creating a different sequence of
# ViewMeta that is semantically equivalent. i.e. gets to a tensor with the same
# metadata.
#
# Therefore, we store what the end result should look like as serializable
# metadata.
#
# When logging, this class should look like:
#
# ViewMetaSequence(view, select_int, slice_Tensor)
#
# i.e. a parenthesized list of view operations within that ViewMeta sequence.
| MetadataKey |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/triggers/vertex_ai.py | {
"start": 1667,
"end": 4403
} | class ____(BaseTrigger):
"""
Base class for Vertex AI job triggers.
This trigger polls the Vertex AI job and checks its status.
In order to use it properly, you must:
- implement the following methods `_wait_job()`.
- override required `job_type_verbose_name` attribute to provide meaningful message describing your
job type.
- override required `job_serializer_class` attribute to provide proto.Message class that will be used
to serialize your job with `to_dict()` class method.
"""
job_type_verbose_name: str = "Vertex AI Job"
job_serializer_class: Message = None
statuses_success = {
JobState.JOB_STATE_PAUSED,
JobState.JOB_STATE_SUCCEEDED,
}
def __init__(
self,
conn_id: str,
project_id: str,
location: str,
job_id: str,
poll_interval: int,
impersonation_chain: str | Sequence[str] | None = None,
):
super().__init__()
self.conn_id = conn_id
self.project_id = project_id
self.location = location
self.job_id = job_id
self.poll_interval = poll_interval
self.impersonation_chain = impersonation_chain
self.trigger_class_path = (
f"airflow.providers.google.cloud.triggers.vertex_ai.{self.__class__.__name__}"
)
def serialize(self) -> tuple[str, dict[str, Any]]:
return (
self.trigger_class_path,
{
"conn_id": self.conn_id,
"project_id": self.project_id,
"location": self.location,
"job_id": self.job_id,
"poll_interval": self.poll_interval,
"impersonation_chain": self.impersonation_chain,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
try:
job = await self._wait_job()
except AirflowException as ex:
yield TriggerEvent(
{
"status": "error",
"message": str(ex),
}
)
return
status = "success" if job.state in self.statuses_success else "error"
message = f"{self.job_type_verbose_name} {job.name} completed with status {job.state.name}"
yield TriggerEvent(
{
"status": status,
"message": message,
"job": self._serialize_job(job),
}
)
async def _wait_job(self) -> Any:
"""Awaits a Vertex AI job instance for a status examination."""
raise NotImplementedError
def _serialize_job(self, job: Any) -> Any:
return self.job_serializer_class.to_dict(job)
| BaseVertexAIJobTrigger |
python | huggingface__transformers | src/transformers/models/dinov3_convnext/modeling_dinov3_convnext.py | {
"start": 10115,
"end": 11753
} | class ____(DINOv3ConvNextPreTrainedModel, BackboneMixin):
config: DINOv3ConvNextConfig
def __init__(self, config: DINOv3ConvNextConfig):
super().__init__(config)
super()._init_backbone(config)
self.num_features = [config.num_channels] + list(config.hidden_sizes)
self.stages = nn.ModuleList([DINOv3ConvNextStage(config, s) for s in range(config.num_stages)])
self.post_init()
def get_input_embeddings(self):
return None
@can_return_tuple
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
output_hidden_states: Optional[bool] = None,
**kwargs,
) -> BackboneOutput:
if output_hidden_states is None:
output_hidden_states = self.config.output_hidden_states
hidden_states = pixel_values
all_hidden_states: list[torch.Tensor] = [hidden_states]
for stage in self.stages:
hidden_states = stage(hidden_states)
all_hidden_states.append(hidden_states)
# hidden_states are already in NCHW (batch_size, channels, height, width) format
feature_maps: list[torch.Tensor] = []
for stage, hidden_states in zip(self.stage_names, all_hidden_states):
if stage in self.out_features:
feature_maps.append(hidden_states)
return BackboneOutput(
feature_maps=tuple(feature_maps),
hidden_states=tuple(all_hidden_states) if output_hidden_states else None,
)
__all__ = ["DINOv3ConvNextModel", "DINOv3ConvNextPreTrainedModel", "DINOv3ConvNextBackbone"]
| DINOv3ConvNextBackbone |
python | great-expectations__great_expectations | docs/docusaurus/docs/reference/learn/data_quality_use_cases/freshness_resources/freshness_workflow.py | {
"start": 1328,
"end": 2183
} | class ____(gxe.ExpectColumnMaxToBeBetween):
"""Custom Expectation class to validate the freshness of sensor readings in the database."""
column: str = "created_at"
min_value: datetime.datetime = datetime.datetime.now() - datetime.timedelta(
minutes=5
)
description: str = "New sensor readings should have arrived in the database within the last 5 minutes."
# Validate the sample data with the custom freshness Expectation.
validation_result = batch.validate(ExpectSensorDataToBeFresh())
print(f"Freshness check passed: {validation_result['success']}")
print(f"Most recent reading timestamp: {validation_result['result']['observed_value']}")
# </snippet>
assert validation_result["success"] is False
assert validation_result["result"]["observed_value"] == datetime.datetime(
2024, 11, 22, 14, 49
)
| ExpectSensorDataToBeFresh |
python | django__django | tests/queries/tests.py | {
"start": 119649,
"end": 120477
} | class ____(TestCase):
def test_primary_key(self):
custom = CustomPk.objects.create(name="pk")
null = Related.objects.create()
notnull = Related.objects.create(custom=custom)
self.assertSequenceEqual(
Related.objects.filter(custom__isnull=False), [notnull]
)
self.assertSequenceEqual(Related.objects.filter(custom__isnull=True), [null])
def test_to_field(self):
apple = Food.objects.create(name="apple")
e1 = Eaten.objects.create(food=apple, meal="lunch")
e2 = Eaten.objects.create(meal="lunch")
self.assertSequenceEqual(
Eaten.objects.filter(food__isnull=False),
[e1],
)
self.assertSequenceEqual(
Eaten.objects.filter(food__isnull=True),
[e2],
)
| IsNullTests |
python | numpy__numpy | numpy/f2py/tests/test_data.py | {
"start": 2523,
"end": 2895
} | class ____(util.F2PyTest):
sources = [util.getpath("tests", "src", "crackfortran", "data_with_comments.f")]
# For gh-23276
def test_data_stmts(self):
assert len(self.module.mycom.mytab) == 3
assert self.module.mycom.mytab[0] == 0
assert self.module.mycom.mytab[1] == 4
assert self.module.mycom.mytab[2] == 0
| TestDataWithCommentsF77 |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/take_while_test.py | {
"start": 5194,
"end": 6270
} | class ____(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def _build_dataset(self, num_elements, upper_bound, options=None):
dataset = dataset_ops.Dataset.range(num_elements)
dataset = dataset.take_while(predicate=lambda x: x < upper_bound)
if options:
dataset = dataset.with_options(options)
return dataset
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(symbolic_checkpoint=[False, True]),
combinations.combine(num_elements=[10, 23], upper_bound=[10, 23])))
def test(self, verify_fn, symbolic_checkpoint, num_elements, upper_bound):
options = options_lib.Options()
options.experimental_symbolic_checkpoint = symbolic_checkpoint
verify_fn(self,
lambda: self._build_dataset(num_elements, upper_bound, options),
min(num_elements, upper_bound))
if __name__ == "__main__":
test.main()
| TakeWhileCheckpointTest |
python | huggingface__transformers | src/transformers/models/esm/modeling_esmfold.py | {
"start": 39170,
"end": 40215
} | class ____(nn.Module):
def __init__(self, sequence_state_dim, inner_dim, pairwise_state_dim):
super().__init__()
self.layernorm = nn.LayerNorm(sequence_state_dim)
self.proj = nn.Linear(sequence_state_dim, inner_dim * 2, bias=True)
self.o_proj = nn.Linear(2 * inner_dim, pairwise_state_dim, bias=True)
init.zeros_(self.proj.bias)
init.zeros_(self.o_proj.bias)
def forward(self, sequence_state):
"""
Inputs:
sequence_state: B x L x sequence_state_dim
Output:
pairwise_state: B x L x L x pairwise_state_dim
Intermediate state:
B x L x L x 2*inner_dim
"""
assert len(sequence_state.shape) == 3
s = self.layernorm(sequence_state)
s = self.proj(s)
q, k = s.chunk(2, dim=-1)
prod = q[:, None, :, :] * k[:, :, None, :]
diff = q[:, None, :, :] - k[:, :, None, :]
x = torch.cat([prod, diff], dim=-1)
x = self.o_proj(x)
return x
| EsmFoldSequenceToPair |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/pipelines/config.py | {
"start": 4089,
"end": 7303
} | class ____(graphene.Interface):
message = graphene.NonNull(graphene.String)
path = non_null_list(graphene.String)
stack = graphene.NonNull(GrapheneEvaluationStack)
reason = graphene.NonNull(GrapheneEvaluationErrorReason)
class Meta:
name = "PipelineConfigValidationError" # back-compat
@staticmethod
def from_dagster_error(
get_config_type: Callable[[str], ConfigTypeSnap], error: DagsterEvaluationError
):
check.inst_param(error, "error", DagsterEvaluationError)
if isinstance(error.error_data, RuntimeMismatchErrorData):
return GrapheneRuntimeMismatchConfigError(
message=error.message,
path=[], # TODO: remove
stack=GrapheneEvaluationStack(error.stack),
reason=error.reason.value,
value_rep=error.error_data.value_rep,
)
elif isinstance(error.error_data, MissingFieldErrorData):
return GrapheneMissingFieldConfigError(
message=error.message,
path=[], # TODO: remove
stack=GrapheneEvaluationStack(error.stack),
reason=error.reason.value,
field=GrapheneConfigTypeField(
get_config_type,
field_snap=error.error_data.field_snap,
),
)
elif isinstance(error.error_data, MissingFieldsErrorData):
return GrapheneMissingFieldsConfigError(
message=error.message,
path=[], # TODO: remove
stack=GrapheneEvaluationStack(error.stack),
reason=error.reason.value,
fields=[
GrapheneConfigTypeField(
get_config_type,
field_snap=field_snap,
)
for field_snap in error.error_data.field_snaps
],
)
elif isinstance(error.error_data, FieldNotDefinedErrorData):
return GrapheneFieldNotDefinedConfigError(
message=error.message,
path=[], # TODO: remove
stack=GrapheneEvaluationStack(error.stack),
reason=error.reason.value,
field_name=error.error_data.field_name,
)
elif isinstance(error.error_data, FieldsNotDefinedErrorData):
return GrapheneFieldsNotDefinedConfigError(
message=error.message,
path=[], # TODO: remove
stack=GrapheneEvaluationStack(error.stack),
reason=error.reason.value,
field_names=error.error_data.field_names,
)
elif isinstance(error.error_data, SelectorTypeErrorData):
return GrapheneSelectorTypeConfigError(
message=error.message,
path=[], # TODO: remove
stack=GrapheneEvaluationStack(error.stack),
reason=error.reason.value,
incoming_fields=error.error_data.incoming_fields,
)
else:
check.failed(f"Error type not supported {error.error_data!r}")
| GrapheneConfigValidationError |
python | huggingface__transformers | src/transformers/models/pvt_v2/modeling_pvt_v2.py | {
"start": 2546,
"end": 3671
} | class ____(nn.Module):
"""Image to Patch Embedding"""
def __init__(self, config: PvtV2Config, layer_idx: int):
super().__init__()
patch_size = config.patch_sizes[layer_idx]
patch_size = (patch_size, patch_size) if isinstance(patch_size, int) else patch_size
stride = config.strides[layer_idx]
num_channels = config.num_channels if layer_idx == 0 else config.hidden_sizes[layer_idx - 1]
hidden_size = config.hidden_sizes[layer_idx]
self.patch_size = patch_size
self.proj = nn.Conv2d(
num_channels,
hidden_size,
kernel_size=patch_size,
stride=stride,
padding=(patch_size[0] // 2, patch_size[1] // 2),
)
self.layer_norm = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
def forward(self, pixel_values):
embeddings = self.proj(pixel_values)
_, _, height, width = embeddings.shape
embeddings = embeddings.flatten(2).transpose(1, 2)
embeddings = self.layer_norm(embeddings)
return embeddings, height, width
| PvtV2OverlapPatchEmbeddings |
python | huggingface__transformers | tests/models/prophetnet/test_tokenization_prophetnet.py | {
"start": 1084,
"end": 7893
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "microsoft/prophetnet-large-uncased"
tokenizer_class = ProphetNetTokenizer
test_rust_tokenizer = False
@classmethod
def setUpClass(cls):
super().setUpClass()
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(cls.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def get_input_output_texts(self, tokenizer):
input_text = "UNwant\u00e9d,running"
output_text = "unwanted, running"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = self.tokenizer_class(self.vocab_file)
tokens = tokenizer.tokenize("UNwant\u00e9d,running")
self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
def test_chinese(self):
tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535a\u63a8zz"), ["ah", "\u535a", "\u63a8", "zz"])
def test_basic_tokenizer_lower(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00e9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00e9llo"), ["h\u00e9llo"])
def test_basic_tokenizer_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00e9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_default(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00e9llo"), ["hello"])
def test_basic_tokenizer_no_lower(self):
tokenizer = BasicTokenizer(do_lower_case=False)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_respects_never_split_tokens(self):
tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
)
def test_wordpiece_tokenizer(self):
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
vocab = {}
for i, token in enumerate(vocab_tokens):
vocab[token] = i
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize(""), [])
self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
@require_torch
def test_prepare_batch(self):
tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
batch = tokenizer(src_text, padding=True, return_tensors="pt")
self.assertIsInstance(batch, BatchEncoding)
result = list(batch.input_ids.numpy()[0])
self.assertListEqual(expected_src_tokens, result)
self.assertEqual((2, 9), batch.input_ids.shape)
self.assertEqual((2, 9), batch.attention_mask.shape)
def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00a0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
def test_is_control(self):
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_2 + [102]
| ProphetNetTokenizationTest |
python | jupyterlab__jupyterlab | jupyterlab/labapp.py | {
"start": 7679,
"end": 8983
} | class ____(JupyterApp):
version = version
description = """
Clean the JupyterLab application
This will clean the app directory by removing the `staging` directories.
Optionally, the `extensions`, `settings`, and/or `static` directories,
or the entire contents of the app directory, can also be removed.
"""
aliases = clean_aliases
flags = clean_flags
# Not configurable!
core_config = Instance(CoreConfig, allow_none=True)
app_dir = Unicode("", config=True, help="The app directory to clean")
extensions = Bool(False, config=True, help=f"Also delete <app-dir>/extensions.\n{ext_warn_msg}")
settings = Bool(False, config=True, help="Also delete <app-dir>/settings")
static = Bool(False, config=True, help="Also delete <app-dir>/static")
all = Bool(
False,
config=True,
help=f"Delete the entire contents of the app directory.\n{ext_warn_msg}",
)
def start(self):
app_options = LabCleanAppOptions(
logger=self.log,
core_config=self.core_config,
app_dir=self.app_dir,
extensions=self.extensions,
settings=self.settings,
static=self.static,
all=self.all,
)
clean(app_options=app_options)
| LabCleanApp |
python | automl__auto-sklearn | autosklearn/pipeline/components/regression/gaussian_process.py | {
"start": 370,
"end": 2850
} | class ____(AutoSklearnRegressionAlgorithm):
def __init__(self, alpha, thetaL, thetaU, random_state=None):
self.alpha = alpha
self.thetaL = thetaL
self.thetaU = thetaU
self.random_state = random_state
self.estimator = None
def fit(self, X, y):
import sklearn.gaussian_process
self.alpha = float(self.alpha)
self.thetaL = float(self.thetaL)
self.thetaU = float(self.thetaU)
n_features = X.shape[1]
kernel = sklearn.gaussian_process.kernels.RBF(
length_scale=[1.0] * n_features,
length_scale_bounds=[(self.thetaL, self.thetaU)] * n_features,
)
# Instanciate a Gaussian Process model
self.estimator = sklearn.gaussian_process.GaussianProcessRegressor(
kernel=kernel,
n_restarts_optimizer=10,
optimizer="fmin_l_bfgs_b",
alpha=self.alpha,
copy_X_train=True,
random_state=self.random_state,
normalize_y=True,
)
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
self.estimator.fit(X, y)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X)
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "GP",
"name": "Gaussian Process",
"handles_regression": True,
"handles_classification": False,
"handles_multiclass": False,
"handles_multilabel": False,
"handles_multioutput": True,
"is_deterministic": True,
"input": (DENSE, UNSIGNED_DATA),
"output": (PREDICTIONS,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
alpha = UniformFloatHyperparameter(
name="alpha", lower=1e-14, upper=1.0, default_value=1e-8, log=True
)
thetaL = UniformFloatHyperparameter(
name="thetaL", lower=1e-10, upper=1e-3, default_value=1e-6, log=True
)
thetaU = UniformFloatHyperparameter(
name="thetaU", lower=1.0, upper=100000, default_value=100000.0, log=True
)
cs = ConfigurationSpace()
cs.add_hyperparameters([alpha, thetaL, thetaU])
return cs
| GaussianProcess |
python | weaviate__weaviate-python-client | weaviate/proto/v1/v4216/v1/file_replication_pb2_grpc.py | {
"start": 2198,
"end": 5815
} | class ____(object):
"""Missing associated documentation comment in .proto file."""
def PauseFileActivity(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ResumeFileActivity(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListFiles(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetFileMetadata(self, request_iterator, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetFile(self, request_iterator, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_FileReplicationServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'PauseFileActivity': grpc.unary_unary_rpc_method_handler(
servicer.PauseFileActivity,
request_deserializer=v1_dot_file__replication__pb2.PauseFileActivityRequest.FromString,
response_serializer=v1_dot_file__replication__pb2.PauseFileActivityResponse.SerializeToString,
),
'ResumeFileActivity': grpc.unary_unary_rpc_method_handler(
servicer.ResumeFileActivity,
request_deserializer=v1_dot_file__replication__pb2.ResumeFileActivityRequest.FromString,
response_serializer=v1_dot_file__replication__pb2.ResumeFileActivityResponse.SerializeToString,
),
'ListFiles': grpc.unary_unary_rpc_method_handler(
servicer.ListFiles,
request_deserializer=v1_dot_file__replication__pb2.ListFilesRequest.FromString,
response_serializer=v1_dot_file__replication__pb2.ListFilesResponse.SerializeToString,
),
'GetFileMetadata': grpc.stream_stream_rpc_method_handler(
servicer.GetFileMetadata,
request_deserializer=v1_dot_file__replication__pb2.GetFileMetadataRequest.FromString,
response_serializer=v1_dot_file__replication__pb2.FileMetadata.SerializeToString,
),
'GetFile': grpc.stream_stream_rpc_method_handler(
servicer.GetFile,
request_deserializer=v1_dot_file__replication__pb2.GetFileRequest.FromString,
response_serializer=v1_dot_file__replication__pb2.FileChunk.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'weaviate.v1.FileReplicationService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
| FileReplicationServiceServicer |
python | huggingface__transformers | tests/cli/test_serve.py | {
"start": 30722,
"end": 32788
} | class ____:
"""
Mixin class for the Completions API tests, to seamlessly replicate tests across the two versions of the API
(`generate` and `continuous_batching`).
"""
@retry
def run_server(self, request):
client = OpenAI(base_url=f"http://localhost:{self.port}/v1", api_key="<KEY>")
stream = client.responses.create(**request)
all_payloads = []
for payload in stream:
all_payloads.append(payload)
return all_payloads
def test_request(self):
"""Tests that an inference using the Responses API works"""
request = {
"model": "Qwen/Qwen2.5-0.5B-Instruct",
"instructions": "You are a helpful assistant.",
"input": "Hello!",
"stream": True,
"max_output_tokens": 1,
}
all_payloads = self.run_server(request)
# Allow variable number of delta events depending on tokenizer/streamer behavior
self.assertGreaterEqual(len(all_payloads), 8)
# Start markers
self.assertIsInstance(all_payloads[0], ResponseCreatedEvent)
self.assertIsInstance(all_payloads[1], ResponseInProgressEvent)
self.assertIsInstance(all_payloads[2], ResponseOutputItemAddedEvent)
self.assertIsInstance(all_payloads[3], ResponseContentPartAddedEvent)
# At least one delta event during streaming
self.assertTrue(any(isinstance(p, ResponseTextDeltaEvent) for p in all_payloads[4:-4]))
# Closing markers
self.assertIsInstance(all_payloads[-4], ResponseTextDoneEvent)
self.assertIsInstance(all_payloads[-3], ResponseContentPartDoneEvent)
self.assertIsInstance(all_payloads[-2], ResponseOutputItemDoneEvent)
self.assertIsInstance(all_payloads[-1], ResponseCompletedEvent)
# TODO: one test for each request flag, to confirm it is working as expected
# TODO: speed-based test to confirm that KV cache is working across requests
@slow # server startup time is slow on our push CI
@require_openai
| ServeResponsesMixin |
python | getsentry__sentry | src/sentry/issue_detection/detectors/io_main_thread_detector.py | {
"start": 829,
"end": 4321
} | class ____(PerformanceDetector):
SPAN_PREFIX: str # abstract
group_type: type[GroupType] # abstract
def _is_io_on_main_thread(self, span: Span) -> bool:
raise NotImplementedError
def _fingerprint(self, span_list: list[Span]) -> str:
raise NotImplementedError
def __init__(self, settings: dict[DetectorType, Any], event: dict[str, Any]) -> None:
super().__init__(settings, event)
self.mapper: ProguardMapper | None = None
self.parent_to_blocked_span: dict[str, list[Span]] = defaultdict(list)
def visit_span(self, span: Span) -> None:
if self._is_io_on_main_thread(span) and span.get("op", "").lower().startswith(
self.SPAN_PREFIX
):
parent_span_id = span["parent_span_id"]
self.parent_to_blocked_span[parent_span_id].append(span)
def on_complete(self) -> None:
for parent_span_id, span_list in self.parent_to_blocked_span.items():
span_list = [
span for span in span_list if "start_timestamp" in span and "timestamp" in span
]
total_duration = total_span_time(span_list)
settings_for_span = self.settings_for_span(span_list[0])
if not settings_for_span:
return
_, _, _, _, settings = settings_for_span
if total_duration >= settings["duration_threshold"]:
fingerprint = self._fingerprint(span_list)
offender_spans = [span for span in span_list if "span_id" in span]
self.stored_problems[fingerprint] = PerformanceProblem(
fingerprint=fingerprint,
op=span_list[0].get("op", ""),
desc=span_list[0].get("description", ""),
parent_span_ids=[parent_span_id],
type=self.group_type,
cause_span_ids=[],
offender_span_ids=[span["span_id"] for span in offender_spans],
evidence_data={
"op": span_list[0].get("op"),
"parent_span_ids": [parent_span_id],
"cause_span_ids": [],
"offender_span_ids": [
span["span_id"] for span in span_list if "span_id" in span
],
"transaction_name": self._event.get("transaction", ""),
"repeating_spans": get_span_evidence_value(offender_spans[0]),
"repeating_spans_compact": get_span_evidence_value(
offender_spans[0], include_op=False
),
"num_repeating_spans": str(len(offender_spans)),
},
evidence_display=[
IssueEvidence(
name="Offending Spans",
value=get_notification_attachment_body(
span_list[0].get("op"),
span_list[0].get("description", ""),
),
# Has to be marked important to be displayed in the notifications
important=True,
)
],
)
def is_creation_allowed_for_project(self, project: Project) -> bool:
return self.settings["detection_enabled"]
| BaseIOMainThreadDetector |
python | astropy__astropy | astropy/io/fits/tests/conftest.py | {
"start": 1883,
"end": 6139
} | class ____:
def setup_method(self):
self.data_dir = os.path.join(os.path.dirname(__file__), "data")
self.temp_dir = tempfile.mkdtemp(prefix="fits-test-")
self.home_is_data = False
self.home_is_temp = False
self.temp_files_used = set()
self.use_pathlib = False
# Restore global settings to defaults
# TODO: Replace this when there's a better way to in the config API to
# force config values to their defaults
fits.conf.enable_record_valued_keyword_cards = True
fits.conf.extension_name_case_sensitive = False
fits.conf.strip_header_whitespace = True
fits.conf.use_memmap = True
def teardown_method(self):
if self.home_is_temp:
# Verify that no files were written to a literal tilde path
for temp_file, temp_file_no_tilde in self.temp_files_used:
assert not os.path.exists(temp_file)
assert os.path.exists(temp_file_no_tilde)
if hasattr(self, "temp_dir") and os.path.exists(self.temp_dir):
tries = 3
while tries:
try:
shutil.rmtree(self.temp_dir)
break
except OSError:
# Probably couldn't delete the file because for whatever
# reason a handle to it is still open/hasn't been
# garbage-collected
time.sleep(0.5)
tries -= 1
fits.conf.reset("enable_record_valued_keyword_cards")
fits.conf.reset("extension_name_case_sensitive")
fits.conf.reset("strip_header_whitespace")
fits.conf.reset("use_memmap")
def copy_file(self, filename, new_filename=None):
"""Copies a backup of a test data file to the temp dir and sets its
mode to writeable. Can optionally change the file's name.
"""
new_filename = filename if new_filename is None else new_filename
p = self.temp(new_filename)
shutil.copy(
os.path.expanduser(self.data(filename)),
os.path.expanduser(p),
)
os.chmod(os.path.expanduser(p), stat.S_IREAD | stat.S_IWRITE)
return p
def data(self, filename):
"""Returns the path to a test data file."""
if self.home_is_data:
prefix = "~"
else:
prefix = self.data_dir
if self.use_pathlib:
return pathlib.Path(prefix, filename)
return os.path.join(prefix, filename)
def temp(self, filename):
"""Returns the full path to a file in the test temp dir."""
real_target = os.path.join(self.temp_dir, filename)
if self.home_is_temp:
prefix = "~"
# Record the '~' path and the intended path, for use
# in `home_is_temp`
self.temp_files_used.add((os.path.join(prefix, filename), real_target))
else:
prefix = self.temp_dir
if self.use_pathlib:
return pathlib.Path(prefix, filename)
return os.path.join(prefix, filename)
def set_home_as_data(self):
"""
This overrides the HOME environment variable, so that paths beginning
with '~/' expand to the data directory. Used by the `home_is_data`
fixture.
"""
self.home_is_data = True
# For Unix
self.monkeypatch.setenv("HOME", self.data_dir)
# For Windows
self.monkeypatch.setenv("USERPROFILE", self.data_dir)
def set_home_as_temp(self):
"""
This overrides the HOME environment variable, so that paths beginning
with '~/' expand to the temp directory. In conjunction with
self.temp(), temporary files are tracked as they are created, so we can
verify they end up in the temporary directory and not unexpected places
in the filesystem. Used by the `home_is_temp` fixture.
"""
self.home_is_temp = True
# For Unix
self.monkeypatch.setenv("HOME", self.temp_dir)
# For Windows
self.monkeypatch.setenv("USERPROFILE", self.temp_dir)
def set_paths_via_pathlib(self, use_pathlib):
self.use_pathlib = use_pathlib
| FitsTestCase |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/orm/orm_config_constructs.py | {
"start": 350,
"end": 1037
} | class ____(Base):
__tablename__ = "User"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str]
@validates("name", include_removes=True)
def validate_name(self, name: str) -> str:
"""test #8577"""
return name + "hi"
# test #9536
_password: Mapped[str] = mapped_column("Password", String)
password1: Mapped[str] = column_property(
_password.collate("SQL_Latin1_General_CP1_CS_AS"), deferred=True
)
password2: Mapped[str] = deferred(
_password.collate("SQL_Latin1_General_CP1_CS_AS")
)
password3: Mapped[str] = query_expression(
_password.collate("SQL_Latin1_General_CP1_CS_AS")
)
| User |
python | huggingface__transformers | src/transformers/models/siglip2/modular_siglip2.py | {
"start": 9443,
"end": 11550
} | class ____(SiglipVisionTransformer):
def __init__(self, config: Siglip2VisionConfig):
super().__init__(config)
# Update: add `spatial_shapes` and `attention_mask`
def forward(
self,
pixel_values: torch.FloatTensor,
attention_mask: torch.Tensor,
spatial_shapes: torch.LongTensor,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
) -> BaseModelOutputWithPooling:
r"""
spatial_shapes (`torch.LongTensor` of shape `(batch_size, 2)`):
Tensor containing the spatial dimensions (height, width) of the input images.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
hidden_states = self.embeddings(pixel_values, spatial_shapes)
if attention_mask is not None and self.config._attn_implementation != "flash_attention_2":
# [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
else:
encoder_attention_mask = attention_mask
encoder_outputs: BaseModelOutput = self.encoder(
inputs_embeds=hidden_states,
attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
last_hidden_state = encoder_outputs.last_hidden_state
last_hidden_state = self.post_layernorm(last_hidden_state)
pooler_output = self.head(last_hidden_state, attention_mask) if self.use_head else None
return BaseModelOutputWithPooling(
last_hidden_state=last_hidden_state,
pooler_output=pooler_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
| Siglip2VisionTransformer |
python | pypa__setuptools | setuptools/_distutils/errors.py | {
"start": 2130,
"end": 2309
} | class ____(DistutilsError):
"""For errors that can be definitely blamed on the setup script,
such as invalid keyword arguments to 'setup()'."""
pass
| DistutilsSetupError |
python | huggingface__transformers | src/transformers/models/rwkv/modeling_rwkv.py | {
"start": 18826,
"end": 19793
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
state (list of five `torch.FloatTensor` of shape `(batch_size, hidden_size, num_hidden_layers)`):
The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
avoid providing the old `input_ids`.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
state: Optional[list[torch.FloatTensor]] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@auto_docstring
| RwkvCausalLMOutput |
python | cython__cython | tests/run/py3k_super.py | {
"start": 3073,
"end": 3239
} | class ____:
"""
>>> obj = E()
>>> obj.method()().__name__
'E'
"""
def method(self):
def inner(): return __class__
return inner
| E |
python | jazzband__django-waffle | waffle/models.py | {
"start": 699,
"end": 3683
} | class ____(models.Model):
SINGLE_CACHE_KEY = ''
ALL_CACHE_KEY = ''
class Meta:
abstract = True
def __str__(self) -> str:
return self.name
def natural_key(self) -> tuple[str]:
return (self.name,)
@classmethod
def _cache_key(cls, name: str) -> str:
return keyfmt(get_setting(cls.SINGLE_CACHE_KEY), name)
@classmethod
def get(cls: type[_BaseModelType], name: str) -> _BaseModelType:
cache = get_cache()
cache_key = cls._cache_key(name)
cached = cache.get(cache_key)
if cached == CACHE_EMPTY:
return cls(name=name)
if cached:
return cached
try:
obj = cls.get_from_db(name)
except cls.DoesNotExist:
cache.add(cache_key, CACHE_EMPTY)
return cls(name=name)
cache.add(cache_key, obj)
return obj
@classmethod
def get_from_db(cls: type[_BaseModelType], name: str) -> _BaseModelType:
objects = cls.objects
if get_setting('READ_FROM_WRITE_DB'):
objects = objects.using(router.db_for_write(cls))
return objects.get(name=name)
@classmethod
def get_all(cls: type[_BaseModelType]) -> list[_BaseModelType]:
cache = get_cache()
cache_key = get_setting(cls.ALL_CACHE_KEY)
cached = cache.get(cache_key)
if cached == CACHE_EMPTY:
return []
if cached:
return cached
objs = cls.get_all_from_db()
if not objs:
cache.add(cache_key, CACHE_EMPTY)
return []
cache.add(cache_key, objs)
return objs
@classmethod
def get_all_from_db(cls: type[_BaseModelType]) -> list[_BaseModelType]:
objects = cls.objects
if get_setting('READ_FROM_WRITE_DB'):
objects = objects.using(router.db_for_write(cls))
return list(objects.all())
def flush(self) -> None:
cache = get_cache()
keys = [
self._cache_key(self.name),
get_setting(self.ALL_CACHE_KEY),
]
cache.delete_many(keys)
def save(self, *args: Any, **kwargs: Any) -> None:
self.modified = timezone.now()
ret = super().save(*args, **kwargs)
if hasattr(transaction, 'on_commit'):
transaction.on_commit(self.flush)
else:
self.flush()
return ret
def delete(self, *args: Any, **kwargs: Any) -> tuple[int, dict[str, int]]:
ret = super().delete(*args, **kwargs)
if hasattr(transaction, 'on_commit'):
transaction.on_commit(self.flush)
else:
self.flush()
return ret
def set_flag(request: HttpRequest, flag_name: str, active: bool | None = True, session_only: bool = False) -> None:
"""Set a flag value on a request object."""
if not hasattr(request, 'waffles'):
request.waffles = {}
request.waffles[flag_name] = [active, session_only]
| BaseModel |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_pretty.py | {
"start": 3519,
"end": 3604
} | class ____:
def _repr_pretty_(self, p, cycle):
p.text("Dummy1(...)")
| Dummy1 |
python | tensorflow__tensorflow | tensorflow/core/function/polymorphism/function_type_test.py | {
"start": 19900,
"end": 24500
} | class ____(test.TestCase):
def test_same_type(self):
foo_type = function_type.FunctionType([
function_type.Parameter("x", function_type.Parameter.POSITIONAL_ONLY,
False, trace_type.from_value(1))
])
self.assertEqual(foo_type, foo_type)
self.assertTrue(foo_type.is_supertype_of(foo_type))
self.assertEqual(
foo_type,
foo_type.most_specific_common_subtype([foo_type, foo_type, foo_type]))
def test_unrelated_types(self):
foo_type = function_type.FunctionType([
function_type.Parameter("x", function_type.Parameter.POSITIONAL_ONLY,
False, trace_type.from_value(1))
])
bar_type = function_type.FunctionType([
function_type.Parameter("x", function_type.Parameter.POSITIONAL_ONLY,
False, trace_type.from_value(2))
])
self.assertNotEqual(foo_type, bar_type)
self.assertFalse(foo_type.is_supertype_of(bar_type))
self.assertIsNone(
foo_type.most_specific_common_subtype([bar_type, bar_type]))
self.assertIsNone(
foo_type.most_specific_common_subtype([bar_type, foo_type]))
def test_partial_raises_error(self):
foo_type = function_type.FunctionType([
function_type.Parameter("x", function_type.Parameter.POSITIONAL_ONLY,
False, trace_type.from_value(1)),
])
bar_type = function_type.FunctionType([
function_type.Parameter("x", function_type.Parameter.POSITIONAL_ONLY,
False, None)
])
self.assertNotEqual(foo_type, bar_type)
with self.assertRaises(TypeError):
foo_type.is_supertype_of(bar_type)
with self.assertRaises(TypeError):
bar_type.is_supertype_of(foo_type)
with self.assertRaises(TypeError):
foo_type.most_specific_common_subtype([bar_type, bar_type])
with self.assertRaises(TypeError):
bar_type.most_specific_common_subtype([foo_type, bar_type])
def test_related_types(self):
class MockAlwaysSuperType(trace.TraceType):
def is_subtype_of(self, other: trace.TraceType) -> bool:
return False
def most_specific_common_supertype(self, others):
return self
def placeholder_value(self, placeholder_context):
raise NotImplementedError
def __eq__(self, other):
return self is other
def __hash__(self):
return 0
supertype = MockAlwaysSuperType()
class MockAlwaysSubtype(trace.TraceType):
def is_subtype_of(self, other) -> bool:
return True
def most_specific_common_supertype(self, others):
return supertype
def placeholder_value(self, placeholder_context):
raise NotImplementedError
def __eq__(self, other):
return self is other
def __hash__(self):
return 1
subtype = MockAlwaysSubtype()
foo_type = function_type.FunctionType([
function_type.Parameter("x", function_type.Parameter.POSITIONAL_ONLY,
False, supertype),
])
bar_type = function_type.FunctionType([
function_type.Parameter("x", function_type.Parameter.POSITIONAL_ONLY,
False, subtype)
])
self.assertNotEqual(foo_type, bar_type)
self.assertTrue(bar_type.is_supertype_of(foo_type))
self.assertFalse(foo_type.is_supertype_of(bar_type))
self.assertEqual(
foo_type.most_specific_common_subtype([bar_type, foo_type]), foo_type)
self.assertEqual(
bar_type.most_specific_common_subtype([bar_type, foo_type]), foo_type)
def test_placeholder_arg(self):
type_context = trace_type.InternalTracingContext()
foo = function_type.FunctionType([
function_type.Parameter("x", function_type.Parameter.POSITIONAL_ONLY,
False, trace_type.from_value(1, type_context)),
function_type.Parameter("y",
function_type.Parameter.POSITIONAL_OR_KEYWORD,
False, trace_type.from_value(2, type_context)),
function_type.Parameter("z", function_type.Parameter.KEYWORD_ONLY,
False, trace_type.from_value(3, type_context)),
])
context_graph = func_graph.FuncGraph("test")
placeholder_context = trace_type.InternalPlaceholderContext(context_graph)
self.assertEqual(
foo.placeholder_arguments(placeholder_context).args, (1, 2))
self.assertEqual(
foo.placeholder_arguments(placeholder_context).kwargs, {"z": 3})
| TypeHierarchyTest |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/ui/test_grid.py | {
"start": 9772,
"end": 28347
} | class ____:
def test_should_response_200(self, test_client):
with assert_queries_count(5):
response = test_client.get(f"/grid/runs/{DAG_ID}")
assert response.status_code == 200
assert response.json() == [
GRID_RUN_1,
GRID_RUN_2,
]
@pytest.mark.parametrize(
("order_by", "expected"),
[
(
"logical_date",
[
GRID_RUN_1,
GRID_RUN_2,
],
),
(
"-logical_date",
[
GRID_RUN_2,
GRID_RUN_1,
],
),
(
"run_after",
[
GRID_RUN_1,
GRID_RUN_2,
],
),
(
"-run_after",
[
GRID_RUN_2,
GRID_RUN_1,
],
),
],
)
def test_should_response_200_order_by(self, test_client, order_by, expected):
with assert_queries_count(5):
response = test_client.get(f"/grid/runs/{DAG_ID}", params={"order_by": order_by})
assert response.status_code == 200
assert response.json() == expected
@pytest.mark.parametrize(
("limit", "expected"),
[
(
1,
[GRID_RUN_1],
),
(
2,
[GRID_RUN_1, GRID_RUN_2],
),
],
)
def test_should_response_200_limit(self, test_client, limit, expected):
with assert_queries_count(5):
response = test_client.get(f"/grid/runs/{DAG_ID}", params={"limit": limit})
assert response.status_code == 200
assert response.json() == expected
@pytest.mark.parametrize(
("params", "expected"),
[
(
{
"run_after_gte": timezone.datetime(2024, 11, 30),
"run_after_lte": timezone.datetime(2024, 11, 30),
},
[GRID_RUN_1, GRID_RUN_2],
),
(
{
"run_after_gte": timezone.datetime(2024, 10, 30),
"run_after_lte": timezone.datetime(2024, 10, 30),
},
[],
),
],
)
def test_runs_should_response_200_date_filters(self, test_client, params, expected):
with assert_queries_count(5):
response = test_client.get(
f"/grid/runs/{DAG_ID}",
params=params,
)
assert response.status_code == 200
assert response.json() == expected
@pytest.mark.parametrize(
("params", "expected", "expected_queries_count"),
[
(
{
"run_after_gte": timezone.datetime(2024, 11, 30),
"run_after_lte": timezone.datetime(2024, 11, 30),
},
GRID_NODES,
7,
),
(
{
"run_after_gte": timezone.datetime(2024, 10, 30),
"run_after_lte": timezone.datetime(2024, 10, 30),
},
GRID_NODES,
5,
),
],
)
def test_structure_should_response_200_date_filters(
self, test_client, params, expected, expected_queries_count
):
with assert_queries_count(expected_queries_count):
response = test_client.get(
f"/grid/structure/{DAG_ID}",
params=params,
)
assert response.status_code == 200
assert response.json() == expected
@pytest.mark.parametrize("endpoint", ["runs", "structure"])
def test_should_response_401(self, unauthenticated_test_client, endpoint):
response = unauthenticated_test_client.get(f"/grid/{endpoint}/{DAG_ID_3}")
assert response.status_code == 401
@pytest.mark.parametrize("endpoint", ["runs", "structure"])
def test_should_response_403(self, unauthorized_test_client, endpoint):
response = unauthorized_test_client.get(f"/grid/{endpoint}/{DAG_ID_3}")
assert response.status_code == 403
@pytest.mark.parametrize("endpoint", ["runs", "structure"])
def test_should_response_404(self, test_client, endpoint):
response = test_client.get(f"/grid/{endpoint}/invalid_dag")
assert response.status_code == 404
assert response.json() == {"detail": "Dag with id invalid_dag was not found"}
def test_structure_should_response_200_without_dag_run(self, test_client):
with assert_queries_count(5):
response = test_client.get(f"/grid/structure/{DAG_ID_2}")
assert response.status_code == 200
assert response.json() == [{"id": "task2", "label": "task2"}]
def test_runs_should_response_200_without_dag_run(self, test_client):
with assert_queries_count(5):
response = test_client.get(f"/grid/runs/{DAG_ID_2}")
assert response.status_code == 200
assert response.json() == []
def test_should_response_200_with_deleted_task_and_taskgroup(self, session, test_client):
# Mark one of the TI of the previous runs as "REMOVED" to simulate clearing an older DagRun.
# https://github.com/apache/airflow/issues/48670
ti = session.scalar(
select(TaskInstance).where(TaskInstance.run_id == "run_3", TaskInstance.task_id == TASK_ID_4)
)
ti.state = TaskInstanceState.REMOVED
ti.dag_version = session.scalar(select(DagModel).where(DagModel.dag_id == DAG_ID_3)).dag_versions[-1]
session.commit()
with assert_queries_count(7):
response = test_client.get(f"/grid/structure/{DAG_ID_3}")
assert response.status_code == 200
assert response.json() == [
{"id": "task3", "label": "task3"},
{"id": "task4", "label": "task4"},
{
"children": [{"id": "task_group.inner_task", "label": "inner_task"}],
"id": "task_group",
"label": "task_group",
},
]
# Also verify that TI summaries include a leaf entry for the removed task
with assert_queries_count(4):
ti_resp = test_client.get(f"/grid/ti_summaries/{DAG_ID_3}/run_3")
assert ti_resp.status_code == 200
ti_payload = ti_resp.json()
assert ti_payload["dag_id"] == DAG_ID_3
assert ti_payload["run_id"] == "run_3"
# Find the removed task summary; it should exist even if not in current serialized DAG structure
removed_ti = next(
(
n
for n in ti_payload["task_instances"]
if n["task_id"] == TASK_ID_4 and n["child_states"] is None
),
None,
)
assert removed_ti is not None
# Its state should be the aggregated state of its TIs, which includes 'removed'
assert removed_ti["state"] in (
"removed",
None,
"skipped",
"success",
"failed",
"running",
"queued",
"scheduled",
"deferred",
"restarting",
"up_for_retry",
"up_for_reschedule",
"upstream_failed",
)
def test_get_dag_structure(self, session, test_client):
session.commit()
with assert_queries_count(7):
response = test_client.get(f"/grid/structure/{DAG_ID}?limit=5")
assert response.status_code == 200
assert response.json() == [
{
"children": [{"id": "mapped_task_group.subtask", "is_mapped": True, "label": "subtask"}],
"id": "mapped_task_group",
"is_mapped": True,
"label": "mapped_task_group",
},
{"id": "task", "label": "A Beautiful Task Name 🚀"},
{
"children": [
{
"children": [
{
"id": "task_group.inner_task_group.inner_task_group_sub_task",
"is_mapped": True,
"label": "Inner Task Group Sub Task Label",
}
],
"id": "task_group.inner_task_group",
"label": "My Inner Task Group",
},
{"id": "task_group.mapped_task", "is_mapped": True, "label": "mapped_task"},
],
"id": "task_group",
"label": "task_group",
},
{"id": "mapped_task_2", "is_mapped": True, "label": "mapped_task_2"},
]
def test_get_grid_runs(self, session, test_client):
session.commit()
with assert_queries_count(5):
response = test_client.get(f"/grid/runs/{DAG_ID}?limit=5")
assert response.status_code == 200
assert response.json() == [
{
"dag_id": "test_dag",
"duration": 283996800.0,
"end_date": "2024-12-31T00:00:00Z",
"run_after": "2024-11-30T00:00:00Z",
"run_id": "run_1",
"run_type": "scheduled",
"start_date": "2016-01-01T00:00:00Z",
"state": "success",
},
{
"dag_id": "test_dag",
"duration": 283996800.0,
"end_date": "2024-12-31T00:00:00Z",
"run_after": "2024-11-30T00:00:00Z",
"run_id": "run_2",
"run_type": "manual",
"start_date": "2016-01-01T00:00:00Z",
"state": "failed",
},
]
@pytest.mark.parametrize(
("endpoint", "run_type", "expected"),
[
("runs", "scheduled", [GRID_RUN_1]),
("runs", "manual", [GRID_RUN_2]),
("structure", "scheduled", GRID_NODES),
("structure", "manual", GRID_NODES),
],
)
def test_filter_by_run_type(self, session, test_client, endpoint, run_type, expected):
session.commit()
response = test_client.get(f"/grid/{endpoint}/{DAG_ID}?run_type={run_type}")
assert response.status_code == 200
assert response.json() == expected
@pytest.mark.parametrize(
("endpoint", "triggering_user", "expected"),
[
("runs", "user2", [GRID_RUN_2]),
("runs", "nonexistent", []),
("structure", "user2", GRID_NODES),
],
)
def test_filter_by_triggering_user(self, session, test_client, endpoint, triggering_user, expected):
session.commit()
response = test_client.get(f"/grid/{endpoint}/{DAG_ID}?triggering_user={triggering_user}")
assert response.status_code == 200
assert response.json() == expected
def test_get_grid_runs_filter_by_run_type_and_triggering_user(self, session, test_client):
session.commit()
with assert_queries_count(5):
response = test_client.get(f"/grid/runs/{DAG_ID}?run_type=manual&triggering_user=user2")
assert response.status_code == 200
assert response.json() == [GRID_RUN_2]
@pytest.mark.parametrize(
("endpoint", "state", "expected"),
[
("runs", "success", [GRID_RUN_1]),
("runs", "failed", [GRID_RUN_2]),
("runs", "running", []),
("structure", "success", GRID_NODES),
("structure", "failed", GRID_NODES),
],
)
def test_filter_by_state(self, session, test_client, endpoint, state, expected):
session.commit()
response = test_client.get(f"/grid/{endpoint}/{DAG_ID}?state={state}")
assert response.status_code == 200
assert response.json() == expected
def test_grid_ti_summaries_group(self, session, test_client):
run_id = "run_4-1"
session.commit()
with assert_queries_count(4):
response = test_client.get(f"/grid/ti_summaries/{DAG_ID_4}/{run_id}")
assert response.status_code == 200
actual = response.json()
expected = {
"dag_id": "test_dag_4",
"run_id": "run_4-1",
"task_instances": [
{
"state": "success",
"task_id": "t1",
"child_states": None,
"max_end_date": None,
"min_start_date": None,
},
{
"state": "success",
"task_id": "t2",
"child_states": None,
"max_end_date": None,
"min_start_date": None,
},
{
"state": "success",
"task_id": "t7",
"child_states": None,
"max_end_date": None,
"min_start_date": None,
},
{
"child_states": {"success": 4},
"max_end_date": "2025-03-02T00:00:12Z",
"min_start_date": "2025-03-02T00:00:04Z",
"state": "success",
"task_id": "task_group-1",
},
{
"state": "success",
"task_id": "task_group-1.t6",
"child_states": None,
"max_end_date": None,
"min_start_date": None,
},
{
"child_states": {"success": 3},
"max_end_date": "2025-03-02T00:00:12Z",
"min_start_date": "2025-03-02T00:00:06Z",
"state": "success",
"task_id": "task_group-1.task_group-2",
},
{
"state": "success",
"task_id": "task_group-1.task_group-2.t3",
"child_states": None,
"max_end_date": None,
"min_start_date": None,
},
{
"state": "success",
"task_id": "task_group-1.task_group-2.t4",
"child_states": None,
"max_end_date": None,
"min_start_date": None,
},
{
"state": "success",
"task_id": "task_group-1.task_group-2.t5",
"child_states": None,
"max_end_date": None,
"min_start_date": None,
},
],
}
for obj in actual, expected:
tis = obj["task_instances"]
tis[:] = sorted(tis, key=lambda x: x["task_id"])
assert actual == expected
def test_grid_ti_summaries_mapped(self, session, test_client):
run_id = "run_2"
session.commit()
with assert_queries_count(4):
response = test_client.get(f"/grid/ti_summaries/{DAG_ID}/{run_id}")
assert response.status_code == 200
data = response.json()
actual = data["task_instances"]
def sort_dict(in_dict):
in_dict = sorted(in_dict, key=lambda x: x["task_id"])
out = []
for d in in_dict:
n = {k: d[k] for k in sorted(d, reverse=True)}
out.append(n)
return out
expected = [
{
"child_states": {"None": 1},
"task_id": "mapped_task_2",
"max_end_date": None,
"min_start_date": None,
"state": None,
},
{
"child_states": {"success": 1, "running": 1, "None": 1},
"max_end_date": "2024-12-30T01:02:03Z",
"min_start_date": "2024-12-30T01:00:00Z",
"state": "running",
"task_id": "mapped_task_group",
},
{
"state": "running",
"task_id": "mapped_task_group.subtask",
"child_states": None,
"max_end_date": None,
"min_start_date": None,
},
{
"state": "success",
"task_id": "task",
"child_states": None,
"max_end_date": None,
"min_start_date": None,
},
{
"child_states": {"None": 6},
"task_id": "task_group",
"max_end_date": None,
"min_start_date": None,
"state": None,
},
{
"child_states": {"None": 2},
"task_id": "task_group.inner_task_group",
"max_end_date": None,
"min_start_date": None,
"state": None,
},
{
"child_states": {"None": 2},
"task_id": "task_group.inner_task_group.inner_task_group_sub_task",
"max_end_date": None,
"min_start_date": None,
"state": None,
},
{
"child_states": {"None": 4},
"task_id": "task_group.mapped_task",
"max_end_date": None,
"min_start_date": None,
"state": None,
},
]
expected = sort_dict(expected)
actual = sort_dict(actual)
assert actual == expected
def test_structure_includes_historical_removed_task_with_proper_shape(self, session, test_client):
# Ensure the structure endpoint returns synthetic node for historical/removed task
with assert_queries_count(7):
response = test_client.get(f"/grid/structure/{DAG_ID_3}")
assert response.status_code == 200
nodes = response.json()
# Find the historical removed task id
t4 = next((n for n in nodes if n["id"] == TASK_ID_4), None)
assert t4 is not None
assert t4["label"] == TASK_ID_4
# Optional None fields are excluded from response due to response_model_exclude_none=True
assert "is_mapped" not in t4
assert "children" not in t4
| TestGetGridDataEndpoint |
python | pandas-dev__pandas | pandas/core/indexers/objects.py | {
"start": 17065,
"end": 21787
} | class ____(BaseIndexer):
"""Calculate bounds to compute groupby rolling, mimicking df.groupby().rolling()"""
def __init__(
self,
index_array: np.ndarray | None = None,
window_size: int | BaseIndexer = 0,
groupby_indices: dict | None = None,
window_indexer: type[BaseIndexer] = BaseIndexer,
indexer_kwargs: dict | None = None,
**kwargs,
) -> None:
"""
Parameters
----------
index_array : np.ndarray or None
np.ndarray of the index of the original object that we are performing
a chained groupby operation over. This index has been pre-sorted relative to
the groups
window_size : int or BaseIndexer
window size during the windowing operation
groupby_indices : dict or None
dict of {group label: [positional index of rows belonging to the group]}
window_indexer : BaseIndexer
BaseIndexer class determining the start and end bounds of each group
indexer_kwargs : dict or None
Custom kwargs to be passed to window_indexer
**kwargs :
keyword arguments that will be available when get_window_bounds is called
"""
self.groupby_indices = groupby_indices or {}
self.window_indexer = window_indexer
self.indexer_kwargs = indexer_kwargs.copy() if indexer_kwargs else {}
super().__init__(
index_array=index_array,
window_size=self.indexer_kwargs.pop("window_size", window_size),
**kwargs,
)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: int | None = None,
center: bool | None = None,
closed: str | None = None,
step: int | None = None,
) -> tuple[np.ndarray, np.ndarray]:
"""
Computes the bounds of a window.
Parameters
----------
num_values : int, default 0
number of values that will be aggregated over
window_size : int, default 0
the number of rows in a window
min_periods : int, default None
min_periods passed from the top level rolling API
center : bool, default None
center passed from the top level rolling API
closed : str, default None
closed passed from the top level rolling API
step : int, default None
step passed from the top level rolling API
win_type : str, default None
win_type passed from the top level rolling API
Returns
-------
A tuple of ndarray[int64]s, indicating the boundaries of each
window
"""
# 1) For each group, get the indices that belong to the group
# 2) Use the indices to calculate the start & end bounds of the window
# 3) Append the window bounds in group order
start_arrays = []
end_arrays = []
window_indices_start = 0
for indices in self.groupby_indices.values():
index_array: np.ndarray | None
if self.index_array is not None:
index_array = self.index_array.take(ensure_platform_int(indices))
else:
index_array = self.index_array
indexer = self.window_indexer(
index_array=index_array,
window_size=self.window_size,
**self.indexer_kwargs,
)
start, end = indexer.get_window_bounds(
len(indices), min_periods, center, closed, step
)
start = start.astype(np.int64)
end = end.astype(np.int64)
assert len(start) == len(end), (
"these should be equal in length from get_window_bounds"
)
# Cannot use groupby_indices as they might not be monotonic with the object
# we're rolling over
window_indices = np.arange(
window_indices_start, window_indices_start + len(indices)
)
window_indices_start += len(indices)
# Extend as we'll be slicing window like [start, end)
window_indices = np.append(window_indices, [window_indices[-1] + 1]).astype(
np.int64, copy=False
)
start_arrays.append(window_indices.take(ensure_platform_int(start)))
end_arrays.append(window_indices.take(ensure_platform_int(end)))
if len(start_arrays) == 0:
return np.array([], dtype=np.int64), np.array([], dtype=np.int64)
start = np.concatenate(start_arrays)
end = np.concatenate(end_arrays)
return start, end
| GroupbyIndexer |
python | django__django | tests/model_forms/models.py | {
"start": 12621,
"end": 13153
} | class ____(models.Model):
title = models.CharField(max_length=30)
image = models.FileField(storage=temp_storage, upload_to="tests")
# Support code for the tests; this keeps track of how many times save()
# gets called on each instance.
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._savecount = 0
def save(self, force_insert=False, force_update=False):
super().save(force_insert=force_insert, force_update=force_update)
self._savecount += 1
| Photo |
python | dateutil__dateutil | src/dateutil/parser/_parser.py | {
"start": 8649,
"end": 13451
} | class ____(object):
"""
Class which handles what inputs are accepted. Subclass this to customize
the language and acceptable values for each parameter.
:param dayfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the day (``True``) or month (``False``). If
``yearfirst`` is set to ``True``, this distinguishes between YDM
and YMD. Default is ``False``.
:param yearfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the year. If ``True``, the first number is taken
to be the year, otherwise the last number is taken to be the year.
Default is ``False``.
"""
# m from a.m/p.m, t from ISO T separator
JUMP = [" ", ".", ",", ";", "-", "/", "'",
"at", "on", "and", "ad", "m", "t", "of",
"st", "nd", "rd", "th"]
WEEKDAYS = [("Mon", "Monday"),
("Tue", "Tuesday"), # TODO: "Tues"
("Wed", "Wednesday"),
("Thu", "Thursday"), # TODO: "Thurs"
("Fri", "Friday"),
("Sat", "Saturday"),
("Sun", "Sunday")]
MONTHS = [("Jan", "January"),
("Feb", "February"), # TODO: "Febr"
("Mar", "March"),
("Apr", "April"),
("May", "May"),
("Jun", "June"),
("Jul", "July"),
("Aug", "August"),
("Sep", "Sept", "September"),
("Oct", "October"),
("Nov", "November"),
("Dec", "December")]
HMS = [("h", "hour", "hours"),
("m", "minute", "minutes"),
("s", "second", "seconds")]
AMPM = [("am", "a"),
("pm", "p")]
UTCZONE = ["UTC", "GMT", "Z", "z"]
PERTAIN = ["of"]
TZOFFSET = {}
# TODO: ERA = ["AD", "BC", "CE", "BCE", "Stardate",
# "Anno Domini", "Year of Our Lord"]
def __init__(self, dayfirst=False, yearfirst=False):
self._jump = self._convert(self.JUMP)
self._weekdays = self._convert(self.WEEKDAYS)
self._months = self._convert(self.MONTHS)
self._hms = self._convert(self.HMS)
self._ampm = self._convert(self.AMPM)
self._utczone = self._convert(self.UTCZONE)
self._pertain = self._convert(self.PERTAIN)
self.dayfirst = dayfirst
self.yearfirst = yearfirst
self._year = time.localtime().tm_year
self._century = self._year // 100 * 100
def _convert(self, lst):
dct = {}
for i, v in enumerate(lst):
if isinstance(v, tuple):
for v in v:
dct[v.lower()] = i
else:
dct[v.lower()] = i
return dct
def jump(self, name):
return name.lower() in self._jump
def weekday(self, name):
try:
return self._weekdays[name.lower()]
except KeyError:
pass
return None
def month(self, name):
try:
return self._months[name.lower()] + 1
except KeyError:
pass
return None
def hms(self, name):
try:
return self._hms[name.lower()]
except KeyError:
return None
def ampm(self, name):
try:
return self._ampm[name.lower()]
except KeyError:
return None
def pertain(self, name):
return name.lower() in self._pertain
def utczone(self, name):
return name.lower() in self._utczone
def tzoffset(self, name):
if name in self._utczone:
return 0
return self.TZOFFSET.get(name)
def convertyear(self, year, century_specified=False):
"""
Converts two-digit years to year within [-50, 49]
range of self._year (current local time)
"""
# Function contract is that the year is always positive
assert year >= 0
if year < 100 and not century_specified:
# assume current century to start
year += self._century
if year >= self._year + 50: # if too far in future
year -= 100
elif year < self._year - 50: # if too far in past
year += 100
return year
def validate(self, res):
# move to info
if res.year is not None:
res.year = self.convertyear(res.year, res.century_specified)
if ((res.tzoffset == 0 and not res.tzname) or
(res.tzname == 'Z' or res.tzname == 'z')):
res.tzname = "UTC"
res.tzoffset = 0
elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname):
res.tzoffset = 0
return True
| parserinfo |
python | Lightning-AI__lightning | src/lightning/pytorch/loops/fit_loop.py | {
"start": 2120,
"end": 2370
} | class ____:
NONE = "none"
RESTARTED_ON_EPOCH_START = "restarted_on_epoch_start"
RESTARTED_MID_EPOCH = "restarted_mid_epoch"
RESTARTED_ON_EPOCH_END = "restarted_on_epoch_end"
RESUMED_ON_EPOCH_END = "resumed_on_epoch_end"
| RestartStage |
python | jazzband__django-oauth-toolkit | oauth2_provider/contrib/rest_framework/permissions.py | {
"start": 2558,
"end": 3174
} | class ____(TokenHasScope):
"""
The request is authenticated as a user and the token used has the right scope
"""
def get_scopes(self, request, view):
try:
view_scopes = super().get_scopes(request, view)
except ImproperlyConfigured:
view_scopes = []
if request.method.upper() in SAFE_METHODS:
scope_type = oauth2_settings.READ_SCOPE
else:
scope_type = oauth2_settings.WRITE_SCOPE
required_scopes = ["{}:{}".format(scope, scope_type) for scope in view_scopes]
return required_scopes
| TokenHasResourceScope |
python | nedbat__coveragepy | tests/test_process.py | {
"start": 57622,
"end": 59958
} | class ____(CoverageTest):
"""Test that we can measure coverage in subprocesses."""
@pytest.mark.parametrize(
"fname",
[
base + suffix
for base, suffix in itertools.product(
["exec", "spawn"],
["l", "le", "lp", "lpe", "v", "ve", "vp", "vpe"],
)
],
)
def test_execv_patch(self, fname: str, _clean_pth_files: None) -> None:
self.make_file(
".coveragerc",
"""\
[run]
patch = subprocess, execv
""",
)
self.make_file(
"main.py",
f"""\
import os, sys
print("In main")
args = []
if "spawn" in {fname!r}:
args.append(os.P_WAIT)
args.append(sys.executable)
prog_args = ["python", {os.path.abspath("other.py")!r}, "cat", "dog"]
if "l" in {fname!r}:
args.extend(prog_args)
else:
args.append(prog_args)
if {fname!r}.endswith("e"):
args.append({{"SUBVAR": "the-sub-var"}})
os.environ["MAINVAR"] = "the-main-var"
sys.stdout.flush()
os.{fname}(*args)
""",
)
self.make_file(
"other.py",
"""\
import os, sys
print(f"MAINVAR = {os.getenv('MAINVAR', 'none')}")
print(f"SUBVAR = {os.getenv('SUBVAR', 'none')}")
print(f"{sys.argv[1:] = }")
""",
)
out = self.run_command("coverage run main.py")
expected = "In main\n"
if fname.endswith("e"):
expected += "MAINVAR = none\n"
expected += "SUBVAR = the-sub-var\n"
else:
expected += "MAINVAR = the-main-var\n"
expected += "SUBVAR = none\n"
expected += "sys.argv[1:] = ['cat', 'dog']\n"
assert out == expected
self.run_command("coverage combine")
data = coverage.CoverageData()
data.read()
main_lines = 12
if "spawn" in fname:
main_lines += 1
if fname.endswith("e"):
main_lines += 1
assert line_counts(data)["main.py"] == main_lines
assert line_counts(data)["other.py"] == 4
| ExecvTest |
python | numpy__numpy | numpy/_core/tests/test_umath.py | {
"start": 37284,
"end": 43539
} | class ____:
result_type = namedtuple('result_type',
['nocast', 'casted'])
helper_lambdas = {
'zero': lambda dtype: 0,
'min': lambda dtype: np.iinfo(dtype).min,
'neg_min': lambda dtype: -np.iinfo(dtype).min,
'min-zero': lambda dtype: (np.iinfo(dtype).min, 0),
'neg_min-zero': lambda dtype: (-np.iinfo(dtype).min, 0),
}
overflow_results = {
np.remainder: result_type(
helper_lambdas['zero'], helper_lambdas['zero']),
np.fmod: result_type(
helper_lambdas['zero'], helper_lambdas['zero']),
operator.mod: result_type(
helper_lambdas['zero'], helper_lambdas['zero']),
operator.floordiv: result_type(
helper_lambdas['min'], helper_lambdas['neg_min']),
np.floor_divide: result_type(
helper_lambdas['min'], helper_lambdas['neg_min']),
np.divmod: result_type(
helper_lambdas['min-zero'], helper_lambdas['neg_min-zero'])
}
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
@pytest.mark.parametrize("dtype", np.typecodes["Integer"])
def test_signed_division_overflow(self, dtype):
to_check = interesting_binop_operands(np.iinfo(dtype).min, -1, dtype)
for op1, op2, extractor, operand_identifier in to_check:
with pytest.warns(RuntimeWarning, match="overflow encountered"):
res = op1 // op2
assert res.dtype == op1.dtype
assert extractor(res) == np.iinfo(op1.dtype).min
# Remainder is well defined though, and does not warn:
res = op1 % op2
assert res.dtype == op1.dtype
assert extractor(res) == 0
# Check fmod as well:
res = np.fmod(op1, op2)
assert extractor(res) == 0
# Divmod warns for the division part:
with pytest.warns(RuntimeWarning, match="overflow encountered"):
res1, res2 = np.divmod(op1, op2)
assert res1.dtype == res2.dtype == op1.dtype
assert extractor(res1) == np.iinfo(op1.dtype).min
assert extractor(res2) == 0
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
def test_divide_by_zero(self, dtype):
# Note that the return value cannot be well defined here, but NumPy
# currently uses 0 consistently. This could be changed.
to_check = interesting_binop_operands(1, 0, dtype)
for op1, op2, extractor, operand_identifier in to_check:
with pytest.warns(RuntimeWarning, match="divide by zero"):
res = op1 // op2
assert res.dtype == op1.dtype
assert extractor(res) == 0
with pytest.warns(RuntimeWarning, match="divide by zero"):
res1, res2 = np.divmod(op1, op2)
assert res1.dtype == res2.dtype == op1.dtype
assert extractor(res1) == 0
assert extractor(res2) == 0
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
@pytest.mark.parametrize("dividend_dtype", sctypes['int'])
@pytest.mark.parametrize("divisor_dtype", sctypes['int'])
@pytest.mark.parametrize("operation",
[np.remainder, np.fmod, np.divmod, np.floor_divide,
operator.mod, operator.floordiv])
@np.errstate(divide='warn', over='warn')
def test_overflows(self, dividend_dtype, divisor_dtype, operation):
# SIMD tries to perform the operation on as many elements as possible
# that is a multiple of the register's size. We resort to the
# default implementation for the leftover elements.
# We try to cover all paths here.
arrays = [np.array([np.iinfo(dividend_dtype).min] * i,
dtype=dividend_dtype) for i in range(1, 129)]
divisor = np.array([-1], dtype=divisor_dtype)
# If dividend is a larger type than the divisor (`else` case),
# then, result will be a larger type than dividend and will not
# result in an overflow for `divmod` and `floor_divide`.
if np.dtype(dividend_dtype).itemsize >= np.dtype(
divisor_dtype).itemsize and operation in (
np.divmod, np.floor_divide, operator.floordiv):
with pytest.warns(
RuntimeWarning,
match="overflow encountered in"):
result = operation(
dividend_dtype(np.iinfo(dividend_dtype).min),
divisor_dtype(-1)
)
assert result == self.overflow_results[operation].nocast(
dividend_dtype)
# Arrays
for a in arrays:
# In case of divmod, we need to flatten the result
# column first as we get a column vector of quotient and
# remainder and a normal flatten of the expected result.
with pytest.warns(
RuntimeWarning,
match="overflow encountered in"):
result = np.array(operation(a, divisor)).flatten('f')
expected_array = np.array(
[self.overflow_results[operation].nocast(
dividend_dtype)] * len(a)).flatten()
assert_array_equal(result, expected_array)
else:
# Scalars
result = operation(
dividend_dtype(np.iinfo(dividend_dtype).min),
divisor_dtype(-1)
)
assert result == self.overflow_results[operation].casted(
dividend_dtype)
# Arrays
for a in arrays:
# See above comment on flatten
result = np.array(operation(a, divisor)).flatten('f')
expected_array = np.array(
[self.overflow_results[operation].casted(
dividend_dtype)] * len(a)).flatten()
assert_array_equal(result, expected_array)
| TestDivisionIntegerOverflowsAndDivideByZero |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0138_remove_old_fields.py | {
"start": 121,
"end": 1367
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0137_use_generic_root_selector"),
]
operations = [
migrations.RemoveField(
model_name="addonsconfig",
name="doc_diff_root_selector",
),
migrations.RemoveField(
model_name="addonsconfig",
name="linkpreviews_doctool_name",
),
migrations.RemoveField(
model_name="addonsconfig",
name="linkpreviews_doctool_version",
),
migrations.RemoveField(
model_name="addonsconfig",
name="linkpreviews_root_selector",
),
migrations.RemoveField(
model_name="historicaladdonsconfig",
name="doc_diff_root_selector",
),
migrations.RemoveField(
model_name="historicaladdonsconfig",
name="linkpreviews_doctool_name",
),
migrations.RemoveField(
model_name="historicaladdonsconfig",
name="linkpreviews_doctool_version",
),
migrations.RemoveField(
model_name="historicaladdonsconfig",
name="linkpreviews_root_selector",
),
]
| Migration |
python | apache__airflow | providers/apprise/src/airflow/providers/apprise/notifications/apprise.py | {
"start": 1163,
"end": 4137
} | class ____(BaseNotifier):
r"""
Apprise BaseNotifier.
:param body: Specify the message body
:param title: Specify the message title. This field is complete optional
:param notify_type: Specify the message type (default=info). Possible values are "info",
"success", "failure", and "warning"
:param body_format: Specify the input message format (default=text). Possible values are "text",
"html", and "markdown".
:param tag: Specify one or more tags to filter which services to notify
:param attach: Specify one or more file attachment locations
:param interpret_escapes: Enable interpretation of backslash escapes. For example, this would convert
sequences such as \n and \r to their respected ascii new-line and carriage
:param config: Specify one or more configuration
:param apprise_conn_id: connection that has Apprise configs setup
"""
template_fields = ("body", "title", "tag", "attach")
def __init__(
self,
*,
body: str,
title: str | None = None,
notify_type: NotifyType = NotifyType.INFO,
body_format: NotifyFormat = NotifyFormat.TEXT,
tag: str | Iterable[str] = "all",
attach: str | None = None,
interpret_escapes: bool | None = None,
config: AppriseConfig | None = None,
apprise_conn_id: str = AppriseHook.default_conn_name,
**kwargs,
):
if AIRFLOW_V_3_1_PLUS:
# Support for passing context was added in 3.1.0
super().__init__(**kwargs)
else:
super().__init__()
self.apprise_conn_id = apprise_conn_id
self.body = body
self.title = title
self.notify_type = notify_type
self.body_format = body_format
self.tag = tag
self.attach = attach
self.interpret_escapes = interpret_escapes
self.config = config
@cached_property
def hook(self) -> AppriseHook:
"""Apprise Hook."""
return AppriseHook(apprise_conn_id=self.apprise_conn_id)
def notify(self, context):
"""Send a alert to a apprise configured service."""
self.hook.notify(
body=self.body,
title=self.title,
notify_type=self.notify_type,
body_format=self.body_format,
tag=self.tag,
attach=self.attach,
interpret_escapes=self.interpret_escapes,
config=self.config,
)
async def async_notify(self, context):
"""Send a alert to a apprise configured service."""
await self.hook.async_notify(
body=self.body,
title=self.title,
notify_type=self.notify_type,
body_format=self.body_format,
tag=self.tag,
attach=self.attach,
interpret_escapes=self.interpret_escapes,
config=self.config,
)
send_apprise_notification = AppriseNotifier
| AppriseNotifier |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-elasticsearch/llama_index/vector_stores/elasticsearch/base.py | {
"start": 3799,
"end": 21867
} | class ____(BasePydanticVectorStore):
"""
Elasticsearch vector store.
Args:
index_name: Name of the Elasticsearch index.
es_client: Optional. Pre-existing AsyncElasticsearch client.
es_url: Optional. Elasticsearch URL.
es_cloud_id: Optional. Elasticsearch cloud ID.
es_api_key: Optional. Elasticsearch API key.
es_user: Optional. Elasticsearch username.
es_password: Optional. Elasticsearch password.
text_field: Optional. Name of the Elasticsearch field that stores the text.
vector_field: Optional. Name of the Elasticsearch field that stores the
embedding.
batch_size: Optional. Batch size for bulk indexing. Defaults to 200.
distance_strategy: Optional. Distance strategy to use for similarity search.
Defaults to "COSINE".
retrieval_strategy: Retrieval strategy to use. AsyncBM25Strategy /
AsyncSparseVectorStrategy / AsyncDenseVectorStrategy / AsyncRetrievalStrategy.
Defaults to AsyncDenseVectorStrategy.
Raises:
ConnectionError: If AsyncElasticsearch client cannot connect to Elasticsearch.
ValueError: If neither es_client nor es_url nor es_cloud_id is provided.
Examples:
`pip install llama-index-vector-stores-elasticsearch`
```python
from llama_index.vector_stores import ElasticsearchStore
# Additional setup for ElasticsearchStore class
index_name = "my_index"
es_url = "http://localhost:9200"
es_cloud_id = "<cloud-id>" # Found within the deployment page
es_user = "elastic"
es_password = "<password>" # Provided when creating deployment or can be reset
es_api_key = "<api-key>" # Create an API key within Kibana (Security -> API Keys)
# Connecting to ElasticsearchStore locally
es_local = ElasticsearchStore(
index_name=index_name,
es_url=es_url,
)
# Connecting to Elastic Cloud with username and password
es_cloud_user_pass = ElasticsearchStore(
index_name=index_name,
es_cloud_id=es_cloud_id,
es_user=es_user,
es_password=es_password,
)
# Connecting to Elastic Cloud with API Key
es_cloud_api_key = ElasticsearchStore(
index_name=index_name,
es_cloud_id=es_cloud_id,
es_api_key=es_api_key,
)
```
"""
class Config:
# allow pydantic to tolarate its inability to validate AsyncRetrievalStrategy
arbitrary_types_allowed = True
stores_text: bool = True
index_name: str
es_client: Optional[Any]
es_url: Optional[str]
es_cloud_id: Optional[str]
es_api_key: Optional[str]
es_user: Optional[str]
es_password: Optional[str]
text_field: str = "content"
vector_field: str = "embedding"
batch_size: int = 200
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE"
retrieval_strategy: AsyncRetrievalStrategy
_store = PrivateAttr()
def __init__(
self,
index_name: str,
es_client: Optional[Any] = None,
es_url: Optional[str] = None,
es_cloud_id: Optional[str] = None,
es_api_key: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
text_field: str = "content",
vector_field: str = "embedding",
batch_size: int = 200,
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE",
retrieval_strategy: Optional[AsyncRetrievalStrategy] = None,
metadata_mappings: Optional[Dict[str, Any]] = None,
**kwargs,
) -> None:
nest_asyncio.apply()
if not es_client:
es_client = get_elasticsearch_client(
url=es_url,
cloud_id=es_cloud_id,
api_key=es_api_key,
username=es_user,
password=es_password,
)
if retrieval_strategy is None:
retrieval_strategy = AsyncDenseVectorStrategy(
distance=DistanceMetric[distance_strategy]
)
base_metadata_mappings = {
"document_id": {"type": "keyword"},
"doc_id": {"type": "keyword"},
"ref_doc_id": {"type": "keyword"},
}
metadata_mappings = metadata_mappings or {}
metadata_mappings.update(base_metadata_mappings)
super().__init__(
index_name=index_name,
es_client=es_client,
es_url=es_url,
es_cloud_id=es_cloud_id,
es_api_key=es_api_key,
es_user=es_user,
es_password=es_password,
text_field=text_field,
vector_field=vector_field,
batch_size=batch_size,
distance_strategy=distance_strategy,
retrieval_strategy=retrieval_strategy,
)
self._store = AsyncVectorStore(
user_agent=get_user_agent(),
client=es_client,
index=index_name,
retrieval_strategy=retrieval_strategy,
text_field=text_field,
vector_field=vector_field,
metadata_mappings=metadata_mappings,
)
# Disable query embeddings when using Sparse vectors or BM25.
# ELSER generates its own embeddings server-side
if not isinstance(retrieval_strategy, AsyncDenseVectorStrategy):
self.is_embedding_query = False
@property
def client(self) -> Any:
"""Get async elasticsearch client."""
return self._store.client
def close(self) -> None:
return asyncio.get_event_loop().run_until_complete(self._store.close())
def add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""
Add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the Elasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch['async'] python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.async_add(
nodes,
create_index_if_not_exists=create_index_if_not_exists,
**add_kwargs,
)
)
async def async_add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""
Asynchronous method to add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the AsyncElasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
if len(nodes) == 0:
return []
embeddings: Optional[List[List[float]]] = None
texts: List[str] = []
metadatas: List[dict] = []
ids: List[str] = []
for node in nodes:
ids.append(node.node_id)
texts.append(node.get_content(metadata_mode=MetadataMode.NONE))
metadatas.append(node_to_metadata_dict(node, remove_text=True))
# Generate embeddings when using dense vectors. They are not needed
# for other strategies.
if isinstance(self.retrieval_strategy, AsyncDenseVectorStrategy):
embeddings = []
for node in nodes:
embeddings.append(node.get_embedding())
if not self._store.num_dimensions:
self._store.num_dimensions = len(embeddings[0])
return await self._store.add_texts(
texts=texts,
metadatas=metadatas,
vectors=embeddings,
ids=ids,
create_index_if_not_exists=create_index_if_not_exists,
bulk_kwargs=add_kwargs,
)
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to Elasticsearch delete_by_query.
Raises:
Exception: If Elasticsearch delete_by_query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.adelete(ref_doc_id, **delete_kwargs)
)
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Async delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to AsyncElasticsearch delete_by_query.
Raises:
Exception: If AsyncElasticsearch delete_by_query fails.
"""
await self._store.delete(
query={"term": {"metadata.ref_doc_id": ref_doc_id}}, **delete_kwargs
)
def delete_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
**delete_kwargs: Any,
) -> None:
"""
Delete nodes from vector store using node IDs and filters.
Args:
node_ids: Optional list of node IDs to delete.
filters: Optional metadata filters to select nodes to delete.
delete_kwargs: Optional additional arguments to pass to delete operation.
"""
return asyncio.get_event_loop().run_until_complete(
self.adelete_nodes(node_ids, filters, **delete_kwargs)
)
async def adelete_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
**delete_kwargs: Any,
) -> None:
"""
Asynchronously delete nodes from vector store using node IDs and filters.
Args:
node_ids (Optional[List[str]], optional): List of node IDs. Defaults to None.
filters (Optional[MetadataFilters], optional): Metadata filters. Defaults to None.
delete_kwargs (Any, optional): Optional additional arguments to pass to delete operation.
"""
if not node_ids and not filters:
return
if node_ids and not filters:
await self._store.delete(ids=node_ids, **delete_kwargs)
return
query = {"bool": {"must": []}}
if node_ids:
query["bool"]["must"].append({"terms": {"_id": node_ids}})
if filters:
es_filter = _to_elasticsearch_filter(filters)
if "bool" in es_filter and "must" in es_filter["bool"]:
query["bool"]["must"].extend(es_filter["bool"]["must"])
else:
query["bool"]["must"].append(es_filter)
await self._store.delete(query=query, **delete_kwargs)
def query(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
metadata_keyword_suffix: str = ".keyword",
**kwargs: Any,
) -> VectorStoreQueryResult:
"""
Query index for top k most similar nodes.
Args:
query_embedding (List[float]): query embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the Elasticsearch query.
es_filter: Optional. Elasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
metadata_keyword_suffix (str): The suffix to append to the metadata field of the keyword type.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If Elasticsearch query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.aquery(query, custom_query, es_filter, **kwargs)
)
async def aquery(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
metadata_keyword_suffix: str = ".keyword",
**kwargs: Any,
) -> VectorStoreQueryResult:
"""
Asynchronous query index for top k most similar nodes.
Args:
query_embedding (VectorStoreQuery): query embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the AsyncElasticsearch query.
es_filter: Optional. AsyncElasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
metadata_keyword_suffix (str): The suffix to append to the metadata field of the keyword type.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If AsyncElasticsearch query fails.
"""
_mode_must_match_retrieval_strategy(query.mode, self.retrieval_strategy)
if query.filters is not None and len(query.filters.legacy_filters()) > 0:
filter = [_to_elasticsearch_filter(query.filters, metadata_keyword_suffix)]
else:
filter = es_filter or []
hits = await self._store.search(
query=query.query_str,
query_vector=query.query_embedding,
k=query.similarity_top_k,
num_candidates=query.similarity_top_k * 10,
filter=filter,
custom_query=custom_query,
)
top_k_nodes = []
top_k_ids = []
top_k_scores = []
for hit in hits:
node = convert_es_hit_to_node(hit, self.text_field)
top_k_nodes.append(node)
top_k_ids.append(hit["_id"])
top_k_scores.append(hit["_score"])
return VectorStoreQueryResult(
nodes=top_k_nodes,
ids=top_k_ids,
similarities=_to_llama_similarities(top_k_scores),
)
def get_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
) -> List[BaseNode]:
"""
Get nodes from Elasticsearch index.
Args:
node_ids (Optional[List[str]]): List of node IDs to retrieve.
filters (Optional[MetadataFilters]): Metadata filters to apply.
Returns:
List[BaseNode]: List of nodes retrieved from the index.
"""
return asyncio.get_event_loop().run_until_complete(
self.aget_nodes(node_ids, filters)
)
async def aget_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
) -> List[BaseNode]:
"""
Asynchronously get nodes from Elasticsearch index.
Args:
node_ids (Optional[List[str]]): List of node IDs to retrieve.
filters (Optional[MetadataFilters]): Metadata filters to apply.
Returns:
List[BaseNode]: List of nodes retrieved from the index.
Raises:
ValueError: If neither node_ids nor filters is provided.
"""
if not node_ids and not filters:
raise ValueError("Either node_ids or filters must be provided.")
query = {"bool": {"must": []}}
if node_ids is not None:
query["bool"]["must"].append({"terms": {"_id": node_ids}})
if filters:
es_filter = _to_elasticsearch_filter(filters)
if "bool" in es_filter and "must" in es_filter["bool"]:
query["bool"]["must"].extend(es_filter["bool"]["must"])
else:
query["bool"]["must"].append(es_filter)
response = await self._store.client.search(
index=self.index_name,
body={"query": query, "size": 10000},
)
hits = response.get("hits", {}).get("hits", [])
nodes = []
for hit in hits:
nodes.append(convert_es_hit_to_node(hit, self.text_field))
return nodes
def clear(self) -> None:
"""
Clear all nodes from Elasticsearch index.
This method deletes and recreates the index.
"""
return asyncio.get_event_loop().run_until_complete(self.aclear())
async def aclear(self) -> None:
"""
Asynchronously clear all nodes from Elasticsearch index.
This method deletes and recreates the index.
"""
if await self._store.client.indices.exists(index=self.index_name):
await self._store.client.indices.delete(index=self.index_name)
| ElasticsearchStore |
python | django__django | tests/queries/models.py | {
"start": 13860,
"end": 13985
} | class ____(models.Model):
title = models.TextField()
paragraph = models.ForeignKey("Paragraph", models.CASCADE)
| Chapter |
python | matplotlib__matplotlib | lib/matplotlib/tests/test_legend.py | {
"start": 15781,
"end": 65035
} | class ____:
# Tests the legend function for figure
def test_legend_handle_label(self):
fig, ax = plt.subplots()
lines = ax.plot(range(10))
with mock.patch('matplotlib.legend.Legend') as Legend:
fig.legend(lines, ['hello world'])
Legend.assert_called_with(fig, lines, ['hello world'],
bbox_transform=fig.transFigure)
def test_legend_no_args(self):
fig, ax = plt.subplots()
lines = ax.plot(range(10), label='hello world')
with mock.patch('matplotlib.legend.Legend') as Legend:
fig.legend()
Legend.assert_called_with(fig, lines, ['hello world'],
bbox_transform=fig.transFigure)
def test_legend_label_arg(self):
fig, ax = plt.subplots()
lines = ax.plot(range(10))
with mock.patch('matplotlib.legend.Legend') as Legend:
fig.legend(['foobar'])
Legend.assert_called_with(fig, lines, ['foobar'],
bbox_transform=fig.transFigure)
def test_legend_label_three_args(self):
fig, ax = plt.subplots()
lines = ax.plot(range(10))
with pytest.raises(TypeError, match="0-2"):
fig.legend(lines, ['foobar'], 'right')
with pytest.raises(TypeError, match="0-2"):
fig.legend(lines, ['foobar'], 'right', loc='left')
def test_legend_kw_args(self):
fig, axs = plt.subplots(1, 2)
lines = axs[0].plot(range(10))
lines2 = axs[1].plot(np.arange(10) * 2.)
with mock.patch('matplotlib.legend.Legend') as Legend:
fig.legend(loc='right', labels=('a', 'b'), handles=(lines, lines2))
Legend.assert_called_with(
fig, (lines, lines2), ('a', 'b'), loc='right',
bbox_transform=fig.transFigure)
def test_error_args_kwargs(self):
fig, axs = plt.subplots(1, 2)
lines = axs[0].plot(range(10))
lines2 = axs[1].plot(np.arange(10) * 2.)
msg = 'must both be passed positionally or both as keywords'
with pytest.raises(TypeError, match=msg):
fig.legend((lines, lines2), labels=('a', 'b'))
def test_figure_legend_outside():
todos = ['upper ' + pos for pos in ['left', 'center', 'right']]
todos += ['lower ' + pos for pos in ['left', 'center', 'right']]
todos += ['left ' + pos for pos in ['lower', 'center', 'upper']]
todos += ['right ' + pos for pos in ['lower', 'center', 'upper']]
upperext = [20.347556, 27.722556, 790.583, 545.499]
lowerext = [20.347556, 71.056556, 790.583, 588.833]
leftext = [151.681556, 27.722556, 790.583, 588.833]
rightext = [20.347556, 27.722556, 659.249, 588.833]
axbb = [upperext, upperext, upperext,
lowerext, lowerext, lowerext,
leftext, leftext, leftext,
rightext, rightext, rightext]
legbb = [[10., 555., 133., 590.], # upper left
[338.5, 555., 461.5, 590.], # upper center
[667, 555., 790., 590.], # upper right
[10., 10., 133., 45.], # lower left
[338.5, 10., 461.5, 45.], # lower center
[667., 10., 790., 45.], # lower right
[10., 10., 133., 45.], # left lower
[10., 282.5, 133., 317.5], # left center
[10., 555., 133., 590.], # left upper
[667, 10., 790., 45.], # right lower
[667., 282.5, 790., 317.5], # right center
[667., 555., 790., 590.]] # right upper
for nn, todo in enumerate(todos):
print(todo)
fig, axs = plt.subplots(constrained_layout=True, dpi=100)
axs.plot(range(10), label='Boo1')
leg = fig.legend(loc='outside ' + todo)
fig.draw_without_rendering()
assert_allclose(axs.get_window_extent().extents,
axbb[nn])
assert_allclose(leg.get_window_extent().extents,
legbb[nn])
@image_comparison(['legend_stackplot.png'],
tol=0 if platform.machine() == 'x86_64' else 0.031)
def test_legend_stackplot():
"""Test legend for PolyCollection using stackplot."""
# related to #1341, #1943, and PR #3303
fig, ax = plt.subplots()
x = np.linspace(0, 10, 10)
y1 = 1.0 * x
y2 = 2.0 * x + 1
y3 = 3.0 * x + 2
ax.stackplot(x, y1, y2, y3, labels=['y1', 'y2', 'y3'])
ax.set_xlim(0, 10)
ax.set_ylim(0, 70)
ax.legend(loc='best')
def test_cross_figure_patch_legend():
fig, ax = plt.subplots()
fig2, ax2 = plt.subplots()
brs = ax.bar(range(3), range(3))
fig2.legend(brs, 'foo')
def test_nanscatter():
fig, ax = plt.subplots()
h = ax.scatter([np.nan], [np.nan], marker="o",
facecolor="r", edgecolor="r", s=3)
ax.legend([h], ["scatter"])
fig, ax = plt.subplots()
for color in ['red', 'green', 'blue']:
n = 750
x, y = np.random.rand(2, n)
scale = 200.0 * np.random.rand(n)
ax.scatter(x, y, c=color, s=scale, label=color,
alpha=0.3, edgecolors='none')
ax.legend()
ax.grid(True)
def test_legend_repeatcheckok():
fig, ax = plt.subplots()
ax.scatter(0.0, 1.0, color='k', marker='o', label='test')
ax.scatter(0.5, 0.0, color='r', marker='v', label='test')
ax.legend()
hand, lab = mlegend._get_legend_handles_labels([ax])
assert len(lab) == 2
fig, ax = plt.subplots()
ax.scatter(0.0, 1.0, color='k', marker='o', label='test')
ax.scatter(0.5, 0.0, color='k', marker='v', label='test')
ax.legend()
hand, lab = mlegend._get_legend_handles_labels([ax])
assert len(lab) == 2
@image_comparison(['not_covering_scatter.png'])
def test_not_covering_scatter():
colors = ['b', 'g', 'r']
for n in range(3):
plt.scatter([n], [n], color=colors[n])
plt.legend(['foo', 'foo', 'foo'], loc='best')
plt.gca().set_xlim(-0.5, 2.2)
plt.gca().set_ylim(-0.5, 2.2)
@image_comparison(['not_covering_scatter_transform.png'])
def test_not_covering_scatter_transform():
# Offsets point to top left, the default auto position
offset = mtransforms.Affine2D().translate(-20, 20)
x = np.linspace(0, 30, 1000)
plt.plot(x, x)
plt.scatter([20], [10], transform=offset + plt.gca().transData)
plt.legend(['foo', 'bar'], loc='best')
def test_linecollection_scaled_dashes():
lines1 = [[(0, .5), (.5, 1)], [(.3, .6), (.2, .2)]]
lines2 = [[[0.7, .2], [.8, .4]], [[.5, .7], [.6, .1]]]
lines3 = [[[0.6, .2], [.8, .4]], [[.5, .7], [.1, .1]]]
lc1 = mcollections.LineCollection(lines1, linestyles="--", lw=3)
lc2 = mcollections.LineCollection(lines2, linestyles="-.")
lc3 = mcollections.LineCollection(lines3, linestyles=":", lw=.5)
fig, ax = plt.subplots()
ax.add_collection(lc1)
ax.add_collection(lc2)
ax.add_collection(lc3)
leg = ax.legend([lc1, lc2, lc3], ["line1", "line2", 'line 3'])
h1, h2, h3 = leg.legend_handles
for oh, lh in zip((lc1, lc2, lc3), (h1, h2, h3)):
assert oh.get_linestyles()[0] == lh._dash_pattern
def test_handler_numpoints():
"""Test legend handler with numpoints <= 1."""
# related to #6921 and PR #8478
fig, ax = plt.subplots()
ax.plot(range(5), label='test')
ax.legend(numpoints=0.5)
def test_text_nohandler_warning():
"""Test that Text artists with labels raise a warning"""
fig, ax = plt.subplots()
ax.plot([0], label="mock data")
ax.text(x=0, y=0, s="text", label="label")
with pytest.warns(UserWarning) as record:
ax.legend()
assert len(record) == 1
# this should _not_ warn:
f, ax = plt.subplots()
ax.pcolormesh(np.random.uniform(0, 1, (10, 10)))
with warnings.catch_warnings():
warnings.simplefilter("error")
ax.get_legend_handles_labels()
def test_empty_bar_chart_with_legend():
"""Test legend when bar chart is empty with a label."""
# related to issue #13003. Calling plt.legend() should not
# raise an IndexError.
plt.bar([], [], label='test')
plt.legend()
@image_comparison(['shadow_argument_types.png'], remove_text=True, style='mpl20',
tol=0 if platform.machine() == 'x86_64' else 0.028)
def test_shadow_argument_types():
# Test that different arguments for shadow work as expected
fig, ax = plt.subplots()
ax.plot([1, 2, 3], label='test')
# Test various shadow configurations
# as well as different ways of specifying colors
legs = (ax.legend(loc='upper left', shadow=True), # True
ax.legend(loc='upper right', shadow=False), # False
ax.legend(loc='center left', # string
shadow={'color': 'red', 'alpha': 0.1}),
ax.legend(loc='center right', # tuple
shadow={'color': (0.1, 0.2, 0.5), 'oy': -5}),
ax.legend(loc='lower left', # tab
shadow={'color': 'tab:cyan', 'ox': 10})
)
for l in legs:
ax.add_artist(l)
ax.legend(loc='lower right') # default
def test_shadow_invalid_argument():
# Test if invalid argument to legend shadow
# (i.e. not [color|bool]) raises ValueError
fig, ax = plt.subplots()
ax.plot([1, 2, 3], label='test')
with pytest.raises(ValueError, match="dict or bool"):
ax.legend(loc="upper left", shadow="aardvark") # Bad argument
def test_shadow_framealpha():
# Test if framealpha is activated when shadow is True
# and framealpha is not explicitly passed'''
fig, ax = plt.subplots()
ax.plot(range(100), label="test")
leg = ax.legend(shadow=True, facecolor='w')
assert leg.get_frame().get_alpha() == 1
def test_legend_title_empty():
# test that if we don't set the legend title, that
# it comes back as an empty string, and that it is not
# visible:
fig, ax = plt.subplots()
ax.plot(range(10), label="mock data")
leg = ax.legend()
assert leg.get_title().get_text() == ""
assert not leg.get_title().get_visible()
def test_legend_proper_window_extent():
# test that legend returns the expected extent under various dpi...
fig, ax = plt.subplots(dpi=100)
ax.plot(range(10), label='Aardvark')
leg = ax.legend()
x01 = leg.get_window_extent(fig.canvas.get_renderer()).x0
fig, ax = plt.subplots(dpi=200)
ax.plot(range(10), label='Aardvark')
leg = ax.legend()
x02 = leg.get_window_extent(fig.canvas.get_renderer()).x0
assert pytest.approx(x01*2, 0.1) == x02
def test_window_extent_cached_renderer():
fig, ax = plt.subplots(dpi=100)
ax.plot(range(10), label='Aardvark')
leg = ax.legend()
leg2 = fig.legend()
fig.canvas.draw()
# check that get_window_extent will use the cached renderer
leg.get_window_extent()
leg2.get_window_extent()
def test_legend_title_fontprop_fontsize():
# test the title_fontsize kwarg
plt.plot(range(10), label="mock data")
with pytest.raises(ValueError):
plt.legend(title='Aardvark', title_fontsize=22,
title_fontproperties={'family': 'serif', 'size': 22})
leg = plt.legend(title='Aardvark', title_fontproperties=FontProperties(
family='serif', size=22))
assert leg.get_title().get_size() == 22
fig, axes = plt.subplots(2, 3, figsize=(10, 6))
axes = axes.flat
axes[0].plot(range(10), label="mock data")
leg0 = axes[0].legend(title='Aardvark', title_fontsize=22)
assert leg0.get_title().get_fontsize() == 22
axes[1].plot(range(10), label="mock data")
leg1 = axes[1].legend(title='Aardvark',
title_fontproperties={'family': 'serif', 'size': 22})
assert leg1.get_title().get_fontsize() == 22
axes[2].plot(range(10), label="mock data")
mpl.rcParams['legend.title_fontsize'] = None
leg2 = axes[2].legend(title='Aardvark',
title_fontproperties={'family': 'serif'})
assert leg2.get_title().get_fontsize() == mpl.rcParams['font.size']
axes[3].plot(range(10), label="mock data")
leg3 = axes[3].legend(title='Aardvark')
assert leg3.get_title().get_fontsize() == mpl.rcParams['font.size']
axes[4].plot(range(10), label="mock data")
mpl.rcParams['legend.title_fontsize'] = 20
leg4 = axes[4].legend(title='Aardvark',
title_fontproperties={'family': 'serif'})
assert leg4.get_title().get_fontsize() == 20
axes[5].plot(range(10), label="mock data")
leg5 = axes[5].legend(title='Aardvark')
assert leg5.get_title().get_fontsize() == 20
@pytest.mark.parametrize('alignment', ('center', 'left', 'right'))
def test_legend_alignment(alignment):
fig, ax = plt.subplots()
ax.plot(range(10), label='test')
leg = ax.legend(title="Aardvark", alignment=alignment)
assert leg.get_children()[0].align == alignment
assert leg.get_alignment() == alignment
@pytest.mark.parametrize('loc', ('center', 'best',))
def test_ax_legend_set_loc(loc):
fig, ax = plt.subplots()
ax.plot(range(10), label='test')
leg = ax.legend()
leg.set_loc(loc)
assert leg._get_loc() == mlegend.Legend.codes[loc]
@pytest.mark.parametrize('loc', ('outside right', 'right',))
def test_fig_legend_set_loc(loc):
fig, ax = plt.subplots()
ax.plot(range(10), label='test')
leg = fig.legend()
leg.set_loc(loc)
loc = loc.split()[1] if loc.startswith("outside") else loc
assert leg._get_loc() == mlegend.Legend.codes[loc]
@pytest.mark.parametrize('alignment', ('center', 'left', 'right'))
def test_legend_set_alignment(alignment):
fig, ax = plt.subplots()
ax.plot(range(10), label='test')
leg = ax.legend()
leg.set_alignment(alignment)
assert leg.get_children()[0].align == alignment
assert leg.get_alignment() == alignment
@pytest.mark.parametrize('color', ('red', 'none', (.5, .5, .5)))
def test_legend_labelcolor_single(color):
# test labelcolor for a single color
fig, ax = plt.subplots()
ax.plot(np.arange(10), np.arange(10)*1, label='#1')
ax.plot(np.arange(10), np.arange(10)*2, label='#2')
ax.plot(np.arange(10), np.arange(10)*3, label='#3')
leg = ax.legend(labelcolor=color)
for text in leg.get_texts():
assert mpl.colors.same_color(text.get_color(), color)
def test_legend_labelcolor_list():
# test labelcolor for a list of colors
fig, ax = plt.subplots()
ax.plot(np.arange(10), np.arange(10)*1, label='#1')
ax.plot(np.arange(10), np.arange(10)*2, label='#2')
ax.plot(np.arange(10), np.arange(10)*3, label='#3')
leg = ax.legend(labelcolor=['r', 'g', 'b'])
for text, color in zip(leg.get_texts(), ['r', 'g', 'b']):
assert mpl.colors.same_color(text.get_color(), color)
def test_legend_labelcolor_linecolor():
# test the labelcolor for labelcolor='linecolor'
fig, ax = plt.subplots()
ax.plot(np.arange(10), np.arange(10)*1, label='#1', color='r')
ax.plot(np.arange(10), np.arange(10)*2, label='#2', color='g')
ax.plot(np.arange(10), np.arange(10)*3, label='#3', color='b')
leg = ax.legend(labelcolor='linecolor')
for text, color in zip(leg.get_texts(), ['r', 'g', 'b']):
assert mpl.colors.same_color(text.get_color(), color)
def test_legend_pathcollection_labelcolor_linecolor():
# test the labelcolor for labelcolor='linecolor' on PathCollection
fig, ax = plt.subplots()
ax.scatter(np.arange(10), np.arange(10)*1, label='#1', c='r')
ax.scatter(np.arange(10), np.arange(10)*2, label='#2', c='g')
ax.scatter(np.arange(10), np.arange(10)*3, label='#3', c='b')
leg = ax.legend(labelcolor='linecolor')
for text, color in zip(leg.get_texts(), ['r', 'g', 'b']):
assert mpl.colors.same_color(text.get_color(), color)
def test_legend_pathcollection_labelcolor_linecolor_iterable():
# test the labelcolor for labelcolor='linecolor' on PathCollection
# with iterable colors
fig, ax = plt.subplots()
colors = np.array(['r', 'g', 'b', 'c', 'm'] * 2)
ax.scatter(np.arange(10), np.arange(10), label='#1', c=colors)
leg = ax.legend(labelcolor='linecolor')
text, = leg.get_texts()
assert mpl.colors.same_color(text.get_color(), 'black')
def test_legend_pathcollection_labelcolor_linecolor_cmap():
# test the labelcolor for labelcolor='linecolor' on PathCollection
# with a colormap
fig, ax = plt.subplots()
ax.scatter(np.arange(10), np.arange(10), c=np.arange(10), label='#1')
leg = ax.legend(labelcolor='linecolor')
text, = leg.get_texts()
assert mpl.colors.same_color(text.get_color(), 'black')
def test_legend_labelcolor_markeredgecolor():
# test the labelcolor for labelcolor='markeredgecolor'
fig, ax = plt.subplots()
ax.plot(np.arange(10), np.arange(10)*1, label='#1', markeredgecolor='r')
ax.plot(np.arange(10), np.arange(10)*2, label='#2', markeredgecolor='g')
ax.plot(np.arange(10), np.arange(10)*3, label='#3', markeredgecolor='b')
leg = ax.legend(labelcolor='markeredgecolor')
for text, color in zip(leg.get_texts(), ['r', 'g', 'b']):
assert mpl.colors.same_color(text.get_color(), color)
def test_legend_pathcollection_labelcolor_markeredgecolor():
# test the labelcolor for labelcolor='markeredgecolor' on PathCollection
fig, ax = plt.subplots()
ax.scatter(np.arange(10), np.arange(10)*1, label='#1', edgecolor='r')
ax.scatter(np.arange(10), np.arange(10)*2, label='#2', edgecolor='g')
ax.scatter(np.arange(10), np.arange(10)*3, label='#3', edgecolor='b')
leg = ax.legend(labelcolor='markeredgecolor')
for text, color in zip(leg.get_texts(), ['r', 'g', 'b']):
assert mpl.colors.same_color(text.get_color(), color)
def test_legend_pathcollection_labelcolor_markeredgecolor_iterable():
# test the labelcolor for labelcolor='markeredgecolor' on PathCollection
# with iterable colors
fig, ax = plt.subplots()
colors = np.array(['r', 'g', 'b', 'c', 'm'] * 2)
ax.scatter(np.arange(10), np.arange(10), label='#1', edgecolor=colors)
leg = ax.legend(labelcolor='markeredgecolor')
for text, color in zip(leg.get_texts(), ['k']):
assert mpl.colors.same_color(text.get_color(), color)
def test_legend_pathcollection_labelcolor_markeredgecolor_cmap():
# test the labelcolor for labelcolor='markeredgecolor' on PathCollection
# with a colormap
fig, ax = plt.subplots()
edgecolors = mpl.colormaps["viridis"](np.random.rand(10))
ax.scatter(
np.arange(10),
np.arange(10),
label='#1',
c=np.arange(10),
edgecolor=edgecolors,
cmap="Reds"
)
leg = ax.legend(labelcolor='markeredgecolor')
for text, color in zip(leg.get_texts(), ['k']):
assert mpl.colors.same_color(text.get_color(), color)
def test_legend_labelcolor_markerfacecolor():
# test the labelcolor for labelcolor='markerfacecolor'
fig, ax = plt.subplots()
ax.plot(np.arange(10), np.arange(10)*1, label='#1', markerfacecolor='r')
ax.plot(np.arange(10), np.arange(10)*2, label='#2', markerfacecolor='g')
ax.plot(np.arange(10), np.arange(10)*3, label='#3', markerfacecolor='b')
leg = ax.legend(labelcolor='markerfacecolor')
for text, color in zip(leg.get_texts(), ['r', 'g', 'b']):
assert mpl.colors.same_color(text.get_color(), color)
def test_legend_pathcollection_labelcolor_markerfacecolor():
# test the labelcolor for labelcolor='markerfacecolor' on PathCollection
fig, ax = plt.subplots()
ax.scatter(np.arange(10), np.arange(10)*1, label='#1', facecolor='r')
ax.scatter(np.arange(10), np.arange(10)*2, label='#2', facecolor='g')
ax.scatter(np.arange(10), np.arange(10)*3, label='#3', facecolor='b')
leg = ax.legend(labelcolor='markerfacecolor')
for text, color in zip(leg.get_texts(), ['r', 'g', 'b']):
assert mpl.colors.same_color(text.get_color(), color)
def test_legend_pathcollection_labelcolor_markerfacecolor_iterable():
# test the labelcolor for labelcolor='markerfacecolor' on PathCollection
# with iterable colors
fig, ax = plt.subplots()
colors = np.array(['r', 'g', 'b', 'c', 'm'] * 2)
ax.scatter(np.arange(10), np.arange(10), label='#1', facecolor=colors)
leg = ax.legend(labelcolor='markerfacecolor')
for text, color in zip(leg.get_texts(), ['k']):
assert mpl.colors.same_color(text.get_color(), color)
def test_legend_pathcollection_labelcolor_markfacecolor_cmap():
# test the labelcolor for labelcolor='markerfacecolor' on PathCollection
# with colormaps
fig, ax = plt.subplots()
colors = mpl.colormaps["viridis"](np.random.rand(10))
ax.scatter(
np.arange(10),
np.arange(10),
label='#1',
c=colors
)
leg = ax.legend(labelcolor='markerfacecolor')
for text, color in zip(leg.get_texts(), ['k']):
assert mpl.colors.same_color(text.get_color(), color)
@pytest.mark.parametrize('color', ('red', 'none', (.5, .5, .5)))
def test_legend_labelcolor_rcparam_single(color):
# test the rcParams legend.labelcolor for a single color
fig, ax = plt.subplots()
ax.plot(np.arange(10), np.arange(10)*1, label='#1')
ax.plot(np.arange(10), np.arange(10)*2, label='#2')
ax.plot(np.arange(10), np.arange(10)*3, label='#3')
mpl.rcParams['legend.labelcolor'] = color
leg = ax.legend()
for text in leg.get_texts():
assert mpl.colors.same_color(text.get_color(), color)
def test_legend_labelcolor_rcparam_linecolor():
# test the rcParams legend.labelcolor for a linecolor
fig, ax = plt.subplots()
ax.plot(np.arange(10), np.arange(10)*1, label='#1', color='r')
ax.plot(np.arange(10), np.arange(10)*2, label='#2', color='g')
ax.plot(np.arange(10), np.arange(10)*3, label='#3', color='b')
mpl.rcParams['legend.labelcolor'] = 'linecolor'
leg = ax.legend()
for text, color in zip(leg.get_texts(), ['r', 'g', 'b']):
assert mpl.colors.same_color(text.get_color(), color)
def test_legend_labelcolor_rcparam_markeredgecolor():
# test the labelcolor for labelcolor='markeredgecolor'
fig, ax = plt.subplots()
ax.plot(np.arange(10), np.arange(10)*1, label='#1', markeredgecolor='r')
ax.plot(np.arange(10), np.arange(10)*2, label='#2', markeredgecolor='g')
ax.plot(np.arange(10), np.arange(10)*3, label='#3', markeredgecolor='b')
mpl.rcParams['legend.labelcolor'] = 'markeredgecolor'
leg = ax.legend()
for text, color in zip(leg.get_texts(), ['r', 'g', 'b']):
assert mpl.colors.same_color(text.get_color(), color)
def test_legend_labelcolor_rcparam_markeredgecolor_short():
# test the labelcolor for labelcolor='markeredgecolor'
fig, ax = plt.subplots()
ax.plot(np.arange(10), np.arange(10)*1, label='#1', markeredgecolor='r')
ax.plot(np.arange(10), np.arange(10)*2, label='#2', markeredgecolor='g')
ax.plot(np.arange(10), np.arange(10)*3, label='#3', markeredgecolor='b')
mpl.rcParams['legend.labelcolor'] = 'mec'
leg = ax.legend()
for text, color in zip(leg.get_texts(), ['r', 'g', 'b']):
assert mpl.colors.same_color(text.get_color(), color)
def test_legend_labelcolor_rcparam_markerfacecolor():
# test the labelcolor for labelcolor='markeredgecolor'
fig, ax = plt.subplots()
ax.plot(np.arange(10), np.arange(10)*1, label='#1', markerfacecolor='r')
ax.plot(np.arange(10), np.arange(10)*2, label='#2', markerfacecolor='g')
ax.plot(np.arange(10), np.arange(10)*3, label='#3', markerfacecolor='b')
mpl.rcParams['legend.labelcolor'] = 'markerfacecolor'
leg = ax.legend()
for text, color in zip(leg.get_texts(), ['r', 'g', 'b']):
assert mpl.colors.same_color(text.get_color(), color)
def test_legend_labelcolor_rcparam_markerfacecolor_short():
# test the labelcolor for labelcolor='markeredgecolor'
fig, ax = plt.subplots()
ax.plot(np.arange(10), np.arange(10)*1, label='#1', markerfacecolor='r')
ax.plot(np.arange(10), np.arange(10)*2, label='#2', markerfacecolor='g')
ax.plot(np.arange(10), np.arange(10)*3, label='#3', markerfacecolor='b')
mpl.rcParams['legend.labelcolor'] = 'mfc'
leg = ax.legend()
for text, color in zip(leg.get_texts(), ['r', 'g', 'b']):
assert mpl.colors.same_color(text.get_color(), color)
def assert_last_legend_patch_color(histogram, leg, expected_color,
                                   facecolor=False, edgecolor=False):
    """
    Check that histogram color, legend handle color, and legend label color all
    match the expected input. Provide facecolor and edgecolor flags to clarify
    which feature to match.
    """
    # The most recently added entry is the one under test, hence [-1].
    label_color = leg.texts[-1].get_color()
    patch = leg.get_patches()[-1]
    # hist() returns (counts, bins, patches); take the first patch artist of
    # the last histogram call.
    histogram = histogram[-1][0]
    assert mpl.colors.same_color(label_color, expected_color)
    if facecolor:
        assert mpl.colors.same_color(label_color, patch.get_facecolor())
        assert mpl.colors.same_color(label_color, histogram.get_facecolor())
    if edgecolor:
        assert mpl.colors.same_color(label_color, patch.get_edgecolor())
        assert mpl.colors.same_color(label_color, histogram.get_edgecolor())
def test_legend_labelcolor_linecolor_histograms():
x = np.arange(10)
# testing c kwarg for bar, step, and stepfilled histograms
fig, ax = plt.subplots()
h = ax.hist(x, histtype='bar', color='r', label="red bar hist with a red label")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_patch_color(h, leg, 'r', facecolor=True)
h = ax.hist(x, histtype='step', color='g', label="green step hist, green label")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_patch_color(h, leg, 'g', edgecolor=True)
h = ax.hist(x, histtype='stepfilled', color='b',
label="blue stepfilled hist with a blue label")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_patch_color(h, leg, 'b', facecolor=True)
# testing c, fc, and ec combinations for bar histograms
h = ax.hist(x, histtype='bar', color='r', ec='b',
label="red bar hist with blue edges and a red label")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_patch_color(h, leg, 'r', facecolor=True)
h = ax.hist(x, histtype='bar', fc='r', ec='b',
label="red bar hist with blue edges and a red label")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_patch_color(h, leg, 'r', facecolor=True)
h = ax.hist(x, histtype='bar', fc='none', ec='b',
label="unfilled blue bar hist with a blue label")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_patch_color(h, leg, 'b', edgecolor=True)
# testing c, and ec combinations for step histograms
h = ax.hist(x, histtype='step', color='r', ec='b',
label="blue step hist with a blue label")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_patch_color(h, leg, 'b', edgecolor=True)
h = ax.hist(x, histtype='step', ec='b',
label="blue step hist with a blue label")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_patch_color(h, leg, 'b', edgecolor=True)
# testing c, fc, and ec combinations for stepfilled histograms
h = ax.hist(x, histtype='stepfilled', color='r', ec='b',
label="red stepfilled hist, blue edges, red label")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_patch_color(h, leg, 'r', facecolor=True)
h = ax.hist(x, histtype='stepfilled', fc='r', ec='b',
label="red stepfilled hist, blue edges, red label")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_patch_color(h, leg, 'r', facecolor=True)
h = ax.hist(x, histtype='stepfilled', fc='none', ec='b',
label="unfilled blue stepfilled hist, blue label")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_patch_color(h, leg, 'b', edgecolor=True)
h = ax.hist(x, histtype='stepfilled', fc='r', ec='none',
label="edgeless red stepfilled hist with a red label")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_patch_color(h, leg, 'r', facecolor=True)
def assert_last_legend_linemarker_color(line_marker, leg, expected_color, color=False,
                                        facecolor=False, edgecolor=False):
    """
    Check that line marker color, legend handle color, and legend label color all
    match the expected input. Provide color, facecolor and edgecolor flags to clarify
    which feature to match.
    """
    # The most recently added legend entry is the one under test, hence [-1].
    label_color = leg.texts[-1].get_color()
    leg_marker = leg.get_lines()[-1]
    assert mpl.colors.same_color(label_color, expected_color)
    if color:
        # Overall line color (also used for markers unless overridden).
        assert mpl.colors.same_color(label_color, leg_marker.get_color())
        assert mpl.colors.same_color(label_color, line_marker.get_color())
    if facecolor:
        assert mpl.colors.same_color(label_color, leg_marker.get_markerfacecolor())
        assert mpl.colors.same_color(label_color, line_marker.get_markerfacecolor())
    if edgecolor:
        assert mpl.colors.same_color(label_color, leg_marker.get_markeredgecolor())
        assert mpl.colors.same_color(label_color, line_marker.get_markeredgecolor())
def test_legend_labelcolor_linecolor_plot():
x = np.arange(5)
# testing line plot
fig, ax = plt.subplots()
l, = ax.plot(x, c='r', label="red line with a red label")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_linemarker_color(l, leg, 'r', color=True)
# testing c, fc, and ec combinations for maker plots
l, = ax.plot(x, 'o', c='r', label="red circles with a red label")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_linemarker_color(l, leg, 'r', color=True)
l, = ax.plot(x, 'o', c='r', mec='b', label="red circles, blue edges, red label")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_linemarker_color(l, leg, 'r', color=True)
l, = ax.plot(x, 'o', mfc='r', mec='b', label="red circles, blue edges, red label")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_linemarker_color(l, leg, 'r', facecolor=True)
# 'none' cases
l, = ax.plot(x, 'o', mfc='none', mec='b',
label="blue unfilled circles, blue label")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_linemarker_color(l, leg, 'b', edgecolor=True)
l, = ax.plot(x, 'o', mfc='r', mec='none', label="red edgeless circles, red label")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_linemarker_color(l, leg, 'r', facecolor=True)
l, = ax.plot(x, 'o', c='none', mec='none',
label="black label despite invisible circles for dummy entries")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_linemarker_color(l, leg, 'k')
def assert_last_legend_scattermarker_color(scatter_marker, leg, expected_color,
                                           facecolor=False, edgecolor=False):
    """
    Check that scatter marker color, legend handle color, and legend label color all
    match the expected input. Provide facecolor and edgecolor flags to clarify
    which feature to match.
    """
    # The most recently added legend entry is the one under test, hence [-1].
    label_color = leg.texts[-1].get_color()
    leg_handle = leg.legend_handles[-1]
    assert mpl.colors.same_color(label_color, expected_color)
    if facecolor:
        assert mpl.colors.same_color(label_color, leg_handle.get_facecolor())
        assert mpl.colors.same_color(label_color, scatter_marker.get_facecolor())
    if edgecolor:
        assert mpl.colors.same_color(label_color, leg_handle.get_edgecolor())
        assert mpl.colors.same_color(label_color, scatter_marker.get_edgecolor())
def test_legend_labelcolor_linecolor_scatter():
x = np.arange(5)
# testing c, fc, and ec combinations for scatter plots
fig, ax = plt.subplots()
s = ax.scatter(x, x, c='r', label="red circles with a red label")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_scattermarker_color(s, leg, 'r', facecolor=True)
s = ax.scatter(x, x, c='r', ec='b', label="red circles, blue edges, red label")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_scattermarker_color(s, leg, 'r', facecolor=True)
s = ax.scatter(x, x, fc='r', ec='b', label="red circles, blue edges, red label")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_scattermarker_color(s, leg, 'r', facecolor=True)
# 'none' cases
s = ax.scatter(x, x, fc='none', ec='b', label="blue unfilled circles, blue label")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_scattermarker_color(s, leg, 'b', edgecolor=True)
s = ax.scatter(x, x, fc='r', ec='none', label="red edgeless circles, red label")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_scattermarker_color(s, leg, 'r', facecolor=True)
s = ax.scatter(x, x, c='none', ec='none',
label="black label despite invisible circles for dummy entries")
leg = ax.legend(labelcolor='linecolor')
assert_last_legend_scattermarker_color(s, leg, 'k')
@pytest.mark.filterwarnings("ignore:No artists with labels found to put in legend")
def test_get_set_draggable():
    """get_draggable must track set_draggable through an on/off cycle."""
    legend = plt.legend()
    assert not legend.get_draggable()  # off by default
    for state in (True, False):
        legend.set_draggable(state)
        assert legend.get_draggable() == state
@pytest.mark.parametrize('draggable', (True, False))
def test_legend_draggable(draggable):
fig, ax = plt.subplots()
ax.plot(range(10), label='shabnams')
leg = ax.legend(draggable=draggable)
assert leg.get_draggable() is draggable
def test_alpha_handles():
    """Changing a legend handle's alpha must not leak back to the artist."""
    # hist() returns (counts, bin edges, patch artists).
    counts, bins, patches = plt.hist([1, 2, 3], alpha=0.25, label='data',
                                     color='red')
    legend = plt.legend()
    for handle in legend.legend_handles:
        handle.set_alpha(1.0)
    # RGB channels (alpha excluded via [:-1]) still match the histogram patch.
    assert handle.get_facecolor()[:-1] == patches[1].get_facecolor()[:-1]
    assert handle.get_edgecolor()[:-1] == patches[1].get_edgecolor()[:-1]
@needs_usetex
def test_usetex_no_warn(caplog):
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.serif'] = 'Computer Modern'
mpl.rcParams['text.usetex'] = True
fig, ax = plt.subplots()
ax.plot(0, 0, label='input')
ax.legend(title="My legend")
fig.canvas.draw()
assert "Font family ['serif'] not found." not in caplog.text
def test_warn_big_data_best_loc(monkeypatch):
    # Force _find_best_position to think it took a long time.
    # Every perf_counter() call advances by 1.5 s, so any timed section
    # appears slow enough to trip the "best loc is slow" warning.
    counter = itertools.count(0, step=1.5)
    monkeypatch.setattr(time, 'perf_counter', lambda: next(counter))
    fig, ax = plt.subplots()
    fig.canvas.draw()  # So that we can call draw_artist later.
    # Place line across all possible legend locations.
    x = [0.9, 0.1, 0.1, 0.9, 0.9, 0.5]
    y = [0.95, 0.95, 0.05, 0.05, 0.5, 0.5]
    ax.plot(x, y, 'o-', label='line')
    with rc_context({'legend.loc': 'best'}):
        legend = ax.legend()
        with pytest.warns(UserWarning,
                          match='Creating legend with loc="best" can be slow with large '
                          'amounts of data.') as records:
            fig.draw_artist(legend)  # Don't bother drawing the lines -- it's slow.
        # The _find_best_position method of Legend is called twice, duplicating
        # the warning message.
        assert len(records) == 2
def test_no_warn_big_data_when_loc_specified(monkeypatch):
    # Force _find_best_position to think it took a long time.
    # Every perf_counter() call advances by 1.5 s (see test above).
    counter = itertools.count(0, step=1.5)
    monkeypatch.setattr(time, 'perf_counter', lambda: next(counter))
    fig, ax = plt.subplots()
    fig.canvas.draw()
    # Place line across all possible legend locations.
    x = [0.9, 0.1, 0.1, 0.9, 0.9, 0.5]
    y = [0.95, 0.95, 0.05, 0.05, 0.5, 0.5]
    ax.plot(x, y, 'o-', label='line')
    # NOTE(review): 'best' is passed positionally here, which Axes.legend
    # would appear to treat as the *labels* argument rather than loc
    # (elsewhere in this file loc is always a keyword) — confirm this is
    # intentional and actually marks loc as user-specified.
    legend = ax.legend('best')
    fig.draw_artist(legend)  # Check that no warning is emitted.
@pytest.mark.parametrize('label_array', [['low', 'high'],
                                         ('low', 'high'),
                                         np.array(['low', 'high'])])
def test_plot_multiple_input_multiple_label(label_array):
    """ax.plot with 2D *y* distributes a sequence label over the lines."""
    fig, ax = plt.subplots()
    ax.plot([1, 2, 3], [[1, 2], [2, 5], [4, 9]], label=label_array)
    leg = ax.legend()
    assert [entry.get_text() for entry in leg.get_texts()] == ['low', 'high']
@pytest.mark.parametrize('label', ['one', 1, int])
def test_plot_multiple_input_single_label(label):
# test ax.plot() with multidimensional input
# and single label
x = [1, 2, 3]
y = [[1, 2],
[2, 5],
[4, 9]]
fig, ax = plt.subplots()
ax.plot(x, y, label=label)
leg = ax.legend()
legend_texts = [entry.get_text() for entry in leg.get_texts()]
assert legend_texts == [str(label)] * 2
def test_plot_single_input_multiple_label():
# test ax.plot() with 1D array like input
# and iterable label
x = [1, 2, 3]
y = [2, 5, 6]
fig, ax = plt.subplots()
with pytest.raises(ValueError,
match='label must be scalar or have the same length'):
ax.plot(x, y, label=['low', 'high'])
def test_plot_single_input_list_label():
fig, ax = plt.subplots()
line, = ax.plot([[0], [1]], label=['A'])
assert line.get_label() == 'A'
def test_plot_multiple_label_incorrect_length_exception():
    """ax.plot must reject a label sequence whose length != number of lines."""
    # Setup stays outside the raises block so an unrelated failure there
    # cannot be mistaken for the expected ValueError.
    x = [1, 2, 3]
    y = [[1, 2],
         [2, 5],
         [4, 9]]
    label = ['high', 'low', 'medium']  # 3 labels for only 2 lines
    fig, ax = plt.subplots()
    with pytest.raises(ValueError,
                       match='label must be scalar or have the same length'):
        ax.plot(x, y, label=label)
def test_legend_face_edgecolor():
# Smoke test for PolyCollection legend handler with 'face' edgecolor.
fig, ax = plt.subplots()
ax.fill_between([0, 1, 2], [1, 2, 3], [2, 3, 4],
facecolor='r', edgecolor='face', label='Fill')
ax.legend()
def test_legend_text_axes():
fig, ax = plt.subplots()
ax.plot([1, 2], [3, 4], label='line')
leg = ax.legend()
assert leg.axes is ax
assert leg.get_texts()[0].axes is ax
def test_handlerline2d():
# Test marker consistency for monolithic Line2D legend handler (#11357).
fig, ax = plt.subplots()
ax.scatter([0, 1], [0, 1], marker="v")
handles = [mlines.Line2D([0], [0], marker="v")]
leg = ax.legend(handles, ["Aardvark"], numpoints=1)
assert handles[0].get_marker() == leg.legend_handles[0].get_marker()
def test_subfigure_legend():
# Test that legend can be added to subfigure (#20723)
subfig = plt.figure().subfigures()
ax = subfig.subplots()
ax.plot([0, 1], [0, 1], label="line")
leg = subfig.legend()
assert leg.get_figure(root=False) is subfig
def test_setting_alpha_keeps_polycollection_color():
pc = plt.fill_between([0, 1], [2, 3], color='#123456', label='label')
patch = plt.legend().get_patches()[0]
patch.set_alpha(0.5)
assert patch.get_facecolor()[:3] == tuple(pc.get_facecolor()[0][:3])
assert patch.get_edgecolor()[:3] == tuple(pc.get_edgecolor()[0][:3])
def test_legend_markers_from_line2d():
# Test that markers can be copied for legend lines (#17960)
_markers = ['.', '*', 'v']
fig, ax = plt.subplots()
lines = [mlines.Line2D([0], [0], ls='None', marker=mark)
for mark in _markers]
labels = ["foo", "bar", "xyzzy"]
markers = [line.get_marker() for line in lines]
legend = ax.legend(lines, labels)
new_markers = [line.get_marker() for line in legend.get_lines()]
new_labels = [text.get_text() for text in legend.get_texts()]
assert markers == new_markers == _markers
assert labels == new_labels
@check_figures_equal()
def test_ncol_ncols(fig_test, fig_ref):
# Test that both ncol and ncols work
strings = ["a", "b", "c", "d", "e", "f"]
ncols = 3
fig_test.legend(strings, ncol=ncols)
fig_ref.legend(strings, ncols=ncols)
def test_loc_invalid_tuple_exception():
# check that exception is raised if the loc arg
# of legend is not a 2-tuple of numbers
fig, ax = plt.subplots()
with pytest.raises(ValueError, match=('loc must be string, coordinate '
'tuple, or an integer 0-10, not \\(1.1,\\)')):
ax.legend(loc=(1.1, ), labels=["mock data"])
with pytest.raises(ValueError, match=('loc must be string, coordinate '
'tuple, or an integer 0-10, not \\(0.481, 0.4227, 0.4523\\)')):
ax.legend(loc=(0.481, 0.4227, 0.4523), labels=["mock data"])
with pytest.raises(ValueError, match=('loc must be string, coordinate '
'tuple, or an integer 0-10, not \\(0.481, \'go blue\'\\)')):
ax.legend(loc=(0.481, "go blue"), labels=["mock data"])
def test_loc_valid_tuple():
fig, ax = plt.subplots()
ax.legend(loc=(0.481, 0.442), labels=["mock data"])
ax.legend(loc=(1, 2), labels=["mock data"])
def test_loc_valid_list():
fig, ax = plt.subplots()
ax.legend(loc=[0.481, 0.442], labels=["mock data"])
ax.legend(loc=[1, 2], labels=["mock data"])
def test_loc_invalid_list_exception():
fig, ax = plt.subplots()
with pytest.raises(ValueError, match=('loc must be string, coordinate '
'tuple, or an integer 0-10, not \\[1.1, 2.2, 3.3\\]')):
ax.legend(loc=[1.1, 2.2, 3.3], labels=["mock data"])
def test_loc_invalid_type():
fig, ax = plt.subplots()
with pytest.raises(ValueError, match=("loc must be string, coordinate "
"tuple, or an integer 0-10, not {'not': True}")):
ax.legend(loc={'not': True}, labels=["mock data"])
def test_loc_validation_numeric_value():
    """Integer loc codes 0-10 are accepted; values outside that range raise."""
    fig, ax = plt.subplots()
    labels = ["mock data"]
    for code in (0, 1, 5, 10):
        ax.legend(loc=code, labels=labels)
    for bad in (11, -1):
        with pytest.raises(ValueError,
                           match='loc must be string, coordinate '
                                 f'tuple, or an integer 0-10, not {bad}'):
            ax.legend(loc=bad, labels=labels)
def test_loc_validation_string_value():
    """Every named loc string is accepted; an unknown one raises ValueError."""
    fig, ax = plt.subplots()
    labels = ["mock data"]
    # Deduplicated from the old copy-paste list, which checked 'best' and
    # 'upper right' twice each.
    for loc in ('best', 'upper right', 'upper left', 'lower left',
                'lower right', 'right', 'center left', 'center right',
                'lower center', 'upper center'):
        ax.legend(loc=loc, labels=labels)
    with pytest.raises(ValueError, match="'wrong' is not a valid value for"):
        ax.legend(loc='wrong', labels=labels)
def test_legend_handle_label_mismatch():
pl1, = plt.plot(range(10))
pl2, = plt.plot(range(10))
with pytest.warns(UserWarning, match="number of handles and labels"):
legend = plt.legend(handles=[pl1, pl2], labels=["pl1", "pl2", "pl3"])
assert len(legend.legend_handles) == 2
assert len(legend.get_texts()) == 2
def test_legend_handle_label_mismatch_no_len():
pl1, = plt.plot(range(10))
pl2, = plt.plot(range(10))
legend = plt.legend(handles=iter([pl1, pl2]),
labels=iter(["pl1", "pl2", "pl3"]))
assert len(legend.legend_handles) == 2
assert len(legend.get_texts()) == 2
def test_legend_nolabels_warning():
    plt.plot([1, 2, 3])
    # NOTE(review): pytest.raises on a UserWarning presumably works because
    # the suite promotes warnings to errors — confirm the warnings filter.
    with pytest.raises(UserWarning, match="No artists with labels found"):
        plt.legend()
@pytest.mark.filterwarnings("ignore:No artists with labels found to put in legend")
def test_legend_nolabels_draw():
plt.plot([1, 2, 3])
plt.legend()
assert plt.gca().get_legend() is not None
def test_legend_loc_polycollection():
# Test that the legend is placed in the correct
# position for 'best' for polycollection
x = [3, 4, 5]
y1 = [1, 1, 1]
y2 = [5, 5, 5]
leg_bboxes = []
fig, axs = plt.subplots(ncols=2, figsize=(10, 5))
for ax, loc in zip(axs.flat, ('best', 'lower left')):
ax.fill_between(x, y1, y2, color='gray', alpha=0.5, label='Shaded Area')
ax.set_xlim(0, 6)
ax.set_ylim(-1, 5)
leg = ax.legend(loc=loc)
fig.canvas.draw()
leg_bboxes.append(
leg.get_window_extent().transformed(ax.transAxes.inverted()))
assert_allclose(leg_bboxes[1].bounds, leg_bboxes[0].bounds)
def test_legend_text():
# Test that legend is place in the correct
# position for 'best' when there is text in figure
fig, axs = plt.subplots(ncols=2, figsize=(10, 5))
leg_bboxes = []
for ax, loc in zip(axs.flat, ('best', 'lower left')):
x = [1, 2]
y = [2, 1]
ax.plot(x, y, label='plot name')
ax.text(1.5, 2, 'some text blahblah', verticalalignment='top')
leg = ax.legend(loc=loc)
fig.canvas.draw()
leg_bboxes.append(
leg.get_window_extent().transformed(ax.transAxes.inverted()))
assert_allclose(leg_bboxes[1].bounds, leg_bboxes[0].bounds)
def test_legend_annotate():
fig, ax = plt.subplots()
ax.plot([1, 2, 3], label="Line")
ax.annotate("a", xy=(1, 1))
ax.legend(loc=0)
with mock.patch.object(
fig, '_get_renderer', wraps=fig._get_renderer) as mocked_get_renderer:
fig.savefig(io.BytesIO())
# Finding the legend position should not require _get_renderer to be called
mocked_get_renderer.assert_not_called()
def test_boxplot_legend_labels():
    """boxplot(label=...) must produce a legend entry per box."""
    # Test that legend entries are generated when passing `label`.
    np.random.seed(19680801)
    data = np.random.random((10, 4))
    fig, axs = plt.subplots(nrows=1, ncols=4)
    legend_labels = ['box A', 'box B', 'box C', 'box D']
    # Testing legend labels and patch passed to legend.
    bp1 = axs[0].boxplot(data, patch_artist=True, label=legend_labels)
    assert [v.get_label() for v in bp1['boxes']] == legend_labels
    handles, labels = axs[0].get_legend_handles_labels()
    assert labels == legend_labels
    assert all(isinstance(h, mpl.patches.PathPatch) for h in handles)
    # Testing legend without `box`.
    bp2 = axs[1].boxplot(data, label=legend_labels, showbox=False)
    # Without a box, the legend entries should be passed from the medians.
    assert [v.get_label() for v in bp2['medians']] == legend_labels
    handles, labels = axs[1].get_legend_handles_labels()
    assert labels == legend_labels
    assert all(isinstance(h, mpl.lines.Line2D) for h in handles)
    # Testing legend with number of labels different from number of boxes.
    # (No assignment: the call raises, so a result is never bound.)
    with pytest.raises(ValueError, match='values must have same the length'):
        axs[2].boxplot(data, label=legend_labels[:-1])
    # Test that for a string label, only the first box gets a label.
    bp4 = axs[3].boxplot(data, label='box A')
    assert bp4['medians'][0].get_label() == 'box A'
    assert all(x.get_label().startswith("_") for x in bp4['medians'][1:])
def test_patchcollection_legend():
    # Test that PatchCollection labels show up in legend and preserve visual
    # properties (issue #23998)
    fig, ax = plt.subplots()
    pc = mcollections.PatchCollection(
        [mpatches.Circle((0, 0), 1), mpatches.Circle((2, 0), 1)],
        label="patch collection",
        facecolor='red',
        edgecolor='blue',
        linewidths=3,
        linestyle='--',
    )
    ax.add_collection(pc)
    ax.autoscale_view()
    leg = ax.legend()
    # Check that the legend contains our label
    assert len(leg.get_texts()) == 1
    assert leg.get_texts()[0].get_text() == "patch collection"
    # Check that the legend handle exists and has correct visual properties
    assert len(leg.legend_handles) == 1
    legend_patch = leg.legend_handles[0]
    # Collections hold per-element property arrays; [0] picks the (uniform)
    # first entry to compare against the single legend handle.
    assert mpl.colors.same_color(legend_patch.get_facecolor(),
                                 pc.get_facecolor()[0])
    assert mpl.colors.same_color(legend_patch.get_edgecolor(),
                                 pc.get_edgecolor()[0])
    assert legend_patch.get_linewidth() == pc.get_linewidths()[0]
    assert legend_patch.get_linestyle() == pc.get_linestyles()[0]
def test_patchcollection_legend_empty():
    """An empty PatchCollection must still yield a usable legend entry."""
    fig, ax = plt.subplots()
    empty_pc = mcollections.PatchCollection([], label="empty collection")
    ax.add_collection(empty_pc)
    leg = ax.legend()  # must not crash on the empty collection
    # The label still appears and a handle is produced for it.
    assert [t.get_text() for t in leg.get_texts()] == ["empty collection"]
    assert len(leg.legend_handles) == 1
| TestLegendFigureFunction |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_multiarray.py | {
"start": 239657,
"end": 239921
} | class ____(TestCase):
def test_arrays_not_hashable(self):
x = np.ones(3)
assert_raises(TypeError, hash, x)
def test_collections_hashable(self):
x = np.array([])
assert_(not isinstance(x, collections.abc.Hashable))
| TestHashing |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_managed_kafka.py | {
"start": 5429,
"end": 6488
} | class ____:
@mock.patch(MANAGED_KAFKA_PATH.format("types.Cluster.to_dict"))
@mock.patch(MANAGED_KAFKA_PATH.format("ManagedKafkaHook"))
def test_execute(self, mock_hook, to_dict_mock):
op = ManagedKafkaGetClusterOperator(
task_id=TASK_ID,
cluster_id=TEST_CLUSTER_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
location=GCP_LOCATION,
project_id=GCP_PROJECT,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.get_cluster.assert_called_once_with(
location=GCP_LOCATION,
project_id=GCP_PROJECT,
cluster_id=TEST_CLUSTER_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
| TestManagedKafkaGetClusterOperator |
python | ZoranPandovski__al-go-rithms | sort/radix_sort/python/radixsort.py | {
"start": 78,
"end": 1845
} | class ____:
def __init__(self,a):
self.a = a
def result(self):
maxElement = max(self.a)
exp = 1
while int(maxElement/exp) > 0:
self.countingsort(exp)
exp *= 10
return self.a
def countingsort(self,exp):
position = [0]*(10)
b = [0]*(len(self.a))
for i in range(len(self.a)):
index = int(self.a[i]/exp)
position[index%10] += 1
for i in range(1,10):
position[i] += position[i-1]
for i in range(len(self.a)-1,-1,-1):
index = int(self.a[i]/exp)
b[position[index%10]-1] = self.a[i]
position[index%10] -= 1
self.a = b
if __name__ == '__main__':
from random import randint
import unittest
class TestRadixSort(unittest.TestCase):
def test_random_set(self):
num_case = 20
min_val = 1
max_val = 5
min_length = 1
max_length = 10
check_match = lambda a, b: sum([i==j for i,j in zip(a,b)]) == len(a)
for i in range(num_case):
input_sequence = [randint(min_val,max_val) for i in range(randint(min_length,max_length))]
radix_sorted_sequence = RadixSort(input_sequence).result()
builtin_sorted_sequence = sorted(input_sequence)
debug_msg = '\ninput ' + str(input_sequence) + \
'\noutput ' + str(radix_sorted_sequence) + \
'\ntrue output ' + str(builtin_sorted_sequence)
self.assertTrue(check_match(builtin_sorted_sequence, radix_sorted_sequence), msg=debug_msg)
unittest.main()
| RadixSort |
python | plotly__plotly.py | plotly/graph_objs/histogram2dcontour/contours/_labelfont.py | {
"start": 233,
"end": 10116
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "histogram2dcontour.contours"
_path_str = "histogram2dcontour.contours.labelfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Labelfont object
Sets the font used for labeling the contour levels. The default
color comes from the lines, if shown. The default family and
size come from `layout.font`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.histogram2dcon
tour.contours.Labelfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Labelfont
"""
super().__init__("labelfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.histogram2dcontour.contours.Labelfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram2dcontour.contours.Labelfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Labelfont |
python | langchain-ai__langchain | libs/partners/prompty/langchain_prompty/core.py | {
"start": 1347,
"end": 5650
} | class ____(BaseModel):
"""Base Prompty model."""
# Metadata
name: str = Field(default="")
description: str = Field(default="")
authors: list[str] = Field(default=[])
tags: list[str] = Field(default=[])
version: str = Field(default="")
base: str = Field(default="")
basePrompty: Prompty | None = Field(default=None)
# Model
model: ModelSettings = Field(default_factory=ModelSettings)
# Sample
sample: dict = Field(default={})
# Input / output
inputs: dict[str, PropertySettings] = Field(default={})
outputs: dict[str, PropertySettings] = Field(default={})
# Template
template: TemplateSettings
file: FilePath = Field(default="") # type: ignore[assignment]
content: str = Field(default="")
def to_safe_dict(self) -> dict[str, Any]:
d = {}
for k, v in self:
if v != "" and v != {} and v != [] and v is not None:
if k == "model":
d[k] = v.model_dump_safe()
elif k == "template":
d[k] = v.model_dump()
elif k == "inputs" or k == "outputs":
d[k] = {k: v.model_dump() for k, v in v.items()}
elif k == "file":
d[k] = (
str(self.file.as_posix())
if isinstance(self.file, Path)
else self.file
)
elif k == "basePrompty":
# No need to serialize basePrompty
continue
else:
d[k] = v
return d
# Generate json representation of the prompty
def to_safe_json(self) -> str:
d = self.to_safe_dict()
return json.dumps(d)
@staticmethod
def normalize(attribute: Any, parent: Path, env_error: bool = True) -> Any:
if isinstance(attribute, str):
attribute = attribute.strip()
if attribute.startswith("${") and attribute.endswith("}"):
variable = attribute[2:-1].split(":")
if variable[0] in os.environ.keys():
return os.environ[variable[0]]
else:
if len(variable) > 1:
return variable[1]
else:
if env_error:
raise ValueError(
f"Variable {variable[0]} not found in environment"
)
else:
return ""
elif (
attribute.startswith("file:")
and Path(parent / attribute.split(":")[1]).exists()
):
with open(parent / attribute.split(":")[1]) as f:
items = json.load(f)
if isinstance(items, list):
return [Prompty.normalize(value, parent) for value in items]
elif isinstance(items, dict):
return {
key: Prompty.normalize(value, parent)
for key, value in items.items()
}
else:
return items
else:
return attribute
elif isinstance(attribute, list):
return [Prompty.normalize(value, parent) for value in attribute]
elif isinstance(attribute, dict):
return {
key: Prompty.normalize(value, parent)
for key, value in attribute.items()
}
else:
return attribute
def param_hoisting(
top: dict[str, Any], bottom: dict[str, Any], top_key: Any = None
) -> dict[str, Any]:
"""Merge two dictionaries with hoisting of parameters from bottom to top.
Args:
top: The top dictionary.
bottom: The bottom dictionary.
top_key: The key to hoist from the bottom to the top.
Returns:
The merged dictionary.
"""
if top_key:
new_dict = {**top[top_key]} if top_key in top else {}
else:
new_dict = {**top}
for key, value in bottom.items():
if key not in new_dict:
new_dict[key] = value
return new_dict
| Prompty |
python | joke2k__faker | faker/providers/internet/fi_FI/__init__.py | {
"start": 46,
"end": 332
} | class ____(InternetProvider):
free_email_domains = (
"gmail.com",
"googlemail.com",
"hotmail.com",
"suomi24.fi",
"kolumbus.fi",
"luukku.com",
"surffi.net",
)
tlds = ("com", "com", "com", "fi", "fi", "net", "org")
| Provider |
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/struct_store/sql_query.py | {
"start": 23030,
"end": 25039
} | class ____(BaseSQLTableQueryEngine):
"""SQL Table retriever query engine."""
def __init__(
self,
sql_database: SQLDatabase,
table_retriever: ObjectRetriever[SQLTableSchema],
rows_retrievers: Optional[dict[str, BaseRetriever]] = None,
cols_retrievers: Optional[dict[str, dict[str, BaseRetriever]]] = None,
llm: Optional[LLM] = None,
text_to_sql_prompt: Optional[BasePromptTemplate] = None,
context_query_kwargs: Optional[dict] = None,
synthesize_response: bool = True,
response_synthesis_prompt: Optional[BasePromptTemplate] = None,
refine_synthesis_prompt: Optional[BasePromptTemplate] = None,
context_str_prefix: Optional[str] = None,
sql_only: bool = False,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
self._sql_retriever = NLSQLRetriever(
sql_database,
llm=llm,
text_to_sql_prompt=text_to_sql_prompt,
context_query_kwargs=context_query_kwargs,
table_retriever=table_retriever,
rows_retrievers=rows_retrievers,
cols_retrievers=cols_retrievers,
context_str_prefix=context_str_prefix,
sql_only=sql_only,
callback_manager=callback_manager,
verbose=kwargs.get("verbose", False),
)
super().__init__(
synthesize_response=synthesize_response,
response_synthesis_prompt=response_synthesis_prompt,
refine_synthesis_prompt=refine_synthesis_prompt,
llm=llm,
callback_manager=callback_manager,
**kwargs,
)
@property
def sql_retriever(self) -> NLSQLRetriever:
"""Get SQL retriever."""
return self._sql_retriever
# legacy
GPTNLStructStoreQueryEngine = NLStructStoreQueryEngine
GPTSQLStructStoreQueryEngine = SQLStructStoreQueryEngine
| SQLTableRetrieverQueryEngine |
python | ethereum__web3.py | web3/geth.py | {
"start": 708,
"end": 1202
} | class ____(Module):
"""
https://geth.ethereum.org/docs/interacting-with-geth/rpc/ns-txpool
"""
is_async = False
content: Method[Callable[[], TxPoolContent]] = Method(
RPC.txpool_content,
is_property=True,
)
inspect: Method[Callable[[], TxPoolInspect]] = Method(
RPC.txpool_inspect,
is_property=True,
)
status: Method[Callable[[], TxPoolStatus]] = Method(
RPC.txpool_status,
is_property=True,
)
| GethTxPool |
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/ViewBox/ViewBox.py | {
"start": 377,
"end": 875
} | class ____(object):
def __init__(self):
self._items = []
def append(self, obj):
#Add backwards to iterate backwards (to make iterating more efficient on removal).
self._items.insert(0, weakref.ref(obj))
def __iter__(self):
i = len(self._items)-1
while i >= 0:
ref = self._items[i]
d = ref()
if d is None:
del self._items[i]
else:
yield d
i -= 1
| WeakList |
python | doocs__leetcode | solution/1400-1499/1469.Find All The Lonely Nodes/Solution.py | {
"start": 192,
"end": 663
} | class ____:
def getLonelyNodes(self, root: Optional[TreeNode]) -> List[int]:
def dfs(root: Optional[TreeNode]):
if root is None or root.left == root.right:
return
if root.left is None:
ans.append(root.right.val)
if root.right is None:
ans.append(root.left.val)
dfs(root.left)
dfs(root.right)
ans = []
dfs(root)
return ans
| Solution |
python | PyCQA__pylint | tests/functional/r/redefined/redefined_slots.py | {
"start": 193,
"end": 313
} | class ____:
"""Class defining the `a`, `b` & `deque.__name__` slots"""
__slots__ = ("a", "b", deque.__name__)
| Base |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP008.py | {
"start": 2197,
"end": 2727
} | class ____(BaseClass):
def with_comments(self):
super(
# super helpful comment
ClassForCommentEnthusiasts,
self
).f()
super(
ClassForCommentEnthusiasts,
# even more helpful comment
self
).f()
super(
ClassForCommentEnthusiasts,
self
# also a comment
).f()
# Issue #19096: super calls with keyword arguments should emit diagnostic but not be fixed
| ClassForCommentEnthusiasts |
python | kubernetes-client__python | kubernetes/client/models/v1alpha1_group_version_resource.py | {
"start": 383,
"end": 5118
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'group': 'str',
'resource': 'str',
'version': 'str'
}
attribute_map = {
'group': 'group',
'resource': 'resource',
'version': 'version'
}
def __init__(self, group=None, resource=None, version=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1GroupVersionResource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._group = None
self._resource = None
self._version = None
self.discriminator = None
if group is not None:
self.group = group
if resource is not None:
self.resource = resource
if version is not None:
self.version = version
@property
def group(self):
"""Gets the group of this V1alpha1GroupVersionResource. # noqa: E501
The name of the group. # noqa: E501
:return: The group of this V1alpha1GroupVersionResource. # noqa: E501
:rtype: str
"""
return self._group
@group.setter
def group(self, group):
"""Sets the group of this V1alpha1GroupVersionResource.
The name of the group. # noqa: E501
:param group: The group of this V1alpha1GroupVersionResource. # noqa: E501
:type: str
"""
self._group = group
@property
def resource(self):
"""Gets the resource of this V1alpha1GroupVersionResource. # noqa: E501
The name of the resource. # noqa: E501
:return: The resource of this V1alpha1GroupVersionResource. # noqa: E501
:rtype: str
"""
return self._resource
@resource.setter
def resource(self, resource):
"""Sets the resource of this V1alpha1GroupVersionResource.
The name of the resource. # noqa: E501
:param resource: The resource of this V1alpha1GroupVersionResource. # noqa: E501
:type: str
"""
self._resource = resource
@property
def version(self):
"""Gets the version of this V1alpha1GroupVersionResource. # noqa: E501
The name of the version. # noqa: E501
:return: The version of this V1alpha1GroupVersionResource. # noqa: E501
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this V1alpha1GroupVersionResource.
The name of the version. # noqa: E501
:param version: The version of this V1alpha1GroupVersionResource. # noqa: E501
:type: str
"""
self._version = version
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1GroupVersionResource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1GroupVersionResource):
return True
return self.to_dict() != other.to_dict()
| V1alpha1GroupVersionResource |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_gtin_variable_measure_trade_item.py | {
"start": 2075,
"end": 4799
} | class ____(ColumnMapExpectation):
"""Expect column values to be GTIN variable measure trade item."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"all_variable_measure_trade_item": [
"99782894508094",
"94000101613603",
"92345678901282",
"90811068011975",
"90188781000174",
],
"some_other": [
"09782894508091",
"04000101613600",
"00041333704647",
"00811068011972",
"92345678901282",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "all_variable_measure_trade_item"},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "some_other", "mostly": 1},
"out": {
"success": False,
},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.gtin_variable_measure_trade_item"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": [
"hackathon-22",
"experimental",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@szecsip", # Don't forget to add your github handle here!
],
"requirements": ["gtin"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeGtinVariableMeasureTradeItem().print_diagnostic_checklist()
| ExpectColumnValuesToBeGtinVariableMeasureTradeItem |
python | jazzband__django-waffle | test_app/models.py | {
"start": 720,
"end": 809
} | class ____(AbstractBaseSample):
"""Demonstrates custom switch behavior."""
| CustomSample |
python | pypa__warehouse | tests/unit/metrics/test_event_handlers.py | {
"start": 3930,
"end": 7283
} | class ____:
def test_without_timings(self, pyramid_request, metrics):
on_new_response(pretend.stub(request=pyramid_request))
assert metrics.timing.calls == []
def test_without_route(self, pyramid_request, metrics):
response = pretend.stub(status_code="200")
new_request = datetime.datetime.now(datetime.UTC)
new_response = new_request + datetime.timedelta(seconds=1)
pyramid_request.timings = {"new_request_start": new_request.timestamp() * 1000}
pyramid_request.matched_route = None
with freezegun.freeze_time(new_response):
on_new_response(pretend.stub(request=pyramid_request, response=response))
assert metrics.timing.calls == [
pretend.call(
"pyramid.request.duration.total",
1000,
tags=["status_code:200", "status_type:2xx"],
)
]
assert pyramid_request.timings == {
"new_request_start": new_request.timestamp() * 1000,
"request_duration": 1000.0,
}
def test_without_render(self, pyramid_request, metrics):
response = pretend.stub(status_code="200")
new_request = datetime.datetime.now(datetime.UTC)
new_response = new_request + datetime.timedelta(seconds=1)
pyramid_request.timings = {"new_request_start": new_request.timestamp() * 1000}
pyramid_request.matched_route = pretend.stub(name="thing")
with freezegun.freeze_time(new_response):
on_new_response(pretend.stub(request=pyramid_request, response=response))
assert metrics.timing.calls == [
pretend.call(
"pyramid.request.duration.total",
1000,
tags=["route:thing", "status_code:200", "status_type:2xx"],
)
]
assert pyramid_request.timings == {
"new_request_start": new_request.timestamp() * 1000,
"request_duration": 1000.0,
}
def test_with_render(self, pyramid_request, metrics):
response = pretend.stub(status_code="200")
new_request = datetime.datetime.now(datetime.UTC)
before_render = new_request + datetime.timedelta(seconds=1)
new_response = new_request + datetime.timedelta(seconds=2)
pyramid_request.timings = {
"new_request_start": new_request.timestamp() * 1000,
"before_render_start": before_render.timestamp() * 1000,
}
pyramid_request.matched_route = pretend.stub(name="thing")
with freezegun.freeze_time(new_response):
on_new_response(pretend.stub(request=pyramid_request, response=response))
assert metrics.timing.calls == [
pretend.call(
"pyramid.request.duration.template_render", 1000, tags=["route:thing"]
),
pretend.call(
"pyramid.request.duration.total",
2000,
tags=["route:thing", "status_code:200", "status_type:2xx"],
),
]
assert pyramid_request.timings == {
"new_request_start": new_request.timestamp() * 1000,
"before_render_start": before_render.timestamp() * 1000,
"template_render_duration": 1000.0,
"request_duration": 2000.0,
}
| TestOnNewResponse |
python | doocs__leetcode | solution/1200-1299/1228.Missing Number In Arithmetic Progression/Solution.py | {
"start": 0,
"end": 135
} | class ____:
def missingNumber(self, arr: List[int]) -> int:
return (arr[0] + arr[-1]) * (len(arr) + 1) // 2 - sum(arr)
| Solution |
python | cherrypy__cherrypy | cherrypy/test/test_core.py | {
"start": 28903,
"end": 30247
} | class ____(helper.CPWebCase):
@staticmethod
def setup_server():
def break_header():
# Add a header after finalize that is invalid
cherrypy.serving.response.header_list.append((2, 3))
cherrypy.tools.break_header = cherrypy.Tool(
'on_end_resource',
break_header,
)
class Root:
@cherrypy.expose
def index(self):
return 'hello'
@cherrypy.config(**{'tools.break_header.on': True})
def start_response_error(self):
return 'salud!'
@cherrypy.expose
def stat(self, path):
with cherrypy.HTTPError.handle(OSError, 404):
os.stat(path)
root = Root()
cherrypy.tree.mount(root)
def test_start_response_error(self):
self.getPage('/start_response_error')
self.assertStatus(500)
self.assertInBody(
'TypeError: response.header_list key 2 is not a byte string.',
)
def test_contextmanager(self):
self.getPage('/stat/missing')
self.assertStatus(404)
body_text = self.body.decode('utf-8')
assert (
'No such file or directory' in body_text
or 'cannot find the file specified' in body_text
)
| ErrorTests |
python | Lightning-AI__lightning | src/lightning/fabric/wrappers.py | {
"start": 1790,
"end": 3762
} | class ____:
def __init__(self, optimizer: Optimizer, strategy: Strategy, callbacks: Optional[list[Callable]] = None) -> None:
"""FabricOptimizer is a thin wrapper around the :class:`~torch.optim.Optimizer` that delegates the optimizer
step calls to the strategy.
The underlying wrapped optimizer object can be accessed via the property :attr:`optimizer`.
Args:
optimizer: The optimizer to wrap
strategy: Reference to the strategy for handling the optimizer step
"""
self._optimizer = optimizer
self._strategy = strategy
self._callbacks = callbacks or []
# imitate the class of the wrapped object to make isinstance checks work
self.__class__ = type("Fabric" + optimizer.__class__.__name__, (self.__class__, optimizer.__class__), {})
@property
def optimizer(self) -> Optimizer:
return self._optimizer
def state_dict(self) -> dict[str, Tensor]:
return self._strategy.get_optimizer_state(self.optimizer)
def load_state_dict(self, state_dict: dict[str, Tensor]) -> None:
self.optimizer.load_state_dict(state_dict)
def step(self, closure: Optional[Callable] = None) -> Any:
kwargs = {"closure": closure} if closure is not None else {}
if hasattr(self._strategy, "model") and isinstance(self._strategy.model, Optimizable):
# only DeepSpeed defines this
optimizer = self._strategy.model
else:
optimizer = self.optimizer
output = self._strategy.optimizer_step(
optimizer,
**kwargs,
)
for callback in self._callbacks:
hook = getattr(callback, "on_after_optimizer_step", None)
if callable(hook):
hook(strategy=self._strategy, optimizer=optimizer)
return output
def __getattr__(self, item: Any) -> Any:
return getattr(self._optimizer, item)
| _FabricOptimizer |
python | ray-project__ray | rllib/core/models/tests/test_cnn_encoders.py | {
"start": 337,
"end": 3939
} | class ____(unittest.TestCase):
def test_cnn_encoders(self):
"""Tests building CNN encoders properly and checks for correct architecture."""
# Loop through permutations of hyperparameters.
inputs_dimss = [
[96, 96, 3],
[96, 96, 1],
[84, 84, 3],
[84, 84, 1],
[64, 64, 3],
[64, 64, 1],
[42, 42, 3],
[42, 42, 1],
[10, 10, 3],
]
cnn_activations = [None, "linear", "relu"]
cnn_use_layernorms = [False, True]
cnn_use_biases = [False, True]
for permutation in itertools.product(
inputs_dimss,
cnn_activations,
cnn_use_layernorms,
cnn_use_biases,
):
(
inputs_dims,
cnn_activation,
cnn_use_layernorm,
cnn_use_bias,
) = permutation
filter_specifiers = get_filter_config(inputs_dims)
print(
f"Testing ...\n"
f"input_dims: {inputs_dims}\n"
f"cnn_filter_specifiers: {filter_specifiers}\n"
f"cnn_activation: {cnn_activation}\n"
f"cnn_use_layernorm: {cnn_use_layernorm}\n"
f"cnn_use_bias: {cnn_use_bias}\n"
)
config = CNNEncoderConfig(
input_dims=inputs_dims,
cnn_filter_specifiers=filter_specifiers,
cnn_activation=cnn_activation,
cnn_use_layernorm=cnn_use_layernorm,
cnn_use_bias=cnn_use_bias,
)
# Use a ModelChecker to compare all added models (different frameworks)
# with each other.
model_checker = ModelChecker(config)
# Add this framework version of the model to our checker.
outputs = model_checker.add(framework="torch")
# Confirm that the config conputed the correct (actual) output dims.
self.assertEqual(outputs[ENCODER_OUT].shape, (1, config.output_dims[0]))
# Check all added models against each other.
model_checker.check()
def test_cnn_encoders_valid_padding(self):
"""Tests building CNN encoders with valid padding."""
inputs_dims = [42, 42, 3]
# Test filter specifier with "valid"-padding setting in it.
# Historical fun fact: The following was our old default CNN setup for
# (42, 42, 3) Atari image spaces. The last layer, with its hard-coded :( padding
# setting was narrowing down the image size to 1x1, so we could use the last
# layer already as a 256-sized pre-logits layer (normally done by a Dense).
filter_specifiers = [[16, 4, 2, "same"], [32, 4, 2], [256, 11, 1, "valid"]]
config = CNNEncoderConfig(
input_dims=inputs_dims,
cnn_filter_specifiers=filter_specifiers,
)
# Use a ModelChecker to compare all added models (different frameworks)
# with each other.
model_checker = ModelChecker(config)
# Add this framework version of the model to our checker.
outputs = model_checker.add(framework="torch")
# Confirm that the config conputed the correct (actual) output dims.
self.assertEqual(outputs[ENCODER_OUT].shape, (1, config.output_dims[0]))
# Check all added models against each other.
model_checker.check()
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| TestCNNEncoders |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1570276,
"end": 1570929
} | class ____(OffsetDef):
"""
ValueDefnumber schema wrapper.
Definition object for a constant value (primitive value or gradient definition) of an
encoding channel.
Parameters
----------
value : float
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_schema = {"$ref": "#/definitions/ValueDef<number>"}
def __init__(self, value: Optional[float] = Undefined, **kwds):
super().__init__(value=value, **kwds)
| ValueDefnumber |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/multiple_sources.py | {
"start": 207,
"end": 2597
} | class ____:
def __init__(self, id) -> None:
self.id = id
def send(self, vc) -> None: ...
@classmethod
def get(cls, id) -> "Node":
return cls(id)
def user_controlled_input():
return "evil"
def permissive_context():
return 0
def combine_tainted_user_and_dangerous_vc():
id = user_controlled_input()
vc = permissive_context()
Node.get(id).send(vc)
def demonstrate_triggered_context(vc):
id = user_controlled_input()
Node.get(id).send(vc)
def demonstrate_triggered_input(id):
vc = permissive_context()
Node.get(id).send(vc)
def issue_with_triggered_input():
id = user_controlled_input()
demonstrate_triggered_input(id)
def issue_with_triggered_context():
vc = permissive_context()
demonstrate_triggered_context(vc)
def no_issue_with_wrong_label():
vc = permissive_context()
demonstrate_triggered_input(vc)
def test_other_input():
return "other"
def combines_tests_and_context(test, vc):
return None
def a_source():
return None
def b_source():
return None
def issue_with_test_a_and_b():
combines_tests_and_context(a_source(), permissive_context())
combines_tests_and_context(b_source(), permissive_context())
def a_sink(arg):
return
def b_sink(arg):
return
def transform_t(arg):
return
def sanitize_source_a_tito(arg):
return arg
def sanitize_source_b_tito(arg):
return arg
def sanitize_sink_a_tito(arg):
return arg
def no_issue_with_transform():
x = a_source()
y = transform_t(x)
combines_tests_and_context(y, permissive_context())
def no_sink_with_transform(x):
y = transform_t(x)
combines_tests_and_context(a_source(), y)
def issue_with_sanitizer():
x = a_source()
y = sanitize_sink_a_tito(x)
combines_tests_and_context(y, permissive_context())
def no_sink_with_sanitizer(x):
y = sanitize_source_b_tito(sanitize_source_a_tito(x))
combines_tests_and_context(y, permissive_context())
def user_controlled_input_wrapper():
return user_controlled_input()
def demonstrate_triggered_context_more_hops(vc):
# More hops enable showing the source trace as a subtrace
id = user_controlled_input_wrapper()
Node.get(id).send(vc)
def issue_with_triggered_context_more_hops():
vc = permissive_context()
demonstrate_triggered_context_more_hops(vc)
| Node |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-database/llama_index/tools/database/base.py | {
"start": 449,
"end": 4829
} | class ____(BaseToolSpec, BaseReader):
"""
Simple Database tool.
Concatenates each row into Document used by LlamaIndex.
Args:
sql_database (Optional[SQLDatabase]): SQL database to use,
including table names to specify.
See :ref:`Ref-Struct-Store` for more details.
OR
engine (Optional[Engine]): SQLAlchemy Engine object of the database connection.
OR
uri (Optional[str]): uri of the database connection.
OR
scheme (Optional[str]): scheme of the database connection.
host (Optional[str]): host of the database connection.
port (Optional[int]): port of the database connection.
user (Optional[str]): user of the database connection.
password (Optional[str]): password of the database connection.
dbname (Optional[str]): dbname of the database connection.
"""
spec_functions = ["load_data", "describe_tables", "list_tables"]
def __init__(
self,
sql_database: Optional[SQLDatabase] = None,
engine: Optional[Engine] = None,
uri: Optional[str] = None,
scheme: Optional[str] = None,
host: Optional[str] = None,
port: Optional[str] = None,
user: Optional[str] = None,
password: Optional[str] = None,
dbname: Optional[str] = None,
*args: Optional[Any],
**kwargs: Optional[Any],
) -> None:
"""Initialize with parameters."""
if sql_database:
self.sql_database = sql_database
elif engine:
self.sql_database = SQLDatabase(engine, *args, **kwargs)
elif uri:
self.uri = uri
self.sql_database = SQLDatabase.from_uri(uri, *args, **kwargs)
elif scheme and host and port and user and password and dbname:
uri = f"{scheme}://{user}:{password}@{host}:{port}/{dbname}"
self.uri = uri
self.sql_database = SQLDatabase.from_uri(uri, *args, **kwargs)
else:
raise ValueError(
"You must provide either a SQLDatabase, "
"a SQL Alchemy Engine, a valid connection URI, or a valid "
"set of credentials."
)
self._metadata = MetaData()
self._metadata.reflect(bind=self.sql_database.engine)
def load_data(self, query: str) -> List[Document]:
"""
Query and load data from the Database, returning a list of Documents.
Args:
query (str): an SQL query to filter tables and rows.
Returns:
List[Document]: A list of Document objects.
"""
documents = []
with self.sql_database.engine.connect() as connection:
if query is None:
raise ValueError("A query parameter is necessary to filter the data")
else:
result = connection.execute(text(query))
for item in result.fetchall():
# fetch each item
doc_str = ", ".join([str(entry) for entry in item])
documents.append(Document(text=doc_str))
return documents
def list_tables(self) -> List[str]:
"""
Returns a list of available tables in the database.
To retrieve details about the columns of specific tables, use
the describe_tables endpoint.
"""
return [x.name for x in self._metadata.sorted_tables]
def describe_tables(self, tables: Optional[List[str]] = None) -> str:
"""
Describes the specified tables in the database.
Args:
tables (List[str]): A list of table names to retrieve details about
"""
table_names = tables or [table.name for table in self._metadata.sorted_tables]
table_schemas = []
for table_name in table_names:
table = next(
(
table
for table in self._metadata.sorted_tables
if table.name == table_name
),
None,
)
if table is None:
raise NoSuchTableError(f"Table '{table_name}' does not exist.")
schema = str(CreateTable(table).compile(self.sql_database._engine))
table_schemas.append(f"{schema}\n")
return "\n".join(table_schemas)
| DatabaseToolSpec |
python | getsentry__sentry | tests/sentry/sentry_apps/api/endpoints/test_sentry_app_installation_external_requests.py | {
"start": 205,
"end": 3432
} | class ____(APITestCase):
def setUp(self) -> None:
self.user = self.create_user(email="boop@example.com")
self.org = self.create_organization(owner=self.user)
self.project = self.create_project(organization=self.org)
self.sentry_app = self.create_sentry_app(
name="Testin", organization=self.org, webhook_url="https://example.com"
)
self.install = self.create_sentry_app_installation(
organization=self.org, slug=self.sentry_app.slug, user=self.user
)
self.url = reverse(
"sentry-api-0-sentry-app-installation-external-requests", args=[self.install.uuid]
)
@responses.activate
def test_makes_external_request(self) -> None:
self.login_as(user=self.user)
options = [{"label": "Project Name", "value": "1234"}]
responses.add(
method=responses.GET,
url="https://example.com/get-projects",
match=[
query_string_matcher(
f"projectSlug={self.project.slug}&installationId={self.install.uuid}&query=proj"
)
],
json=options,
status=200,
content_type="application/json",
)
url = self.url + f"?projectId={self.project.id}&uri=/get-projects&query=proj"
response = self.client.get(url, format="json")
assert response.status_code == 200
assert response.data == {"choices": [["1234", "Project Name"]]}
@responses.activate
def test_makes_external_request_with_dependent_data(self) -> None:
self.login_as(user=self.user)
options = [{"label": "Project Name", "value": "1234"}]
qs = urlencode(
{
"projectSlug": self.project.slug,
"installationId": self.install.uuid,
"query": "proj",
"dependentData": orjson.dumps({"org_id": "A"}).decode(),
}
)
responses.add(
method=responses.GET,
url="https://example.com/get-projects",
match=[query_string_matcher(qs)],
json=options,
status=200,
content_type="application/json",
)
qs = urlencode(
{
"projectId": self.project.id,
"uri": "/get-projects",
"query": "proj",
"dependentData": orjson.dumps({"org_id": "A"}).decode(),
}
)
url = f"{self.url}?{qs}"
response = self.client.get(url, format="json")
assert response.status_code == 200
assert response.data == {"choices": [["1234", "Project Name"]]}
@responses.activate
def test_external_request_fails(self) -> None:
self.login_as(user=self.user)
responses.add(
method=responses.GET,
url=f"https://example.com/get-projects?installationId={self.project.slug}",
status=500,
content_type="application/json",
)
url = self.url + f"?uri={self.project.id}"
response = self.client.get(url, format="json")
assert response.status_code == 500
| SentryAppInstallationExternalRequestsEndpointTest |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli_tests/cli_tests/api_tests/sensor_tests/test_business_logic.py | {
"start": 11202,
"end": 15734
} | class ____:
"""Test processing of sensor data structures.
This class tests pure functions and domain model functionality
without requiring external dependencies.
"""
def test_sensor_creation_with_all_fields(self, snapshot):
"""Test creating sensor with all possible fields."""
sensor = DgApiSensor(
id="complete-sensor-xyz",
name="comprehensive_sensor",
status=DgApiSensorStatus.RUNNING,
sensor_type=DgApiSensorType.FRESHNESS_POLICY,
description="Comprehensive test sensor with all fields",
repository_origin="test_location@test_repo",
next_tick_timestamp=1705311000.0,
)
# Test JSON serialization works correctly
result = sensor.model_dump_json(indent=2)
parsed = json.loads(result)
snapshot.assert_match(parsed)
def test_sensor_status_enum_values(self):
"""Test that all expected SensorStatus enum values are available."""
expected_statuses = ["RUNNING", "STOPPED", "PAUSED"]
actual_statuses = [status.value for status in DgApiSensorStatus]
assert set(actual_statuses) == set(expected_statuses)
def test_sensor_type_enum_values(self):
"""Test that all expected SensorType enum values are available."""
expected_types = [
"STANDARD",
"MULTI_ASSET",
"FRESHNESS_POLICY",
"AUTO_MATERIALIZE",
"ASSET",
]
actual_types = [sensor_type.value for sensor_type in DgApiSensorType]
assert set(actual_types) == set(expected_types)
def test_sensor_with_missing_optional_fields(self):
"""Test sensor creation with None values for optional fields."""
sensor = DgApiSensor(
id="sparse-sensor-123",
name="sparse_sensor",
status=DgApiSensorStatus.STOPPED,
sensor_type=DgApiSensorType.STANDARD,
description=None,
repository_origin=None,
next_tick_timestamp=None,
)
assert sensor.id == "sparse-sensor-123"
assert sensor.name == "sparse_sensor"
assert sensor.status == DgApiSensorStatus.STOPPED
assert sensor.sensor_type == DgApiSensorType.STANDARD
assert sensor.description is None
assert sensor.repository_origin is None
assert sensor.next_tick_timestamp is None
def test_sensor_list_creation(self):
"""Test SensorList creation and basic functionality."""
sensors = [
DgApiSensor(
id="sensor1",
name="first_sensor",
status=DgApiSensorStatus.RUNNING,
sensor_type=DgApiSensorType.STANDARD,
),
DgApiSensor(
id="sensor2",
name="second_sensor",
status=DgApiSensorStatus.PAUSED,
sensor_type=DgApiSensorType.ASSET,
),
]
sensor_list = DgApiSensorList(items=sensors, total=len(sensors))
assert len(sensor_list.items) == 2
assert sensor_list.total == 2
assert sensor_list.items[0].name == "first_sensor"
assert sensor_list.items[1].name == "second_sensor"
def test_sensor_timestamp_handling(self, snapshot):
"""Test sensor with various timestamp values."""
test_cases = [
# Normal timestamp
DgApiSensor(
id="sensor1",
name="normal_timestamp",
status=DgApiSensorStatus.RUNNING,
sensor_type=DgApiSensorType.STANDARD,
next_tick_timestamp=1705311000.0,
),
# No timestamp
DgApiSensor(
id="sensor2",
name="no_timestamp",
status=DgApiSensorStatus.STOPPED,
sensor_type=DgApiSensorType.STANDARD,
next_tick_timestamp=None,
),
# Future timestamp
DgApiSensor(
id="sensor3",
name="future_timestamp",
status=DgApiSensorStatus.PAUSED,
sensor_type=DgApiSensorType.STANDARD,
next_tick_timestamp=2000000000.0, # Far future
),
]
results = []
for sensor in test_cases:
# Test serialization
serialized = json.loads(sensor.model_dump_json())
results.append(serialized)
snapshot.assert_match(results)
| TestSensorDataProcessing |
python | numpy__numpy | numpy/f2py/tests/test_block_docstring.py | {
"start": 101,
"end": 584
} | class ____(util.F2PyTest):
sources = [util.getpath("tests", "src", "block_docstring", "foo.f")]
@pytest.mark.skipif(sys.platform == "win32",
reason="Fails with MinGW64 Gfortran (Issue #9673)")
@pytest.mark.xfail(IS_PYPY,
reason="PyPy cannot modify tp_doc after PyType_Ready")
def test_block_docstring(self):
expected = "bar : 'i'-array(2,3)\n"
assert self.module.block.__doc__ == expected
| TestBlockDocString |
python | bokeh__bokeh | src/bokeh/core/property/container.py | {
"start": 10571,
"end": 10991
} | class ____(Dict):
""" Accept RelativeDelta dicts for time delta values.
"""
def __init__(self, default={}, *, help: str | None = None) -> None:
keys = Enum("years", "months", "days", "hours", "minutes", "seconds", "microseconds")
values = Int
super().__init__(keys, values, default=default, help=help)
def __str__(self) -> str:
return self.__class__.__name__
| RelativeDelta |
python | ipython__ipython | IPython/core/formatters.py | {
"start": 30278,
"end": 30841
} | class ____(BaseFormatter):
"""A Javascript formatter.
To define the callables that compute the Javascript representation of
your objects, define a :meth:`_repr_javascript_` method or use the
:meth:`for_type` or :meth:`for_type_by_name` methods to register functions
that handle this.
The return value of this formatter should be valid Javascript code and
should *not* be enclosed in ```<script>``` tags.
"""
format_type = Unicode('application/javascript')
print_method = ObjectName('_repr_javascript_')
| JavascriptFormatter |
python | mwaskom__seaborn | seaborn/regression.py | {
"start": 2070,
"end": 34326
} | class ____(_LinearPlotter):
"""Plotter for numeric independent variables with regression model.
This does the computations and drawing for the `regplot` function, and
is thus also used indirectly by `lmplot`.
"""
def __init__(self, x, y, data=None, x_estimator=None, x_bins=None,
x_ci="ci", scatter=True, fit_reg=True, ci=95, n_boot=1000,
units=None, seed=None, order=1, logistic=False, lowess=False,
robust=False, logx=False, x_partial=None, y_partial=None,
truncate=False, dropna=True, x_jitter=None, y_jitter=None,
color=None, label=None):
# Set member attributes
self.x_estimator = x_estimator
self.ci = ci
self.x_ci = ci if x_ci == "ci" else x_ci
self.n_boot = n_boot
self.seed = seed
self.scatter = scatter
self.fit_reg = fit_reg
self.order = order
self.logistic = logistic
self.lowess = lowess
self.robust = robust
self.logx = logx
self.truncate = truncate
self.x_jitter = x_jitter
self.y_jitter = y_jitter
self.color = color
self.label = label
# Validate the regression options:
if sum((order > 1, logistic, robust, lowess, logx)) > 1:
raise ValueError("Mutually exclusive regression options.")
# Extract the data vals from the arguments or passed dataframe
self.establish_variables(data, x=x, y=y, units=units,
x_partial=x_partial, y_partial=y_partial)
# Drop null observations
if dropna:
self.dropna("x", "y", "units", "x_partial", "y_partial")
# Regress nuisance variables out of the data
if self.x_partial is not None:
self.x = self.regress_out(self.x, self.x_partial)
if self.y_partial is not None:
self.y = self.regress_out(self.y, self.y_partial)
# Possibly bin the predictor variable, which implies a point estimate
if x_bins is not None:
self.x_estimator = np.mean if x_estimator is None else x_estimator
x_discrete, x_bins = self.bin_predictor(x_bins)
self.x_discrete = x_discrete
else:
self.x_discrete = self.x
# Disable regression in case of singleton inputs
if len(self.x) <= 1:
self.fit_reg = False
# Save the range of the x variable for the grid later
if self.fit_reg:
self.x_range = self.x.min(), self.x.max()
@property
def scatter_data(self):
"""Data where each observation is a point."""
x_j = self.x_jitter
if x_j is None:
x = self.x
else:
x = self.x + np.random.uniform(-x_j, x_j, len(self.x))
y_j = self.y_jitter
if y_j is None:
y = self.y
else:
y = self.y + np.random.uniform(-y_j, y_j, len(self.y))
return x, y
@property
def estimate_data(self):
"""Data with a point estimate and CI for each discrete x value."""
x, y = self.x_discrete, self.y
vals = sorted(np.unique(x))
points, cis = [], []
for val in vals:
# Get the point estimate of the y variable
_y = y[x == val]
est = self.x_estimator(_y)
points.append(est)
# Compute the confidence interval for this estimate
if self.x_ci is None:
cis.append(None)
else:
units = None
if self.x_ci == "sd":
sd = np.std(_y)
_ci = est - sd, est + sd
else:
if self.units is not None:
units = self.units[x == val]
boots = algo.bootstrap(_y,
func=self.x_estimator,
n_boot=self.n_boot,
units=units,
seed=self.seed)
_ci = utils.ci(boots, self.x_ci)
cis.append(_ci)
return vals, points, cis
def _check_statsmodels(self):
"""Check whether statsmodels is installed if any boolean options require it."""
options = "logistic", "robust", "lowess"
err = "`{}=True` requires statsmodels, an optional dependency, to be installed."
for option in options:
if getattr(self, option) and not _has_statsmodels:
raise RuntimeError(err.format(option))
def fit_regression(self, ax=None, x_range=None, grid=None):
"""Fit the regression model."""
self._check_statsmodels()
# Create the grid for the regression
if grid is None:
if self.truncate:
x_min, x_max = self.x_range
else:
if ax is None:
x_min, x_max = x_range
else:
x_min, x_max = ax.get_xlim()
grid = np.linspace(x_min, x_max, 100)
ci = self.ci
# Fit the regression
if self.order > 1:
yhat, yhat_boots = self.fit_poly(grid, self.order)
elif self.logistic:
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod.families import Binomial
yhat, yhat_boots = self.fit_statsmodels(grid, GLM,
family=Binomial())
elif self.lowess:
ci = None
grid, yhat = self.fit_lowess()
elif self.robust:
from statsmodels.robust.robust_linear_model import RLM
yhat, yhat_boots = self.fit_statsmodels(grid, RLM)
elif self.logx:
yhat, yhat_boots = self.fit_logx(grid)
else:
yhat, yhat_boots = self.fit_fast(grid)
# Compute the confidence interval at each grid point
if ci is None:
err_bands = None
else:
err_bands = utils.ci(yhat_boots, ci, axis=0)
return grid, yhat, err_bands
def fit_fast(self, grid):
"""Low-level regression and prediction using linear algebra."""
def reg_func(_x, _y):
return np.linalg.pinv(_x).dot(_y)
X, y = np.c_[np.ones(len(self.x)), self.x], self.y
grid = np.c_[np.ones(len(grid)), grid]
yhat = grid.dot(reg_func(X, y))
if self.ci is None:
return yhat, None
beta_boots = algo.bootstrap(X, y,
func=reg_func,
n_boot=self.n_boot,
units=self.units,
seed=self.seed).T
yhat_boots = grid.dot(beta_boots).T
return yhat, yhat_boots
def fit_poly(self, grid, order):
"""Regression using numpy polyfit for higher-order trends."""
def reg_func(_x, _y):
return np.polyval(np.polyfit(_x, _y, order), grid)
x, y = self.x, self.y
yhat = reg_func(x, y)
if self.ci is None:
return yhat, None
yhat_boots = algo.bootstrap(x, y,
func=reg_func,
n_boot=self.n_boot,
units=self.units,
seed=self.seed)
return yhat, yhat_boots
def fit_statsmodels(self, grid, model, **kwargs):
"""More general regression function using statsmodels objects."""
import statsmodels.tools.sm_exceptions as sme
X, y = np.c_[np.ones(len(self.x)), self.x], self.y
grid = np.c_[np.ones(len(grid)), grid]
def reg_func(_x, _y):
err_classes = (sme.PerfectSeparationError,)
try:
with warnings.catch_warnings():
if hasattr(sme, "PerfectSeparationWarning"):
# statsmodels>=0.14.0
warnings.simplefilter("error", sme.PerfectSeparationWarning)
err_classes = (*err_classes, sme.PerfectSeparationWarning)
yhat = model(_y, _x, **kwargs).fit().predict(grid)
except err_classes:
yhat = np.empty(len(grid))
yhat.fill(np.nan)
return yhat
yhat = reg_func(X, y)
if self.ci is None:
return yhat, None
yhat_boots = algo.bootstrap(X, y,
func=reg_func,
n_boot=self.n_boot,
units=self.units,
seed=self.seed)
return yhat, yhat_boots
def fit_lowess(self):
"""Fit a locally-weighted regression, which returns its own grid."""
from statsmodels.nonparametric.smoothers_lowess import lowess
grid, yhat = lowess(self.y, self.x).T
return grid, yhat
def fit_logx(self, grid):
"""Fit the model in log-space."""
X, y = np.c_[np.ones(len(self.x)), self.x], self.y
grid = np.c_[np.ones(len(grid)), np.log(grid)]
def reg_func(_x, _y):
_x = np.c_[_x[:, 0], np.log(_x[:, 1])]
return np.linalg.pinv(_x).dot(_y)
yhat = grid.dot(reg_func(X, y))
if self.ci is None:
return yhat, None
beta_boots = algo.bootstrap(X, y,
func=reg_func,
n_boot=self.n_boot,
units=self.units,
seed=self.seed).T
yhat_boots = grid.dot(beta_boots).T
return yhat, yhat_boots
def bin_predictor(self, bins):
"""Discretize a predictor by assigning value to closest bin."""
x = np.asarray(self.x)
if np.isscalar(bins):
percentiles = np.linspace(0, 100, bins + 2)[1:-1]
bins = np.percentile(x, percentiles)
else:
bins = np.ravel(bins)
dist = np.abs(np.subtract.outer(x, bins))
x_binned = bins[np.argmin(dist, axis=1)].ravel()
return x_binned, bins
def regress_out(self, a, b):
"""Regress b from a keeping a's original mean."""
a_mean = a.mean()
a = a - a_mean
b = b - b.mean()
b = np.c_[b]
a_prime = a - b.dot(np.linalg.pinv(b).dot(a))
return np.asarray(a_prime + a_mean).reshape(a.shape)
def plot(self, ax, scatter_kws, line_kws):
"""Draw the full plot."""
# Insert the plot label into the correct set of keyword arguments
if self.scatter:
scatter_kws["label"] = self.label
else:
line_kws["label"] = self.label
# Use the current color cycle state as a default
if self.color is None:
lines, = ax.plot([], [])
color = lines.get_color()
lines.remove()
else:
color = self.color
# Ensure that color is hex to avoid matplotlib weirdness
color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))
# Let color in keyword arguments override overall plot color
scatter_kws.setdefault("color", color)
line_kws.setdefault("color", color)
# Draw the constituent plots
if self.scatter:
self.scatterplot(ax, scatter_kws)
if self.fit_reg:
self.lineplot(ax, line_kws)
# Label the axes
if hasattr(self.x, "name"):
ax.set_xlabel(self.x.name)
if hasattr(self.y, "name"):
ax.set_ylabel(self.y.name)
def scatterplot(self, ax, kws):
"""Draw the data."""
# Treat the line-based markers specially, explicitly setting larger
# linewidth than is provided by the seaborn style defaults.
# This would ideally be handled better in matplotlib (i.e., distinguish
# between edgewidth for solid glyphs and linewidth for line glyphs
# but this should do for now.
line_markers = ["1", "2", "3", "4", "+", "x", "|", "_"]
if self.x_estimator is None:
if "marker" in kws and kws["marker"] in line_markers:
lw = mpl.rcParams["lines.linewidth"]
else:
lw = mpl.rcParams["lines.markeredgewidth"]
kws.setdefault("linewidths", lw)
if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:
kws.setdefault("alpha", .8)
x, y = self.scatter_data
ax.scatter(x, y, **kws)
else:
# TODO abstraction
ci_kws = {"color": kws["color"]}
if "alpha" in kws:
ci_kws["alpha"] = kws["alpha"]
ci_kws["linewidth"] = mpl.rcParams["lines.linewidth"] * 1.75
kws.setdefault("s", 50)
xs, ys, cis = self.estimate_data
if [ci for ci in cis if ci is not None]:
for x, ci in zip(xs, cis):
ax.plot([x, x], ci, **ci_kws)
ax.scatter(xs, ys, **kws)
def lineplot(self, ax, kws):
"""Draw the model."""
# Fit the regression model
grid, yhat, err_bands = self.fit_regression(ax)
edges = grid[0], grid[-1]
# Get set default aesthetics
fill_color = kws["color"]
lw = kws.pop("lw", mpl.rcParams["lines.linewidth"] * 1.5)
kws.setdefault("linewidth", lw)
# Draw the regression line and confidence interval
line, = ax.plot(grid, yhat, **kws)
if not self.truncate:
line.sticky_edges.x[:] = edges # Prevent mpl from adding margin
if err_bands is not None:
ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)
_regression_docs = dict(
model_api=dedent("""\
There are a number of mutually exclusive options for estimating the
regression model. See the :ref:`tutorial <regression_tutorial>` for more
information.\
"""),
regplot_vs_lmplot=dedent("""\
The :func:`regplot` and :func:`lmplot` functions are closely related, but
the former is an axes-level function while the latter is a figure-level
function that combines :func:`regplot` and :class:`FacetGrid`.\
"""),
x_estimator=dedent("""\
x_estimator : callable that maps vector -> scalar, optional
Apply this function to each unique value of ``x`` and plot the
resulting estimate. This is useful when ``x`` is a discrete variable.
If ``x_ci`` is given, this estimate will be bootstrapped and a
confidence interval will be drawn.\
"""),
x_bins=dedent("""\
x_bins : int or vector, optional
Bin the ``x`` variable into discrete bins and then estimate the central
tendency and a confidence interval. This binning only influences how
the scatterplot is drawn; the regression is still fit to the original
data. This parameter is interpreted either as the number of
        evenly-sized (not necessarily spaced) bins or the positions of the bin
centers. When this parameter is used, it implies that the default of
``x_estimator`` is ``numpy.mean``.\
"""),
x_ci=dedent("""\
x_ci : "ci", "sd", int in [0, 100] or None, optional
Size of the confidence interval used when plotting a central tendency
for discrete values of ``x``. If ``"ci"``, defer to the value of the
``ci`` parameter. If ``"sd"``, skip bootstrapping and show the
standard deviation of the observations in each bin.\
"""),
scatter=dedent("""\
scatter : bool, optional
If ``True``, draw a scatterplot with the underlying observations (or
the ``x_estimator`` values).\
"""),
fit_reg=dedent("""\
fit_reg : bool, optional
If ``True``, estimate and plot a regression model relating the ``x``
and ``y`` variables.\
"""),
ci=dedent("""\
ci : int in [0, 100] or None, optional
Size of the confidence interval for the regression estimate. This will
be drawn using translucent bands around the regression line. The
confidence interval is estimated using a bootstrap; for large
datasets, it may be advisable to avoid that computation by setting
this parameter to None.\
"""),
n_boot=dedent("""\
n_boot : int, optional
Number of bootstrap resamples used to estimate the ``ci``. The default
value attempts to balance time and stability; you may want to increase
this value for "final" versions of plots.\
"""),
units=dedent("""\
units : variable name in ``data``, optional
If the ``x`` and ``y`` observations are nested within sampling units,
those can be specified here. This will be taken into account when
computing the confidence intervals by performing a multilevel bootstrap
that resamples both units and observations (within unit). This does not
otherwise influence how the regression is estimated or drawn.\
"""),
seed=dedent("""\
seed : int, numpy.random.Generator, or numpy.random.RandomState, optional
Seed or random number generator for reproducible bootstrapping.\
"""),
order=dedent("""\
order : int, optional
If ``order`` is greater than 1, use ``numpy.polyfit`` to estimate a
polynomial regression.\
"""),
logistic=dedent("""\
logistic : bool, optional
If ``True``, assume that ``y`` is a binary variable and use
``statsmodels`` to estimate a logistic regression model. Note that this
is substantially more computationally intensive than linear regression,
so you may wish to decrease the number of bootstrap resamples
(``n_boot``) or set ``ci`` to None.\
"""),
lowess=dedent("""\
lowess : bool, optional
If ``True``, use ``statsmodels`` to estimate a nonparametric lowess
model (locally weighted linear regression). Note that confidence
intervals cannot currently be drawn for this kind of model.\
"""),
robust=dedent("""\
robust : bool, optional
If ``True``, use ``statsmodels`` to estimate a robust regression. This
will de-weight outliers. Note that this is substantially more
computationally intensive than standard linear regression, so you may
wish to decrease the number of bootstrap resamples (``n_boot``) or set
``ci`` to None.\
"""),
logx=dedent("""\
logx : bool, optional
If ``True``, estimate a linear regression of the form y ~ log(x), but
plot the scatterplot and regression model in the input space. Note that
``x`` must be positive for this to work.\
"""),
xy_partial=dedent("""\
{x,y}_partial : strings in ``data`` or matrices
Confounding variables to regress out of the ``x`` or ``y`` variables
before plotting.\
"""),
truncate=dedent("""\
truncate : bool, optional
If ``True``, the regression line is bounded by the data limits. If
``False``, it extends to the ``x`` axis limits.
"""),
dropna=dedent("""\
dropna : bool, optional
If ``True``, remove observations with missing data from the plot.
"""),
xy_jitter=dedent("""\
{x,y}_jitter : floats, optional
Add uniform random noise of this size to either the ``x`` or ``y``
variables. The noise is added to a copy of the data after fitting the
regression, and only influences the look of the scatterplot. This can
be helpful when plotting variables that take discrete values.\
"""),
scatter_line_kws=dedent("""\
{scatter,line}_kws : dictionaries
Additional keyword arguments to pass to ``plt.scatter`` and
``plt.plot``.\
"""),
)
_regression_docs.update(_facet_docs)
def lmplot(
data, *,
x=None, y=None, hue=None, col=None, row=None,
palette=None, col_wrap=None, height=5, aspect=1, markers="o",
sharex=None, sharey=None, hue_order=None, col_order=None, row_order=None,
legend=True, legend_out=None, x_estimator=None, x_bins=None,
x_ci="ci", scatter=True, fit_reg=True, ci=95, n_boot=1000,
units=None, seed=None, order=1, logistic=False, lowess=False,
robust=False, logx=False, x_partial=None, y_partial=None,
truncate=True, x_jitter=None, y_jitter=None, scatter_kws=None,
line_kws=None, facet_kws=None,
):
if facet_kws is None:
facet_kws = {}
def facet_kw_deprecation(key, val):
msg = (
f"{key} is deprecated from the `lmplot` function signature. "
"Please update your code to pass it using `facet_kws`."
)
if val is not None:
warnings.warn(msg, UserWarning)
facet_kws[key] = val
facet_kw_deprecation("sharex", sharex)
facet_kw_deprecation("sharey", sharey)
facet_kw_deprecation("legend_out", legend_out)
if data is None:
raise TypeError("Missing required keyword argument `data`.")
# Reduce the dataframe to only needed columns
need_cols = [x, y, hue, col, row, units, x_partial, y_partial]
cols = np.unique([a for a in need_cols if a is not None]).tolist()
data = data[cols]
# Initialize the grid
facets = FacetGrid(
data, row=row, col=col, hue=hue,
palette=palette,
row_order=row_order, col_order=col_order, hue_order=hue_order,
height=height, aspect=aspect, col_wrap=col_wrap,
**facet_kws,
)
# Add the markers here as FacetGrid has figured out how many levels of the
# hue variable are needed and we don't want to duplicate that process
if facets.hue_names is None:
n_markers = 1
else:
n_markers = len(facets.hue_names)
if not isinstance(markers, list):
markers = [markers] * n_markers
if len(markers) != n_markers:
raise ValueError("markers must be a singleton or a list of markers "
"for each level of the hue variable")
facets.hue_kws = {"marker": markers}
def update_datalim(data, x, y, ax, **kws):
xys = data[[x, y]].to_numpy().astype(float)
ax.update_datalim(xys, updatey=False)
ax.autoscale_view(scaley=False)
facets.map_dataframe(update_datalim, x=x, y=y)
# Draw the regression plot on each facet
regplot_kws = dict(
x_estimator=x_estimator, x_bins=x_bins, x_ci=x_ci,
scatter=scatter, fit_reg=fit_reg, ci=ci, n_boot=n_boot, units=units,
seed=seed, order=order, logistic=logistic, lowess=lowess,
robust=robust, logx=logx, x_partial=x_partial, y_partial=y_partial,
truncate=truncate, x_jitter=x_jitter, y_jitter=y_jitter,
scatter_kws=scatter_kws, line_kws=line_kws,
)
facets.map_dataframe(regplot, x=x, y=y, **regplot_kws)
facets.set_axis_labels(x, y)
# Add a legend
if legend and (hue is not None) and (hue not in [col, row]):
facets.add_legend()
return facets
lmplot.__doc__ = dedent("""\
Plot data and regression model fits across a FacetGrid.
This function combines :func:`regplot` and :class:`FacetGrid`. It is
intended as a convenient interface to fit regression models across
conditional subsets of a dataset.
When thinking about how to assign variables to different facets, a general
rule is that it makes sense to use ``hue`` for the most important
comparison, followed by ``col`` and ``row``. However, always think about
your particular dataset and the goals of the visualization you are
creating.
{model_api}
The parameters to this function span most of the options in
:class:`FacetGrid`, although there may be occasional cases where you will
want to use that class and :func:`regplot` directly.
Parameters
----------
{data}
x, y : strings, optional
Input variables; these should be column names in ``data``.
hue, col, row : strings
Variables that define subsets of the data, which will be drawn on
separate facets in the grid. See the ``*_order`` parameters to control
the order of levels of this variable.
{palette}
{col_wrap}
{height}
{aspect}
markers : matplotlib marker code or list of marker codes, optional
Markers for the scatterplot. If a list, each marker in the list will be
used for each level of the ``hue`` variable.
{share_xy}
.. deprecated:: 0.12.0
Pass using the `facet_kws` dictionary.
{{hue,col,row}}_order : lists, optional
Order for the levels of the faceting variables. By default, this will
be the order that the levels appear in ``data`` or, if the variables
are pandas categoricals, the category order.
legend : bool, optional
If ``True`` and there is a ``hue`` variable, add a legend.
{legend_out}
.. deprecated:: 0.12.0
Pass using the `facet_kws` dictionary.
{x_estimator}
{x_bins}
{x_ci}
{scatter}
{fit_reg}
{ci}
{n_boot}
{units}
{seed}
{order}
{logistic}
{lowess}
{robust}
{logx}
{xy_partial}
{truncate}
{xy_jitter}
{scatter_line_kws}
facet_kws : dict
Dictionary of keyword arguments for :class:`FacetGrid`.
Returns
-------
:class:`FacetGrid`
The :class:`FacetGrid` object with the plot on it for further tweaking.
See Also
--------
regplot : Plot data and a conditional model fit.
FacetGrid : Subplot grid for plotting conditional relationships.
pairplot : Combine :func:`regplot` and :class:`PairGrid` (when used with
``kind="reg"``).
Notes
-----
{regplot_vs_lmplot}
Examples
--------
.. include:: ../docstrings/lmplot.rst
""").format(**_regression_docs)
def regplot(
data=None, *, x=None, y=None,
x_estimator=None, x_bins=None, x_ci="ci",
scatter=True, fit_reg=True, ci=95, n_boot=1000, units=None,
seed=None, order=1, logistic=False, lowess=False, robust=False,
logx=False, x_partial=None, y_partial=None,
truncate=True, dropna=True, x_jitter=None, y_jitter=None,
label=None, color=None, marker="o",
scatter_kws=None, line_kws=None, ax=None
):
plotter = _RegressionPlotter(x, y, data, x_estimator, x_bins, x_ci,
scatter, fit_reg, ci, n_boot, units, seed,
order, logistic, lowess, robust, logx,
x_partial, y_partial, truncate, dropna,
x_jitter, y_jitter, color, label)
if ax is None:
ax = plt.gca()
scatter_kws = {} if scatter_kws is None else copy.copy(scatter_kws)
scatter_kws["marker"] = marker
line_kws = {} if line_kws is None else copy.copy(line_kws)
plotter.plot(ax, scatter_kws, line_kws)
return ax
regplot.__doc__ = dedent("""\
Plot data and a linear regression model fit.
{model_api}
Parameters
----------
x, y : string, series, or vector array
Input variables. If strings, these should correspond with column names
in ``data``. When pandas objects are used, axes will be labeled with
the series name.
{data}
{x_estimator}
{x_bins}
{x_ci}
{scatter}
{fit_reg}
{ci}
{n_boot}
{units}
{seed}
{order}
{logistic}
{lowess}
{robust}
{logx}
{xy_partial}
{truncate}
{dropna}
{xy_jitter}
label : string
Label to apply to either the scatterplot or regression line (if
``scatter`` is ``False``) for use in a legend.
color : matplotlib color
Color to apply to all plot elements; will be superseded by colors
passed in ``scatter_kws`` or ``line_kws``.
marker : matplotlib marker code
Marker to use for the scatterplot glyphs.
{scatter_line_kws}
ax : matplotlib Axes, optional
Axes object to draw the plot onto, otherwise uses the current Axes.
Returns
-------
ax : matplotlib Axes
The Axes object containing the plot.
See Also
--------
lmplot : Combine :func:`regplot` and :class:`FacetGrid` to plot multiple
linear relationships in a dataset.
jointplot : Combine :func:`regplot` and :class:`JointGrid` (when used with
``kind="reg"``).
pairplot : Combine :func:`regplot` and :class:`PairGrid` (when used with
``kind="reg"``).
residplot : Plot the residuals of a linear regression model.
Notes
-----
{regplot_vs_lmplot}
It's also easy to combine :func:`regplot` and :class:`JointGrid` or
:class:`PairGrid` through the :func:`jointplot` and :func:`pairplot`
functions, although these do not directly accept all of :func:`regplot`'s
parameters.
Examples
--------
.. include:: ../docstrings/regplot.rst
""").format(**_regression_docs)
def residplot(
data=None, *, x=None, y=None,
x_partial=None, y_partial=None, lowess=False,
order=1, robust=False, dropna=True, label=None, color=None,
scatter_kws=None, line_kws=None, ax=None
):
"""Plot the residuals of a linear regression.
This function will regress y on x (possibly as a robust or polynomial
regression) and then draw a scatterplot of the residuals. You can
optionally fit a lowess smoother to the residual plot, which can
help in determining if there is structure to the residuals.
Parameters
----------
data : DataFrame, optional
DataFrame to use if `x` and `y` are column names.
x : vector or string
Data or column name in `data` for the predictor variable.
y : vector or string
Data or column name in `data` for the response variable.
{x, y}_partial : vectors or string(s) , optional
These variables are treated as confounding and are removed from
the `x` or `y` variables before plotting.
lowess : boolean, optional
Fit a lowess smoother to the residual scatterplot.
order : int, optional
Order of the polynomial to fit when calculating the residuals.
robust : boolean, optional
Fit a robust linear regression when calculating the residuals.
dropna : boolean, optional
If True, ignore observations with missing data when fitting and
plotting.
label : string, optional
Label that will be used in any plot legends.
color : matplotlib color, optional
Color to use for all elements of the plot.
{scatter, line}_kws : dictionaries, optional
Additional keyword arguments passed to scatter() and plot() for drawing
the components of the plot.
ax : matplotlib axis, optional
Plot into this axis, otherwise grab the current axis or make a new
one if not existing.
Returns
-------
ax: matplotlib axes
Axes with the regression plot.
See Also
--------
regplot : Plot a simple linear regression model.
jointplot : Draw a :func:`residplot` with univariate marginal distributions
(when used with ``kind="resid"``).
Examples
--------
.. include:: ../docstrings/residplot.rst
"""
plotter = _RegressionPlotter(x, y, data, ci=None,
order=order, robust=robust,
x_partial=x_partial, y_partial=y_partial,
dropna=dropna, color=color, label=label)
if ax is None:
ax = plt.gca()
# Calculate the residual from a linear regression
_, yhat, _ = plotter.fit_regression(grid=plotter.x)
plotter.y = plotter.y - yhat
# Set the regression option on the plotter
if lowess:
plotter.lowess = True
else:
plotter.fit_reg = False
# Plot a horizontal line at 0
ax.axhline(0, ls=":", c=".2")
# Draw the scatterplot
scatter_kws = {} if scatter_kws is None else scatter_kws.copy()
line_kws = {} if line_kws is None else line_kws.copy()
plotter.plot(ax, scatter_kws, line_kws)
return ax
| _RegressionPlotter |
python | cython__cython | Cython/Compiler/Tests/TestBuffer.py | {
"start": 1574,
"end": 4144
} | class ____(CythonTest):
# Tests the full parsing of the options within the brackets
def nonfatal_error(self, error):
# We're passing self as context to transform to trap this
self.error = error
self.assertTrue(self.expect_error)
def parse_opts(self, opts, expect_error=False):
assert opts != ""
s = "def f():\n cdef object[%s] x" % opts
self.expect_error = expect_error
root = self.fragment(s, pipeline=[NormalizeTree(self), PostParse(self)]).root
if not expect_error:
vardef = root.stats[0].body.stats[0]
assert isinstance(vardef, CVarDefNode) # use normal assert as this is to validate the test code
buftype = vardef.base_type
self.assertTrue(isinstance(buftype, TemplatedTypeNode))
self.assertTrue(isinstance(buftype.base_type_node, CSimpleBaseTypeNode))
self.assertEqual("object", buftype.base_type_node.name)
return buftype
else:
self.assertTrue(len(root.stats[0].body.stats) == 0)
def non_parse(self, expected_err, opts):
self.parse_opts(opts, expect_error=True)
# e = self.should_fail(lambda: self.parse_opts(opts))
self.assertEqual(expected_err, self.error.message_only)
def __test_basic(self):
buf = self.parse_opts("unsigned short int, 3")
self.assertTrue(isinstance(buf.dtype_node, CSimpleBaseTypeNode))
self.assertTrue(buf.dtype_node.signed == 0 and buf.dtype_node.longness == -1)
self.assertEqual(3, buf.ndim)
def __test_dict(self):
buf = self.parse_opts("ndim=3, dtype=unsigned short int")
self.assertTrue(isinstance(buf.dtype_node, CSimpleBaseTypeNode))
self.assertTrue(buf.dtype_node.signed == 0 and buf.dtype_node.longness == -1)
self.assertEqual(3, buf.ndim)
def __test_ndim(self):
self.parse_opts("int, 2")
self.non_parse(ERR_BUF_NDIM, "int, 'a'")
self.non_parse(ERR_BUF_NDIM, "int, -34")
def __test_use_DEF(self):
t = self.fragment("""
DEF ndim = 3
def f():
cdef object[int, ndim] x
cdef object[ndim=ndim, dtype=int] y
""", pipeline=[NormalizeTree(self), PostParse(self)]).root
stats = t.stats[0].body.stats
self.assertTrue(stats[0].base_type.ndim == 3)
self.assertTrue(stats[1].base_type.ndim == 3)
# add exotic and impossible combinations as they come along...
if __name__ == '__main__':
import unittest
unittest.main()
| TestBufferOptions |
python | kamyu104__LeetCode-Solutions | Python/maximum-score-of-a-good-subarray.py | {
"start": 744,
"end": 1423
} | class ____(object):
def maximumScore(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
def score(nums, k):
prefix = [nums[k]]*(k+1)
for i in reversed(xrange(k)):
prefix[i] = min(prefix[i+1], nums[i])
result = right = nums[k]
for j in xrange(k+1, len(nums)):
right = min(right, nums[j])
i = bisect.bisect_left(prefix, right)
if i >= 0:
result = max(result, right*(j-i+1))
return result
return max(score(nums, k), score(nums[::-1], len(nums)-1-k))
| Solution2 |
python | django-debug-toolbar__django-debug-toolbar | tests/forms.py | {
"start": 71,
"end": 220
} | class ____(forms.Form):
user = forms.ModelChoiceField(queryset=User.objects.all())
def __repr__(self):
return str(self)
| TemplateReprForm |
python | django__django | tests/admin_custom_urls/models.py | {
"start": 1451,
"end": 1524
} | class ____(models.Model):
name = models.CharField(max_length=20)
| Person |
python | python-openxml__python-docx | src/docx/oxml/text/run.py | {
"start": 793,
"end": 6010
} | class ____(BaseOxmlElement):
"""`<w:r>` element, containing the properties and text for a run."""
add_br: Callable[[], CT_Br]
add_tab: Callable[[], CT_TabStop]
get_or_add_rPr: Callable[[], CT_RPr]
_add_drawing: Callable[[], CT_Drawing]
_add_t: Callable[..., CT_Text]
rPr: CT_RPr | None = ZeroOrOne("w:rPr") # pyright: ignore[reportAssignmentType]
br = ZeroOrMore("w:br")
cr = ZeroOrMore("w:cr")
drawing = ZeroOrMore("w:drawing")
t = ZeroOrMore("w:t")
tab = ZeroOrMore("w:tab")
def add_t(self, text: str) -> CT_Text:
"""Return a newly added `<w:t>` element containing `text`."""
t = self._add_t(text=text)
if len(text.strip()) < len(text):
t.set(qn("xml:space"), "preserve")
return t
def add_drawing(self, inline_or_anchor: CT_Inline | CT_Anchor) -> CT_Drawing:
"""Return newly appended `CT_Drawing` (`w:drawing`) child element.
The `w:drawing` element has `inline_or_anchor` as its child.
"""
drawing = self._add_drawing()
drawing.append(inline_or_anchor)
return drawing
def clear_content(self) -> None:
"""Remove all child elements except a `w:rPr` element if present."""
# -- remove all run inner-content except a `w:rPr` when present. --
for e in self.xpath("./*[not(self::w:rPr)]"):
self.remove(e)
@property
def inner_content_items(self) -> List[str | CT_Drawing | CT_LastRenderedPageBreak]:
"""Text of run, possibly punctuated by `w:lastRenderedPageBreak` elements."""
from docx.oxml.text.pagebreak import CT_LastRenderedPageBreak
accum = TextAccumulator()
def iter_items() -> Iterator[str | CT_Drawing | CT_LastRenderedPageBreak]:
for e in self.xpath(
"w:br"
" | w:cr"
" | w:drawing"
" | w:lastRenderedPageBreak"
" | w:noBreakHyphen"
" | w:ptab"
" | w:t"
" | w:tab"
):
if isinstance(e, (CT_Drawing, CT_LastRenderedPageBreak)):
yield from accum.pop()
yield e
else:
accum.push(str(e))
# -- don't forget the "tail" string --
yield from accum.pop()
return list(iter_items())
def insert_comment_range_end_and_reference_below(self, comment_id: int) -> None:
"""Insert a `w:commentRangeEnd` and `w:commentReference` element after this run.
The `w:commentRangeEnd` element is the immediate sibling of this `w:r` and is followed by
a `w:r` containing the `w:commentReference` element.
"""
self.addnext(self._new_comment_reference_run(comment_id))
self.addnext(OxmlElement("w:commentRangeEnd", attrs={qn("w:id"): str(comment_id)}))
def insert_comment_range_start_above(self, comment_id: int) -> None:
"""Insert a `w:commentRangeStart` element with `comment_id` before this run."""
self.addprevious(OxmlElement("w:commentRangeStart", attrs={qn("w:id"): str(comment_id)}))
@property
def lastRenderedPageBreaks(self) -> List[CT_LastRenderedPageBreak]:
"""All `w:lastRenderedPageBreaks` descendants of this run."""
return self.xpath("./w:lastRenderedPageBreak")
@property
def style(self) -> str | None:
"""String contained in `w:val` attribute of `w:rStyle` grandchild.
|None| if that element is not present.
"""
rPr = self.rPr
if rPr is None:
return None
return rPr.style
@style.setter
def style(self, style: str | None):
"""Set character style of this `w:r` element to `style`.
If `style` is None, remove the style element.
"""
rPr = self.get_or_add_rPr()
rPr.style = style
@property
def text(self) -> str:
"""The textual content of this run.
Inner-content child elements like `w:tab` are translated to their text
equivalent.
"""
return "".join(
str(e) for e in self.xpath("w:br | w:cr | w:noBreakHyphen | w:ptab | w:t | w:tab")
)
@text.setter
def text(self, text: str): # pyright: ignore[reportIncompatibleMethodOverride]
self.clear_content()
_RunContentAppender.append_to_run_from_text(self, text)
def _insert_rPr(self, rPr: CT_RPr) -> CT_RPr:
self.insert(0, rPr)
return rPr
def _new_comment_reference_run(self, comment_id: int) -> CT_R:
"""Return a new `w:r` element with `w:commentReference` referencing `comment_id`.
Should look like this:
<w:r>
<w:rPr><w:rStyle w:val="CommentReference"/></w:rPr>
<w:commentReference w:id="0"/>
</w:r>
"""
r = cast(CT_R, OxmlElement("w:r"))
rPr = r.get_or_add_rPr()
rPr.style = "CommentReference"
r.append(OxmlElement("w:commentReference", attrs={qn("w:id"): str(comment_id)}))
return r
# ------------------------------------------------------------------------------------
# Run inner-content elements
| CT_R |
python | mamba-org__mamba | micromamba/tests/test_update.py | {
"start": 8661,
"end": 19741
} | class ____:
current_root_prefix = os.environ["MAMBA_ROOT_PREFIX"]
current_prefix = os.environ["CONDA_PREFIX"]
env_name = helpers.random_string()
root_prefix = os.path.expanduser(os.path.join("~", "tmproot" + helpers.random_string()))
prefix = os.path.join(root_prefix, "envs", env_name)
@staticmethod
@pytest.fixture(scope="class")
def root(existing_cache):
os.environ["MAMBA_ROOT_PREFIX"] = TestUpdateConfig.root_prefix
os.environ["CONDA_PREFIX"] = TestUpdateConfig.prefix
helpers.create("-n", "base", no_dry_run=True)
helpers.create("-n", TestUpdateConfig.env_name, "--offline", no_dry_run=True)
yield
os.environ["MAMBA_ROOT_PREFIX"] = TestUpdateConfig.current_root_prefix
os.environ["CONDA_PREFIX"] = TestUpdateConfig.current_prefix
shutil.rmtree(TestUpdateConfig.root_prefix)
@staticmethod
@pytest.fixture
def env_created(root):
os.environ["MAMBA_ROOT_PREFIX"] = TestUpdateConfig.root_prefix
os.environ["CONDA_PREFIX"] = TestUpdateConfig.prefix
yield
for v in ("CONDA_CHANNELS", "MAMBA_TARGET_PREFIX"):
if v in os.environ:
os.environ.pop(v)
@classmethod
def config_tests(cls, res, root_prefix=root_prefix, target_prefix=prefix):
assert res["root_prefix"] == root_prefix
assert res["target_prefix"] == target_prefix
assert res["use_target_prefix_fallback"]
assert res["use_default_prefix_fallback"]
assert res["use_root_prefix_fallback"]
checks = (
helpers.MAMBA_ALLOW_EXISTING_PREFIX
| helpers.MAMBA_NOT_ALLOW_MISSING_PREFIX
| helpers.MAMBA_NOT_ALLOW_NOT_ENV_PREFIX
| helpers.MAMBA_EXPECT_EXISTING_PREFIX
)
assert res["target_prefix_checks"] == checks
@pytest.mark.parametrize(
"source,file_type",
[
("cli_only", None),
("spec_file_only", "classic"),
("spec_file_only", "explicit"),
("spec_file_only", "yaml"),
("both", "classic"),
("both", "explicit"),
("both", "yaml"),
],
)
def test_specs(self, source, file_type, env_created):
cmd = []
specs = []
if source in ("cli_only", "both"):
specs = ["xtensor-python", "xtl"]
cmd = list(specs)
if source in ("spec_file_only", "both"):
f_name = helpers.random_string()
spec_file = os.path.join(TestUpdateConfig.root_prefix, f_name)
if file_type == "classic":
file_content = ["xtensor >0.20", "xsimd"]
specs += file_content
elif file_type == "explicit":
explicit_specs = [
"https://conda.anaconda.org/conda-forge/linux-64/xtensor-0.21.5-hc9558a2_0.tar.bz2#d330e02e5ed58330638a24601b7e4887",
"https://conda.anaconda.org/conda-forge/linux-64/xsimd-7.4.8-hc9558a2_0.tar.bz2#32d5b7ad7d6511f1faacf87e53a63e5f",
]
file_content = ["@EXPLICIT"] + explicit_specs
specs = explicit_specs
else: # yaml
spec_file += ".yaml"
file_content = ["dependencies:", " - xtensor >0.20", " - xsimd"]
specs += ["xtensor >0.20", "xsimd"]
with open(spec_file, "w") as f:
f.write("\n".join(file_content))
cmd += ["-f", spec_file]
res = helpers.install(*cmd, "--print-config-only")
TestUpdateConfig.config_tests(res)
assert res["env_name"] == ""
assert res["specs"] == specs
@pytest.mark.parametrize("root_prefix", (None, "env_var", "cli"))
@pytest.mark.parametrize("target_is_root", (False, True))
@pytest.mark.parametrize("cli_prefix", (False, True))
@pytest.mark.parametrize("cli_env_name", (False, True))
@pytest.mark.parametrize("yaml_name", (False, True, "prefix"))
@pytest.mark.parametrize("env_var", (False, True))
@pytest.mark.parametrize("current_target_prefix_fallback", (False, True))
def test_target_prefix(
self,
root_prefix,
target_is_root,
cli_prefix,
cli_env_name,
yaml_name,
env_var,
current_target_prefix_fallback,
env_created,
):
cmd = []
if root_prefix in (None, "cli"):
os.environ["MAMBA_DEFAULT_ROOT_PREFIX"] = os.environ.pop("MAMBA_ROOT_PREFIX")
if root_prefix == "cli":
cmd += ["-r", TestUpdateConfig.root_prefix]
r = TestUpdateConfig.root_prefix
if target_is_root:
p = r
n = "base"
else:
p = TestUpdateConfig.prefix
n = TestUpdateConfig.env_name
expected_p = p
if cli_prefix:
cmd += ["-p", p]
if cli_env_name:
cmd += ["-n", n]
if yaml_name:
f_name = helpers.random_string() + ".yaml"
spec_file = os.path.join(TestUpdateConfig.prefix, f_name)
if yaml_name == "prefix":
yaml_n = p
else:
yaml_n = n
if not (cli_prefix or cli_env_name or target_is_root):
expected_p = os.path.join(TestUpdateConfig.root_prefix, "envs", yaml_n)
file_content = [
f"name: {yaml_n}",
"dependencies: [xtensor]",
]
with open(spec_file, "w") as f:
f.write("\n".join(file_content))
cmd += ["-f", spec_file]
if env_var:
os.environ["MAMBA_TARGET_PREFIX"] = p
if not current_target_prefix_fallback:
os.environ.pop("CONDA_PREFIX")
os.environ.pop("CONDA_DEFAULT_ENV")
else:
os.environ["CONDA_PREFIX"] = p
if (cli_prefix and cli_env_name) or (yaml_name == "prefix"):
with pytest.raises(helpers.subprocess.CalledProcessError):
helpers.install(*cmd, "--print-config-only")
elif not (
cli_prefix or cli_env_name or yaml_name or env_var or current_target_prefix_fallback
):
# Fallback on root prefix
res = helpers.install(*cmd, "--print-config-only")
TestUpdateConfig.config_tests(res, root_prefix=r, target_prefix=r)
else:
res = helpers.install(*cmd, "--print-config-only")
TestUpdateConfig.config_tests(res, root_prefix=r, target_prefix=expected_p)
def test_target_prefix_with_no_settings(
self,
existing_cache,
):
# Specify no arg
cmd = []
# Get the actual set MAMBA_ROOT_PREFIX when setting up `TestUpdateConfig` class
os.environ["MAMBA_DEFAULT_ROOT_PREFIX"] = os.environ.pop("MAMBA_ROOT_PREFIX")
os.environ.pop("CONDA_PREFIX")
os.environ.pop("CONDA_DEFAULT_ENV")
# Fallback on root prefix
res = helpers.install(*cmd, "--print-config-only")
TestUpdateConfig.config_tests(
res,
root_prefix=TestUpdateConfig.root_prefix,
target_prefix=TestUpdateConfig.root_prefix,
)
@pytest.mark.skipif(
sys.platform == "win32",
reason="MAMBA_ROOT_PREFIX is set in windows GH workflow",
)
def test_target_prefix_with_no_settings_and_no_env_var(
self,
existing_cache,
):
# Specify no arg
cmd = []
os.environ.pop("MAMBA_ROOT_PREFIX")
os.environ.pop("CONDA_PREFIX")
os.environ.pop("CONDA_DEFAULT_ENV")
# Fallback on root prefix
res = helpers.install(*cmd, "--print-config-only")
TestUpdateConfig.config_tests(
res,
root_prefix=TestUpdateConfig.current_root_prefix,
target_prefix=TestUpdateConfig.current_root_prefix,
)
@pytest.mark.parametrize("cli", (False, True))
@pytest.mark.parametrize("yaml", (False, True))
@pytest.mark.parametrize("env_var", (False, True))
@pytest.mark.parametrize("rc_file", (False, True))
def test_channels(self, cli, yaml, env_var, rc_file, env_created):
cmd = []
expected_channels = []
if cli:
cmd += ["-c", "cli"]
expected_channels += ["cli"]
if yaml:
f_name = helpers.random_string() + ".yaml"
spec_file = os.path.join(TestUpdateConfig.prefix, f_name)
file_content = [
"channels: [yaml]",
"dependencies: [xtensor]",
]
with open(spec_file, "w") as f:
f.write("\n".join(file_content))
cmd += ["-f", spec_file]
expected_channels += ["yaml"]
if env_var:
os.environ["CONDA_CHANNELS"] = "env_var"
expected_channels += ["env_var"]
if rc_file:
f_name = helpers.random_string() + ".yaml"
rc_file = os.path.join(TestUpdateConfig.prefix, f_name)
file_content = ["channels: [rc]"]
with open(rc_file, "w") as f:
f.write("\n".join(file_content))
cmd += ["--rc-file", rc_file]
expected_channels += ["rc"]
res = helpers.install(*cmd, "--print-config-only", no_rc=not rc_file, default_channel=False)
TestUpdateConfig.config_tests(res)
if expected_channels:
assert res["channels"] == expected_channels
else:
assert res["channels"] == ["conda-forge"]
@pytest.mark.parametrize("type", ("yaml", "classic", "explicit"))
def test_multiple_spec_files(self, type, env_created):
cmd = []
specs = ["xtensor", "xsimd"]
explicit_specs = [
"https://conda.anaconda.org/conda-forge/linux-64/xtensor-0.21.5-hc9558a2_0.tar.bz2#d330e02e5ed58330638a24601b7e4887",
"https://conda.anaconda.org/conda-forge/linux-64/xsimd-7.4.8-hc9558a2_0.tar.bz2#32d5b7ad7d6511f1faacf87e53a63e5f",
]
for i in range(2):
f_name = helpers.random_string()
file = os.path.join(TestUpdateConfig.prefix, f_name)
if type == "yaml":
file += ".yaml"
file_content = [f"dependencies: [{specs[i]}]"]
elif type == "classic":
file_content = [specs[i]]
else: # explicit
file_content = ["@EXPLICIT", explicit_specs[i]]
with open(file, "w") as f:
f.write("\n".join(file_content))
cmd += ["-f", file]
res = helpers.install(*cmd, "--print-config-only")
if type == "yaml" or type == "classic":
assert res["specs"] == specs
else: # explicit
assert res["specs"] == [explicit_specs[0]]
def test_channel_specific(self, env_created):
helpers.install("quantstack::sphinx", no_dry_run=True)
res = helpers.update("quantstack::sphinx", "-c", "conda-forge", "--json")
assert "actions" not in res
| TestUpdateConfig |
python | tensorflow__tensorflow | tensorflow/compiler/tests/image_ops_test.py | {
"start": 11041,
"end": 14495
} | class ____(xla_test.XLATestCase):
def _adjust_saturation(self, image, saturation_factor):
image = ops.convert_to_tensor(image, name="image")
orig_dtype = image.dtype
flt_image = image_ops.convert_image_dtype(image, dtypes.float32)
with self.test_scope():
saturation_adjusted_image = gen_image_ops.adjust_saturation(
flt_image, saturation_factor)
return image_ops.convert_image_dtype(saturation_adjusted_image, orig_dtype)
def testHalfSaturation(self):
x_shape = [2, 2, 3]
x_rgb_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_rgb_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_rgb_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_rgb_data, dtype=np.uint8).reshape(x_shape)
with self.session():
x = array_ops.placeholder(x_np.dtype, shape=x_shape)
y = self._adjust_saturation(x, saturation_factor)
y_tf = y.eval({x: x_np})
self.assertAllEqual(y_tf, y_np)
def testTwiceSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 2.0
y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.session():
x = array_ops.placeholder(x_np.dtype, shape=x_shape)
y = self._adjust_saturation(x, saturation_factor)
y_tf = y.eval({x: x_np})
self.assertAllEqual(y_tf, y_np)
def _adjustSaturationNp(self, x_np, scale):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in range(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
s *= scale
s = min(1.0, max(0.0, s))
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def testAdjustRandomSaturation(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
with self.session():
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
scale = np.random.rand()
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_baseline = self._adjustSaturationNp(x_np, scale)
x = array_ops.placeholder(dtypes.float32, shape=x_shape)
with self.test_scope():
y_fused = self._adjust_saturation(x,
scale).eval(feed_dict={x: x_np})
self.assertAllClose(y_fused, y_baseline, rtol=2e-5, atol=1e-5)
| AdjustSaturationTest |
python | keras-team__keras | keras/src/distribution/distribution_lib.py | {
"start": 20028,
"end": 28832
} | class ____(Distribution):
"""Distribution that shards model variables.
Compare to `DataParallel` which replicates the variables across all devices,
`ModelParallel` allows you to shard variables in addition to the input data.
To construct a `ModelParallel` distribution, you need to provide a
`DeviceMesh` and a `LayoutMap`.
1. `DeviceMesh` contains physical device information. The axis names in
the mesh will be used to map the variable and data layout.
2. `LayoutMap` contains the mapping between variable paths to their
corresponding `TensorLayout`.
Example:
```python
devices = list_devices() # Assume there are 8 devices.
# Create a mesh with 2 devices for data parallelism and 4 devices for
# model parallelism.
device_mesh = DeviceMesh(shape=(2, 4), axis_names=('batch', 'model'),
devices=devices)
# Create a layout map that shard the `Dense` layer and `Conv2D`
# layer variables on the last dimension.
# Based on the `device_mesh`, this means the variables
# will be split across 4 devices. Any other variable that doesn't
# match any key in the layout map will be fully replicated.
layout_map = LayoutMap(device_mesh)
layout_map['dense.*kernel'] = (None, 'model')
layout_map['dense.*bias'] = ('model',)
layout_map['conv2d.*kernel'] = (None, None, None, 'model')
layout_map['conv2d.*bias'] = ('model',)
distribution = ModelParallel(
layout_map=layout_map,
batch_dim_name='batch',
)
# Set the global distribution, or via `with distribution.scope():`
set_distribution(distribution)
model = model_creation()
model.compile()
model.fit(data)
```
You can quickly update the device mesh shape to change the sharding factor
of the variables. E.g.
```python
# With only the shape change for the device mesh, the variables will be
# sharded across 8 devices instead of 4, which further reduces the memory
# footprint of variables on each of the device.
device_mesh = DeviceMesh(
shape=(1, 8),
axis_names=('batch', 'model'),
devices=devices,
)
```
To figure out a proper layout mapping rule for all the model variables, you
can first list out all the model variable paths, which will be used as the
key to map the variables to `TensorLayout`.
e.g.
```python
model = create_model()
for v in model.variables:
print(v.path)
```
Args:
layout_map: `LayoutMap` instance which map the variable path to the
corresponding tensor layout.
batch_dim_name: Optional string, the axis name in the device mesh
(of the `layout_map` object)
that will be used to distribute data. If unspecified, the
first axis from the device mesh will be used.
auto_shard_dataset: Automatically shard the dataset amongst
processes in a multi-process setting. Set to `False` if the dataset
is already sharded across hosts. Defaults to `True`.
"""
def __init__(
self,
*,
layout_map=None,
batch_dim_name=None,
auto_shard_dataset=True,
**kwargs,
):
kwargs.pop("device_mesh", None)
if layout_map is None:
raise ValueError("You must specify a layout_map argument.")
if not isinstance(layout_map, LayoutMap):
raise ValueError(
"Argument `layout_map` must be a `LayoutMap` instance. "
f"Received: layout_map={layout_map}"
)
device_mesh = layout_map.device_mesh
batch_dim_name = batch_dim_name or device_mesh.axis_names[0]
super().__init__(device_mesh, batch_dim_name, auto_shard_dataset)
self._layout_map = layout_map
# Those following attributes might get convert to public methods.
self._num_process = distribution_lib.num_processes()
self._process_id = distribution_lib.process_id()
self._is_multi_process = self._num_process > 1
def get_data_layout(self, data_shape):
data_shard_spec = [None] * len(data_shape)
data_shard_spec[0] = self.batch_dim_name # Shard on the first dim
return TensorLayout(data_shard_spec, self.device_mesh)
def get_variable_layout(self, variable):
# First check if the variable already has a layout assigned.
if getattr(variable, "_layout", None) is not None:
return variable._layout
# Check the layout map.
variable_layout = self._layout_map[variable.path]
if variable_layout is not None:
return variable_layout
variable_shard_spec = [None] * len(variable.shape)
return TensorLayout(variable_shard_spec, self.device_mesh)
def get_tensor_layout(self, path):
return self._layout_map[path]
def distribute_dataset(self, dataset):
if not self._is_multi_process or not self.auto_shard_dataset:
return dataset
# Try to distribute a global tf.data.Dataset.
from keras.src.utils.module_utils import tensorflow as tf
if not tf.available or not isinstance(dataset, tf.data.Dataset):
raise ValueError(
"Only `tf.data.Dataset` is supported for auto-sharding, "
f"got {type(dataset)}"
)
from tensorflow.python.data.experimental.ops import (
distribute as tf_data_distribute,
)
global_batch_size = tf_data_distribute.compute_batch_size(dataset)
if global_batch_size.numpy() < 0:
raise ValueError(
"The batch size of the input dataset is "
"unknown. Please config the batch size for "
"the input dataset, e.g via `dataset.batch(batch_size)`"
)
# We need to compute the per-process/worker/host batch size.
# This will depend on how many model replicas we have on each process.
# Note that this might be smaller than one if model replicas are sharded
# across multiple processes.
mesh_batch_dim_index = self.device_mesh.axis_names.index(
self.batch_dim_name
)
num_model_replicas = self.device_mesh.shape[mesh_batch_dim_index]
if num_model_replicas == 1:
# No sharding is needed in this case. Each process will have the
# global batch size, and data from the iterator will need to be
# replicated across all processes.
return dataset.prefetch(tf.data.AUTOTUNE)
num_model_replicas_per_process = num_model_replicas / self._num_process
if num_model_replicas_per_process >= 1:
# Each process will have one or more full model replicas. Data will
# be sharded across all processes without replication.
if global_batch_size % self._num_process != 0:
raise ValueError(
"Global batch size must be divisible by the number of "
f"processes. `global_batch_size`={global_batch_size} and "
f"`num_process`={self._num_process}"
)
per_process_batch_size = global_batch_size // self._num_process
distributed_dataset = dataset.rebatch(per_process_batch_size)
distributed_dataset = distributed_dataset.shard(
num_shards=self._num_process,
index=self._process_id,
)
return distributed_dataset.prefetch(tf.data.AUTOTUNE)
else:
# Model replicas are sharded across multiple processes. Data will be
# sharded across model replicas, and replicated across processes
# within the same model replica.
if global_batch_size % num_model_replicas != 0:
raise ValueError(
"Global batch size must be divisible by the number of "
f"replicas. `global_batch_size`={global_batch_size} and "
f"`num_model_replicas`={num_model_replicas}"
)
per_process_batch_size = global_batch_size // num_model_replicas
distributed_dataset = dataset.rebatch(per_process_batch_size)
processes_per_replica = self._num_process // num_model_replicas
# TODO: Figure out what the convention is for data sharding id.
data_shard_id = self._process_id % processes_per_replica
distributed_dataset = distributed_dataset.shard(
num_shards=num_model_replicas,
index=data_shard_id,
)
return distributed_dataset.prefetch(tf.data.AUTOTUNE)
@keras_export("keras.distribution.LayoutMap")
| ModelParallel |
python | charliermarsh__ruff | python/ruff-ecosystem/ruff_ecosystem/projects.py | {
"start": 5071,
"end": 5164
} | class ____(Enum):
check = "check"
format = "format"
@dataclass(frozen=True)
| RuffCommand |
python | ray-project__ray | python/ray/tests/test_placement_group.py | {
"start": 19691,
"end": 23179
} | class ____:
def test_strategy_validation(self):
"""Test strategy validation when creating a placement group."""
# Valid strategies should not raise an exception.
for strategy in VALID_PLACEMENT_GROUP_STRATEGIES:
validate_placement_group(bundles=[{"CPU": 1}], strategy=strategy)
# Any other strategy should raise a ValueError.
with pytest.raises(ValueError, match="Invalid placement group strategy"):
validate_placement_group(bundles=[{"CPU": 1}], strategy="invalid")
def test_bundle_validation(self):
"""Test _validate_bundle()."""
# Valid bundles should not raise an exception.
valid_bundles = [{"CPU": 1, "custom-resource": 2.2}, {"GPU": 0.75}]
_validate_bundles(valid_bundles)
# Non-list bundles should raise an exception.
with pytest.raises(ValueError, match="must be a list"):
_validate_bundles("not a list")
# Empty list bundles should raise an exception.
with pytest.raises(ValueError, match="must be a non-empty list"):
_validate_bundles([])
# List that doesn't contain dictionaries should raise an exception.
with pytest.raises(ValueError, match="resource dictionaries"):
_validate_bundles([{"CPU": 1}, "not a dict"])
# List with invalid dictionary entries should raise an exception.
with pytest.raises(ValueError, match="resource dictionaries"):
_validate_bundles([{8: 7}, {5: 3.5}])
with pytest.raises(ValueError, match="resource dictionaries"):
_validate_bundles([{"CPU": "6"}, {"GPU": "5"}])
# Bundles with resources that all have 0 values should raise an exception.
with pytest.raises(ValueError, match="only 0 values"):
_validate_bundles([{"CPU": 0, "GPU": 0}])
def test_bundle_label_selector_validation(self):
"""Test _validate_bundle_label_selector()."""
# Valid label selector list should not raise an exception.
valid_label_selectors = [
{"ray.io/market_type": "spot"},
{"ray.io/accelerator-type": "A100"},
]
_validate_bundle_label_selector(valid_label_selectors)
# Non-list input should raise an exception.
with pytest.raises(ValueError, match="must be a list"):
_validate_bundle_label_selector("not a list")
# Empty list should not raise (interpreted as no-op).
_validate_bundle_label_selector([])
# List with non-dictionary elements should raise an exception.
with pytest.raises(ValueError, match="must be a list of string dictionary"):
_validate_bundle_label_selector(["not a dict", {"valid": "label"}])
# Dictionary with non-string keys or values should raise an exception.
with pytest.raises(ValueError, match="must be a list of string dictionary"):
_validate_bundle_label_selector([{1: "value"}, {"key": "val"}])
with pytest.raises(ValueError, match="must be a list of string dictionary"):
_validate_bundle_label_selector([{"key": 123}, {"valid": "label"}])
# Invalid label key or value syntax (delegated to validate_label_selector).
with pytest.raises(ValueError, match="Invalid label selector provided"):
_validate_bundle_label_selector([{"INVALID key!": "value"}])
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| TestPlacementGroupValidation |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 348728,
"end": 349395
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("avatar_url", "date", "email", "name", "user")
avatar_url = sgqlc.types.Field(
sgqlc.types.non_null(URI),
graphql_name="avatarUrl",
args=sgqlc.types.ArgDict(
(("size", sgqlc.types.Arg(Int, graphql_name="size", default=None)),)
),
)
date = sgqlc.types.Field(GitTimestamp, graphql_name="date")
email = sgqlc.types.Field(String, graphql_name="email")
name = sgqlc.types.Field(String, graphql_name="name")
user = sgqlc.types.Field("User", graphql_name="user")
| GitActor |
python | huggingface__transformers | src/transformers/models/internvl/video_processing_internvl.py | {
"start": 1144,
"end": 1256
} | class ____(VideosKwargs, total=False):
initial_shift: Union[bool, float, int]
| InternVLVideoProcessorInitKwargs |
python | python-visualization__folium | folium/features.py | {
"start": 40869,
"end": 45938
} | class ____(MacroElement):
"""Base class for GeoJsonTooltip and GeoJsonPopup.
:meta private:
"""
base_template = """
function(layer){
let div = L.DomUtil.create('div');
{% if this.fields %}
let handleObject = feature => {
if (feature === null) {
return '';
} else if (typeof(feature)=='object') {
return JSON.stringify(feature);
} else {
return feature;
}
}
let fields = {{ this.fields | tojson | safe }};
let aliases = {{ this.aliases | tojson | safe }};
let table = '<table>' +
String(
fields.map(
(v,i)=>
`<tr>{% if this.labels %}
<th>${aliases[i]{% if this.localize %}.toLocaleString(){% endif %}}</th>
{% endif %}
<td>${handleObject(layer.feature.properties[v]){% if this.localize %}.toLocaleString(){% endif %}}</td>
</tr>`).join(''))
+'</table>';
div.innerHTML=table;
{% endif %}
return div
}
"""
def __init__(
self,
fields: Sequence[str],
aliases: Optional[Sequence[str]] = None,
labels: bool = True,
localize: bool = False,
style: Optional[str] = None,
class_name: str = "geojsondetail",
):
super().__init__()
assert isinstance(
fields, (list, tuple)
), "Please pass a list or tuple to fields."
if aliases is not None:
assert isinstance(aliases, (list, tuple))
assert len(fields) == len(
aliases
), "fields and aliases must have the same length."
assert isinstance(labels, bool), "labels requires a boolean value."
assert isinstance(localize, bool), "localize must be bool."
self._name = "GeoJsonDetail"
self.fields = fields
self.aliases = aliases if aliases is not None else fields
self.labels = labels
self.localize = localize
self.class_name = class_name
if style:
assert isinstance(
style, str
), "Pass a valid inline HTML style property string to style."
# noqa outside of type checking.
self.style = style
def warn_for_geometry_collections(self) -> None:
"""Checks for GeoJson GeometryCollection features to warn user about incompatibility."""
assert isinstance(self._parent, GeoJson)
geom_collections = [
feature.get("properties") if feature.get("properties") is not None else key
for key, feature in enumerate(self._parent.data["features"])
if feature["geometry"]
and feature["geometry"]["type"] == "GeometryCollection"
]
if any(geom_collections):
warnings.warn(
f"{self._name} is not configured to render for GeoJson GeometryCollection geometries. "
f"Please consider reworking these features: {geom_collections} to MultiPolygon for full functionality.\n"
"https://tools.ietf.org/html/rfc7946#page-9",
UserWarning,
)
def render(self, **kwargs):
"""Renders the HTML representation of the element."""
figure = self.get_root()
if isinstance(self._parent, GeoJson):
keys = tuple(
self._parent.data["features"][0]["properties"].keys()
if self._parent.data["features"]
else []
)
self.warn_for_geometry_collections()
elif isinstance(self._parent, TopoJson):
obj_name = self._parent.object_path.split(".")[-1]
keys = tuple(
self._parent.data["objects"][obj_name]["geometries"][0][
"properties"
].keys()
)
else:
raise TypeError(
f"You cannot add a {self._name} to anything other than a "
"GeoJson or TopoJson object."
)
keys = tuple(x for x in keys if x not in ("style", "highlight"))
for value in self.fields:
assert (
value in keys
), f"The field {value} is not available in the data. Choose from: {keys}."
figure.header.add_child(
Element(
Template(
"""
<style>
.{{ this.class_name }} {
{{ this.style }}
}
.{{ this.class_name }} table{
margin: auto;
}
.{{ this.class_name }} tr{
text-align: left;
}
.{{ this.class_name }} th{
padding: 2px; padding-right: 8px;
}
</style>
"""
).render(this=self)
),
name=self.get_name() + "tablestyle",
)
super().render()
| GeoJsonDetail |
python | mlflow__mlflow | mlflow/data/spark_dataset.py | {
"start": 867,
"end": 16626
} | class ____(Dataset, PyFuncConvertibleDatasetMixin):
"""
Represents a Spark dataset (e.g. data derived from a Spark Table / file directory or Delta
Table) for use with MLflow Tracking.
"""
def __init__(
self,
df: "pyspark.sql.DataFrame",
source: DatasetSource,
targets: str | None = None,
name: str | None = None,
digest: str | None = None,
predictions: str | None = None,
):
if targets is not None and targets not in df.columns:
raise MlflowException(
f"The specified Spark dataset does not contain the specified targets column"
f" '{targets}'.",
INVALID_PARAMETER_VALUE,
)
if predictions is not None and predictions not in df.columns:
raise MlflowException(
f"The specified Spark dataset does not contain the specified predictions column"
f" '{predictions}'.",
INVALID_PARAMETER_VALUE,
)
self._df = df
self._targets = targets
self._predictions = predictions
super().__init__(source=source, name=name, digest=digest)
def _compute_digest(self) -> str:
"""
Computes a digest for the dataset. Called if the user doesn't supply
a digest when constructing the dataset.
"""
# Retrieve a semantic hash of the DataFrame's logical plan, which is much more efficient
# and deterministic than hashing DataFrame records
import numpy as np
import pyspark
# Spark 3.1.0+ has a semanticHash() method on DataFrame
if Version(pyspark.__version__) >= Version("3.1.0"):
semantic_hash = self._df.semanticHash()
else:
semantic_hash = self._df._jdf.queryExecution().analyzed().semanticHash()
return get_normalized_md5_digest([np.int64(semantic_hash)])
def to_dict(self) -> dict[str, str]:
"""Create config dictionary for the dataset.
Returns a string dictionary containing the following fields: name, digest, source, source
type, schema, and profile.
"""
schema = json.dumps({"mlflow_colspec": self.schema.to_dict()}) if self.schema else None
config = super().to_dict()
config.update(
{
"schema": schema,
"profile": json.dumps(self.profile),
}
)
return config
@property
def df(self):
"""The Spark DataFrame instance.
Returns:
The Spark DataFrame instance.
"""
return self._df
@property
def targets(self) -> str | None:
"""The name of the Spark DataFrame column containing targets (labels) for supervised
learning.
Returns:
The string name of the Spark DataFrame column containing targets.
"""
return self._targets
@property
def predictions(self) -> str | None:
"""
The name of the predictions column. May be ``None`` if no predictions column
was specified when the dataset was created.
"""
return self._predictions
@property
def source(self) -> SparkDatasetSource | DeltaDatasetSource:
"""
Spark dataset source information.
Returns:
An instance of
:py:class:`SparkDatasetSource <mlflow.data.spark_dataset_source.SparkDatasetSource>` or
:py:class:`DeltaDatasetSource <mlflow.data.delta_dataset_source.DeltaDatasetSource>`.
"""
return self._source
@property
def profile(self) -> Any | None:
"""
A profile of the dataset. May be None if no profile is available.
"""
try:
from pyspark.rdd import BoundedFloat
# Use Spark RDD countApprox to get approximate count since count() may be expensive.
# Note that we call the Scala RDD API because the PySpark API does not respect the
# specified timeout. Reference code:
# https://spark.apache.org/docs/3.4.0/api/python/_modules/pyspark/rdd.html
# #RDD.countApprox. This is confirmed to work in all Spark 3.x versions
py_rdd = self.df.rdd
drdd = py_rdd.mapPartitions(lambda it: [float(sum(1 for i in it))])
jrdd = drdd.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = drdd.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
timeout_millis = 5000
confidence = 0.9
approx_count_operation = jdrdd.sumApprox(timeout_millis, confidence)
approx_count_result = approx_count_operation.initialValue()
approx_count_float = BoundedFloat(
mean=approx_count_result.mean(),
confidence=approx_count_result.confidence(),
low=approx_count_result.low(),
high=approx_count_result.high(),
)
approx_count = int(approx_count_float)
if approx_count <= 0:
# An approximate count of zero likely indicates that the count timed
# out before an estimate could be made. In this case, we use the value
# "unknown" so that users don't think the dataset is empty
approx_count = "unknown"
return {
"approx_count": approx_count,
}
except Exception as e:
_logger.warning(
"Encountered an unexpected exception while computing Spark dataset profile."
" Exception: %s",
e,
)
@cached_property
def schema(self) -> Schema | None:
"""
The MLflow ColSpec schema of the Spark dataset.
"""
try:
return _infer_schema(self._df)
except Exception as e:
_logger.warning("Failed to infer schema for Spark dataset. Exception: %s", e)
return None
def to_pyfunc(self) -> PyFuncInputsOutputs:
"""
Converts the Spark DataFrame to pandas and splits the resulting
:py:class:`pandas.DataFrame` into: 1. a :py:class:`pandas.DataFrame` of features and
2. a :py:class:`pandas.Series` of targets.
To avoid overuse of driver memory, only the first 10,000 DataFrame rows are selected.
"""
df = self._df.limit(10000).toPandas()
if self._targets is not None:
if self._targets not in df.columns:
raise MlflowException(
f"Failed to convert Spark dataset to pyfunc inputs and outputs because"
f" the pandas representation of the Spark dataset does not contain the"
f" specified targets column '{self._targets}'.",
# This is an internal error because we should have validated the presence of
# the target column in the Hugging Face dataset at construction time
INTERNAL_ERROR,
)
inputs = df.drop(columns=self._targets)
outputs = df[self._targets]
return PyFuncInputsOutputs(inputs=inputs, outputs=outputs)
else:
return PyFuncInputsOutputs(inputs=df, outputs=None)
def to_evaluation_dataset(self, path=None, feature_names=None) -> EvaluationDataset:
"""
Converts the dataset to an EvaluationDataset for model evaluation. Required
for use with mlflow.evaluate().
"""
return EvaluationDataset(
data=self._df.limit(10000).toPandas(),
targets=self._targets,
path=path,
feature_names=feature_names,
predictions=self._predictions,
name=self.name,
digest=self.digest,
)
def load_delta(
path: str | None = None,
table_name: str | None = None,
version: str | None = None,
targets: str | None = None,
name: str | None = None,
digest: str | None = None,
) -> SparkDataset:
"""
Loads a :py:class:`SparkDataset <mlflow.data.spark_dataset.SparkDataset>` from a Delta table
for use with MLflow Tracking.
Args:
path: The path to the Delta table. Either ``path`` or ``table_name`` must be specified.
table_name: The name of the Delta table. Either ``path`` or ``table_name`` must be
specified.
version: The Delta table version. If not specified, the version will be inferred.
targets: Optional. The name of the Delta table column containing targets (labels) for
supervised learning.
name: The name of the dataset. E.g. "wiki_train". If unspecified, a name is
automatically generated.
digest: The digest (hash, fingerprint) of the dataset. If unspecified, a digest
is automatically computed.
Returns:
An instance of :py:class:`SparkDataset <mlflow.data.spark_dataset.SparkDataset>`.
"""
from mlflow.data.spark_delta_utils import (
_try_get_delta_table_latest_version_from_path,
_try_get_delta_table_latest_version_from_table_name,
)
if (path, table_name).count(None) != 1:
raise MlflowException(
"Must specify exactly one of `table_name` or `path`.",
INVALID_PARAMETER_VALUE,
)
if version is None:
if path is not None:
version = _try_get_delta_table_latest_version_from_path(path)
else:
version = _try_get_delta_table_latest_version_from_table_name(table_name)
if name is None and table_name is not None:
name = table_name + (f"@v{version}" if version is not None else "")
source = DeltaDatasetSource(path=path, delta_table_name=table_name, delta_table_version=version)
df = source.load()
return SparkDataset(
df=df,
source=source,
targets=targets,
name=name,
digest=digest,
)
def from_spark(
df: "pyspark.sql.DataFrame",
path: str | None = None,
table_name: str | None = None,
version: str | None = None,
sql: str | None = None,
targets: str | None = None,
name: str | None = None,
digest: str | None = None,
predictions: str | None = None,
) -> SparkDataset:
"""
Given a Spark DataFrame, constructs a
:py:class:`SparkDataset <mlflow.data.spark_dataset.SparkDataset>` object for use with
MLflow Tracking.
Args:
df: The Spark DataFrame from which to construct a SparkDataset.
path: The path of the Spark or Delta source that the DataFrame originally came from. Note
that the path does not have to match the DataFrame exactly, since the DataFrame may have
been modified by Spark operations. This is used to reload the dataset upon request via
:py:func:`SparkDataset.source.load()
<mlflow.data.spark_dataset_source.SparkDatasetSource.load>`. If none of ``path``,
``table_name``, or ``sql`` are specified, a CodeDatasetSource is used, which will source
information from the run context.
table_name: The name of the Spark or Delta table that the DataFrame originally came from.
Note that the table does not have to match the DataFrame exactly, since the DataFrame
may have been modified by Spark operations. This is used to reload the dataset upon
request via :py:func:`SparkDataset.source.load()
<mlflow.data.spark_dataset_source.SparkDatasetSource.load>`. If none of ``path``,
``table_name``, or ``sql`` are specified, a CodeDatasetSource is used, which will source
information from the run context.
version: If the DataFrame originally came from a Delta table, specifies the version of the
Delta table. This is used to reload the dataset upon request via
:py:func:`SparkDataset.source.load()
<mlflow.data.spark_dataset_source.SparkDatasetSource.load>`. ``version`` cannot be
specified if ``sql`` is specified.
sql: The Spark SQL statement that was originally used to construct the DataFrame. Note that
the Spark SQL statement does not have to match the DataFrame exactly, since the
DataFrame may have been modified by Spark operations. This is used to reload the dataset
upon request via :py:func:`SparkDataset.source.load()
<mlflow.data.spark_dataset_source.SparkDatasetSource.load>`. If none of ``path``,
``table_name``, or ``sql`` are specified, a CodeDatasetSource is used, which will source
information from the run context.
targets: Optional. The name of the Data Frame column containing targets (labels) for
supervised learning.
name: The name of the dataset. E.g. "wiki_train". If unspecified, a name is automatically
generated.
digest: The digest (hash, fingerprint) of the dataset. If unspecified, a digest is
automatically computed.
predictions: Optional. The name of the column containing model predictions,
if the dataset contains model predictions. If specified, this column
must be present in the dataframe (``df``).
Returns:
An instance of :py:class:`SparkDataset <mlflow.data.spark_dataset.SparkDataset>`.
"""
from mlflow.data.code_dataset_source import CodeDatasetSource
from mlflow.data.spark_delta_utils import (
_is_delta_table,
_is_delta_table_path,
_try_get_delta_table_latest_version_from_path,
_try_get_delta_table_latest_version_from_table_name,
)
from mlflow.tracking.context import registry
if (path, table_name, sql).count(None) < 2:
raise MlflowException(
"Must specify at most one of `path`, `table_name`, or `sql`.",
INVALID_PARAMETER_VALUE,
)
if (sql, version).count(None) == 0:
raise MlflowException(
"`version` may not be specified when `sql` is specified. `version` may only be"
" specified when `table_name` or `path` is specified.",
INVALID_PARAMETER_VALUE,
)
if sql is not None:
source = SparkDatasetSource(sql=sql)
elif path is not None:
if _is_delta_table_path(path):
version = version or _try_get_delta_table_latest_version_from_path(path)
source = DeltaDatasetSource(path=path, delta_table_version=version)
elif version is None:
source = SparkDatasetSource(path=path)
else:
raise MlflowException(
f"Version '{version}' was specified, but the path '{path}' does not refer"
f" to a Delta table.",
INVALID_PARAMETER_VALUE,
)
elif table_name is not None:
if _is_delta_table(table_name):
version = version or _try_get_delta_table_latest_version_from_table_name(table_name)
source = DeltaDatasetSource(
delta_table_name=table_name,
delta_table_version=version,
)
elif version is None:
source = SparkDatasetSource(table_name=table_name)
else:
raise MlflowException(
f"Version '{version}' was specified, but could not find a Delta table with name"
f" '{table_name}'.",
INVALID_PARAMETER_VALUE,
)
else:
context_tags = registry.resolve_tags()
source = CodeDatasetSource(tags=context_tags)
return SparkDataset(
df=df,
source=source,
targets=targets,
name=name,
digest=digest,
predictions=predictions,
)
| SparkDataset |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/overrides.py | {
"start": 890,
"end": 1092
} | class ____(B):
q: str = "q"
def __init__(self, arg):
super(B, self).__init__(arg)
def methodA(self, arg):
pass
@classmethod
def classMethod(cls, arg):
pass
| C |
python | django-import-export__django-import-export | tests/core/tests/test_widgets.py | {
"start": 3406,
"end": 3684
} | class ____(TestCase):
date = date(10, 8, 2)
target_dt = "02.08.0010"
format = "%d.%m.%Y"
def test_format_datetime_gte_django4(self):
self.assertEqual(
self.target_dt, widgets.format_datetime(self.date, self.format)
)
| FormatDatetimeTest |
python | pypa__pip | src/pip/_internal/commands/install.py | {
"start": 2180,
"end": 30545
} | class ____(RequirementCommand):
"""
Install packages from:
- PyPI (and other indexes) using requirement specifiers.
- VCS project urls.
- Local project directories.
- Local or remote source archives.
pip also supports installing from "requirements files", which provide
an easy way to specify a whole environment to be installed.
"""
usage = """
%prog [options] <requirement specifier> [package-index-options] ...
%prog [options] -r <requirements file> [package-index-options] ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
def add_options(self) -> None:
self.cmd_opts.add_option(cmdoptions.requirements())
self.cmd_opts.add_option(cmdoptions.constraints())
self.cmd_opts.add_option(cmdoptions.build_constraints())
self.cmd_opts.add_option(cmdoptions.requirements_from_scripts())
self.cmd_opts.add_option(cmdoptions.no_deps())
self.cmd_opts.add_option(cmdoptions.pre())
self.cmd_opts.add_option(cmdoptions.editable())
self.cmd_opts.add_option(
"--dry-run",
action="store_true",
dest="dry_run",
default=False,
help=(
"Don't actually install anything, just print what would be. "
"Can be used in combination with --ignore-installed "
"to 'resolve' the requirements."
),
)
self.cmd_opts.add_option(
"-t",
"--target",
dest="target_dir",
metavar="dir",
default=None,
help=(
"Install packages into <dir>. "
"By default this will not replace existing files/folders in "
"<dir>. Use --upgrade to replace existing packages in <dir> "
"with new versions."
),
)
cmdoptions.add_target_python_options(self.cmd_opts)
self.cmd_opts.add_option(
"--user",
dest="use_user_site",
action="store_true",
help=(
"Install to the Python user install directory for your "
"platform. Typically ~/.local/, or %APPDATA%\\Python on "
"Windows. (See the Python documentation for site.USER_BASE "
"for full details.)"
),
)
self.cmd_opts.add_option(
"--no-user",
dest="use_user_site",
action="store_false",
help=SUPPRESS_HELP,
)
self.cmd_opts.add_option(
"--root",
dest="root_path",
metavar="dir",
default=None,
help="Install everything relative to this alternate root directory.",
)
self.cmd_opts.add_option(
"--prefix",
dest="prefix_path",
metavar="dir",
default=None,
help=(
"Installation prefix where lib, bin and other top-level "
"folders are placed. Note that the resulting installation may "
"contain scripts and other resources which reference the "
"Python interpreter of pip, and not that of ``--prefix``. "
"See also the ``--python`` option if the intention is to "
"install packages into another (possibly pip-free) "
"environment."
),
)
self.cmd_opts.add_option(cmdoptions.src())
self.cmd_opts.add_option(
"-U",
"--upgrade",
dest="upgrade",
action="store_true",
help=(
"Upgrade all specified packages to the newest available "
"version. The handling of dependencies depends on the "
"upgrade-strategy used."
),
)
self.cmd_opts.add_option(
"--upgrade-strategy",
dest="upgrade_strategy",
default="only-if-needed",
choices=["only-if-needed", "eager"],
help=(
"Determines how dependency upgrading should be handled "
"[default: %default]. "
'"eager" - dependencies are upgraded regardless of '
"whether the currently installed version satisfies the "
"requirements of the upgraded package(s). "
'"only-if-needed" - are upgraded only when they do not '
"satisfy the requirements of the upgraded package(s)."
),
)
self.cmd_opts.add_option(
"--force-reinstall",
dest="force_reinstall",
action="store_true",
help="Reinstall all packages even if they are already up-to-date.",
)
self.cmd_opts.add_option(
"-I",
"--ignore-installed",
dest="ignore_installed",
action="store_true",
help=(
"Ignore the installed packages, overwriting them. "
"This can break your system if the existing package "
"is of a different version or was installed "
"with a different package manager!"
),
)
self.cmd_opts.add_option(cmdoptions.ignore_requires_python())
self.cmd_opts.add_option(cmdoptions.no_build_isolation())
self.cmd_opts.add_option(cmdoptions.use_pep517())
self.cmd_opts.add_option(cmdoptions.check_build_deps())
self.cmd_opts.add_option(cmdoptions.override_externally_managed())
self.cmd_opts.add_option(cmdoptions.config_settings())
self.cmd_opts.add_option(
"--compile",
action="store_true",
dest="compile",
default=True,
help="Compile Python source files to bytecode",
)
self.cmd_opts.add_option(
"--no-compile",
action="store_false",
dest="compile",
help="Do not compile Python source files to bytecode",
)
self.cmd_opts.add_option(
"--no-warn-script-location",
action="store_false",
dest="warn_script_location",
default=True,
help="Do not warn when installing scripts outside PATH",
)
self.cmd_opts.add_option(
"--no-warn-conflicts",
action="store_false",
dest="warn_about_conflicts",
default=True,
help="Do not warn about broken dependencies",
)
self.cmd_opts.add_option(cmdoptions.no_binary())
self.cmd_opts.add_option(cmdoptions.only_binary())
self.cmd_opts.add_option(cmdoptions.prefer_binary())
self.cmd_opts.add_option(cmdoptions.require_hashes())
self.cmd_opts.add_option(cmdoptions.progress_bar())
self.cmd_opts.add_option(cmdoptions.root_user_action())
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, self.cmd_opts)
self.cmd_opts.add_option(
"--report",
dest="json_report_file",
metavar="file",
default=None,
help=(
"Generate a JSON file describing what pip did to install "
"the provided requirements. "
"Can be used in combination with --dry-run and --ignore-installed "
"to 'resolve' the requirements. "
"When - is used as file name it writes to stdout. "
"When writing to stdout, please combine with the --quiet option "
"to avoid mixing pip logging output with JSON output."
),
)
@with_cleanup
def run(self, options: Values, args: list[str]) -> int:
if options.use_user_site and options.target_dir is not None:
raise CommandError("Can not combine '--user' and '--target'")
# Check whether the environment we're installing into is externally
# managed, as specified in PEP 668. Specifying --root, --target, or
# --prefix disables the check, since there's no reliable way to locate
# the EXTERNALLY-MANAGED file for those cases. An exception is also
# made specifically for "--dry-run --report" for convenience.
installing_into_current_environment = (
not (options.dry_run and options.json_report_file)
and options.root_path is None
and options.target_dir is None
and options.prefix_path is None
)
if (
installing_into_current_environment
and not options.override_externally_managed
):
check_externally_managed()
upgrade_strategy = "to-satisfy-only"
if options.upgrade:
upgrade_strategy = options.upgrade_strategy
cmdoptions.check_build_constraints(options)
cmdoptions.check_dist_restriction(options, check_target=True)
logger.verbose("Using %s", get_pip_version())
options.use_user_site = decide_user_install(
options.use_user_site,
prefix_path=options.prefix_path,
target_dir=options.target_dir,
root_path=options.root_path,
isolated_mode=options.isolated_mode,
)
target_temp_dir: TempDirectory | None = None
target_temp_dir_path: str | None = None
if options.target_dir:
options.ignore_installed = True
options.target_dir = os.path.abspath(options.target_dir)
if (
# fmt: off
os.path.exists(options.target_dir) and
not os.path.isdir(options.target_dir)
# fmt: on
):
raise CommandError(
"Target path exists but is not a directory, will not continue."
)
# Create a target directory for using with the target option
target_temp_dir = TempDirectory(kind="target")
target_temp_dir_path = target_temp_dir.path
self.enter_context(target_temp_dir)
session = self.get_default_session(options)
target_python = make_target_python(options)
finder = self._build_package_finder(
options=options,
session=session,
target_python=target_python,
ignore_requires_python=options.ignore_requires_python,
)
build_tracker = self.enter_context(get_build_tracker())
directory = TempDirectory(
delete=not options.no_clean,
kind="install",
globally_managed=True,
)
try:
reqs = self.get_requirements(args, options, finder, session)
wheel_cache = WheelCache(options.cache_dir)
# Only when installing is it permitted to use PEP 660.
# In other circumstances (pip wheel, pip download) we generate
# regular (i.e. non editable) metadata and wheels.
for req in reqs:
req.permit_editable_wheels = True
preparer = self.make_requirement_preparer(
temp_build_dir=directory,
options=options,
build_tracker=build_tracker,
session=session,
finder=finder,
use_user_site=options.use_user_site,
verbosity=self.verbosity,
)
resolver = self.make_resolver(
preparer=preparer,
finder=finder,
options=options,
wheel_cache=wheel_cache,
use_user_site=options.use_user_site,
ignore_installed=options.ignore_installed,
ignore_requires_python=options.ignore_requires_python,
force_reinstall=options.force_reinstall,
upgrade_strategy=upgrade_strategy,
py_version_info=options.python_version,
)
self.trace_basic_info(finder)
requirement_set = resolver.resolve(
reqs, check_supported_wheels=not options.target_dir
)
if options.json_report_file:
report = InstallationReport(requirement_set.requirements_to_install)
if options.json_report_file == "-":
print_json(data=report.to_dict())
else:
with open(options.json_report_file, "w", encoding="utf-8") as f:
json.dump(report.to_dict(), f, indent=2, ensure_ascii=False)
if options.dry_run:
would_install_items = sorted(
(r.metadata["name"], r.metadata["version"])
for r in requirement_set.requirements_to_install
)
if would_install_items:
write_output(
"Would install %s",
" ".join("-".join(item) for item in would_install_items),
)
return SUCCESS
# If there is any more preparation to do for the actual installation, do
# so now. This includes actually downloading the files in the case that
# we have been using PEP-658 metadata so far.
preparer.prepare_linked_requirements_more(
requirement_set.requirements.values()
)
try:
pip_req = requirement_set.get_requirement("pip")
except KeyError:
modifying_pip = False
else:
# If we're not replacing an already installed pip,
# we're not modifying it.
modifying_pip = pip_req.satisfied_by is None
protect_pip_from_modification_on_windows(modifying_pip=modifying_pip)
reqs_to_build = [
r for r in requirement_set.requirements_to_install if not r.is_wheel
]
_, build_failures = build(
reqs_to_build,
wheel_cache=wheel_cache,
verify=True,
)
if build_failures:
raise InstallWheelBuildError(build_failures)
to_install = resolver.get_installation_order(requirement_set)
# Check for conflicts in the package set we're installing.
conflicts: ConflictDetails | None = None
should_warn_about_conflicts = (
not options.ignore_dependencies and options.warn_about_conflicts
)
if should_warn_about_conflicts:
conflicts = self._determine_conflicts(to_install)
# Don't warn about script install locations if
# --target or --prefix has been specified
warn_script_location = options.warn_script_location
if options.target_dir or options.prefix_path:
warn_script_location = False
installed = install_given_reqs(
to_install,
root=options.root_path,
home=target_temp_dir_path,
prefix=options.prefix_path,
warn_script_location=warn_script_location,
use_user_site=options.use_user_site,
pycompile=options.compile,
progress_bar=options.progress_bar,
)
lib_locations = get_lib_location_guesses(
user=options.use_user_site,
home=target_temp_dir_path,
root=options.root_path,
prefix=options.prefix_path,
isolated=options.isolated_mode,
)
env = get_environment(lib_locations)
# Display a summary of installed packages, with extra care to
# display a package name as it was requested by the user.
installed.sort(key=operator.attrgetter("name"))
summary = []
installed_versions = {}
for distribution in env.iter_all_distributions():
installed_versions[distribution.canonical_name] = distribution.version
for package in installed:
display_name = package.name
version = installed_versions.get(canonicalize_name(display_name), None)
if version:
text = f"{display_name}-{version}"
else:
text = display_name
summary.append(text)
if conflicts is not None:
self._warn_about_conflicts(
conflicts,
resolver_variant=self.determine_resolver_variant(options),
)
installed_desc = " ".join(summary)
if installed_desc:
write_output(
"Successfully installed %s",
installed_desc,
)
except OSError as error:
show_traceback = self.verbosity >= 1
message = create_os_error_message(
error,
show_traceback,
options.use_user_site,
)
logger.error(message, exc_info=show_traceback)
return ERROR
if options.target_dir:
assert target_temp_dir
self._handle_target_dir(
options.target_dir, target_temp_dir, options.upgrade
)
if options.root_user_action == "warn":
warn_if_run_as_root()
return SUCCESS
def _handle_target_dir(
self, target_dir: str, target_temp_dir: TempDirectory, upgrade: bool
) -> None:
ensure_dir(target_dir)
# Checking both purelib and platlib directories for installed
# packages to be moved to target directory
lib_dir_list = []
# Checking both purelib and platlib directories for installed
# packages to be moved to target directory
scheme = get_scheme("", home=target_temp_dir.path)
purelib_dir = scheme.purelib
platlib_dir = scheme.platlib
data_dir = scheme.data
if os.path.exists(purelib_dir):
lib_dir_list.append(purelib_dir)
if os.path.exists(platlib_dir) and platlib_dir != purelib_dir:
lib_dir_list.append(platlib_dir)
if os.path.exists(data_dir):
lib_dir_list.append(data_dir)
for lib_dir in lib_dir_list:
for item in os.listdir(lib_dir):
if lib_dir == data_dir:
ddir = os.path.join(data_dir, item)
if any(s.startswith(ddir) for s in lib_dir_list[:-1]):
continue
target_item_dir = os.path.join(target_dir, item)
if os.path.exists(target_item_dir):
if not upgrade:
logger.warning(
"Target directory %s already exists. Specify "
"--upgrade to force replacement.",
target_item_dir,
)
continue
if os.path.islink(target_item_dir):
logger.warning(
"Target directory %s already exists and is "
"a link. pip will not automatically replace "
"links, please remove if replacement is "
"desired.",
target_item_dir,
)
continue
if os.path.isdir(target_item_dir):
shutil.rmtree(target_item_dir)
else:
os.remove(target_item_dir)
shutil.move(os.path.join(lib_dir, item), target_item_dir)
def _determine_conflicts(
self, to_install: list[InstallRequirement]
) -> ConflictDetails | None:
try:
return check_install_conflicts(to_install)
except Exception:
logger.exception(
"Error while checking for conflicts. Please file an issue on "
"pip's issue tracker: https://github.com/pypa/pip/issues/new"
)
return None
def _warn_about_conflicts(
self, conflict_details: ConflictDetails, resolver_variant: str
) -> None:
package_set, (missing, conflicting) = conflict_details
if not missing and not conflicting:
return
parts: list[str] = []
if resolver_variant == "legacy":
parts.append(
"pip's legacy dependency resolver does not consider dependency "
"conflicts when selecting packages. This behaviour is the "
"source of the following dependency conflicts."
)
else:
assert resolver_variant == "resolvelib"
parts.append(
"pip's dependency resolver does not currently take into account "
"all the packages that are installed. This behaviour is the "
"source of the following dependency conflicts."
)
# NOTE: There is some duplication here, with commands/check.py
for project_name in missing:
version = package_set[project_name][0]
for dependency in missing[project_name]:
message = (
f"{project_name} {version} requires {dependency[1]}, "
"which is not installed."
)
parts.append(message)
for project_name in conflicting:
version = package_set[project_name][0]
for dep_name, dep_version, req in conflicting[project_name]:
message = (
"{name} {version} requires {requirement}, but {you} have "
"{dep_name} {dep_version} which is incompatible."
).format(
name=project_name,
version=version,
requirement=req,
dep_name=dep_name,
dep_version=dep_version,
you=("you" if resolver_variant == "resolvelib" else "you'll"),
)
parts.append(message)
logger.critical("\n".join(parts))
def get_lib_location_guesses(
user: bool = False,
home: str | None = None,
root: str | None = None,
isolated: bool = False,
prefix: str | None = None,
) -> list[str]:
scheme = get_scheme(
"",
user=user,
home=home,
root=root,
isolated=isolated,
prefix=prefix,
)
return [scheme.purelib, scheme.platlib]
def site_packages_writable(root: str | None, isolated: bool) -> bool:
return all(
test_writable_dir(d)
for d in set(get_lib_location_guesses(root=root, isolated=isolated))
)
def decide_user_install(
use_user_site: bool | None,
prefix_path: str | None = None,
target_dir: str | None = None,
root_path: str | None = None,
isolated_mode: bool = False,
) -> bool:
"""Determine whether to do a user install based on the input options.
If use_user_site is False, no additional checks are done.
If use_user_site is True, it is checked for compatibility with other
options.
If use_user_site is None, the default behaviour depends on the environment,
which is provided by the other arguments.
"""
# In some cases (config from tox), use_user_site can be set to an integer
# rather than a bool, which 'use_user_site is False' wouldn't catch.
if (use_user_site is not None) and (not use_user_site):
logger.debug("Non-user install by explicit request")
return False
# If we have been asked for a user install explicitly, check compatibility.
if use_user_site:
if prefix_path:
raise CommandError(
"Can not combine '--user' and '--prefix' as they imply "
"different installation locations"
)
if virtualenv_no_global():
raise InstallationError(
"Can not perform a '--user' install. User site-packages "
"are not visible in this virtualenv."
)
# Catch all remaining cases which honour the site.ENABLE_USER_SITE
# value, such as a plain Python installation (e.g. no virtualenv).
if not site.ENABLE_USER_SITE:
raise InstallationError(
"Can not perform a '--user' install. User site-packages "
"are disabled for this Python."
)
logger.debug("User install by explicit request")
return True
# If we are here, user installs have not been explicitly requested/avoided
assert use_user_site is None
# user install incompatible with --prefix/--target
if prefix_path or target_dir:
logger.debug("Non-user install due to --prefix or --target option")
return False
# If user installs are not enabled, choose a non-user install
if not site.ENABLE_USER_SITE:
logger.debug("Non-user install because user site-packages disabled")
return False
# If we have permission for a non-user install, do that,
# otherwise do a user install.
if site_packages_writable(root=root_path, isolated=isolated_mode):
logger.debug("Non-user install because site-packages writeable")
return False
logger.info(
"Defaulting to user installation because normal site-packages "
"is not writeable"
)
return True
def create_os_error_message(
error: OSError, show_traceback: bool, using_user_site: bool
) -> str:
"""Format an error message for an OSError
It may occur anytime during the execution of the install command.
"""
parts = []
# Mention the error if we are not going to show a traceback
parts.append("Could not install packages due to an OSError")
if not show_traceback:
parts.append(": ")
parts.append(str(error))
else:
parts.append(".")
# Spilt the error indication from a helper message (if any)
parts[-1] += "\n"
# Suggest useful actions to the user:
# (1) using user site-packages or (2) verifying the permissions
if error.errno == errno.EACCES:
user_option_part = "Consider using the `--user` option"
permissions_part = "Check the permissions"
if not running_under_virtualenv() and not using_user_site:
parts.extend(
[
user_option_part,
" or ",
permissions_part.lower(),
]
)
else:
parts.append(permissions_part)
parts.append(".\n")
# Suggest to check "pip config debug" in case of invalid proxy
if type(error) is InvalidProxyURL:
parts.append(
'Consider checking your local proxy configuration with "pip config debug"'
)
parts.append(".\n")
# On Windows, errors like EINVAL or ENOENT may occur
# if a file or folder name exceeds 255 characters,
# or if the full path exceeds 260 characters and long path support isn't enabled.
# This condition checks for such cases and adds a hint to the error output.
if WINDOWS and error.errno in (errno.EINVAL, errno.ENOENT) and error.filename:
if any(len(part) > 255 for part in Path(error.filename).parts):
parts.append(
"HINT: This error might be caused by a file or folder name exceeding "
"255 characters, which is a Windows limitation even if long paths "
"are enabled.\n "
)
if len(error.filename) > 260:
parts.append(
"HINT: This error might have occurred since "
"this system does not have Windows Long Path "
"support enabled. You can find information on "
"how to enable this at "
"https://pip.pypa.io/warnings/enable-long-paths\n"
)
return "".join(parts).strip() + "\n"
| InstallCommand |
python | spack__spack | lib/spack/spack/repo.py | {
"start": 10127,
"end": 10870
} | class ____(types.ModuleType):
"""Allow lazy loading of modules."""
def __init__(self, namespace):
super().__init__(namespace)
self.__file__ = "(spack namespace)"
self.__path__ = []
self.__name__ = namespace
self.__package__ = namespace
self.__modules = {}
def __getattr__(self, name):
"""Getattr lazily loads modules if they're not already loaded."""
submodule = f"{self.__package__}.{name}"
try:
setattr(self, name, importlib.import_module(submodule))
except ImportError:
msg = "'{0}' object has no attribute {1}"
raise AttributeError(msg.format(type(self), name))
return getattr(self, name)
| SpackNamespace |
python | airbytehq__airbyte | airbyte-ci/connectors/connectors_qa/tests/unit_tests/test_checks/test_packaging.py | {
"start": 5036,
"end": 5792
} | class ____:
def test_fail_when_license_is_missing(self, mocker):
# Arrange
connector = mocker.MagicMock(metadata={})
# Act
result = packaging.CheckConnectorLicense()._run(connector)
# Assert
assert result.status == CheckStatus.FAILED
assert "License is missing in the metadata file" in result.message
def test_fail_when_license_is_not_valid(self, mocker):
# Arrange
connector = mocker.MagicMock(metadata={"license": "MITO"})
# Act
result = packaging.CheckConnectorLicense()._run(connector)
# Assert
assert result.status == CheckStatus.FAILED
assert "Connector is not using a valid license" in result.message
| TestCheckConnectorLicense |
python | django__django | tests/contenttypes_tests/test_views.py | {
"start": 5087,
"end": 8767
} | class ____(TestCase):
def setUp(self):
Site.objects.clear_cache()
@classmethod
def setUpTestData(cls):
cls.site_2 = Site.objects.create(domain="example2.com", name="example2.com")
cls.site_3 = Site.objects.create(domain="example3.com", name="example3.com")
@mock.patch("django.apps.apps.get_model")
def test_shortcut_view_with_null_site_fk(self, get_model):
"""
The shortcut view works if a model's ForeignKey to site is None.
"""
get_model.side_effect = lambda *args, **kwargs: (
MockSite if args[0] == "sites.Site" else ModelWithNullFKToSite
)
obj = ModelWithNullFKToSite.objects.create(title="title")
url = "/shortcut/%s/%s/" % (
ContentType.objects.get_for_model(ModelWithNullFKToSite).id,
obj.pk,
)
response = self.client.get(url)
expected_url = "http://example.com%s" % obj.get_absolute_url()
self.assertRedirects(response, expected_url, fetch_redirect_response=False)
@mock.patch("django.apps.apps.get_model")
def test_shortcut_view_with_site_m2m(self, get_model):
"""
When the object has a ManyToManyField to Site, redirect to the current
site if it's attached to the object or to the domain of the first site
found in the m2m relationship.
"""
get_model.side_effect = lambda *args, **kwargs: (
MockSite if args[0] == "sites.Site" else ModelWithM2MToSite
)
# get_current_site() will lookup a Site object, so these must match the
# domains in the MockSite model.
MockSite.objects.bulk_create(
[
MockSite(pk=1, domain="example.com"),
MockSite(pk=self.site_2.pk, domain=self.site_2.domain),
MockSite(pk=self.site_3.pk, domain=self.site_3.domain),
]
)
ct = ContentType.objects.get_for_model(ModelWithM2MToSite)
site_3_obj = ModelWithM2MToSite.objects.create(
title="Not Linked to Current Site"
)
site_3_obj.sites.add(MockSite.objects.get(pk=self.site_3.pk))
expected_url = "http://%s%s" % (
self.site_3.domain,
site_3_obj.get_absolute_url(),
)
with self.settings(SITE_ID=self.site_2.pk):
# Redirects to the domain of the first Site found in the m2m
# relationship (ordering is arbitrary).
response = self.client.get("/shortcut/%s/%s/" % (ct.pk, site_3_obj.pk))
self.assertRedirects(response, expected_url, fetch_redirect_response=False)
obj_with_sites = ModelWithM2MToSite.objects.create(
title="Linked to Current Site"
)
obj_with_sites.sites.set(MockSite.objects.all())
shortcut_url = "/shortcut/%s/%s/" % (ct.pk, obj_with_sites.pk)
expected_url = "http://%s%s" % (
self.site_2.domain,
obj_with_sites.get_absolute_url(),
)
with self.settings(SITE_ID=self.site_2.pk):
# Redirects to the domain of the Site matching the current site's
# domain.
response = self.client.get(shortcut_url)
self.assertRedirects(response, expected_url, fetch_redirect_response=False)
with self.settings(SITE_ID=None, ALLOWED_HOSTS=[self.site_2.domain]):
# Redirects to the domain of the Site matching the request's host
# header.
response = self.client.get(shortcut_url, SERVER_NAME=self.site_2.domain)
self.assertRedirects(response, expected_url, fetch_redirect_response=False)
| ContentTypesViewsSiteRelTests |
python | tensorflow__tensorflow | tensorflow/tools/ci_build/osx/arm64/tensorflow_metal_plugin_test.py | {
"start": 93617,
"end": 95903
} | class ____(test.TestCase):
# AddN special-cases adding the first M inputs to make (N - M) divisible by 8,
# after which it adds the remaining (N - M) tensors 8 at a time in a loop.
# Test N in [1, 10] so we check each special-case from 1 to 9 and one
# iteration of the loop.
_MAX_N = 10
def _supported_types(self):
if test.is_gpu_available():
return [
dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.complex64,
dtypes.complex128,
dtypes.int64,
dtypes.bfloat16,
]
return [
dtypes.int8,
dtypes.int16,
dtypes.int32,
dtypes.int64,
dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.complex64,
dtypes.complex128,
dtypes.bfloat16,
]
def _buildData(self, shape, dtype):
data = np.random.randn(*shape).astype(dtype.as_numpy_dtype)
# For complex types, add an index-dependent imaginary component so we can
# tell we got the right value.
if dtype.is_complex:
return data + 10j * data
return data
def testAddN(self):
np.random.seed(12345)
with self.session(use_gpu=True) as _:
for dtype in self._supported_types():
for count in range(1, self._MAX_N + 1):
data = [self._buildData((2, 2), dtype) for _ in range(count)]
actual = self.evaluate(math_ops.add_n(data))
expected = np.sum(
np.vstack([np.expand_dims(d, 0) for d in data]), axis=0
)
tol = 5e-3 if dtype == dtypes.float16 else 5e-7
if dtype == dtypes.bfloat16:
tol = 2e-2
self.assertAllClose(expected, actual, rtol=tol, atol=tol)
def testBigAddN(self):
np.random.seed(12345)
with self.session(use_gpu=True) as _:
for dtype in self._supported_types():
for count in range(10, 31):
data = [self._buildData((2, 2), dtype) for _ in range(count)]
actual = self.evaluate(math_ops.add_n(data))
expected = np.sum(
np.vstack([np.expand_dims(d, 0) for d in data]), axis=0
)
tol = 5e-2 if dtype in [dtypes.float16, dtypes.bfloat16] else 5e-6
self.assertAllClose(expected, actual, rtol=tol, atol=tol)
| AddNTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclassConverter3.py | {
"start": 286,
"end": 462
} | class ____[T](ModelBase):
data: set[T] = model_field(converter=set)
x = DC1([1, 2])
reveal_type(x, expected_text="DC1[int]")
reveal_type(x.data, expected_text="set[int]")
| DC1 |
python | pandas-dev__pandas | pandas/io/sql.py | {
"start": 32785,
"end": 52006
} | class ____(PandasObject):
"""
For mapping Pandas tables to SQL tables.
Uses fact that table is reflected by SQLAlchemy to
do better type conversions.
Also holds various flags needed to avoid having to
pass them between functions all the time.
"""
# TODO: support for multiIndex
def __init__(
self,
name: str,
pandas_sql_engine,
frame=None,
index: bool | str | list[str] | None = True,
if_exists: Literal["fail", "replace", "append", "delete_rows"] = "fail",
prefix: str = "pandas",
index_label=None,
schema=None,
keys=None,
dtype: DtypeArg | None = None,
) -> None:
self.name = name
self.pd_sql = pandas_sql_engine
self.prefix = prefix
self.frame = frame
self.index = self._index_name(index, index_label)
self.schema = schema
self.if_exists = if_exists
self.keys = keys
self.dtype = dtype
if frame is not None:
# We want to initialize based on a dataframe
self.table = self._create_table_setup()
else:
# no data provided, read-only mode
self.table = self.pd_sql.get_table(self.name, self.schema)
if self.table is None:
raise ValueError(f"Could not init table '{name}'")
if not len(self.name):
raise ValueError("Empty table name specified")
def exists(self):
return self.pd_sql.has_table(self.name, self.schema)
def sql_schema(self) -> str:
from sqlalchemy.schema import CreateTable
return str(CreateTable(self.table).compile(self.pd_sql.con))
def _execute_create(self) -> None:
# Inserting table into database, add to MetaData object
self.table = self.table.to_metadata(self.pd_sql.meta)
with self.pd_sql.run_transaction():
self.table.create(bind=self.pd_sql.con)
def create(self) -> None:
if self.exists():
if self.if_exists == "fail":
raise ValueError(f"Table '{self.name}' already exists.")
elif self.if_exists == "replace":
self.pd_sql.drop_table(self.name, self.schema)
self._execute_create()
elif self.if_exists == "append":
pass
elif self.if_exists == "delete_rows":
self.pd_sql.delete_rows(self.name, self.schema)
else:
raise ValueError(f"'{self.if_exists}' is not valid for if_exists")
else:
self._execute_create()
def _execute_insert(self, conn, keys: list[str], data_iter) -> int:
"""
Execute SQL statement inserting data
Parameters
----------
conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
keys : list of str
Column names
data_iter : generator of list
Each item contains a list of values to be inserted
"""
data = [dict(zip(keys, row, strict=True)) for row in data_iter]
result = self.pd_sql.execute(self.table.insert(), data)
return result.rowcount
def _execute_insert_multi(self, conn, keys: list[str], data_iter) -> int:
"""
Alternative to _execute_insert for DBs support multi-value INSERT.
Note: multi-value insert is usually faster for analytics DBs
and tables containing a few columns
but performance degrades quickly with increase of columns.
"""
from sqlalchemy import insert
data = [dict(zip(keys, row, strict=True)) for row in data_iter]
stmt = insert(self.table).values(data)
result = self.pd_sql.execute(stmt)
return result.rowcount
def insert_data(self) -> tuple[list[str], list[np.ndarray]]:
if self.index is not None:
temp = self.frame.copy(deep=False)
temp.index.names = self.index
try:
temp.reset_index(inplace=True)
except ValueError as err:
raise ValueError(f"duplicate name in index/columns: {err}") from err
else:
temp = self.frame
column_names = list(map(str, temp.columns))
ncols = len(column_names)
# this just pre-allocates the list: None's will be replaced with ndarrays
# error: List item 0 has incompatible type "None"; expected "ndarray"
data_list: list[np.ndarray] = [None] * ncols # type: ignore[list-item]
for i, (_, ser) in enumerate(temp.items()):
if ser.dtype.kind == "M":
if isinstance(ser._values, ArrowExtensionArray):
import pyarrow as pa
if pa.types.is_date(ser.dtype.pyarrow_dtype):
# GH#53854 to_pydatetime not supported for pyarrow date dtypes
d = ser._values.to_numpy(dtype=object)
else:
d = ser.dt.to_pydatetime()._values
else:
d = ser._values.to_pydatetime()
elif ser.dtype.kind == "m":
vals = ser._values
if isinstance(vals, ArrowExtensionArray):
vals = vals.to_numpy(dtype=np.dtype("m8[ns]"))
# store as integers, see GH#6921, GH#7076
d = vals.view("i8").astype(object)
else:
d = ser._values.astype(object)
assert isinstance(d, np.ndarray), type(d)
if ser._can_hold_na:
# Note: this will miss timedeltas since they are converted to int
mask = isna(d)
d[mask] = None
data_list[i] = d
return column_names, data_list
def insert(
self,
chunksize: int | None = None,
method: Literal["multi"] | Callable | None = None,
) -> int | None:
# set insert method
if method is None:
exec_insert = self._execute_insert
elif method == "multi":
exec_insert = self._execute_insert_multi
elif callable(method):
exec_insert = partial(method, self)
else:
raise ValueError(f"Invalid parameter `method`: {method}")
keys, data_list = self.insert_data()
nrows = len(self.frame)
if nrows == 0:
return 0
if chunksize is None:
chunksize = nrows
elif chunksize == 0:
raise ValueError("chunksize argument should be non-zero")
chunks = (nrows // chunksize) + 1
total_inserted = None
with self.pd_sql.run_transaction() as conn:
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
chunk_iter = zip(
*(arr[start_i:end_i] for arr in data_list), strict=True
)
num_inserted = exec_insert(conn, keys, chunk_iter)
# GH 46891
if num_inserted is not None:
if total_inserted is None:
total_inserted = num_inserted
else:
total_inserted += num_inserted
return total_inserted
def _query_iterator(
self,
result,
exit_stack: ExitStack,
chunksize: int | None,
columns,
coerce_float: bool = True,
parse_dates=None,
dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
) -> Generator[DataFrame]:
"""Return generator through chunked result set."""
has_read_data = False
with exit_stack:
while True:
data = result.fetchmany(chunksize)
if not data:
if not has_read_data:
yield DataFrame.from_records(
[], columns=columns, coerce_float=coerce_float
)
break
has_read_data = True
self.frame = _convert_arrays_to_dataframe(
data, columns, coerce_float, dtype_backend
)
self._harmonize_columns(
parse_dates=parse_dates, dtype_backend=dtype_backend
)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
yield self.frame
def read(
self,
exit_stack: ExitStack,
coerce_float: bool = True,
parse_dates=None,
columns=None,
chunksize: int | None = None,
dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
) -> DataFrame | Iterator[DataFrame]:
from sqlalchemy import select
if columns is not None and len(columns) > 0:
cols = [self.table.c[n] for n in columns]
if self.index is not None:
for idx in self.index[::-1]:
cols.insert(0, self.table.c[idx])
sql_select = select(*cols)
else:
sql_select = select(self.table)
result = self.pd_sql.execute(sql_select)
column_names = result.keys()
if chunksize is not None:
return self._query_iterator(
result,
exit_stack,
chunksize,
column_names,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype_backend=dtype_backend,
)
else:
data = result.fetchall()
self.frame = _convert_arrays_to_dataframe(
data, column_names, coerce_float, dtype_backend
)
self._harmonize_columns(
parse_dates=parse_dates, dtype_backend=dtype_backend
)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
return self.frame
def _index_name(self, index, index_label):
# for writing: index=True to include index in sql table
if index is True:
nlevels = self.frame.index.nlevels
# if index_label is specified, set this as index name(s)
if index_label is not None:
if not isinstance(index_label, list):
index_label = [index_label]
if len(index_label) != nlevels:
raise ValueError(
"Length of 'index_label' should match number of "
f"levels, which is {nlevels}"
)
return index_label
# return the used column labels for the index columns
if (
nlevels == 1
and "index" not in self.frame.columns
and self.frame.index.name is None
):
return ["index"]
else:
return com.fill_missing_names(self.frame.index.names)
# for reading: index=(list of) string to specify column to set as index
elif isinstance(index, str):
return [index]
elif isinstance(index, list):
return index
else:
return None
def _get_column_names_and_types(self, dtype_mapper):
column_names_and_types = []
if self.index is not None:
for i, idx_label in enumerate(self.index):
idx_type = dtype_mapper(self.frame.index._get_level_values(i))
column_names_and_types.append((str(idx_label), idx_type, True))
column_names_and_types += [
(str(self.frame.columns[i]), dtype_mapper(self.frame.iloc[:, i]), False)
for i in range(len(self.frame.columns))
]
return column_names_and_types
def _create_table_setup(self):
from sqlalchemy import (
Column,
PrimaryKeyConstraint,
Table,
)
from sqlalchemy.schema import MetaData
column_names_and_types = self._get_column_names_and_types(self._sqlalchemy_type)
columns: list[Any] = [
Column(name, typ, index=is_index)
for name, typ, is_index in column_names_and_types
]
if self.keys is not None:
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
pkc = PrimaryKeyConstraint(*keys, name=self.name + "_pk")
columns.append(pkc)
schema = self.schema or self.pd_sql.meta.schema
# At this point, attach to new metadata, only attach to self.meta
# once table is created.
meta = MetaData()
return Table(self.name, meta, *columns, schema=schema)
def _harmonize_columns(
    self,
    parse_dates=None,
    dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
) -> None:
    """
    Make the DataFrame's column types align with the SQL table
    column types.

    Need to work around limited NA value support. Floats are always
    fine, ints must always be floats if there are Null values.
    Booleans are hard because converting bool column with None replaces
    all Nones with false. Therefore only convert bool if there are no
    NA values.
    Datetimes should already be converted to np.datetime64 if supported,
    but here we also force conversion if required.

    Mutates ``self.frame`` in place; columns present in the SQL table
    but absent from the frame are skipped silently.
    """
    parse_dates = _process_parse_dates_argument(parse_dates)

    for sql_col in self.table.columns:
        col_name = sql_col.name
        try:
            # Raises KeyError if the SQL column is not in the frame.
            df_col = self.frame[col_name]

            # Handle date parsing upfront; don't try to convert columns
            # twice
            if col_name in parse_dates:
                try:
                    # parse_dates may map column -> format string; if it
                    # is a list instead, indexing with a name raises
                    # TypeError and we fall back to no explicit format.
                    fmt = parse_dates[col_name]
                except TypeError:
                    fmt = None
                self.frame[col_name] = _handle_date_column(df_col, format=fmt)
                continue

            # the type the dataframe column should have
            col_type = self._get_dtype(sql_col.type)

            if (
                col_type is datetime
                or col_type is date
                or col_type is DatetimeTZDtype
            ):
                # Convert tz-aware Datetime SQL columns to UTC
                utc = col_type is DatetimeTZDtype
                self.frame[col_name] = _handle_date_column(df_col, utc=utc)
            elif dtype_backend == "numpy" and col_type is float:
                # floats support NA, can always convert!
                self.frame[col_name] = df_col.astype(col_type)
            elif (
                using_string_dtype()
                and is_string_dtype(col_type)
                and is_object_dtype(self.frame[col_name])
            ):
                # Opt-in string dtype: upgrade object columns to the
                # pandas string dtype.
                self.frame[col_name] = df_col.astype(col_type)
            elif dtype_backend == "numpy" and len(df_col) == df_col.count():
                # No NA values, can convert ints and bools
                if col_type is np.dtype("int64") or col_type is bool:
                    self.frame[col_name] = df_col.astype(col_type)
        except KeyError:
            pass  # this column not in results
def _sqlalchemy_type(self, col: Index | Series):
    """Map *col* to a SQLAlchemy column type.

    A user-supplied ``self.dtype`` mapping takes precedence; otherwise
    the type is inferred from the column's non-missing values.

    Raises
    ------
    ValueError
        For unsigned 64-bit integer or complex columns, which have no
        supported SQL representation.
    """
    dtype: DtypeArg = self.dtype or {}
    if is_dict_like(dtype):
        dtype = cast(dict, dtype)
        if col.name in dtype:
            # Explicit user override for this column.
            return dtype[col.name]

    # Infer type of column, while ignoring missing values.
    # Needed for inserting typed data containing NULLs, GH 8778.
    inferred = lib.infer_dtype(col, skipna=True)

    from sqlalchemy.types import (
        TIMESTAMP,
        BigInteger,
        Boolean,
        Date,
        DateTime,
        Float,
        Integer,
        SmallInteger,
        Text,
        Time,
    )

    if inferred in ("datetime64", "datetime"):
        # GH 9086: TIMESTAMP is the suggested type if the column contains
        # timezone information
        try:
            # error: Item "Index" of "Union[Index, Series]" has no attribute "dt"
            tz = col.dt.tz  # type: ignore[union-attr]
        except AttributeError:
            # The column is actually a DatetimeIndex
            # GH 26761 or an Index with date-like data e.g. 9999-01-01
            tz = getattr(col, "tz", None)
        return TIMESTAMP(timezone=True) if tz is not None else DateTime
    if inferred == "timedelta64":
        warnings.warn(
            "the 'timedelta' type is not supported, and will be "
            "written as integer values (ns frequency) to the database.",
            UserWarning,
            stacklevel=find_stack_level(),
        )
        return BigInteger
    if inferred == "floating":
        # Single-precision input keeps a single-precision SQL float.
        return Float(precision=23) if col.dtype == "float32" else Float(precision=53)
    if inferred == "integer":
        # GH35076 Map pandas integer to optimal SQLAlchemy integer type
        dtype_name = col.dtype.name.lower()
        if dtype_name in ("int8", "uint8", "int16"):
            return SmallInteger
        if dtype_name in ("uint16", "int32"):
            return Integer
        if dtype_name == "uint64":
            raise ValueError("Unsigned 64 bit integer datatype is not supported")
        return BigInteger
    if inferred == "boolean":
        return Boolean
    if inferred == "date":
        return Date
    if inferred == "time":
        return Time
    if inferred == "complex":
        raise ValueError("Complex datatypes not supported")
    # Everything else (strings, mixed, ...) is stored as text.
    return Text
def _get_dtype(self, sqltype):
    """Translate a SQLAlchemy column type into the type/dtype the
    DataFrame column should end up with."""
    from sqlalchemy.types import (
        TIMESTAMP,
        Boolean,
        Date,
        DateTime,
        Float,
        Integer,
        String,
    )

    if isinstance(sqltype, Float):
        return float
    if isinstance(sqltype, Integer):
        # TODO: Refine integer size.
        return np.dtype("int64")
    if isinstance(sqltype, TIMESTAMP):
        # TIMESTAMP subclasses DateTime, so it must be checked first;
        # only the timezone-capable flavour maps to DatetimeTZDtype.
        return DatetimeTZDtype if sqltype.timezone else datetime
    if isinstance(sqltype, DateTime):
        # Caution: np.datetime64 is also a subclass of np.number.
        return datetime
    if isinstance(sqltype, Date):
        return date
    if isinstance(sqltype, Boolean):
        return bool
    if isinstance(sqltype, String):
        return StringDtype(na_value=np.nan) if using_string_dtype() else object
    return object
| SQLTable |
python | scikit-learn__scikit-learn | sklearn/utils/_testing.py | {
"start": 42609,
"end": 43860
} | class ____:
"""Minimal regressor implementation without inheriting from BaseEstimator.
This estimator should be tested with:
* `check_estimator` in `test_estimator_checks.py`;
* within a `Pipeline` in `test_pipeline.py`;
* within a `SearchCV` in `test_search.py`.
"""
def __init__(self, param=None):
self.param = param
def get_params(self, deep=True):
return {"param": self.param}
def set_params(self, **params):
for key, value in params.items():
setattr(self, key, value)
return self
def fit(self, X, y):
X, y = check_X_y(X, y)
self.is_fitted_ = True
self._mean = np.mean(y)
return self
def predict(self, X):
check_is_fitted(self)
X = check_array(X)
return np.ones(shape=(X.shape[0],)) * self._mean
def score(self, X, y):
from sklearn.metrics import r2_score
return r2_score(y, self.predict(X))
def __sklearn_tags__(self):
return Tags(
estimator_type="regressor",
classifier_tags=None,
regressor_tags=RegressorTags(),
transformer_tags=None,
target_tags=TargetTags(required=True),
)
| MinimalRegressor |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.