language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ray-project__ray | python/ray/_private/authentication/grpc_authentication_server_interceptor.py | {
"start": 1291,
"end": 4360
} | class ____(aiogrpc.ServerInterceptor):
"""Async gRPC server interceptor that validates authentication tokens.
This interceptor checks the "authorization" metadata header for a valid
Bearer token when token authentication is enabled via RAY_AUTH_MODE=token.
If the token is missing or invalid, the request is rejected with UNAUTHENTICATED status.
"""
async def intercept_service(
self,
continuation: Callable[
[grpc.HandlerCallDetails], Awaitable[grpc.RpcMethodHandler]
],
handler_call_details: grpc.HandlerCallDetails,
) -> grpc.RpcMethodHandler:
"""Intercept service calls to validate authentication.
This method is called once per RPC to get the handler. We wrap the handler
to validate authentication before executing the actual RPC method.
"""
# Get the actual handler
handler = await continuation(handler_call_details)
if handler is None:
return None
# Wrap the RPC behavior with authentication check
def wrap_rpc_behavior(behavior):
"""Wrap an RPC method to validate authentication first."""
if behavior is None:
return None
async def wrapped(request_or_iterator, context):
if not _authenticate_request(context.invocation_metadata()):
await context.abort(
grpc.StatusCode.UNAUTHENTICATED,
"Invalid or missing authentication token",
)
return await behavior(request_or_iterator, context)
return wrapped
# Create a wrapper class that implements RpcMethodHandler interface
class AuthenticatedHandler:
"""Wrapper handler that validates authentication."""
def __init__(self, original_handler, wrapper_func):
self._original = original_handler
self._wrap = wrapper_func
@property
def request_streaming(self):
return self._original.request_streaming
@property
def response_streaming(self):
return self._original.response_streaming
@property
def request_deserializer(self):
return self._original.request_deserializer
@property
def response_serializer(self):
return self._original.response_serializer
@property
def unary_unary(self):
return self._wrap(self._original.unary_unary)
@property
def unary_stream(self):
return self._wrap(self._original.unary_stream)
@property
def stream_unary(self):
return self._wrap(self._original.stream_unary)
@property
def stream_stream(self):
return self._wrap(self._original.stream_stream)
return AuthenticatedHandler(handler, wrap_rpc_behavior)
| AsyncAuthenticationServerInterceptor |
python | numpy__numpy | numpy/_core/_internal.py | {
"start": 7214,
"end": 7377
} | class ____:
def cast(self, num, obj):
return num.value
class c_void_p:
def __init__(self, ptr):
self.value = ptr
| _missing_ctypes |
python | doocs__leetcode | solution/0200-0299/0214.Shortest Palindrome/Solution.py | {
"start": 0,
"end": 508
} | class ____:
def shortestPalindrome(self, s: str) -> str:
base = 131
mod = 10**9 + 7
n = len(s)
prefix = suffix = 0
mul = 1
idx = 0
for i, c in enumerate(s):
prefix = (prefix * base + (ord(c) - ord('a') + 1)) % mod
suffix = (suffix + (ord(c) - ord('a') + 1) * mul) % mod
mul = (mul * base) % mod
if prefix == suffix:
idx = i + 1
return s if idx == n else s[idx:][::-1] + s
| Solution |
python | PyCQA__pylint | tests/functional/s/super/super_with_arguments.py | {
"start": 126,
"end": 195
} | class ____(Foo):
def __init__(self):
super().__init__()
| Baz |
python | google__pytype | pytype/tests/test_recursive_types.py | {
"start": 71,
"end": 1782
} | class ____(test_base.BaseTest):
"""Tests usage of recursive types in source code."""
def test_parameter(self):
self.Check("""
from typing import List, Union
Foo = Union[str, List['Foo']]
def f(x: Foo):
pass
""")
def test_comment(self):
self.Check("""
from typing import List, Union
Foo = Union[str, List['Foo']]
x = 'hello' # type: Foo
""")
def test_alias(self):
self.Check("""
from typing import Any, Iterable, Union
X = Union[Any, Iterable['X']]
Y = Union[Any, X]
""")
def test_generic_alias(self):
src = """
from typing import List, TypeVar, Union
T = TypeVar('T')
Tree = Union[T, List['Tree{inner_parameter}']]
def f(x: Tree[int]): ...
"""
for inner_parameter in ("", "[T]"):
with self.subTest(inner_parameter=inner_parameter):
self.Check(src.format(inner_parameter=inner_parameter))
def test_generic_alias_rename_type_params(self):
self.CheckWithErrors("""
from typing import List, Set, TypeVar, Union
T1 = TypeVar('T1')
T2 = TypeVar('T2')
X = Union[T1, Set[T2], List['X[T2, T1]']]
Y = X[int, str]
ok1: Y = 0
ok2: Y = {''}
ok3: Y = ['']
ok4: Y = [{0}]
bad1: Y = '' # annotation-type-mismatch
bad2: Y = {0} # annotation-type-mismatch
bad3: Y = [0] # annotation-type-mismatch
bad4: Y = [{''}] # annotation-type-mismatch
""")
def test_init(self):
self.Check("""
from typing import MutableSequence
class Node:
def __init__(self, *args: 'Node'):
pass
class MutableNode(Node, MutableSequence):
pass
""")
| UsageTest |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_compiler.py | {
"start": 130939,
"end": 139790
} | class ____(
fixtures.MappedTest,
AssertsCompiledSQL,
fixtures.CacheKeySuite,
fixtures.DistinctOnFixture,
):
"""Test 'DISTINCT' with SQL expression language and orm.Query with
an emphasis on PG's 'DISTINCT ON' syntax.
"""
__dialect__ = postgresql.dialect()
def setup_test(self):
self.table = Table(
"t",
MetaData(),
Column("id", Integer, primary_key=True),
Column("a", String),
Column("b", String),
)
def test_distinct_on_no_cols(self, distinct_on_fixture):
self.assert_compile(
distinct_on_fixture(select(self.table)),
"SELECT DISTINCT t.id, t.a, t.b FROM t",
)
def test_distinct_on_cols(self, distinct_on_fixture):
self.assert_compile(
distinct_on_fixture(select(self.table), self.table.c.a),
"SELECT DISTINCT ON (t.a) t.id, t.a, t.b FROM t",
)
self.assert_compile(
distinct_on_fixture(
self.table.select(), self.table.c.a, self.table.c.b
),
"SELECT DISTINCT ON (t.a, t.b) t.id, t.a, t.b FROM t",
checkparams={},
)
def test_distinct_on_columns_generative_multi_call(
self, distinct_on_fixture
):
stmt = select(self.table)
stmt = distinct_on_fixture(stmt, self.table.c.a)
stmt = distinct_on_fixture(stmt, self.table.c.b)
self.assert_compile(
stmt,
"SELECT DISTINCT ON (t.a, t.b) t.id, t.a, t.b FROM t",
)
def test_distinct_on_dupe_columns_generative_multi_call(
self, distinct_on_fixture
):
stmt = select(self.table)
stmt = distinct_on_fixture(stmt, self.table.c.a)
stmt = distinct_on_fixture(stmt, self.table.c.a)
self.assert_compile(
stmt,
"SELECT DISTINCT ON (t.a, t.a) t.id, t.a, t.b FROM t",
)
def test_legacy_query_plain(self, distinct_on_fixture):
sess = Session()
self.assert_compile(
distinct_on_fixture(sess.query(self.table)),
"SELECT DISTINCT t.id AS t_id, t.a AS t_a, t.b AS t_b FROM t",
)
def test_legacy_query_on_columns(self, distinct_on_fixture):
sess = Session()
self.assert_compile(
distinct_on_fixture(sess.query(self.table), self.table.c.a),
"SELECT DISTINCT ON (t.a) t.id AS t_id, t.a AS t_a, "
"t.b AS t_b FROM t",
)
def test_legacy_query_distinct_on_columns_multi_call(
self, distinct_on_fixture
):
sess = Session()
self.assert_compile(
distinct_on_fixture(
distinct_on_fixture(sess.query(self.table), self.table.c.a),
self.table.c.b,
),
"SELECT DISTINCT ON (t.a, t.b) t.id AS t_id, t.a AS t_a, "
"t.b AS t_b FROM t",
)
def test_legacy_query_distinct_on_columns_subquery(
self, distinct_on_fixture
):
sess = Session()
class Foo:
pass
clear_mappers()
self.mapper_registry.map_imperatively(Foo, self.table)
sess = Session()
subq = sess.query(Foo).subquery()
f1 = aliased(Foo, subq)
self.assert_compile(
distinct_on_fixture(sess.query(f1), f1.a, f1.b),
"SELECT DISTINCT ON (anon_1.a, anon_1.b) anon_1.id "
"AS anon_1_id, anon_1.a AS anon_1_a, anon_1.b "
"AS anon_1_b FROM (SELECT t.id AS id, t.a AS a, "
"t.b AS b FROM t) AS anon_1",
)
def test_legacy_query_distinct_on_aliased(self, distinct_on_fixture):
class Foo:
pass
clear_mappers()
self.mapper_registry.map_imperatively(Foo, self.table)
a1 = aliased(Foo)
sess = Session()
q = distinct_on_fixture(sess.query(a1), a1.a)
self.assert_compile(
q,
"SELECT DISTINCT ON (t_1.a) t_1.id AS t_1_id, "
"t_1.a AS t_1_a, t_1.b AS t_1_b FROM t AS t_1",
)
def test_distinct_on_subquery_anon(self, distinct_on_fixture):
sq = select(self.table).alias()
q = distinct_on_fixture(
select(self.table.c.id, sq.c.id), sq.c.id
).where(self.table.c.id == sq.c.id)
self.assert_compile(
q,
"SELECT DISTINCT ON (anon_1.id) t.id, anon_1.id AS id_1 "
"FROM t, (SELECT t.id AS id, t.a AS a, t.b "
"AS b FROM t) AS anon_1 WHERE t.id = anon_1.id",
)
def test_distinct_on_subquery_named(self, distinct_on_fixture):
sq = select(self.table).alias("sq")
q = distinct_on_fixture(
select(self.table.c.id, sq.c.id), sq.c.id
).where(self.table.c.id == sq.c.id)
self.assert_compile(
q,
"SELECT DISTINCT ON (sq.id) t.id, sq.id AS id_1 "
"FROM t, (SELECT t.id AS id, t.a AS a, "
"t.b AS b FROM t) AS sq WHERE t.id = sq.id",
)
@fixtures.CacheKeySuite.run_suite_tests
def test_distinct_on_ext_cache_key(self):
def leg():
with expect_deprecated("Passing expression"):
return self.table.select().distinct(self.table.c.a)
return lambda: [
self.table.select().ext(distinct_on(self.table.c.a)),
self.table.select().ext(distinct_on(self.table.c.b)),
self.table.select().ext(
distinct_on(self.table.c.a, self.table.c.b)
),
self.table.select().ext(
distinct_on(self.table.c.b, self.table.c.a)
),
self.table.select(),
self.table.select().distinct(),
leg(),
]
def test_distinct_on_cache_key_equal(self, distinct_on_fixture):
self._run_cache_key_equal_fixture(
lambda: [
distinct_on_fixture(self.table.select(), self.table.c.a),
distinct_on_fixture(select(self.table), self.table.c.a),
],
compare_values=True,
)
self._run_cache_key_equal_fixture(
lambda: [
distinct_on_fixture(
distinct_on_fixture(self.table.select(), self.table.c.a),
self.table.c.b,
),
distinct_on_fixture(
select(self.table), self.table.c.a, self.table.c.b
),
],
compare_values=True,
)
def test_distinct_on_literal_binds(self, distinct_on_fixture):
self.assert_compile(
distinct_on_fixture(select(self.table), self.table.c.a == 10),
"SELECT DISTINCT ON (t.a = 10) t.id, t.a, t.b FROM t",
literal_binds=True,
)
def test_distinct_on_col_str(self, distinct_on_fixture):
stmt = distinct_on_fixture(select(self.table), "a")
self.assert_compile(
stmt,
"SELECT DISTINCT ON (t.a) t.id, t.a, t.b FROM t",
dialect="postgresql",
)
def test_distinct_on_label(self, distinct_on_fixture):
stmt = distinct_on_fixture(select(self.table.c.a.label("foo")), "foo")
self.assert_compile(stmt, "SELECT DISTINCT ON (foo) t.a AS foo FROM t")
def test_unresolvable_distinct_label(self, distinct_on_fixture):
stmt = distinct_on_fixture(
select(self.table.c.a.label("foo")), "not a label"
)
with expect_raises_message(
exc.CompileError,
"Can't resolve label reference for.* expression 'not a"
" label' should be explicitly",
):
self.assert_compile(stmt, "ingored")
def test_distinct_on_ext_with_legacy_distinct(self):
with (
expect_raises_message(
exc.InvalidRequestError,
re.escape(
"Cannot mix ``select.ext(distinct_on(...))`` and "
"``select.distinct(...)``"
),
),
expect_deprecated("Passing expression"),
):
s = (
self.table.select()
.distinct(self.table.c.b)
.ext(distinct_on(self.table.c.a))
)
# opposite order is not detected...
with expect_deprecated("Passing expression"):
s = (
self.table.select()
.ext(distinct_on(self.table.c.a))
.distinct(self.table.c.b)
)
# but it raises while compiling
with expect_raises_message(
exc.CompileError,
re.escape(
"Cannot mix ``select.ext(distinct_on(...))`` and "
"``select.distinct(...)``"
),
):
self.assert_compile(s, "ignored")
| DistinctOnTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/override2.py | {
"start": 374,
"end": 574
} | class ____:
@override
def __init__(self):
pass
def method1(self):
pass
@property
def prop_c(self) -> int:
return 0
def method2(self):
pass
| Base |
python | redis__redis-py | redis/connection.py | {
"start": 2468,
"end": 4368
} | class ____:
def __init__(self, buffer_cutoff, encode) -> None:
self._buffer_cutoff = buffer_cutoff
self.encode = encode
def pack(self, *args):
"""Pack a series of arguments into the Redis protocol"""
output = []
# the client might have included 1 or more literal arguments in
# the command name, e.g., 'CONFIG GET'. The Redis server expects these
# arguments to be sent separately, so split the first argument
# manually. These arguments should be bytestrings so that they are
# not encoded.
if isinstance(args[0], str):
args = tuple(args[0].encode().split()) + args[1:]
elif b" " in args[0]:
args = tuple(args[0].split()) + args[1:]
buff = SYM_EMPTY.join((SYM_STAR, str(len(args)).encode(), SYM_CRLF))
buffer_cutoff = self._buffer_cutoff
for arg in map(self.encode, args):
# to avoid large string mallocs, chunk the command into the
# output list if we're sending large values or memoryviews
arg_length = len(arg)
if (
len(buff) > buffer_cutoff
or arg_length > buffer_cutoff
or isinstance(arg, memoryview)
):
buff = SYM_EMPTY.join(
(buff, SYM_DOLLAR, str(arg_length).encode(), SYM_CRLF)
)
output.append(buff)
output.append(arg)
buff = SYM_CRLF
else:
buff = SYM_EMPTY.join(
(
buff,
SYM_DOLLAR,
str(arg_length).encode(),
SYM_CRLF,
arg,
SYM_CRLF,
)
)
output.append(buff)
return output
| PythonRespSerializer |
python | pennersr__django-allauth | allauth/account/forms.py | {
"start": 23799,
"end": 24887
} | class ____(forms.Form):
email = EmailField(required=True)
def clean_email(self):
email = self.cleaned_data["email"].lower()
email = get_adapter().clean_email(email)
self.users = filter_users_by_email(email, is_active=True, prefer_verified=True)
if not self.users and not app_settings.PREVENT_ENUMERATION:
raise get_adapter().validation_error("unknown_email")
return self.cleaned_data["email"]
def save(self, request, **kwargs) -> str:
email = self.cleaned_data["email"]
if app_settings.PASSWORD_RESET_BY_CODE_ENABLED:
flows.password_reset_by_code.PasswordResetVerificationProcess.initiate(
request=request,
user=(self.users[0] if self.users else None),
email=email,
)
else:
token_generator = kwargs.get("token_generator", default_token_generator)
flows.password_reset.request_password_reset(
request, email, self.users, token_generator
)
return email
| ResetPasswordForm |
python | python__mypy | mypy/test/testfscache.py | {
"start": 177,
"end": 4465
} | class ____(unittest.TestCase):
def setUp(self) -> None:
self.tempdir = tempfile.mkdtemp()
self.oldcwd = os.getcwd()
os.chdir(self.tempdir)
self.fscache = FileSystemCache()
def tearDown(self) -> None:
os.chdir(self.oldcwd)
shutil.rmtree(self.tempdir)
def test_isfile_case_1(self) -> None:
self.make_file("bar.py")
self.make_file("pkg/sub_package/__init__.py")
self.make_file("pkg/sub_package/foo.py")
# Run twice to test both cached and non-cached code paths.
for i in range(2):
assert self.isfile_case("bar.py")
assert self.isfile_case("pkg/sub_package/__init__.py")
assert self.isfile_case("pkg/sub_package/foo.py")
assert not self.isfile_case("non_existent.py")
assert not self.isfile_case("pkg/non_existent.py")
assert not self.isfile_case("pkg/")
assert not self.isfile_case("bar.py/")
for i in range(2):
assert not self.isfile_case("Bar.py")
assert not self.isfile_case("pkg/sub_package/__init__.PY")
assert not self.isfile_case("pkg/Sub_Package/foo.py")
assert not self.isfile_case("Pkg/sub_package/foo.py")
def test_isfile_case_2(self) -> None:
self.make_file("bar.py")
self.make_file("pkg/sub_package/__init__.py")
self.make_file("pkg/sub_package/foo.py")
# Run twice to test both cached and non-cached code paths.
# This reverses the order of checks from test_isfile_case_1.
for i in range(2):
assert not self.isfile_case("Bar.py")
assert not self.isfile_case("pkg/sub_package/__init__.PY")
assert not self.isfile_case("pkg/Sub_Package/foo.py")
assert not self.isfile_case("Pkg/sub_package/foo.py")
for i in range(2):
assert self.isfile_case("bar.py")
assert self.isfile_case("pkg/sub_package/__init__.py")
assert self.isfile_case("pkg/sub_package/foo.py")
assert not self.isfile_case("non_existent.py")
assert not self.isfile_case("pkg/non_existent.py")
def test_isfile_case_3(self) -> None:
self.make_file("bar.py")
self.make_file("pkg/sub_package/__init__.py")
self.make_file("pkg/sub_package/foo.py")
# Run twice to test both cached and non-cached code paths.
for i in range(2):
assert self.isfile_case("bar.py")
assert not self.isfile_case("non_existent.py")
assert not self.isfile_case("pkg/non_existent.py")
assert not self.isfile_case("Bar.py")
assert not self.isfile_case("pkg/sub_package/__init__.PY")
assert not self.isfile_case("pkg/Sub_Package/foo.py")
assert not self.isfile_case("Pkg/sub_package/foo.py")
assert self.isfile_case("pkg/sub_package/__init__.py")
assert self.isfile_case("pkg/sub_package/foo.py")
def test_isfile_case_other_directory(self) -> None:
self.make_file("bar.py")
with tempfile.TemporaryDirectory() as other:
self.make_file("other_dir.py", base=other)
self.make_file("pkg/other_dir.py", base=other)
assert self.isfile_case(os.path.join(other, "other_dir.py"))
assert not self.isfile_case(os.path.join(other, "Other_Dir.py"))
assert not self.isfile_case(os.path.join(other, "bar.py"))
if os.path.exists(os.path.join(other, "PKG/other_dir.py")):
# We only check case for directories under our prefix, and since
# this path is not under the prefix, case difference is fine.
assert self.isfile_case(os.path.join(other, "PKG/other_dir.py"))
def make_file(self, path: str, base: str | None = None) -> None:
if base is None:
base = self.tempdir
fullpath = os.path.join(base, path)
os.makedirs(os.path.dirname(fullpath), exist_ok=True)
if not path.endswith("/"):
with open(fullpath, "w") as f:
f.write("# test file")
def isfile_case(self, path: str) -> bool:
return self.fscache.isfile_case(os.path.join(self.tempdir, path), self.tempdir)
| TestFileSystemCache |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 40547,
"end": 40734
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("COMMIT_OR_PR_TITLE", "PR_TITLE")
| SquashMergeCommitTitle |
python | dask__distributed | distributed/worker_state_machine.py | {
"start": 30050,
"end": 128278
} | class ____:
"""State machine encapsulating the lifetime of all tasks on a worker.
Not to be confused with :class:`distributed.scheduler.WorkerState`.
.. note::
The data attributes of this class are implementation details and may be
changed without a deprecation cycle.
.. warning::
The attributes of this class are all heavily correlated with each other.
*Do not* modify them directly, *ever*, as it is extremely easy to obtain a broken
state this way, which in turn will likely result in cluster-wide deadlocks.
The state should be exclusively mutated through :meth:`handle_stimulus`.
"""
#: Worker <IP address>:<port>. This is used in decision-making by the state machine,
#: e.g. to determine if a peer worker is running on the same host or not.
#: This attribute may not be known when the WorkerState is initialised. It *must* be
#: set before the first call to :meth:`handle_stimulus`.
address: str
#: ``{key: TaskState}``. The tasks currently executing on this worker (and any
#: dependencies of those tasks)
tasks: dict[Key, TaskState]
#: ``{ts.key: thread ID}``. This collection is shared by reference between
#: :class:`~distributed.worker.Worker` and this class. While the WorkerState is
#: thread-agnostic, it still needs access to this information in some cases.
#: This collection is populated by :meth:`distributed.worker.Worker.execute`.
#: It does not *need* to be populated for the WorkerState to work.
threads: dict[Key, int]
#: In-memory tasks data. This collection is shared by reference between
#: :class:`~distributed.worker.Worker`,
#: :class:`~distributed.worker_memory.WorkerMemoryManager`, and this class.
data: MutableMapping[Key, object]
#: ``{name: worker plugin}``. This collection is shared by reference between
#: :class:`~distributed.worker.Worker` and this class. The Worker managed adding and
#: removing plugins, while the WorkerState invokes the ``WorkerPlugin.transition``
#: method, is available.
plugins: dict[str, WorkerPlugin]
#: Priority heap of tasks that are ready to run and have no resource constrains.
#: Mutually exclusive with :attr:`constrained`.
ready: HeapSet[TaskState]
#: Priority heap of tasks that are ready to run, but are waiting on abstract
#: resources like GPUs. Mutually exclusive with :attr:`ready`.
#: See :attr:`available_resources` and :doc:`resources`.
constrained: HeapSet[TaskState]
#: Number of tasks that can be executing in parallel.
#: At any given time, :meth:`executing_count` <= nthreads.
nthreads: int
#: True if the state machine should start executing more tasks and fetch
#: dependencies whenever a slot is available. This property must be kept aligned
#: with the Worker: ``WorkerState.running == (Worker.status is Status.running)``.
running: bool
#: Tasks that are currently waiting for data
waiting: set[TaskState]
#: ``{worker address: {ts.key, ...}``.
#: The data that we care about that we think a worker has
has_what: defaultdict[str, set[Key]]
#: The tasks which still require data in order to execute and are in memory on at
#: least another worker, prioritized as per-worker heaps. All and only tasks with
#: ``TaskState.state == 'fetch'`` are in this collection. A :class:`TaskState` with
#: multiple entries in :attr:`~TaskState.who_has` will appear multiple times here.
data_needed: defaultdict[str, HeapSet[TaskState]]
#: Total number of tasks in fetch state. If a task is in more than one data_needed
#: heap, it's only counted once.
fetch_count: int
#: Number of bytes to gather from the same worker in a single call to
#: :meth:`BaseWorker.gather_dep`. Multiple small tasks that can be gathered from the
#: same worker will be batched in a single instruction as long as their combined
#: size doesn't exceed this value. If the first task to be gathered exceeds this
#: limit, it will still be gathered to ensure progress. Hence, this limit is not
#: absolute.
transfer_message_bytes_limit: float
#: All and only tasks with ``TaskState.state == 'missing'``.
missing_dep_flight: set[TaskState]
#: Tasks that are coming to us in current peer-to-peer connections.
#:
#: This set includes exclusively tasks with :attr:`~TaskState.state` == 'flight' as
#: well as tasks with :attr:`~TaskState.state` in ('cancelled', 'resumed') and
#: :attr:`~TaskState.previous` == 'flight`.
#:
#: See also :meth:`in_flight_tasks_count`.
in_flight_tasks: set[TaskState]
#: ``{worker address: {ts.key, ...}}``
#: The workers from which we are currently gathering data and the dependencies we
#: expect from those connections. Workers in this dict won't be asked for additional
#: dependencies until the current query returns.
in_flight_workers: dict[str, set[Key]]
#: Current total size of open data transfers from other workers
transfer_incoming_bytes: int
#: Maximum number of concurrent incoming data transfers from other workers.
#: See also :attr:`distributed.worker.Worker.transfer_outgoing_count_limit`.
transfer_incoming_count_limit: int
#: Total number of data transfers from other workers since the worker was started.
transfer_incoming_count_total: int
#: Ignore :attr:`transfer_incoming_count_limit` as long as :attr:`transfer_incoming_bytes` is
#: less than this value.
transfer_incoming_bytes_throttle_threshold: int
#: Peer workers that recently returned a busy status. Workers in this set won't be
#: asked for additional dependencies for some time.
busy_workers: set[str]
#: Counter that decreases every time the compute-task handler is invoked by the
#: Scheduler. It is appended to :attr:`TaskState.priority` and acts as a
#: tie-breaker between tasks that have the same priority on the Scheduler,
#: determining a last-in-first-out order between them.
generation: int
#: ``{resource name: amount}``. Total resources available for task execution.
#: See :doc: `resources`.
total_resources: dict[str, float]
#: ``{resource name: amount}``. Current resources that aren't being currently
#: consumed by task execution. Always less or equal to :attr:`total_resources`.
#: See :doc:`resources`.
available_resources: dict[str, float]
#: Set of tasks that are currently running.
#:
#: This set includes exclusively tasks with :attr:`~TaskState.state` == 'executing'
#: as well as tasks with :attr:`~TaskState.state` in ('cancelled', 'resumed') and
#: :attr:`~TaskState.previous` == 'executing`.
#:
#: See also :meth:`executing_count` and :attr:`long_running`.
executing: set[TaskState]
#: Set of tasks that are currently running and have called
#: :func:`~distributed.secede`, so they no longer count towards the maximum number
#: of concurrent tasks (nthreads).
#: These tasks do not appear in the :attr:`executing` set.
#:
#: This set includes exclusively tasks with
#: :attr:`~TaskState.state` == 'long-running' as well as tasks with
#: :attr:`~TaskState.state` in ('cancelled', 'resumed') and
#: :attr:`~TaskState.previous` == 'long-running`.
long_running: set[TaskState]
#: A number of tasks that this worker has run in its lifetime; this includes failed
#: and cancelled tasks. See also :meth:`executing_count`.
executed_count: int
#: Total size of all tasks in memory
nbytes: int
#: Actor tasks. See :doc:`actors`.
actors: dict[Key, object]
#: Transition log: ``[(..., stimulus_id: str | None, timestamp: float), ...]``
#: The number of stimuli logged is capped.
#: See also :meth:`story` and :attr:`stimulus_log`.
log: deque[tuple]
#: Log of all stimuli received by :meth:`handle_stimulus`.
#: The number of events logged is capped.
#: See also :attr:`log` and :meth:`stimulus_story`.
stimulus_log: deque[StateMachineEvent]
#: If True, enable expensive internal consistency check.
#: Typically disabled in production.
validate: bool
#: Current number of tasks and cumulative elapsed time in each state,
#: both broken down by :attr:`prefix`
task_counter: TaskCounter
#: Total number of state transitions so far.
#: See also :attr:`log` and :attr:`transition_counter_max`.
transition_counter: int
#: Raise an error if the :attr:`transition_counter` ever reaches this value.
#: This is meant for debugging only, to catch infinite recursion loops.
#: In production, it should always be set to False.
transition_counter_max: int | Literal[False]
#: Limit of bytes for incoming data transfers; this is used for throttling.
transfer_incoming_bytes_limit: float
#: Statically-seeded random state, used to guarantee determinism whenever a
#: pseudo-random choice is required
rng: random.Random
__slots__ = tuple(__annotations__)
def __init__(
self,
*,
nthreads: int = 1,
address: str | None = None,
data: MutableMapping[Key, object] | None = None,
threads: dict[Key, int] | None = None,
plugins: dict[str, WorkerPlugin] | None = None,
resources: Mapping[str, float] | None = None,
transfer_incoming_count_limit: int = 9999,
validate: bool = True,
transition_counter_max: int | Literal[False] = False,
transfer_incoming_bytes_limit: float = math.inf,
transfer_message_bytes_limit: float = math.inf,
):
self.nthreads = nthreads
# address may not be known yet when the State Machine is initialised.
# Raise AttributeError if a method tries reading it before it's been set.
if address:
self.address = address
# These collections are normally passed by reference by the Worker.
# For the sake of convenience, create independent ones during unit tests.
self.data = data if data is not None else {}
self.threads = threads if threads is not None else {}
self.plugins = plugins if plugins is not None else {}
self.total_resources = dict(resources) if resources is not None else {}
self.available_resources = self.total_resources.copy()
self.validate = validate
self.tasks = {}
self.running = True
self.waiting = set()
self.has_what = defaultdict(set)
self.data_needed = defaultdict(
partial(HeapSet[TaskState], key=operator.attrgetter("priority"))
)
self.fetch_count = 0
self.in_flight_workers = {}
self.busy_workers = set()
self.transfer_incoming_count_limit = transfer_incoming_count_limit
self.transfer_incoming_count_total = 0
self.transfer_incoming_bytes_throttle_threshold = int(10e6)
self.transfer_incoming_bytes = 0
self.missing_dep_flight = set()
self.generation = 0
self.ready = HeapSet(key=operator.attrgetter("priority"))
self.constrained = HeapSet(key=operator.attrgetter("priority"))
self.executing = set()
self.in_flight_tasks = set()
self.nbytes = 0
self.executed_count = 0
self.long_running = set()
self.transfer_message_bytes_limit = transfer_message_bytes_limit
maxlen = dask.config.get("distributed.admin.low-level-log-length")
self.log = deque(maxlen=maxlen)
self.stimulus_log = deque(maxlen=maxlen)
self.task_counter = TaskCounter()
self.transition_counter = 0
self.transition_counter_max = transition_counter_max
self.transfer_incoming_bytes_limit = transfer_incoming_bytes_limit
self.actors = {}
self.rng = random.Random(0)
def handle_stimulus(self, *stims: StateMachineEvent) -> Instructions:
"""Process one or more external events, transition relevant tasks to new states,
and return a list of instructions to be executed as a consequence.
See also
--------
BaseWorker.handle_stimulus
"""
instructions = []
handled = time()
for stim in stims:
if not isinstance(stim, FindMissingEvent):
self.stimulus_log.append(stim.to_loggable(handled=handled))
recs, instr = self._handle_event(stim)
instructions += instr
instructions += self._transitions(recs, stimulus_id=stim.stimulus_id)
return instructions
#############
# Accessors #
#############
@property
def executing_count(self) -> int:
"""Count of tasks currently executing on this worker and counting towards the
maximum number of threads.
It includes cancelled tasks, but does not include long running (a.k.a. seceded)
tasks.
See also
--------
WorkerState.executing
WorkerState.executed_count
WorkerState.nthreads
WorkerState.all_running_tasks
"""
return len(self.executing)
@property
def all_running_tasks(self) -> set[TaskState]:
"""All tasks that are currently occupying a thread. They may or may not count
towards the maximum number of threads.
These are:
- ts.status in (executing, long-running)
- ts.status in (cancelled, resumed) and ts.previous in (executing, long-running)
See also
--------
WorkerState.executing_count
"""
# Note: cancelled and resumed tasks are still in either of these sets
return self.executing | self.long_running
@property
def in_flight_tasks_count(self) -> int:
"""Number of tasks currently being replicated from other workers to this one.
See also
--------
WorkerState.in_flight_tasks
"""
return len(self.in_flight_tasks)
@property
def transfer_incoming_count(self) -> int:
"""Current number of open data transfers from other workers.
See also
--------
WorkerState.in_flight_workers
"""
return len(self.in_flight_workers)
#########################
# Shared helper methods #
#########################
def _ensure_task_exists(
self, key: Key, *, priority: tuple[int, ...], stimulus_id: str
) -> TaskState:
try:
ts = self.tasks[key]
logger.debug("Data task %s already known (stimulus_id=%s)", ts, stimulus_id)
except KeyError:
self.tasks[key] = ts = TaskState(key)
self.task_counter.new_task(ts)
if not ts.priority:
assert priority
ts.priority = priority
self.log.append((key, "ensure-task-exists", ts.state, stimulus_id, time()))
return ts
    def _update_who_has(self, who_has: Mapping[Key, Collection[str]]) -> None:
        """Apply fresh replica information from the scheduler.

        For each listed key, synchronise ``ts.who_has`` and the reverse indices
        ``self.has_what`` and — for tasks in ``fetch`` state — ``self.data_needed``.
        Keys no longer known locally are skipped; this worker's own address is
        dropped from the replica set.
        """
        for key, workers in who_has.items():
            ts = self.tasks.get(key)
            if not ts:
                # The worker sent a refresh-who-has request to the scheduler but, by the
                # time the answer comes back, some of the keys have been forgotten.
                continue
            workers = set(workers)
            if self.address in workers:
                workers.remove(self.address)
                # This can only happen if rebalance() recently asked to release a key,
                # but the RPC call hasn't returned yet. rebalance() is flagged as not
                # being safe to run while the cluster is not at rest and has already
                # been penned in to be redesigned on top of the AMM.
                # It is not necessary to send a message back to the
                # scheduler here, because it is guaranteed that there's already a
                # release-worker-data message in transit to it.
                if ts.state != "memory":
                    logger.debug(  # pragma: nocover
                        "Scheduler claims worker %s holds data for task %s, "
                        "which is not true.",
                        self.address,
                        ts,
                    )

            if ts.who_has == workers:
                continue

            # Apply the delta to the reverse indices before overwriting who_has
            for worker in ts.who_has - workers:
                self.has_what[worker].remove(key)
                if ts.state == "fetch":
                    self.data_needed[worker].remove(ts)

            for worker in workers - ts.who_has:
                self.has_what[worker].add(key)
                if ts.state == "fetch":
                    self.data_needed[worker].add(ts)

            ts.who_has = workers
def _purge_state(self, ts: TaskState) -> None:
"""Ensure that TaskState attributes are reset to a neutral default and
Worker-level state associated to the provided key is cleared (e.g.
who_has)
This is idempotent
"""
logger.debug("Purge task: %s", ts)
# Do not use self.data.pop(key, None), as it could unspill the data from disk!
if ts.key in self.data:
del self.data[ts.key]
self.actors.pop(ts.key, None)
self.threads.pop(ts.key, None)
for worker in ts.who_has:
self.has_what[worker].discard(ts.key)
self.data_needed[worker].discard(ts)
ts.who_has.clear()
for d in ts.dependencies:
ts.waiting_for_data.discard(d)
d.waiters.discard(ts)
ts.waiting_for_data.clear()
ts.nbytes = None
ts.previous = None
ts.next = None
ts.done = False
ts.coming_from = None
ts.exception = None
ts.traceback = None
ts.traceback_text = ""
ts.traceback_text = ""
self.missing_dep_flight.discard(ts)
self.ready.discard(ts)
self.constrained.discard(ts)
self.executing.discard(ts)
self.long_running.discard(ts)
self.in_flight_tasks.discard(ts)
self.waiting.discard(ts)
def _should_throttle_incoming_transfers(self) -> bool:
"""Decides whether the WorkerState should throttle data transfers from other workers.
Returns
-------
* True if the number of incoming data transfers reached its limit
and the size of incoming data transfers reached the minimum threshold for throttling
* True if the size of incoming data transfers reached its limit
* False otherwise
"""
reached_count_limit = (
self.transfer_incoming_count >= self.transfer_incoming_count_limit
)
reached_throttle_threshold = (
self.transfer_incoming_bytes
>= self.transfer_incoming_bytes_throttle_threshold
)
reached_bytes_limit = (
self.transfer_incoming_bytes >= self.transfer_incoming_bytes_limit
)
return reached_count_limit and reached_throttle_threshold or reached_bytes_limit
    def _ensure_communicating(self, *, stimulus_id: str) -> RecsInstrs:
        """Transition tasks from fetch to flight, until there are no more tasks in fetch
        state or a threshold has been reached.

        Returns recommendations ``{ts: ("flight", worker)}`` plus at most one
        GatherDep instruction per selected peer worker.
        """
        if not self.running or not self.data_needed:
            return {}, []
        if self._should_throttle_incoming_transfers():
            return {}, []

        recommendations: Recs = {}
        instructions: Instructions = []

        for worker, available_tasks in self._select_workers_for_gather():
            assert worker != self.address
            to_gather_tasks, message_nbytes = self._select_keys_for_gather(
                available_tasks
            )
            # We always load at least one task
            assert to_gather_tasks or self.transfer_incoming_bytes
            # ...but that task might be selected in the previous iteration of the loop
            if not to_gather_tasks:
                break
            to_gather_keys = {ts.key for ts in to_gather_tasks}

            logger.debug(
                "Gathering %d tasks from %s; %d more remain. "
                "Pending workers: %d; connections: %d/%d; busy: %d",
                len(to_gather_tasks),
                worker,
                len(available_tasks),
                len(self.data_needed),
                self.transfer_incoming_count,
                self.transfer_incoming_count_limit,
                len(self.busy_workers),
            )
            self.log.append(
                ("gather-dependencies", worker, to_gather_keys, stimulus_id, time())
            )

            for ts in to_gather_tasks:
                if self.validate:
                    assert ts.state == "fetch"
                    assert worker in ts.who_has
                    assert ts not in recommendations
                recommendations[ts] = ("flight", worker)

            # A single invocation of _ensure_communicating may generate up to one
            # GatherDep instruction per worker. Multiple tasks from the same worker may
            # be batched in the same instruction by _select_keys_for_gather. But once
            # a worker has been selected for a GatherDep and added to in_flight_workers,
            # it won't be selected again until the gather completes.
            instructions.append(
                GatherDep(
                    worker=worker,
                    to_gather=to_gather_keys,
                    total_nbytes=message_nbytes,
                    stimulus_id=stimulus_id,
                )
            )

            self.in_flight_workers[worker] = to_gather_keys
            self.transfer_incoming_count_total += 1
            self.transfer_incoming_bytes += message_nbytes
            # The transfer we just scheduled may itself have pushed us over the
            # throttling limits; stop selecting further workers if so.
            if self._should_throttle_incoming_transfers():
                break

        return recommendations, instructions
    def _select_workers_for_gather(self) -> Iterator[tuple[str, HeapSet[TaskState]]]:
        """Helper of _ensure_communicating.

        Yield the peer workers and tasks in data_needed, sorted by:

        1. By highest-priority task available across all workers
        2. If tied, first by local peer workers, then remote. Note that, if a task is
           replicated across multiple host, it may go in a tie with itself.
        3. If still tied, by number of tasks available to be fetched from the host
           (see note below)
        4. If still tied, by a random element. This is statically seeded to guarantee
           reproducibility.

           FIXME https://github.com/dask/distributed/issues/6620
                 You won't get determinism when a single task is replicated on multiple
                 workers, because TaskState.who_has changes order at every interpreter
                 restart.

        Omit workers that are either busy or in flight.
        Remove peer workers with no tasks from data_needed.

        Note
        ----
        Instead of number of tasks, we could've measured total nbytes and/or number of
        tasks that only exist on the worker. Raw number of tasks is cruder but simpler.
        """
        host = get_address_host(self.address)

        heap = []
        for worker, tasks in list(self.data_needed.items()):
            if not tasks:
                del self.data_needed[worker]
                continue
            if worker in self.in_flight_workers or worker in self.busy_workers:
                continue
            heap.append(
                (
                    tasks.peek().priority,
                    get_address_host(worker) != host,  # False < True
                    -len(tasks),
                    self.rng.random(),
                    worker,
                    tasks,
                )
            )
        heapq.heapify(heap)

        while heap:
            _, is_remote, ntasks_neg, rnd, worker, tasks = heapq.heappop(heap)
            # The number of tasks and possibly the top priority task may have changed
            # since the last sort, since _select_keys_for_gather may have removed tasks
            # that are also replicated on a higher-priority worker.
            if not tasks:
                del self.data_needed[worker]
            elif -ntasks_neg != len(tasks):
                # Stale entry: push the worker back with its current task count
                # and top priority so it is re-sorted before being yielded.
                heapq.heappush(
                    heap,
                    (tasks.peek().priority, is_remote, -len(tasks), rnd, worker, tasks),
                )
            else:
                yield worker, tasks
                if not tasks:  # _select_keys_for_gather just emptied it
                    del self.data_needed[worker]
    def _select_keys_for_gather(
        self, available: HeapSet[TaskState]
    ) -> tuple[list[TaskState], int]:
        """Helper of _ensure_communicating.

        Fetch all tasks that are replicated on the target worker within a single
        message, up to transfer_message_bytes_limit or until we reach the limit
        for the size of incoming data transfers.

        Returns the selected tasks plus the total number of bytes in the message.
        """
        to_gather: list[TaskState] = []
        message_nbytes = 0

        while available:
            ts = available.peek()
            if self._task_exceeds_transfer_limits(ts, message_nbytes):
                break
            for worker in ts.who_has:
                # This also effectively pops from available
                self.data_needed[worker].remove(ts)
            to_gather.append(ts)
            message_nbytes += ts.get_nbytes()

        return to_gather, message_nbytes
def _task_exceeds_transfer_limits(self, ts: TaskState, message_nbytes: int) -> bool:
"""Would asking to gather this task exceed transfer limits?
Parameters
----------
ts
Candidate task for gathering
message_nbytes
Total number of bytes already scheduled for gathering in this message
Returns
-------
exceeds_limit
True if gathering the task would exceed limits, False otherwise
(in which case the task can be gathered).
"""
if self.transfer_incoming_bytes == 0 and message_nbytes == 0:
# When there is no other traffic, the top-priority task is fetched
# regardless of its size to ensure progress
return False
incoming_bytes_allowance = (
self.transfer_incoming_bytes_limit - self.transfer_incoming_bytes
)
# If message_nbytes == 0, i.e., this is the first task to gather in this
# message, ignore `self.transfer_message_bytes_limit` for the top-priority
# task to ensure progress. Otherwise:
if message_nbytes != 0:
incoming_bytes_allowance = (
min(
incoming_bytes_allowance,
self.transfer_message_bytes_limit,
)
- message_nbytes
)
return ts.get_nbytes() > incoming_bytes_allowance
def _ensure_computing(self) -> RecsInstrs:
if not self.running:
return {}, []
recs: Recs = {}
while len(self.executing) < self.nthreads:
ts = self._next_ready_task()
if not ts:
break
if self.validate:
assert ts.state in READY
assert ts not in recs
recs[ts] = "executing"
self._acquire_resources(ts)
self.executing.add(ts)
return recs, []
def _next_ready_task(self) -> TaskState | None:
"""Pop the top-priority task from self.ready or self.constrained"""
if self.ready and self.constrained:
tsr = self.ready.peek()
tsc = self.constrained.peek()
assert tsr.priority
assert tsc.priority
if tsc.priority < tsr.priority and self._resource_restrictions_satisfied(
tsc
):
return self.constrained.pop()
else:
return self.ready.pop()
elif self.ready:
return self.ready.pop()
elif self.constrained:
tsc = self.constrained.peek()
if self._resource_restrictions_satisfied(tsc):
return self.constrained.pop()
return None
    def _get_task_finished_msg(
        self,
        ts: TaskState,
        run_id: int,
        stimulus_id: str,
    ) -> TaskFinishedMsg:
        """Build the TaskFinishedMsg that reports a task in memory to the
        scheduler, carrying its nbytes, (best-effort pickled) type, metadata
        and timing information.
        """
        if self.validate:
            assert ts.state == "memory"
            assert ts.key in self.data or ts.key in self.actors
            assert ts.type is not None
            assert ts.nbytes is not None
        try:
            type_serialized = pickle.dumps(ts.type)
        except Exception:
            # Some types fail pickling (example: _thread.lock objects);
            # send their name as a best effort.
            type_serialized = pickle.dumps(typename(ts.type))
        return TaskFinishedMsg(
            key=ts.key,
            run_id=run_id,
            nbytes=ts.nbytes,
            type=type_serialized,
            typename=typename(ts.type),
            metadata=ts.metadata,
            thread=self.threads.get(ts.key),
            startstops=ts.startstops,
            stimulus_id=stimulus_id,
        )
###############
# Transitions #
###############
def _transition_generic_fetch(self, ts: TaskState, stimulus_id: str) -> RecsInstrs:
if not ts.who_has:
return {ts: "missing"}, []
ts.state = "fetch"
ts.done = False
self.fetch_count += 1
assert ts.priority
for w in ts.who_has:
self.data_needed[w].add(ts)
return {}, []
    def _transition_missing_waiting(
        self, ts: TaskState, *, stimulus_id: str
    ) -> RecsInstrs:
        """Leave the missing state in order to compute the task locally: clear
        missing-dependency bookkeeping, purge stale state, then reuse the
        released->waiting transition.
        """
        self.missing_dep_flight.discard(ts)
        self._purge_state(ts)
        return self._transition_released_waiting(ts, stimulus_id=stimulus_id)
    def _transition_missing_fetch(
        self, ts: TaskState, *, stimulus_id: str
    ) -> RecsInstrs:
        """Move a missing task back to fetch.

        No-op if we still don't know of any worker holding a replica.
        """
        if self.validate:
            assert ts.state == "missing"
        if not ts.who_has:
            return {}, []
        self.missing_dep_flight.discard(ts)
        return self._transition_generic_fetch(ts, stimulus_id=stimulus_id)
    def _transition_missing_released(
        self, ts: TaskState, *, stimulus_id: str
    ) -> RecsInstrs:
        """Release a task that was parked in the missing state.

        The trailing assert double-checks that the task is still known to this
        worker after the generic release.
        """
        self.missing_dep_flight.discard(ts)
        recs, instructions = self._transition_generic_released(
            ts, stimulus_id=stimulus_id
        )
        assert ts.key in self.tasks
        return recs, instructions
    def _transition_flight_missing(
        self, ts: TaskState, *, stimulus_id: str
    ) -> RecsInstrs:
        """Move a flight task whose gather coroutine has finished (``ts.done``)
        to the missing state.
        """
        assert ts.done
        return self._transition_generic_missing(ts, stimulus_id=stimulus_id)
def _transition_generic_missing(
self, ts: TaskState, *, stimulus_id: str
) -> RecsInstrs:
if self.validate:
assert not ts.who_has
ts.state = "missing"
self.missing_dep_flight.add(ts)
ts.done = False
return {}, []
    def _transition_released_fetch(
        self, ts: TaskState, *, stimulus_id: str
    ) -> RecsInstrs:
        """Schedule a released task for transfer from a peer worker."""
        if self.validate:
            assert ts.state == "released"
        return self._transition_generic_fetch(ts, stimulus_id=stimulus_id)
def _transition_generic_released(
self, ts: TaskState, *, stimulus_id: str
) -> RecsInstrs:
self._purge_state(ts)
recs: Recs = {}
for dependency in ts.dependencies:
if (
not dependency.waiters
and dependency.state not in READY | PROCESSING | {"memory"}
):
recs[dependency] = "released"
ts.state = "released"
if not ts.dependents:
recs[ts] = "forgotten"
return recs, []
def _transition_released_waiting(
self, ts: TaskState, *, stimulus_id: str
) -> RecsInstrs:
if self.validate:
assert all(d.key in self.tasks for d in ts.dependencies)
recommendations: Recs = {}
ts.waiting_for_data.clear()
for dep_ts in ts.dependencies:
if dep_ts.state != "memory":
ts.waiting_for_data.add(dep_ts)
dep_ts.waiters.add(ts)
recommendations[dep_ts] = "fetch"
if not ts.waiting_for_data:
recommendations[ts] = "ready"
ts.state = "waiting"
self.waiting.add(ts)
return recommendations, []
def _transition_fetch_flight(
self, ts: TaskState, worker: str, *, stimulus_id: str
) -> RecsInstrs:
if self.validate:
assert ts.state == "fetch"
assert ts.who_has
# The task has already been removed by _ensure_communicating
for w in ts.who_has:
assert ts not in self.data_needed[w]
ts.done = False
ts.state = "flight"
ts.coming_from = worker
self.in_flight_tasks.add(ts)
self.fetch_count -= 1
return {}, []
    def _transition_fetch_missing(
        self, ts: TaskState, *, stimulus_id: str
    ) -> RecsInstrs:
        """Move a task from fetch to missing, keeping the fetch counter in sync."""
        self.fetch_count -= 1
        return self._transition_generic_missing(ts, stimulus_id=stimulus_id)
    def _transition_fetch_released(
        self, ts: TaskState, *, stimulus_id: str
    ) -> RecsInstrs:
        """Release a task in fetch state, keeping the fetch counter in sync."""
        self.fetch_count -= 1
        return self._transition_generic_released(ts, stimulus_id=stimulus_id)
    def _transition_memory_released(
        self, ts: TaskState, *, stimulus_id: str
    ) -> RecsInstrs:
        """Release a task held in memory, decrementing the managed-memory total
        and notifying the scheduler that the replica is gone.
        """
        assert ts.nbytes is not None
        self.nbytes -= ts.nbytes
        recs, instructions = self._transition_generic_released(
            ts, stimulus_id=stimulus_id
        )
        instructions.append(ReleaseWorkerDataMsg(key=ts.key, stimulus_id=stimulus_id))
        return recs, instructions
    def _transition_waiting_constrained(
        self, ts: TaskState, *, stimulus_id: str
    ) -> RecsInstrs:
        """All dependencies are in memory; enqueue a task that has resource
        restrictions and try to start computing.
        """
        if self.validate:
            assert ts.state == "waiting"
            assert not ts.waiting_for_data
            assert all(
                dep.key in self.data or dep.key in self.actors
                for dep in ts.dependencies
            )
            assert all(dep.state == "memory" for dep in ts.dependencies)
            assert ts not in self.ready
            assert ts not in self.constrained
        ts.state = "constrained"
        self.waiting.remove(ts)
        self.constrained.add(ts)
        return self._ensure_computing()
    def _transition_executing_rescheduled(
        self, ts: TaskState, *, stimulus_id: str
    ) -> RecsInstrs:
        """Note: this transition is triggered exclusively by a task raising the
        Reschedule() Exception; it is not involved in work stealing.

        Sends a RescheduleMsg to the scheduler and releases the local state.
        """
        assert ts.done
        return merge_recs_instructions(
            ({}, [RescheduleMsg(key=ts.key, stimulus_id=stimulus_id)]),
            # Note: this is not the same as recommending {ts: "released"} on the
            # previous line, as it would instead run the ("executing", "released")
            # transition, which would need special code for ts.done=True.
            self._transition_generic_released(ts, stimulus_id=stimulus_id),
        )
    def _transition_waiting_ready(
        self, ts: TaskState, *, stimulus_id: str
    ) -> RecsInstrs:
        """All dependencies are in memory; make the task runnable.

        Tasks with resource restrictions are redirected to ``constrained``.
        """
        if self.validate:
            assert ts.state == "waiting"
            assert ts not in self.ready
            assert ts not in self.constrained
            assert not ts.waiting_for_data
            for dep in ts.dependencies:
                assert dep.key in self.data or dep.key in self.actors
                assert dep.state == "memory"

        if ts.resource_restrictions:
            return {ts: "constrained"}, []

        ts.state = "ready"
        assert ts.priority is not None
        self.waiting.remove(ts)
        self.ready.add(ts)

        return self._ensure_computing()
    def _transition_generic_error(
        self,
        ts: TaskState,
        exception: Serialize,
        traceback: Serialize | None,
        exception_text: str,
        traceback_text: str,
        run_id: int,
        *,
        stimulus_id: str,
    ) -> RecsInstrs:
        """Record a task failure on the TaskState and report it to the
        scheduler with a TaskErredMsg.
        """
        ts.exception = exception
        ts.traceback = traceback
        ts.exception_text = exception_text
        ts.traceback_text = traceback_text
        ts.state = "error"
        smsg = TaskErredMsg.from_task(
            ts,
            run_id=run_id,
            stimulus_id=stimulus_id,
            thread=self.threads.get(ts.key),
        )

        return {}, [smsg]
    def _transition_resumed_error(
        self,
        ts: TaskState,
        exception: Serialize,
        traceback: Serialize | None,
        exception_text: str,
        traceback_text: str,
        run_id: int,
        *,
        stimulus_id: str,
    ) -> RecsInstrs:
        """In case of failure of the previous state, discard the error and kick off the
        next state without informing the scheduler

        ``ts.next`` holds the follow-up state that was requested while the task
        was still running; the task is reset to released and restarted towards it.
        """
        assert ts.done
        if ts.previous in ("executing", "long-running"):
            assert ts.next == "fetch"
            recs: Recs = {ts: "fetch"}
        else:
            assert ts.previous == "flight"
            assert ts.next == "waiting"
            recs = {ts: "waiting"}

        ts.state = "released"
        ts.done = False
        ts.previous = None
        ts.next = None
        return recs, []
    def _transition_resumed_rescheduled(
        self, ts: TaskState, *, stimulus_id: str
    ) -> RecsInstrs:
        """If the task raises the Reschedule() exception, but the scheduler already told
        the worker to fetch it somewhere else, silently transition to fetch.

        Note that this transition effectively duplicates the logic of
        _transition_resumed_error.

        Returns a recommendation to fetch the key again.
        """
        assert ts.done
        assert ts.previous in ("executing", "long-running")
        assert ts.next == "fetch"

        ts.state = "released"
        ts.done = False
        ts.previous = None
        ts.next = None
        return {ts: "fetch"}, []
    def _transition_resumed_fetch(
        self, ts: TaskState, *, stimulus_id: str
    ) -> RecsInstrs:
        """A resumed(flight->waiting) task whose transfer has finished is
        restarted towards waiting; one still in flight simply forgets the
        cancellation. For resumed(executing/long-running->fetch) tasks this is
        a no-op.

        See also
        --------
        _transition_cancelled_fetch
        _transition_cancelled_waiting
        _transition_flight_fetch
        """
        if ts.previous == "flight":
            if self.validate:
                assert ts.next == "waiting"
            if ts.done:
                # We arrived here either from GatherDepNetworkFailureEvent or from
                # GatherDepSuccessEvent but without the key in the data attribute.
                # We would now normally try to fetch the task from another peer worker
                # or transition it to missing if none are left; here instead we're going
                # to compute the task as we had been asked by the scheduler.
                ts.state = "released"
                ts.done = False
                ts.previous = None
                ts.next = None
                return {ts: "waiting"}, []
            else:
                # We're back where we started. We should forget about the entire
                # cancellation attempt
                ts.state = "flight"
                ts.previous = None
                ts.next = None
        elif self.validate:
            assert ts.previous in ("executing", "long-running")
            assert ts.next == "fetch"
            # None of the exit events of execute recommend a transition to fetch
            assert not ts.done
        return {}, []
    def _transition_resumed_missing(
        self, ts: TaskState, *, stimulus_id: str
    ) -> RecsInstrs:
        """A resumed task can't go to missing directly; re-recommend ``fetch``
        and let the resumed->fetch transition decide how to proceed.
        """
        return {ts: "fetch"}, []
    def _transition_resumed_released(
        self, ts: TaskState, *, stimulus_id: str
    ) -> RecsInstrs:
        """Demote a resumed task back to plain cancelled: the follow-up state
        recorded in ``ts.next`` is no longer wanted.
        """
        # None of the exit events of execute or gather_dep recommend a transition to
        # released
        assert not ts.done
        ts.state = "cancelled"
        ts.next = None
        return {}, []
    def _transition_cancelled_fetch(
        self, ts: TaskState, *, stimulus_id: str
    ) -> RecsInstrs:
        """The outcome depends on whether the underlying coroutine has already
        finished (``ts.done``) and on what the task was doing before the
        cancellation (``ts.previous``).

        See also
        --------
        _transition_cancelled_waiting
        _transition_resumed_fetch
        """
        if ts.previous == "flight":
            if ts.done:
                # gather_dep just completed for a cancelled task.
                # Discard output and possibly forget
                return {ts: "released"}, []
            else:
                # Forget the task was cancelled to begin with
                ts.state = "flight"
                ts.previous = None
                return {}, []
        else:
            assert ts.previous in ("executing", "long-running")
            # None of the exit events of execute recommend a transition to fetch
            assert not ts.done
            ts.state = "resumed"
            ts.next = "fetch"
            return {}, []
    def _transition_cancelled_waiting(
        self, ts: TaskState, *, stimulus_id: str
    ) -> RecsInstrs:
        """The scheduler wants the task again. If it was computing here, revert
        the cancellation; if it was in flight, flag it as resumed towards
        waiting.

        See also
        --------
        _transition_cancelled_fetch
        _transition_cancelled_or_resumed_long_running
        _transition_resumed_fetch
        """
        # None of the exit events of gather_dep or execute recommend a transition to
        # waiting
        assert not ts.done
        if ts.previous == "executing":
            # Forget the task was cancelled to begin with
            ts.state = "executing"
            ts.previous = None
            return {}, []
        elif ts.previous == "long-running":
            # Forget the task was cancelled to begin with, and inform the scheduler
            # in arrears that it has seceded.
            # Note that, if the task seceded before it was cancelled, this will cause
            # the message to be sent twice.
            ts.state = "long-running"
            ts.previous = None
            smsg = LongRunningMsg(
                key=ts.key,
                run_id=ts.run_id,
                compute_duration=None,
                stimulus_id=stimulus_id,
            )
            return {}, [smsg]
        else:
            assert ts.previous == "flight"
            ts.state = "resumed"
            ts.next = "waiting"
            return {}, []
    def _transition_cancelled_released(
        self,
        ts: TaskState,
        *args: Any,  # extra arguments of transitions to memory or error - ignored
        stimulus_id: str,
    ) -> RecsInstrs:
        """Release a cancelled task once its thread or transfer has finished.

        No-op while the underlying coroutine is still running (``not ts.done``).
        """
        if not ts.done:
            return {}, []
        ts.previous = None
        ts.done = False
        return self._transition_generic_released(ts, stimulus_id=stimulus_id)
    def _transition_executing_released(
        self, ts: TaskState, *, stimulus_id: str
    ) -> RecsInstrs:
        """We can't stop executing a task just because the scheduler asked us to,
        so we're entering cancelled state and waiting until it completes.

        ``ts.previous`` records what the task was doing at cancellation time.
        """
        if self.validate:
            assert ts.state in ("executing", "long-running")
            assert not ts.next
            assert not ts.done
        ts.previous = cast(Literal["executing", "long-running"], ts.state)
        ts.state = "cancelled"
        return {}, []
    def _transition_constrained_executing(
        self, ts: TaskState, *, stimulus_id: str
    ) -> RecsInstrs:
        """Start executing a resource-constrained task by emitting an Execute
        instruction (resources were acquired earlier by _ensure_computing).
        """
        if self.validate:
            assert ts.state == "constrained"
            assert not ts.waiting_for_data
            assert ts.key not in self.data
            assert ts not in self.ready
            assert ts not in self.constrained
            for dep in ts.dependencies:
                assert dep.key in self.data or dep.key in self.actors

        ts.state = "executing"
        instr = Execute(key=ts.key, stimulus_id=stimulus_id)
        return {}, [instr]
    def _transition_ready_executing(
        self, ts: TaskState, *, stimulus_id: str
    ) -> RecsInstrs:
        """Start executing a ready task by emitting an Execute instruction."""
        if self.validate:
            assert ts.state == "ready"
            assert not ts.waiting_for_data
            assert ts.key not in self.data
            assert ts not in self.ready
            assert ts not in self.constrained
            assert all(
                dep.key in self.data or dep.key in self.actors
                for dep in ts.dependencies
            )

        ts.state = "executing"
        instr = Execute(key=ts.key, stimulus_id=stimulus_id)
        return {}, [instr]
def _transition_flight_fetch(
self, ts: TaskState, *, stimulus_id: str
) -> RecsInstrs:
# If this transition is called after the flight coroutine has finished,
# we can reset the task and transition to fetch again. If it is not yet
# finished, this should be a no-op
if not ts.done:
return {}, []
return self._transition_generic_fetch(ts, stimulus_id=stimulus_id)
def _transition_flight_released(
self, ts: TaskState, *, stimulus_id: str
) -> RecsInstrs:
# None of the exit events of gather_dep recommend a transition to released
assert not ts.done
ts.previous = "flight"
ts.next = None
# See https://github.com/dask/distributed/pull/5046#discussion_r685093940
ts.state = "cancelled"
return {}, []
    def _transition_executing_long_running(
        self, ts: TaskState, compute_duration: float, *, stimulus_id: str
    ) -> RecsInstrs:
        """The task seceded: it keeps running but stops counting towards
        ``nthreads``, so another task may be started in its place. The
        scheduler is informed with a LongRunningMsg.

        See also
        --------
        _transition_cancelled_or_resumed_long_running
        """
        ts.state = "long-running"
        self.executing.discard(ts)
        self.long_running.add(ts)

        smsg = LongRunningMsg(
            key=ts.key,
            run_id=ts.run_id,
            compute_duration=compute_duration,
            stimulus_id=stimulus_id,
        )
        return merge_recs_instructions(
            ({}, [smsg]),
            self._ensure_computing(),
        )
def _transition_cancelled_or_resumed_long_running(
self, ts: TaskState, compute_duration: float, *, stimulus_id: str
) -> RecsInstrs:
"""Handles transitions:
- cancelled(executing) -> long-running
- cancelled(long-running) -> long-running (user called secede() twice)
- resumed(executing->fetch) -> long-running
- resumed(long-running->fetch) -> long-running (user called secede() twice)
Unlike in the executing->long_running transition, do not send LongRunningMsg.
From the scheduler's perspective, this task no longer exists (cancelled) or is
in memory on another worker (resumed). So it shouldn't hear about it.
Instead, we're going to send the LongRunningMsg when and if the task
transitions back to waiting.
See also
--------
_transition_executing_long_running
_transition_cancelled_waiting
"""
assert ts.previous in ("executing", "long-running")
ts.previous = "long-running"
self.executing.discard(ts)
self.long_running.add(ts)
return self._ensure_computing()
    def _transition_executing_memory(
        self, ts: TaskState, value: object, run_id: int, *, stimulus_id: str
    ) -> RecsInstrs:
        """This transition is *normally* triggered by ExecuteSuccessEvent.
        However, beware that it can also be triggered by scatter().

        The scheduler is informed with a 'task-finished' message.
        """
        return self._transition_to_memory(
            ts, value, "task-finished", run_id=run_id, stimulus_id=stimulus_id
        )
    def _transition_released_memory(
        self, ts: TaskState, value: object, run_id: int, *, stimulus_id: str
    ) -> RecsInstrs:
        """This transition is triggered by scatter().

        This transition does not send any message back to the scheduler, because the
        scheduler doesn't know this key exists yet.
        """
        # msg_type=False suppresses the scheduler message; the run_id argument
        # is replaced with the sentinel since no actual execution took place.
        return self._transition_to_memory(
            ts, value, False, run_id=RUN_ID_SENTINEL, stimulus_id=stimulus_id
        )
    def _transition_flight_memory(
        self, ts: TaskState, value: object, run_id: int, *, stimulus_id: str
    ) -> RecsInstrs:
        """This transition is *normally* triggered by GatherDepSuccessEvent.
        However, beware that it can also be triggered by scatter().

        The scheduler is informed with an 'add-keys' message.
        """
        return self._transition_to_memory(
            ts, value, "add-keys", run_id=RUN_ID_SENTINEL, stimulus_id=stimulus_id
        )
    def _transition_resumed_memory(
        self, ts: TaskState, value: object, run_id: int, *, stimulus_id: str
    ) -> RecsInstrs:
        """Normally, we send to the scheduler a 'task-finished' message for a completed
        execution and 'add-data' for a completed replication from another worker. The
        scheduler's reaction to the two messages is fundamentally different; namely,
        add-data is only admissible for tasks that are already in memory on another
        worker, and won't trigger transitions.

        In the case of resumed tasks, the scheduler's expectation is set by ts.next -
        which means, the opposite of what the worker actually just completed.
        """
        msg_type: Literal["add-keys", "task-finished"]
        if ts.previous in ("executing", "long-running"):
            # We computed it, but the scheduler expects a fetch: report add-keys
            assert ts.next == "fetch"
            msg_type = "add-keys"
        else:
            # We fetched it, but the scheduler expects a compute: report task-finished
            assert ts.previous == "flight"
            assert ts.next == "waiting"
            msg_type = "task-finished"
        ts.previous = None
        ts.next = None
        return self._transition_to_memory(
            ts, value, msg_type, run_id=run_id, stimulus_id=stimulus_id
        )
    def _transition_to_memory(
        self,
        ts: TaskState,
        value: object,
        msg_type: Literal[False, "add-keys", "task-finished"],
        run_id: int,
        *,
        stimulus_id: str,
    ) -> RecsInstrs:
        """Insert a task's output in self.data and set the state to memory.
        This method is the one and only place where keys are inserted in self.data.

        There are three ways to get here:

        1. task execution just terminated successfully. Initial state is one of

           - executing
           - long-running
           - resumed(prev=executing next=fetch)
           - resumed(prev=long-running next=fetch)

        2. transfer from another worker terminated successfully. Initial state is

           - flight
           - resumed(prev=flight next=waiting)

        3. scatter. In this case *normally* the task is in released state, but nothing
           stops a client to scatter a key while is in any other state; these race
           conditions are not well tested and are expected to misbehave.

        Returns recommendations to make dependents runnable, plus the message
        informing the scheduler (unless ``msg_type`` is False).
        """
        recommendations: Recs = {}
        instructions: Instructions = []

        if self.validate:
            assert ts.key not in self.data
            assert ts.state != "memory"

        if ts.key in self.actors:
            self.actors[ts.key] = value
        else:
            start = time()
            try:
                self.data[ts.key] = value
            except Exception as e:
                # distributed.worker.memory.target is enabled and the value is
                # individually larger than target * memory_limit.
                # Inserting this task in the SpillBuffer caused it to immediately
                # spilled to disk, and it failed to serialize.
                # Third-party MutableMappings (dask-cuda etc.) may have other use cases
                # for this.
                msg = error_message(e)
                # Recommend transitioning the task to error instead
                return {ts: tuple(msg.values()) + (run_id,)}, []
            stop = time()
            if stop - start > 0.005:
                # The SpillBuffer has spilled this task (if larger than target) or other
                # tasks (if smaller than target) to disk.
                # Alternatively, a third-party MutableMapping may have spent time in
                # other activities, e.g. transferring data between GPGPU and system
                # memory.
                ts.startstops.append(
                    {"action": "disk-write", "start": start, "stop": stop}
                )

        ts.state = "memory"
        if ts.nbytes is None:
            ts.nbytes = sizeof(value)
        self.nbytes += ts.nbytes
        ts.type = type(value)

        for dep in ts.dependents:
            dep.waiting_for_data.discard(ts)
            if not dep.waiting_for_data and dep.state == "waiting":
                recommendations[dep] = "ready"

        self.log.append((ts.key, "put-in-memory", stimulus_id, time()))

        # NOTE: The scheduler's reaction to these two messages is fundamentally
        # different. Namely, add-keys is only admissible for tasks that are already in
        # memory on another worker, and won't trigger transitions.
        if msg_type == "add-keys":
            instructions.append(AddKeysMsg(keys=[ts.key], stimulus_id=stimulus_id))
        elif msg_type == "task-finished":
            assert run_id != RUN_ID_SENTINEL
            instructions.append(
                self._get_task_finished_msg(
                    ts,
                    run_id=run_id,
                    stimulus_id=stimulus_id,
                )
            )
        else:
            # This happens on scatter(), where the scheduler doesn't know yet that the
            # key exists.
            assert msg_type is False

        return recommendations, instructions
def _transition_released_forgotten(
self, ts: TaskState, *, stimulus_id: str
) -> RecsInstrs:
recommendations: Recs = {}
# Dependents _should_ be released by the scheduler before this
if self.validate:
assert not any(d.state != "forgotten" for d in ts.dependents)
for dep in ts.dependencies:
dep.dependents.discard(ts)
if dep.state == "released" and not dep.dependents:
recommendations[dep] = "forgotten"
self._purge_state(ts)
# Mark state as forgotten in case it is still referenced
ts.state = "forgotten"
self.tasks.pop(ts.key, None)
return recommendations, []
# {
# (start, finish):
# transition_<start>_<finish>(
# self, ts: TaskState, *args, stimulus_id: str
# ) -> (recommendations, instructions)
# }
_TRANSITIONS_TABLE: ClassVar[
Mapping[tuple[TaskStateState, TaskStateState], Callable[..., RecsInstrs]]
] = {
("cancelled", "error"): _transition_cancelled_released,
("cancelled", "fetch"): _transition_cancelled_fetch,
("cancelled", "long-running"): _transition_cancelled_or_resumed_long_running,
("cancelled", "memory"): _transition_cancelled_released,
("cancelled", "missing"): _transition_cancelled_released,
("cancelled", "released"): _transition_cancelled_released,
("cancelled", "rescheduled"): _transition_cancelled_released,
("cancelled", "waiting"): _transition_cancelled_waiting,
("resumed", "error"): _transition_resumed_error,
("resumed", "fetch"): _transition_resumed_fetch,
("resumed", "long-running"): _transition_cancelled_or_resumed_long_running,
("resumed", "memory"): _transition_resumed_memory,
("resumed", "released"): _transition_resumed_released,
("resumed", "rescheduled"): _transition_resumed_rescheduled,
("constrained", "executing"): _transition_constrained_executing,
("constrained", "released"): _transition_generic_released,
("error", "released"): _transition_generic_released,
("executing", "error"): _transition_generic_error,
("executing", "long-running"): _transition_executing_long_running,
("executing", "memory"): _transition_executing_memory,
("executing", "released"): _transition_executing_released,
("executing", "rescheduled"): _transition_executing_rescheduled,
("fetch", "flight"): _transition_fetch_flight,
("fetch", "missing"): _transition_fetch_missing,
("fetch", "released"): _transition_fetch_released,
("flight", "error"): _transition_generic_error,
("flight", "fetch"): _transition_flight_fetch,
("flight", "memory"): _transition_flight_memory,
("flight", "missing"): _transition_flight_missing,
("flight", "released"): _transition_flight_released,
("long-running", "error"): _transition_generic_error,
("long-running", "memory"): _transition_executing_memory,
("long-running", "rescheduled"): _transition_executing_rescheduled,
("long-running", "released"): _transition_executing_released,
("memory", "released"): _transition_memory_released,
("missing", "error"): _transition_generic_error,
("missing", "fetch"): _transition_missing_fetch,
("missing", "released"): _transition_missing_released,
("missing", "waiting"): _transition_missing_waiting,
("ready", "executing"): _transition_ready_executing,
("ready", "released"): _transition_generic_released,
("released", "error"): _transition_generic_error,
("released", "fetch"): _transition_released_fetch,
("released", "forgotten"): _transition_released_forgotten,
("released", "memory"): _transition_released_memory,
("released", "missing"): _transition_generic_missing,
("released", "waiting"): _transition_released_waiting,
("waiting", "constrained"): _transition_waiting_constrained,
("waiting", "ready"): _transition_waiting_ready,
("waiting", "released"): _transition_generic_released,
}
def _notify_plugins(self, method_name: str, *args: Any, **kwargs: Any) -> None:
    """Invoke hook ``method_name`` on every registered plugin that defines it.

    Plugin failures are logged (with traceback) and swallowed so that a
    misbehaving plugin cannot disrupt the state machine.
    """
    for name, plugin in self.plugins.items():
        if hasattr(plugin, method_name):
            try:
                getattr(plugin, method_name)(*args, **kwargs)
            except Exception:
                # Best-effort: record the failure and continue with the
                # remaining plugins.
                logger.info(
                    "Plugin '%s' failed with exception", name, exc_info=True
                )
def _transition(
    self,
    ts: TaskState,
    finish: TaskStateState | tuple,
    *args: Any,
    stimulus_id: str,
) -> RecsInstrs:
    """Transition a key from its current state to the finish state.

    Parameters
    ----------
    ts:
        Task to transition.
    finish:
        Target state, or a tuple ``(state, *extra_args)`` where the extra
        arguments are forwarded to the transition function.
    args:
        Extra positional arguments for the transition function; mutually
        exclusive with passing ``finish`` as a tuple.
    stimulus_id:
        Identifier of the event that triggered this transition (for logging).

    Returns
    -------
    Tuple of (follow-up recommendations, instructions).

    See Also
    --------
    Worker.transitions: wrapper around this method
    """
    if isinstance(finish, tuple):
        # the concatenated transition path might need to access the tuple
        assert not args
        args = finish[1:]
        finish = cast(TaskStateState, finish[0])
    if ts.state == finish:
        return {}, []
    start = ts.state
    func = self._TRANSITIONS_TABLE.get((start, finish))
    # Notes:
    # - in case of transition through released, this counter is incremented by 2
    # - this increase happens before the actual transitions, so that it can
    # catch potential infinite recursions
    self.transition_counter += 1
    if (
        self.transition_counter_max
        and self.transition_counter >= self.transition_counter_max
    ):
        raise TransitionCounterMaxExceeded(ts.key, start, finish, self.story(ts))
    if func is not None:
        # Direct edge in the transition table.
        recs, instructions = func(self, ts, *args, stimulus_id=stimulus_id)
        self._notify_plugins("transition", ts.key, start, finish)
    elif "released" not in (start, finish):
        # No direct edge: route through the "released" state.
        # start -> "released" -> finish
        try:
            recs, instructions = self._transition(
                ts, "released", stimulus_id=stimulus_id
            )
            v_state: TaskStateState
            v_args: list | tuple
            # Drain any self-recommendations produced along the way before
            # attempting the final hop to `finish`.
            while v := recs.pop(ts, None):
                if isinstance(v, tuple):
                    v_state, *v_args = v
                else:
                    v_state, v_args = v, ()
                if v_state == "forgotten":
                    # We do not want to forget. The purpose of this
                    # transition path is to get to `finish`
                    continue
                recs, instructions = merge_recs_instructions(
                    (recs, instructions),
                    self._transition(ts, v_state, *v_args, stimulus_id=stimulus_id),
                )
            recs, instructions = merge_recs_instructions(
                (recs, instructions),
                self._transition(ts, finish, *args, stimulus_id=stimulus_id),
            )
        except (InvalidTransition, RecommendationsConflict) as e:
            # Re-raise against the original (start, finish) pair so the
            # error reports the edge the caller asked for.
            raise InvalidTransition(ts.key, start, finish, self.story(ts)) from e
    else:
        raise InvalidTransition(ts.key, start, finish, self.story(ts))
    self.log.append(
        (
            # key
            ts.key,
            # initial
            start,
            # recommended
            finish,
            # final
            ts.state,
            # new recommendations
            {
                ts.key: new[0] if isinstance(new, tuple) else new
                for ts, new in recs.items()
            },
            stimulus_id,
            time(),
        )
    )
    return recs, instructions
def _resource_restrictions_satisfied(self, ts: TaskState) -> bool:
    """Return True iff every resource required by *ts* is currently available
    in sufficient quantity (trivially true when there are no restrictions).
    """
    restrictions = ts.resource_restrictions
    if not restrictions:
        return True
    for resource, needed in restrictions.items():
        if self.available_resources[resource] < needed:
            return False
    return True
def _acquire_resources(self, ts: TaskState) -> None:
    """Deduct the resources required by *ts* from the available pool."""
    restrictions = ts.resource_restrictions
    if not restrictions:
        return
    for resource, needed in restrictions.items():
        self.available_resources[resource] -= needed
def _release_resources(self, ts: TaskState) -> None:
    """Return the resources held by *ts* to the available pool."""
    restrictions = ts.resource_restrictions
    if not restrictions:
        return
    for resource, needed in restrictions.items():
        self.available_resources[resource] += needed
def _transitions(self, recommendations: Recs, *, stimulus_id: str) -> Instructions:
    """Process transitions until none are left.

    This includes feedback from previous transitions and continues until we
    reach a steady state.

    Returns the accumulated instructions for the Worker to execute.
    """
    instructions = []
    # All tasks touched during this round, for optional validation below.
    tasks = set()
    # State of each task before its first transition, for metrics.
    initial_states: dict[TaskState, TaskStateState] = {}
    def process_recs(recs: Recs) -> None:
        # Drain the recommendations dict to a fixed point; each transition
        # may feed new recommendations back into `recs`.
        while recs:
            ts, finish = recs.popitem()
            tasks.add(ts)
            initial_states.setdefault(ts, ts.state)
            a_recs, a_instructions = self._transition(
                ts, finish, stimulus_id=stimulus_id
            )
            recs.update(a_recs)
            instructions.extend(a_instructions)
    process_recs(recommendations.copy())
    # We could call _ensure_communicating after we change something that could
    # trigger a new call to gather_dep (e.g. on transitions to fetch,
    # GatherDepDoneEvent, or RetryBusyWorkerEvent). However, doing so we'd
    # potentially call it too early, before all tasks have transitioned to fetch.
    # This in turn would hurt aggregation of multiple tasks into a single GatherDep
    # instruction.
    # Read: https://github.com/dask/distributed/issues/6497
    a_recs, a_instructions = self._ensure_communicating(stimulus_id=stimulus_id)
    instructions += a_instructions
    process_recs(a_recs)
    self.task_counter.transitions(initial_states)
    if self.validate:
        # Full state validation is very expensive
        for ts in tasks:
            self.validate_task(ts)
    return instructions
##########
# Events #
##########
@singledispatchmethod
def _handle_event(self, ev: StateMachineEvent) -> RecsInstrs:
    """Dispatch *ev* to the handler registered for its concrete type.

    This base implementation is only reached for unregistered event types.
    """
    raise TypeError(ev)  # pragma: nocover
@_handle_event.register
def _handle_update_data(self, ev: UpdateDataEvent) -> RecsInstrs:
    """Receive scattered data: recommend each key directly to memory,
    creating TaskState objects for previously unknown keys.
    """
    recommendations: Recs = {}
    for key, value in ev.data.items():
        try:
            ts = self.tasks[key]
        except KeyError:
            # First time we hear about this key
            self.tasks[key] = ts = TaskState(key)
            self.task_counter.new_task(ts)
        recommendations[ts] = ("memory", value, RUN_ID_SENTINEL)
        self.log.append(
            (key, "receive-from-scatter", ts.state, ev.stimulus_id, time())
        )
    return recommendations, []
@_handle_event.register
def _handle_free_keys(self, ev: FreeKeysEvent) -> RecsInstrs:
    """Handler to be called by the scheduler.

    The given keys are no longer referred to and required by the scheduler.
    The worker is now allowed to release the key, if applicable.

    This does not guarantee that the memory is released since the worker may
    still decide to hold on to the data and task since it is required by an
    upstream dependency.
    """
    self.log.append(("free-keys", ev.keys, ev.stimulus_id, time()))
    # Recommend "released" for every key the worker actually knows about.
    recommendations: Recs = {
        ts: "released"
        for ts in (self.tasks.get(key) for key in ev.keys)
        if ts
    }
    return recommendations, []
@_handle_event.register
def _handle_remove_replicas(self, ev: RemoveReplicasEvent) -> RecsInstrs:
    """Stream handler notifying the worker that it might be holding unreferenced,
    superfluous data.

    This should not actually happen during ordinary operations and is only intended
    to correct any erroneous state. An example where this is necessary is if a
    worker fetches data for a downstream task but that task is released before the
    data arrives. In this case, the scheduler will notify the worker that it may be
    holding this unnecessary data, if the worker hasn't released the data itself,
    already.

    This handler only releases tasks that are indeed in state memory.
    For stronger guarantees, see handler free_keys
    """
    recommendations: Recs = {}
    instructions: Instructions = []
    for key in ev.keys:
        ts = self.tasks.get(key)
        if ts is None or ts.state != "memory":
            # Unknown or not actually replicated here; nothing to drop.
            continue
        # If the task is still in executing or long-running, the scheduler
        # should never have asked the worker to drop this key.
        # We cannot simply forget it because there is a time window between
        # setting the state to executing/long-running and
        # preparing/collecting the data for the task.
        # If a dependency was released during this time, this would pop up
        # as a KeyError during execute which is hard to understand
        for dep in ts.dependents:
            if dep.state in ("executing", "long-running"):
                raise RuntimeError(
                    f"Cannot remove replica of {ts.key!r} while {dep.key!r} in state {dep.state!r}."
                )  # pragma: no cover
        self.log.append((ts.key, "remove-replica", ev.stimulus_id, time()))
        recommendations[ts] = "released"
    return recommendations, instructions
@_handle_event.register
def _handle_acquire_replicas(self, ev: AcquireReplicasEvent) -> RecsInstrs:
    """Scheduler asked this worker to fetch replicas of the given keys.

    Creates TaskState objects as needed and recommends a ``fetch`` for every
    key not already in memory.
    """
    if self.validate:
        assert ev.who_has.keys() == ev.nbytes.keys()
        assert all(ev.who_has.values())
    recommendations: Recs = {}
    for key, nbytes in ev.nbytes.items():
        ts = self._ensure_task_exists(
            key=key,
            # Transfer this data after all dependency tasks of computations with
            # default or explicitly high (>0) user priority and before all
            # computations with low priority (<0). Note that the priority= parameter
            # of compute() is multiplied by -1 before it reaches TaskState.priority.
            priority=(1,),
            stimulus_id=ev.stimulus_id,
        )
        if ts.state != "memory":
            ts.nbytes = nbytes
            recommendations[ts] = "fetch"
    self._update_who_has(ev.who_has)
    return recommendations, []
@_handle_event.register
def _handle_compute_task(self, ev: ComputeTaskEvent) -> RecsInstrs:
    """Scheduler asked this worker to compute a task.

    Depending on the task's current state this either does nothing (already
    queued/running), re-reports a finished result, or (re)initializes the
    task's spec, priority and dependencies and recommends ``waiting``.
    """
    try:
        ts = self.tasks[ev.key]
        logger.debug(
            "Asked to compute an already known task %s",
            {"task": ts, "stimulus_id": ev.stimulus_id},
        )
    except KeyError:
        self.tasks[ev.key] = ts = TaskState(ev.key)
        self.task_counter.new_task(ts)
    self.log.append((ev.key, "compute-task", ts.state, ev.stimulus_id, time()))
    ts.run_id = ev.run_id
    recommendations: Recs = {}
    instructions: Instructions = []
    if ts.state in READY | {
        "executing",
        "long-running",
        "waiting",
    }:
        # Already queued or running; nothing to do.
        pass
    elif ts.state == "memory":
        # Result already available: just (re-)notify the scheduler.
        instructions.append(
            self._get_task_finished_msg(
                ts, run_id=ev.run_id, stimulus_id=ev.stimulus_id
            )
        )
    elif ts.state in {
        "released",
        "fetch",
        "flight",
        "missing",
        "cancelled",
        "resumed",
        "error",
    }:
        # (Re)initialize the task for computation.
        recommendations[ts] = "waiting"
        ts.run_spec = ev.run_spec
        priority = ev.priority + (self.generation,)
        self.generation -= 1
        if ev.actor:
            self.actors[ts.key] = None
        ts.exception = None
        ts.traceback = None
        ts.exception_text = ""
        ts.traceback_text = ""
        ts.priority = priority
        ts.annotations = ev.annotations
        ts.span_id = ev.span_id
        # If we receive ComputeTaskEvent twice for the same task, resources may have
        # changed, but the task is still running. Preserve the previous resource
        # restrictions so that they can be properly released when it eventually
        # completes.
        if not (
            ts.state in ("cancelled", "resumed")
            and ts.previous in ("executing", "long-running")
        ):
            ts.resource_restrictions = ev.resource_restrictions
        if self.validate:
            assert ev.who_has.keys() == ev.nbytes.keys()
            for dep_workers in ev.who_has.values():
                assert dep_workers
                assert len(dep_workers) == len(set(dep_workers))
        for dep_key, nbytes in ev.nbytes.items():
            dep_ts = self._ensure_task_exists(
                key=dep_key,
                priority=priority,
                stimulus_id=ev.stimulus_id,
            )
            if dep_ts.state != "memory":
                dep_ts.nbytes = nbytes
            # link up to child / parents
            ts.dependencies.add(dep_ts)
            dep_ts.dependents.add(ts)
        self._update_who_has(ev.who_has)
    else:
        raise RuntimeError(  # pragma: nocover
            f"Unexpected task state encountered for {ts}; "
            f"stimulus_id={ev.stimulus_id}; story={self.story(ts)}"
        )
    return recommendations, instructions
def _gather_dep_done_common(self, ev: GatherDepDoneEvent) -> Iterator[TaskState]:
    """Common code for the handlers of all subclasses of GatherDepDoneEvent.

    Yields the tasks that need to transition out of flight.
    The task states can be flight, cancelled, or resumed, but in case of scatter()
    they can also be in memory or error states.

    See also
    --------
    _execute_done_common
    """
    # Book-keeping: the transfer is over, whatever its outcome.
    self.transfer_incoming_bytes -= ev.total_nbytes
    keys = self.in_flight_workers.pop(ev.worker)
    for key in keys:
        ts = self.tasks[key]
        ts.done = True
        ts.coming_from = None
        self.in_flight_tasks.remove(ts)
        yield ts
@_handle_event.register
def _handle_gather_dep_success(self, ev: GatherDepSuccessEvent) -> RecsInstrs:
    """gather_dep terminated successfully.

    The response may contain fewer keys than the request.
    """
    recommendations: Recs = {}
    for ts in self._gather_dep_done_common(ev):
        if ts.key in ev.data:
            # Received the data: straight to memory.
            recommendations[ts] = ("memory", ev.data[ts.key], ts.run_id)
        else:
            # Remote worker no longer holds this key; forget this replica
            # and try fetching again elsewhere.
            self.log.append((ts.key, "missing-dep", ev.stimulus_id, time()))
            if self.validate:
                assert ts.state != "fetch"
                assert ts not in self.data_needed[ev.worker]
            ts.who_has.discard(ev.worker)
            if ev.worker in self.has_what:
                self.has_what[ev.worker].discard(ts.key)
            recommendations[ts] = "fetch"
    return recommendations, []
@_handle_event.register
def _handle_gather_dep_busy(self, ev: GatherDepBusyEvent) -> RecsInstrs:
    """gather_dep terminated: remote worker is busy"""
    # Avoid hammering the worker. If there are multiple replicas
    # available, immediately try fetching from a different worker.
    self.busy_workers.add(ev.worker)
    recommendations: Recs = {}
    refresh_who_has = []
    for ts in self._gather_dep_done_common(ev):
        recommendations[ts] = "fetch"
        if not ts.who_has - self.busy_workers:
            # Every known replica holder is busy.
            refresh_who_has.append(ts.key)
    instructions: Instructions = [
        RetryBusyWorkerLater(worker=ev.worker, stimulus_id=ev.stimulus_id),
    ]
    if refresh_who_has:
        # All workers that hold known replicas of our tasks are busy.
        # Try querying the scheduler for unknown ones.
        instructions.append(
            RequestRefreshWhoHasMsg(
                keys=refresh_who_has, stimulus_id=ev.stimulus_id
            )
        )
    return recommendations, instructions
@_handle_event.register
def _handle_gather_dep_network_failure(
    self, ev: GatherDepNetworkFailureEvent
) -> RecsInstrs:
    """gather_dep terminated: network failure while trying to
    communicate with remote worker

    Though the network failure could be transient, we assume it is not, and
    preemptively act as though the other worker has died (including removing all
    keys from it, even ones we did not fetch).

    This optimization leads to faster completion of the fetch, since we immediately
    either retry a different worker, or ask the scheduler to inform us of a new
    worker if no other worker is available.
    """
    recommendations: Recs = {}
    for ts in self._gather_dep_done_common(ev):
        self.log.append((ts.key, "missing-dep", ev.stimulus_id, time()))
        recommendations[ts] = "fetch"
    # Same worker-purge as in _handle_remove_worker: drop everything we
    # thought the failed worker was holding.
    for ts in self.data_needed.pop(ev.worker, ()):
        if self.validate:
            assert ts.state == "fetch"
            assert ev.worker in ts.who_has
        if ts.who_has == {ev.worker}:
            # This can override a recommendation from the previous for loop
            recommendations[ts] = "missing"
    for key in self.has_what.pop(ev.worker, ()):
        ts = self.tasks[key]
        ts.who_has.remove(ev.worker)
    return recommendations, []
@_handle_event.register
def _handle_gather_dep_failure(self, ev: GatherDepFailureEvent) -> RecsInstrs:
    """gather_dep terminated: generic error raised (not a network failure);
    e.g. data failed to deserialize.
    """
    # Unlike a network failure, the error is attributed to the tasks
    # themselves: they all transition to the error state.
    recommendations: Recs = {
        ts: (
            "error",
            ev.exception,
            ev.traceback,
            ev.exception_text,
            ev.traceback_text,
            ts.run_id,
        )
        for ts in self._gather_dep_done_common(ev)
    }
    return recommendations, []
@_handle_event.register
def _handle_remove_worker(self, ev: RemoveWorkerEvent) -> RecsInstrs:
    """A peer worker left the cluster: forget all of its replicas.

    Tasks whose only known replica was on the departed worker are
    recommended to ``missing``. Same cleanup as the tail of
    _handle_gather_dep_network_failure.
    """
    recommendations: Recs = {}
    for ts in self.data_needed.pop(ev.worker, ()):
        if self.validate:
            assert ts.state == "fetch"
            assert ev.worker in ts.who_has
        if ts.who_has == {ev.worker}:
            # This can override a recommendation from the previous for loop
            recommendations[ts] = "missing"
    for key in self.has_what.pop(ev.worker, ()):
        ts = self.tasks[key]
        ts.who_has.remove(ev.worker)
    return recommendations, []
@_handle_event.register
def _handle_secede(self, ev: SecedeEvent) -> RecsInstrs:
    """A running task called secede(): move it to long-running if known."""
    ts = self.tasks.get(ev.key)
    if ts:
        return {ts: ("long-running", ev.compute_duration)}, []
    return {}, []
@_handle_event.register
def _handle_steal_request(self, ev: StealRequestEvent) -> RecsInstrs:
    """Scheduler asked to steal a task; release it if it has not started yet
    and always report the observed state back.
    """
    # There may be a race condition between stealing and releasing a task.
    # In this case the self.tasks is already cleared. The `None` will be
    # registered as `already-computing` on the other end
    ts = self.tasks.get(ev.key)
    state = ts.state if ts is not None else None
    smsg = StealResponseMsg(key=ev.key, state=state, stimulus_id=ev.stimulus_id)
    if state in READY | {"waiting"}:
        # If task is marked as "constrained" we haven't yet assigned it an
        # `available_resources` to run on, that happens in
        # `_transition_constrained_executing`
        assert ts
        return {ts: "released"}, [smsg]
    else:
        # Already running/finished (or unknown): nothing to release.
        return {}, [smsg]
@_handle_event.register
def _handle_pause(self, ev: PauseEvent) -> RecsInstrs:
    """Prevent any further tasks to be executed or gathered. Tasks that are
    currently executing or in flight will continue to progress.
    """
    self.running = False
    recommendations: Recs = {}
    instructions: Instructions = []
    return recommendations, instructions
@_handle_event.register
def _handle_unpause(self, ev: UnpauseEvent) -> RecsInstrs:
    """Emerge from paused status"""
    self.running = True
    # Resuming may immediately allow queued tasks to start executing.
    return self._ensure_computing()
@_handle_event.register
def _handle_retry_busy_worker(self, ev: RetryBusyWorkerEvent) -> RecsInstrs:
    """A previously busy peer may be retried: clear its busy flag."""
    if ev.worker in self.busy_workers:
        self.busy_workers.remove(ev.worker)
    return {}, []
@_handle_event.register
def _handle_cancel_compute(self, ev: CancelComputeEvent) -> RecsInstrs:
    """Cancel a task on a best-effort basis. This is only possible while a task
    is in state `waiting` or `ready`; nothing will happen otherwise.
    """
    ts = self.tasks.get(ev.key)
    if not ts or ts.state not in READY | {"waiting"}:
        # Unknown or already started: cancellation is a no-op.
        return {}, []
    self.log.append((ev.key, "cancel-compute", ev.stimulus_id, time()))
    # All possible dependents of ts should not be in state Processing on
    # scheduler side and therefore should not be assigned to a worker, yet.
    assert not ts.dependents
    return {ts: "released"}, []
def _execute_done_common(
    self, ev: ExecuteDoneEvent
) -> tuple[TaskState, Recs, Instructions]:
    """Common code for the handlers of all subclasses of ExecuteDoneEvent.

    The task state can be executing, cancelled, or resumed, but in case of scatter()
    it can also be in memory or error state.

    Returns the finished task plus the recommendations/instructions produced
    by _ensure_computing (the freed thread may start another task).

    See also
    --------
    _gather_dep_done_common
    """
    # key *must* be still in tasks - see _transition_released_forgotten
    ts = self.tasks.get(ev.key)
    assert ts, self.story(ev.key)
    if self.validate:
        assert (ts in self.executing) != (ts in self.long_running)  # XOR
    ts.done = True
    self.executed_count += 1
    # Free the execution slot and any held resources before deciding what
    # to run next.
    self._release_resources(ts)
    self.executing.discard(ts)
    self.long_running.discard(ts)
    recs, instr = self._ensure_computing()
    assert ts not in recs
    return ts, recs, instr
@_handle_event.register
def _handle_execute_success(self, ev: ExecuteSuccessEvent) -> RecsInstrs:
    """Task completed successfully"""
    ts, recs, instr = self._execute_done_common(ev)
    # This is used for scheduler-side occupancy heuristics; it's important that it
    # does not contain overhead from the thread pool or the worker's event loop
    # (which are not the task's fault and are unpredictable).
    ts.startstops.append({"action": "compute", "start": ev.start, "stop": ev.stop})
    ts.nbytes = ev.nbytes
    ts.type = ev.type
    # Recommend the result into memory for this run.
    recs[ts] = ("memory", ev.value, ev.run_id)
    return recs, instr
@_handle_event.register
def _handle_execute_failure(self, ev: ExecuteFailureEvent) -> RecsInstrs:
    """Task execution failed"""
    ts, recs, instr = self._execute_done_common(ev)
    # Timings may be absent if the failure happened before the task body ran.
    if ev.start is not None and ev.stop is not None:
        ts.startstops.append(
            {"action": "compute", "start": ev.start, "stop": ev.stop}
        )
    recs[ts] = (
        "error",
        ev.exception,
        ev.traceback,
        ev.exception_text,
        ev.traceback_text,
        ev.run_id,
    )
    return recs, instr
@_handle_event.register
def _handle_reschedule(self, ev: RescheduleEvent) -> RecsInstrs:
    """Task raised Reschedule() exception while it was running.

    Note: this has nothing to do with work stealing, which instead causes a
    FreeKeysEvent.
    """
    ts, recs, instr = self._execute_done_common(ev)
    recs[ts] = "rescheduled"
    return recs, instr
@_handle_event.register
def _handle_find_missing(self, ev: FindMissingEvent) -> RecsInstrs:
    """Periodic probe: ask the scheduler for replica locations of all tasks
    currently in the ``missing`` state.
    """
    if not self.missing_dep_flight:
        return {}, []
    if self.validate:
        for ts in self.missing_dep_flight:
            assert not ts.who_has, self.story(ts)
    smsg = RequestRefreshWhoHasMsg(
        keys=[ts.key for ts in self.missing_dep_flight],
        stimulus_id=ev.stimulus_id,
    )
    return {}, [smsg]
@_handle_event.register
def _handle_refresh_who_has(self, ev: RefreshWhoHasEvent) -> RecsInstrs:
    """Scheduler refreshed replica locations; flip tasks between the
    ``missing`` and ``fetch`` states accordingly.
    """
    self._update_who_has(ev.who_has)
    recommendations: Recs = {}
    instructions: Instructions = []
    for key in ev.who_has:
        ts = self.tasks.get(key)
        if not ts:
            continue
        if ts.who_has and ts.state == "missing":
            recommendations[ts] = "fetch"
        elif not ts.who_has and ts.state == "fetch":
            recommendations[ts] = "missing"
        # Note: if ts.who_has and ts.state == "fetch", we may have just acquired new
        # replicas whereas all previously known workers are in flight or busy. We
        # rely on _transitions to call _ensure_communicating every time, even in
        # absence of recommendations, to potentially kick off a new call to
        # gather_dep.
    return recommendations, instructions
###############
# Diagnostics #
###############
def story(self, *keys_or_tasks_or_stimuli: str | Key | TaskState) -> list[tuple]:
    """Return all records from the transitions log involving one or more tasks or
    stimulus_id's
    """
    # Normalize TaskState objects to their keys before filtering the log.
    wanted = set()
    for item in keys_or_tasks_or_stimuli:
        wanted.add(item.key if isinstance(item, TaskState) else item)
    return worker_story(wanted, self.log)
def stimulus_story(
    self, *keys_or_tasks: Key | TaskState
) -> list[StateMachineEvent]:
    """Return all state machine events involving one or more tasks"""
    keys = set()
    for item in keys_or_tasks:
        keys.add(item.key if isinstance(item, TaskState) else item)
    matches = []
    for ev in self.stimulus_log:
        if getattr(ev, "key", None) in keys:
            matches.append(ev)
    return matches
def _to_dict(self, *, exclude: Container[str] = ()) -> dict:
    """Dictionary representation for debugging purposes.

    Not type stable and not intended for roundtrips.

    Parameters
    ----------
    exclude:
        Top-level keys (and nested attributes) to omit from the output.

    See also
    --------
    Client.dump_cluster_state
    distributed.utils.recursive_to_dict
    """
    info = {
        "address": self.address,
        "nthreads": self.nthreads,
        "running": self.running,
        "ready": [ts.key for ts in self.ready.sorted()],
        "constrained": [ts.key for ts in self.constrained.sorted()],
        "data": dict.fromkeys(self.data),
        "data_needed": {
            w: [ts.key for ts in tss.sorted()]
            for w, tss in self.data_needed.items()
        },
        "executing": {ts.key for ts in self.executing},
        "has_what": dict(self.has_what),
        "long_running": {ts.key for ts in self.long_running},
        "in_flight_tasks": {ts.key for ts in self.in_flight_tasks},
        "in_flight_workers": self.in_flight_workers,
        "missing_dep_flight": [ts.key for ts in self.missing_dep_flight],
        "busy_workers": self.busy_workers,
        "log": self.log,
        "stimulus_log": self.stimulus_log,
        "transition_counter": self.transition_counter,
        "tasks": self.tasks,
        "task_counts": dict(self.task_counter.current_count()),
        "task_cumulative_elapsed": dict(self.task_counter.cumulative_elapsed()),
    }
    # Apply `exclude` both at the top level and recursively.
    info = {k: v for k, v in info.items() if k not in exclude}
    return recursive_to_dict(info, exclude=exclude)
##############
# Validation #
##############
def _validate_task_memory(self, ts: TaskState) -> None:
    """Assert invariants for a task in state ``memory``."""
    assert ts.key in self.data or ts.key in self.actors
    assert isinstance(ts.nbytes, int)
    assert not ts.waiting_for_data
def _validate_task_executing(self, ts: TaskState) -> None:
    """Validate tasks:

    - ts.state == executing
    - ts.state == long-running
    - ts.state == cancelled, ts.previous == executing
    - ts.state == cancelled, ts.previous == long-running
    - ts.state == resumed, ts.previous == executing, ts.next == fetch
    - ts.state == resumed, ts.previous == long-running, ts.next == fetch
    """
    # A task lives in exactly one of self.executing / self.long_running.
    if ts.state == "executing" or ts.previous == "executing":
        assert ts in self.executing
        assert ts not in self.long_running
    else:
        assert ts.state == "long-running" or ts.previous == "long-running"
        assert ts not in self.executing
        assert ts in self.long_running
    assert ts.run_spec is not None
    assert ts.key not in self.data
    assert not ts.waiting_for_data
    for dep in ts.dependents:
        assert dep not in self.ready
        assert dep not in self.constrained
    # FIXME https://github.com/dask/distributed/issues/6893
    # This assertion can be false for
    # - cancelled or resumed tasks
    # - executing tasks which used to be cancelled in the past
    # for dep in ts.dependencies:
    #     assert dep.state == "memory", self.story(dep)
    #     assert dep.key in self.data or dep.key in self.actors
def _validate_task_ready(self, ts: TaskState) -> None:
    """Validate tasks:

    - ts.state == ready
    - ts.state == constrained
    """
    # ready vs constrained is determined by the presence of
    # resource_restrictions; membership in the matching heap must agree.
    if ts.state == "ready":
        assert not ts.resource_restrictions
        assert ts in self.ready
        assert ts not in self.constrained
    else:
        assert ts.resource_restrictions
        assert ts.state == "constrained"
        assert ts not in self.ready
        assert ts in self.constrained
    assert ts.key not in self.data
    assert not ts.done
    assert not ts.waiting_for_data
    assert all(
        dep.key in self.data or dep.key in self.actors for dep in ts.dependencies
    )
def _validate_task_waiting(self, ts: TaskState) -> None:
    """Assert invariants for a task in state ``waiting``."""
    assert ts.key not in self.data
    assert not ts.done
    assert ts in self.waiting
    assert ts.waiting_for_data
    # waiting_for_data must be exactly the dependencies not yet locally held.
    assert ts.waiting_for_data == {
        dep
        for dep in ts.dependencies
        if dep.key not in self.data and dep.key not in self.actors
    }
    for dep in ts.dependents:
        assert dep not in self.ready
        assert dep not in self.constrained
def _validate_task_flight(self, ts: TaskState) -> None:
    """Validate tasks:

    - ts.state == flight
    - ts.state == cancelled, ts.previous == flight
    - ts.state == resumed, ts.previous == flight, ts.next == waiting
    """
    assert ts.key not in self.data
    assert ts.key not in self.actors
    assert ts in self.in_flight_tasks
    for dep in ts.dependents:
        assert dep not in self.ready
        assert dep not in self.constrained
    # The in-flight transfer must be tracked against its source worker.
    assert ts.coming_from
    assert ts.coming_from in self.in_flight_workers
    assert ts.key in self.in_flight_workers[ts.coming_from]
def _validate_task_fetch(self, ts: TaskState) -> None:
    """Assert invariants for a task in state ``fetch``."""
    assert ts.key not in self.data
    assert ts.key not in self.actors
    assert self.address not in ts.who_has
    assert not ts.done
    assert ts.who_has
    # who_has / has_what / data_needed must agree for every replica holder.
    for w in ts.who_has:
        assert ts.key in self.has_what[w]
        assert ts in self.data_needed[w]
    for dep in ts.dependents:
        assert dep not in self.ready
        assert dep not in self.constrained
def _validate_task_missing(self, ts: TaskState) -> None:
    """Assert invariants for a task in state ``missing``."""
    assert ts.key not in self.data
    assert ts.key not in self.actors
    assert not ts.who_has
    assert not ts.done
    assert not any(ts.key in has_what for has_what in self.has_what.values())
    assert ts in self.missing_dep_flight
    for dep in ts.dependents:
        assert dep not in self.ready
        assert dep not in self.constrained
def _validate_task_cancelled(self, ts: TaskState) -> None:
    """Assert invariants for a task in state ``cancelled``: no next state,
    and the previous-state invariants still hold.
    """
    assert ts.next is None
    if ts.previous in ("executing", "long-running"):
        self._validate_task_executing(ts)
    else:
        assert ts.previous == "flight"
        self._validate_task_flight(ts)
def _validate_task_resumed(self, ts: TaskState) -> None:
    """Assert invariants for a task in state ``resumed``: the next state is
    fixed by the previous one, and the previous-state invariants still hold.
    """
    if ts.previous in ("executing", "long-running"):
        assert ts.next == "fetch"
        self._validate_task_executing(ts)
    else:
        assert ts.previous == "flight"
        assert ts.next == "waiting"
        self._validate_task_flight(ts)
    for dep in ts.dependents:
        assert dep not in self.ready
        assert dep not in self.constrained
def _validate_task_released(self, ts: TaskState) -> None:
    """Assert invariants for a task in state ``released``: it must not
    appear in any of the working collections.
    """
    assert ts.key not in self.data
    assert ts.key not in self.actors
    assert not ts.next
    assert not ts.previous
    for tss in self.data_needed.values():
        assert ts not in tss
    assert ts not in self.executing
    assert ts not in self.in_flight_tasks
    assert ts not in self.missing_dep_flight
    # The below assert statement is true most of the time. If a task performs the
    # transition flight->cancel->waiting, its dependencies are normally in released
    # state. However, the compute-task call for their previous dependent provided
    # them with who_has, such that this assert is no longer true.
    #
    # assert not any(ts.key in has_what for has_what in self.has_what.values())
    assert not ts.waiting_for_data
    assert not ts.done
    assert not ts.exception
    assert not ts.traceback
def validate_task(self, ts: TaskState) -> None:
    """Validate the internal invariants of a single task.

    Dispatches to the per-state ``_validate_task_*`` helper and wraps any
    assertion failure in InvalidTaskState, attaching the task's story.
    """
    try:
        if ts.key in self.tasks:
            assert self.tasks[ts.key] is ts
        if ts.state == "memory":
            self._validate_task_memory(ts)
        elif ts.state == "waiting":
            self._validate_task_waiting(ts)
        elif ts.state == "missing":
            self._validate_task_missing(ts)
        elif ts.state == "cancelled":
            self._validate_task_cancelled(ts)
        elif ts.state == "resumed":
            self._validate_task_resumed(ts)
        elif ts.state in ("ready", "constrained"):
            self._validate_task_ready(ts)
        elif ts.state in ("executing", "long-running"):
            self._validate_task_executing(ts)
        elif ts.state == "flight":
            self._validate_task_flight(ts)
        elif ts.state == "fetch":
            self._validate_task_fetch(ts)
        elif ts.state == "released":
            self._validate_task_released(ts)
    except Exception as e:
        logger.exception(e)
        raise InvalidTaskState(
            key=ts.key, state=ts.state, story=self.story(ts)
        ) from e
def validate_state(self) -> None:
    """Validate the cross-task invariants of the whole state machine.

    Checks consistency between ``tasks``, ``has_what``, ``data_needed`` and
    the per-state task collections, then validates every task individually.
    Very expensive; only intended for use when ``self.validate`` is set.
    """
    for ts in self.tasks.values():
        # check that worker has task
        for worker in ts.who_has:
            assert worker != self.address
            assert ts.key in self.has_what[worker]
        # check that deps have a set state and that dependency<->dependent links
        # are there
        for dep in ts.dependencies:
            # self.tasks was just a dict of tasks
            # and this check was originally that the key was in `task_state`
            # so we may have popped the key out of `self.tasks` but the
            # dependency can still be in `memory` before GC grabs it...?
            # Might need better bookkeeping
            assert self.tasks[dep.key] is dep
            assert ts in dep.dependents, self.story(ts)
        for ts_wait in ts.waiting_for_data:
            assert self.tasks[ts_wait.key] is ts_wait
            assert ts_wait.state in WAITING_FOR_DATA, self.story(ts_wait)
    # has_what must be the exact mirror of TaskState.who_has.
    for worker, keys in self.has_what.items():
        assert worker != self.address
        for k in keys:
            assert k in self.tasks, self.story(k)
            assert worker in self.tasks[k].who_has
    # Test contents of the various sets of TaskState objects
    fetch_tss = set()
    for worker, tss in self.data_needed.items():
        for ts in tss:
            fetch_tss.add(ts)
            assert ts.state == "fetch", self.story(ts)
            assert worker in ts.who_has, f"{ts}; {ts.who_has=}"
    assert len(fetch_tss) == self.fetch_count
    for ts in self.missing_dep_flight:
        assert ts.state == "missing", self.story(ts)
    for ts in self.ready:
        assert ts.state == "ready", self.story(ts)
    for ts in self.constrained:
        assert ts.state == "constrained", self.story(ts)
    for ts in self.executing:
        assert ts.state == "executing" or (
            ts.state in ("cancelled", "resumed") and ts.previous == "executing"
        ), self.story(ts)
    for ts in self.long_running:
        assert ts.state == "long-running" or (
            ts.state in ("cancelled", "resumed") and ts.previous == "long-running"
        ), self.story(ts)
    for ts in self.in_flight_tasks:
        assert ts.state == "flight" or (
            ts.state in ("cancelled", "resumed") and ts.previous == "flight"
        ), self.story(ts)
    for ts in self.waiting:
        assert ts.state == "waiting", self.story(ts)
    # Test that there aren't multiple TaskState objects with the same key in any
    # Set[TaskState]. See note in TaskState.__hash__.
    for ts in chain(
        *self.data_needed.values(),
        self.missing_dep_flight,
        self.ready,
        self.constrained,
        self.in_flight_tasks,
        self.executing,
        self.long_running,
        self.waiting,
    ):
        assert self.tasks[ts.key] is ts, f"{self.tasks[ts.key]} is not {ts}"
    # Managed-memory accounting must match the tracked nbytes.
    expect_nbytes = sum(
        self.tasks[key].nbytes or 0 for key in chain(self.data, self.actors)
    )
    assert self.nbytes == expect_nbytes, f"{self.nbytes=}; expected {expect_nbytes}"
    for key in self.data:
        assert key in self.tasks, self.story(key)
    for key in self.actors:
        assert key in self.tasks, self.story(key)
    for ts in self.tasks.values():
        self.validate_task(ts)
    # Per-(prefix, state) counters must agree with the actual tasks.
    expect_state_count = Counter(
        (ts.prefix, ts.state) for ts in self.tasks.values()
    )
    assert self.task_counter.current_count() == expect_state_count, (
        self.task_counter.current_count(),
        expect_state_count,
    )
    if self.transition_counter_max:
        assert self.transition_counter < self.transition_counter_max
    self._validate_resources()
def _validate_resources(self) -> None:
"""Assert that available_resources + resources held by tasks = total_resources"""
assert self.total_resources.keys() == self.available_resources.keys()
total = self.total_resources.copy()
for k, v in self.available_resources.items():
assert v > -1e-9, self.available_resources
total[k] -= v
for ts in self.all_running_tasks:
if ts.resource_restrictions:
for k, v in ts.resource_restrictions.items():
assert v >= 0, (ts, ts.resource_restrictions)
total[k] -= v
assert all((abs(v) < 1e-9) for v in total.values()), total
| WorkerState |
python | ipython__ipython | IPython/core/completer.py | {
"start": 30548,
"end": 47365
} | class ____(Configurable):
greedy = Bool(
False,
help="""Activate greedy completion.
.. deprecated:: 8.8
Use :std:configtrait:`Completer.evaluation` and :std:configtrait:`Completer.auto_close_dict_keys` instead.
When enabled in IPython 8.8 or newer, changes configuration as follows:
- ``Completer.evaluation = 'unsafe'``
- ``Completer.auto_close_dict_keys = True``
""",
).tag(config=True)
evaluation = Enum(
("forbidden", "minimal", "limited", "unsafe", "dangerous"),
default_value="limited",
help="""Policy for code evaluation under completion.
Successive options allow to enable more eager evaluation for better
completion suggestions, including for nested dictionaries, nested lists,
or even results of function calls.
Setting ``unsafe`` or higher can lead to evaluation of arbitrary user
code on :kbd:`Tab` with potentially unwanted or dangerous side effects.
Allowed values are:
- ``forbidden``: no evaluation of code is permitted,
- ``minimal``: evaluation of literals and access to built-in namespace;
no item/attribute evaluation, no access to locals/globals,
no evaluation of any operations or comparisons.
- ``limited``: access to all namespaces, evaluation of hard-coded methods
(for example: :any:`dict.keys`, :any:`object.__getattr__`,
:any:`object.__getitem__`) on allow-listed objects (for example:
:any:`dict`, :any:`list`, :any:`tuple`, ``pandas.Series``),
- ``unsafe``: evaluation of all methods and function calls but not of
syntax with side-effects like `del x`,
- ``dangerous``: completely arbitrary evaluation; does not support auto-import.
To override specific elements of the policy, you can use ``policy_overrides`` trait.
""",
).tag(config=True)
use_jedi = Bool(default_value=JEDI_INSTALLED,
help="Experimental: Use Jedi to generate autocompletions. "
"Default to True if jedi is installed.").tag(config=True)
jedi_compute_type_timeout = Int(default_value=400,
help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
Set to 0 to stop computing types. Non-zero value lower than 100ms may hurt
performance by preventing jedi to build its cache.
""").tag(config=True)
debug = Bool(default_value=False,
help='Enable debug for the Completer. Mostly print extra '
'information for experimental jedi integration.')\
.tag(config=True)
backslash_combining_completions = Bool(True,
help="Enable unicode completions, e.g. \\alpha<tab> . "
"Includes completion of latex commands, unicode names, and expanding "
"unicode characters back to latex commands.").tag(config=True)
auto_close_dict_keys = Bool(
False,
help="""
Enable auto-closing dictionary keys.
When enabled string keys will be suffixed with a final quote
(matching the opening quote), tuple keys will also receive a
separating comma if needed, and keys which are final will
receive a closing bracket (``]``).
""",
).tag(config=True)
policy_overrides = DictTrait(
default_value={},
key_trait=Unicode(),
help="""Overrides for policy evaluation.
For example, to enable auto-import on completion specify:
.. code-block::
ipython --Completer.policy_overrides='{"allow_auto_import": True}' --Completer.use_jedi=False
""",
).tag(config=True)
@observe("evaluation")
def _evaluation_changed(self, _change):
_validate_policy_overrides(
policy_name=self.evaluation, policy_overrides=self.policy_overrides
)
@observe("policy_overrides")
def _policy_overrides_changed(self, _change):
_validate_policy_overrides(
policy_name=self.evaluation, policy_overrides=self.policy_overrides
)
auto_import_method = DottedObjectName(
default_value="importlib.import_module",
allow_none=True,
help="""\
Provisional:
This is a provisional API in IPython 9.3, it may change without warnings.
A fully qualified path to an auto-import method for use by completer.
The function should take a single string and return `ModuleType` and
can raise `ImportError` exception if module is not found.
The default auto-import implementation does not populate the user namespace with the imported module.
""",
).tag(config=True)
def __init__(self, namespace=None, global_namespace=None, **kwargs):
"""Create a new completer for the command line.
Completer(namespace=ns, global_namespace=ns2) -> completer instance.
If unspecified, the default namespace where completions are performed
is __main__ (technically, __main__.__dict__). Namespaces should be
given as dictionaries.
An optional second namespace can be given. This allows the completer
to handle cases where both the local and global scopes need to be
distinguished.
"""
# Don't bind to namespace quite yet, but flag whether the user wants a
# specific namespace or to use __main__.__dict__. This will allow us
# to bind to __main__.__dict__ at completion time, not now.
if namespace is None:
self.use_main_ns = True
else:
self.use_main_ns = False
self.namespace = namespace
# The global namespace, if given, can be bound directly
if global_namespace is None:
self.global_namespace = {}
else:
self.global_namespace = global_namespace
self.custom_matchers = []
super(Completer, self).__init__(**kwargs)
def complete(self, text, state):
"""Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
"""
if self.use_main_ns:
self.namespace = __main__.__dict__
if state == 0:
if "." in text:
self.matches = self.attr_matches(text)
else:
self.matches = self.global_matches(text)
try:
return self.matches[state]
except IndexError:
return None
def global_matches(self, text: str, context: Optional[CompletionContext] = None):
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace or self.global_namespace that match.
"""
matches = []
match_append = matches.append
n = len(text)
search_lists = [
keyword.kwlist,
builtin_mod.__dict__.keys(),
list(self.namespace.keys()),
list(self.global_namespace.keys()),
]
if context and context.full_text.count("\n") > 1:
# try to evaluate on full buffer
previous_lines = "\n".join(
context.full_text.split("\n")[: context.cursor_line]
)
if previous_lines:
all_code_lines_before_cursor = (
self._extract_code(previous_lines) + "\n" + text
)
context = EvaluationContext(
globals=self.global_namespace,
locals=self.namespace,
evaluation=self.evaluation,
auto_import=self._auto_import,
policy_overrides=self.policy_overrides,
)
try:
obj = guarded_eval(
all_code_lines_before_cursor,
context,
)
except Exception as e:
if self.debug:
warnings.warn(f"Evaluation exception {e}")
search_lists.append(list(context.transient_locals.keys()))
for lst in search_lists:
for word in lst:
if word[:n] == text and word != "__builtins__":
match_append(word)
snake_case_re = re.compile(r"[^_]+(_[^_]+)+?\Z")
for lst in [list(self.namespace.keys()), list(self.global_namespace.keys())]:
shortened = {
"_".join([sub[0] for sub in word.split("_")]): word
for word in lst
if snake_case_re.match(word)
}
for word in shortened.keys():
if word[:n] == text and word != "__builtins__":
match_append(shortened[word])
return matches
def attr_matches(self, text):
"""Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluatable in self.namespace or self.global_namespace, it will be
evaluated and its attributes (as revealed by dir()) are used as
possible completions. (For class instances, class members are
also considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
return self._attr_matches(text)[0]
# we simple attribute matching with normal identifiers.
_ATTR_MATCH_RE = re.compile(r"(.+)\.(\w*)$")
def _strip_code_before_operator(self, code: str) -> str:
o_parens = {"(", "[", "{"}
c_parens = {")", "]", "}"}
# Dry-run tokenize to catch errors
try:
_ = list(tokenize.generate_tokens(iter(code.splitlines()).__next__))
except tokenize.TokenError:
# Try trimming the expression and retrying
trimmed_code = self._trim_expr(code)
try:
_ = list(
tokenize.generate_tokens(iter(trimmed_code.splitlines()).__next__)
)
code = trimmed_code
except tokenize.TokenError:
return code
tokens = _parse_tokens(code)
encountered_operator = False
after_operator = []
nesting_level = 0
for t in tokens:
if t.type == tokenize.OP:
if t.string in o_parens:
nesting_level += 1
elif t.string in c_parens:
nesting_level -= 1
elif t.string != "." and nesting_level == 0:
encountered_operator = True
after_operator = []
continue
if encountered_operator:
after_operator.append(t.string)
if encountered_operator:
return "".join(after_operator)
else:
return code
def _extract_code(self, line: str):
"""No-op in Completer, but can be used in subclasses to customise behaviour"""
return line
def _attr_matches(
self,
text: str,
include_prefix: bool = True,
context: Optional[CompletionContext] = None,
) -> tuple[Sequence[str], str]:
m2 = self._ATTR_MATCH_RE.match(text)
if not m2:
return [], ""
expr, attr = m2.group(1, 2)
try:
expr = self._strip_code_before_operator(expr)
except tokenize.TokenError:
pass
obj = self._evaluate_expr(expr)
if obj is not_found:
if context:
# try to evaluate on full buffer
previous_lines = "\n".join(
context.full_text.split("\n")[: context.cursor_line]
)
if previous_lines:
all_code_lines_before_cursor = (
self._extract_code(previous_lines) + "\n" + expr
)
obj = self._evaluate_expr(all_code_lines_before_cursor)
if obj is not_found:
return [], ""
if self.limit_to__all__ and hasattr(obj, '__all__'):
words = get__all__entries(obj)
else:
words = dir2(obj)
try:
words = generics.complete_object(obj, words)
except TryNext:
pass
except AssertionError:
raise
except Exception:
# Silence errors from completion function
pass
# Build match list to return
n = len(attr)
# Note: ideally we would just return words here and the prefix
# reconciliator would know that we intend to append to rather than
# replace the input text; this requires refactoring to return range
# which ought to be replaced (as does jedi).
if include_prefix:
tokens = _parse_tokens(expr)
rev_tokens = reversed(tokens)
skip_over = {tokenize.ENDMARKER, tokenize.NEWLINE}
name_turn = True
parts = []
for token in rev_tokens:
if token.type in skip_over:
continue
if token.type == tokenize.NAME and name_turn:
parts.append(token.string)
name_turn = False
elif (
token.type == tokenize.OP and token.string == "." and not name_turn
):
parts.append(token.string)
name_turn = True
else:
# short-circuit if not empty nor name token
break
prefix_after_space = "".join(reversed(parts))
else:
prefix_after_space = ""
return (
["%s.%s" % (prefix_after_space, w) for w in words if w[:n] == attr],
"." + attr,
)
def _trim_expr(self, code: str) -> str:
"""
Trim the code until it is a valid expression and not a tuple;
return the trimmed expression for guarded_eval.
"""
while code:
code = code[1:]
try:
res = ast.parse(code)
except SyntaxError:
continue
assert res is not None
if len(res.body) != 1:
continue
if not isinstance(res.body[0], ast.Expr):
continue
expr = res.body[0].value
if isinstance(expr, ast.Tuple) and not code[-1] == ")":
# we skip implicit tuple, like when trimming `fun(a,b`<completion>
# as `a,b` would be a tuple, and we actually expect to get only `b`
continue
return code
return ""
def _evaluate_expr(self, expr):
obj = not_found
done = False
while not done and expr:
try:
obj = guarded_eval(
expr,
EvaluationContext(
globals=self.global_namespace,
locals=self.namespace,
evaluation=self.evaluation,
auto_import=self._auto_import,
policy_overrides=self.policy_overrides,
),
)
done = True
except (SyntaxError, TypeError) as e:
if self.debug:
warnings.warn(f"Trimming because of {e}")
# TypeError can show up with something like `+ d`
# where `d` is a dictionary.
# trim the expression to remove any invalid prefix
# e.g. user starts `(d[`, so we get `expr = '(d'`,
# where parenthesis is not closed.
# TODO: make this faster by reusing parts of the computation?
expr = self._trim_expr(expr)
except Exception as e:
if self.debug:
warnings.warn(f"Evaluation exception {e}")
done = True
if self.debug:
warnings.warn(f"Resolved to {obj}")
return obj
@property
def _auto_import(self):
if self.auto_import_method is None:
return None
if not hasattr(self, "_auto_import_func"):
self._auto_import_func = import_item(self.auto_import_method)
return self._auto_import_func
def get__all__entries(obj):
"""returns the strings in the __all__ attribute"""
try:
words = getattr(obj, '__all__')
except Exception:
return []
return [w for w in words if isinstance(w, str)]
| Completer |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 219224,
"end": 219616
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("end", "start")
end = sgqlc.types.Field(
sgqlc.types.non_null(CheckAnnotationPosition), graphql_name="end"
)
start = sgqlc.types.Field(
sgqlc.types.non_null(CheckAnnotationPosition), graphql_name="start"
)
| CheckAnnotationSpan |
python | huggingface__transformers | src/transformers/models/altclip/modeling_altclip.py | {
"start": 11754,
"end": 12450
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
ALT_ROBERTA_SELF_ATTENTION_CLASSES = {
"eager": AltRobertaSelfAttention,
}
| AltRobertaSelfOutput |
python | numba__numba | numba/cuda/cudadrv/driver.py | {
"start": 22108,
"end": 25869
} | class ____(object, metaclass=ABCMeta):
"""Abstract base class for External Memory Management (EMM) Plugins."""
def __init__(self, *args, **kwargs):
if 'context' not in kwargs:
raise RuntimeError("Memory manager requires a context")
self.context = kwargs.pop('context')
@abstractmethod
def memalloc(self, size):
"""
Allocate on-device memory in the current context.
:param size: Size of allocation in bytes
:type size: int
:return: A memory pointer instance that owns the allocated memory
:rtype: :class:`MemoryPointer`
"""
@abstractmethod
def memhostalloc(self, size, mapped, portable, wc):
"""
Allocate pinned host memory.
:param size: Size of the allocation in bytes
:type size: int
:param mapped: Whether the allocated memory should be mapped into the
CUDA address space.
:type mapped: bool
:param portable: Whether the memory will be considered pinned by all
contexts, and not just the calling context.
:type portable: bool
:param wc: Whether to allocate the memory as write-combined.
:type wc: bool
:return: A memory pointer instance that owns the allocated memory. The
return type depends on whether the region was mapped into
device memory.
:rtype: :class:`MappedMemory` or :class:`PinnedMemory`
"""
@abstractmethod
def mempin(self, owner, pointer, size, mapped):
"""
Pin a region of host memory that is already allocated.
:param owner: The object that owns the memory.
:param pointer: The pointer to the beginning of the region to pin.
:type pointer: int
:param size: The size of the region in bytes.
:type size: int
:param mapped: Whether the region should also be mapped into device
memory.
:type mapped: bool
:return: A memory pointer instance that refers to the allocated
memory.
:rtype: :class:`MappedMemory` or :class:`PinnedMemory`
"""
@abstractmethod
def initialize(self):
"""
Perform any initialization required for the EMM plugin instance to be
ready to use.
:return: None
"""
@abstractmethod
def get_ipc_handle(self, memory):
"""
Return an IPC handle from a GPU allocation.
:param memory: Memory for which the IPC handle should be created.
:type memory: :class:`MemoryPointer`
:return: IPC handle for the allocation
:rtype: :class:`IpcHandle`
"""
@abstractmethod
def get_memory_info(self):
"""
Returns ``(free, total)`` memory in bytes in the context. May raise
:class:`NotImplementedError`, if returning such information is not
practical (e.g. for a pool allocator).
:return: Memory info
:rtype: :class:`MemoryInfo`
"""
@abstractmethod
def reset(self):
"""
Clears up all memory allocated in this context.
:return: None
"""
@abstractmethod
def defer_cleanup(self):
"""
Returns a context manager that ensures the implementation of deferred
cleanup whilst it is active.
:return: Context manager
"""
@property
@abstractmethod
def interface_version(self):
"""
Returns an integer specifying the version of the EMM Plugin interface
supported by the plugin implementation. Should always return 1 for
implementations of this version of the specification.
"""
| BaseCUDAMemoryManager |
python | PyCQA__pylint | tests/functional/u/unsubscriptable_object.py | {
"start": 404,
"end": 520
} | class ____(TypedDict):
"""It's the identity."""
name: str
T = TypeVar("T", bound=Mapping)
@dataclass
| Identity |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 87036,
"end": 88541
} | class ____(BaseModel, extra="forbid"):
prefetch: Optional[Union[List["Prefetch"], "Prefetch"]] = Field(
default=None,
description="Sub-requests to perform first. If present, the query will be performed on the results of the prefetches.",
)
query: Optional["QueryInterface"] = Field(
default=None,
description="Query to perform. If missing without prefetches, returns points ordered by their IDs.",
)
using: Optional[str] = Field(
default=None,
description="Define which vector name to use for querying. If missing, the default vector is used.",
)
filter: Optional["Filter"] = Field(
default=None, description="Filter conditions - return only those points that satisfy the specified conditions."
)
params: Optional["SearchParams"] = Field(default=None, description="Search params for when there is no prefetch")
score_threshold: Optional[float] = Field(
default=None, description="Return points with scores better than this threshold."
)
limit: Optional[int] = Field(default=None, description="Max number of points to return. Default is 10.")
lookup_from: Optional["LookupLocation"] = Field(
default=None,
description="The location to use for IDs lookup, if not specified - use the current collection and the 'using' vector Note: the other collection vectors should have the same vector size as the 'using' vector in the current collection",
)
| Prefetch |
python | allegroai__clearml | clearml/backend_api/services/v2_23/frames.py | {
"start": 15190,
"end": 17761
} | class ____(NonStrictDataModel):
"""
:param bidirectional: If set then frames retreival can go either forward or
backwards. Otherwise only forward. The default is False. The limitations of
bidirectional navigation: - Frames are always returned in sequential order -
The iteration is finite (no support for infinite iteration)
:type bidirectional: bool
:param navigate_backwards: When bidirectional is True, settings this to True
navigates backwards duing frames retreival. Default is False
:type navigate_backwards: bool
"""
_schema = {
"properties": {
"bidirectional": {
"description": (
"If set then frames retreival can go either forward or backwards. Otherwise only forward.\n "
" The default is False. The limitations of bidirectional navigation:\n -"
" Frames are always returned in sequential order\n - The iteration is finite (no"
" support for infinite iteration)\n "
),
"type": ["boolean", "null"],
},
"navigate_backwards": {
"description": (
"When bidirectional is True, settings this to True navigates backwards duing frames retreival."
" Default is False"
),
"type": ["boolean", "null"],
},
},
"type": "object",
}
def __init__(self, bidirectional=None, navigate_backwards=None, **kwargs):
super(FlowControl, self).__init__(**kwargs)
self.bidirectional = bidirectional
self.navigate_backwards = navigate_backwards
@schema_property("bidirectional")
def bidirectional(self):
return self._property_bidirectional
@bidirectional.setter
def bidirectional(self, value):
if value is None:
self._property_bidirectional = None
return
self.assert_isinstance(value, "bidirectional", (bool,))
self._property_bidirectional = value
@schema_property("navigate_backwards")
def navigate_backwards(self):
return self._property_navigate_backwards
@navigate_backwards.setter
def navigate_backwards(self, value):
if value is None:
self._property_navigate_backwards = None
return
self.assert_isinstance(value, "navigate_backwards", (bool,))
self._property_navigate_backwards = value
| FlowControl |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP049_1.py | {
"start": 55,
"end": 121
} | class ____[_T: (str, bytes)]:
var: _T
# python 3.13+ default
| Foo |
python | google__jax | jax/experimental/pallas/ops/tpu/splash_attention/splash_attention_mask_info.py | {
"start": 1026,
"end": 5369
} | class ____(NamedTuple):
"""Contains runtime masking information for the Splash attention kernel.
The arrays data_next, mask_next and block_mask are placed in TPU
scalar-memory. This is a scarse resource so the mask creation logic attempts
to shrink the data-type of these arrays to the smallest possible one.
This can be: np.int32, np.int16 or np.int8.
For the arrays data_next, mask_next and block_mask the size of the first
dimension can be one of the two following values: num_head or
num_head_shards.
The first dimension has size:
* num_head_shards when there is only one unique mask for each head in a shard.
In this case the three arrays are broadcasted to all the heads in the shard.
* num_heads when there is more than one unique mask for each head in the
shard.
Attributes:
data_next: An integer[num_heads_or_shards, num_q_blocks, num_kv_blocks]
NumPy array where each entry contains the next `kv` block index to
prefetch.
mask_next: An integer[num_heads_or_shards, num_q_blocks, num_kv_blocks]
NumPy array where each entry contains the next mask block index in
`partial_mask_blocks` to prefetch.
block_mask: An integer[num_heads_or_shards, num_q_blocks, num_kv_blocks]
NumPy array whose entries can be 0, 1 or 2. An entry of 0 indicates that
the corresponding block in the full mask was all zeros. An entry of 1
indicates that the corresponding block in the full mask contained both
zeros and ones. An entry of 2 indicates the corresponding block was
entirely ones.
partial_mask_blocks: A bool[num_partial_blocks, block_q, block_kv] NumPy
array that contains the blocks of the original mask that contained both
zeros and ones. The entries in `mask_next` point to indices in the first
axis of this array.
q_sequence: A i32[q_sequence_length] NumPy array. When using causal masking,
this contains the list of indices that correspond to q tokens. For plain
causal this is just np.arange(q_sequence_length).
is_dynamic_mask: A bool indicating whether the mask is dynamic or static.
When True, the leading dimensions of `partial_mask_blocks` (num_heads,
q_blocks, kv_blocks) are not collapsed, allowing us to shard it along
those dimensions.
"""
data_next: np.ndarray | jax.Array | None
mask_next: np.ndarray | jax.Array | None
block_mask: np.ndarray | jax.Array | None
partial_mask_blocks: np.ndarray | jax.Array | None
q_sequence: np.ndarray | None
is_dynamic_mask: bool = None
def _downcast_to_small_type(array: np.ndarray) -> np.ndarray:
"""Downcast numpy array.
If possible, downcast the data-type of the input array to the smallest numpy
type (among np.int16 and np.int8) that fits the content of the array.
Args:
array: the array to downcast
Returns:
The downcasted array.
Raises:
ValueError: if the input array is not np.int32 or if its elements are not
all positive.
"""
if array.dtype != np.int32:
raise ValueError('Expected int32 input.')
if not np.all(array >= 0):
raise ValueError('Expected non-negative array.')
if array.size == 0:
return array
max_value = np.max(array)
if max_value <= np.iinfo(np.int8).max:
return array.astype(np.int8)
elif max_value <= np.iinfo(np.int16).max:
return array.astype(np.int16)
else:
return array.astype(np.int32)
def _check_mask(mask: mask_lib.Mask) -> None:
"""Check that the given mask is valid.
A row of all zeros along the kv dimension would result in a division by zero
when computing the softmax. This function is meant to protect against that
case.
Args:
mask: the mask to check.
Raises:
ValueError: the mask is invalid.
"""
assert len(mask.shape) == 2
exception_message = (
'Some rows of the mask (along the kv dimension) are all zeros.\nThis is'
' would result in a division by zero when computing the attention'
' softmax.'
)
is_row_non_zero = np.zeros(mask.shape[0], dtype=np.bool_)
for col in range(mask.shape[1]):
# Mask only supports slice indices.
is_row_non_zero = np.logical_or(
is_row_non_zero,
mask[(slice(0, mask.shape[0]), slice(col, col + 1))][:, 0],
)
if not is_row_non_zero.all():
raise ValueError(exception_message)
| MaskInfo |
python | django__django | django/templatetags/tz.py | {
"start": 2892,
"end": 5273
} | class ____(Node):
"""
Template node class used by ``get_current_timezone_tag``.
"""
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = timezone.get_current_timezone_name()
return ""
@register.tag("localtime")
def localtime_tag(parser, token):
"""
Force or prevent conversion of datetime objects to local time,
regardless of the value of ``settings.USE_TZ``.
Sample usage::
{% localtime off %}{{ value_in_utc }}{% endlocaltime %}
"""
bits = token.split_contents()
if len(bits) == 1:
use_tz = True
elif len(bits) > 2 or bits[1] not in ("on", "off"):
raise TemplateSyntaxError("%r argument should be 'on' or 'off'" % bits[0])
else:
use_tz = bits[1] == "on"
nodelist = parser.parse(("endlocaltime",))
parser.delete_first_token()
return LocalTimeNode(nodelist, use_tz)
@register.tag("timezone")
def timezone_tag(parser, token):
"""
Enable a given time zone just for this block.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
time zone name, or ``None``. If it is ``None``, the default time zone is
used within the block.
Sample usage::
{% timezone "Europe/Paris" %}
It is {{ now }} in Paris.
{% endtimezone %}
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument (timezone)" % bits[0])
tz = parser.compile_filter(bits[1])
nodelist = parser.parse(("endtimezone",))
parser.delete_first_token()
return TimezoneNode(nodelist, tz)
@register.tag("get_current_timezone")
def get_current_timezone_tag(parser, token):
"""
Store the name of the current time zone in the context.
Usage::
{% get_current_timezone as TIME_ZONE %}
This will fetch the currently active time zone and put its name
into the ``TIME_ZONE`` context variable.
"""
# token.split_contents() isn't useful here because this tag doesn't accept
# variable as arguments.
args = token.contents.split()
if len(args) != 3 or args[1] != "as":
raise TemplateSyntaxError(
"'get_current_timezone' requires 'as variable' (got %r)" % args
)
return GetCurrentTimezoneNode(args[2])
| GetCurrentTimezoneNode |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-file/tests/test_image_vision_llm.py | {
"start": 2593,
"end": 8772
} | class ____:
"""
This double fakes the `Blip2ForConditionalGeneration` model object
in order to avoid having to download checkpoints for these tests.
"""
def generate(self, **kwargs) -> list:
"""
The output is the tokenized version of the prompt
"Question: describe what you see in this image. \
Answer: a black and white checkered pattern"
It should be of type `torch.Tensor`. However, we will fake it as a
list of integers order to not require `torch` or `numpy` imports.
"""
return [
[
2,
45641,
35,
6190,
99,
47,
192,
11,
42,
2274,
4,
31652,
35,
10,
909,
8,
1104,
5851,
438,
20093,
6184,
50118,
]
]
def to(self, device) -> None:
"""
This is just a dummy method for the purposes of the test (it
needs to be defined, but is not used). Hence, we return nothing.
"""
@contextmanager
def _get_custom_import(torch_installed: bool):
"""
Simulate absence of PyTorch installation depending on the input flag.
Args:
torch_installed (bool): Flag indicating whether or not PyTorch is installed.
Returns:
Generator: Parametrized `_custom_import()` function.
"""
# Store the original __import__ function
original_import = builtins.__import__
def _custom_import(module_name: str, *args, **kwargs) -> ModuleType:
"""
If `torch_installed` is False, act as if PyTorch is not installed.
"""
if module_name == "torch" and not torch_installed:
raise ImportError('No module named "torch.')
return original_import(module_name, *args, **kwargs)
try:
# Replace the built-in __import__ function
builtins.__import__ = _custom_import
yield
except Exception:
# Restore the original import function
builtins.__import__ = original_import
raise
finally:
# Restore the original import function
builtins.__import__ = original_import
@pytest.mark.skipif(
Image is None,
reason="PIL not installed",
)
@pytest.mark.parametrize(
"torch_installed",
[
pytest.param(
False,
id="torch_not_installed",
),
pytest.param(
True,
id="torch_installed",
),
],
)
def test_image_vision_llm_reader_load_data_with_parser_config(
torch_installed: bool, test_16x16_png_image_file: str
):
"""
We use doubles (mocks and fakes) for the model and the tokenizer objects
in order to avoid having to download checkpoints as part of tests, while
still covering all essential `ImageVisionLLMReader` class functionality.
"""
with (
mock.patch(
"transformers.Blip2ForConditionalGeneration.from_pretrained",
return_value=ModelFake(),
) as model,
mock.patch(
"transformers.Blip2Processor.from_pretrained",
return_value=TokenizerFake(),
) as processor,
):
parser_config = {
"processor": processor(),
"model": model(),
"device": "auto", # not used (placeholder)
"dtype": float, # not used (placeholder)
}
if torch_installed:
image_vision_llm_reader = ImageVisionLLMReader(
parser_config=parser_config, keep_image=True
)
assert image_vision_llm_reader._torch_imported
else:
with _get_custom_import(torch_installed=False):
image_vision_llm_reader = ImageVisionLLMReader(
parser_config=parser_config, keep_image=True
)
assert not image_vision_llm_reader._torch_imported
result = image_vision_llm_reader.load_data(file=test_16x16_png_image_file)[0]
assert (
result.text
== "Question: describe what you see in this image. Answer: a black and white checkered pattern"
)
@pytest.mark.skipif(
Image is None,
reason="PIL not installed",
)
@pytest.mark.parametrize(
"torch_installed",
[
pytest.param(
False,
id="torch_not_installed",
),
pytest.param(
True,
id="torch_installed",
),
],
)
def test_image_vision_llm_reader_load_data_wo_parser_config(
torch_installed: bool, test_16x16_png_image_file: str
):
"""
We use doubles (mocks and fakes) for the model and the tokenizer objects
in order to avoid having to download checkpoints as part of tests, while
still covering most of the `ImageVisionLLMReader` class functionality.
"""
with (
mock.patch(
"transformers.Blip2ForConditionalGeneration.from_pretrained",
return_value=ModelFake(),
),
mock.patch(
"transformers.Blip2Processor.from_pretrained",
return_value=TokenizerFake(),
),
):
if torch_installed:
image_vision_llm_reader = ImageVisionLLMReader()
result = image_vision_llm_reader.load_data(file=test_16x16_png_image_file)[
0
]
assert (
result.text
== "Question: describe what you see in this image. Answer: a black and white checkered pattern"
)
else:
with _get_custom_import(torch_installed=False):
with pytest.raises(ImportError) as excinfo:
image_vision_llm_reader = ImageVisionLLMReader()
assert (
str(excinfo.value)
== "Please install extra dependencies that are required for the ImageCaptionReader: `pip install torch transformers sentencepiece Pillow`"
)
| ModelFake |
python | pyparsing__pyparsing | pyparsing/core.py | {
"start": 143054,
"end": 145644
} | class ____(Token):
"""Token for matching words composed of characters *not* in a given
set (will include whitespace in matched characters if not listed in
the provided exclusion set - see example). Defined with string
containing all disallowed characters, and an optional minimum,
maximum, and/or exact length. The default value for ``min`` is
1 (a minimum value < 1 is not valid); the default values for
``max`` and ``exact`` are 0, meaning no maximum or exact
length restriction.
Example:
.. testcode::
# define a comma-separated-value as anything that is not a ','
csv_value = CharsNotIn(',')
print(
DelimitedList(csv_value).parse_string(
"dkls,lsdkjf,s12 34,@!#,213"
)
)
prints:
.. testoutput::
['dkls', 'lsdkjf', 's12 34', '@!#', '213']
"""
def __init__(
self, not_chars: str = "", min: int = 1, max: int = 0, exact: int = 0, **kwargs
) -> None:
super().__init__()
notChars: str = deprecate_argument(kwargs, "notChars", "")
self.skipWhitespace = False
self.notChars = not_chars or notChars
self.notCharsSet = set(self.notChars)
if min < 1:
raise ValueError(
"cannot specify a minimum length < 1; use"
" Opt(CharsNotIn()) if zero-length char group is permitted"
)
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.errmsg = f"Expected {self.name}"
self._may_return_empty = self.minLen == 0
self.mayIndexError = False
def _generateDefaultName(self) -> str:
not_chars_str = _collapse_string_to_ranges(self.notChars)
if len(not_chars_str) > 16:
return f"!W:({self.notChars[: 16 - 3]}...)"
else:
return f"!W:({self.notChars})"
def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
notchars = self.notCharsSet
if instring[loc] in notchars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
maxlen = min(start + self.maxLen, len(instring))
while loc < maxlen and instring[loc] not in notchars:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
| CharsNotIn |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_workflows.py | {
"start": 11935,
"end": 13346
} | class ____:
@mock.patch(BASE_PATH.format("Execution"))
@mock.patch(BASE_PATH.format("WorkflowsHook"))
def test_execute(self, mock_hook, mock_object):
start_date_filter = datetime.datetime.now(tz=datetime.timezone.utc) + datetime.timedelta(minutes=5)
execution_mock = mock.MagicMock()
execution_mock.start_time = start_date_filter
mock_hook.return_value.list_executions.return_value = [execution_mock]
op = WorkflowsListExecutionsOperator(
task_id="test_task",
workflow_id=WORKFLOW_ID,
location=LOCATION,
project_id=PROJECT_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
context = mock.MagicMock()
result = op.execute(context=context)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.list_executions.assert_called_once_with(
workflow_id=WORKFLOW_ID,
location=LOCATION,
project_id=PROJECT_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
assert result == [mock_object.to_dict.return_value]
| TestWorkflowExecutionsListExecutionsOperator |
python | getsentry__sentry | src/sentry/workflow_engine/models/data_condition.py | {
"start": 3863,
"end": 10565
} | class ____(DefaultFieldsModel):
"""
A data condition is a way to specify a logic condition, if the condition is met, the condition_result is returned.
"""
__relocation_scope__ = RelocationScope.Organization
__repr__ = sane_repr("type", "comparison", "condition_result", "condition_group_id")
# The comparison is the value that the condition is compared to for the evaluation, this must be a primitive value
comparison = models.JSONField()
# The condition_result is the value that is returned if the condition is met, this must be a primitive value
condition_result = models.JSONField()
# The type of condition, this is used to initialize the condition classes
type = models.CharField(
max_length=200, choices=[(t.value, t.value) for t in Condition], default=Condition.EQUAL
)
condition_group = models.ForeignKey(
"workflow_engine.DataConditionGroup",
related_name="conditions",
on_delete=models.CASCADE,
)
def get_snapshot(self) -> DataConditionSnapshot:
return {
"id": self.id,
"type": self.type,
"comparison": self.comparison,
"condition_result": self.condition_result,
}
def get_condition_result(self) -> DataConditionResult | ConditionError:
match self.condition_result:
case float() | bool():
return self.condition_result
case int() | DetectorPriorityLevel():
try:
return DetectorPriorityLevel(self.condition_result)
except ValueError:
return self.condition_result
case _:
logger.error(
"Invalid condition result",
extra={"condition_result": self.condition_result, "id": self.id},
)
return ConditionError(msg="Invalid condition result")
def _evaluate_operator(
self, condition_type: Condition, value: T
) -> DataConditionResult | ConditionError:
# If the condition is a base type, handle it directly
op = CONDITION_OPS[condition_type]
try:
return op(cast(Any, value), self.comparison)
except TypeError:
logger.exception(
"Invalid comparison for data condition",
extra={
"comparison": self.comparison,
"value": value,
"type": self.type,
"condition_id": self.id,
},
)
return ConditionError(msg="Invalid comparison for data condition")
@scopedstats.timer()
def _evaluate_condition(
self, condition_type: Condition, value: T
) -> DataConditionResult | ConditionError:
try:
handler = condition_handler_registry.get(condition_type)
except registry.NoRegistrationExistsError:
logger.exception(
"No registration exists for condition",
extra={"type": self.type, "id": self.id},
)
return ConditionError(msg="No registration exists for condition")
should_be_fast = not is_slow_condition(self)
start_time = time.time()
try:
with metrics.timer(
"workflow_engine.data_condition.evaluation_duration",
tags={"type": self.type, "speed_category": "fast" if should_be_fast else "slow"},
):
result = handler.evaluate_value(value, self.comparison)
except DataConditionEvaluationException as e:
metrics.incr("workflow_engine.data_condition.evaluation_error")
logger.info(
"A known error occurred while evaluating a data condition",
extra={
"condition_id": self.id,
"type": self.type,
"comparison": self.comparison,
"value": value,
"error": str(e),
},
)
return ConditionError(msg=str(e))
finally:
duration = time.time() - start_time
if should_be_fast and duration >= FAST_CONDITION_TOO_SLOW_THRESHOLD.total_seconds():
logger.error(
"Fast condition evaluation too slow; took %s seconds",
duration,
extra={
"condition_id": self.id,
"duration": duration,
"type": self.type,
"value": value,
"comparison": self.comparison,
},
)
return result
def evaluate_value(self, value: T) -> DataConditionResult | ConditionError:
try:
condition_type = Condition(self.type)
except ValueError:
logger.exception(
"Invalid condition type",
extra={"type": self.type, "id": self.id},
)
return ConditionError(msg="Invalid condition type")
result: DataConditionResult | ConditionError
if condition_type in CONDITION_OPS:
result = self._evaluate_operator(condition_type, value)
else:
result = self._evaluate_condition(condition_type, value)
metrics.incr("workflow_engine.data_condition.evaluation", tags={"type": self.type})
if isinstance(result, bool):
# If the result is True, get the result from `.condition_result`
return self.get_condition_result() if result else None
return result
def is_slow_condition(condition: DataCondition) -> bool:
return Condition(condition.type) in SLOW_CONDITIONS
def enforce_data_condition_json_schema(data_condition: DataCondition) -> None:
condition_type = Condition(data_condition.type)
if condition_type in CONDITION_OPS:
# don't enforce schema for default ops, this can be any type
return
try:
handler = condition_handler_registry.get(condition_type)
except registry.NoRegistrationExistsError:
logger.exception(
"No registration exists for condition",
extra={"type": data_condition.type, "id": data_condition.id},
)
return None
schema = handler.comparison_json_schema
try:
validate(data_condition.comparison, schema)
except ValidationError as e:
raise ValidationError(f"Invalid config: {e.message}")
@receiver(pre_save, sender=DataCondition)
def enforce_comparison_schema(sender, instance: DataCondition, **kwargs):
enforce_data_condition_json_schema(instance)
| DataCondition |
python | dagster-io__dagster | python_modules/dagster/dagster/_grpc/types.py | {
"start": 10952,
"end": 11366
} | class ____(
NamedTuple("_LoadableRepositorySymbol", [("repository_name", str), ("attribute", str)])
):
def __new__(cls, repository_name: str, attribute: str):
return super().__new__(
cls,
repository_name=check.str_param(repository_name, "repository_name"),
attribute=check.str_param(attribute, "attribute"),
)
@whitelist_for_serdes
| LoadableRepositorySymbol |
python | keras-team__keras | keras/src/dtype_policies/dtype_policy_map.py | {
"start": 243,
"end": 10840
} | class ____(DTypePolicy, MutableMapping):
"""Dict-like object mapping layer paths to `DTypePolicy` instances.
`DTypePolicyMap` can be used in `get_config` in layers and subclasses to
support a complex configurations of dtype policies.
For example, we can modify `get_config` in `layers.MultiHeadAttention` as
follows to support the mixing of dtype policies, such as quantization.
```python
@keras.saving.register_keras_serializable("MyPackage")
class MyMultiHeadAttention(keras.layers.MultiHeadAttention):
def get_config(self):
config = super().get_config()
dtype_policy_map = dtype_policies.DTypePolicyMap()
for layer in self._flatten_layers():
if layer.dtype_policy.quantization_mode is not None:
dtype_policy_map[layer.path] = layer.dtype_policy
if len(dtype_policy_map) > 0:
config.update({"dtype": dtype_policy_map})
return config
```
Internally, `DTypePolicyMap` uses a string as a key and a `DTypePolicy`
as the value. Typically, the key used for querying is the `Layer.path`.
However, it is also possible to set a regex as the key. See the docstring of
`get` for more details.
Args:
default_policy: An optional `DTypePolicy` instance specifying the
default dtype policy. If not specified, the value will default to
`keras.config.dtype_policy()`.
policy_map: An optional dict that maps string to `DTypePolicy`
instances. Defaults to `None`
Example:
```python
>>> from keras.src import dtype_policies
>>> bfloat16 = dtype_policies.DTypePolicy("bfloat16")
>>> float16 = dtype_policies.DTypePolicy("float16")
>>> float32 = dtype_policies.DTypePolicy("float32")
>>> policy_map = DTypePolicyMap(default_policy=float32)
# Set policies using an exact path and a regex pattern.
# Note: "decoder" will only match the exact path, not its children.
>>> policy_map["encoder/layer_0/dense"] = bfloat16
>>> policy_map["encoder/.*"] = float16
>>> policy_map["decoder"] = bfloat16
# 1. An exact match is found and returned directly.
>>> policy_map["encoder/layer_0/dense"].name
'bfloat16'
# 2. A regex match is found for a child layer.
# It matches the "encoder/.*" pattern.
>>> policy_map["encoder/attention/query"].name
'float16'
# 3. No implicit prefix matching occurs.
# "decoder/attention" does not match the key "decoder".
# The default policy is returned.
>>> policy_map["decoder/attention"].name
'float32'
# 4. A ValueError is raised if a path matches multiple patterns.
>>> policy_map["encoder/attention/.*"] = bfloat16
# "encoder/attention/query" now matches two patterns:
# - "encoder/.*"
# - "encoder/attention/.*"
>>> try:
... policy_map["encoder/attention/query"]
... except ValueError as e:
... print(e)
Path 'encoder/attention/query' matches multiple dtype policy ..
```
"""
def __init__(self, default_policy=None, policy_map=None):
if isinstance(default_policy, DTypePolicyMap):
raise ValueError("`default_policy` cannot be a `DTypePolicyMap`.")
if policy_map is not None and not isinstance(policy_map, dict):
raise TypeError(
"If specified, `policy_map` must be a dict. "
f"Received: policy_map={policy_map} of type {type(policy_map)}"
)
self._default_policy_arg = default_policy
self._default_policy = dtype_policies.get(default_policy)
self._policy_map = policy_map or dict()
@property
def name(self):
return f"map_{self.default_policy._name}"
@property
def default_policy(self):
"""The default dtype policy.
If `default_policy` is not specified in the constructor, this property
will be `keras.config.dtype_policy()`.
"""
return dtype_policies.get(self._default_policy)
@property
def variable_dtype(self):
return self.default_policy.variable_dtype
@property
def compute_dtype(self):
return self.default_policy.compute_dtype
@property
def quantization_mode(self):
return self.default_policy.quantization_mode
def __getitem__(self, key):
"""Retrieves the corresponding `DTypePolicy` by the string key.
This method first attempts an exact key match. If no exact match is
found, it treats all keys in the map as regular expression patterns
and uses `re.fullmatch` to find a policy.
For example, to apply a policy to all sublayers of an `encoder` block,
the key should be explicitly set to `"encoder/.*"`. A key of
`"encoder"` will only match the layer with that exact path.
Args:
key: str. The key to query for a `DTypePolicy`.
Returns:
The corresponding `DTypePolicy`. If no match is found, this method
returns `self.default_policy`.
Raises:
ValueError: If the `key` matches more than one regex pattern in the
map.
Example:
```python
>>> from keras.src import dtype_policies
>>> bfloat16 = dtype_policies.DTypePolicy("bfloat16")
>>> float16 = dtype_policies.DTypePolicy("float16")
>>> float32 = dtype_policies.DTypePolicy("float32")
>>> policy_map = DTypePolicyMap(default_policy=float32)
# Set policies using an exact path and a regex pattern.
# Note: "decoder" will only match the exact path, not its children.
>>> policy_map["encoder/layer_0/dense"] = bfloat16
>>> policy_map["encoder/.*"] = float16
>>> policy_map["decoder"] = bfloat16
# 1. An exact match is found and returned directly.
>>> policy_map["encoder/layer_0/dense"].name
'bfloat16'
# 2. A regex match is found for a child layer.
# It matches the "encoder/.*" pattern.
>>> policy_map["encoder/attention/query"].name
'float16'
# 3. No implicit prefix matching occurs.
# "decoder/attention" does not match the key "decoder".
# The default policy is returned.
>>> policy_map["decoder/attention"].name
'float32'
# 4. A ValueError is raised if a path matches multiple patterns.
>>> policy_map["encoder/attention/.*"] = bfloat16
# "encoder/attention/query" now matches two patterns:
# - "encoder/.*"
# - "encoder/attention/.*"
>>> try:
... policy_map["encoder/attention/query"]
... except ValueError as e:
... print(e)
Path 'encoder/attention/query' matches multiple dtype policy ..
```
"""
# 1. Check for an exact match.
if key in self._policy_map:
return self._policy_map[key]
# 2. Fallback to a full regex match.
matching_keys = [
pattern
for pattern in self._policy_map
if re.fullmatch(pattern, key)
]
# 3. Handle cases based on the number of matches found.
if len(matching_keys) > 1:
raise ValueError(
f"Path '{key}' matches multiple dtype policy "
f"specification keys: {matching_keys}. Please make "
"sure each path only matches at most "
"one dtype policy specification key in the DTypePolicyMap."
)
elif len(matching_keys) == 1:
return self._policy_map[matching_keys[0]]
# 4. If there were no matches, return the default.
return self.default_policy
def __setitem__(self, key, policy):
"""Insert `DTypePolicy` to the `DTypePolicyMap`.
Args:
key: String key for the `DTypePolicy`.
policy: The `DTypePolicy`.
"""
if key in self._policy_map:
raise ValueError(
f"{key} already exist in the DTypePolicyMap with "
f"value {self._policy_map[key]}. Please make sure to "
"not use duplicated keys."
)
try:
policy = dtype_policies.get(policy)
except Exception:
raise ValueError(
"Cannot interpret the assigned value by "
"`keras.dtype_policies.get`. "
f"Received: {policy} of type {type(policy)}"
)
self._policy_map[key] = policy
def __delitem__(self, key):
# Let the dict to handle the key missing error
return self._policy_map.pop(key)
def __contains__(self, key):
return key in self._policy_map
def get_config(self):
from keras.src.saving import serialization_lib
policy_map = self._policy_map
if self._default_policy_arg is None:
# `default_policy=None` enables us to defer to
# `keras.config.dtype_policy()` during loading.
# To support this feature, we can set `_name` and `_source_name` to
# `None` in `DTypePolicy` and `QuantizedDTypePolicy`,
# respectively.
for policy in policy_map.values():
if isinstance(policy, dtype_policies.QuantizedDTypePolicy):
policy._name = None
policy._source_name = None
elif isinstance(policy, dtype_policies.DTypePolicy):
policy._name = None
return {
"default_policy": self._default_policy_arg,
"policy_map": serialization_lib.serialize_keras_object(policy_map),
}
@classmethod
def from_config(cls, config, custom_objects=None):
from keras.src.saving import serialization_lib
config = config.copy()
config["policy_map"] = serialization_lib.deserialize_keras_object(
config["policy_map"], custom_objects=custom_objects
)
return cls(**config)
def __len__(self):
return len(self._policy_map)
def __iter__(self):
return iter(self._policy_map)
def __repr__(self):
default_policy = (
self._default_policy.name
if self._default_policy is not None
else None
)
mapping = []
for k, v in self._policy_map.items():
mapping.append((k, v.name))
return (
f"<DTypePolicyMap at {hex(id(self))} "
f"default_policy={default_policy}, "
f"mapping={mapping}>"
)
| DTypePolicyMap |
python | PrefectHQ__prefect | tests/runner/test_storage.py | {
"start": 3265,
"end": 5015
} | class ____(Block):
"""Mock GitLab credentials block for testing."""
_block_type_slug = "gitlab-credentials"
token: Optional[SecretStr] = None
def format_git_credentials(self, url: str) -> str:
"""
Format and return the full git URL with GitLab credentials embedded.
Handles both personal access tokens and deploy tokens correctly:
- Personal access tokens: prefixed with "oauth2:"
- Deploy tokens (username:token format): used as-is
- Already prefixed tokens: not double-prefixed
Args:
url: Repository URL (e.g., "https://gitlab.com/org/repo.git")
Returns:
Complete URL with credentials embedded
Raises:
ValueError: If token is not configured
"""
if not self.token:
raise ValueError("Token is required for GitLab authentication")
token_value = self.token.get_secret_value()
# Deploy token detection: contains ":" but not "oauth2:" prefix
# Deploy tokens should not have oauth2: prefix (GitLab 16.3.4+ rejects them)
# See: https://github.com/PrefectHQ/prefect/issues/10832
if ":" in token_value and not token_value.startswith("oauth2:"):
credentials = token_value
# Personal access token: add oauth2: prefix
# See: https://github.com/PrefectHQ/prefect/issues/16836
elif not token_value.startswith("oauth2:"):
credentials = f"oauth2:{token_value}"
else:
# Already prefixed
credentials = token_value
# Insert credentials into URL
parsed = urlparse(url)
return urlunparse(parsed._replace(netloc=f"{credentials}@{parsed.netloc}"))
| MockGitLabCredentials |
python | apache__thrift | lib/py/src/TMultiplexedProcessor.py | {
"start": 3114,
"end": 3337
} | class ____(TProtocolDecorator.TProtocolDecorator):
def __init__(self, protocol, messageBegin):
self.messageBegin = messageBegin
def readMessageBegin(self):
return self.messageBegin
| StoredMessageProtocol |
python | walkccc__LeetCode | solutions/1012. Numbers With Repeated Digits/1012.py | {
"start": 0,
"end": 1018
} | class ____:
def numDupDigitsAtMostN(self, n: int) -> int:
return n - self._countSpecialNumbers(n)
# Same as 2376. Count Special Integers
def _countSpecialNumbers(self, n: int) -> int:
s = str(n)
@functools.lru_cache(None)
def dp(i: int, used: int, tight: bool) -> int:
"""
Returns the number of special integers, considering the i-th digit, where
`used` is the bitmask of the used digits, and `tight` indicates if the
current digit is tightly bound.
"""
if i == len(s):
return 1
res = 0
maxDigit = int(s[i]) if tight else 9
for d in range(maxDigit + 1):
# `d` is used.
if used >> d & 1:
continue
# Use `d` now.
nextTight = tight and (d == maxDigit)
if used == 0 and d == 0: # Don't count leading 0s as used.
res += dp(i + 1, used, nextTight)
else:
res += dp(i + 1, used | 1 << d, nextTight)
return res
return dp(0, 0, True) - 1 # - 0
| Solution |
python | python-markdown__markdown | markdown/extensions/abbr.py | {
"start": 4911,
"end": 6600
} | class ____(BlockProcessor):
""" Parse text for abbreviation references. """
RE = re.compile(r'^[*]\[(?P<abbr>[^\\]*?)\][ ]?:[ ]*\n?[ ]*(?P<title>.*)$', re.MULTILINE)
def __init__(self, parser: BlockParser, abbrs: dict):
self.abbrs: dict = abbrs
super().__init__(parser)
def test(self, parent: etree.Element, block: str) -> bool:
return True
def run(self, parent: etree.Element, blocks: list[str]) -> bool:
"""
Find and remove all abbreviation references from the text.
Each reference is added to the abbreviation collection.
"""
block = blocks.pop(0)
m = self.RE.search(block)
if m:
abbr = m.group('abbr').strip()
title = m.group('title').strip()
if title and abbr:
if title == "''" or title == '""':
self.abbrs.pop(abbr)
else:
self.abbrs[abbr] = title
if block[m.end():].strip():
# Add any content after match back to blocks as separate block
blocks.insert(0, block[m.end():].lstrip('\n'))
if block[:m.start()].strip():
# Add any content before match back to blocks as separate block
blocks.insert(0, block[:m.start()].rstrip('\n'))
return True
# No match. Restore block.
blocks.insert(0, block)
return False
AbbrPreprocessor = deprecated("This class has been renamed to `AbbrBlockprocessor`.")(AbbrBlockprocessor)
@deprecated("This class will be removed in the future; use `AbbrTreeprocessor` instead.")
| AbbrBlockprocessor |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/collective_ops_test.py | {
"start": 49915,
"end": 52852
} | class ____(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testOrdering(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
with ops.device(dev0):
token0 = create_ordering_token()
with ops.device(dev1):
token1 = create_ordering_token()
@def_function.function
def f():
# Launch the first collective with token.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token0,
name='FirstChainedDev0')
with ops.device(dev1):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token1,
name='FirstChainedDev1')
# Launch the second collective without token.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=create_ordering_token(),
name='UnchainedDev0')
with ops.device(dev1):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=create_ordering_token(),
name='UnchainedDev1')
# Launch the third collective with token.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key + 1,
ordering_token=token0,
name='SecondChainedDev0')
with ops.device(dev1):
collective_op(
in_tensor,
group_size,
group_key,
instance_key + 1,
ordering_token=token1,
name='SecondChainedDev1')
graph = f.get_concrete_function().graph
for device, suffix in [(dev0, 'Dev0'), (dev1, 'Dev1')]:
first = graph.get_operation_by_name('FirstChained' + suffix)
second = graph.get_operation_by_name('Unchained' + suffix)
third = graph.get_operation_by_name('SecondChained' + suffix)
self.assertIsNotNone(first)
self.assertTrue(first.device.endswith(device))
self.assertIsNotNone(second)
self.assertTrue(second.device.endswith(device))
self.assertIsNotNone(third)
self.assertTrue(third.device.endswith(device))
# Try to find the third collective, which should have the first collective
# as a control input.
self.assertLen(third.control_inputs, 1)
self.assertEqual(third.control_inputs[0].name, 'FirstChained' + suffix)
self.assertEmpty(second.control_inputs)
self.assertEmpty(first.control_inputs)
| OrderingTest |
python | doocs__leetcode | solution/3300-3399/3339.Find the Number of K-Even Arrays/Solution2.py | {
"start": 0,
"end": 550
} | class ____:
def countOfArrays(self, n: int, m: int, k: int) -> int:
f = [[[0] * 2 for _ in range(k + 1)] for _ in range(n + 1)]
cnt0 = m // 2
cnt1 = m - cnt0
mod = 10**9 + 7
f[0][0][1] = 1
for i in range(1, n + 1):
for j in range(k + 1):
f[i][j][0] = (
(f[i - 1][j][1] + (f[i - 1][j - 1][0] if j else 0)) * cnt0 % mod
)
f[i][j][1] = (f[i - 1][j][0] + f[i - 1][j][1]) * cnt1 % mod
return sum(f[n][k]) % mod
| Solution |
python | openai__openai-python | src/openai/types/beta/realtime/response_audio_done_event.py | {
"start": 200,
"end": 679
} | class ____(BaseModel):
content_index: int
"""The index of the content part in the item's content array."""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the item."""
output_index: int
"""The index of the output item in the response."""
response_id: str
"""The ID of the response."""
type: Literal["response.audio.done"]
"""The event type, must be `response.audio.done`."""
| ResponseAudioDoneEvent |
python | openai__openai-python | src/openai/types/beta/realtime/input_audio_buffer_speech_started_event.py | {
"start": 212,
"end": 861
} | class ____(BaseModel):
audio_start_ms: int
"""
Milliseconds from the start of all audio written to the buffer during the
session when speech was first detected. This will correspond to the beginning of
audio sent to the model, and thus includes the `prefix_padding_ms` configured in
the Session.
"""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the user message item that will be created when speech stops."""
type: Literal["input_audio_buffer.speech_started"]
"""The event type, must be `input_audio_buffer.speech_started`."""
| InputAudioBufferSpeechStartedEvent |
python | streamlit__streamlit | lib/tests/streamlit/elements/radio_test.py | {
"start": 1295,
"end": 16926
} | class ____(DeltaGeneratorTestCase):
"""Test ability to marshall radio protos."""
def test_just_label(self):
"""Test that it can be called with no value."""
st.radio("the label", ("m", "f"))
c = self.get_delta_from_queue().new_element.radio
assert c.label == "the label"
assert (
c.label_visibility.value
== LabelVisibilityMessage.LabelVisibilityOptions.VISIBLE
)
assert c.default == 0
assert not c.disabled
assert c.HasField("default")
assert c.captions == []
def test_just_disabled(self):
"""Test that it can be called with disabled param."""
st.radio("the label", ("m", "f"), disabled=True)
c = self.get_delta_from_queue().new_element.radio
assert c.disabled
def test_none_value(self):
"""Test that it can be called with None as index value."""
st.radio("the label", ("m", "f"), index=None)
c = self.get_delta_from_queue().new_element.radio
assert c.label == "the label"
# If a proto property is null is not determined by this value,
# but by the check via the HasField method:
assert c.default == 0
assert not c.HasField("default")
def test_horizontal(self):
"""Test that it can be called with horizontal param."""
st.radio("the label", ("m", "f"), horizontal=True)
c = self.get_delta_from_queue().new_element.radio
assert c.horizontal
def test_horizontal_default_value(self):
"""Test that it can called with horizontal param value False by default."""
st.radio("the label", ("m", "f"))
c = self.get_delta_from_queue().new_element.radio
assert not c.horizontal
def test_valid_value(self):
"""Test that valid value is an int."""
st.radio("the label", ("m", "f"), 1)
c = self.get_delta_from_queue().new_element.radio
assert c.label == "the label"
assert c.default == 1
def test_noneType_option(self):
"""Test NoneType option value."""
current_value = st.radio("the label", (None, "selected"), 0)
assert current_value is None
@parameterized.expand(
SHARED_TEST_CASES,
)
def test_option_types(self, name: str, input_data: Any, metadata: CaseMetadata):
"""Test that it supports different types of options."""
st.radio("the label", input_data)
c = self.get_delta_from_queue().new_element.radio
assert c.label == "the label"
assert c.default == 0
assert {str(item) for item in c.options} == {
str(item) for item in metadata.expected_sequence
}
def test_cast_options_to_string(self):
"""Test that it casts options to string."""
arg_options = ["some str", 123, None, {}]
proto_options = ["some str", "123", "None", "{}"]
st.radio("the label", arg_options)
c = self.get_delta_from_queue().new_element.radio
assert c.label == "the label"
assert c.default == 0
assert c.options == proto_options
def test_format_function(self):
"""Test that it formats options."""
arg_options = [{"name": "john", "height": 180}, {"name": "lisa", "height": 200}]
proto_options = ["john", "lisa"]
st.radio("the label", arg_options, format_func=lambda x: x["name"])
c = self.get_delta_from_queue().new_element.radio
assert c.label == "the label"
assert c.default == 0
assert c.options == proto_options
@parameterized.expand([((),), ([],), (np.array([]),), (pd.Series(np.array([])),)])
def test_no_options(self, options):
"""Test that it handles no options."""
st.radio("the label", options)
c = self.get_delta_from_queue().new_element.radio
assert c.label == "the label"
assert (
c.label_visibility.value
== LabelVisibilityMessage.LabelVisibilityOptions.VISIBLE
)
assert c.default == 0
assert c.options == []
def test_invalid_value(self):
"""Test that value must be an int."""
with pytest.raises(StreamlitAPIException):
st.radio("the label", ("m", "f"), "1")
def test_invalid_value_range(self):
"""Test that value must be within the length of the options."""
with pytest.raises(StreamlitAPIException):
st.radio("the label", ("m", "f"), 2)
def test_outside_form(self):
"""Test that form id is marshalled correctly outside of a form."""
st.radio("foo", ["bar", "baz"])
proto = self.get_delta_from_queue().new_element.radio
assert proto.form_id == ""
@patch("streamlit.runtime.Runtime.exists", MagicMock(return_value=True))
def test_inside_form(self):
"""Test that form id is marshalled correctly inside of a form."""
with st.form("form"):
st.radio("foo", ["bar", "baz"])
# 2 elements will be created: form block, widget
assert len(self.get_all_deltas_from_queue()) == 2
form_proto = self.get_delta_from_queue(0).add_block
radio_proto = self.get_delta_from_queue(1).new_element.radio
assert radio_proto.form_id == form_proto.form.form_id
def test_inside_column(self):
"""Test that it works correctly inside of a column."""
col1, _col2 = st.columns(2)
with col1:
st.radio("foo", ["bar", "baz"])
all_deltas = self.get_all_deltas_from_queue()
# 4 elements will be created: 1 horizontal block, 2 columns, 1 widget
assert len(all_deltas) == 4
radio_proto = self.get_delta_from_queue().new_element.radio
assert radio_proto.label == "foo"
assert radio_proto.options == ["bar", "baz"]
assert radio_proto.default == 0
@parameterized.expand(
[
("visible", LabelVisibilityMessage.LabelVisibilityOptions.VISIBLE),
("hidden", LabelVisibilityMessage.LabelVisibilityOptions.HIDDEN),
("collapsed", LabelVisibilityMessage.LabelVisibilityOptions.COLLAPSED),
]
)
def test_label_visibility(self, label_visibility_value, proto_value):
"""Test that it can be called with label_visibility param."""
st.radio("the label", ("m", "f"), label_visibility=label_visibility_value)
c = self.get_delta_from_queue().new_element.radio
assert c.label == "the label"
assert c.default == 0
assert c.label_visibility.value == proto_value
def test_label_visibility_wrong_value(self):
with pytest.raises(StreamlitAPIException) as e:
st.radio("the label", ("m", "f"), label_visibility="wrong_value")
assert (
str(e.value)
== "Unsupported label_visibility option 'wrong_value'. Valid values are 'visible', 'hidden' or 'collapsed'."
)
def test_no_captions(self):
"""Test that it can be called with no captions."""
st.radio("the label", ("option1", "option2", "option3"), captions=None)
c = self.get_delta_from_queue().new_element.radio
assert c.label == "the label"
assert c.default == 0
assert c.captions == []
def test_some_captions(self):
"""Test that it can be called with some captions."""
st.radio(
"the label",
("option1", "option2", "option3", "option4"),
captions=("first caption", None, "", "last caption"),
)
c = self.get_delta_from_queue().new_element.radio
assert c.label == "the label"
assert c.default == 0
assert c.captions == ["first caption", "", "", "last caption"]
def test_shows_cached_widget_replay_warning(self):
"""Test that a warning is shown when this widget is used inside a cached function."""
st.cache_data(lambda: st.radio("the label", ["option 1", "option 2"]))()
# The widget itself is still created, so we need to go back one element more:
el = self.get_delta_from_queue(-2).new_element.exception
assert el.type == "CachedWidgetWarning"
assert el.is_warning
def test_radio_with_width(self):
"""Test st.radio with different width types."""
test_cases = [
(500, WidthConfigFields.PIXEL_WIDTH.value, "pixel_width", 500),
("stretch", WidthConfigFields.USE_STRETCH.value, "use_stretch", True),
("content", WidthConfigFields.USE_CONTENT.value, "use_content", True),
]
for index, (
width_value,
expected_width_spec,
field_name,
field_value,
) in enumerate(test_cases):
with self.subTest(width_value=width_value):
st.radio(
f"test label {index}", ["option 1", "option 2"], width=width_value
)
el = self.get_delta_from_queue().new_element
assert el.radio.label == f"test label {index}"
assert el.width_config.WhichOneof("width_spec") == expected_width_spec
assert getattr(el.width_config, field_name) == field_value
def test_radio_with_invalid_width(self):
"""Test st.radio with invalid width values."""
test_cases = [
(
"invalid",
"Invalid width value: 'invalid'. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
-100,
"Invalid width value: -100. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
0,
"Invalid width value: 0. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
100.5,
"Invalid width value: 100.5. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
]
for width_value, expected_error_message in test_cases:
with self.subTest(width_value=width_value):
with pytest.raises(StreamlitAPIException) as exc:
st.radio("test label", ["option 1", "option 2"], width=width_value)
assert str(exc.value) == expected_error_message
def test_radio_default_width(self):
"""Test that st.radio defaults to content width."""
st.radio("test label", ["option 1", "option 2"])
el = self.get_delta_from_queue().new_element
assert el.radio.label == "test label"
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_CONTENT.value
)
assert el.width_config.use_content is True
def test_stable_id_with_key(self):
"""Test that the widget ID is stable when a stable key is provided."""
with patch(
"streamlit.elements.lib.utils._register_element_id",
return_value=MagicMock(),
):
# First render with certain params
st.radio(
label="Label 1",
key="radio_key",
index=0,
help="Help 1",
disabled=False,
width="content",
on_change=lambda: None,
args=("arg1", "arg2"),
kwargs={"kwarg1": "kwarg1"},
label_visibility="visible",
horizontal=False,
captions=["c1", "c2"],
# Whitelisted kwargs:
options=["a", "b"],
format_func=lambda x: x.capitalize(),
)
c1 = self.get_delta_from_queue().new_element.radio
id1 = c1.id
# Second render with different non-whitelisted params but same key
st.radio(
label="Label 2",
key="radio_key",
index=1,
help="Help 2",
disabled=True,
width="stretch",
on_change=lambda: None,
args=("arg_1", "arg_2"),
kwargs={"kwarg_1": "kwarg_1"},
label_visibility="hidden",
horizontal=True,
captions=["c1x", "c2x"],
# Whitelisted kwargs:
options=["a", "b"],
format_func=lambda x: x.capitalize(),
)
c2 = self.get_delta_from_queue().new_element.radio
id2 = c2.id
assert id1 == id2
@parameterized.expand(
[
(
"options",
{"options": ["a", "b"], "format_func": str},
{"options": ["a", "b", "c"], "format_func": str},
),
(
"format_func",
{"options": ["a", "b"], "format_func": str},
{"options": ["a", "b"], "format_func": str.upper},
),
]
)
def test_whitelisted_stable_key_kwargs(
self, _name: str, first_kwargs: dict, second_kwargs: dict
) -> None:
"""Test that the widget ID changes when a whitelisted kwarg changes even when the key is provided."""
with patch(
"streamlit.elements.lib.utils._register_element_id",
return_value=MagicMock(),
):
st.radio(label="Label 1", key="radio_key2", **first_kwargs)
c1 = self.get_delta_from_queue().new_element.radio
id1 = c1.id
st.radio(label="Label 2", key="radio_key2", **second_kwargs)
c2 = self.get_delta_from_queue().new_element.radio
id2 = c2.id
assert id1 != id2
def test_radio_interaction():
"""Test interactions with an empty radio widget."""
def script():
import streamlit as st
st.radio("the label", ("m", "f"), index=None)
at = AppTest.from_function(script).run()
radio = at.radio[0]
assert radio.value is None
# Select option m
at = radio.set_value("m").run()
radio = at.radio[0]
assert radio.value == "m"
# # Clear the value
at = radio.set_value(None).run()
radio = at.radio[0]
assert radio.value is None
def test_radio_enum_coercion():
"""Test E2E Enum Coercion on a radio."""
def script():
from enum import Enum
import streamlit as st
class EnumA(Enum):
A = 1
B = 2
C = 3
selected = st.radio("my_enum", EnumA, index=0)
st.text(id(selected.__class__))
st.text(id(EnumA))
st.text(selected in EnumA)
at = AppTest.from_function(script).run()
def test_enum():
radio = at.radio[0]
original_class = radio.value.__class__
radio.set_value(original_class.C).run()
assert at.text[0].value == at.text[1].value, "Enum Class ID not the same"
assert at.text[2].value == "True", "Not all enums found in class"
with patch_config_options({"runner.enumCoercion": "nameOnly"}):
test_enum()
with (
patch_config_options({"runner.enumCoercion": "off"}),
pytest.raises(AssertionError),
):
test_enum() # expect a failure with the config value off.
def test_None_session_state_value_retained():
def script():
import streamlit as st
if "radio" not in st.session_state:
st.session_state["radio"] = None
st.radio("radio", ["a", "b", "c"], key="radio")
st.button("button")
at = AppTest.from_function(script).run()
at = at.button[0].click().run()
assert at.radio[0].value is None
| RadioTest |
python | sphinx-doc__sphinx | sphinx/config.py | {
"start": 2780,
"end": 6204
} | class ____:
__slots__ = 'default', 'rebuild', 'valid_types', 'description'
default: Any
rebuild: _ConfigRebuild
valid_types: _OptValidTypes
description: str
def __init__(
self,
default: Any,
rebuild: _ConfigRebuild,
valid_types: _OptValidTypes,
description: str = '',
) -> None:
"""Configuration option type for Sphinx.
The type is intended to be immutable; changing the field values
is an unsupported action.
No validation is performed on the values, though consumers will
likely expect them to be of the types advertised.
The old tuple-based interface will be removed in Sphinx 9.
"""
super().__setattr__('default', default)
super().__setattr__('rebuild', rebuild)
super().__setattr__('valid_types', valid_types)
super().__setattr__('description', description)
def __repr__(self) -> str:
return (
f'{self.__class__.__qualname__}('
f'default={self.default!r}, '
f'rebuild={self.rebuild!r}, '
f'valid_types={self.rebuild!r}, '
f'description={self.description!r})'
)
def __eq__(self, other: object) -> bool:
if isinstance(other, _Opt):
self_tpl = (
self.default,
self.rebuild,
self.valid_types,
self.description,
)
other_tpl = (
other.default,
other.rebuild,
other.valid_types,
other.description,
)
return self_tpl == other_tpl
return NotImplemented
def __lt__(self, other: _Opt) -> bool:
if self.__class__ is other.__class__:
self_tpl = (
self.default,
self.rebuild,
self.valid_types,
self.description,
)
other_tpl = (
other.default,
other.rebuild,
other.valid_types,
other.description,
)
return self_tpl > other_tpl
return NotImplemented
def __hash__(self) -> int:
return hash((self.default, self.rebuild, self.valid_types, self.description))
def __setattr__(self, key: str, value: Any) -> None:
if key in {'default', 'rebuild', 'valid_types', 'description'}:
msg = f'{self.__class__.__name__!r} object does not support assignment to {key!r}'
raise TypeError(msg)
super().__setattr__(key, value)
def __delattr__(self, key: str) -> None:
if key in {'default', 'rebuild', 'valid_types', 'description'}:
msg = f'{self.__class__.__name__!r} object does not support deletion of {key!r}'
raise TypeError(msg)
super().__delattr__(key)
def __getstate__(self) -> tuple[Any, _ConfigRebuild, _OptValidTypes, str]:
return self.default, self.rebuild, self.valid_types, self.description
def __setstate__(
self, state: tuple[Any, _ConfigRebuild, _OptValidTypes, str]
) -> None:
default, rebuild, valid_types, description = state
super().__setattr__('default', default)
super().__setattr__('rebuild', rebuild)
super().__setattr__('valid_types', valid_types)
super().__setattr__('description', description)
| _Opt |
python | jazzband__django-simple-history | simple_history/tests/tests/test_models.py | {
"start": 61150,
"end": 62390
} | class ____(TestCase):
"""Test behavior of `latest()` without any field parameters"""
def setUp(self):
poll = Poll.objects.create(question="Does `latest()` work?", pub_date=yesterday)
poll.pub_date = today
poll.save()
def write_history(self, new_attributes):
poll_history = HistoricalPoll.objects.all()
for historical_poll, new_values in zip(poll_history, new_attributes):
for fieldname, value in new_values.items():
setattr(historical_poll, fieldname, value)
historical_poll.save()
def test_ordered(self):
self.write_history(
[{"pk": 1, "history_date": yesterday}, {"pk": 2, "history_date": today}]
)
self.assertEqual(HistoricalPoll.objects.latest().pk, 2)
def test_jumbled(self):
self.write_history(
[{"pk": 1, "history_date": today}, {"pk": 2, "history_date": yesterday}]
)
self.assertEqual(HistoricalPoll.objects.latest().pk, 1)
def test_sameinstant(self):
self.write_history(
[{"pk": 1, "history_date": yesterday}, {"pk": 2, "history_date": yesterday}]
)
self.assertEqual(HistoricalPoll.objects.latest().pk, 2)
| TestLatest |
python | huggingface__transformers | src/transformers/models/gemma/modeling_gemma.py | {
"start": 2108,
"end": 2782
} | class ____(nn.Module):
def __init__(self, dim: int, eps: float = 1e-6):
super().__init__()
self.eps = eps
self.weight = nn.Parameter(torch.zeros(dim))
def _norm(self, x):
return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
def forward(self, x):
output = self._norm(x.float())
# Llama does x.to(float16) * w whilst Gemma is (x * w).to(float16)
# See https://github.com/huggingface/transformers/pull/29402
output = output * (1.0 + self.weight.float())
return output.type_as(x)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.eps}"
| GemmaRMSNorm |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 43843,
"end": 44070
} | class ____(BaseModel, extra="forbid"):
"""
Geo point payload schema
"""
lon: float = Field(..., description="Geo point payload schema")
lat: float = Field(..., description="Geo point payload schema")
| GeoPoint |
python | zarr-developers__zarr-python | src/zarr/storage/_obstore.py | {
"start": 10888,
"end": 17429
} | class ____(TypedDict):
"""A response buffer associated with the original index that it should be restored to."""
original_request_index: int
"""The positional index in the original key_ranges input"""
buffer: Buffer
"""The buffer returned from obstore's range request."""
async def _make_bounded_requests(
store: _UpstreamObjectStore,
path: str,
requests: list[_BoundedRequest],
prototype: BufferPrototype,
semaphore: asyncio.Semaphore,
) -> list[_Response]:
"""Make all bounded requests for a specific file.
`obstore.get_ranges_async` allows for making concurrent requests for multiple ranges
within a single file, and will e.g. merge concurrent requests. This only uses one
single Python coroutine.
"""
import obstore as obs
starts = [r["start"] for r in requests]
ends = [r["end"] for r in requests]
async with semaphore:
responses = await obs.get_ranges_async(store, path=path, starts=starts, ends=ends)
buffer_responses: list[_Response] = []
for request, response in zip(requests, responses, strict=True):
buffer_responses.append(
{
"original_request_index": request["original_request_index"],
"buffer": prototype.buffer.from_bytes(response), # type: ignore[arg-type]
}
)
return buffer_responses
async def _make_other_request(
store: _UpstreamObjectStore,
request: _OtherRequest,
prototype: BufferPrototype,
semaphore: asyncio.Semaphore,
) -> list[_Response]:
"""Make offset or full-file requests.
We return a `list[_Response]` for symmetry with `_make_bounded_requests` so that all
futures can be gathered together.
"""
import obstore as obs
async with semaphore:
if request["range"] is None:
resp = await obs.get_async(store, request["path"])
else:
resp = await obs.get_async(store, request["path"], options={"range": request["range"]})
buffer = await resp.bytes_async()
return [
{
"original_request_index": request["original_request_index"],
"buffer": prototype.buffer.from_bytes(buffer), # type: ignore[arg-type]
}
]
async def _make_suffix_request(
store: _UpstreamObjectStore,
request: _SuffixRequest,
prototype: BufferPrototype,
semaphore: asyncio.Semaphore,
) -> list[_Response]:
"""Make suffix requests.
This is separated out from `_make_other_request` because some object stores (Azure)
don't support suffix requests. In this case, our workaround is to first get the
length of the object and then manually request the byte range at the end.
We return a `list[_Response]` for symmetry with `_make_bounded_requests` so that all
futures can be gathered together.
"""
import obstore as obs
async with semaphore:
try:
resp = await obs.get_async(store, request["path"], options={"range": request["range"]})
buffer = await resp.bytes_async()
except obs.exceptions.NotSupportedError:
head_resp = await obs.head_async(store, request["path"])
file_size = head_resp["size"]
suffix_len = request["range"]["suffix"]
buffer = await obs.get_range_async(
store,
request["path"],
start=file_size - suffix_len,
length=suffix_len,
)
return [
{
"original_request_index": request["original_request_index"],
"buffer": prototype.buffer.from_bytes(buffer), # type: ignore[arg-type]
}
]
async def _get_partial_values(
store: _UpstreamObjectStore,
prototype: BufferPrototype,
key_ranges: Iterable[tuple[str, ByteRequest | None]],
) -> list[Buffer | None]:
"""Make multiple range requests.
ObjectStore has a `get_ranges` method that will additionally merge nearby ranges,
but it's _per_ file. So we need to split these key_ranges into **per-file** key
ranges, and then reassemble the results in the original order.
We separate into different requests:
- One call to `obstore.get_ranges_async` **per target file**
- One call to `obstore.get_async` for each other request.
"""
key_ranges = list(key_ranges)
per_file_bounded_requests: dict[str, list[_BoundedRequest]] = defaultdict(list)
other_requests: list[_OtherRequest] = []
suffix_requests: list[_SuffixRequest] = []
for idx, (path, byte_range) in enumerate(key_ranges):
if byte_range is None:
other_requests.append(
{
"original_request_index": idx,
"path": path,
"range": None,
}
)
elif isinstance(byte_range, RangeByteRequest):
per_file_bounded_requests[path].append(
{"original_request_index": idx, "start": byte_range.start, "end": byte_range.end}
)
elif isinstance(byte_range, OffsetByteRequest):
other_requests.append(
{
"original_request_index": idx,
"path": path,
"range": {"offset": byte_range.offset},
}
)
elif isinstance(byte_range, SuffixByteRequest):
suffix_requests.append(
{
"original_request_index": idx,
"path": path,
"range": {"suffix": byte_range.suffix},
}
)
else:
raise ValueError(f"Unsupported range input: {byte_range}")
semaphore = asyncio.Semaphore(config.get("async.concurrency"))
futs: list[Coroutine[Any, Any, list[_Response]]] = []
for path, bounded_ranges in per_file_bounded_requests.items():
futs.append(
_make_bounded_requests(store, path, bounded_ranges, prototype, semaphore=semaphore)
)
for request in other_requests:
futs.append(_make_other_request(store, request, prototype, semaphore=semaphore)) # noqa: PERF401
for suffix_request in suffix_requests:
futs.append(_make_suffix_request(store, suffix_request, prototype, semaphore=semaphore)) # noqa: PERF401
buffers: list[Buffer | None] = [None] * len(key_ranges)
for responses in await asyncio.gather(*futs):
for resp in responses:
buffers[resp["original_request_index"]] = resp["buffer"]
return buffers
| _Response |
python | redis__redis-py | redis/client.py | {
"start": 52369,
"end": 65624
} | class ____(Redis):
"""
Pipelines provide a way to transmit multiple commands to the Redis server
in one transmission. This is convenient for batch processing, such as
saving all the values in a list to Redis.
All commands executed within a pipeline(when running in transactional mode,
which is the default behavior) are wrapped with MULTI and EXEC
calls. This guarantees all commands executed in the pipeline will be
executed atomically.
Any command raising an exception does *not* halt the execution of
subsequent commands in the pipeline. Instead, the exception is caught
and its instance is placed into the response list returned by execute().
Code iterating over the response list should be able to deal with an
instance of an exception as a potential value. In general, these will be
ResponseError exceptions, such as those raised when issuing a command
on a key of a different datatype.
"""
UNWATCH_COMMANDS = {"DISCARD", "EXEC", "UNWATCH"}
def __init__(
self,
connection_pool: ConnectionPool,
response_callbacks,
transaction,
shard_hint,
):
self.connection_pool = connection_pool
self.connection: Optional[Connection] = None
self.response_callbacks = response_callbacks
self.transaction = transaction
self.shard_hint = shard_hint
self.watching = False
self.command_stack = []
self.scripts: Set[Script] = set()
self.explicit_transaction = False
def __enter__(self) -> "Pipeline":
return self
def __exit__(self, exc_type, exc_value, traceback):
self.reset()
def __del__(self):
try:
self.reset()
except Exception:
pass
def __len__(self) -> int:
return len(self.command_stack)
def __bool__(self) -> bool:
"""Pipeline instances should always evaluate to True"""
return True
def reset(self) -> None:
self.command_stack = []
self.scripts = set()
# make sure to reset the connection state in the event that we were
# watching something
if self.watching and self.connection:
try:
# call this manually since our unwatch or
# immediate_execute_command methods can call reset()
self.connection.send_command("UNWATCH")
self.connection.read_response()
except ConnectionError:
# disconnect will also remove any previous WATCHes
self.connection.disconnect()
# clean up the other instance attributes
self.watching = False
self.explicit_transaction = False
# we can safely return the connection to the pool here since we're
# sure we're no longer WATCHing anything
if self.connection:
self.connection_pool.release(self.connection)
self.connection = None
def close(self) -> None:
"""Close the pipeline"""
self.reset()
def multi(self) -> None:
"""
Start a transactional block of the pipeline after WATCH commands
are issued. End the transactional block with `execute`.
"""
if self.explicit_transaction:
raise RedisError("Cannot issue nested calls to MULTI")
if self.command_stack:
raise RedisError(
"Commands without an initial WATCH have already been issued"
)
self.explicit_transaction = True
def execute_command(self, *args, **kwargs):
if (self.watching or args[0] == "WATCH") and not self.explicit_transaction:
return self.immediate_execute_command(*args, **kwargs)
return self.pipeline_execute_command(*args, **kwargs)
def _disconnect_reset_raise_on_watching(
self,
conn: AbstractConnection,
error: Exception,
) -> None:
"""
Close the connection reset watching state and
raise an exception if we were watching.
The supported exceptions are already checked in the
retry object so we don't need to do it here.
After we disconnect the connection, it will try to reconnect and
do a health check as part of the send_command logic(on connection level).
"""
conn.disconnect()
# if we were already watching a variable, the watch is no longer
# valid since this connection has died. raise a WatchError, which
# indicates the user should retry this transaction.
if self.watching:
self.reset()
raise WatchError(
f"A {type(error).__name__} occurred while watching one or more keys"
)
def immediate_execute_command(self, *args, **options):
"""
Execute a command immediately, but don't auto-retry on the supported
errors for retry if we're already WATCHing a variable.
Used when issuing WATCH or subsequent commands retrieving their values but before
MULTI is called.
"""
command_name = args[0]
conn = self.connection
# if this is the first call, we need a connection
if not conn:
conn = self.connection_pool.get_connection()
self.connection = conn
return conn.retry.call_with_retry(
lambda: self._send_command_parse_response(
conn, command_name, *args, **options
),
lambda error: self._disconnect_reset_raise_on_watching(conn, error),
)
def pipeline_execute_command(self, *args, **options) -> "Pipeline":
"""
Stage a command to be executed when execute() is next called
Returns the current Pipeline object back so commands can be
chained together, such as:
pipe = pipe.set('foo', 'bar').incr('baz').decr('bang')
At some other point, you can then run: pipe.execute(),
which will execute all commands queued in the pipe.
"""
self.command_stack.append((args, options))
return self
def _execute_transaction(
self, connection: Connection, commands, raise_on_error
) -> List:
cmds = chain([(("MULTI",), {})], commands, [(("EXEC",), {})])
all_cmds = connection.pack_commands(
[args for args, options in cmds if EMPTY_RESPONSE not in options]
)
connection.send_packed_command(all_cmds)
errors = []
# parse off the response for MULTI
# NOTE: we need to handle ResponseErrors here and continue
# so that we read all the additional command messages from
# the socket
try:
self.parse_response(connection, "_")
except ResponseError as e:
errors.append((0, e))
# and all the other commands
for i, command in enumerate(commands):
if EMPTY_RESPONSE in command[1]:
errors.append((i, command[1][EMPTY_RESPONSE]))
else:
try:
self.parse_response(connection, "_")
except ResponseError as e:
self.annotate_exception(e, i + 1, command[0])
errors.append((i, e))
# parse the EXEC.
try:
response = self.parse_response(connection, "_")
except ExecAbortError:
if errors:
raise errors[0][1]
raise
# EXEC clears any watched keys
self.watching = False
if response is None:
raise WatchError("Watched variable changed.")
# put any parse errors into the response
for i, e in errors:
response.insert(i, e)
if len(response) != len(commands):
self.connection.disconnect()
raise ResponseError(
"Wrong number of response items from pipeline execution"
)
# find any errors in the response and raise if necessary
if raise_on_error:
self.raise_first_error(commands, response)
# We have to run response callbacks manually
data = []
for r, cmd in zip(response, commands):
if not isinstance(r, Exception):
args, options = cmd
# Remove keys entry, it needs only for cache.
options.pop("keys", None)
command_name = args[0]
if command_name in self.response_callbacks:
r = self.response_callbacks[command_name](r, **options)
data.append(r)
return data
def _execute_pipeline(self, connection, commands, raise_on_error):
# build up all commands into a single request to increase network perf
all_cmds = connection.pack_commands([args for args, _ in commands])
connection.send_packed_command(all_cmds)
responses = []
for args, options in commands:
try:
responses.append(self.parse_response(connection, args[0], **options))
except ResponseError as e:
responses.append(e)
if raise_on_error:
self.raise_first_error(commands, responses)
return responses
def raise_first_error(self, commands, response):
for i, r in enumerate(response):
if isinstance(r, ResponseError):
self.annotate_exception(r, i + 1, commands[i][0])
raise r
def annotate_exception(self, exception, number, command):
cmd = " ".join(map(safe_str, command))
msg = (
f"Command # {number} ({truncate_text(cmd)}) of pipeline "
f"caused error: {exception.args[0]}"
)
exception.args = (msg,) + exception.args[1:]
def parse_response(self, connection, command_name, **options):
result = Redis.parse_response(self, connection, command_name, **options)
if command_name in self.UNWATCH_COMMANDS:
self.watching = False
elif command_name == "WATCH":
self.watching = True
return result
def load_scripts(self):
# make sure all scripts that are about to be run on this pipeline exist
scripts = list(self.scripts)
immediate = self.immediate_execute_command
shas = [s.sha for s in scripts]
# we can't use the normal script_* methods because they would just
# get buffered in the pipeline.
exists = immediate("SCRIPT EXISTS", *shas)
if not all(exists):
for s, exist in zip(scripts, exists):
if not exist:
s.sha = immediate("SCRIPT LOAD", s.script)
def _disconnect_raise_on_watching(
self,
conn: AbstractConnection,
error: Exception,
) -> None:
"""
Close the connection, raise an exception if we were watching.
The supported exceptions are already checked in the
retry object so we don't need to do it here.
After we disconnect the connection, it will try to reconnect and
do a health check as part of the send_command logic(on connection level).
"""
conn.disconnect()
# if we were watching a variable, the watch is no longer valid
# since this connection has died. raise a WatchError, which
# indicates the user should retry this transaction.
if self.watching:
raise WatchError(
f"A {type(error).__name__} occurred while watching one or more keys"
)
def execute(self, raise_on_error: bool = True) -> List[Any]:
"""Execute all the commands in the current pipeline"""
stack = self.command_stack
if not stack and not self.watching:
return []
if self.scripts:
self.load_scripts()
if self.transaction or self.explicit_transaction:
execute = self._execute_transaction
else:
execute = self._execute_pipeline
conn = self.connection
if not conn:
conn = self.connection_pool.get_connection()
# assign to self.connection so reset() releases the connection
# back to the pool after we're done
self.connection = conn
try:
return conn.retry.call_with_retry(
lambda: execute(conn, stack, raise_on_error),
lambda error: self._disconnect_raise_on_watching(conn, error),
)
finally:
# in reset() the connection is disconnected before returned to the pool if
# it is marked for reconnect.
self.reset()
def discard(self):
"""
Flushes all previously queued commands
See: https://redis.io/commands/DISCARD
"""
self.execute_command("DISCARD")
def watch(self, *names):
"""Watches the values at keys ``names``"""
if self.explicit_transaction:
raise RedisError("Cannot issue a WATCH after a MULTI")
return self.execute_command("WATCH", *names)
def unwatch(self) -> bool:
"""Unwatches all previously specified keys"""
return self.watching and self.execute_command("UNWATCH") or True
| Pipeline |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/scalarstring.py | {
"start": 474,
"end": 1571
} | class ____(str):
__slots__ = Anchor.attrib
def __new__(cls, *args, **kw):
# type: (Any, Any) -> Any
anchor = kw.pop('anchor', None)
ret_val = str.__new__(cls, *args, **kw)
if anchor is not None:
ret_val.yaml_set_anchor(anchor, always_dump=True)
return ret_val
def replace(self, old, new, maxreplace=-1):
# type: (Any, Any, int) -> Any
return type(self)((str.replace(self, old, new, maxreplace)))
@property
def anchor(self):
# type: () -> Any
if not hasattr(self, Anchor.attrib):
setattr(self, Anchor.attrib, Anchor())
return getattr(self, Anchor.attrib)
def yaml_anchor(self, any=False):
# type: (bool) -> Any
if not hasattr(self, Anchor.attrib):
return None
if any or self.anchor.always_dump:
return self.anchor
return None
def yaml_set_anchor(self, value, always_dump=False):
# type: (Any, bool) -> None
self.anchor.value = value
self.anchor.always_dump = always_dump
| ScalarString |
python | pytorch__pytorch | torch/testing/_internal/common_device_type.py | {
"start": 50701,
"end": 50872
} | class ____(skipIf):
def __init__(self, dep, reason):
super().__init__(dep, reason, device_type="meta")
# Skips a test on MPS if the condition is true.
| skipMetaIf |
python | numba__numba | numba/core/datamodel/models.py | {
"start": 8180,
"end": 9449
} | class ____(ProxyModel):
"""
Enum members are represented exactly like their values.
"""
def __init__(self, dmm, fe_type):
super(EnumModel, self).__init__(dmm, fe_type)
self._proxied_model = dmm.lookup(fe_type.dtype)
@register_default(types.Opaque)
@register_default(types.PyObject)
@register_default(types.RawPointer)
@register_default(types.NoneType)
@register_default(types.StringLiteral)
@register_default(types.EllipsisType)
@register_default(types.Function)
@register_default(types.Type)
@register_default(types.Object)
@register_default(types.Module)
@register_default(types.Phantom)
@register_default(types.UndefVar)
@register_default(types.ContextManager)
@register_default(types.Dispatcher)
@register_default(types.ObjModeDispatcher)
@register_default(types.ExceptionClass)
@register_default(types.Dummy)
@register_default(types.ExceptionInstance)
@register_default(types.ExternalFunction)
@register_default(types.EnumClass)
@register_default(types.IntEnumClass)
@register_default(types.NumberClass)
@register_default(types.TypeRef)
@register_default(types.NamedTupleClass)
@register_default(types.DType)
@register_default(types.RecursiveCall)
@register_default(types.MakeFunctionLiteral)
@register_default(types.Poison)
| EnumModel |
python | getsentry__sentry | src/sentry/api/serializers/models/pullrequest.py | {
"start": 441,
"end": 1082
} | class ____(TypedDict):
id: str
title: str | None
message: str | None
dateCreated: datetime
repository: RepositorySerializerResponse
author: Author
externalUrl: str
def get_users_for_pull_requests(item_list, user=None):
authors = list(
CommitAuthor.objects.filter(id__in=[i.author_id for i in item_list if i.author_id])
)
if authors:
org_ids = {item.organization_id for item in item_list}
if len(org_ids) == 1:
return get_users_for_authors(organization_id=org_ids.pop(), authors=authors, user=user)
return {}
@register(PullRequest)
| PullRequestSerializerResponse |
python | tiangolo__fastapi | docs_src/body_nested_models/tutorial004.py | {
"start": 109,
"end": 162
} | class ____(BaseModel):
url: str
name: str
| Image |
python | dagster-io__dagster | python_modules/libraries/dagster-shared/dagster_shared/plus/login_server.py | {
"start": 2278,
"end": 3630
} | class ____(HTTPServer):
organization: Optional[str] = None
token: Optional[str] = None
def __init__(self, host: tuple[str, int], nonce: str):
super().__init__(host, create_token_callback_handler(nonce))
def shutdown(self):
# Stop serving the token server
# https://stackoverflow.com/a/36017741
setattr(self, "_BaseServer__shutdown_request", True)
def set_result(self, organization: str, token: str):
self.organization = organization
self.token = token
def get_organization(self) -> Optional[str]:
return self.organization
def get_token(self) -> Optional[str]:
return self.token
def _generate_nonce():
return "".join(
random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(8)
)
def start_login_server(base_url: Optional[str] = None) -> tuple[TokenServer, str]:
"""Starts a login server on a free port and returns
the server and the URL to open in the browser.
"""
from dagster_shared.utils import find_free_port
port = find_free_port()
nonce = _generate_nonce()
escaped = parse.quote(f"/cli-auth/{nonce}?port={port}")
auth_url = f"{base_url or 'https://dagster.cloud'}?next={escaped}"
server = TokenServer(("localhost", port), nonce)
return server, auth_url
| TokenServer |
python | catalyst-team__catalyst | catalyst/contrib/optimizers/lookahead.py | {
"start": 194,
"end": 3897
} | class ____(Optimizer):
"""Implements Lookahead algorithm.
It has been proposed in `Lookahead Optimizer: k steps forward,
1 step back`_.
Adapted from:
https://github.com/alphadl/lookahead.pytorch (MIT License)
.. _`Lookahead Optimizer\: k steps forward, 1 step back`:
https://arxiv.org/abs/1907.08610
"""
def __init__(self, optimizer: Optimizer, k: int = 5, alpha: float = 0.5):
"""@TODO: Docs. Contribution is welcome."""
self.optimizer = optimizer
self.k = k
self.alpha = alpha
self.param_groups = self.optimizer.param_groups
self.defaults = self.optimizer.defaults
self.state = defaultdict(dict)
self.fast_state = self.optimizer.state
for group in self.param_groups:
group["counter"] = 0
def update(self, group):
"""@TODO: Docs. Contribution is welcome."""
for fast in group["params"]:
param_state = self.state[fast]
if "slow_param" not in param_state:
param_state["slow_param"] = torch.zeros_like(fast.data)
param_state["slow_param"].copy_(fast.data)
slow = param_state["slow_param"]
slow += (fast.data - slow) * self.alpha
fast.data.copy_(slow)
def update_lookahead(self):
"""@TODO: Docs. Contribution is welcome."""
for group in self.param_groups:
self.update(group)
def step(self, closure: Optional[Callable] = None):
"""Makes optimizer step.
Args:
closure (callable, optional): A closure that reevaluates
the model and returns the loss.
Returns:
computed loss
"""
loss = self.optimizer.step(closure)
for group in self.param_groups:
if group["counter"] == 0:
self.update(group)
group["counter"] += 1
if group["counter"] >= self.k:
group["counter"] = 0
return loss
def state_dict(self):
"""@TODO: Docs. Contribution is welcome."""
fast_state_dict = self.optimizer.state_dict()
slow_state = {
(id(k) if isinstance(k, torch.Tensor) else k): v
for k, v in self.state.items()
}
fast_state = fast_state_dict["state"]
param_groups = fast_state_dict["param_groups"]
return {
"fast_state": fast_state,
"slow_state": slow_state,
"param_groups": param_groups,
}
def load_state_dict(self, state_dict):
"""@TODO: Docs. Contribution is welcome."""
slow_state_dict = {
"state": state_dict["slow_state"],
"param_groups": state_dict["param_groups"],
}
fast_state_dict = {
"state": state_dict["fast_state"],
"param_groups": state_dict["param_groups"],
}
super(Lookahead, self).load_state_dict(slow_state_dict)
self.optimizer.load_state_dict(fast_state_dict)
self.fast_state = self.optimizer.state
def add_param_group(self, param_group):
"""@TODO: Docs. Contribution is welcome."""
param_group["counter"] = 0
self.optimizer.add_param_group(param_group)
@classmethod
def get_from_params(
cls, params: Dict, base_optimizer_params: Dict = None, **kwargs
) -> "Lookahead":
"""@TODO: Docs. Contribution is welcome."""
from catalyst.registry import REGISTRY
base_optimizer = REGISTRY.get_from_params(params=params, **base_optimizer_params)
optimizer = cls(optimizer=base_optimizer, **kwargs)
return optimizer
__all__ = ["Lookahead"]
| Lookahead |
python | getsentry__sentry | tests/sentry/search/events/builder/test_span_metrics.py | {
"start": 1364,
"end": 12095
} | class ____(MetricsEnhancedPerformanceTestCase):
@pytest.mark.querybuilder
def test_granularity(self) -> None:
# Need to pick granularity based on the period
def get_granularity(start, end):
params = {
"organization_id": self.organization.id,
"project_id": [self.project.id],
"start": start,
"end": end,
}
query = SpansMetricsQueryBuilder(params)
return query.resolve_split_granularity()
# If we're doing atleast day and its midnight we should use the daily bucket, no granularity splitting happens
start = datetime.datetime(2015, 5, 18, 0, 0, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 19, 0, 0, 0, tzinfo=timezone.utc)
condition, granularity = get_granularity(start, end)
assert granularity.granularity == 86400, "Granularity at a day at midnight"
assert condition == [], "Condition at a day at midnight"
# If we're doing several days, allow more range
start = datetime.datetime(2015, 5, 18, 0, 10, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 28, 23, 59, 0, tzinfo=timezone.utc)
condition, granularity = get_granularity(start, end)
assert granularity.granularity == 86400, "Granularity at Several days"
assert condition == [], "Condition at Several days"
# We're doing a long period, use the biggest granularity
start = datetime.datetime(2015, 5, 18, 12, 33, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 7, 28, 17, 22, 0, tzinfo=timezone.utc)
condition, granularity = get_granularity(start, end)
assert granularity.granularity == 86400, "Granularity at Big Range"
assert condition == [], "Condition at Big Range"
# If we're on the start of the hour we should use the hour granularity
start = datetime.datetime(2015, 5, 18, 23, 0, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 20, 1, 0, 0, tzinfo=timezone.utc)
condition, granularity = get_granularity(start, end)
assert granularity is None, "Granularity on the hour"
assert condition == create_condition(
datetime.datetime(2015, 5, 19), datetime.datetime(2015, 5, 20), 3600, 86400
), "Condition, on the hour"
# If we're close to the start of the hour we should use the hour granularity
start = datetime.datetime(2015, 5, 18, 23, 3, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 21, 1, 57, 0, tzinfo=timezone.utc)
condition, granularity = get_granularity(start, end)
assert granularity is None, "Granularity on the hour, close"
assert condition == create_condition(
datetime.datetime(2015, 5, 19), datetime.datetime(2015, 5, 21), 3600, 86400
), "Condition on the hour, close"
# A decently long period but not close to hour ends, still use hour bucket
start = datetime.datetime(2015, 5, 18, 23, 3, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 28, 1, 57, 0, tzinfo=timezone.utc)
condition, granularity = get_granularity(start, end)
assert granularity is None, "Granularity on the hour, long period"
assert condition == create_condition(
datetime.datetime(2015, 5, 19), datetime.datetime(2015, 5, 28), 3600, 86400
), "Condition on the hour, long period"
# Hour to hour should only happen at the precise hour
start = datetime.datetime(2015, 5, 18, 10, 0, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 18, 18, 0, 0, tzinfo=timezone.utc)
condition, granularity = get_granularity(start, end)
assert granularity.granularity, "Granularity precisely hour to hour"
assert condition == [], "Condition precisely hour to hour"
# Even a few seconds means we need to switch back to minutes since the latter bucket may not be filled
start = datetime.datetime(2015, 5, 18, 10, 0, 1, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 18, 18, 0, 1, tzinfo=timezone.utc)
condition, granularity = get_granularity(start, end)
assert granularity is None, "Granularity, hour to hour but with seconds"
assert condition == create_condition(
datetime.datetime(2015, 5, 18, 11), datetime.datetime(2015, 5, 18, 18), 60, 3600
), "Condition, hour to hour but with seconds"
# Even though this is >24h of data, because its a random hour in the middle of the day to the next we use minute
# granularity
start = datetime.datetime(2015, 5, 18, 10, 15, 1, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 18, 18, 15, 1, tzinfo=timezone.utc)
condition, granularity = get_granularity(start, end)
assert granularity is None, "Granularity, hour to hour but random minute"
assert condition == create_condition(
datetime.datetime(2015, 5, 18, 11), datetime.datetime(2015, 5, 18, 18), 60, 3600
), "Condition, hour to hour but random minute"
# Less than a minute, no reason to work hard for such a small window, just use a minute
start = datetime.datetime(2015, 5, 18, 10, 15, 1, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 18, 10, 15, 34, tzinfo=timezone.utc)
condition, granularity = get_granularity(start, end)
assert granularity.granularity == 60, "Granularity, less than a minute"
def test_granularity_boundaries(self) -> None:
# Need to pick granularity based on the period
def get_granularity(start, end):
params = {
"organization_id": self.organization.id,
"project_id": [self.project.id],
"start": start,
"end": end,
}
query = SpansMetricsQueryBuilder(params)
return query.resolve_split_granularity()
# See resolve_granularity on the MQB to see what these boundaries are
# Exactly 30d, at the 30 minute boundary
start = datetime.datetime(2015, 5, 1, 0, 30, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 31, 0, 30, 0, tzinfo=timezone.utc)
condition, granularity = get_granularity(start, end)
assert granularity.granularity == 86400, "Granularity, 30d at boundary"
assert condition == [], "Condition, 30d at boundary"
# Near 30d, but 1 hour before the boundary for end
start = datetime.datetime(2015, 5, 1, 0, 30, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 30, 23, 29, 0, tzinfo=timezone.utc)
condition, granularity = get_granularity(start, end)
assert granularity is None, "Granularity, near 30d, but 1 hour before boundary for end"
assert condition == create_condition(
datetime.datetime(2015, 5, 2), datetime.datetime(2015, 5, 30), 3600, 86400
), "Condition, near 30d but 1 hour before boundary for end"
# Near 30d, but 1 hour after the boundary for start
start = datetime.datetime(2015, 5, 1, 1, 30, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 31, 0, 30, 0, tzinfo=timezone.utc)
condition, granularity = get_granularity(start, end)
assert granularity is None, "Granularity, near 30d, but 1 hour before boundary for start"
assert condition == create_condition(
datetime.datetime(2015, 5, 2), datetime.datetime(2015, 5, 31), 3600, 86400
), "Condition, near 30d but 1 hour before boundary for start"
# Exactly 3d
start = datetime.datetime(2015, 5, 1, 0, 30, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 4, 0, 30, 0, tzinfo=timezone.utc)
condition, granularity = get_granularity(start, end)
assert granularity.granularity == 86400, "3d at boundary"
assert condition == []
# Near 3d, but 1 hour before the boundary for end
start = datetime.datetime(2015, 5, 1, 0, 13, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 3, 23, 45, 0, tzinfo=timezone.utc)
condition, granularity = get_granularity(start, end)
assert granularity is None, "Granularity, near 3d, but 1 hour before boundary for end"
assert condition == create_condition(
datetime.datetime(2015, 5, 2), datetime.datetime(2015, 5, 3), 3600, 86400
), "Condition, near 3d but 1 hour before boundary for end"
# Near 3d, but 1 hour after the boundary for start
start = datetime.datetime(2015, 5, 1, 1, 46, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 4, 0, 46, 0, tzinfo=timezone.utc)
condition, granularity = get_granularity(start, end)
assert granularity is None, "Granularity, near 3d, but 1 hour before boundary for start"
assert condition == create_condition(
datetime.datetime(2015, 5, 2), datetime.datetime(2015, 5, 4), 3600, 86400
), "Condition, near 3d but 1 hour before boundary for start"
# exactly 12 hours
start = datetime.datetime(2015, 5, 1, 0, 15, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 1, 12, 15, 0, tzinfo=timezone.utc)
condition, granularity = get_granularity(start, end)
assert granularity.granularity == 3600, "Granularity, 12h at boundary"
assert condition == [], "Condition, 12h at boundary"
# Near 12h, but 15 minutes before the boundary for end
start = datetime.datetime(2015, 5, 1, 0, 15, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 1, 12, 0, 0, tzinfo=timezone.utc)
condition, granularity = get_granularity(start, end)
assert (
granularity is None
), "Granularity, 12h at boundary, but 15 min before the boundary for end"
assert condition == create_condition(
datetime.datetime(2015, 5, 1, 1), datetime.datetime(2015, 5, 1, 12), 60, 3600
), "Condition, 12h at boundary, but 15 min before the boundary for end"
# Near 12h, but 15 minutes after the boundary for start
start = datetime.datetime(2015, 5, 1, 0, 30, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 1, 12, 15, 0, tzinfo=timezone.utc)
condition, granularity = get_granularity(start, end)
assert (
granularity is None
), "Granularity, 12h at boundary, but 15 min before the boundary for start"
assert condition == create_condition(
datetime.datetime(2015, 5, 1, 1), datetime.datetime(2015, 5, 1, 12), 60, 3600
), "Condition, 12h at boundary, but 15 min before the boundary for start"
| MetricQueryBuilderTest |
python | pytransitions__transitions | tests/utils.py | {
"start": 34,
"end": 2169
} | class ____(object):
is_false = False
is_True = True
def __init__(self, states=None, machine_cls=Machine, extra_kwargs=None):
extra_kwargs = extra_kwargs if extra_kwargs is not None else {}
self.state = None
self.message = None
states = ['A', 'B', 'C', 'D', 'E', 'F'] if states is None else states
args = [self]
kwargs = {
'states': states,
'initial': 'A',
'name': 'Test Machine',
}
kwargs.update(extra_kwargs)
if machine_cls is not None:
self.machine = machine_cls(*args, **kwargs)
self.level = 1
self.transitions = 0
self.machine_cls = machine_cls
@staticmethod
def this_passes():
return True
@staticmethod
def this_fails():
return False
@staticmethod
def this_raises(exception, *args, **kwargs):
raise exception
@staticmethod
def this_fails_by_default(boolean=False):
return boolean
@staticmethod
def extract_boolean(event_data):
return event_data.kwargs['boolean']
def goodbye(self):
self.message = "So long, suckers!"
def hello_world(self):
self.message = "Hello World!"
def greet(self):
self.message = "Hi"
def meet(self):
self.message = "Nice to meet you"
def hello_F(self):
if self.message is None:
self.message = ''
self.message += "Hello F!"
def increase_level(self):
self.level += 1
self.transitions += 1
def decrease_level(self):
self.level -= 1
self.transitions += 1
def set_message(self, message="Hello World!"):
self.message = message
def extract_message(self, event_data):
self.message = event_data.kwargs['message']
def on_enter_E(self, msg=None):
self.message = "I am E!" if msg is None else msg
def on_exit_E(self):
self.exit_message = "E go home..."
def on_enter_F(self):
self.message = "I am F!"
@property
def property_that_fails(self):
return self.is_false
| Stuff |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 174346,
"end": 181724
} | class ____:
def setup_method(self):
self.rng = check_random_state(1234)
def test_normal(self):
# When the skewness is 0 the distribution is normal
x = np.linspace(-5, 5, 100)
assert_array_almost_equal(stats.skewnorm.pdf(x, a=0),
stats.norm.pdf(x))
def test_rvs(self):
rng = check_random_state(1234)
shape = (3, 4, 5)
x = stats.skewnorm.rvs(a=0.75, size=shape, random_state=rng)
assert_equal(shape, x.shape)
x = stats.skewnorm.rvs(a=-3, size=shape, random_state=rng)
assert_equal(shape, x.shape)
def test_moments(self):
rng = check_random_state(1234)
X = stats.skewnorm.rvs(a=4, size=int(1e6), loc=5, scale=2,
random_state=rng)
expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]
computed = stats.skewnorm.stats(a=4, loc=5, scale=2, moments='mvsk')
assert_array_almost_equal(computed, expected, decimal=2)
X = stats.skewnorm.rvs(a=-4, size=int(1e6), loc=5, scale=2,
random_state=rng)
expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]
computed = stats.skewnorm.stats(a=-4, loc=5, scale=2, moments='mvsk')
assert_array_almost_equal(computed, expected, decimal=2)
def test_pdf_large_x(self):
# Triples are [x, a, logpdf(x, a)]. These values were computed
# using Log[PDF[SkewNormalDistribution[0, 1, a], x]] in Wolfram Alpha.
logpdfvals = [
[40, -1, -1604.834233366398515598970],
[40, -1/2, -1004.142946723741991369168],
[40, 0, -800.9189385332046727417803],
[40, 1/2, -800.2257913526447274323631],
[-40, -1/2, -800.2257913526447274323631],
[-2, 1e7, -2.000000000000199559727173e14],
[2, -1e7, -2.000000000000199559727173e14],
]
for x, a, logpdfval in logpdfvals:
logp = stats.skewnorm.logpdf(x, a)
assert_allclose(logp, logpdfval, rtol=1e-8)
def test_cdf_large_x(self):
# Regression test for gh-7746.
# The x values are large enough that the closest 64 bit floating
# point representation of the exact CDF is 1.0.
p = stats.skewnorm.cdf([10, 20, 30], -1)
assert_allclose(p, np.ones(3), rtol=1e-14)
p = stats.skewnorm.cdf(25, 2.5)
assert_allclose(p, 1.0, rtol=1e-14)
def test_cdf_sf_small_values(self):
# Triples are [x, a, cdf(x, a)]. These values were computed
# using CDF[SkewNormalDistribution[0, 1, a], x] in Wolfram Alpha.
cdfvals = [
[-8, 1, 3.870035046664392611e-31],
[-4, 2, 8.1298399188811398e-21],
[-2, 5, 1.55326826787106273e-26],
[-9, -1, 2.257176811907681295e-19],
[-10, -4, 1.523970604832105213e-23],
]
for x, a, cdfval in cdfvals:
p = stats.skewnorm.cdf(x, a)
assert_allclose(p, cdfval, rtol=1e-8)
# For the skew normal distribution, sf(-x, -a) = cdf(x, a).
p = stats.skewnorm.sf(-x, -a)
assert_allclose(p, cdfval, rtol=1e-8)
@pytest.mark.parametrize('a, moments', _skewnorm_noncentral_moments)
def test_noncentral_moments(self, a, moments):
for order, expected in enumerate(moments, start=1):
mom = stats.skewnorm.moment(order, a)
assert_allclose(mom, expected, rtol=1e-14)
def test_fit(self):
rng = np.random.default_rng(4609813989115202851)
a, loc, scale = -2, 3.5, 0.5 # arbitrary, valid parameters
dist = stats.skewnorm(a, loc, scale)
rvs = dist.rvs(size=100, random_state=rng)
# test that MLE still honors guesses and fixed parameters
a2, loc2, scale2 = stats.skewnorm.fit(rvs, -1.5, floc=3)
a3, loc3, scale3 = stats.skewnorm.fit(rvs, -1.6, floc=3)
assert loc2 == loc3 == 3 # fixed parameter is respected
assert a2 != a3 # different guess -> (slightly) different outcome
# quality of fit is tested elsewhere
# test that MoM honors fixed parameters, accepts (but ignores) guesses
a4, loc4, scale4 = stats.skewnorm.fit(rvs, 3, fscale=3, method='mm')
assert scale4 == 3
# because scale was fixed, only the mean and skewness will be matched
dist4 = stats.skewnorm(a4, loc4, scale4)
res = dist4.stats(moments='ms')
ref = np.mean(rvs), stats.skew(rvs)
assert_allclose(res, ref)
# Test behavior when skew of data is beyond maximum of skewnorm
rvs2 = stats.pareto.rvs(1, size=100, random_state=rng)
# MLE still works
res = stats.skewnorm.fit(rvs2)
assert np.all(np.isfinite(res))
# MoM fits variance and skewness
a5, loc5, scale5 = stats.skewnorm.fit(rvs2, method='mm')
assert np.isinf(a5)
# distribution infrastructure doesn't allow infinite shape parameters
# into _stats; it just bypasses it and produces NaNs. Calculate
# moments manually.
m, v = np.mean(rvs2), np.var(rvs2)
assert_allclose(m, loc5 + scale5 * np.sqrt(2/np.pi))
assert_allclose(v, scale5**2 * (1 - 2 / np.pi))
# test that MLE and MoM behave as expected under sign changes
a6p, loc6p, scale6p = stats.skewnorm.fit(rvs, method='mle')
a6m, loc6m, scale6m = stats.skewnorm.fit(-rvs, method='mle')
assert_allclose([a6m, loc6m, scale6m], [-a6p, -loc6p, scale6p])
a7p, loc7p, scale7p = stats.skewnorm.fit(rvs, method='mm')
a7m, loc7m, scale7m = stats.skewnorm.fit(-rvs, method='mm')
assert_allclose([a7m, loc7m, scale7m], [-a7p, -loc7p, scale7p])
def test_fit_gh19332(self):
# When the skewness of the data was high, `skewnorm.fit` fell back on
# generic `fit` behavior with a bad guess of the skewness parameter.
# Test that this is improved; `skewnorm.fit` is now better at finding
# the global optimum when the sample is highly skewed. See gh-19332.
x = np.array([-5, -1, 1 / 100_000] + 12 * [1] + [5])
params = stats.skewnorm.fit(x)
res = stats.skewnorm.nnlf(params, x)
# Compare overridden fit against generic fit.
# res should be about 32.01, and generic fit is worse at 32.64.
# In case the generic fit improves, remove this assertion (see gh-19333).
params_super = stats.skewnorm.fit(x, superfit=True)
ref = stats.skewnorm.nnlf(params_super, x)
assert res < ref - 0.5
# Compare overridden fit against stats.fit
rng = np.random.default_rng(9842356982345693637)
bounds = {'a': (-5, 5), 'loc': (-10, 10), 'scale': (1e-16, 10)}
def optimizer(fun, bounds):
return differential_evolution(fun, bounds, rng=rng)
fit_result = stats.fit(stats.skewnorm, x, bounds, optimizer=optimizer)
np.testing.assert_allclose(params, fit_result.params, rtol=1e-4)
def test_ppf(self):
# gh-20124 reported that Boost's ppf was wrong for high skewness
# Reference value was calculated using
# N[InverseCDF[SkewNormalDistribution[0, 1, 500], 1/100], 14] in Wolfram Alpha.
assert_allclose(stats.skewnorm.ppf(0.01, 500), 0.012533469508013, rtol=1e-13)
| TestSkewNorm |
python | optuna__optuna | tests/samplers_tests/test_samplers.py | {
"start": 18080,
"end": 38647
} | class ____(BaseSampler):
def __init__(
self,
relative_search_space: dict[str, BaseDistribution],
relative_params: dict[str, Any],
unknown_param_value: Any,
) -> None:
self.relative_search_space = relative_search_space
self.relative_params = relative_params
self.unknown_param_value = unknown_param_value
def infer_relative_search_space(
self, study: Study, trial: FrozenTrial
) -> dict[str, BaseDistribution]:
return self.relative_search_space
def sample_relative(
self, study: Study, trial: FrozenTrial, search_space: dict[str, BaseDistribution]
) -> dict[str, Any]:
return self.relative_params
def sample_independent(
self,
study: Study,
trial: FrozenTrial,
param_name: str,
param_distribution: BaseDistribution,
) -> Any:
return self.unknown_param_value
def test_sample_relative() -> None:
relative_search_space: dict[str, BaseDistribution] = {
"a": FloatDistribution(low=0, high=5),
"b": CategoricalDistribution(choices=("foo", "bar", "baz")),
"c": IntDistribution(low=20, high=50), # Not exist in `relative_params`.
}
relative_params = {
"a": 3.2,
"b": "baz",
}
unknown_param_value = 30
sampler = FixedSampler(relative_search_space, relative_params, unknown_param_value)
study = optuna.study.create_study(sampler=sampler)
def objective(trial: Trial) -> float:
# Predefined parameters are sampled by `sample_relative()` method.
assert trial.suggest_float("a", 0, 5) == 3.2
assert trial.suggest_categorical("b", ["foo", "bar", "baz"]) == "baz"
# Other parameters are sampled by `sample_independent()` method.
assert trial.suggest_int("c", 20, 50) == unknown_param_value
assert trial.suggest_float("d", 1, 100, log=True) == unknown_param_value
assert trial.suggest_float("e", 20, 40) == unknown_param_value
return 0.0
study.optimize(objective, n_trials=10, catch=())
for trial in study.trials:
assert trial.params == {"a": 3.2, "b": "baz", "c": 30, "d": 30, "e": 30}
@parametrize_sampler
def test_nan_objective_value(sampler_class: Callable[[], BaseSampler]) -> None:
study = optuna.create_study(sampler=sampler_class())
def objective(trial: Trial, base_value: float) -> float:
return trial.suggest_float("x", 0.1, 0.2) + base_value
# Non NaN objective values.
for i in range(10, 1, -1):
study.optimize(lambda t: objective(t, i), n_trials=1, catch=())
assert int(study.best_value) == 2
# NaN objective values.
study.optimize(lambda t: objective(t, float("nan")), n_trials=1, catch=())
assert int(study.best_value) == 2
# Non NaN objective value.
study.optimize(lambda t: objective(t, 1), n_trials=1, catch=())
assert int(study.best_value) == 1
@parametrize_sampler
def test_partial_fixed_sampling(sampler_class: Callable[[], BaseSampler]) -> None:
study = optuna.create_study(sampler=sampler_class())
def objective(trial: Trial) -> float:
x = trial.suggest_float("x", -1, 1)
y = trial.suggest_int("y", -1, 1)
z = trial.suggest_float("z", -1, 1)
return x + y + z
# First trial.
study.optimize(objective, n_trials=1)
# Second trial. Here, the parameter ``y`` is fixed as 0.
fixed_params = {"y": 0}
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
study.sampler = optuna.samplers.PartialFixedSampler(fixed_params, study.sampler)
study.optimize(objective, n_trials=1)
trial_params = study.trials[-1].params
assert trial_params["y"] == fixed_params["y"]
@parametrize_multi_objective_sampler
@pytest.mark.parametrize(
"distribution",
[
FloatDistribution(-1.0, 1.0),
FloatDistribution(0.0, 1.0),
FloatDistribution(-1.0, 0.0),
FloatDistribution(1e-7, 1.0, log=True),
FloatDistribution(-10, 10, step=0.1),
FloatDistribution(-10.2, 10.2, step=0.1),
IntDistribution(-10, 10),
IntDistribution(0, 10),
IntDistribution(-10, 0),
IntDistribution(-10, 10, step=2),
IntDistribution(0, 10, step=2),
IntDistribution(-10, 0, step=2),
IntDistribution(1, 100, log=True),
CategoricalDistribution((1, 2, 3)),
CategoricalDistribution(("a", "b", "c")),
CategoricalDistribution((1, "a")),
],
)
def test_multi_objective_sample_independent(
multi_objective_sampler_class: Callable[[], BaseSampler], distribution: BaseDistribution
) -> None:
study = optuna.study.create_study(
directions=["minimize", "maximize"], sampler=multi_objective_sampler_class()
)
for i in range(100):
value = study.sampler.sample_independent(
study, _create_new_trial(study), "x", distribution
)
assert distribution._contains(distribution.to_internal_repr(value))
if not isinstance(distribution, CategoricalDistribution):
# Please see https://github.com/optuna/optuna/pull/393 why this assertion is needed.
assert not isinstance(value, np.floating)
if isinstance(distribution, FloatDistribution):
if distribution.step is not None:
# Check the value is a multiple of `distribution.step` which is
# the quantization interval of the distribution.
value -= distribution.low
value /= distribution.step
round_value = np.round(value)
np.testing.assert_almost_equal(round_value, value)
def test_before_trial() -> None:
n_calls = 0
n_trials = 3
class SamplerBeforeTrial(optuna.samplers.RandomSampler):
def before_trial(self, study: Study, trial: FrozenTrial) -> None:
assert len(study.trials) - 1 == trial.number
assert trial.state == TrialState.RUNNING
assert trial.values is None
nonlocal n_calls
n_calls += 1
sampler = SamplerBeforeTrial()
study = optuna.create_study(directions=["minimize", "minimize"], sampler=sampler)
study.optimize(
lambda t: [t.suggest_float("y", -3, 3), t.suggest_int("x", 0, 10)], n_trials=n_trials
)
assert n_calls == n_trials
def test_after_trial() -> None:
n_calls = 0
n_trials = 3
class SamplerAfterTrial(optuna.samplers.RandomSampler):
def after_trial(
self,
study: Study,
trial: FrozenTrial,
state: TrialState,
values: Sequence[float] | None,
) -> None:
assert len(study.trials) - 1 == trial.number
assert trial.state == TrialState.RUNNING
assert trial.values is None
assert state == TrialState.COMPLETE
assert values is not None
assert len(values) == 2
nonlocal n_calls
n_calls += 1
sampler = SamplerAfterTrial()
study = optuna.create_study(directions=["minimize", "minimize"], sampler=sampler)
study.optimize(lambda t: [t.suggest_float("y", -3, 3), t.suggest_int("x", 0, 10)], n_trials=3)
assert n_calls == n_trials
def test_after_trial_pruning() -> None:
n_calls = 0
n_trials = 3
class SamplerAfterTrial(optuna.samplers.RandomSampler):
def after_trial(
self,
study: Study,
trial: FrozenTrial,
state: TrialState,
values: Sequence[float] | None,
) -> None:
assert len(study.trials) - 1 == trial.number
assert trial.state == TrialState.RUNNING
assert trial.values is None
assert state == TrialState.PRUNED
assert values is None
nonlocal n_calls
n_calls += 1
sampler = SamplerAfterTrial()
study = optuna.create_study(directions=["minimize", "minimize"], sampler=sampler)
study.optimize(pruned_objective, n_trials=n_trials)
assert n_calls == n_trials
def test_after_trial_failing() -> None:
n_calls = 0
n_trials = 3
class SamplerAfterTrial(optuna.samplers.RandomSampler):
def after_trial(
self,
study: Study,
trial: FrozenTrial,
state: TrialState,
values: Sequence[float] | None,
) -> None:
assert len(study.trials) - 1 == trial.number
assert trial.state == TrialState.RUNNING
assert trial.values is None
assert state == TrialState.FAIL
assert values is None
nonlocal n_calls
n_calls += 1
sampler = SamplerAfterTrial()
study = optuna.create_study(directions=["minimize", "minimize"], sampler=sampler)
with pytest.raises(ValueError):
study.optimize(fail_objective, n_trials=n_trials)
# Called once after the first failing trial before returning from optimize.
assert n_calls == 1
def test_after_trial_failing_in_after_trial() -> None:
n_calls = 0
n_trials = 3
class SamplerAfterTrialAlwaysFail(optuna.samplers.RandomSampler):
def after_trial(
self,
study: Study,
trial: FrozenTrial,
state: TrialState,
values: Sequence[float] | None,
) -> None:
nonlocal n_calls
n_calls += 1
raise NotImplementedError # Arbitrary error for testing purpose.
sampler = SamplerAfterTrialAlwaysFail()
study = optuna.create_study(sampler=sampler)
with pytest.raises(NotImplementedError):
study.optimize(lambda t: t.suggest_int("x", 0, 10), n_trials=n_trials)
assert len(study.trials) == 1
assert n_calls == 1
sampler = SamplerAfterTrialAlwaysFail()
study = optuna.create_study(sampler=sampler)
# Not affected by `catch`.
with pytest.raises(NotImplementedError):
study.optimize(
lambda t: t.suggest_int("x", 0, 10), n_trials=n_trials, catch=(NotImplementedError,)
)
assert len(study.trials) == 1
assert n_calls == 2
def test_after_trial_with_study_tell() -> None:
n_calls = 0
class SamplerAfterTrial(optuna.samplers.RandomSampler):
def after_trial(
self,
study: Study,
trial: FrozenTrial,
state: TrialState,
values: Sequence[float] | None,
) -> None:
nonlocal n_calls
n_calls += 1
sampler = SamplerAfterTrial()
study = optuna.create_study(sampler=sampler)
assert n_calls == 0
study.tell(study.ask(), 1.0)
assert n_calls == 1
@parametrize_sampler
def test_sample_single_distribution(sampler_class: Callable[[], BaseSampler]) -> None:
relative_search_space = {
"a": CategoricalDistribution([1]),
"b": IntDistribution(low=1, high=1),
"c": IntDistribution(low=1, high=1, log=True),
"d": FloatDistribution(low=1.0, high=1.0),
"e": FloatDistribution(low=1.0, high=1.0, log=True),
"f": FloatDistribution(low=1.0, high=1.0, step=1.0),
}
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = sampler_class()
study = optuna.study.create_study(sampler=sampler)
# We need to test the construction of the model, so we should set `n_trials >= 2`.
for _ in range(2):
trial = study.ask(fixed_distributions=relative_search_space)
study.tell(trial, 1.0)
for param_name in relative_search_space.keys():
assert trial.params[param_name] == 1
@parametrize_sampler
@parametrize_suggest_method("x")
def test_single_parameter_objective(
sampler_class: Callable[[], BaseSampler], suggest_method_x: Callable[[Trial], float]
) -> None:
def objective(trial: Trial) -> float:
return suggest_method_x(trial)
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = sampler_class()
study = optuna.study.create_study(sampler=sampler)
study.optimize(objective, n_trials=10)
assert len(study.trials) == 10
assert all(t.state == TrialState.COMPLETE for t in study.trials)
@parametrize_sampler
def test_conditional_parameter_objective(sampler_class: Callable[[], BaseSampler]) -> None:
def objective(trial: Trial) -> float:
x = trial.suggest_categorical("x", [True, False])
if x:
return trial.suggest_float("y", 0, 1)
return trial.suggest_float("z", 0, 1)
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = sampler_class()
study = optuna.study.create_study(sampler=sampler)
study.optimize(objective, n_trials=10)
assert len(study.trials) == 10
assert all(t.state == TrialState.COMPLETE for t in study.trials)
@parametrize_sampler
@parametrize_suggest_method("x")
@parametrize_suggest_method("y")
def test_combination_of_different_distributions_objective(
sampler_class: Callable[[], BaseSampler],
suggest_method_x: Callable[[Trial], float],
suggest_method_y: Callable[[Trial], float],
) -> None:
def objective(trial: Trial) -> float:
return suggest_method_x(trial) + suggest_method_y(trial)
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = sampler_class()
study = optuna.study.create_study(sampler=sampler)
study.optimize(objective, n_trials=3)
assert len(study.trials) == 3
assert all(t.state == TrialState.COMPLETE for t in study.trials)
@parametrize_sampler
@pytest.mark.parametrize(
"second_low,second_high",
[
(0, 5), # Narrow range.
(0, 20), # Expand range.
(20, 30), # Set non-overlapping range.
],
)
def test_dynamic_range_objective(
sampler_class: Callable[[], BaseSampler], second_low: int, second_high: int
) -> None:
def objective(trial: Trial, low: int, high: int) -> float:
v = trial.suggest_float("x", low, high)
v += trial.suggest_int("y", low, high)
return v
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = sampler_class()
study = optuna.study.create_study(sampler=sampler)
study.optimize(lambda t: objective(t, 0, 10), n_trials=10)
study.optimize(lambda t: objective(t, second_low, second_high), n_trials=10)
assert len(study.trials) == 20
assert all(t.state == TrialState.COMPLETE for t in study.trials)
# We add tests for constant objective functions to ensure the reproducibility of sorting.
@parametrize_sampler_with_seed
@pytest.mark.slow
@pytest.mark.parametrize("objective_func", [lambda *args: sum(args), lambda *args: 0.0])
def test_reproducible(sampler_class: Callable[[int], BaseSampler], objective_func: Any) -> None:
def objective(trial: Trial) -> float:
a = trial.suggest_float("a", 1, 9)
b = trial.suggest_float("b", 1, 9, log=True)
c = trial.suggest_float("c", 1, 9, step=1)
d = trial.suggest_int("d", 1, 9)
e = trial.suggest_int("e", 1, 9, log=True)
f = trial.suggest_int("f", 1, 9, step=2)
g = trial.suggest_categorical("g", range(1, 10))
return objective_func(a, b, c, d, e, f, g)
study = optuna.create_study(sampler=sampler_class(1))
study.optimize(objective, n_trials=15)
study_same_seed = optuna.create_study(sampler=sampler_class(1))
study_same_seed.optimize(objective, n_trials=15)
for i in range(15):
assert study.trials[i].params == study_same_seed.trials[i].params
study_different_seed = optuna.create_study(sampler=sampler_class(2))
study_different_seed.optimize(objective, n_trials=15)
assert any(
[study.trials[i].params != study_different_seed.trials[i].params for i in range(15)]
)
@pytest.mark.slow
@parametrize_sampler_with_seed
def test_reseed_rng_change_sampling(sampler_class: Callable[[int], BaseSampler]) -> None:
def objective(trial: Trial) -> float:
a = trial.suggest_float("a", 1, 9)
b = trial.suggest_float("b", 1, 9, log=True)
c = trial.suggest_float("c", 1, 9, step=1)
d = trial.suggest_int("d", 1, 9)
e = trial.suggest_int("e", 1, 9, log=True)
f = trial.suggest_int("f", 1, 9, step=2)
g = trial.suggest_categorical("g", range(1, 10))
return a + b + c + d + e + f + g
sampler = sampler_class(1)
study = optuna.create_study(sampler=sampler)
study.optimize(objective, n_trials=15)
sampler_different_seed = sampler_class(1)
sampler_different_seed.reseed_rng()
study_different_seed = optuna.create_study(sampler=sampler_different_seed)
study_different_seed.optimize(objective, n_trials=15)
assert any(
[study.trials[i].params != study_different_seed.trials[i].params for i in range(15)]
)
# This function is used only in test_reproducible_in_other_process, but declared at top-level
# because local function cannot be pickled, which occurs within multiprocessing.
def run_optimize(
k: int,
sampler_name: str,
sequence_dict: DictProxy,
hash_dict: DictProxy,
) -> None:
def objective(trial: Trial) -> float:
a = trial.suggest_float("a", 1, 9)
b = trial.suggest_float("b", 1, 9, log=True)
c = trial.suggest_float("c", 1, 9, step=1)
d = trial.suggest_int("d", 1, 9)
e = trial.suggest_int("e", 1, 9, log=True)
f = trial.suggest_int("f", 1, 9, step=2)
g = trial.suggest_categorical("g", range(1, 10))
return a + b + c + d + e + f + g
hash_dict[k] = hash("nondeterministic hash")
sampler = sampler_class_with_seed[sampler_name](1)
study = optuna.create_study(sampler=sampler)
study.optimize(objective, n_trials=15)
sequence_dict[k] = list(study.trials[-1].params.values())
@pytest.fixture
def unset_seed_in_test(request: SubRequest) -> None:
# Unset the hashseed at beginning and restore it at end regardless of an exception in the test.
# See https://docs.pytest.org/en/stable/how-to/fixtures.html#adding-finalizers-directly
# for details.
hash_seed = os.getenv("PYTHONHASHSEED")
if hash_seed is not None:
del os.environ["PYTHONHASHSEED"]
def restore_seed() -> None:
if hash_seed is not None:
os.environ["PYTHONHASHSEED"] = hash_seed
request.addfinalizer(restore_seed)
@pytest.mark.slow
@parametrize_sampler_name_with_seed
def test_reproducible_in_other_process(sampler_name: str, unset_seed_in_test: None) -> None:
# This test should be tested without `PYTHONHASHSEED`. However, some tool such as tox
# set the environmental variable "PYTHONHASHSEED" by default.
# To do so, this test calls a finalizer: `unset_seed_in_test`.
# Multiprocessing supports three way to start a process.
# We use `spawn` option to create a child process as a fresh python process.
# For more detail, see https://github.com/optuna/optuna/pull/3187#issuecomment-997673037.
multiprocessing.set_start_method("spawn", force=True)
manager = multiprocessing.Manager()
sequence_dict: DictProxy = manager.dict()
hash_dict: DictProxy = manager.dict()
for i in range(3):
p = multiprocessing.Process(
target=run_optimize, args=(i, sampler_name, sequence_dict, hash_dict)
)
p.start()
p.join()
# Hashes are expected to be different because string hashing is nondeterministic per process.
assert not (hash_dict[0] == hash_dict[1] == hash_dict[2])
# But the sequences are expected to be the same.
assert sequence_dict[0] == sequence_dict[1] == sequence_dict[2]
@pytest.mark.parametrize("n_jobs", [1, 2])
@parametrize_relative_sampler
def test_trial_relative_params(
n_jobs: int, relative_sampler_class: Callable[[], BaseSampler]
) -> None:
# TODO(nabenabe): Consider moving this test to study.
sampler = relative_sampler_class()
study = optuna.study.create_study(sampler=sampler)
def objective(trial: Trial) -> float:
assert trial._relative_params is None
trial.suggest_float("x", -10, 10)
trial.suggest_float("y", -10, 10)
assert trial._relative_params is not None
return -1
study.optimize(objective, n_trials=10, n_jobs=n_jobs)
| FixedSampler |
python | getsentry__sentry | src/sentry/integrations/pipeline.py | {
"start": 1900,
"end": 3688
} | class ____(TypedDict):
metadata: dict[str, Any]
name: str
status: int
def ensure_integration(key: str, data: IntegrationData) -> Integration:
defaults: _IntegrationDefaults = {
"metadata": data.get("metadata", {}),
"name": data.get("name", data["external_id"]),
"status": ObjectStatus.ACTIVE,
}
integration, created = Integration.objects.get_or_create(
provider=key, external_id=data["external_id"], defaults=defaults
)
if not created:
integration.update(**defaults)
return integration
def is_violating_region_restriction(organization_id: int, integration_id: int):
"""
Returns True if the organization_id provided does NOT reside within the same region as other
organizations which have installed the provided integration.
"""
if SiloMode.get_current_mode() == SiloMode.MONOLITH:
return False
ois = OrganizationIntegration.objects.filter(integration_id=integration_id)
if len(ois) == 0:
return False
logger_extra = {
"integration_id": integration_id,
"organization_id": organization_id,
}
organization_ids = {oi.organization_id for oi in ois}
region_names = (
OrganizationMapping.objects.filter(organization_id__in=organization_ids)
.values_list("region_name", flat=True)
.distinct()
)
if len(region_names) > 1:
logger.error("region_violation", extra={"regions": region_names, **logger_extra})
try:
mapping = OrganizationMapping.objects.get(organization_id=organization_id)
except OrganizationMapping.DoesNotExist:
logger.exception("mapping_missing", extra=logger_extra)
return True
return mapping.region_name not in region_names
| _IntegrationDefaults |
python | allegroai__clearml | clearml/backend_api/services/v2_9/events.py | {
"start": 14485,
"end": 19468
} | class ____(NonStrictDataModel):
"""
An entire plot (not single datapoint) and it's layout.
Used for plotting ROC curves, confidence matrices, etc. when evaluating the net.
:param timestamp: Epoch milliseconds UTC, will be set by the server if not set.
:type timestamp: float
:param task: Task ID (required)
:type task: str
:param iter: Iteration
:type iter: int
:param metric: Metric name, e.g. 'count', 'loss', 'accuracy'
:type metric: str
:param variant: E.g. 'class_1', 'total', 'average
:type variant: str
:param plot_str: An entire plot (not single datapoint) and it's layout. Used
for plotting ROC curves, confidence matrices, etc. when evaluating the net.
:type plot_str: str
"""
_schema = {
"description": " An entire plot (not single datapoint) and it's layout.\n Used for plotting ROC curves, confidence matrices, etc. when evaluating the net.",
"properties": {
"iter": {"description": "Iteration", "type": "integer"},
"metric": {
"description": "Metric name, e.g. 'count', 'loss', 'accuracy'",
"type": "string",
},
"plot_str": {
"description": "An entire plot (not single datapoint) and it's layout.\n Used for plotting ROC curves, confidence matrices, etc. when evaluating the net.\n ",
"type": "string",
},
"task": {"description": "Task ID (required)", "type": "string"},
"timestamp": {
"description": "Epoch milliseconds UTC, will be set by the server if not set.",
"type": ["number", "null"],
},
"type": {"const": "plot", "description": "'plot'"},
"variant": {
"description": "E.g. 'class_1', 'total', 'average",
"type": "string",
},
},
"required": ["task", "type"],
"type": "object",
}
def __init__(
self,
task: str,
timestamp: Optional[float] = None,
iter: Optional[int] = None,
metric: Optional[str] = None,
variant: Optional[str] = None,
plot_str: Optional[str] = None,
**kwargs: Any
) -> None:
super(MetricsPlotEvent, self).__init__(**kwargs)
self.timestamp = timestamp
self.task = task
self.iter = iter
self.metric = metric
self.variant = variant
self.plot_str = plot_str
@schema_property("timestamp")
def timestamp(self) -> Optional[float]:
return self._property_timestamp
@timestamp.setter
def timestamp(self, value: Optional[float]) -> None:
if value is None:
self._property_timestamp = None
return
self.assert_isinstance(value, "timestamp", six.integer_types + (float,))
self._property_timestamp = value
@schema_property("type")
def type(self) -> Any:
return "plot"
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("iter")
def iter(self) -> Optional[int]:
return self._property_iter
@iter.setter
def iter(self, value: Optional[int]) -> None:
if value is None:
self._property_iter = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "iter", six.integer_types)
self._property_iter = value
@schema_property("metric")
def metric(self) -> Optional[str]:
return self._property_metric
@metric.setter
def metric(self, value: Optional[str]) -> None:
if value is None:
self._property_metric = None
return
self.assert_isinstance(value, "metric", six.string_types)
self._property_metric = value
@schema_property("variant")
def variant(self) -> Optional[str]:
return self._property_variant
@variant.setter
def variant(self, value: Optional[str]) -> None:
if value is None:
self._property_variant = None
return
self.assert_isinstance(value, "variant", six.string_types)
self._property_variant = value
@schema_property("plot_str")
def plot_str(self) -> Optional[str]:
return self._property_plot_str
@plot_str.setter
def plot_str(self, value: Optional[str]) -> None:
if value is None:
self._property_plot_str = None
return
self.assert_isinstance(value, "plot_str", six.string_types)
self._property_plot_str = value
| MetricsPlotEvent |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pydoclint/DOC403_google.py | {
"start": 285,
"end": 1106
} | class ____:
# DOC403
def foo(self) -> str:
"""
Do something
Args:
num (int): A number
Yields:
str: A string
"""
print('test')
# OK
def bar(self) -> str:
"""
Do something
Args:
num (int): A number
"""
print('test')
import typing
# OK
def foo() -> typing.Generator[None, None, None]:
"""
Do something
Yields:
When X.
"""
yield
# OK
def foo() -> typing.Generator[None, None, None]:
"""
Do something
Yields:
When X.
"""
yield None
# OK
def foo():
"""
Do something
Yields:
When X.
"""
yield
# OK
def foo():
"""
Do something
Yields:
When X.
"""
yield None
| Bar |
python | boto__boto3 | tests/unit/s3/test_inject.py | {
"start": 752,
"end": 2425
} | class ____(unittest.TestCase):
def test_inject_upload_download_file_to_client(self):
class_attributes = {}
inject.inject_s3_transfer_methods(class_attributes=class_attributes)
assert 'upload_file' in class_attributes
assert 'download_file' in class_attributes
def test_upload_file_proxies_to_transfer_object(self):
with mock.patch('boto3.s3.inject.S3Transfer') as transfer:
inject.upload_file(
mock.sentinel.CLIENT,
Filename='filename',
Bucket='bucket',
Key='key',
)
transfer_in_context_manager = (
transfer.return_value.__enter__.return_value
)
transfer_in_context_manager.upload_file.assert_called_with(
filename='filename',
bucket='bucket',
key='key',
extra_args=None,
callback=None,
)
def test_download_file_proxies_to_transfer_object(self):
with mock.patch('boto3.s3.inject.S3Transfer') as transfer:
inject.download_file(
mock.sentinel.CLIENT,
Bucket='bucket',
Key='key',
Filename='filename',
)
transfer_in_context_manager = (
transfer.return_value.__enter__.return_value
)
transfer_in_context_manager.download_file.assert_called_with(
bucket='bucket',
key='key',
filename='filename',
extra_args=None,
callback=None,
)
| TestInjectTransferMethods |
python | huggingface__transformers | src/transformers/models/switch_transformers/modeling_switch_transformers.py | {
"start": 30747,
"end": 42570
} | class ____(SwitchTransformersPreTrainedModel):
_can_record_outputs = {
"hidden_states": SwitchTransformersBlock,
"attentions": OutputRecorder(SwitchTransformersAttention, index=-1, layer_name="layer.0"),
"cross_attentions": OutputRecorder(SwitchTransformersAttention, index=-1, layer_name="layer.1"),
"router_logits": SwitchTransformersTop1Router,
}
def __init__(self, config):
super().__init__(config)
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model)
self.is_decoder = config.is_decoder
sparse_step = config.decoder_sparse_step if self.is_decoder else config.encoder_sparse_step
config.num_layers = config.num_decoder_layers if self.is_decoder else config.num_layers
self.block = nn.ModuleList()
for i in range(config.num_layers):
is_sparse = (i % sparse_step == 1 or sparse_step == 1) if sparse_step > 0 else False
self.block.append(
SwitchTransformersBlock(
config, has_relative_attention_bias=bool(i == 0), is_sparse=is_sparse, layer_idx=i
)
)
self.final_layer_norm = SwitchTransformersLayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
self.post_init()
self.gradient_checkpointing = False
@check_model_inputs()
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
inputs_embeds=None,
past_key_values=None,
use_cache=None,
cache_position=None,
**kwargs: Unpack[TransformersKwargs],
):
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
if self.embed_tokens is None:
raise ValueError("You have to initialize the model with valid token embeddings")
inputs_embeds = self.embed_tokens(input_ids)
batch_size, seq_length = inputs_embeds.shape[:2]
if use_cache is True:
if not self.is_decoder:
raise ValueError(f"`use_cache` can only be set to `True` if {self} is used as a decoder")
if self.is_decoder:
if use_cache and past_key_values is None:
if self.config.is_encoder_decoder:
past_key_values = EncoderDecoderCache(
DynamicCache(config=self.config), DynamicCache(config=self.config)
)
else:
past_key_values = DynamicCache(config=self.config)
elif not self.is_decoder:
# do not pass cache object down the line for encoder stack
# it messes indexing later in decoder-stack because cache object is modified in-place
past_key_values = None
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(
past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device
)
if attention_mask is None and not is_torchdynamo_compiling():
# required mask seq length can be calculated via length of past cache
mask_seq_length = past_key_values_length + seq_length
attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
if self.config.is_decoder:
causal_mask = self._update_causal_mask(
attention_mask,
inputs_embeds,
cache_position,
past_key_values.self_attention_cache
if isinstance(past_key_values, EncoderDecoderCache)
else past_key_values,
)
else:
causal_mask = attention_mask[:, None, None, :]
causal_mask = causal_mask.to(dtype=inputs_embeds.dtype)
causal_mask = (1.0 - causal_mask) * torch.finfo(inputs_embeds.dtype).min
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
position_bias = None
encoder_decoder_position_bias = None
hidden_states = self.dropout(inputs_embeds)
for i, layer_module in enumerate(self.block):
hidden_states = layer_module(
hidden_states,
causal_mask,
position_bias,
encoder_hidden_states,
encoder_extended_attention_mask,
encoder_decoder_position_bias,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
return MoEModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
def _update_causal_mask(
self,
attention_mask: Union[torch.Tensor, "BlockMask"],
input_tensor: torch.Tensor,
cache_position: torch.Tensor,
past_key_values: Cache,
output_attentions: bool = False,
):
if self.config._attn_implementation == "flash_attention_2":
if attention_mask is not None and (attention_mask == 0.0).any():
return attention_mask
return None
if self.config._attn_implementation == "flex_attention":
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask)
return attention_mask
# For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
# order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
# to infer the attention mask.
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False
# When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions:
if AttentionMaskConverter._ignore_causal_mask_sdpa(
attention_mask,
inputs_embeds=input_tensor,
past_key_values_length=past_seen_tokens,
is_training=self.training,
):
return None
dtype = input_tensor.dtype
sequence_length = input_tensor.shape[1]
if using_compilable_cache:
target_length = past_key_values.get_max_cache_shape()
else:
target_length = (
attention_mask.shape[-1]
if isinstance(attention_mask, torch.Tensor)
else past_seen_tokens + sequence_length + 1
)
# In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
attention_mask,
sequence_length=sequence_length,
target_length=target_length,
dtype=dtype,
cache_position=cache_position,
batch_size=input_tensor.shape[0],
)
if (
self.config._attn_implementation == "sdpa"
and attention_mask is not None
and attention_mask.device.type in ["cuda", "xpu", "npu"]
and not output_attentions
):
# Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
# using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
# Details: https://github.com/pytorch/pytorch/issues/110213
min_dtype = torch.finfo(dtype).min
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(
attention_mask: torch.Tensor,
sequence_length: int,
target_length: int,
dtype: torch.dtype,
cache_position: torch.Tensor,
batch_size: int,
**kwargs,
):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`torch.Tensor`):
Batch size.
"""
if attention_mask is not None and attention_mask.dim() == 4:
# In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full(
(sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
causal_mask.device
)
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
padding_mask, min_dtype
)
return causal_mask
@auto_docstring
| SwitchTransformersStack |
python | python-markdown__markdown | markdown/inlinepatterns.py | {
"start": 13342,
"end": 13658
} | class ____(InlineProcessor):
""" Return a simple text of `group(1)` of a Pattern. """
def handleMatch(self, m: re.Match[str], data: str) -> tuple[str, int, int]:
""" Return string content of `group(1)` of a matching pattern. """
return m.group(1), m.start(0), m.end(0)
| SimpleTextInlineProcessor |
python | mkdocs__mkdocs | mkdocs/tests/config/config_options_tests.py | {
"start": 3233,
"end": 5294
} | class ____(TestCase):
def test_required(self) -> None:
class Schema(Config):
option = c.Choice(('python', 'node'))
conf = self.get_config(Schema, {'option': 'python'})
assert_type(conf.option, str)
self.assertEqual(conf.option, 'python')
def test_optional(self) -> None:
class Schema(Config):
option = c.Optional(c.Choice(('python', 'node')))
conf = self.get_config(Schema, {'option': 'python'})
assert_type(conf.option, Optional[str])
self.assertEqual(conf.option, 'python')
conf = self.get_config(Schema, {})
self.assertEqual(conf.option, None)
conf = self.get_config(Schema, {'option': None})
self.assertEqual(conf.option, None)
def test_default(self) -> None:
class Schema(Config):
option = c.Choice(('a', 'b', 'c'), default='b')
conf = self.get_config(Schema, {})
assert_type(conf.option, str)
self.assertEqual(conf.option, 'b')
conf = self.get_config(Schema, {})
self.assertEqual(conf.option, 'b')
conf = self.get_config(Schema, {'option': None})
self.assertEqual(conf.option, 'b')
with self.expect_error(option="Expected one of: ('a', 'b', 'c') but received: 'go'"):
self.get_config(Schema, {'option': 'go'})
def test_invalid_default(self) -> None:
with self.assertRaises(ValueError):
c.Choice(('a', 'b'), default='c')
with self.assertRaises(ValueError):
c.Choice(('a', 'b'), default='c', required=True)
def test_invalid_choice(self) -> None:
class Schema(Config):
option = c.Choice(('python', 'node'))
with self.expect_error(option="Expected one of: ('python', 'node') but received: 'go'"):
self.get_config(Schema, {'option': 'go'})
def test_invalid_choices(self) -> None:
with self.assertRaises(ValueError):
c.Choice('')
with self.assertRaises(ValueError):
c.Choice([])
| ChoiceTest |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/dsl/translate.py | {
"start": 1574,
"end": 5836
} | class ____:
"""
Translates polars-internal IR nodes and expressions to our representation.
Parameters
----------
visitor
Polars NodeTraverser object
engine
GPU engine configuration.
"""
def __init__(self, visitor: NodeTraverser, engine: GPUEngine):
self.visitor = visitor
self.config_options = config.ConfigOptions.from_polars_engine(engine)
self.errors: list[Exception] = []
self._cache_nodes: dict[int, ir.Cache] = {}
self._expr_context: ExecutionContext = ExecutionContext.FRAME
def translate_ir(self, *, n: int | None = None) -> ir.IR:
"""
Translate a polars-internal IR node to our representation.
Parameters
----------
visitor
Polars NodeTraverser object
n
Optional node to start traversing from, if not provided uses
current polars-internal node.
Returns
-------
Translated IR object
Raises
------
NotImplementedError
If the version of Polars IR is unsupported.
Notes
-----
Any expression nodes that cannot be translated are replaced by
:class:`expr.ErrorNode` nodes and collected in the the `errors` attribute.
After translation is complete, this list of errors should be inspected
to determine if the query is supported.
"""
ctx: AbstractContextManager[None] = (
set_node(self.visitor, n) if n is not None else noop_context
)
# IR is versioned with major.minor, minor is bumped for backwards
# compatible changes (e.g. adding new nodes), major is bumped for
# incompatible changes (e.g. renaming nodes).
if (version := self.visitor.version()) >= (10, 1):
e = NotImplementedError(
f"No support for polars IR {version=}"
) # pragma: no cover; no such version for now.
self.errors.append(e) # pragma: no cover
raise e # pragma: no cover
with ctx:
polars_schema = self.visitor.get_schema()
try:
schema = {k: DataType(v) for k, v in polars_schema.items()}
except Exception as e:
self.errors.append(NotImplementedError(str(e)))
return ir.ErrorNode({}, str(e))
try:
node = self.visitor.view_current_node()
except Exception as e:
self.errors.append(e)
return ir.ErrorNode(schema, str(e))
try:
result = _translate_ir(node, self, schema)
except Exception as e:
self.errors.append(e)
return ir.ErrorNode(schema, str(e))
if any(
isinstance(dtype, pl.Null)
for dtype in pl.datatypes.unpack_dtypes(*polars_schema.values())
):
error = NotImplementedError(
f"No GPU support for {result} with Null column dtype."
)
self.errors.append(error)
return ir.ErrorNode(schema, str(error))
return result
def translate_expr(self, *, n: int, schema: Schema) -> expr.Expr:
"""
Translate a polars-internal expression IR into our representation.
Parameters
----------
n
Node to translate, an integer referencing a polars internal node.
schema
Schema of the IR node this expression uses as evaluation context.
Returns
-------
Translated IR object.
Notes
-----
Any expression nodes that cannot be translated are replaced by
:class:`expr.ErrorExpr` nodes and collected in the the `errors` attribute.
After translation is complete, this list of errors should be inspected
to determine if the query is supported.
"""
node = self.visitor.view_expression(n)
dtype = DataType(self.visitor.get_dtype(n))
try:
return _translate_expr(node, self, dtype, schema)
except Exception as e:
self.errors.append(e)
return expr.ErrorExpr(dtype, str(e))
| Translator |
python | pandas-dev__pandas | pandas/tests/io/json/test_ujson.py | {
"start": 26855,
"end": 35202
} | class ____:
def test_dataframe(self, orient):
dtype = np.int64
df = DataFrame(
[[1, 2, 3], [4, 5, 6]],
index=["a", "b"],
columns=["x", "y", "z"],
dtype=dtype,
)
encode_kwargs = {} if orient is None else {"orient": orient}
assert (df.dtypes == dtype).all()
output = ujson.ujson_loads(ujson.ujson_dumps(df, **encode_kwargs))
assert (df.dtypes == dtype).all()
# Ensure proper DataFrame initialization.
if orient == "split":
dec = _clean_dict(output)
output = DataFrame(**dec)
else:
output = DataFrame(output)
# Corrections to enable DataFrame comparison.
if orient == "values":
df.columns = [0, 1, 2]
df.index = [0, 1]
elif orient == "records":
df.index = [0, 1]
elif orient == "index":
df = df.transpose()
assert (df.dtypes == dtype).all()
tm.assert_frame_equal(output, df)
def test_dataframe_nested(self, orient):
df = DataFrame(
[[1, 2, 3], [4, 5, 6]], index=["a", "b"], columns=["x", "y", "z"]
)
nested = {"df1": df, "df2": df.copy()}
kwargs = {} if orient is None else {"orient": orient}
exp = {
"df1": ujson.ujson_loads(ujson.ujson_dumps(df, **kwargs)),
"df2": ujson.ujson_loads(ujson.ujson_dumps(df, **kwargs)),
}
assert ujson.ujson_loads(ujson.ujson_dumps(nested, **kwargs)) == exp
def test_series(self, orient):
dtype = np.int64
s = Series(
[10, 20, 30, 40, 50, 60],
name="series",
index=[6, 7, 8, 9, 10, 15],
dtype=dtype,
).sort_values()
assert s.dtype == dtype
encode_kwargs = {} if orient is None else {"orient": orient}
output = ujson.ujson_loads(ujson.ujson_dumps(s, **encode_kwargs))
assert s.dtype == dtype
if orient == "split":
dec = _clean_dict(output)
output = Series(**dec)
else:
output = Series(output)
if orient in (None, "index"):
s.name = None
output = output.sort_values()
s.index = ["6", "7", "8", "9", "10", "15"]
elif orient in ("records", "values"):
s.name = None
s.index = [0, 1, 2, 3, 4, 5]
assert s.dtype == dtype
tm.assert_series_equal(output, s)
def test_series_nested(self, orient):
s = Series(
[10, 20, 30, 40, 50, 60], name="series", index=[6, 7, 8, 9, 10, 15]
).sort_values()
nested = {"s1": s, "s2": s.copy()}
kwargs = {} if orient is None else {"orient": orient}
exp = {
"s1": ujson.ujson_loads(ujson.ujson_dumps(s, **kwargs)),
"s2": ujson.ujson_loads(ujson.ujson_dumps(s, **kwargs)),
}
assert ujson.ujson_loads(ujson.ujson_dumps(nested, **kwargs)) == exp
def test_index(self):
i = Index([23, 45, 18, 98, 43, 11], name="index")
# Column indexed.
output = Index(ujson.ujson_loads(ujson.ujson_dumps(i)), name="index")
tm.assert_index_equal(i, output)
dec = _clean_dict(ujson.ujson_loads(ujson.ujson_dumps(i, orient="split")))
output = Index(**dec)
tm.assert_index_equal(i, output)
assert i.name == output.name
tm.assert_index_equal(i, output)
assert i.name == output.name
output = Index(
ujson.ujson_loads(ujson.ujson_dumps(i, orient="values")), name="index"
)
tm.assert_index_equal(i, output)
output = Index(
ujson.ujson_loads(ujson.ujson_dumps(i, orient="records")), name="index"
)
tm.assert_index_equal(i, output)
output = Index(
ujson.ujson_loads(ujson.ujson_dumps(i, orient="index")), name="index"
)
tm.assert_index_equal(i, output)
def test_datetime_index(self):
date_unit = "ns"
# freq doesn't round-trip
rng = DatetimeIndex(
list(date_range("1/1/2000", periods=20, unit="ns")), freq=None
)
encoded = ujson.ujson_dumps(rng, date_unit=date_unit)
decoded = DatetimeIndex(np.array(ujson.ujson_loads(encoded)))
tm.assert_index_equal(rng, decoded)
ts = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng)
decoded = Series(ujson.ujson_loads(ujson.ujson_dumps(ts, date_unit=date_unit)))
idx_values = decoded.index.values.astype(np.int64)
decoded.index = DatetimeIndex(idx_values)
tm.assert_series_equal(ts, decoded)
@pytest.mark.parametrize(
"invalid_arr",
[
"[31337,]", # Trailing comma.
"[,31337]", # Leading comma.
"[]]", # Unmatched bracket.
"[,]", # Only comma.
],
)
def test_decode_invalid_array(self, invalid_arr):
msg = (
"Expected object or value|Trailing data|"
"Unexpected character found when decoding array value"
)
with pytest.raises(ValueError, match=msg):
ujson.ujson_loads(invalid_arr)
@pytest.mark.parametrize("arr", [[], [31337]])
def test_decode_array(self, arr):
assert arr == ujson.ujson_loads(str(arr))
@pytest.mark.parametrize("extreme_num", [9223372036854775807, -9223372036854775808])
def test_decode_extreme_numbers(self, extreme_num):
assert extreme_num == ujson.ujson_loads(str(extreme_num))
@pytest.mark.parametrize("too_extreme_num", [f"{2**64}", f"{-(2**63) - 1}"])
def test_decode_too_extreme_numbers(self, too_extreme_num):
with pytest.raises(
ValueError,
match="Value is too big|Value is too small",
):
ujson.ujson_loads(too_extreme_num)
def test_decode_with_trailing_whitespaces(self):
assert {} == ujson.ujson_loads("{}\n\t ")
def test_decode_with_trailing_non_whitespaces(self):
with pytest.raises(ValueError, match="Trailing data"):
ujson.ujson_loads("{}\n\t a")
@pytest.mark.parametrize("value", [f"{2**64}", f"{-(2**63) - 1}"])
def test_decode_array_with_big_int(self, value):
with pytest.raises(
ValueError,
match="Value is too big|Value is too small",
):
ujson.ujson_loads(value)
@pytest.mark.parametrize(
"float_number",
[
1.1234567893,
1.234567893,
1.34567893,
1.4567893,
1.567893,
1.67893,
1.7893,
1.893,
1.3,
],
)
@pytest.mark.parametrize("sign", [-1, 1])
def test_decode_floating_point(self, sign, float_number):
float_number *= sign
tm.assert_almost_equal(
float_number, ujson.ujson_loads(str(float_number)), rtol=1e-15
)
def test_encode_big_set(self):
s = set(range(100000))
# Make sure no Exception is raised.
ujson.ujson_dumps(s)
def test_encode_empty_set(self):
assert "[]" == ujson.ujson_dumps(set())
def test_encode_set(self):
s = {1, 2, 3, 4, 5, 6, 7, 8, 9}
enc = ujson.ujson_dumps(s)
dec = ujson.ujson_loads(enc)
for v in dec:
assert v in s
@pytest.mark.parametrize(
"td",
[
Timedelta(days=366),
Timedelta(days=-1),
Timedelta(hours=13, minutes=5, seconds=5),
Timedelta(hours=13, minutes=20, seconds=30),
Timedelta(days=-1, nanoseconds=5),
Timedelta(nanoseconds=1),
Timedelta(microseconds=1, nanoseconds=1),
Timedelta(milliseconds=1, microseconds=1, nanoseconds=1),
Timedelta(milliseconds=999, microseconds=999, nanoseconds=999),
],
)
def test_encode_timedelta_iso(self, td):
# GH 28256
result = ujson.ujson_dumps(td, iso_dates=True)
expected = f'"{td.isoformat()}"'
assert result == expected
def test_encode_periodindex(self):
# GH 46683
p = PeriodIndex(["2022-04-06", "2022-04-07"], freq="D")
df = DataFrame(index=p)
assert df.to_json() == "{}"
| TestPandasJSONTests |
python | apache__airflow | task-sdk/tests/task_sdk/definitions/test_module_loading.py | {
"start": 894,
"end": 1778
} | class ____:
@pytest.mark.parametrize(
("path", "expected"),
[
pytest.param("valid_path", True, id="module_no_dots"),
pytest.param("valid.dot.path", True, id="standard_dotpath"),
pytest.param("package.sub_package.module", True, id="dotpath_with_underscores"),
pytest.param("MyPackage.MyClass", True, id="mixed_case_path"),
pytest.param("invalid..path", False, id="consecutive_dots_fails"),
pytest.param(".invalid.path", False, id="leading_dot_fails"),
pytest.param("invalid.path.", False, id="trailing_dot_fails"),
pytest.param("1invalid.path", False, id="leading_number_fails"),
pytest.param(42, False, id="not_a_string"),
],
)
def test_is_valid_dotpath(self, path, expected):
assert is_valid_dotpath(path) == expected
| TestModuleLoading |
python | chroma-core__chroma | chromadb/segment/impl/vector/brute_force_index.py | {
"start": 331,
"end": 5420
} | class ____:
"""A lightweight, numpy based brute force index that is used for batches that have not been indexed into hnsw yet. It is not
thread safe and callers should ensure that only one thread is accessing it at a time.
"""
id_to_index: Dict[str, int]
index_to_id: Dict[int, str]
id_to_seq_id: Dict[str, int]
deleted_ids: Set[str]
free_indices: List[int]
size: int
dimensionality: int
distance_fn: Callable[[npt.NDArray[Any], npt.NDArray[Any]], float]
vectors: npt.NDArray[Any]
def __init__(self, size: int, dimensionality: int, space: str = "l2"):
if space == "l2":
self.distance_fn = distance_functions.l2
elif space == "ip":
self.distance_fn = distance_functions.ip
elif space == "cosine":
self.distance_fn = distance_functions.cosine
else:
raise Exception(f"Unknown distance function: {space}")
self.id_to_index = {}
self.index_to_id = {}
self.id_to_seq_id = {}
self.deleted_ids = set()
self.free_indices = list(range(size))
self.size = size
self.dimensionality = dimensionality
self.vectors = np.zeros((size, dimensionality))
def __len__(self) -> int:
return len(self.id_to_index)
def clear(self) -> None:
self.id_to_index = {}
self.index_to_id = {}
self.id_to_seq_id = {}
self.deleted_ids.clear()
self.free_indices = list(range(self.size))
self.vectors.fill(0)
def upsert(self, records: List[LogRecord]) -> None:
if len(records) + len(self) > self.size:
raise Exception(
"Index with capacity {} and {} current entries cannot add {} records".format(
self.size, len(self), len(records)
)
)
for i, record in enumerate(records):
id = record["record"]["id"]
vector = record["record"]["embedding"]
self.id_to_seq_id[id] = record["log_offset"]
if id in self.deleted_ids:
self.deleted_ids.remove(id)
# TODO: It may be faster to use multi-index selection on the vectors array
if id in self.id_to_index:
# Update
index = self.id_to_index[id]
self.vectors[index] = vector
else:
# Add
next_index = self.free_indices.pop()
self.id_to_index[id] = next_index
self.index_to_id[next_index] = id
self.vectors[next_index] = vector
def delete(self, records: List[LogRecord]) -> None:
for record in records:
id = record["record"]["id"]
if id in self.id_to_index:
index = self.id_to_index[id]
self.deleted_ids.add(id)
del self.id_to_index[id]
del self.index_to_id[index]
del self.id_to_seq_id[id]
self.vectors[index].fill(np.nan)
self.free_indices.append(index)
else:
logger.warning(f"Delete of nonexisting embedding ID: {id}")
def has_id(self, id: str) -> bool:
"""Returns whether the index contains the given ID"""
return id in self.id_to_index and id not in self.deleted_ids
def get_vectors(
self, ids: Optional[Sequence[str]] = None
) -> Sequence[VectorEmbeddingRecord]:
target_ids = ids or self.id_to_index.keys()
return [
VectorEmbeddingRecord(
id=id,
embedding=self.vectors[self.id_to_index[id]],
)
for id in target_ids
]
def query(self, query: VectorQuery) -> Sequence[Sequence[VectorQueryResult]]:
np_query = np.array(query["vectors"], dtype=np.float32)
allowed_ids = (
None if query["allowed_ids"] is None else set(query["allowed_ids"])
)
distances = np.apply_along_axis(
lambda query: np.apply_along_axis(self.distance_fn, 1, self.vectors, query),
1,
np_query,
)
indices = np.argsort(distances)
# Filter out deleted labels
filtered_results = []
for i, index_list in enumerate(indices):
curr_results = []
for j in index_list:
# If the index is in the index_to_id map, then it has been added
if j in self.index_to_id:
id = self.index_to_id[j]
if id not in self.deleted_ids and (
allowed_ids is None or id in allowed_ids
):
curr_results.append(
VectorQueryResult(
id=id,
distance=distances[i][j].item(),
embedding=self.vectors[j],
)
)
filtered_results.append(curr_results)
return filtered_results
| BruteForceIndex |
python | pytorch__pytorch | torch/_export/serde/serialize.py | {
"start": 2636,
"end": 5227
} | class ____(RuntimeError):
pass
def _reverse_map(d: dict[Any, Enum]):
return {v.value: k for k, v in d.items()}
MetaType = Union[
FakeTensor,
int,
torch.SymInt,
float,
torch.SymFloat,
bool,
torch.SymBool,
ep.CustomObjArgument,
]
DEFAULT_PICKLE_PROTOCOL = 2
ST_DELIMITER = ";"
_TORCH_TO_SERIALIZE_DTYPE = {
torch.uint8: ScalarType.BYTE,
torch.int8: ScalarType.CHAR,
torch.uint16: ScalarType.UINT16,
torch.int16: ScalarType.SHORT,
torch.int32: ScalarType.INT,
torch.int64: ScalarType.LONG,
torch.float16: ScalarType.HALF,
torch.float32: ScalarType.FLOAT,
torch.float64: ScalarType.DOUBLE,
torch.complex32: ScalarType.COMPLEXHALF,
torch.complex64: ScalarType.COMPLEXFLOAT,
torch.complex128: ScalarType.COMPLEXDOUBLE,
torch.bool: ScalarType.BOOL,
torch.bfloat16: ScalarType.BFLOAT16,
torch.float8_e4m3fn: ScalarType.FLOAT8E4M3FN,
torch.float8_e5m2: ScalarType.FLOAT8E5M2,
torch.float8_e4m3fnuz: ScalarType.FLOAT8E4M3FNUZ,
torch.float8_e5m2fnuz: ScalarType.FLOAT8E5M2FNUZ,
}
_SERIALIZE_TO_TORCH_DTYPE = _reverse_map(_TORCH_TO_SERIALIZE_DTYPE) # type: ignore[arg-type]
_TORCH_TO_SERIALIZE_LAYOUT = {
torch.sparse_coo: Layout.SparseCoo,
torch.sparse_csr: Layout.SparseCsr,
torch.sparse_csc: Layout.SparseCsc,
torch.sparse_bsr: Layout.SparseBsr,
torch.sparse_bsc: Layout.SparseBsc,
torch._mkldnn: Layout._mkldnn, # type: ignore[attr-defined]
torch.strided: Layout.Strided,
}
_SERIALIZE_TO_TORCH_LAYOUT = _reverse_map(_TORCH_TO_SERIALIZE_LAYOUT) # type: ignore[arg-type]
_TORCH_TO_SERIALIZE_MEMORY_FORMAT = {
torch.contiguous_format: MemoryFormat.ContiguousFormat,
torch.channels_last: MemoryFormat.ChannelsLast,
torch.channels_last_3d: MemoryFormat.ChannelsLast3d,
torch.preserve_format: MemoryFormat.PreserveFormat,
}
_SERIALIZE_TO_TORCH_MEMORY_FORMAT = _reverse_map(_TORCH_TO_SERIALIZE_MEMORY_FORMAT) # type: ignore[arg-type]
_SYM_OPS = {
operator.eq,
operator.ne,
operator.le,
operator.ge,
operator.lt,
operator.gt,
operator.neg,
operator.pos,
operator.and_,
operator.or_,
math.trunc,
torch.sym_not,
operator.mul,
operator.add,
operator.sub,
operator.floordiv,
operator.mod,
operator.pow,
torch.sym_int,
torch.sym_float,
torch.sym_ite,
torch.sym_max,
torch.sym_min,
torch.sym_sqrt,
operator.truediv,
operator.and_,
}
assert not any(isinstance(op, torch._ops.OpOverload) for op in _SYM_OPS)
@dataclass
| SerializeError |
python | huggingface__transformers | tests/models/focalnet/test_modeling_focalnet.py | {
"start": 1538,
"end": 8504
} | class ____:
def __init__(
self,
parent,
batch_size=13,
image_size=32,
patch_size=2,
num_channels=3,
embed_dim=16,
hidden_sizes=[32, 64, 128],
depths=[1, 2, 1],
num_heads=[2, 2, 4],
window_size=2,
mlp_ratio=2.0,
qkv_bias=True,
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
drop_path_rate=0.1,
hidden_act="gelu",
use_absolute_embeddings=False,
patch_norm=True,
initializer_range=0.02,
layer_norm_eps=1e-5,
is_training=True,
scope=None,
use_labels=True,
type_sequence_label_size=10,
encoder_stride=8,
out_features=["stage1", "stage2"],
out_indices=[1, 2],
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.hidden_sizes = hidden_sizes
self.depths = depths
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.patch_norm = patch_norm
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.is_training = is_training
self.scope = scope
self.use_labels = use_labels
self.type_sequence_label_size = type_sequence_label_size
self.encoder_stride = encoder_stride
self.out_features = out_features
self.out_indices = out_indices
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return FocalNetConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
embed_dim=self.embed_dim,
hidden_sizes=self.hidden_sizes,
depths=self.depths,
num_heads=self.num_heads,
window_size=self.window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=self.qkv_bias,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
drop_path_rate=self.drop_path_rate,
hidden_act=self.hidden_act,
use_absolute_embeddings=self.use_absolute_embeddings,
path_norm=self.patch_norm,
layer_norm_eps=self.layer_norm_eps,
initializer_range=self.initializer_range,
encoder_stride=self.encoder_stride,
out_features=self.out_features,
out_indices=self.out_indices,
)
def create_and_check_model(self, config, pixel_values, labels):
model = FocalNetModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
def create_and_check_backbone(self, config, pixel_values, labels):
model = FocalNetBackbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])
# verify channels
self.parent.assertEqual(len(model.channels), len(config.out_features))
self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])
# verify backbone works with out_features=None
config.out_features = None
model = FocalNetBackbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps), 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels), 1)
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
model = FocalNetForMaskedImageModeling(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(
result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
)
# test greyscale images
config.num_channels = 1
model = FocalNetForMaskedImageModeling(config)
model.to(torch_device)
model.eval()
pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
result = model(pixel_values)
self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
def create_and_check_for_image_classification(self, config, pixel_values, labels):
config.num_labels = self.type_sequence_label_size
model = FocalNetForImageClassification(config)
model.to(torch_device)
model.eval()
result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
# test greyscale images
config.num_channels = 1
model = FocalNetForImageClassification(config)
model.to(torch_device)
model.eval()
pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
result = model(pixel_values)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| FocalNetModelTester |
python | pennersr__django-allauth | allauth/socialaccount/providers/meetup/provider.py | {
"start": 268,
"end": 640
} | class ____(OAuth2Provider):
id = "meetup"
name = "Meetup"
account_class = MeetupAccount
oauth2_adapter_class = MeetupOAuth2Adapter
def extract_uid(self, data):
return str(data["id"])
def extract_common_fields(self, data):
return dict(email=data.get("email"), name=data.get("name"))
provider_classes = [MeetupProvider]
| MeetupProvider |
python | allegroai__clearml | clearml/backend_api/services/v2_20/tasks.py | {
"start": 338636,
"end": 339891
} | class ____(Request):
"""
Convert public tasks to private
:param ids: Ids of the tasks to convert. Only the tasks originated by the
company can be converted
:type ids: Sequence[str]
"""
_service = "tasks"
_action = "make_private"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"ids": {
"description": "Ids of the tasks to convert. Only the tasks originated by the company can be converted",
"items": {"type": "string"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
super(MakePrivateRequest, self).__init__(**kwargs)
self.ids = ids
@schema_property("ids")
def ids(self) -> Optional[List[str]]:
return self._property_ids
@ids.setter
def ids(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_ids = None
return
self.assert_isinstance(value, "ids", (list, tuple))
self.assert_isinstance(value, "ids", six.string_types, is_array=True)
self._property_ids = value
| MakePrivateRequest |
python | sympy__sympy | sympy/stats/drv_types.py | {
"start": 14464,
"end": 16889
} | class ____(SingleDiscreteDistribution):
_argnames = ('mu1', 'mu2')
set = S.Integers
@staticmethod
def check(mu1, mu2):
_value_check(mu1 >= 0, 'Parameter mu1 must be >= 0')
_value_check(mu2 >= 0, 'Parameter mu2 must be >= 0')
def pdf(self, k):
(mu1, mu2) = (self.mu1, self.mu2)
term1 = exp(-(mu1 + mu2)) * (mu1 / mu2) ** (k / 2)
term2 = besseli(k, 2 * sqrt(mu1 * mu2))
return term1 * term2
def _cdf(self, x):
raise NotImplementedError(
"Skellam doesn't have closed form for the CDF.")
def _characteristic_function(self, t):
(mu1, mu2) = (self.mu1, self.mu2)
return exp(-(mu1 + mu2) + mu1 * exp(I * t) + mu2 * exp(-I * t))
def _moment_generating_function(self, t):
(mu1, mu2) = (self.mu1, self.mu2)
return exp(-(mu1 + mu2) + mu1 * exp(t) + mu2 * exp(-t))
def Skellam(name, mu1, mu2):
r"""
Create a discrete random variable with a Skellam distribution.
Explanation
===========
The Skellam is the distribution of the difference N1 - N2
of two statistically independent random variables N1 and N2
each Poisson-distributed with respective expected values mu1 and mu2.
The density of the Skellam distribution is given by
.. math::
f(k) := e^{-(\mu_1+\mu_2)}(\frac{\mu_1}{\mu_2})^{k/2}I_k(2\sqrt{\mu_1\mu_2})
Parameters
==========
mu1 : A non-negative value
mu2 : A non-negative value
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Skellam, density, E, variance
>>> from sympy import Symbol, pprint
>>> z = Symbol("z", integer=True)
>>> mu1 = Symbol("mu1", positive=True)
>>> mu2 = Symbol("mu2", positive=True)
>>> X = Skellam("x", mu1, mu2)
>>> pprint(density(X)(z), use_unicode=False)
z
-
2
/mu1\ -mu1 - mu2 / _____ _____\
|---| *e *besseli\z, 2*\/ mu1 *\/ mu2 /
\mu2/
>>> E(X)
mu1 - mu2
>>> variance(X).expand()
mu1 + mu2
References
==========
.. [1] https://en.wikipedia.org/wiki/Skellam_distribution
"""
return rv(name, SkellamDistribution, mu1, mu2)
#-------------------------------------------------------------------------------
# Yule-Simon distribution ------------------------------------------------------------
| SkellamDistribution |
python | encode__starlette | starlette/datastructures.py | {
"start": 6988,
"end": 7837
} | class ____(Sequence[str]):
def __init__(self, value: str | Sequence[str]):
if isinstance(value, str):
splitter = shlex(value, posix=True)
splitter.whitespace = ","
splitter.whitespace_split = True
self._items = [item.strip() for item in splitter]
else:
self._items = list(value)
def __len__(self) -> int:
return len(self._items)
def __getitem__(self, index: int | slice) -> Any:
return self._items[index]
def __iter__(self) -> Iterator[str]:
return iter(self._items)
def __repr__(self) -> str:
class_name = self.__class__.__name__
items = [item for item in self]
return f"{class_name}({items!r})"
def __str__(self) -> str:
return ", ".join(repr(item) for item in self)
| CommaSeparatedStrings |
python | python-pillow__Pillow | src/PIL/BmpImagePlugin.py | {
"start": 16772,
"end": 19855
} | class ____(BmpImageFile):
format = "DIB"
format_description = "Windows Bitmap"
def _open(self) -> None:
self._bitmap()
#
# --------------------------------------------------------------------
# Write BMP file
SAVE = {
"1": ("1", 1, 2),
"L": ("L", 8, 256),
"P": ("P", 8, 256),
"RGB": ("BGR", 24, 0),
"RGBA": ("BGRA", 32, 0),
}
def _dib_save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
_save(im, fp, filename, False)
def _save(
im: Image.Image, fp: IO[bytes], filename: str | bytes, bitmap_header: bool = True
) -> None:
try:
rawmode, bits, colors = SAVE[im.mode]
except KeyError as e:
msg = f"cannot write mode {im.mode} as BMP"
raise OSError(msg) from e
info = im.encoderinfo
dpi = info.get("dpi", (96, 96))
# 1 meter == 39.3701 inches
ppm = tuple(int(x * 39.3701 + 0.5) for x in dpi)
stride = ((im.size[0] * bits + 7) // 8 + 3) & (~3)
header = 40 # or 64 for OS/2 version 2
image = stride * im.size[1]
if im.mode == "1":
palette = b"".join(o8(i) * 3 + b"\x00" for i in (0, 255))
elif im.mode == "L":
palette = b"".join(o8(i) * 3 + b"\x00" for i in range(256))
elif im.mode == "P":
palette = im.im.getpalette("RGB", "BGRX")
colors = len(palette) // 4
else:
palette = None
# bitmap header
if bitmap_header:
offset = 14 + header + colors * 4
file_size = offset + image
if file_size > 2**32 - 1:
msg = "File size is too large for the BMP format"
raise ValueError(msg)
fp.write(
b"BM" # file type (magic)
+ o32(file_size) # file size
+ o32(0) # reserved
+ o32(offset) # image data offset
)
# bitmap info header
fp.write(
o32(header) # info header size
+ o32(im.size[0]) # width
+ o32(im.size[1]) # height
+ o16(1) # planes
+ o16(bits) # depth
+ o32(0) # compression (0=uncompressed)
+ o32(image) # size of bitmap
+ o32(ppm[0]) # resolution
+ o32(ppm[1]) # resolution
+ o32(colors) # colors used
+ o32(colors) # colors important
)
fp.write(b"\0" * (header - 40)) # padding (for OS/2 format)
if palette:
fp.write(palette)
ImageFile._save(
im, fp, [ImageFile._Tile("raw", (0, 0) + im.size, 0, (rawmode, stride, -1))]
)
#
# --------------------------------------------------------------------
# Registry
Image.register_open(BmpImageFile.format, BmpImageFile, _accept)
Image.register_save(BmpImageFile.format, _save)
Image.register_extension(BmpImageFile.format, ".bmp")
Image.register_mime(BmpImageFile.format, "image/bmp")
Image.register_decoder("bmp_rle", BmpRleDecoder)
Image.register_open(DibImageFile.format, DibImageFile, _dib_accept)
Image.register_save(DibImageFile.format, _dib_save)
Image.register_extension(DibImageFile.format, ".dib")
Image.register_mime(DibImageFile.format, "image/bmp")
| DibImageFile |
python | aio-libs__aiohttp | aiohttp/abc.py | {
"start": 3555,
"end": 3972
} | class ____(ABC):
"""Abstract DNS resolver."""
@abstractmethod
async def resolve(
self, host: str, port: int = 0, family: socket.AddressFamily = socket.AF_INET
) -> list[ResolveResult]:
"""Return IP address for given hostname"""
@abstractmethod
async def close(self) -> None:
"""Release resolver"""
ClearCookiePredicate = Callable[[Morsel[str]], bool]
| AbstractResolver |
python | explosion__spaCy | spacy/lang/am/__init__.py | {
"start": 324,
"end": 734
} | class ____(BaseDefaults):
lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
lex_attr_getters.update(LEX_ATTRS)
lex_attr_getters[LANG] = lambda text: "am"
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
stop_words = STOP_WORDS
suffixes = TOKENIZER_SUFFIXES
writing_system = {"direction": "ltr", "has_case": False, "has_letters": True}
| AmharicDefaults |
python | langchain-ai__langchain | libs/core/langchain_core/tools/structured.py | {
"start": 844,
"end": 9602
} | class ____(BaseTool):
"""Tool that can operate on any number of inputs."""
description: str = ""
args_schema: Annotated[ArgsSchema, SkipValidation()] = Field(
..., description="The tool schema."
)
"""The input arguments' schema."""
func: Callable[..., Any] | None = None
"""The function to run when the tool is called."""
coroutine: Callable[..., Awaitable[Any]] | None = None
"""The asynchronous version of the function."""
# --- Runnable ---
# TODO: Is this needed?
@override
async def ainvoke(
self,
input: str | dict | ToolCall,
config: RunnableConfig | None = None,
**kwargs: Any,
) -> Any:
if not self.coroutine:
# If the tool does not implement async, fall back to default implementation
return await run_in_executor(config, self.invoke, input, config, **kwargs)
return await super().ainvoke(input, config, **kwargs)
# --- Tool ---
def _run(
self,
*args: Any,
config: RunnableConfig,
run_manager: CallbackManagerForToolRun | None = None,
**kwargs: Any,
) -> Any:
"""Use the tool.
Args:
*args: Positional arguments to pass to the tool
config: Configuration for the run
run_manager: Optional callback manager to use for the run
**kwargs: Keyword arguments to pass to the tool
Returns:
The result of the tool execution
"""
if self.func:
if run_manager and signature(self.func).parameters.get("callbacks"):
kwargs["callbacks"] = run_manager.get_child()
if config_param := _get_runnable_config_param(self.func):
kwargs[config_param] = config
return self.func(*args, **kwargs)
msg = "StructuredTool does not support sync invocation."
raise NotImplementedError(msg)
async def _arun(
self,
*args: Any,
config: RunnableConfig,
run_manager: AsyncCallbackManagerForToolRun | None = None,
**kwargs: Any,
) -> Any:
"""Use the tool asynchronously.
Args:
*args: Positional arguments to pass to the tool
config: Configuration for the run
run_manager: Optional callback manager to use for the run
**kwargs: Keyword arguments to pass to the tool
Returns:
The result of the tool execution
"""
if self.coroutine:
if run_manager and signature(self.coroutine).parameters.get("callbacks"):
kwargs["callbacks"] = run_manager.get_child()
if config_param := _get_runnable_config_param(self.coroutine):
kwargs[config_param] = config
return await self.coroutine(*args, **kwargs)
# If self.coroutine is None, then this will delegate to the default
# implementation which is expected to delegate to _run on a separate thread.
return await super()._arun(
*args, config=config, run_manager=run_manager, **kwargs
)
@classmethod
def from_function(
cls,
func: Callable | None = None,
coroutine: Callable[..., Awaitable[Any]] | None = None,
name: str | None = None,
description: str | None = None,
return_direct: bool = False, # noqa: FBT001,FBT002
args_schema: ArgsSchema | None = None,
infer_schema: bool = True, # noqa: FBT001,FBT002
*,
response_format: Literal["content", "content_and_artifact"] = "content",
parse_docstring: bool = False,
error_on_invalid_docstring: bool = False,
**kwargs: Any,
) -> StructuredTool:
"""Create tool from a given function.
A classmethod that helps to create a tool from a function.
Args:
func: The function from which to create a tool.
coroutine: The async function from which to create a tool.
name: The name of the tool. Defaults to the function name.
description: The description of the tool.
Defaults to the function docstring.
return_direct: Whether to return the result directly or as a callback.
args_schema: The schema of the tool's input arguments.
infer_schema: Whether to infer the schema from the function's signature.
response_format: The tool response format.
If `"content"` then the output of the tool is interpreted as the
contents of a `ToolMessage`. If `"content_and_artifact"` then the output
is expected to be a two-tuple corresponding to the `(content, artifact)`
of a `ToolMessage`.
parse_docstring: If `infer_schema` and `parse_docstring`, will attempt
to parse parameter descriptions from Google Style function docstrings.
error_on_invalid_docstring: if `parse_docstring` is provided, configure
whether to raise `ValueError` on invalid Google Style docstrings.
**kwargs: Additional arguments to pass to the tool
Returns:
The tool.
Raises:
ValueError: If the function is not provided.
ValueError: If the function does not have a docstring and description
is not provided.
TypeError: If the `args_schema` is not a `BaseModel` or dict.
Examples:
```python
def add(a: int, b: int) -> int:
\"\"\"Add two numbers\"\"\"
return a + b
tool = StructuredTool.from_function(add)
tool.run(1, 2) # 3
```
"""
if func is not None:
source_function = func
elif coroutine is not None:
source_function = coroutine
else:
msg = "Function and/or coroutine must be provided"
raise ValueError(msg)
name = name or source_function.__name__
if args_schema is None and infer_schema:
# schema name is appended within function
args_schema = create_schema_from_function(
name,
source_function,
parse_docstring=parse_docstring,
error_on_invalid_docstring=error_on_invalid_docstring,
filter_args=_filter_schema_args(source_function),
)
description_ = description
if description is None and not parse_docstring:
description_ = source_function.__doc__ or None
if description_ is None and args_schema:
if isinstance(args_schema, type) and is_basemodel_subclass(args_schema):
description_ = args_schema.__doc__
if (
description_
and "A base class for creating Pydantic models" in description_
):
description_ = ""
elif not description_:
description_ = None
elif isinstance(args_schema, dict):
description_ = args_schema.get("description")
else:
msg = (
"Invalid args_schema: expected BaseModel or dict, "
f"got {args_schema}"
)
raise TypeError(msg)
if description_ is None:
msg = "Function must have a docstring if description not provided."
raise ValueError(msg)
if description is None:
# Only apply if using the function's docstring
description_ = textwrap.dedent(description_).strip()
# Description example:
# search_api(query: str) - Searches the API for the query.
description_ = f"{description_.strip()}"
return cls(
name=name,
func=func,
coroutine=coroutine,
args_schema=args_schema,
description=description_,
return_direct=return_direct,
response_format=response_format,
**kwargs,
)
@functools.cached_property
def _injected_args_keys(self) -> frozenset[str]:
fn = self.func or self.coroutine
if fn is None:
return _EMPTY_SET
return frozenset(
k
for k, v in signature(fn).parameters.items()
if _is_injected_arg_type(v.annotation)
)
def _filter_schema_args(func: Callable) -> list[str]:
    """Names of parameters to drop when inferring a schema from ``func``'s signature."""
    excluded = list(FILTERED_ARGS)
    config_param = _get_runnable_config_param(func)
    if config_param:
        excluded.append(config_param)
    return excluded
| StructuredTool |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/io_ops/decode_csv_op_test.py | {
"start": 992,
"end": 9476
class ____(test.TestCase):
  """Kernel tests for `parsing_ops.decode_csv`.

  Each test builds a kwargs dict for decode_csv and either compares the
  decoded output tensors against an expected value, or asserts that the op
  fails with a message matching a regex.
  """

  def _test(self, args, expected_out=None, expected_err_re=None):
    """Run decode_csv(**args); check outputs or the raised error.

    Float32/float64 fields are compared with assertAllClose, everything
    else with assertAllEqual. When `expected_err_re` is given, evaluating
    the op must raise an exception whose message matches the pattern.
    """
    if expected_err_re is None:
      decode = parsing_ops.decode_csv(**args)
      out = self.evaluate(decode)

      for i, field in enumerate(out):
        if field.dtype == np.float32 or field.dtype == np.float64:
          # Floating-point fields need approximate comparison.
          self.assertAllClose(field, expected_out[i])
        else:
          self.assertAllEqual(field, expected_out[i])
    else:
      with self.assertRaisesWithPredicateMatch(Exception, expected_err_re):
        decode = parsing_ops.decode_csv(**args)
        self.evaluate(decode)

  def testSimple(self):
    args = {
        "records": ["1", "2", '"3"'],
        "record_defaults": [[1]],
    }

    expected_out = [[1, 2, 3]]

    self._test(args, expected_out)

  def testSimpleWithScalarDefaults(self):
    args = {
        "records": ["1,4", "2,5", "3,6"],
        "record_defaults": [1, 2],
    }

    expected_out = [[1, 2, 3], [4, 5, 6]]

    self._test(args, expected_out)

  def testSimpleWith2DDefaults(self):
    # Rank-2 record defaults are invalid; the error surfaces differently in
    # eager (runtime InvalidArgumentError) vs graph (shape-inference ValueError).
    args = {
        "records": ["1", "2", "3"],
        "record_defaults": [[[0]]],
    }

    if context.executing_eagerly():
      err_spec = errors.InvalidArgumentError, (
          "Each record default should be at "
          "most rank 1")
    else:
      err_spec = ValueError, "Shape must be at most rank 1 but is rank 2"
    with self.assertRaisesWithPredicateMatch(*err_spec):
      self._test(args)

  def testSimpleNoQuoteDelimiter(self):
    # With use_quote_delim=False, quotes are treated as ordinary characters.
    args = {
        "records": ["1", "2", '"3"'],
        "record_defaults": [[""]],
        "use_quote_delim": False,
    }

    expected_out = [[b"1", b"2", b'"3"']]

    self._test(args, expected_out)

  def testScalar(self):
    args = {"records": '1,""', "record_defaults": [[3], [4]]}

    expected_out = [1, 4]

    self._test(args, expected_out)

  def test2D(self):
    args = {"records": [["1", "2"], ['""', "4"]], "record_defaults": [[5]]}
    expected_out = [[[1, 2], [5, 4]]]

    self._test(args, expected_out)

  def test2DNoQuoteDelimiter(self):
    args = {
        "records": [["1", "2"], ['""', '"']],
        "record_defaults": [[""]],
        "use_quote_delim": False
    }

    expected_out = [[[b"1", b"2"], [b'""', b'"']]]

    self._test(args, expected_out)

  def testDouble(self):
    args = {
        "records": ["1.0", "-1.79e+308", '"1.79e+308"'],
        "record_defaults": [np.array([], dtype=np.double)],
    }

    expected_out = [[1.0, -1.79e+308, 1.79e+308]]

    self._test(args, expected_out)

  def testInt64(self):
    # 2147483648 overflows int32, so int64 defaults are required here.
    args = {
        "records": ["1", "2", '"2147483648"'],
        "record_defaults": [np.array([], dtype=np.int64)],
    }

    expected_out = [[1, 2, 2147483648]]

    self._test(args, expected_out)

  def testComplexString(self):
    # Quoted fields may contain delimiters, newlines, and doubled quotes.
    args = {
        "records": ['"1.0"', '"ab , c"', '"a\nbc"', '"ab""c"', " abc "],
        "record_defaults": [["1"]]
    }

    expected_out = [[b"1.0", b"ab , c", b"a\nbc", b'ab"c', b" abc "]]

    self._test(args, expected_out)

  def testMultiRecords(self):
    args = {
        "records": ["1.0,4,aa", "0.2,5,bb", "3,6,cc"],
        "record_defaults": [[1.0], [1], ["aa"]]
    }

    expected_out = [[1.0, 0.2, 3], [4, 5, 6], [b"aa", b"bb", b"cc"]]

    self._test(args, expected_out)

  def testNA(self):
    # Fields equal to na_value fall back to the column's default.
    args = {
        "records": ["2.0,NA,aa", "NA,5,bb", "3,6,NA"],
        "record_defaults": [[0.0], [0], [""]],
        "na_value": "NA"
    }

    expected_out = [[2.0, 0.0, 3], [0, 5, 6], [b"aa", b"bb", b""]]

    self._test(args, expected_out)

  def testWithDefaults(self):
    args = {
        "records": [",1,", "0.2,3,bcd", "3.0,,"],
        "record_defaults": [[1.0], [0], ["a"]]
    }

    expected_out = [[1.0, 0.2, 3.0], [1, 3, 0], [b"a", b"bcd", b"a"]]

    self._test(args, expected_out)

  def testWithDefaultsAndNoQuoteDelimiter(self):
    args = {
        "records": [",1,", "0.2,3,bcd", '3.0,,"'],
        "record_defaults": [[1.0], [0], ["a"]],
        "use_quote_delim": False,
    }

    expected_out = [[1.0, 0.2, 3.0], [1, 3, 0], [b"a", b"bcd", b"\""]]

    self._test(args, expected_out)

  def testWithTabDelim(self):
    args = {
        "records": ["1\t1", "0.2\t3", "3.0\t"],
        "record_defaults": [[1.0], [0]],
        "field_delim": "\t"
    }

    expected_out = [[1.0, 0.2, 3.0], [1, 3, 0]]

    self._test(args, expected_out)

  def testWithoutDefaultsError(self):
    # An empty-array default means the field is required.
    args = {
        "records": [",1", "0.2,3", "3.0,"],
        "record_defaults": [[1.0], np.array([], dtype=np.int32)]
    }

    self._test(
        args, expected_err_re="Field 1 is required but missing in record 2!")

  def testWrongFieldIntError(self):
    args = {
        "records": [",1", "0.2,234a", "3.0,2"],
        "record_defaults": [[1.0], np.array([], dtype=np.int32)]
    }

    self._test(
        args, expected_err_re="Field 1 in record 1 is not a valid int32: 234a")

  def testOutOfRangeError(self):
    args = {
        "records": ["1", "9999999999999999999999999", "3"],
        "record_defaults": [[1]]
    }

    self._test(
        args, expected_err_re="Field 0 in record 1 is not a valid int32: ")

  def testWrongFieldFloatError(self):
    args = {
        "records": [",1", "0.2,2", "3.0adf,3"],
        "record_defaults": [[1.0], np.array([], dtype=np.int32)]
    }

    self._test(
        args, expected_err_re="Field 0 in record 2 is not a valid float: ")

  def testWrongFieldStringError(self):
    args = {"records": ['"1,a,"', "0.22", 'a"bc'], "record_defaults": [["a"]]}

    self._test(
        args, expected_err_re="Unquoted fields cannot have quotes/CRLFs inside")

  def testWrongDefaults(self):
    args = {"records": [",1", "0.2,2", "3.0adf,3"], "record_defaults": [[1.0]]}

    self._test(args, expected_err_re="Expect 1 fields but have 2 in record 0")

  def testShortQuotedString(self):
    args = {
        "records": ["\""],
        "record_defaults": [["default"]],
    }

    self._test(
        args, expected_err_re="Quoted field has to end with quote followed.*")

  def testSelectCols(self):
    args = {
        "records": [",,", "4,5,6"],
        "record_defaults": [[1], [2]],
        "select_cols": [0, 1]
    }
    expected_out = [[1, 4], [2, 5]]
    self._test(args, expected_out)

  def testSelectColsInclLast(self):
    # The last col is a edge-casey; add test for that
    args = {
        "records": [",,", "4,5,6"],
        "record_defaults": [[0], [1], [2]],
        "select_cols": [0, 1, 2]
    }
    expected_out = [[0, 4], [1, 5], [2, 6]]
    self._test(args, expected_out)

  def testWrongSelectColsInclLast(self):
    # The last col is a edge-casey; add test for that
    args = {
        "records": [",,", "4,5,6"],
        "record_defaults": [[0], [1], [2]],
        "select_cols": [0, 1, 3]
    }
    self._test(args, expected_err_re="Expect 3 fields but have 2 in record 0")

  def testWrongSelectColsLen(self):
    args = {
        "records": ["1,2,3", "4,5,6"],
        "record_defaults": [[0], [0], [0]],
        "select_cols": [0]
    }
    with self.assertRaisesWithPredicateMatch(
        ValueError, "Length of select_cols and record_defaults do not match."):
      self._test(args)

  def testWrongSelectColsSorting(self):
    args = {
        "records": ["1,2,3"],
        "record_defaults": [[0], [1]],
        "select_cols": [1, 0]
    }
    with self.assertRaisesWithPredicateMatch(
        ValueError, "select_cols is not strictly increasing."):
      self._test(args)

  def testWrongSelectColsIndicesNegative(self):
    args = {
        "records": ["1,2,3"],
        "record_defaults": [[0], [1]],
        "select_cols": [-1, 0]  # -1 is not a valid index
    }
    with self.assertRaisesWithPredicateMatch(
        ValueError, "select_cols contains negative values."):
      self._test(args)

  def testWrongSelectColsIndicesTooHigh(self):
    args = {
        "records": ["1,2,3"],
        "record_defaults": [[0], [1]],
        "select_cols": [0, 3]  # 3 is not a valid index
    }
    # Only successfully parses one of the columns
    self._test(args, expected_err_re="Expect 2 fields but have 1 in record 0")

  def testNumpyAttribute(self):
    # Passing a numpy array (not a list) as record_defaults is accepted in
    # eager mode but rejected during graph attr validation.
    args = {
        "record_defaults": np.zeros(5),
        "records": constant_op.constant("1,2,3,4,5"),
    }

    if context.executing_eagerly():
      self._test(args, expected_out=[1, 2, 3, 4, 5])
    else:
      self._test(args, expected_err_re="Expected list for 'record_defaults'")
if __name__ == "__main__":
  # Standard TensorFlow test entry point when executed as a script.
  test.main()
| DecodeCSVOpTest |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/all_static_fields.py | {
"start": 2111,
"end": 2208
class ____:
    """Test doc string"""

    def __init__(self, a: int) -> None:
        # Single public attribute, populated straight from the constructor
        # argument (fixture class for static-field taint-analysis tests).
        self.a = a
python | wandb__wandb | wandb/vendor/pygments/lexers/diff.py | {
"start": 453,
"end": 1264
class ____(RegexLexer):
    """
    Lexer highlighting unified- or context-style diff and patch output.
    """

    name = 'Diff'
    aliases = ['diff', 'udiff']
    filenames = ['*.diff', '*.patch']
    mimetypes = ['text/x-diff', 'text/x-patch']

    tokens = {
        'root': [
            (r' .*\n', Text),
            (r'\+.*\n', Generic.Inserted),
            (r'-.*\n', Generic.Deleted),
            (r'!.*\n', Generic.Strong),
            (r'@.*\n', Generic.Subheading),
            (r'([Ii]ndex|diff).*\n', Generic.Heading),
            (r'=.*\n', Generic.Heading),
            (r'.*\n', Text),
        ]
    }

    def analyse_text(text):
        # Conclusive headers: `svn diff` / `diff` command output.
        if text.startswith(('Index: ', 'diff ')):
            return True
        # A unified-diff file header is a strong, but not certain, signal.
        if text.startswith('--- '):
            return 0.9
| DiffLexer |
python | pytest-dev__pytest | testing/example_scripts/unittest/test_unittest_asyncio.py | {
"start": 139,
"end": 630
class ____(IsolatedAsyncioTestCase):
    """Asyncio TestCase fixture: one passing and one deliberately failing
    async test, plus a sync check that async teardowns ran for both."""

    async def asyncTearDown(self):
        # Record each async teardown; counted later by test_teardowns.
        teardowns.append(None)

    async def test_something_async(self):
        async def addition(x, y):
            return x + y

        result = await addition(2, 2)
        self.assertEqual(result, 4)

    async def test_something_async_fails(self):
        async def addition(x, y):
            return x + y

        # Deliberately wrong expectation: this test is meant to fail.
        result = await addition(2, 2)
        self.assertEqual(result, 3)

    def test_teardowns(self):
        assert len(teardowns) == 2
| AsyncArguments |
python | getsentry__sentry | src/sentry/apidocs/hooks.py | {
"start": 3613,
"end": 12732
class ____(SchemaGenerator):
    # Swap in Sentry's endpoint enumerator so schema generation goes through
    # the custom endpoint discovery defined by CustomEndpointEnumerator.
    endpoint_inspector_cls = CustomEndpointEnumerator
def custom_preprocessing_hook(endpoints: Any) -> Any:  # TODO: organize method, rename
    """Filter drf-spectacular's endpoint list down to documented public methods.

    While iterating, enforces two build-time invariants (raising
    SentryApiBuildError on violation, unless allowlisted):
    - every endpoint class declares an ``owner`` team, and
    - every HTTP method has a known ``publish_status``.

    Ownership/publish-status data is accumulated per team and persisted via
    ``__write_ownership_data`` before the filtered endpoint list is returned.
    """
    filtered = []
    ownership_data: dict[ApiOwner, dict] = {}
    for path, path_regex, method, callback in endpoints:
        owner_team = callback.view_class.owner
        if owner_team not in ownership_data:
            # First endpoint seen for this team: initialize per-status buckets.
            ownership_data[owner_team] = {
                ApiPublishStatus.UNKNOWN: set(),
                ApiPublishStatus.PUBLIC: set(),
                ApiPublishStatus.PRIVATE: set(),
                ApiPublishStatus.EXPERIMENTAL: set(),
            }

        # Fail if endpoint is unowned
        if owner_team == ApiOwner.UNOWNED:
            if path not in API_OWNERSHIP_ALLOWLIST_DONT_MODIFY:
                raise SentryApiBuildError(
                    f"Endpoint {callback.view_class} is missing the attribute owner: ApiOwner. \n"
                    + "If you can't find your team in ApiOwners feel free to add the associated github group. ",
                )

        # Fail if method is not included in publish_status or has unknown status
        if (
            method not in callback.view_class.publish_status
            or callback.view_class.publish_status[method] is ApiPublishStatus.UNKNOWN
        ):
            if (
                path not in API_PUBLISH_STATUS_ALLOWLIST_DONT_MODIFY
                or method not in API_PUBLISH_STATUS_ALLOWLIST_DONT_MODIFY[path]
            ):
                raise SentryApiBuildError(
                    f"All methods must have a known publish_status. Please add a valid publish status for Endpoint {callback.view_class} {method} method.",
                )

        if any(path.startswith(p) for p in EXCLUSION_PATH_PREFIXES):
            # Excluded path prefixes are neither documented nor tracked.
            pass

        elif callback.view_class.publish_status:
            # endpoints that are documented via tooling
            if (
                method in callback.view_class.publish_status
                and callback.view_class.publish_status[method] is ApiPublishStatus.PUBLIC
            ):
                # only pass declared public methods of the endpoint
                # to the rest of the OpenAPI build pipeline
                filtered.append((path, path_regex, method, callback))
            else:
                # if an endpoint doesn't have any registered public methods, don't check it.
                pass

            ownership_data[owner_team][callback.view_class.publish_status[method]].add(
                f"{callback.view_class.__name__}::{method}"
            )

    __write_ownership_data(ownership_data)
    return filtered
def dereference_schema(
    schema: dict[str, Any],
    schema_components: Mapping[str, Any],
) -> dict[str, Any]:
    """
    Resolve a pure "$ref" schema against the shared component map.

    A schema consisting solely of a "$ref" key is replaced by the component
    it points to; any other schema is returned unchanged.
    """
    is_pure_reference = set(schema) == {"$ref"}
    if is_pure_reference:
        # References take the form "#/components/schemas/{name}";
        # the final path segment is the component key.
        component_name = schema["$ref"].rsplit("/", 1)[-1]
        return schema_components[component_name]
    return schema
def _validate_request_body(
    request_body: dict[str, Any], schema_components: Mapping[str, Any], endpoint_name: str
) -> None:
    """
    1. Dereferences schema if needed.
    2. Requires all body parameters to have a description.
    3. Ensures body parameters are sorted by placing required parameters first.

    Raises SentryApiBuildError when a body parameter lacks a description.
    Mutates the schema in place when reordering required parameters.
    """
    content = request_body["content"]

    # media type can either "multipart/form-data" or "application/json"
    if "multipart/form-data" in content:
        schema = content["multipart/form-data"]["schema"]
    else:
        schema = content["application/json"]["schema"]

    # Dereference schema if needed and raise error on schema component collisions
    schema = dereference_schema(schema, schema_components)

    for body_param, param_data in schema["properties"].items():
        # Ensure body parameters have a description. Our API docs don't
        # display body params without a description, so it's easy to miss them.
        # We should be explicitly excluding them as better practice however.
        # There is an edge case where a body param might be reference that we should ignore for now
        if "description" not in param_data and "$ref" not in param_data:
            raise SentryApiBuildError(
                f"""Body parameter '{body_param}' is missing a description for endpoint {endpoint_name}. You can either:
    1. Add a 'help_text' kwarg to the serializer field
    2. Remove the field if you're using an inline_serializer
    3. For a DRF serializer, you must explicitly exclude this field by decorating the request serializer with
    @extend_schema_serializer(exclude_fields=[{body_param}])."""
            )

    # Required params are stored in a list and not in the param itself
    required = set(schema.get("required", []))
    if required:
        # Explicitly sort body params by converting the dict to an ordered dict
        schema["properties"] = OrderedDict(
            sorted(
                schema["properties"].items(),
                key=lambda param: 0 if param[0] in required else 1,
            )
        )
def custom_postprocessing_hook(result: Any, generator: Any, **kwargs: Any) -> Any:
    """Validate and normalize the generated OpenAPI document.

    Rewrites issue paths first, then checks every operation for: a single
    valid tag, an endpoint description, descriptions on all path parameters,
    and a well-formed request body. Returns the (possibly mutated) schema.
    """
    _fix_issue_paths(result)

    # Fetch schema component references
    schema_components = result["components"]["schemas"]

    for path, endpoints in result["paths"].items():
        for method_info in endpoints.values():
            endpoint_name = f"'{method_info['operationId']}'"
            _check_tag(method_info, endpoint_name)
            _check_description(
                method_info,
                f"Please add a description to your endpoint {endpoint_name} via docstring",
            )

            # Ensure path parameters have a description
            for param in method_info.get("parameters", []):
                if param["in"] == "path":
                    _check_description(
                        param,
                        f"Please add a description to your path parameter '{param['name']}' for endpoint {endpoint_name}",
                    )

            try:
                requestBody = method_info.get("requestBody")
                if requestBody is not None:
                    _validate_request_body(requestBody, schema_components, endpoint_name)
            # A malformed body structure surfaces as KeyError; re-raise as a build error.
            except KeyError as e:
                raise SentryApiBuildError(
                    f"Unable to parse body parameters due to KeyError {e} for endpoint {endpoint_name}. Please post in #discuss-api to fix."
                )
    return result
def _check_tag(method_info: Mapping[str, Any], endpoint_name: str) -> None:
    """Require exactly one tag, drawn from OPENAPI_TAGS, on the operation.

    Raises SentryApiBuildError when the tag is missing, when more than one
    tag is present, or when the tag is not in the defined tag set.
    """
    if method_info.get("tags") is None:
        raise SentryApiBuildError(
            f"Please add a single tag to {endpoint_name}. The list of tags is defined at OPENAPI_TAGS in src/sentry/apidocs/build.py "
        )

    num_of_tags = len(method_info["tags"])

    if num_of_tags > 1:
        raise SentryApiBuildError(
            f"Please add only a single tag to {endpoint_name}. Right now there are {num_of_tags}."
        )

    tag = method_info["tags"][0]
    if tag not in _DEFINED_TAG_SET:
        raise SentryApiBuildError(
            f"""{tag} is not defined by OPENAPI_TAGS in src/sentry/apidocs/build.py for {endpoint_name}.
            Please use a suitable tag or add a new one to OPENAPI_TAGS"""
        )
def _check_description(json_body: Mapping[str, Any], err_str: str) -> None:
if json_body.get("description") is None:
raise SentryApiBuildError(err_str)
def _fix_issue_paths(result: Any) -> Any:
"""
The way we define `/issues/` paths causes some problems with drf-spectacular:
- The path may be defined twice, with `/organizations/{organization_id_slug}` prefix and
without. We want to use the `/organizations` prefixed path as it works across regions.
- The `/issues/` part of the path is defined as `issues|groups` for compatibility reasons,
but we only want to use `issues` in the docs
This function removes duplicate paths, removes the `issues|groups` path parameter and
replaces it with `issues` in the path.
"""
items = list(result["paths"].items())
modified_paths = []
for path, endpoint in items:
if "{var}/{issue_id}" in path:
modified_paths.append(path)
for path in modified_paths:
updated_path = path.replace("{var}/{issue_id}", "issues/{issue_id}")
if updated_path.startswith("/api/0/issues/"):
updated_path = updated_path.replace(
"/api/0/issues/", "/api/0/organizations/{organization_id_or_slug}/issues/"
)
endpoint = result["paths"][path]
for method in endpoint.keys():
endpoint[method]["parameters"] = [
param
for param in endpoint[method]["parameters"]
if not (param["in"] == "path" and param["name"] == "var")
]
result["paths"][updated_path] = endpoint
del result["paths"][path]
| CustomGenerator |
python | pyinstaller__pyinstaller | bootloader/waflib/TaskGen.py | {
"start": 12755,
"end": 15767
class ____(Task.Task):
    """Substitution task: either copies the input file verbatim, delegates to a
    generator-supplied function, or expands placeholders matched by ``re_m4``
    using task-generator attributes and the build environment, then applies
    the optional chmod mode to the outputs."""

    def force_permissions(self):
        # Apply the generator's `chmod` mode (when set) to every output file.
        if getattr(self.generator, 'chmod', None):
            for x in self.outputs:
                os.chmod(x.abspath(), self.generator.chmod)

    def run(self):
        # Plain copy mode: mirror bytes and timestamps, no substitution.
        if getattr(self.generator, 'is_copy', None):
            for i, x in enumerate(self.outputs):
                x.write(self.inputs[i].read('rb'), 'wb')
                stat = os.stat(self.inputs[i].abspath())
                os.utime(self.outputs[i].abspath(), (stat.st_atime, stat.st_mtime))
            self.force_permissions()
            return None
        # Fully custom behaviour delegated to the generator's `fun` callable;
        # permissions are only forced when it reports success (falsy return).
        if getattr(self.generator, 'fun', None):
            ret = self.generator.fun(self)
            if not ret:
                self.force_permissions()
            return ret
        code = self.inputs[0].read(encoding=getattr(self.generator, 'encoding', 'latin-1'))
        # Custom substitution function takes precedence over the regex pass.
        if getattr(self.generator, 'subst_fun', None):
            code = self.generator.subst_fun(self, code)
            if code is not None:
                self.outputs[0].write(code, encoding=getattr(self.generator, 'encoding', 'latin-1'))
            self.force_permissions()
            return None
        # Escape literal '%' so the final %-formatting pass below is safe.
        code = code.replace('%', '%%')
        lst = []

        def repl(match):
            # Rewrite each recognized placeholder to a %(name)s marker and
            # remember the variable name for later resolution.
            g = match.group
            if g(1):
                lst.append(g(1))
                return "%%(%s)s" % g(1)
            return ''
        code = getattr(self.generator, 're_m4', re_m4).sub(repl, code)
        try:
            # An explicit substitution dict on the generator wins.
            d = self.generator.dct
        except AttributeError:
            d = {}
            # Resolve each placeholder from generator attributes, then the
            # environment (exact name, then upper-cased name).
            for x in lst:
                tmp = getattr(self.generator, x, '') or self.env[x] or self.env[x.upper()]
                try:
                    tmp = ''.join(tmp)
                except TypeError:
                    tmp = str(tmp)
                d[x] = tmp
        code = code % d
        self.outputs[0].write(code, encoding=getattr(self.generator, 'encoding', 'latin-1'))
        # Record the placeholder names as raw deps so sig_vars can hash them.
        self.generator.bld.raw_deps[self.uid()] = lst
        # Drop any cached signature so the fresh raw deps are taken into account.
        try:
            delattr(self, 'cache_sig')
        except AttributeError:
            pass
        self.force_permissions()

    def sig_vars(self):
        # Fold the substitution callables and every referenced variable's
        # value into the task signature so changes trigger a rebuild.
        bld = self.generator.bld
        env = self.env
        upd = self.m.update
        if getattr(self.generator, 'fun', None):
            upd(Utils.h_fun(self.generator.fun).encode())
        if getattr(self.generator, 'subst_fun', None):
            upd(Utils.h_fun(self.generator.subst_fun).encode())
        vars = self.generator.bld.raw_deps.get(self.uid(), [])
        act_sig = bld.hash_env_vars(env, vars)
        upd(act_sig)
        lst = [getattr(self.generator, x, '') for x in vars]
        upd(Utils.h_list(lst))
        return self.m.digest()
@extension('.pc.in')
def add_pcfile(self, node):
    # Task-generator hook: each *.pc.in source spawns a subst_pc task that
    # produces the corresponding .pc file, which is then scheduled for
    # installation (default destination: ${LIBDIR}/pkgconfig/).
    tsk = self.create_task('subst_pc', node, node.change_ext('.pc', '.pc.in'))
    self.install_task = self.add_install_files(
        install_to=getattr(self, 'install_path', '${LIBDIR}/pkgconfig/'), install_from=tsk.outputs
    )
| subst_pc |
python | spyder-ide__spyder | spyder/plugins/completion/providers/snippets/widgets/snippetsconfig.py | {
"start": 9054,
"end": 16804
class ____(QDialog, SpyderFontsMixin):
    """Dialog to create or edit a single text snippet.

    Collects a trigger text, a short description, a remove-on-insert flag and
    the snippet body, live-validating the inputs (snippet syntax via
    ``build_snippet_ast``, uniqueness of trigger/description pairs) and
    enabling the OK button only when everything is valid.

    Fixes over the previous revision:
    - `trigger_texts`/`descriptions` no longer use mutable default arguments.
    - `description_input.textChanged` is connected to `validate` only once
      (it was previously connected both via a lambda and directly, running
      validation twice per keystroke).
    """

    SNIPPET_VALID = _('Valid snippet')
    SNIPPET_INVALID = _('Invalid snippet')
    INVALID_CB_CSS = "QComboBox {border: 1px solid red;}"
    VALID_CB_CSS = "QComboBox {border: 1px solid green;}"
    INVALID_LINE_CSS = "QLineEdit {border: 1px solid red;}"
    VALID_LINE_CSS = "QLineEdit {border: 1px solid green;}"
    MIN_SIZE = QSize(850, 600)

    def __init__(self, parent, language=None, trigger_text='', description='',
                 snippet_text='', remove_trigger=False, trigger_texts=None,
                 descriptions=None, get_option=None, set_option=None):
        """
        Parameters
        ----------
        parent: parent widget.
        language: language of the snippet (used by the editor/highlighter).
        trigger_text/description/snippet_text/remove_trigger: initial values
            of the snippet being edited (empty for a new snippet).
        trigger_texts: existing trigger texts to populate the combobox.
        descriptions: mapping of trigger text -> existing descriptions, used
            to reject duplicate (trigger, description) pairs.
        get_option/set_option: config accessors forwarded to `Snippet`.
        """
        super().__init__(parent)
        # Avoid shared mutable defaults: fall back to fresh empty containers.
        if trigger_texts is None:
            trigger_texts = []
        if descriptions is None:
            descriptions = []

        snippet_description = _(
            "To add a new text snippet, you need to define the text "
            "that triggers it, a short description (two words maximum) "
            "of the snippet and if it should delete the trigger text when "
            "inserted. Finally, you need to define the snippet body to insert."
        )
        self.parent = parent
        self.trigger_text = trigger_text
        self.description = description
        self.remove_trigger = remove_trigger
        self.snippet_text = snippet_text
        self.descriptions = descriptions
        self.base_snippet = Snippet(
            language=language, trigger_text=trigger_text,
            snippet_text=snippet_text, description=description,
            remove_trigger=remove_trigger,
            get_option=get_option, set_option=set_option)

        # Widgets
        self.snippet_settings_description = QLabel(snippet_description)
        self.snippet_settings_description.setFixedWidth(450)

        # Trigger text
        self.trigger_text_label = QLabel(_('Trigger text:'))
        self.trigger_text_cb = SpyderComboBox(self)
        self.trigger_text_cb.setEditable(True)

        # Description
        self.description_label = QLabel(_('Description:'))
        self.description_input = QLineEdit(self)

        # Remove trigger
        self.remove_trigger_cb = QCheckBox(
            _('Remove trigger text on insertion'), self)
        self.remove_trigger_cb.setToolTip(_('Check if the text that triggers '
                                            'this snippet should be removed '
                                            'when inserting it'))
        self.remove_trigger_cb.setChecked(self.remove_trigger)

        # Snippet body input
        self.snippet_label = QLabel(_('<b>Snippet text:</b>'))
        self.snippet_valid_label = QLabel(self.SNIPPET_INVALID, self)
        self.snippet_input = SimpleCodeEditor(None)

        # Dialog buttons
        self.bbox = SpyderDialogButtonBox(
            QDialogButtonBox.Ok | QDialogButtonBox.Cancel
        )
        self.button_ok = self.bbox.button(QDialogButtonBox.Ok)
        self.button_cancel = self.bbox.button(QDialogButtonBox.Cancel)

        # Widget setup
        self.setWindowTitle(_('Snippet editor'))
        self.snippet_settings_description.setWordWrap(True)
        self.trigger_text_cb.setToolTip(
            _('Trigger text for the current snippet'))
        self.trigger_text_cb.addItems(trigger_texts)

        if self.trigger_text != '':
            idx = trigger_texts.index(self.trigger_text)
            self.trigger_text_cb.setCurrentIndex(idx)

        self.description_input.setText(self.description)
        # NOTE: validation for description changes is wired once in the
        # "Signals" section below; a second (lambda) connection here used to
        # run validate() twice per keystroke.

        text_inputs = (self.trigger_text, self.description, self.snippet_text)
        non_empty_text = all(x != '' for x in text_inputs)
        if non_empty_text:
            # Editing an existing, fully-defined snippet: OK is valid upfront.
            self.button_ok.setEnabled(True)

        self.snippet_input.setup_editor(
            language=language,
            color_scheme=get_option('selected', section='appearance'),
            wrap=False,
            highlight_current_line=True,
            font=self.get_font(SpyderFontType.MonospaceInterface)
        )
        self.snippet_input.set_language(language)
        self.snippet_input.setToolTip(_('Snippet text completion to insert'))
        self.snippet_input.set_text(snippet_text)

        # Layout setup
        general_layout = QVBoxLayout()
        general_layout.addWidget(self.snippet_settings_description)

        snippet_settings_group = QGroupBox(_('Trigger information'))
        settings_layout = QGridLayout()
        settings_layout.addWidget(self.trigger_text_label, 0, 0)
        settings_layout.addWidget(self.trigger_text_cb, 0, 1)
        settings_layout.addWidget(self.description_label, 1, 0)
        settings_layout.addWidget(self.description_input, 1, 1)

        all_settings_layout = QVBoxLayout()
        all_settings_layout.addLayout(settings_layout)
        all_settings_layout.addWidget(self.remove_trigger_cb)
        snippet_settings_group.setLayout(all_settings_layout)
        general_layout.addWidget(snippet_settings_group)

        text_layout = QVBoxLayout()
        text_layout.addWidget(self.snippet_label)
        text_layout.addWidget(self.snippet_input)
        text_layout.addWidget(self.snippet_valid_label)
        general_layout.addLayout(text_layout)

        general_layout.addWidget(self.bbox)
        self.setLayout(general_layout)

        # Signals
        self.trigger_text_cb.editTextChanged.connect(self.validate)
        self.description_input.textChanged.connect(self.validate)
        self.snippet_input.textChanged.connect(self.validate)
        self.bbox.accepted.connect(self.accept)
        self.bbox.rejected.connect(self.reject)

        # Final setup
        if trigger_text != '' or snippet_text != '':
            self.validate()

    @Slot()
    def validate(self):
        """Validate all inputs, update visual feedback and the OK button.

        A snippet is invalid when its body fails to parse, the trigger text is
        empty, or the (trigger, description) pair collides with an existing one.
        """
        trigger_text = self.trigger_text_cb.currentText()
        description_text = self.description_input.text()
        snippet_text = self.snippet_input.toPlainText()

        invalid = False
        try:
            # Parse the snippet body to check its syntax.
            build_snippet_ast(snippet_text)
            self.snippet_valid_label.setText(self.SNIPPET_VALID)
        except SyntaxError:
            invalid = True
            self.snippet_valid_label.setText(self.SNIPPET_INVALID)

        if trigger_text == '':
            invalid = True
            self.trigger_text_cb.setStyleSheet(self.INVALID_CB_CSS)
        else:
            self.trigger_text_cb.setStyleSheet(self.VALID_CB_CSS)

        # Reject duplicated (trigger, description) combinations, allowing the
        # snippet being edited to keep its own current pair.
        if trigger_text in self.descriptions:
            if self.trigger_text != trigger_text:
                if description_text in self.descriptions[trigger_text]:
                    invalid = True
                    self.description_input.setStyleSheet(
                        self.INVALID_LINE_CSS)
                else:
                    self.description_input.setStyleSheet(
                        self.VALID_LINE_CSS)
            else:
                if description_text != self.description:
                    if description_text in self.descriptions[trigger_text]:
                        invalid = True
                        self.description_input.setStyleSheet(
                            self.INVALID_LINE_CSS)
                    else:
                        self.description_input.setStyleSheet(
                            self.VALID_LINE_CSS)
                else:
                    self.description_input.setStyleSheet(
                        self.VALID_LINE_CSS)

        self.button_ok.setEnabled(not invalid)

    def get_options(self):
        """Update the underlying Snippet with the dialog values and return it."""
        trigger_text = self.trigger_text_cb.currentText()
        description_text = self.description_input.text()
        snippet_text = self.snippet_input.toPlainText()
        remove_trigger = self.remove_trigger_cb.isChecked()

        self.base_snippet.update(
            trigger_text, description_text, snippet_text, remove_trigger)
        return self.base_snippet
| SnippetEditor |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/rule_based_profiler/data_assistant/growth_numeric_data_assistant.py | {
"start": 1767,
"end": 39200
} | class ____(DataAssistant):
"""
GrowthNumericDataAssistant provides dataset exploration and validation of growing amounts of numeric columns data.
Fundamentally, GrowthNumericDataAssistant is a "thematic blend of VolumeDataAssistant with several Rule definitions
from OnboardingDataAssistant concerned with measuring dataset growth and characterizing numeric-valued columns", the
goal being a demonstration of how to develop DataAssistant implementations for application-specific custom purposes
by combining core computational building blocks following a straightforward procedure in a cookie-cutter fashion.
"""
__alias__: str = "growth_numeric"
def __init__(
self,
name: str,
validator: Validator,
) -> None:
super().__init__(
name=name,
validator=validator,
)
def get_variables(self) -> Optional[Dict[str, Any]]:
"""
Returns:
Optional "variables" configuration attribute name/value pairs (overrides), commonly-used in Builder objects.
"""
return None
def get_rules(self) -> Optional[List[Rule]]:
"""
Returns:
Optional custom list of "Rule" objects implementing particular "DataAssistant" functionality.
"""
table_rule: Rule = self._build_table_rule()
total_count_metric_multi_batch_parameter_builder_for_evaluations: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_table_row_count_metric_multi_batch_parameter_builder()
column_value_nonnullity_rule: Rule = build_map_metric_rule(
data_assistant_class_name=self.__class__.__name__,
rule_name="column_value_nonnullity_rule",
expectation_type="expect_column_values_to_not_be_null",
map_metric_name="column_values.nonnull",
total_count_metric_multi_batch_parameter_builder_for_evaluations=total_count_metric_multi_batch_parameter_builder_for_evaluations,
include_column_names=None,
exclude_column_names=None,
include_column_name_suffixes=None,
exclude_column_name_suffixes=None,
semantic_type_filter_module_name=None,
semantic_type_filter_class_name=None,
include_semantic_types=None,
exclude_semantic_types=None,
max_unexpected_values=0,
max_unexpected_ratio=None,
min_max_unexpected_values_proportion=9.75e-1,
)
numeric_columns_rule: Rule = self._build_numeric_columns_rule()
categorical_columns_rule: Rule = self._build_categorical_columns_rule()
return [
table_rule,
column_value_nonnullity_rule,
numeric_columns_rule,
categorical_columns_rule,
]
def _build_data_assistant_result(
self, data_assistant_result: DataAssistantResult
) -> DataAssistantResult:
return GrowthNumericDataAssistantResult(
_batch_id_to_batch_identifier_display_name_map=data_assistant_result._batch_id_to_batch_identifier_display_name_map,
profiler_config=data_assistant_result.profiler_config,
profiler_execution_time=data_assistant_result.profiler_execution_time,
rule_domain_builder_execution_time=data_assistant_result.rule_domain_builder_execution_time,
rule_execution_time=data_assistant_result.rule_execution_time,
rule_exception_tracebacks=data_assistant_result.rule_exception_tracebacks,
metrics_by_domain=data_assistant_result.metrics_by_domain,
expectation_configurations=data_assistant_result.expectation_configurations,
citation=data_assistant_result.citation,
)
@staticmethod
def _build_table_rule() -> Rule:
"""
This method builds "Rule" object focused on emitting "ExpectationConfiguration" objects for table "Domain" type.
"""
# Step-1: Instantiate "TableDomainBuilder" object.
table_domain_builder = TableDomainBuilder(
data_context=None,
)
# Step-2: Declare "ParameterBuilder" for every metric of interest.
table_row_count_metric_multi_batch_parameter_builder_for_metrics: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_table_row_count_metric_multi_batch_parameter_builder()
table_columns_metric_multi_batch_parameter_builder_for_metrics: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_table_columns_metric_multi_batch_parameter_builder()
# Step-3: Declare "ParameterBuilder" for every "validation" need in "ExpectationConfigurationBuilder" objects.
suite_parameter_builder_configs: Optional[List[ParameterBuilderConfig]] = [
ParameterBuilderConfig(
**table_row_count_metric_multi_batch_parameter_builder_for_metrics.to_json_dict()
),
]
table_row_count_range_parameter_builder_for_validations: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.build_numeric_metric_range_multi_batch_parameter_builder(
metric_name=None,
metric_value_kwargs=None,
suite_parameter_builder_configs=suite_parameter_builder_configs,
)
mean_table_columns_set_match_multi_batch_parameter_builder_for_validations = (
MeanTableColumnsSetMatchMultiBatchParameterBuilder(
name="column_names_set_estimator",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
suite_parameter_builder_configs=None,
)
)
# Step-4: Pass "validation" "ParameterBuilderConfig" objects to every "DefaultExpectationConfigurationBuilder", responsible for emitting "ExpectationConfiguration" (with specified "expectation_type").
validation_parameter_builder_configs: Optional[List[ParameterBuilderConfig]]
validation_parameter_builder_configs = [
ParameterBuilderConfig(
**table_row_count_range_parameter_builder_for_validations.to_json_dict(),
),
]
expect_table_row_count_to_be_between_expectation_configuration_builder = DefaultExpectationConfigurationBuilder(
expectation_type="expect_table_row_count_to_be_between",
validation_parameter_builder_configs=validation_parameter_builder_configs,
min_value=f"{table_row_count_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY}[0]",
max_value=f"{table_row_count_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY}[1]",
meta={
"profiler_details": f"{table_row_count_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY}",
},
)
validation_parameter_builder_configs = [
ParameterBuilderConfig(
**mean_table_columns_set_match_multi_batch_parameter_builder_for_validations.to_json_dict(),
),
]
expect_table_columns_to_match_set_expectation_configuration_builder = DefaultExpectationConfigurationBuilder(
expectation_type="expect_table_columns_to_match_set",
validation_parameter_builder_configs=validation_parameter_builder_configs,
condition=f"{mean_table_columns_set_match_multi_batch_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}success_ratio >= {VARIABLES_KEY}success_ratio",
column_set=f"{mean_table_columns_set_match_multi_batch_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY}",
exact_match=f"{VARIABLES_KEY}exact_match",
meta={
"profiler_details": f"{mean_table_columns_set_match_multi_batch_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY}",
},
)
# Step-5: Instantiate and return "Rule" object, comprised of "variables", "domain_builder", "parameter_builders", and "expectation_configuration_builders" components.
variables: dict = {
"false_positive_rate": 0.05,
"estimator": "bootstrap",
"n_resamples": 9999,
"random_seed": None,
"quantile_statistic_interpolation_method": "nearest",
"quantile_bias_correction": False,
"quantile_bias_std_error_ratio_threshold": None,
"include_estimator_samples_histogram_in_details": False,
"truncate_values": {
"lower_bound": 0,
"upper_bound": None,
},
"round_decimals": 0,
"exact_match": None,
"success_ratio": 1.0,
}
parameter_builders: List[ParameterBuilder] = [
table_row_count_metric_multi_batch_parameter_builder_for_metrics,
table_columns_metric_multi_batch_parameter_builder_for_metrics,
]
expectation_configuration_builders: List[ExpectationConfigurationBuilder] = [
expect_table_row_count_to_be_between_expectation_configuration_builder,
expect_table_columns_to_match_set_expectation_configuration_builder,
]
rule = Rule(
name="table_rule",
variables=variables,
domain_builder=table_domain_builder,
parameter_builders=parameter_builders,
expectation_configuration_builders=expectation_configuration_builders,
)
return rule
@staticmethod
def _build_numeric_columns_rule() -> Rule:
"""
This method builds "Rule" object focused on emitting "ExpectationConfiguration" objects for numeric columns.
"""
# Step-1: Instantiate "ColumnDomainBuilder" for selecting numeric columns (but not "ID-type" columns).
numeric_column_type_domain_builder = ColumnDomainBuilder(
include_column_names=None,
exclude_column_names=None,
include_column_name_suffixes=None,
exclude_column_name_suffixes=[
"_id",
"_ID",
],
semantic_type_filter_module_name=None,
semantic_type_filter_class_name=None,
include_semantic_types=[
SemanticDomainTypes.NUMERIC,
],
exclude_semantic_types=[
SemanticDomainTypes.IDENTIFIER,
],
data_context=None,
)
# Step-2: Declare "ParameterBuilder" for every metric of interest.
column_histogram_single_batch_parameter_builder_for_metrics: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.build_histogram_single_batch_parameter_builder(
name="column_values.partition",
)
column_min_metric_multi_batch_parameter_builder_for_metrics: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_column_min_metric_multi_batch_parameter_builder()
column_max_metric_multi_batch_parameter_builder_for_metrics: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_column_max_metric_multi_batch_parameter_builder()
column_quantile_values_metric_multi_batch_parameter_builder_for_metrics: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_column_quantile_values_metric_multi_batch_parameter_builder()
column_median_metric_multi_batch_parameter_builder_for_metrics: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_column_median_metric_multi_batch_parameter_builder()
column_mean_metric_multi_batch_parameter_builder_for_metrics: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_column_mean_metric_multi_batch_parameter_builder()
column_standard_deviation_metric_multi_batch_parameter_builder_for_metrics: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_column_standard_deviation_metric_multi_batch_parameter_builder()
# Step-3: Declare "ParameterBuilder" for every "validation" need in "ExpectationConfigurationBuilder" objects.
suite_parameter_builder_configs: Optional[List[ParameterBuilderConfig]]
suite_parameter_builder_configs = [
ParameterBuilderConfig(
**column_min_metric_multi_batch_parameter_builder_for_metrics.to_json_dict()
),
]
column_min_values_range_parameter_builder_for_validations: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.build_numeric_metric_range_multi_batch_parameter_builder(
metric_name=None,
metric_value_kwargs=None,
suite_parameter_builder_configs=suite_parameter_builder_configs,
)
suite_parameter_builder_configs = [
ParameterBuilderConfig(
**column_max_metric_multi_batch_parameter_builder_for_metrics.to_json_dict()
),
]
column_max_values_range_parameter_builder_for_validations: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.build_numeric_metric_range_multi_batch_parameter_builder(
metric_name=None,
metric_value_kwargs=None,
suite_parameter_builder_configs=suite_parameter_builder_configs,
)
suite_parameter_builder_configs = [
ParameterBuilderConfig(
**column_quantile_values_metric_multi_batch_parameter_builder_for_metrics.to_json_dict()
),
]
column_quantile_values_range_parameter_builder_for_validations: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.build_numeric_metric_range_multi_batch_parameter_builder(
metric_name=None,
metric_value_kwargs={
"quantiles": f"{VARIABLES_KEY}quantiles",
"allow_relative_error": f"{VARIABLES_KEY}allow_relative_error",
},
suite_parameter_builder_configs=suite_parameter_builder_configs,
)
suite_parameter_builder_configs = [
ParameterBuilderConfig(
**column_median_metric_multi_batch_parameter_builder_for_metrics.to_json_dict()
),
]
column_median_values_range_parameter_builder_for_validations: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.build_numeric_metric_range_multi_batch_parameter_builder(
metric_name=None,
metric_value_kwargs=None,
suite_parameter_builder_configs=suite_parameter_builder_configs,
)
suite_parameter_builder_configs = [
ParameterBuilderConfig(
**column_mean_metric_multi_batch_parameter_builder_for_metrics.to_json_dict()
),
]
column_mean_values_range_parameter_builder_for_validations: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.build_numeric_metric_range_multi_batch_parameter_builder(
metric_name=None,
metric_value_kwargs=None,
suite_parameter_builder_configs=suite_parameter_builder_configs,
)
suite_parameter_builder_configs = [
ParameterBuilderConfig(
**column_standard_deviation_metric_multi_batch_parameter_builder_for_metrics.to_json_dict()
),
]
column_standard_deviation_values_range_parameter_builder_for_validations: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.build_numeric_metric_range_multi_batch_parameter_builder(
metric_name=None,
metric_value_kwargs=None,
suite_parameter_builder_configs=suite_parameter_builder_configs,
)
# Step-4: Pass "validation" "ParameterBuilderConfig" objects to every "DefaultExpectationConfigurationBuilder", responsible for emitting "ExpectationConfiguration" (with specified "expectation_type").
validation_parameter_builder_configs: Optional[List[ParameterBuilderConfig]]
validation_parameter_builder_configs = [
ParameterBuilderConfig(
**column_min_values_range_parameter_builder_for_validations.to_json_dict(),
),
]
expect_column_min_to_be_between_expectation_configuration_builder = DefaultExpectationConfigurationBuilder(
expectation_type="expect_column_min_to_be_between",
validation_parameter_builder_configs=validation_parameter_builder_configs,
column=f"{DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}column",
min_value=f"{column_min_values_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY}[0]",
max_value=f"{column_min_values_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY}[1]",
strict_min=f"{VARIABLES_KEY}strict_min",
strict_max=f"{VARIABLES_KEY}strict_max",
meta={
"profiler_details": f"{column_min_values_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY}",
},
)
validation_parameter_builder_configs = [
ParameterBuilderConfig(
**column_max_values_range_parameter_builder_for_validations.to_json_dict(),
),
]
expect_column_max_to_be_between_expectation_configuration_builder = DefaultExpectationConfigurationBuilder(
expectation_type="expect_column_max_to_be_between",
validation_parameter_builder_configs=validation_parameter_builder_configs,
column=f"{DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}column",
min_value=f"{column_max_values_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY}[0]",
max_value=f"{column_max_values_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY}[1]",
strict_min=f"{VARIABLES_KEY}strict_min",
strict_max=f"{VARIABLES_KEY}strict_max",
meta={
"profiler_details": f"{column_max_values_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY}",
},
)
validation_parameter_builder_configs = [
ParameterBuilderConfig(
**column_min_values_range_parameter_builder_for_validations.to_json_dict(),
),
ParameterBuilderConfig(
**column_max_values_range_parameter_builder_for_validations.to_json_dict(),
),
]
expect_column_values_to_be_between_expectation_configuration_builder = DefaultExpectationConfigurationBuilder(
expectation_type="expect_column_values_to_be_between",
validation_parameter_builder_configs=validation_parameter_builder_configs,
column=f"{DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}column",
min_value=f"{column_min_values_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY}[0]",
max_value=f"{column_max_values_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY}[1]",
mostly=f"{VARIABLES_KEY}mostly",
strict_min=f"{VARIABLES_KEY}strict_min",
strict_max=f"{VARIABLES_KEY}strict_max",
meta={
"profiler_details": {
"column_min_values_range_estimator": f"{column_min_values_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY}",
"column_max_values_range_estimator": f"{column_max_values_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY}",
},
},
)
validation_parameter_builder_configs = [
ParameterBuilderConfig(
**column_quantile_values_range_parameter_builder_for_validations.to_json_dict(),
),
]
expect_column_quantile_values_to_be_between_expectation_configuration_builder = DefaultExpectationConfigurationBuilder(
expectation_type="expect_column_quantile_values_to_be_between",
validation_parameter_builder_configs=validation_parameter_builder_configs,
column=f"{DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}column",
quantile_ranges={
"quantiles": f"{VARIABLES_KEY}quantiles",
"value_ranges": f"{column_quantile_values_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY}",
},
allow_relative_error=f"{VARIABLES_KEY}allow_relative_error",
meta={
"profiler_details": f"{column_quantile_values_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY}",
},
)
validation_parameter_builder_configs = [
ParameterBuilderConfig(
**column_median_values_range_parameter_builder_for_validations.to_json_dict(),
),
]
expect_column_median_to_be_between_expectation_configuration_builder = DefaultExpectationConfigurationBuilder(
expectation_type="expect_column_median_to_be_between",
validation_parameter_builder_configs=validation_parameter_builder_configs,
column=f"{DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}column",
min_value=f"{column_median_values_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY}[0]",
max_value=f"{column_median_values_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY}[1]",
strict_min=f"{VARIABLES_KEY}strict_min",
strict_max=f"{VARIABLES_KEY}strict_max",
meta={
"profiler_details": f"{column_median_values_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY}",
},
)
validation_parameter_builder_configs = [
ParameterBuilderConfig(
**column_mean_values_range_parameter_builder_for_validations.to_json_dict(),
),
]
expect_column_mean_to_be_between_expectation_configuration_builder = DefaultExpectationConfigurationBuilder(
expectation_type="expect_column_mean_to_be_between",
validation_parameter_builder_configs=validation_parameter_builder_configs,
column=f"{DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}column",
min_value=f"{column_mean_values_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY}[0]",
max_value=f"{column_mean_values_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY}[1]",
strict_min=f"{VARIABLES_KEY}strict_min",
strict_max=f"{VARIABLES_KEY}strict_max",
meta={
"profiler_details": f"{column_mean_values_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY}",
},
)
validation_parameter_builder_configs = [
ParameterBuilderConfig(
**column_standard_deviation_values_range_parameter_builder_for_validations.to_json_dict(),
),
]
expect_column_stdev_to_be_between_expectation_configuration_builder = DefaultExpectationConfigurationBuilder(
expectation_type="expect_column_stdev_to_be_between",
validation_parameter_builder_configs=validation_parameter_builder_configs,
column=f"{DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}column",
min_value=f"{column_standard_deviation_values_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY}[0]",
max_value=f"{column_standard_deviation_values_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY}[1]",
strict_min=f"{VARIABLES_KEY}strict_min",
strict_max=f"{VARIABLES_KEY}strict_max",
meta={
"profiler_details": f"{column_standard_deviation_values_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY}",
},
)
# Step-5: Instantiate and return "Rule" object, comprised of "variables", "domain_builder", "parameter_builders", and "expectation_configuration_builders" components.
variables: dict = {
"mostly": 1.0,
"strict_min": False,
"strict_max": False,
"quantiles": [
0.25,
0.5,
0.75,
],
"allow_relative_error": False,
"false_positive_rate": 0.05,
"estimator": "bootstrap",
"n_resamples": 9999,
"random_seed": None,
"quantile_statistic_interpolation_method": "nearest",
"quantile_bias_correction": False,
"quantile_bias_std_error_ratio_threshold": None,
"include_estimator_samples_histogram_in_details": False,
"truncate_values": {
"lower_bound": None,
"upper_bound": None,
},
"round_decimals": None,
}
parameter_builders: List[ParameterBuilder] = [
column_histogram_single_batch_parameter_builder_for_metrics,
column_min_metric_multi_batch_parameter_builder_for_metrics,
column_max_metric_multi_batch_parameter_builder_for_metrics,
column_quantile_values_metric_multi_batch_parameter_builder_for_metrics,
column_median_metric_multi_batch_parameter_builder_for_metrics,
column_mean_metric_multi_batch_parameter_builder_for_metrics,
column_standard_deviation_metric_multi_batch_parameter_builder_for_metrics,
]
expectation_configuration_builders: List[ExpectationConfigurationBuilder] = [
expect_column_min_to_be_between_expectation_configuration_builder,
expect_column_max_to_be_between_expectation_configuration_builder,
expect_column_values_to_be_between_expectation_configuration_builder,
expect_column_quantile_values_to_be_between_expectation_configuration_builder,
expect_column_median_to_be_between_expectation_configuration_builder,
expect_column_mean_to_be_between_expectation_configuration_builder,
expect_column_stdev_to_be_between_expectation_configuration_builder,
]
rule = Rule(
name="numeric_columns_rule",
variables=variables,
domain_builder=numeric_column_type_domain_builder,
parameter_builders=parameter_builders,
expectation_configuration_builders=expectation_configuration_builders,
)
return rule
@staticmethod
def _build_categorical_columns_rule() -> Rule:
"""
This method builds "Rule" object focused on emitting "ExpectationConfiguration" objects for categorical columns.
"""
# Step-1: Instantiate "CategoricalColumnDomainBuilder" for selecting columns containing "FEW" discrete values.
categorical_column_type_domain_builder: CategoricalColumnDomainBuilder = (
CategoricalColumnDomainBuilder(
include_column_names=None,
exclude_column_names=None,
include_column_name_suffixes=None,
exclude_column_name_suffixes=None,
semantic_type_filter_module_name=None,
semantic_type_filter_class_name=None,
include_semantic_types=None,
exclude_semantic_types=None,
allowed_semantic_types_passthrough=None,
cardinality_limit_mode=f"{VARIABLES_KEY}cardinality_limit_mode",
max_unique_values=None,
max_proportion_unique=None,
data_context=None,
)
)
# Step-2: Declare "ParameterBuilder" for every metric of interest.
column_distinct_values_count_metric_multi_batch_parameter_builder_for_metrics: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_column_distinct_values_count_metric_multi_batch_parameter_builder()
# Step-3: Declare "ParameterBuilder" for every "validation" need in "ExpectationConfigurationBuilder" objects.
suite_parameter_builder_configs: Optional[List[ParameterBuilderConfig]]
suite_parameter_builder_configs = [
ParameterBuilderConfig(
**column_distinct_values_count_metric_multi_batch_parameter_builder_for_metrics.to_json_dict()
),
]
column_distinct_values_count_range_parameter_builder_for_validations: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.build_numeric_metric_range_multi_batch_parameter_builder(
metric_name=None,
metric_value_kwargs=None,
suite_parameter_builder_configs=suite_parameter_builder_configs,
)
column_unique_proportion_range_parameter_builder_for_validations: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.build_numeric_metric_range_multi_batch_parameter_builder(
metric_name="column.unique_proportion",
metric_value_kwargs=None,
)
# Step-4: Pass "validation" "ParameterBuilderConfig" objects to every "DefaultExpectationConfigurationBuilder", responsible for emitting "ExpectationConfiguration" (with specified "expectation_type").
validation_parameter_builder_configs: Optional[List[ParameterBuilderConfig]]
validation_parameter_builder_configs: Optional[List[ParameterBuilderConfig]] = [
ParameterBuilderConfig(
**column_distinct_values_count_range_parameter_builder_for_validations.to_json_dict(),
),
]
expect_column_unique_value_count_to_be_between_expectation_configuration_builder = DefaultExpectationConfigurationBuilder(
expectation_type="expect_column_unique_value_count_to_be_between",
validation_parameter_builder_configs=validation_parameter_builder_configs,
column=f"{DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}column",
min_value=f"{column_distinct_values_count_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY}[0]",
max_value=f"{column_distinct_values_count_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY}[1]",
strict_min=f"{VARIABLES_KEY}strict_min",
strict_max=f"{VARIABLES_KEY}strict_max",
meta={
"profiler_details": f"{column_distinct_values_count_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY}",
},
)
validation_parameter_builder_configs = [
ParameterBuilderConfig(
**column_unique_proportion_range_parameter_builder_for_validations.to_json_dict(),
),
]
expect_column_proportion_of_unique_values_to_be_between_expectation_configuration_builder = DefaultExpectationConfigurationBuilder(
expectation_type="expect_column_proportion_of_unique_values_to_be_between",
validation_parameter_builder_configs=validation_parameter_builder_configs,
column=f"{DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}column",
min_value=f"{column_unique_proportion_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY}[0]",
max_value=f"{column_unique_proportion_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY}[1]",
strict_min=f"{VARIABLES_KEY}strict_min",
strict_max=f"{VARIABLES_KEY}strict_max",
meta={
"profiler_details": f"{column_unique_proportion_range_parameter_builder_for_validations.json_serialized_fully_qualified_parameter_name}{FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER}{FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY}",
},
)
# Step-5: Instantiate and return "Rule" object, comprised of "variables", "domain_builder", "parameter_builders", and "expectation_configuration_builders" components.
variables: dict = {
"cardinality_limit_mode": CardinalityLimitMode.REL_100.name,
"mostly": 1.0,
"strict_min": False,
"strict_max": False,
"false_positive_rate": 0.05,
"estimator": "bootstrap",
"n_resamples": 9999,
"random_seed": None,
"quantile_statistic_interpolation_method": "nearest",
"quantile_bias_correction": False,
"quantile_bias_std_error_ratio_threshold": None,
"include_estimator_samples_histogram_in_details": False,
"truncate_values": {
"lower_bound": 0.0,
"upper_bound": None,
},
"round_decimals": None,
}
expectation_configuration_builders: List[ExpectationConfigurationBuilder] = [
expect_column_unique_value_count_to_be_between_expectation_configuration_builder,
expect_column_proportion_of_unique_values_to_be_between_expectation_configuration_builder,
]
rule = Rule(
name="categorical_columns_rule",
variables=variables,
domain_builder=categorical_column_type_domain_builder,
parameter_builders=None,
expectation_configuration_builders=expectation_configuration_builders,
)
return rule
| GrowthNumericDataAssistant |
python | cython__cython | tests/run/test_tstring.py | {
"start": 4037,
"end": 15411
} | class ____(TestCase, TStringBaseCase):
def test_string_representation(self):
# Test __repr__
t = t"Hello"
self.assertEqual(repr(t), "Template(strings=('Hello',), interpolations=())")
name = "Python"
t = t"Hello, {name}"
self.assertEqual(repr(t),
"Template(strings=('Hello, ', ''), "
"interpolations=(Interpolation('Python', 'name', None, ''),))"
)
def test_interpolation_basics(self):
# Test basic interpolation
name = "Python"
t = t"Hello, {name}"
self.assertTStringEqual(t, ("Hello, ", ""), [(name, "name")])
self.assertEqual(fstring(t), "Hello, Python")
# Multiple interpolations
first = "Python"
last = "Developer"
t = t"{first} {last}"
self.assertTStringEqual(
t, ("", " ", ""), [(first, 'first'), (last, 'last')]
)
self.assertEqual(fstring(t), "Python Developer")
# Interpolation with expressions
a = 10
b = 20
t = t"Sum: {a + b}"
self.assertTStringEqual(t, ("Sum: ", ""), [(a + b, "a + b")])
self.assertEqual(fstring(t), "Sum: 30")
# Interpolation with function
def square(x):
return x * x
t = t"Square: {square(5)}"
self.assertTStringEqual(
t, ("Square: ", ""), [(square(5), "square(5)")]
)
self.assertEqual(fstring(t), "Square: 25")
# Test attribute access in expressions
class Person:
def __init__(self, name):
self.name = name
def upper(self):
return self.name.upper()
person = Person("Alice")
t = t"Name: {person.name}"
self.assertTStringEqual(
t, ("Name: ", ""), [(person.name, "person.name")]
)
self.assertEqual(fstring(t), "Name: Alice")
# Test method calls
t = t"Name: {person.upper()}"
self.assertTStringEqual(
t, ("Name: ", ""), [(person.upper(), "person.upper()")]
)
self.assertEqual(fstring(t), "Name: ALICE")
# Test dictionary access
data = {"name": "Bob", "age": 30}
t = t"Name: {data['name']}, Age: {data['age']}"
self.assertTStringEqual(
t, ("Name: ", ", Age: ", ""),
[(data["name"], "data['name']"), (data["age"], "data['age']")],
)
self.assertEqual(fstring(t), "Name: Bob, Age: 30")
def test_format_specifiers(self):
# Test basic format specifiers
value = 3.14159
t = t"Pi: {value:.2f}"
self.assertTStringEqual(
t, ("Pi: ", ""), [(value, "value", None, ".2f")]
)
self.assertEqual(fstring(t), "Pi: 3.14")
def test_conversions(self):
# Test !s conversion (str)
obj = object()
t = t"Object: {obj!s}"
self.assertTStringEqual(t, ("Object: ", ""), [(obj, "obj", "s")])
self.assertEqual(fstring(t), f"Object: {str(obj)}")
# Test !r conversion (repr)
t = t"Data: {obj!r}"
self.assertTStringEqual(t, ("Data: ", ""), [(obj, "obj", "r")])
self.assertEqual(fstring(t), f"Data: {repr(obj)}")
# Test !a conversion (ascii)
text = "Café"
t = t"ASCII: {text!a}"
self.assertTStringEqual(t, ("ASCII: ", ""), [(text, "text", "a")])
self.assertEqual(fstring(t), f"ASCII: {ascii(text)}")
# Test !z conversion (error)
num = 1
with self.assertRaises((SyntaxError, CompileError)):
cy_eval("t'{num!z}'")
def test_debug_specifier(self):
# Test debug specifier
value = 42
t = t"Value: {value=}"
self.assertTStringEqual(
t, ("Value: value=", ""), [(value, "value", "r")]
)
self.assertEqual(fstring(t), "Value: value=42")
# Test debug specifier with format (conversion default to !r)
t = t"Value: {value=:.2f}"
self.assertTStringEqual(
t, ("Value: value=", ""), [(value, "value", None, ".2f")]
)
self.assertEqual(fstring(t), "Value: value=42.00")
# Test debug specifier with conversion
t = t"Value: {value=!s}"
self.assertTStringEqual(
t, ("Value: value=", ""), [(value, "value", "s")]
)
# Test white space in debug specifier
t = t"Value: {value = }"
self.assertTStringEqual(
t, ("Value: value = ", ""), [(value, "value", "r")]
)
self.assertEqual(fstring(t), "Value: value = 42")
def test_raw_tstrings(self):
path = r"C:\Users"
t = rt"{path}\Documents"
self.assertTStringEqual(t, ("", r"\Documents"), [(path, "path")])
self.assertEqual(fstring(t), r"C:\Users\Documents")
# Test alternative prefix
t = tr"{path}\Documents"
self.assertTStringEqual(t, ("", r"\Documents"), [(path, "path")])
def test_template_concatenation(self):
# Test template + template
t1 = t"Hello, "
t2 = t"world"
combined = t1 + t2
self.assertTStringEqual(combined, ("Hello, world",), ())
self.assertEqual(fstring(combined), "Hello, world")
# Test template + string
t1 = t"Hello"
# Note slight modification to message for Cython fallback class
expected_msg = 'can only concatenate [a-z.]*Template ' \
'.*to [a-z.]*Template'
with self.assertRaisesRegex(TypeError, expected_msg):
t1 + ", world"
# Test template + template with interpolation
name = "Python"
t1 = t"Hello, "
t2 = t"{name}"
combined = t1 + t2
self.assertTStringEqual(combined, ("Hello, ", ""), [(name, "name")])
self.assertEqual(fstring(combined), "Hello, Python")
# Test string + template
if hasattr(sys, "pypy_version_info"):
expected_msg = '.*' # The test is fine - the regex doesn't quite match
else:
expected_msg = 'can only concatenate str ' \
'\\(not "[a-z.]*Template"\\) to str'
with self.assertRaisesRegex(TypeError, expected_msg):
"Hello, " + t"{name}"
def test_nested_templates(self):
# Test a template inside another template expression
name = "Python"
inner = t"{name}"
t = t"Language: {inner}"
t_interp = t.interpolations[0]
self.assertEqual(t.strings, ("Language: ", ""))
self.assertEqual(t_interp.value.strings, ("", ""))
self.assertEqual(t_interp.value.interpolations[0].value, name)
self.assertEqual(t_interp.value.interpolations[0].expression, "name")
self.assertEqual(t_interp.value.interpolations[0].conversion, None)
self.assertEqual(t_interp.value.interpolations[0].format_spec, "")
self.assertEqual(t_interp.expression, "inner")
self.assertEqual(t_interp.conversion, None)
self.assertEqual(t_interp.format_spec, "")
def test_syntax_errors(self):
# See also "tests/error/e_tstring*" which actually checks the
# syntax of these.
for case, err in (
("t'", "unterminated t-string literal"),
("t'''", "unterminated triple-quoted t-string literal"),
("t''''", "unterminated triple-quoted t-string literal"),
("t'{", "'{' was never closed"),
("t'{'", "t-string: expecting '}'"),
("t'{a'", "t-string: expecting '}'"),
("t'}'", "t-string: single '}' is not allowed"),
("t'{}'", "t-string: valid expression required before '}'"),
("t'{=x}'", "t-string: valid expression required before '='"),
("t'{!x}'", "t-string: valid expression required before '!'"),
("t'{:x}'", "t-string: valid expression required before ':'"),
("t'{x;y}'", "t-string: expecting '=', or '!', or ':', or '}'"),
("t'{x=y}'", "t-string: expecting '!', or ':', or '}'"),
("t'{x!s!}'", "t-string: expecting ':' or '}'"),
("t'{x!s:'", "t-string: expecting '}', or format specs"),
("t'{x!}'", "t-string: missing conversion character"),
("t'{x=!}'", "t-string: missing conversion character"),
("t'{x!z}'", "t-string: invalid conversion character 'z': "
"expected 's', 'r', or 'a'"),
("t'{lambda:1}'", "t-string: lambda expressions are not allowed "
"without parentheses"),
("t'{x:{;}}'", "t-string: expecting a valid expression after '{'"),
("t'{1:d\n}'", "t-string: newlines are not allowed in format specifiers")
):
with self.subTest(case), self.assertRaisesRegex(SyntaxError, err):
cy_eval(case)
def test_runtime_errors(self):
# Test missing variables
with self.assertRaises((NameError, CompileError)):
cy_eval("t'Hello, {name}'")
def test_literal_concatenation(self):
# Test concatenation of t-string literals
t = t"Hello, " t"world"
self.assertTStringEqual(t, ("Hello, world",), ())
self.assertEqual(fstring(t), "Hello, world")
# Test concatenation with interpolation
name = "Python"
t = t"Hello, " t"{name}"
self.assertTStringEqual(t, ("Hello, ", ""), [(name, "name")])
self.assertEqual(fstring(t), "Hello, Python")
# Test disallowed mix of t-string and string/f-string (incl. bytes)
what = 't'
expected_msg = 'cannot mix t-string literals with string or bytes literals'
for case in (
"t'{what}-string literal' 'str literal'",
"t'{what}-string literal' u'unicode literal'",
"t'{what}-string literal' f'f-string literal'",
"t'{what}-string literal' r'raw string literal'",
"t'{what}-string literal' rf'raw f-string literal'",
"t'{what}-string literal' b'bytes literal'",
"t'{what}-string literal' br'raw bytes literal'",
"'str literal' t'{what}-string literal'",
"u'unicode literal' t'{what}-string literal'",
"f'f-string literal' t'{what}-string literal'",
"r'raw string literal' t'{what}-string literal'",
"rf'raw f-string literal' t'{what}-string literal'",
"b'bytes literal' t'{what}-string literal'",
"br'raw bytes literal' t'{what}-string literal'",
):
with self.subTest(case):
with self.assertRaisesRegex(SyntaxError, expected_msg):
cy_eval(case)
def test_triple_quoted(self):
# Test triple-quoted t-strings
t = t"""
Hello,
world
"""
self.assertTStringEqual(
t, ("\n Hello,\n world\n ",), ()
)
self.assertEqual(fstring(t), "\n Hello,\n world\n ")
# Test triple-quoted with interpolation
name = "Python"
t = t"""
Hello,
{name}
"""
self.assertTStringEqual(
t, ("\n Hello,\n ", "\n "), [(name, "name")]
)
self.assertEqual(fstring(t), "\n Hello,\n Python\n ")
if __name__ == '__main__':
unittest.main()
| TestTString |
python | ZoranPandovski__al-go-rithms | data_structures/Linked_list/Python/merge_K_sorted_Lists.py | {
"start": 851,
"end": 1369
} | class ____:
def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:
res = []
for i in lists:
while i != None:
res.append(i.val)
i = i.next
# print(res)
if res == []:
return None
res.sort()
ans = ListNode(res[0])
result = ans
for i in range(1,len(res)):
k = ListNode(res[i])
ans.next = k
ans = ans.next
return result | Solution |
python | networkx__networkx | examples/subclass/plot_printgraph.py | {
"start": 166,
"end": 2286
} | class ____(Graph):
"""
Example subclass of the Graph class.
Prints activity log to file or standard output.
"""
def __init__(self, data=None, name="", file=None, **attr):
super().__init__(data=data, name=name, **attr)
if file is None:
import sys
self.fh = sys.stdout
else:
self.fh = open(file, "w")
def add_node(self, n, attr_dict=None, **attr):
super().add_node(n, attr_dict=attr_dict, **attr)
self.fh.write(f"Add node: {n}\n")
def add_nodes_from(self, nodes, **attr):
for n in nodes:
self.add_node(n, **attr)
def remove_node(self, n):
super().remove_node(n)
self.fh.write(f"Remove node: {n}\n")
def remove_nodes_from(self, nodes):
for n in nodes:
self.remove_node(n)
def add_edge(self, u, v, attr_dict=None, **attr):
super().add_edge(u, v, attr_dict=attr_dict, **attr)
self.fh.write(f"Add edge: {u}-{v}\n")
def add_edges_from(self, ebunch, attr_dict=None, **attr):
for e in ebunch:
u, v = e[0:2]
self.add_edge(u, v, attr_dict=attr_dict, **attr)
def remove_edge(self, u, v):
super().remove_edge(u, v)
self.fh.write(f"Remove edge: {u}-{v}\n")
def remove_edges_from(self, ebunch):
for e in ebunch:
u, v = e[0:2]
self.remove_edge(u, v)
def clear(self):
super().clear()
self.fh.write("Clear graph\n")
G = PrintGraph()
G.add_node("foo")
G.add_nodes_from("bar", weight=8)
G.remove_node("b")
G.remove_nodes_from("ar")
print("Nodes in G: ", G.nodes(data=True))
G.add_edge(0, 1, weight=10)
print("Edges in G: ", G.edges(data=True))
G.remove_edge(0, 1)
G.add_edges_from(zip(range(3), range(1, 4)), weight=10)
print("Edges in G: ", G.edges(data=True))
G.remove_edges_from(zip(range(3), range(1, 4)))
print("Edges in G: ", G.edges(data=True))
G = PrintGraph()
nx.add_path(G, range(10))
nx.add_star(G, range(9, 13))
pos = nx.spring_layout(G, seed=225) # Seed for reproducible layout
nx.draw(G, pos)
plt.show()
| PrintGraph |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/types.py | {
"start": 26507,
"end": 26639
} | class ____(sqltypes._Binary):
"""MySQL LONGBLOB type, for binary data up to 2^32 bytes."""
__visit_name__ = "LONGBLOB"
| LONGBLOB |
python | allegroai__clearml | clearml/backend_api/services/v2_13/projects.py | {
"start": 105226,
"end": 106364
} | class ____(Request):
"""
Convert company projects to public
:param ids: Ids of the projects to convert
:type ids: Sequence[str]
"""
_service = "projects"
_action = "make_public"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"ids": {
"description": "Ids of the projects to convert",
"items": {"type": "string"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
super(MakePublicRequest, self).__init__(**kwargs)
self.ids = ids
@schema_property("ids")
def ids(self) -> Optional[List[str]]:
return self._property_ids
@ids.setter
def ids(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_ids = None
return
self.assert_isinstance(value, "ids", (list, tuple))
self.assert_isinstance(value, "ids", six.string_types, is_array=True)
self._property_ids = value
| MakePublicRequest |
python | kubernetes-client__python | kubernetes/client/models/v1alpha3_cel_device_selector.py | {
"start": 383,
"end": 8342
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'expression': 'str'
}
attribute_map = {
'expression': 'expression'
}
def __init__(self, expression=None, local_vars_configuration=None): # noqa: E501
"""V1alpha3CELDeviceSelector - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._expression = None
self.discriminator = None
self.expression = expression
@property
def expression(self):
"""Gets the expression of this V1alpha3CELDeviceSelector. # noqa: E501
Expression is a CEL expression which evaluates a single device. It must evaluate to true when the device under consideration satisfies the desired criteria, and false when it does not. Any other result is an error and causes allocation of devices to abort. The expression's input is an object named \"device\", which carries the following properties: - driver (string): the name of the driver which defines this device. - attributes (map[string]object): the device's attributes, grouped by prefix (e.g. device.attributes[\"dra.example.com\"] evaluates to an object with all of the attributes which were prefixed by \"dra.example.com\". - capacity (map[string]object): the device's capacities, grouped by prefix. Example: Consider a device with driver=\"dra.example.com\", which exposes two attributes named \"model\" and \"ext.example.com/family\" and which exposes one capacity named \"modules\". This input to this expression would have the following fields: device.driver device.attributes[\"dra.example.com\"].model device.attributes[\"ext.example.com\"].family device.capacity[\"dra.example.com\"].modules The device.driver field can be used to check for a specific driver, either as a high-level precondition (i.e. you only want to consider devices from this driver) or as part of a multi-clause expression that is meant to consider devices from different drivers. The value type of each attribute is defined by the device definition, and users who write these expressions must consult the documentation for their specific drivers. The value type of each capacity is Quantity. If an unknown prefix is used as a lookup in either device.attributes or device.capacity, an empty map will be returned. Any reference to an unknown field will cause an evaluation error and allocation to abort. A robust expression should check for the existence of attributes before referencing them. 
For ease of use, the cel.bind() function is enabled, and can be used to simplify expressions that access multiple attributes with the same domain. For example: cel.bind(dra, device.attributes[\"dra.example.com\"], dra.someBool && dra.anotherBool) The length of the expression must be smaller or equal to 10 Ki. The cost of evaluating it is also limited based on the estimated number of logical steps. # noqa: E501
:return: The expression of this V1alpha3CELDeviceSelector. # noqa: E501
:rtype: str
"""
return self._expression
@expression.setter
def expression(self, expression):
"""Sets the expression of this V1alpha3CELDeviceSelector.
Expression is a CEL expression which evaluates a single device. It must evaluate to true when the device under consideration satisfies the desired criteria, and false when it does not. Any other result is an error and causes allocation of devices to abort. The expression's input is an object named \"device\", which carries the following properties: - driver (string): the name of the driver which defines this device. - attributes (map[string]object): the device's attributes, grouped by prefix (e.g. device.attributes[\"dra.example.com\"] evaluates to an object with all of the attributes which were prefixed by \"dra.example.com\". - capacity (map[string]object): the device's capacities, grouped by prefix. Example: Consider a device with driver=\"dra.example.com\", which exposes two attributes named \"model\" and \"ext.example.com/family\" and which exposes one capacity named \"modules\". This input to this expression would have the following fields: device.driver device.attributes[\"dra.example.com\"].model device.attributes[\"ext.example.com\"].family device.capacity[\"dra.example.com\"].modules The device.driver field can be used to check for a specific driver, either as a high-level precondition (i.e. you only want to consider devices from this driver) or as part of a multi-clause expression that is meant to consider devices from different drivers. The value type of each attribute is defined by the device definition, and users who write these expressions must consult the documentation for their specific drivers. The value type of each capacity is Quantity. If an unknown prefix is used as a lookup in either device.attributes or device.capacity, an empty map will be returned. Any reference to an unknown field will cause an evaluation error and allocation to abort. A robust expression should check for the existence of attributes before referencing them. 
For ease of use, the cel.bind() function is enabled, and can be used to simplify expressions that access multiple attributes with the same domain. For example: cel.bind(dra, device.attributes[\"dra.example.com\"], dra.someBool && dra.anotherBool) The length of the expression must be smaller or equal to 10 Ki. The cost of evaluating it is also limited based on the estimated number of logical steps. # noqa: E501
:param expression: The expression of this V1alpha3CELDeviceSelector. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and expression is None: # noqa: E501
raise ValueError("Invalid value for `expression`, must not be `None`") # noqa: E501
self._expression = expression
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha3CELDeviceSelector):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha3CELDeviceSelector):
return True
return self.to_dict() != other.to_dict()
| V1alpha3CELDeviceSelector |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/sensors.py | {
"start": 7417,
"end": 7680
} | class ____(graphene.Union):
class Meta:
types = (
GrapheneSensor,
GrapheneSensorNotFoundError,
GrapheneUnauthorizedError,
GraphenePythonError,
)
name = "SensorOrError"
| GrapheneSensorOrError |
python | walkccc__LeetCode | solutions/3315. Construct the Minimum Bitwise Array II/3315.py | {
"start": 0,
"end": 600
} | class ____:
# Same as 3314. Construct the Minimum Bitwise Array I
def minBitwiseArray(self, nums: list[int]) -> list[int]:
return [-1 if num == 2 else num - self._getLeadingOneOfLastGroupOfOnes(num)
for num in nums]
def _getLeadingOneOfLastGroupOfOnes(self, num: int) -> int:
"""
Returns the leading one of the last group of 1s in the binary
representation of num. For example, if num = 0b10111, the leading one of
the last group of 1s is 0b100.
"""
leadingOne = 1
while (num & leadingOne) > 0:
leadingOne <<= 1
return leadingOne >> 1
| Solution |
python | dagster-io__dagster | python_modules/libraries/dagster-airlift/dagster_airlift/core/sensor/sensor_builder.py | {
"start": 2442,
"end": 10566
} | class ____(DagsterUserCodeExecutionError):
"""Error raised when an error occurs in the event transformer function."""
def check_keys_for_asset_keys(
repository_def: RepositoryDefinition, asset_keys: set[AssetKey]
) -> Iterable[AssetCheckKey]:
for assets_def in repository_def.asset_graph.assets_defs:
for check_spec in assets_def.check_specs:
if check_spec.asset_key in asset_keys:
yield check_spec.key
@beta
def build_airflow_polling_sensor(
*,
mapped_assets: Sequence[MappedAsset],
airflow_instance: AirflowInstance,
event_transformer_fn: DagsterEventTransformerFn = default_event_transformer,
minimum_interval_seconds: int = DEFAULT_AIRFLOW_SENSOR_INTERVAL_SECONDS,
default_sensor_status: Optional[DefaultSensorStatus] = None,
) -> SensorDefinition:
"""The constructed sensor polls the Airflow instance for activity, and inserts asset events into Dagster's event log.
The sensor decides which Airflow dags and tasks to monitor by inspecting the metadata of the passed-in Definitions object `mapped_defs`.
The metadata performing this mapping is typically set by calls to `assets_with_dag_mappings` and `assets_with_task_mappings`.
Using the `event_transformer_fn` argument, users can provide a function that transforms the materializations emitted by the sensor.
The expected return type of this function is an iterable of `AssetMaterialization`, `AssetObservation`, or `AssetCheckEvaluation` objects.
Each object is expected to have a metadata key `dagster_airlift.constants.EFFECTIVE_TIMESTAMP_METADATA_KEY` which is a `dagster.TimestampMetadataValue` set.
This allows Dagster to correctly order the materializations in the event stream.
Args:
mapped_defs (Definitions): The `Definitions` object containing assets with metadata mapping them to Airflow dags and tasks.
airflow_instance (AirflowInstance): The Airflow instance to poll for dag runs.
event_transformer_fn (Optional[DagsterEventTransformerFn]): A function that transforms the materializations emitted by the sensor.
minimum_interval_seconds (int): The minimum interval in seconds between sensor runs. Defaults to 1.
Returns:
Definitions: A `Definitions` object containing the constructed sensor.
"""
@sensor(
name=f"{airflow_instance.name}__airflow_dag_status_sensor",
minimum_interval_seconds=minimum_interval_seconds,
default_status=default_sensor_status or DefaultSensorStatus.RUNNING,
# This sensor will only ever execute asset checks and not asset materializations.
asset_selection=AssetSelection.all_asset_checks(),
)
def airflow_dag_sensor(context: SensorEvaluationContext) -> SensorResult:
"""Sensor to report materialization events for each asset as new runs come in."""
context.log.info(f"************Running sensor for {airflow_instance.name}***********")
airflow_data = AirflowDefinitionsData(
airflow_instance=airflow_instance,
resolved_repository=check.not_none(context.repository_def),
)
try:
cursor = (
deserialize_value(context.cursor, AirflowPollingSensorCursor)
if context.cursor
else AirflowPollingSensorCursor()
)
except Exception as e:
context.log.info(f"Failed to interpret cursor. Starting from scratch. Error: {e}")
cursor = AirflowPollingSensorCursor()
current_date = get_current_datetime()
current_dag_offset = cursor.dag_query_offset or 0
end_date_gte = (
cursor.end_date_gte
or (current_date - timedelta(seconds=START_LOOKBACK_SECONDS)).timestamp()
)
end_date_lte = cursor.end_date_lte or current_date.timestamp()
sensor_iter = batch_iter(
context=context,
end_date_gte=end_date_gte,
end_date_lte=end_date_lte,
offset=current_dag_offset,
airflow_data=airflow_data,
)
all_asset_events: list[AssetMaterialization] = []
all_check_keys: set[AssetCheckKey] = set()
latest_offset = current_dag_offset
repository_def = check.not_none(context.repository_def)
while get_current_datetime() - current_date < timedelta(seconds=MAIN_LOOP_TIMEOUT_SECONDS):
batch_result = next(sensor_iter, None)
if batch_result is None:
context.log.info("Received no batch result. Breaking.")
break
all_asset_events.extend(batch_result.asset_events)
all_check_keys.update(
check_keys_for_asset_keys(repository_def, batch_result.all_asset_keys_materialized)
)
latest_offset = batch_result.idx
if batch_result is not None: # pyright: ignore[reportPossiblyUnboundVariable]
new_cursor = AirflowPollingSensorCursor(
end_date_gte=end_date_gte,
end_date_lte=end_date_lte,
dag_query_offset=latest_offset + 1,
)
else:
# We have completed iteration for this range
new_cursor = AirflowPollingSensorCursor(
end_date_gte=end_date_lte,
end_date_lte=None,
dag_query_offset=0,
)
updated_asset_events = _get_transformer_result(
event_transformer_fn=event_transformer_fn,
context=context,
airflow_data=airflow_data,
all_asset_events=all_asset_events,
)
context.update_cursor(serialize_value(new_cursor))
context.log.info(
f"************Exiting sensor for {airflow_data.airflow_instance.name}***********"
)
return SensorResult(
asset_events=sorted_asset_events(updated_asset_events, repository_def),
run_requests=[RunRequest(asset_check_keys=list(all_check_keys))]
if all_check_keys
else None,
)
return airflow_dag_sensor
def sorted_asset_events(
asset_events: Sequence[AssetEvent],
repository_def: RepositoryDefinition,
) -> list[AssetEvent]:
"""Sort materializations by end date and toposort order."""
topo_aks = repository_def.asset_graph.toposorted_asset_keys
materializations_and_timestamps = [
(get_timestamp_from_materialization(mat), mat) for mat in asset_events
]
return [
sorted_event[1]
for sorted_event in sorted(
materializations_and_timestamps, key=lambda x: (x[0], topo_aks.index(x[1].asset_key))
)
]
def _get_transformer_result(
event_transformer_fn: Optional[DagsterEventTransformerFn],
context: SensorEvaluationContext,
airflow_data: AirflowDefinitionsData,
all_asset_events: Sequence[AssetMaterialization],
) -> Sequence[AssetEvent]:
if not event_transformer_fn:
return all_asset_events
with user_code_error_boundary(
AirliftSensorEventTransformerError,
lambda: f"Error occurred during event transformation for {airflow_data.airflow_instance.name}",
):
updated_asset_events = list(event_transformer_fn(context, airflow_data, all_asset_events))
for asset_event in updated_asset_events:
if not isinstance(
asset_event, (AssetMaterialization, AssetObservation, AssetCheckEvaluation)
):
raise DagsterInvariantViolationError(
f"Event transformer function must return AssetMaterialization, AssetObservation, or AssetCheckEvaluation objects. Got {type(asset_event)}."
)
if EFFECTIVE_TIMESTAMP_METADATA_KEY not in asset_event.metadata:
raise DagsterInvariantViolationError(
f"All returned events must have an effective timestamp, but {asset_event} does not. An effective timestamp can be used by setting dagster_airlift.constants.EFFECTIVE_TIMESTAMP_METADATA_KEY with a dagster.TimestampMetadataValue."
)
return updated_asset_events
@record
| AirliftSensorEventTransformerError |
python | django__django | django/core/exceptions.py | {
"start": 647,
"end": 731
} | class ____(Exception):
"""The user did something suspicious"""
| SuspiciousOperation |
python | Textualize__textual | tests/test_freeze.py | {
"start": 246,
"end": 616
} | class ____(App):
def on_mount(self):
self.install_screen(MyScreen(), "myscreen")
self.push_screen("myscreen")
async def test_freeze():
"""Regression test for https://github.com/Textualize/textual/issues/1608"""
app = MyApp()
with pytest.raises(Exception):
async with app.run_test():
raise Exception("never raised")
| MyApp |
python | graphql-python__graphene | graphene/types/tests/test_scalar.py | {
"start": 1733,
"end": 3136
} | class ____:
def test_query(self):
"""
Test that a normal query works.
"""
result = schema.execute("{ optional { int(input: 20) } }")
assert not result.errors
assert result.data == {"optional": {"int": 20}}
def test_optional_input(self):
"""
Test that we can provide a null value to an optional input
"""
result = schema.execute("{ optional { int(input: null) } }")
assert not result.errors
assert result.data == {"optional": {"int": None}}
def test_invalid_input(self):
"""
Test that if an invalid type is provided we get an error
"""
result = schema.execute('{ optional { int(input: "20") } }')
assert result.errors
assert len(result.errors) == 1
assert (
result.errors[0].message == 'Int cannot represent non-integer value: "20"'
)
result = schema.execute('{ optional { int(input: "a") } }')
assert result.errors
assert len(result.errors) == 1
assert result.errors[0].message == 'Int cannot represent non-integer value: "a"'
result = schema.execute("{ optional { int(input: true) } }")
assert result.errors
assert len(result.errors) == 1
assert (
result.errors[0].message == "Int cannot represent non-integer value: true"
)
| TestInt |
python | fluentpython__example-code-2e | 21-async/mojifinder/bottle.py | {
"start": 71817,
"end": 72558
} | class ____(object):
''' This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. '''
name = 'template'
api = 2
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
| TemplatePlugin |
python | crytic__slither | slither/core/declarations/event_contract.py | {
"start": 217,
"end": 721
} | class ____(Event, ContractLevel):
def is_declared_by(self, contract: "Contract") -> bool:
"""
Check if the element is declared by the contract
:param contract:
:return:
"""
return self.contract == contract
@property
def canonical_name(self) -> str:
"""Return the function signature as a str
Returns:
str: contract.func_name(type1,type2)
"""
return self.contract.name + "." + self.full_name
| EventContract |
python | django__django | tests/aggregation_regress/models.py | {
"start": 2796,
"end": 2862
} | class ____(Author):
class Meta:
proxy = True
| AuthorProxy |
python | getsentry__sentry | tests/sentry/incidents/test_charts.py | {
"start": 3482,
"end": 5492
} | class ____(TestCase):
@patch("sentry.charts.backend.generate_chart", return_value="chart-url")
@patch("sentry.incidents.charts.client.get")
def test_eap_alert(self, mock_client_get: MagicMock, mock_generate_chart: MagicMock) -> None:
mock_client_get.return_value.data = {"data": []}
alert_rule = self.create_alert_rule(
query="span.op:pageload",
dataset=Dataset.EventsAnalyticsPlatform,
aggregate="apdex(span.duration, 8000)",
)
incident = self.create_incident(
status=2,
organization=self.organization,
projects=[self.project],
alert_rule=alert_rule,
date_started=timezone.now() - datetime.timedelta(minutes=2),
)
trigger = self.create_alert_rule_trigger(alert_rule, CRITICAL_TRIGGER_LABEL, 100)
self.create_alert_rule_trigger_action(
alert_rule_trigger=trigger, triggered_for_incident=incident
)
alert_rule_serialized_response: AlertRuleSerializerResponse = serialize(
alert_rule, None, AlertRuleSerializer()
)
incident_serialized_response: DetailedIncidentSerializerResponse = serialize(
incident, None, DetailedIncidentSerializer()
)
url = build_metric_alert_chart(
self.organization,
alert_rule_serialized_response=alert_rule_serialized_response,
alert_context=AlertContext.from_alert_rule_incident(alert_rule),
snuba_query=alert_rule.snuba_query,
open_period_context=OpenPeriodContext.from_incident(incident),
selected_incident_serialized=incident_serialized_response,
)
assert url == "chart-url"
mock_client_get.assert_called()
mock_generate_chart.assert_called()
assert mock_client_get.call_args[1]["params"]["dataset"] == "spans"
assert mock_client_get.call_args[1]["params"]["query"] == "span.op:pageload"
| BuildMetricAlertChartTest |
python | doocs__leetcode | solution/2300-2399/2326.Spiral Matrix IV/Solution.py | {
"start": 151,
"end": 731
} | class ____:
def spiralMatrix(self, m: int, n: int, head: Optional[ListNode]) -> List[List[int]]:
ans = [[-1] * n for _ in range(m)]
i = j = k = 0
dirs = (0, 1, 0, -1, 0)
while 1:
ans[i][j] = head.val
head = head.next
if head is None:
break
while 1:
x, y = i + dirs[k], j + dirs[k + 1]
if 0 <= x < m and 0 <= y < n and ans[x][y] == -1:
i, j = x, y
break
k = (k + 1) % 4
return ans
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.