language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | celery__celery | t/unit/app/test_log.py | {
"start": 10490,
"end": 11531
} | class ____(test_default_logger):
def setup_method(self):
logger = self.logger = get_logger('celery.task')
logger.handlers = []
logging.root.manager.loggerDict.pop(logger.name, None)
self.uid = uuid()
@self.app.task(shared=False)
def test_task():
pass
self.get_logger().handlers = []
self.task = test_task
from celery._state import _task_stack
_task_stack.push(test_task)
def teardown_method(self):
from celery._state import _task_stack
_task_stack.pop()
def setup_logger(self, *args, **kwargs):
return self.app.log.setup_task_loggers(*args, **kwargs)
def get_logger(self, *args, **kwargs):
return get_task_logger('test_task_logger')
def test_renaming_base_logger(self):
with pytest.raises(RuntimeError):
get_task_logger('celery')
def test_renaming_task_logger(self):
with pytest.raises(RuntimeError):
get_task_logger('celery.task')
| test_task_logger |
python | PyCQA__pylint | tests/functional/ext/docparams/return/missing_return_doc_Numpy.py | {
"start": 1779,
"end": 2111
} | class ____:
"""test_ignores_return_in_abstract_method_numpy
Example of an abstract method documenting the return type that an
implementation should return."""
@abc.abstractmethod
def foo(self):
"""docstring ...
Returns
-------
int
Ten
"""
return 10
| Foo |
python | kamyu104__LeetCode-Solutions | Python/bitwise-ors-of-subarrays.py | {
"start": 34,
"end": 322
} | class ____(object):
def subarrayBitwiseORs(self, A):
"""
:type A: List[int]
:rtype: int
"""
result, curr = set(), {0}
for i in A:
curr = {i} | {i | j for j in curr}
result |= curr
return len(result)
| Solution |
python | spack__spack | lib/spack/spack/vendor/jinja2/ext.py | {
"start": 22235,
"end": 26721
} | class ____(Extension):
"""A ``{% debug %}`` tag that dumps the available variables,
filters, and tests.
.. code-block:: html+jinja
<pre>{% debug %}</pre>
.. code-block:: text
{'context': {'cycler': <class 'spack.vendor.jinja2.utils.Cycler'>,
...,
'namespace': <class 'spack.vendor.jinja2.utils.Namespace'>},
'filters': ['abs', 'attr', 'batch', 'capitalize', 'center', 'count', 'd',
..., 'urlencode', 'urlize', 'wordcount', 'wordwrap', 'xmlattr'],
'tests': ['!=', '<', '<=', '==', '>', '>=', 'callable', 'defined',
..., 'odd', 'sameas', 'sequence', 'string', 'undefined', 'upper']}
.. versionadded:: 2.11.0
"""
tags = {"debug"}
def parse(self, parser: "Parser") -> nodes.Output:
lineno = parser.stream.expect("name:debug").lineno
context = nodes.ContextReference()
result = self.call_method("_render", [context], lineno=lineno)
return nodes.Output([result], lineno=lineno)
def _render(self, context: Context) -> str:
result = {
"context": context.get_all(),
"filters": sorted(self.environment.filters.keys()),
"tests": sorted(self.environment.tests.keys()),
}
# Set the depth since the intent is to show the top few names.
return pprint.pformat(result, depth=3, compact=True)
def extract_from_ast(
ast: nodes.Template,
gettext_functions: t.Sequence[str] = GETTEXT_FUNCTIONS,
babel_style: bool = True,
) -> t.Iterator[
t.Tuple[int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]]
]:
"""Extract localizable strings from the given template node. Per
default this function returns matches in babel style that means non string
parameters as well as keyword arguments are returned as `None`. This
allows Babel to figure out what you really meant if you are using
gettext functions that allow keyword arguments for placeholder expansion.
If you don't want that behavior set the `babel_style` parameter to `False`
which causes only strings to be returned and parameters are always stored
in tuples. As a consequence invalid gettext calls (calls without a single
string parameter or string parameters after non-string parameters) are
skipped.
This example explains the behavior:
>>> from spack.vendor.jinja2 import Environment
>>> env = Environment()
>>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
>>> list(extract_from_ast(node))
[(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
>>> list(extract_from_ast(node, babel_style=False))
[(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
For every string found this function yields a ``(lineno, function,
message)`` tuple, where:
* ``lineno`` is the number of the line on which the string was found,
* ``function`` is the name of the ``gettext`` function used (if the
string was extracted from embedded Python code), and
* ``message`` is the string, or a tuple of strings for functions
with multiple string arguments.
This extraction function operates on the AST and is because of that unable
to extract any comments. For comment support you have to use the babel
extraction interface or extract comments yourself.
"""
out: t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]
for node in ast.find_all(nodes.Call):
if (
not isinstance(node.node, nodes.Name)
or node.node.name not in gettext_functions
):
continue
strings: t.List[t.Optional[str]] = []
for arg in node.args:
if isinstance(arg, nodes.Const) and isinstance(arg.value, str):
strings.append(arg.value)
else:
strings.append(None)
for _ in node.kwargs:
strings.append(None)
if node.dyn_args is not None:
strings.append(None)
if node.dyn_kwargs is not None:
strings.append(None)
if not babel_style:
out = tuple(x for x in strings if x is not None)
if not out:
continue
else:
if len(strings) == 1:
out = strings[0]
else:
out = tuple(strings)
yield node.lineno, node.node.name, out
| DebugExtension |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/base.py | {
"start": 67371,
"end": 85597
} | class ____(compiler.SQLCompiler):
def visit_to_tsvector_func(self, element, **kw):
return self._assert_pg_ts_ext(element, **kw)
def visit_to_tsquery_func(self, element, **kw):
return self._assert_pg_ts_ext(element, **kw)
def visit_plainto_tsquery_func(self, element, **kw):
return self._assert_pg_ts_ext(element, **kw)
def visit_phraseto_tsquery_func(self, element, **kw):
return self._assert_pg_ts_ext(element, **kw)
def visit_websearch_to_tsquery_func(self, element, **kw):
return self._assert_pg_ts_ext(element, **kw)
def visit_ts_headline_func(self, element, **kw):
return self._assert_pg_ts_ext(element, **kw)
def _assert_pg_ts_ext(self, element, **kw):
if not isinstance(element, _regconfig_fn):
# other options here include trying to rewrite the function
# with the correct types. however, that means we have to
# "un-SQL-ize" the first argument, which can't work in a
# generalized way. Also, parent compiler class has already added
# the incorrect return type to the result map. So let's just
# make sure the function we want is used up front.
raise exc.CompileError(
f'Can\'t compile "{element.name}()" full text search '
f"function construct that does not originate from the "
f'"sqlalchemy.dialects.postgresql" package. '
f'Please ensure "import sqlalchemy.dialects.postgresql" is '
f"called before constructing "
f'"sqlalchemy.func.{element.name}()" to ensure registration '
f"of the correct argument and return types."
)
return f"{element.name}{self.function_argspec(element, **kw)}"
def render_bind_cast(self, type_, dbapi_type, sqltext):
if dbapi_type._type_affinity is sqltypes.String and dbapi_type.length:
# use VARCHAR with no length for VARCHAR cast.
# see #9511
dbapi_type = sqltypes.STRINGTYPE
return f"""{sqltext}::{
self.dialect.type_compiler_instance.process(
dbapi_type, identifier_preparer=self.preparer
)
}"""
def visit_array(self, element, **kw):
if not element.clauses and not element.type.item_type._isnull:
return "ARRAY[]::%s" % element.type.compile(self.dialect)
return "ARRAY[%s]" % self.visit_clauselist(element, **kw)
def visit_slice(self, element, **kw):
return "%s:%s" % (
self.process(element.start, **kw),
self.process(element.stop, **kw),
)
def visit_bitwise_xor_op_binary(self, binary, operator, **kw):
return self._generate_generic_binary(binary, " # ", **kw)
def visit_json_getitem_op_binary(
self, binary, operator, _cast_applied=False, **kw
):
if (
not _cast_applied
and binary.type._type_affinity is not sqltypes.JSON
):
kw["_cast_applied"] = True
return self.process(sql.cast(binary, binary.type), **kw)
kw["eager_grouping"] = True
if (
not _cast_applied
and isinstance(binary.left.type, _json.JSONB)
and self.dialect._supports_jsonb_subscripting
):
# for pg14+JSONB use subscript notation: col['key'] instead
# of col -> 'key'
return "%s[%s]" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
else:
# Fall back to arrow notation for older versions or when cast
# is applied
return self._generate_generic_binary(
binary, " -> " if not _cast_applied else " ->> ", **kw
)
def visit_json_path_getitem_op_binary(
self, binary, operator, _cast_applied=False, **kw
):
if (
not _cast_applied
and binary.type._type_affinity is not sqltypes.JSON
):
kw["_cast_applied"] = True
return self.process(sql.cast(binary, binary.type), **kw)
kw["eager_grouping"] = True
return self._generate_generic_binary(
binary, " #> " if not _cast_applied else " #>> ", **kw
)
def visit_hstore_getitem_op_binary(self, binary, operator, **kw):
kw["eager_grouping"] = True
if self.dialect._supports_jsonb_subscripting:
# use subscript notation: col['key'] instead of col -> 'key'
# For function calls, wrap in parentheses: (func())[key]
left_str = self.process(binary.left, **kw)
if isinstance(binary.left, sql.functions.FunctionElement):
left_str = f"({left_str})"
return "%s[%s]" % (
left_str,
self.process(binary.right, **kw),
)
else:
# Fall back to arrow notation for older versions
return self._generate_generic_binary(binary, " -> ", **kw)
def visit_getitem_binary(self, binary, operator, **kw):
return "%s[%s]" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_aggregate_order_by(self, element, **kw):
return "%s ORDER BY %s" % (
self.process(element.target, **kw),
self.process(element.order_by, **kw),
)
def visit_match_op_binary(self, binary, operator, **kw):
if "postgresql_regconfig" in binary.modifiers:
regconfig = self.render_literal_value(
binary.modifiers["postgresql_regconfig"], sqltypes.STRINGTYPE
)
if regconfig:
return "%s @@ plainto_tsquery(%s, %s)" % (
self.process(binary.left, **kw),
regconfig,
self.process(binary.right, **kw),
)
return "%s @@ plainto_tsquery(%s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_ilike_case_insensitive_operand(self, element, **kw):
return element.element._compiler_dispatch(self, **kw)
def visit_ilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return "%s ILIKE %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape is not None
else ""
)
def visit_not_ilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return "%s NOT ILIKE %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape is not None
else ""
)
def _regexp_match(self, base_op, binary, operator, kw):
flags = binary.modifiers["flags"]
if flags is None:
return self._generate_generic_binary(
binary, " %s " % base_op, **kw
)
if flags == "i":
return self._generate_generic_binary(
binary, " %s* " % base_op, **kw
)
return "%s %s CONCAT('(?', %s, ')', %s)" % (
self.process(binary.left, **kw),
base_op,
self.render_literal_value(flags, sqltypes.STRINGTYPE),
self.process(binary.right, **kw),
)
def visit_regexp_match_op_binary(self, binary, operator, **kw):
return self._regexp_match("~", binary, operator, kw)
def visit_not_regexp_match_op_binary(self, binary, operator, **kw):
return self._regexp_match("!~", binary, operator, kw)
def visit_regexp_replace_op_binary(self, binary, operator, **kw):
string = self.process(binary.left, **kw)
pattern_replace = self.process(binary.right, **kw)
flags = binary.modifiers["flags"]
if flags is None:
return "REGEXP_REPLACE(%s, %s)" % (
string,
pattern_replace,
)
else:
return "REGEXP_REPLACE(%s, %s, %s)" % (
string,
pattern_replace,
self.render_literal_value(flags, sqltypes.STRINGTYPE),
)
def visit_empty_set_expr(self, element_types, **kw):
# cast the empty set to the type we are comparing against. if
# we are comparing against the null type, pick an arbitrary
# datatype for the empty set
return "SELECT %s WHERE 1!=1" % (
", ".join(
"CAST(NULL AS %s)"
% self.dialect.type_compiler_instance.process(
INTEGER() if type_._isnull else type_
)
for type_ in element_types or [INTEGER()]
),
)
def render_literal_value(self, value, type_):
value = super().render_literal_value(value, type_)
if self.dialect._backslash_escapes:
value = value.replace("\\", "\\\\")
return value
def visit_aggregate_strings_func(self, fn, **kw):
return super().visit_aggregate_strings_func(
fn, use_function_name="string_agg", **kw
)
def visit_pow_func(self, fn, **kw):
return f"power{self.function_argspec(fn)}"
def visit_sequence(self, seq, **kw):
return "nextval('%s')" % self.preparer.format_sequence(seq)
def limit_clause(self, select, **kw):
text = ""
if select._limit_clause is not None:
text += " \n LIMIT " + self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
if select._limit_clause is None:
text += "\n LIMIT ALL"
text += " OFFSET " + self.process(select._offset_clause, **kw)
return text
def format_from_hint_text(self, sqltext, table, hint, iscrud):
if hint.upper() != "ONLY":
raise exc.CompileError("Unrecognized hint: %r" % hint)
return "ONLY " + sqltext
def get_select_precolumns(self, select, **kw):
# Do not call super().get_select_precolumns because
# it will warn/raise when distinct on is present
if select._distinct or select._distinct_on:
if select._distinct_on:
return (
"DISTINCT ON ("
+ ", ".join(
[
self.process(col, **kw)
for col in select._distinct_on
]
)
+ ") "
)
else:
return "DISTINCT "
else:
return ""
def visit_postgresql_distinct_on(self, element, **kw):
if self.stack[-1]["selectable"]._distinct_on:
raise exc.CompileError(
"Cannot mix ``select.ext(distinct_on(...))`` and "
"``select.distinct(...)``"
)
if element._distinct_on:
cols = ", ".join(
self.process(col, **kw) for col in element._distinct_on
)
return f"ON ({cols})"
else:
return None
def for_update_clause(self, select, **kw):
if select._for_update_arg.read:
if select._for_update_arg.key_share:
tmp = " FOR KEY SHARE"
else:
tmp = " FOR SHARE"
elif select._for_update_arg.key_share:
tmp = " FOR NO KEY UPDATE"
else:
tmp = " FOR UPDATE"
if select._for_update_arg.of:
tables = util.OrderedSet()
for c in select._for_update_arg.of:
tables.update(sql_util.surface_selectables_only(c))
of_kw = dict(kw)
of_kw.update(ashint=True, use_schema=False)
tmp += " OF " + ", ".join(
self.process(table, **of_kw) for table in tables
)
if select._for_update_arg.nowait:
tmp += " NOWAIT"
if select._for_update_arg.skip_locked:
tmp += " SKIP LOCKED"
return tmp
def visit_substring_func(self, func, **kw):
s = self.process(func.clauses.clauses[0], **kw)
start = self.process(func.clauses.clauses[1], **kw)
if len(func.clauses.clauses) > 2:
length = self.process(func.clauses.clauses[2], **kw)
return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length)
else:
return "SUBSTRING(%s FROM %s)" % (s, start)
def _on_conflict_target(self, clause, **kw):
if clause.constraint_target is not None:
# target may be a name of an Index, UniqueConstraint or
# ExcludeConstraint. While there is a separate
# "max_identifier_length" for indexes, PostgreSQL uses the same
# length for all objects so we can use
# truncate_and_render_constraint_name
target_text = (
"ON CONSTRAINT %s"
% self.preparer.truncate_and_render_constraint_name(
clause.constraint_target
)
)
elif clause.inferred_target_elements is not None:
target_text = "(%s)" % ", ".join(
(
self.preparer.quote(c)
if isinstance(c, str)
else self.process(c, include_table=False, use_schema=False)
)
for c in clause.inferred_target_elements
)
if clause.inferred_target_whereclause is not None:
target_text += " WHERE %s" % self.process(
clause.inferred_target_whereclause,
include_table=False,
use_schema=False,
)
else:
target_text = ""
return target_text
def visit_on_conflict_do_nothing(self, on_conflict, **kw):
target_text = self._on_conflict_target(on_conflict, **kw)
if target_text:
return "ON CONFLICT %s DO NOTHING" % target_text
else:
return "ON CONFLICT DO NOTHING"
def visit_on_conflict_do_update(self, on_conflict, **kw):
clause = on_conflict
target_text = self._on_conflict_target(on_conflict, **kw)
action_set_ops = []
set_parameters = dict(clause.update_values_to_set)
# create a list of column assignment clauses as tuples
insert_statement = self.stack[-1]["selectable"]
cols = insert_statement.table.c
for c in cols:
col_key = c.key
if col_key in set_parameters:
value = set_parameters.pop(col_key)
elif c in set_parameters:
value = set_parameters.pop(c)
else:
continue
assert not coercions._is_literal(value)
if (
isinstance(value, elements.BindParameter)
and value.type._isnull
):
value = value._with_binary_element_type(c.type)
value_text = self.process(value.self_group(), use_schema=False)
key_text = self.preparer.quote(c.name)
action_set_ops.append("%s = %s" % (key_text, value_text))
# check for names that don't match columns
if set_parameters:
util.warn(
"Additional column names not matching "
"any column keys in table '%s': %s"
% (
self.current_executable.table.name,
(", ".join("'%s'" % c for c in set_parameters)),
)
)
for k, v in set_parameters.items():
key_text = (
self.preparer.quote(k)
if isinstance(k, str)
else self.process(k, use_schema=False)
)
value_text = self.process(
coercions.expect(roles.ExpressionElementRole, v),
use_schema=False,
)
action_set_ops.append("%s = %s" % (key_text, value_text))
action_text = ", ".join(action_set_ops)
if clause.update_whereclause is not None:
action_text += " WHERE %s" % self.process(
clause.update_whereclause, include_table=True, use_schema=False
)
return "ON CONFLICT %s DO UPDATE SET %s" % (target_text, action_text)
def update_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
kw["asfrom"] = True
return "FROM " + ", ".join(
t._compiler_dispatch(self, fromhints=from_hints, **kw)
for t in extra_froms
)
def delete_extra_from_clause(
self, delete_stmt, from_table, extra_froms, from_hints, **kw
):
"""Render the DELETE .. USING clause specific to PostgreSQL."""
kw["asfrom"] = True
return "USING " + ", ".join(
t._compiler_dispatch(self, fromhints=from_hints, **kw)
for t in extra_froms
)
def fetch_clause(self, select, **kw):
# pg requires parens for non literal clauses. It's also required for
# bind parameters if a ::type casts is used by the driver (asyncpg),
# so it's easiest to just always add it
text = ""
if select._offset_clause is not None:
text += "\n OFFSET (%s) ROWS" % self.process(
select._offset_clause, **kw
)
if select._fetch_clause is not None:
text += "\n FETCH FIRST (%s)%s ROWS %s" % (
self.process(select._fetch_clause, **kw),
" PERCENT" if select._fetch_clause_options["percent"] else "",
(
"WITH TIES"
if select._fetch_clause_options["with_ties"]
else "ONLY"
),
)
return text
| PGCompiler |
python | urllib3__urllib3 | test/with_dummyserver/test_socketlevel.py | {
"start": 1575,
"end": 2411
} | class ____(SocketDummyServerTestCase):
def test_multi_setcookie(self) -> None:
def multicookie_response_handler(listener: socket.socket) -> None:
sock = listener.accept()[0]
buf = b""
while not buf.endswith(b"\r\n\r\n"):
buf += sock.recv(65536)
sock.send(
b"HTTP/1.1 200 OK\r\n"
b"Set-Cookie: foo=1\r\n"
b"Set-Cookie: bar=1\r\n"
b"\r\n"
)
sock.close()
self._start_server(multicookie_response_handler)
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("GET", "/", retries=0)
assert r.headers == {"set-cookie": "foo=1, bar=1"}
assert r.headers.getlist("set-cookie") == ["foo=1", "bar=1"]
| TestCookies |
python | pydata__xarray | xarray/backends/scipy_.py | {
"start": 1680,
"end": 3474
} | class ____(BackendArray):
def __init__(self, variable_name, datastore):
self.datastore = datastore
self.variable_name = variable_name
array = self.get_variable().data
self.shape = array.shape
self.dtype = np.dtype(array.dtype.kind + str(array.dtype.itemsize))
def get_variable(self, needs_lock=True):
ds = self.datastore._manager.acquire(needs_lock)
return ds.variables[self.variable_name]
def _getitem(self, key):
with self.datastore.lock:
data = self.get_variable(needs_lock=False).data
return data[key]
def __getitem__(self, key):
data = indexing.explicit_indexing_adapter(
key, self.shape, indexing.IndexingSupport.OUTER_1VECTOR, self._getitem
)
# Copy data if the source file is mmapped. This makes things consistent
# with the netCDF4 library by ensuring we can safely read arrays even
# after closing associated files.
copy = self.datastore.ds.use_mmap
# adapt handling of copy-kwarg to numpy 2.0
# see https://github.com/numpy/numpy/issues/25916
# and https://github.com/numpy/numpy/pull/25922
copy = None if HAS_NUMPY_2_0 and copy is False else copy
return np.array(data, dtype=self.dtype, copy=copy)
def __setitem__(self, key, value):
with self.datastore.lock:
data = self.get_variable(needs_lock=False)
try:
data[key] = value
except TypeError:
if key is Ellipsis:
# workaround for GH: scipy/scipy#6880
data[:] = value
else:
raise
# TODO: Make the scipy import lazy again after upstreaming these fixes.
| ScipyArrayWrapper |
python | django__django | tests/migrations/migrations_test_apps/migrated_unapplied_app/models.py | {
"start": 31,
"end": 347
} | class ____(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=255)
slug = models.SlugField(null=True)
age = models.IntegerField(default=0)
silly_field = models.BooleanField(default=False)
class Meta:
app_label = "migrated_unapplied_app"
| OtherAuthor |
python | pydantic__pydantic | tests/test_discriminated_union.py | {
"start": 13931,
"end": 85775
} | class ____(str, Enum):
pass
ENUM_TEST_CASES = [
pytest.param(Enum, {'a': 1, 'b': 2}),
pytest.param(Enum, {'a': 'v_a', 'b': 'v_b'}),
(FooIntEnum, {'a': 1, 'b': 2}),
(IntEnum, {'a': 1, 'b': 2}),
(FooStrEnum, {'a': 'v_a', 'b': 'v_b'}),
]
if sys.version_info >= (3, 11):
from enum import StrEnum
ENUM_TEST_CASES.append((StrEnum, {'a': 'v_a', 'b': 'v_b'}))
@pytest.mark.parametrize('base_class,choices', ENUM_TEST_CASES)
def test_discriminated_union_enum(base_class, choices):
EnumValue = base_class('EnumValue', choices)
class A(BaseModel):
m: Literal[EnumValue.a]
class B(BaseModel):
m: Literal[EnumValue.b]
class Top(BaseModel):
sub: Union[A, B] = Field(discriminator='m')
assert isinstance(Top.model_validate({'sub': {'m': EnumValue.b}}).sub, B)
if isinstance(EnumValue.b, (int, str)):
assert isinstance(Top.model_validate({'sub': {'m': EnumValue.b.value}}).sub, B)
with pytest.raises(ValidationError) as exc_info:
Top.model_validate({'sub': {'m': 3}})
expected_tags = f'{EnumValue.a!r}, {EnumValue.b!r}'
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=False) == [
{
'type': 'union_tag_invalid',
'loc': ('sub',),
'msg': f"Input tag '3' found using 'm' does not match any of the expected tags: {expected_tags}",
'input': {'m': 3},
'ctx': {'discriminator': "'m'", 'tag': '3', 'expected_tags': expected_tags},
}
]
def test_alias_different():
class Cat(BaseModel):
pet_type: Literal['cat'] = Field(alias='U')
c: str
class Dog(BaseModel):
pet_type: Literal['dog'] = Field(alias='T')
d: str
with pytest.raises(TypeError, match=re.escape("Aliases for discriminator 'pet_type' must be the same (got T, U)")):
class Model(BaseModel):
pet: Union[Cat, Dog] = Field(discriminator='pet_type')
def test_alias_same():
class Cat(BaseModel):
pet_type: Literal['cat'] = Field(alias='typeOfPet')
c: str
class Dog(BaseModel):
pet_type: Literal['dog'] = Field(alias='typeOfPet')
d: str
class Model(BaseModel):
pet: Union[Cat, Dog] = Field(discriminator='pet_type')
assert Model(**{'pet': {'typeOfPet': 'dog', 'd': 'milou'}}).pet.pet_type == 'dog'
def test_nested():
class Cat(BaseModel):
pet_type: Literal['cat']
name: str
class Dog(BaseModel):
pet_type: Literal['dog']
name: str
CommonPet = Annotated[Union[Cat, Dog], Field(discriminator='pet_type')]
class Lizard(BaseModel):
pet_type: Literal['reptile', 'lizard']
name: str
class Model(BaseModel):
pet: Union[CommonPet, Lizard] = Field(discriminator='pet_type')
n: int
assert isinstance(Model(**{'pet': {'pet_type': 'dog', 'name': 'Milou'}, 'n': 5}).pet, Dog)
def test_generic():
T = TypeVar('T')
class Success(BaseModel, Generic[T]):
type: Literal['Success'] = 'Success'
data: T
class Failure(BaseModel):
type: Literal['Failure'] = 'Failure'
error_message: str
class Container(BaseModel, Generic[T]):
result: Union[Success[T], Failure] = Field(discriminator='type')
with pytest.raises(ValidationError, match="Unable to extract tag using discriminator 'type'"):
Container[str].model_validate({'result': {}})
with pytest.raises(
ValidationError,
match=re.escape(
"Input tag 'Other' found using 'type' does not match any of the expected tags: 'Success', 'Failure'"
),
):
Container[str].model_validate({'result': {'type': 'Other'}})
with pytest.raises(ValidationError, match=r'Container\[str\]\nresult\.Success\.data') as exc_info:
Container[str].model_validate({'result': {'type': 'Success'}})
assert exc_info.value.errors(include_url=False) == [
{'input': {'type': 'Success'}, 'loc': ('result', 'Success', 'data'), 'msg': 'Field required', 'type': 'missing'}
]
# invalid types error
with pytest.raises(ValidationError) as exc_info:
Container[str].model_validate({'result': {'type': 'Success', 'data': 1}})
assert exc_info.value.errors(include_url=False) == [
{
'input': 1,
'loc': ('result', 'Success', 'data'),
'msg': 'Input should be a valid string',
'type': 'string_type',
}
]
assert Container[str].model_validate({'result': {'type': 'Success', 'data': '1'}}).result.data == '1'
def test_optional_union():
class Cat(BaseModel):
pet_type: Literal['cat']
name: str
class Dog(BaseModel):
pet_type: Literal['dog']
name: str
class Pet(BaseModel):
pet: Optional[Union[Cat, Dog]] = Field(discriminator='pet_type')
assert Pet(pet={'pet_type': 'cat', 'name': 'Milo'}).model_dump() == {'pet': {'name': 'Milo', 'pet_type': 'cat'}}
assert Pet(pet={'pet_type': 'dog', 'name': 'Otis'}).model_dump() == {'pet': {'name': 'Otis', 'pet_type': 'dog'}}
assert Pet(pet=None).model_dump() == {'pet': None}
with pytest.raises(ValidationError) as exc_info:
Pet()
assert exc_info.value.errors(include_url=False) == [
{'input': {}, 'loc': ('pet',), 'msg': 'Field required', 'type': 'missing'}
]
with pytest.raises(ValidationError) as exc_info:
Pet(pet={'name': 'Benji'})
assert exc_info.value.errors(include_url=False) == [
{
'ctx': {'discriminator': "'pet_type'"},
'input': {'name': 'Benji'},
'loc': ('pet',),
'msg': "Unable to extract tag using discriminator 'pet_type'",
'type': 'union_tag_not_found',
}
]
with pytest.raises(ValidationError) as exc_info:
Pet(pet={'pet_type': 'lizard'})
assert exc_info.value.errors(include_url=False) == [
{
'ctx': {'discriminator': "'pet_type'", 'expected_tags': "'cat', 'dog'", 'tag': 'lizard'},
'input': {'pet_type': 'lizard'},
'loc': ('pet',),
'msg': "Input tag 'lizard' found using 'pet_type' does not match any of the expected tags: 'cat', 'dog'",
'type': 'union_tag_invalid',
}
]
def test_optional_union_with_defaults():
class Cat(BaseModel):
pet_type: Literal['cat'] = 'cat'
name: str
class Dog(BaseModel):
pet_type: Literal['dog'] = 'dog'
name: str
class Pet(BaseModel):
pet: Optional[Union[Cat, Dog]] = Field(default=None, discriminator='pet_type')
assert Pet(pet={'pet_type': 'cat', 'name': 'Milo'}).model_dump() == {'pet': {'name': 'Milo', 'pet_type': 'cat'}}
assert Pet(pet={'pet_type': 'dog', 'name': 'Otis'}).model_dump() == {'pet': {'name': 'Otis', 'pet_type': 'dog'}}
assert Pet(pet=None).model_dump() == {'pet': None}
assert Pet().model_dump() == {'pet': None}
with pytest.raises(ValidationError) as exc_info:
Pet(pet={'name': 'Benji'})
assert exc_info.value.errors(include_url=False) == [
{
'ctx': {'discriminator': "'pet_type'"},
'input': {'name': 'Benji'},
'loc': ('pet',),
'msg': "Unable to extract tag using discriminator 'pet_type'",
'type': 'union_tag_not_found',
}
]
with pytest.raises(ValidationError) as exc_info:
Pet(pet={'pet_type': 'lizard'})
assert exc_info.value.errors(include_url=False) == [
{
'ctx': {'discriminator': "'pet_type'", 'expected_tags': "'cat', 'dog'", 'tag': 'lizard'},
'input': {'pet_type': 'lizard'},
'loc': ('pet',),
'msg': "Input tag 'lizard' found using 'pet_type' does not match any of the expected tags: 'cat', 'dog'",
'type': 'union_tag_invalid',
}
]
def test_aliases_matching_is_not_sufficient() -> None:
class Case1(BaseModel):
kind_one: Literal['1'] = Field(alias='kind')
class Case2(BaseModel):
kind_two: Literal['2'] = Field(alias='kind')
with pytest.raises(PydanticUserError, match="Model 'Case1' needs a discriminator field for key 'kind'"):
class TaggedParent(BaseModel):
tagged: Union[Case1, Case2] = Field(discriminator='kind')
def test_nested_optional_unions() -> None:
class Cat(BaseModel):
pet_type: Literal['cat'] = 'cat'
class Dog(BaseModel):
pet_type: Literal['dog'] = 'dog'
class Lizard(BaseModel):
pet_type: Literal['lizard', 'reptile'] = 'lizard'
MaybeCatDog = Annotated[Optional[Union[Cat, Dog]], Field(discriminator='pet_type')]
MaybeDogLizard = Annotated[Union[Dog, Lizard, None], Field(discriminator='pet_type')]
class Pet(BaseModel):
pet: Union[MaybeCatDog, MaybeDogLizard] = Field(discriminator='pet_type')
Pet.model_validate({'pet': {'pet_type': 'dog'}})
Pet.model_validate({'pet': {'pet_type': 'cat'}})
Pet.model_validate({'pet': {'pet_type': 'lizard'}})
Pet.model_validate({'pet': {'pet_type': 'reptile'}})
Pet.model_validate({'pet': None})
with pytest.raises(ValidationError) as exc_info:
Pet.model_validate({'pet': {'pet_type': None}})
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=False) == [
{
'type': 'union_tag_invalid',
'loc': ('pet',),
'msg': "Input tag 'None' found using 'pet_type' does not match any of the expected tags: 'cat', 'dog', 'lizard', 'reptile'",
'input': {'pet_type': None},
'ctx': {'discriminator': "'pet_type'", 'tag': 'None', 'expected_tags': "'cat', 'dog', 'lizard', 'reptile'"},
}
]
with pytest.raises(ValidationError) as exc_info:
Pet.model_validate({'pet': {'pet_type': 'fox'}})
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=False) == [
{
'type': 'union_tag_invalid',
'loc': ('pet',),
'msg': "Input tag 'fox' found using 'pet_type' does not match any of the expected tags: 'cat', 'dog', 'lizard', 'reptile'",
'input': {'pet_type': 'fox'},
'ctx': {'discriminator': "'pet_type'", 'tag': 'fox', 'expected_tags': "'cat', 'dog', 'lizard', 'reptile'"},
}
]
def test_nested_discriminated_union() -> None:
class Cat(BaseModel):
pet_type: Literal['cat', 'CAT']
class Dog(BaseModel):
pet_type: Literal['dog', 'DOG']
class Lizard(BaseModel):
pet_type: Literal['lizard', 'LIZARD']
CatDog = Annotated[Union[Cat, Dog], Field(discriminator='pet_type')]
CatDogLizard = Annotated[Union[CatDog, Lizard], Field(discriminator='pet_type')]
class Pet(BaseModel):
pet: CatDogLizard
Pet.model_validate({'pet': {'pet_type': 'dog'}})
Pet.model_validate({'pet': {'pet_type': 'cat'}})
Pet.model_validate({'pet': {'pet_type': 'lizard'}})
with pytest.raises(ValidationError) as exc_info:
Pet.model_validate({'pet': {'pet_type': 'reptile'}})
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=False) == [
{
'type': 'union_tag_invalid',
'loc': ('pet',),
'msg': "Input tag 'reptile' found using 'pet_type' does not match any of the expected tags: 'cat', 'CAT', 'dog', 'DOG', 'lizard', 'LIZARD'",
'input': {'pet_type': 'reptile'},
'ctx': {
'discriminator': "'pet_type'",
'tag': 'reptile',
'expected_tags': "'cat', 'CAT', 'dog', 'DOG', 'lizard', 'LIZARD'",
},
}
]
def test_unions_of_optionals() -> None:
    """Optional/None-bearing members are allowed in a discriminated union:
    the union as a whole becomes nullable and ``None`` passes straight through,
    while tagged dict inputs are dispatched by the (aliased) discriminator."""
    class Cat(BaseModel):
        pet_type: Literal['cat'] = Field(alias='typeOfPet')
        c: str
    class Dog(BaseModel):
        pet_type: Literal['dog'] = Field(alias='typeOfPet')
        d: str
    class Lizard(BaseModel):
        pet_type: Literal['lizard'] = Field(alias='typeOfPet')
    OptionalCat = Annotated[Union[Cat, None], 'some annotation']
    OptionalDogLizard = Annotated[Optional[Union[Dog, Lizard]], 'some other annotation']
    class Model(BaseModel):
        maybe_pet: Union[OptionalCat, OptionalDogLizard] = Field(discriminator='pet_type')
    # None bypasses the tagged union entirely.
    assert Model(maybe_pet=None).maybe_pet is None
    dog = Model(maybe_pet={'typeOfPet': 'dog', 'd': 'milou'}).maybe_pet
    assert dog.pet_type == 'dog'
    lizard = Model(maybe_pet={'typeOfPet': 'lizard'}).maybe_pet
    assert lizard.pet_type == 'lizard'
def test_union_discriminator_literals() -> None:
    """A union of Literal types for the discriminator field contributes each
    literal as its own tag; matching is case-sensitive, so 'Cat' is rejected
    even though 'cat' and 'CAT' are both accepted."""
    class Cat(BaseModel):
        pet_type: Union[Literal['cat'], Literal['CAT']] = Field(alias='typeOfPet')
    class Dog(BaseModel):
        pet_type: Literal['dog'] = Field(alias='typeOfPet')
    class Model(BaseModel):
        pet: Union[Cat, Dog] = Field(discriminator='pet_type')
    assert Model(**{'pet': {'typeOfPet': 'dog'}}).pet.pet_type == 'dog'
    assert Model(**{'pet': {'typeOfPet': 'cat'}}).pet.pet_type == 'cat'
    assert Model(**{'pet': {'typeOfPet': 'CAT'}}).pet.pet_type == 'CAT'
    with pytest.raises(ValidationError) as exc_info:
        Model(**{'pet': {'typeOfPet': 'Cat'}})
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'union_tag_invalid',
            'loc': ('pet',),
            'msg': "Input tag 'Cat' found using 'pet_type' | 'typeOfPet' does not match any of the expected tags: 'cat', 'CAT', 'dog'",
            'input': {'typeOfPet': 'Cat'},
            'ctx': {'discriminator': "'pet_type' | 'typeOfPet'", 'tag': 'Cat', 'expected_tags': "'cat', 'CAT', 'dog'"},
        }
    ]
def test_none_schema() -> None:
    """A ``none`` schema inside the union makes the resulting tagged union
    nullable: None validates directly, while dict inputs are dispatched by
    the 'kind' tag."""
    cat_fields = {'kind': core_schema.typed_dict_field(core_schema.literal_schema(['cat']))}
    dog_fields = {'kind': core_schema.typed_dict_field(core_schema.literal_schema(['dog']))}
    cat = core_schema.typed_dict_schema(cat_fields)
    dog = core_schema.typed_dict_schema(dog_fields)
    schema = core_schema.union_schema([cat, dog, core_schema.none_schema()])
    schema = apply_discriminator(schema, 'kind')
    validator = SchemaValidator(schema)
    assert validator.validate_python({'kind': 'cat'})['kind'] == 'cat'
    assert validator.validate_python({'kind': 'dog'})['kind'] == 'dog'
    assert validator.validate_python(None) is None
    with pytest.raises(ValidationError) as exc_info:
        validator.validate_python({'kind': 'lizard'})
    assert exc_info.value.errors(include_url=False) == [
        {
            'ctx': {'discriminator': "'kind'", 'expected_tags': "'cat', 'dog'", 'tag': 'lizard'},
            'input': {'kind': 'lizard'},
            'loc': (),
            'msg': "Input tag 'lizard' found using 'kind' does not match any of the expected tags: 'cat', 'dog'",
            'type': 'union_tag_invalid',
        }
    ]
def test_nested_unwrapping() -> None:
    """apply_discriminator unwraps arbitrarily nested nullable/definitions
    wrappers around the union; the repeated nullable layers collapse into a
    single nullable tagged union."""
    cat_fields = {'kind': core_schema.typed_dict_field(core_schema.literal_schema(['cat']))}
    dog_fields = {'kind': core_schema.typed_dict_field(core_schema.literal_schema(['dog']))}
    cat = core_schema.typed_dict_schema(cat_fields)
    dog = core_schema.typed_dict_schema(dog_fields)
    schema = core_schema.union_schema([cat, dog])
    # Pile on redundant wrappers; all of them must be seen through.
    for _ in range(3):
        schema = core_schema.nullable_schema(schema)
        schema = core_schema.nullable_schema(schema)
        schema = core_schema.definitions_schema(schema, [])
        schema = core_schema.definitions_schema(schema, [])
    schema = apply_discriminator(schema, 'kind')
    validator = SchemaValidator(schema)
    assert validator.validate_python({'kind': 'cat'})['kind'] == 'cat'
    assert validator.validate_python({'kind': 'dog'})['kind'] == 'dog'
    assert validator.validate_python(None) is None
    with pytest.raises(ValidationError) as exc_info:
        validator.validate_python({'kind': 'lizard'})
    assert exc_info.value.errors(include_url=False) == [
        {
            'ctx': {'discriminator': "'kind'", 'expected_tags': "'cat', 'dog'", 'tag': 'lizard'},
            'input': {'kind': 'lizard'},
            'loc': (),
            'msg': "Input tag 'lizard' found using 'kind' does not match any of the expected tags: 'cat', 'dog'",
            'type': 'union_tag_invalid',
        }
    ]
def test_distinct_choices() -> None:
    """A single discriminator value may not map to more than one union member:
    here 'dog' is claimed by both models, which is a TypeError at class build."""
    class Cat(BaseModel):
        pet_type: Literal['cat', 'dog'] = Field(alias='typeOfPet')
    class Dog(BaseModel):
        pet_type: Literal['dog'] = Field(alias='typeOfPet')
    expected_message = "Value 'dog' for discriminator 'pet_type' mapped to multiple choices"
    with pytest.raises(TypeError, match=expected_message):
        class Model(BaseModel):
            pet: Union[Cat, Dog] = Field(discriminator='pet_type')
def test_invalid_discriminated_union_type() -> None:
    """A plain ``str`` member cannot take part in a discriminated union; the
    error is raised eagerly while the model class is being built."""
    class Cat(BaseModel):
        pet_type: Literal['cat'] = Field(alias='typeOfPet')
    class Dog(BaseModel):
        pet_type: Literal['dog'] = Field(alias='typeOfPet')
    expected_message = "The core schema type 'str' is not a valid discriminated union variant."
    with pytest.raises(TypeError, match=expected_message):
        class Model(BaseModel):
            pet: Union[Cat, Dog, str] = Field(discriminator='pet_type')
def test_invalid_list_discriminated_union_type():
    """Applying a discriminator to a ``list`` type (instead of to the union
    inside the list) produces a dedicated, actionable error message."""
    with pytest.raises(
        TypeError,
        match=re.escape(
            "The core schema type 'list' is not a valid discriminated union variant. "
            'If you are making use of a list of union types, make sure the discriminator is applied to the '
            'union type and not the list (e.g. `list[Annotated[<T> | <U>, Field(discriminator=...)]]`).'
        ),
    ):
        class Model(BaseModel):
            # Note: `int`/`str` is invalid but we just want to test the `list` error message:
            pets: list[Union[int, str]] = Field(discriminator='pet_type')
def test_invalid_alias() -> None:
    """A discriminator field carrying a multi-choice validation alias cannot
    be used for tagged-union dispatch and is rejected with a TypeError."""
    cat = core_schema.typed_dict_schema(
        {'kind': core_schema.typed_dict_field(core_schema.literal_schema(['cat']), validation_alias=['cat', 'CAT'])}
    )
    dog = core_schema.typed_dict_schema(
        {'kind': core_schema.typed_dict_field(core_schema.literal_schema(['dog']))}
    )
    union = core_schema.union_schema([cat, dog])
    with pytest.raises(TypeError, match=re.escape("Alias ['cat', 'CAT'] is not supported in a discriminated union")):
        apply_discriminator(union, 'kind')
def test_invalid_discriminator_type() -> None:
    """Each member's discriminator field must be a ``Literal`` schema; plain
    int/str schemas are rejected."""
    bad_cat = core_schema.typed_dict_schema({'kind': core_schema.typed_dict_field(core_schema.int_schema())})
    bad_dog = core_schema.typed_dict_schema({'kind': core_schema.typed_dict_field(core_schema.str_schema())})
    with pytest.raises(TypeError, match=re.escape("TypedDict needs field 'kind' to be of type `Literal`")):
        apply_discriminator(core_schema.union_schema([bad_cat, bad_dog]), 'kind')
def test_missing_discriminator_field() -> None:
    """Every union member must actually define the discriminator key; a member
    without it fails with a TypeError naming the missing key."""
    cat = core_schema.typed_dict_schema({'kind': core_schema.typed_dict_field(core_schema.int_schema())})
    dog = core_schema.typed_dict_schema({})  # no 'kind' field at all
    with pytest.raises(TypeError, match=re.escape("TypedDict needs a discriminator field for key 'kind'")):
        apply_discriminator(core_schema.union_schema([dog, cat]), 'kind')
def test_wrap_function_schema() -> None:
    """A function-wrap schema around a member is looked through to find its
    discriminator, and the wrap schema itself is preserved as the tagged-union
    choice for that tag."""
    cat_fields = {'kind': core_schema.typed_dict_field(core_schema.literal_schema(['cat']))}
    dog_fields = {'kind': core_schema.typed_dict_field(core_schema.literal_schema(['dog']))}
    cat = core_schema.with_info_wrap_validator_function(lambda x, y, z: None, core_schema.typed_dict_schema(cat_fields))
    dog = core_schema.typed_dict_schema(dog_fields)
    schema = core_schema.union_schema([cat, dog])
    assert apply_discriminator(schema, 'kind') == {
        'choices': {
            'cat': {
                'function': {
                    'type': 'with-info',
                    'function': HasRepr(IsStr(regex=r'<function [a-z_]*\.<locals>\.<lambda> at 0x[0-9a-fA-F]+>')),
                },
                'schema': {
                    'fields': {
                        'kind': {'schema': {'expected': ['cat'], 'type': 'literal'}, 'type': 'typed-dict-field'}
                    },
                    'type': 'typed-dict',
                },
                'type': 'function-wrap',
            },
            'dog': {
                'fields': {'kind': {'schema': {'expected': ['dog'], 'type': 'literal'}, 'type': 'typed-dict-field'}},
                'type': 'typed-dict',
            },
        },
        'discriminator': 'kind',
        'from_attributes': True,
        'strict': False,
        'type': 'tagged-union',
    }
def test_plain_function_schema_is_invalid() -> None:
    """Unlike wrap validators, a plain validator replaces the inner schema
    entirely, so there is no discriminator field to inspect -- building the
    model raises a TypeError."""
    with pytest.raises(
        TypeError,
        match="The core schema type 'function-plain' is not a valid discriminated union variant.",
    ):
        class Model(BaseModel):
            a: Union[Annotated[int, PlainValidator(lambda v: v)], str] = Field(discriminator='kind')
def test_invalid_str_choice_discriminator_values() -> None:
    """A wrap validator around a plain ``str`` member hides nothing useful:
    the underlying 'str' schema is still not a valid tagged-union variant."""
    cat = core_schema.typed_dict_schema({'kind': core_schema.typed_dict_field(core_schema.literal_schema(['cat']))})
    dog = core_schema.str_schema()
    schema = core_schema.union_schema(
        [
            cat,
            # NOTE: Wrapping the union with a validator results in failure to more thoroughly decompose the tagged
            # union. I think this would be difficult to avoid in the general case, and I would suggest that we not
            # attempt to do more than this until presented with scenarios where it is helpful/necessary.
            core_schema.with_info_wrap_validator_function(lambda x, y, z: x, dog),
        ]
    )
    with pytest.raises(TypeError, match="The core schema type 'str' is not a valid discriminated union variant."):
        apply_discriminator(schema, 'kind')
def test_lax_or_strict_definitions() -> None:
    """A lax-or-strict member whose lax and strict sides carry different tags
    ('DOG' vs 'dog') is registered under BOTH tags, and nested definitions
    are hoisted out of the choice schema."""
    cat = core_schema.typed_dict_schema({'kind': core_schema.typed_dict_field(core_schema.literal_schema(['cat']))})
    lax_dog = core_schema.typed_dict_schema({'kind': core_schema.typed_dict_field(core_schema.literal_schema(['DOG']))})
    strict_dog = core_schema.definitions_schema(
        core_schema.typed_dict_schema({'kind': core_schema.typed_dict_field(core_schema.literal_schema(['dog']))}),
        [core_schema.int_schema(ref='my-int-definition')],
    )
    dog = core_schema.definitions_schema(
        core_schema.lax_or_strict_schema(lax_schema=lax_dog, strict_schema=strict_dog),
        [core_schema.str_schema(ref='my-str-definition')],
    )
    discriminated_schema = apply_discriminator(core_schema.union_schema([cat, dog]), 'kind')
    # insert_assert(discriminated_schema)
    assert discriminated_schema == {
        'type': 'tagged-union',
        'choices': {
            'cat': {
                'type': 'typed-dict',
                'fields': {'kind': {'type': 'typed-dict-field', 'schema': {'type': 'literal', 'expected': ['cat']}}},
            },
            'DOG': {
                'type': 'lax-or-strict',
                'lax_schema': {
                    'type': 'typed-dict',
                    'fields': {
                        'kind': {'type': 'typed-dict-field', 'schema': {'type': 'literal', 'expected': ['DOG']}}
                    },
                },
                'strict_schema': {
                    'type': 'definitions',
                    'schema': {
                        'type': 'typed-dict',
                        'fields': {
                            'kind': {'type': 'typed-dict-field', 'schema': {'type': 'literal', 'expected': ['dog']}}
                        },
                    },
                    'definitions': [{'type': 'int', 'ref': 'my-int-definition'}],
                },
            },
            'dog': {
                'type': 'lax-or-strict',
                'lax_schema': {
                    'type': 'typed-dict',
                    'fields': {
                        'kind': {'type': 'typed-dict-field', 'schema': {'type': 'literal', 'expected': ['DOG']}}
                    },
                },
                'strict_schema': {
                    'type': 'definitions',
                    'schema': {
                        'type': 'typed-dict',
                        'fields': {
                            'kind': {'type': 'typed-dict-field', 'schema': {'type': 'literal', 'expected': ['dog']}}
                        },
                    },
                    'definitions': [{'type': 'int', 'ref': 'my-int-definition'}],
                },
            },
        },
        'discriminator': 'kind',
        'strict': False,
        'from_attributes': True,
    }
def test_wrapped_nullable_union() -> None:
    """A wrap-validated nullable sub-union inside the outer union: nullability
    is hoisted to the top level, and the whole wrapped sub-union is kept (not
    decomposed) as the choice for each of its tags ('cat' and 'dog')."""
    cat = core_schema.typed_dict_schema({'kind': core_schema.typed_dict_field(core_schema.literal_schema(['cat']))})
    dog = core_schema.typed_dict_schema({'kind': core_schema.typed_dict_field(core_schema.literal_schema(['dog']))})
    ant = core_schema.typed_dict_schema({'kind': core_schema.typed_dict_field(core_schema.literal_schema(['ant']))})
    schema = core_schema.union_schema(
        [
            ant,
            # NOTE: Wrapping the union with a validator results in failure to more thoroughly decompose the tagged
            # union. I think this would be difficult to avoid in the general case, and I would suggest that we not
            # attempt to do more than this until presented with scenarios where it is helpful/necessary.
            core_schema.with_info_wrap_validator_function(
                lambda x, y, z: x, core_schema.nullable_schema(core_schema.union_schema([cat, dog]))
            ),
        ]
    )
    discriminated_schema = apply_discriminator(schema, 'kind')
    validator = SchemaValidator(discriminated_schema)
    assert validator.validate_python({'kind': 'ant'})['kind'] == 'ant'
    assert validator.validate_python({'kind': 'cat'})['kind'] == 'cat'
    assert validator.validate_python(None) is None
    with pytest.raises(ValidationError) as exc_info:
        validator.validate_python({'kind': 'armadillo'})
    assert exc_info.value.errors(include_url=False) == [
        {
            'ctx': {'discriminator': "'kind'", 'expected_tags': "'ant', 'cat', 'dog'", 'tag': 'armadillo'},
            'input': {'kind': 'armadillo'},
            'loc': (),
            'msg': "Input tag 'armadillo' found using 'kind' does not match any of the "
            "expected tags: 'ant', 'cat', 'dog'",
            'type': 'union_tag_invalid',
        }
    ]
    # insert_assert(discriminated_schema)
    assert discriminated_schema == {
        'type': 'nullable',
        'schema': {
            'type': 'tagged-union',
            'choices': {
                'ant': {
                    'type': 'typed-dict',
                    'fields': {
                        'kind': {'type': 'typed-dict-field', 'schema': {'type': 'literal', 'expected': ['ant']}}
                    },
                },
                'cat': {
                    'type': 'function-wrap',
                    'function': {
                        'type': 'with-info',
                        'function': HasRepr(IsStr(regex=r'<function [a-z_]*\.<locals>\.<lambda> at 0x[0-9a-fA-F]+>')),
                    },
                    'schema': {
                        'type': 'nullable',
                        'schema': {
                            'type': 'union',
                            'choices': [
                                {
                                    'type': 'typed-dict',
                                    'fields': {
                                        'kind': {
                                            'type': 'typed-dict-field',
                                            'schema': {'type': 'literal', 'expected': ['cat']},
                                        }
                                    },
                                },
                                {
                                    'type': 'typed-dict',
                                    'fields': {
                                        'kind': {
                                            'type': 'typed-dict-field',
                                            'schema': {'type': 'literal', 'expected': ['dog']},
                                        }
                                    },
                                },
                            ],
                        },
                    },
                },
                'dog': {
                    'type': 'function-wrap',
                    'function': {
                        'type': 'with-info',
                        'function': HasRepr(IsStr(regex=r'<function [a-z_]*\.<locals>\.<lambda> at 0x[0-9a-fA-F]+>')),
                    },
                    'schema': {
                        'type': 'nullable',
                        'schema': {
                            'type': 'union',
                            'choices': [
                                {
                                    'type': 'typed-dict',
                                    'fields': {
                                        'kind': {
                                            'type': 'typed-dict-field',
                                            'schema': {'type': 'literal', 'expected': ['cat']},
                                        }
                                    },
                                },
                                {
                                    'type': 'typed-dict',
                                    'fields': {
                                        'kind': {
                                            'type': 'typed-dict-field',
                                            'schema': {'type': 'literal', 'expected': ['dog']},
                                        }
                                    },
                                },
                            ],
                        },
                    },
                },
            },
            'discriminator': 'kind',
            'strict': False,
            'from_attributes': True,
        },
    }
def test_union_in_submodel() -> None:
    """A discriminated union reused in two submodels of a plain (smart) union:
    validation dispatches correctly, errors nest under each submodel branch,
    and the JSON schema emits one shared discriminator mapping per submodel."""
    class UnionModel1(BaseModel):
        type: Literal[1] = 1
        other: Literal['UnionModel1'] = 'UnionModel1'
    class UnionModel2(BaseModel):
        type: Literal[2] = 2
        other: Literal['UnionModel2'] = 'UnionModel2'
    UnionModel = Annotated[Union[UnionModel1, UnionModel2], Field(discriminator='type')]
    class SubModel1(BaseModel):
        union_model: UnionModel
    class SubModel2(BaseModel):
        union_model: UnionModel
    class TestModel(BaseModel):
        submodel: Union[SubModel1, SubModel2]
    m = TestModel.model_validate({'submodel': {'union_model': {'type': 1}}})
    assert isinstance(m.submodel, SubModel1)
    assert isinstance(m.submodel.union_model, UnionModel1)
    # Both submodels match, so the left-most union member (SubModel1) wins.
    m = TestModel.model_validate({'submodel': {'union_model': {'type': 2}}})
    assert isinstance(m.submodel, SubModel1)
    assert isinstance(m.submodel.union_model, UnionModel2)
    with pytest.raises(ValidationError) as exc_info:
        TestModel.model_validate({'submodel': {'union_model': {'type': 1, 'other': 'UnionModel2'}}})
    # insert_assert(exc_info.value.errors(include_url=False))
    assert exc_info.value.errors(include_url=False) == [
        {
            'type': 'literal_error',
            'loc': ('submodel', 'SubModel1', 'union_model', 1, 'other'),
            'msg': "Input should be 'UnionModel1'",
            'input': 'UnionModel2',
            'ctx': {'expected': "'UnionModel1'"},
        },
        {
            'type': 'literal_error',
            'loc': ('submodel', 'SubModel2', 'union_model', 1, 'other'),
            'msg': "Input should be 'UnionModel1'",
            'input': 'UnionModel2',
            'ctx': {'expected': "'UnionModel1'"},
        },
    ]
    # insert_assert(TestModel.model_json_schema())
    assert TestModel.model_json_schema() == {
        '$defs': {
            'SubModel1': {
                'properties': {
                    'union_model': {
                        'discriminator': {
                            'mapping': {'1': '#/$defs/UnionModel1', '2': '#/$defs/UnionModel2'},
                            'propertyName': 'type',
                        },
                        'oneOf': [{'$ref': '#/$defs/UnionModel1'}, {'$ref': '#/$defs/UnionModel2'}],
                        'title': 'Union Model',
                    }
                },
                'required': ['union_model'],
                'title': 'SubModel1',
                'type': 'object',
            },
            'SubModel2': {
                'properties': {
                    'union_model': {
                        'discriminator': {
                            'mapping': {'1': '#/$defs/UnionModel1', '2': '#/$defs/UnionModel2'},
                            'propertyName': 'type',
                        },
                        'oneOf': [{'$ref': '#/$defs/UnionModel1'}, {'$ref': '#/$defs/UnionModel2'}],
                        'title': 'Union Model',
                    }
                },
                'required': ['union_model'],
                'title': 'SubModel2',
                'type': 'object',
            },
            'UnionModel1': {
                'properties': {
                    'type': {'const': 1, 'default': 1, 'title': 'Type', 'type': 'integer'},
                    'other': {
                        'const': 'UnionModel1',
                        'default': 'UnionModel1',
                        'title': 'Other',
                        'type': 'string',
                    },
                },
                'title': 'UnionModel1',
                'type': 'object',
            },
            'UnionModel2': {
                'properties': {
                    'type': {'const': 2, 'default': 2, 'title': 'Type', 'type': 'integer'},
                    'other': {
                        'const': 'UnionModel2',
                        'default': 'UnionModel2',
                        'title': 'Other',
                        'type': 'string',
                    },
                },
                'title': 'UnionModel2',
                'type': 'object',
            },
        },
        'properties': {
            'submodel': {'anyOf': [{'$ref': '#/$defs/SubModel1'}, {'$ref': '#/$defs/SubModel2'}], 'title': 'Submodel'}
        },
        'required': ['submodel'],
        'title': 'TestModel',
        'type': 'object',
    }
def test_function_after_discriminator():
    """After-mode field validators on the discriminator field do not break
    tagged-union dispatch: the raw input tag is used for discrimination, and
    the validator's replacement value only shows up on the validated model."""
    class CatModel(BaseModel):
        name: Literal['kitty', 'cat']
        @field_validator('name', mode='after')
        def replace_name(cls, v):
            return 'cat'
    class DogModel(BaseModel):
        name: Literal['puppy', 'dog']
        # comment out the 2 field validators and model will work!
        @field_validator('name', mode='after')
        def replace_name(cls, v):
            return 'dog'
    AllowedAnimal = Annotated[Union[CatModel, DogModel], Field(discriminator='name')]
    class Model(BaseModel):
        x: AllowedAnimal
    m = Model(x={'name': 'kitty'})
    assert m.x.name == 'cat'
    # Ensure a discriminated union is actually being used during validation
    with pytest.raises(ValidationError) as exc_info:
        Model(x={'name': 'invalid'})
    assert exc_info.value.errors(include_url=False) == [
        {
            'ctx': {'discriminator': "'name'", 'expected_tags': "'kitty', 'cat', 'puppy', 'dog'", 'tag': 'invalid'},
            'input': {'name': 'invalid'},
            'loc': ('x',),
            'msg': "Input tag 'invalid' found using 'name' does not match any of the "
            "expected tags: 'kitty', 'cat', 'puppy', 'dog'",
            'type': 'union_tag_invalid',
        }
    ]
def test_sequence_discriminated_union():
    """JSON schema for a Sequence of a discriminated union: the items schema
    carries the discriminator mapping, and a multi-literal tag ('reptile',
    'lizard') maps both values to the same $ref."""
    class Cat(BaseModel):
        pet_type: Literal['cat']
        meows: int
    class Dog(BaseModel):
        pet_type: Literal['dog']
        barks: float
    class Lizard(BaseModel):
        pet_type: Literal['reptile', 'lizard']
        scales: bool
    Pet = Annotated[Union[Cat, Dog, Lizard], Field(discriminator='pet_type')]
    class Model(BaseModel):
        pet: Sequence[Pet]
        n: int
    # insert_assert(Model.model_json_schema())
    assert Model.model_json_schema() == {
        '$defs': {
            'Cat': {
                'properties': {
                    'pet_type': {'const': 'cat', 'title': 'Pet Type', 'type': 'string'},
                    'meows': {'title': 'Meows', 'type': 'integer'},
                },
                'required': ['pet_type', 'meows'],
                'title': 'Cat',
                'type': 'object',
            },
            'Dog': {
                'properties': {
                    'pet_type': {'const': 'dog', 'title': 'Pet Type', 'type': 'string'},
                    'barks': {'title': 'Barks', 'type': 'number'},
                },
                'required': ['pet_type', 'barks'],
                'title': 'Dog',
                'type': 'object',
            },
            'Lizard': {
                'properties': {
                    'pet_type': {'enum': ['reptile', 'lizard'], 'title': 'Pet Type', 'type': 'string'},
                    'scales': {'title': 'Scales', 'type': 'boolean'},
                },
                'required': ['pet_type', 'scales'],
                'title': 'Lizard',
                'type': 'object',
            },
        },
        'properties': {
            'pet': {
                'items': {
                    'discriminator': {
                        'mapping': {
                            'cat': '#/$defs/Cat',
                            'dog': '#/$defs/Dog',
                            'lizard': '#/$defs/Lizard',
                            'reptile': '#/$defs/Lizard',
                        },
                        'propertyName': 'pet_type',
                    },
                    'oneOf': [{'$ref': '#/$defs/Cat'}, {'$ref': '#/$defs/Dog'}, {'$ref': '#/$defs/Lizard'}],
                },
                'title': 'Pet',
                'type': 'array',
            },
            'n': {'title': 'N', 'type': 'integer'},
        },
        'required': ['pet', 'n'],
        'title': 'Model',
        'type': 'object',
    }
def test_sequence_discriminated_union_validation():
    """
    Related issue: https://github.com/pydantic/pydantic/issues/9872

    Inside a Sequence, the discriminated union must still dispatch to only the
    matching member, so errors come exclusively from that member's fields
    (both for Python and JSON validation paths).
    """
    class A(BaseModel):
        type: Literal['a']
        a_field: str
    class B(BaseModel):
        type: Literal['b']
        b_field: str
    class Model(BaseModel):
        items: Sequence[Annotated[Union[A, B], Field(discriminator='type')]]
    import json
    data_json = '{"items": [{"type": "b"}]}'
    data_dict = json.loads(data_json)
    expected_error = {
        'type': 'missing',
        'loc': ('items', 0, 'b', 'b_field'),
        'msg': 'Field required',
        'input': {'type': 'b'},
    }
    # missing field should be `b_field` only, not including `a_field`
    # also `literal_error` should not be reported on `type`
    with pytest.raises(ValidationError) as exc_info:
        Model.model_validate(data_dict)
    assert exc_info.value.errors(include_url=False) == [expected_error]
    with pytest.raises(ValidationError) as exc_info:
        Model.model_validate_json(data_json)
    assert exc_info.value.errors(include_url=False) == [expected_error]
def test_sequence_discriminated_union_validation_with_validator():
    """
    This is the same as the previous test, but add validators to both class.

    The after-mode model validators must not disturb tagged-union dispatch
    inside the Sequence: errors still come only from the matching member.
    """
    class A(BaseModel):
        type: Literal['a']
        a_field: str
        @model_validator(mode='after')
        def check_a(self):
            return self
    class B(BaseModel):
        type: Literal['b']
        b_field: str
        @model_validator(mode='after')
        def check_b(self):
            return self
    class Model(BaseModel):
        items: Sequence[Annotated[Union[A, B], Field(discriminator='type')]]
    import json
    data_json = '{"items": [{"type": "b"}]}'
    data_dict = json.loads(data_json)
    expected_error = {
        'type': 'missing',
        'loc': ('items', 0, 'b', 'b_field'),
        'msg': 'Field required',
        'input': {'type': 'b'},
    }
    # missing field should be `b_field` only, not including `a_field`
    # also `literal_error` should not be reported on `type`
    with pytest.raises(ValidationError) as exc_info:
        Model.model_validate(data_dict)
    assert exc_info.value.errors(include_url=False) == [expected_error]
@pytest.fixture(scope='session', name='animals')
def callable_discriminated_union_animals() -> SimpleNamespace:
    """Session-scoped bundle of tiny models for the callable-discriminator tests.

    The tag fields are deliberately inconsistent: Cat uses ``pet_type``,
    Dog/Fish use ``pet_kind``, and Lizard uses ``pet_variety`` (no shared key).
    """
    class Cat(BaseModel):
        pet_type: Literal['cat'] = 'cat'
    class Dog(BaseModel):
        pet_kind: Literal['dog'] = 'dog'
    class Fish(BaseModel):
        pet_kind: Literal['fish'] = 'fish'
    class Lizard(BaseModel):
        pet_variety: Literal['lizard'] = 'lizard'
    return SimpleNamespace(cat=Cat, dog=Dog, fish=Fish, lizard=Lizard)
@pytest.fixture(scope='session', name='get_pet_discriminator_value')
def shared_pet_discriminator_value() -> Callable[[Any], str]:
    """Callable discriminator reading the tag from ``pet_type`` or ``pet_kind``,
    for both mapping inputs and model instances.

    The inner function's name is significant: it appears verbatim in
    ``union_tag_invalid``/``union_tag_not_found`` error contexts.
    """
    def get_discriminator_value(v):
        if isinstance(v, dict):
            # Prefer an explicit 'pet_type' key; otherwise fall back to 'pet_kind'.
            if 'pet_type' in v:
                return v['pet_type']
            return v.get('pet_kind')
        # Attribute-based lookup for model instances, same precedence.
        try:
            return v.pet_type
        except AttributeError:
            return getattr(v, 'pet_kind', None)
    return get_discriminator_value
def test_callable_discriminated_union_with_type_adapter(
    animals: SimpleNamespace, get_pet_discriminator_value: Callable[[Any], str]
) -> None:
    """Callable Discriminator + Tag via TypeAdapter: dispatch works for dicts,
    instances and JSON; a tag outside the declared Tags is union_tag_invalid,
    and a None tag (discriminator found nothing) is union_tag_not_found."""
    pet_adapter = TypeAdapter(
        Annotated[
            Union[Annotated[animals.cat, Tag('cat')], Annotated[animals.dog, Tag('dog')]],
            Discriminator(get_pet_discriminator_value),
        ]
    )
    assert pet_adapter.validate_python({'pet_type': 'cat'}).pet_type == 'cat'
    assert pet_adapter.validate_python({'pet_kind': 'dog'}).pet_kind == 'dog'
    assert pet_adapter.validate_python(animals.cat()).pet_type == 'cat'
    assert pet_adapter.validate_python(animals.dog()).pet_kind == 'dog'
    assert pet_adapter.validate_json('{"pet_type":"cat"}').pet_type == 'cat'
    assert pet_adapter.validate_json('{"pet_kind":"dog"}').pet_kind == 'dog'
    # Unexpected discriminator value for dict
    with pytest.raises(ValidationError) as exc_info:
        pet_adapter.validate_python({'pet_kind': 'fish'})
    assert exc_info.value.errors(include_url=False) == [
        {
            'ctx': {'discriminator': 'get_discriminator_value()', 'expected_tags': "'cat', 'dog'", 'tag': 'fish'},
            'input': {'pet_kind': 'fish'},
            'loc': (),
            'msg': "Input tag 'fish' found using get_discriminator_value() does not "
            "match any of the expected tags: 'cat', 'dog'",
            'type': 'union_tag_invalid',
        }
    ]
    # Missing discriminator key for dict
    with pytest.raises(ValidationError) as exc_info:
        pet_adapter.validate_python({'pet_variety': 'lizard'})
    assert exc_info.value.errors(include_url=False) == [
        {
            'ctx': {'discriminator': 'get_discriminator_value()'},
            'input': {'pet_variety': 'lizard'},
            'loc': (),
            'msg': 'Unable to extract tag using discriminator get_discriminator_value()',
            'type': 'union_tag_not_found',
        }
    ]
    # Unexpected discriminator value for instance
    with pytest.raises(ValidationError) as exc_info:
        pet_adapter.validate_python(animals.fish())
    assert exc_info.value.errors(include_url=False) == [
        {
            'ctx': {'discriminator': 'get_discriminator_value()', 'expected_tags': "'cat', 'dog'", 'tag': 'fish'},
            'input': animals.fish(pet_kind='fish'),
            'loc': (),
            'msg': "Input tag 'fish' found using get_discriminator_value() does not "
            "match any of the expected tags: 'cat', 'dog'",
            'type': 'union_tag_invalid',
        }
    ]
    # Missing discriminator key for instance
    with pytest.raises(ValidationError) as exc_info:
        pet_adapter.validate_python(animals.lizard())
    assert exc_info.value.errors(include_url=False) == [
        {
            'ctx': {'discriminator': 'get_discriminator_value()'},
            'input': animals.lizard(pet_variety='lizard'),
            'loc': (),
            'msg': 'Unable to extract tag using discriminator get_discriminator_value()',
            'type': 'union_tag_not_found',
        }
    ]
def test_various_syntax_options_for_callable_union(
    animals: SimpleNamespace, get_pet_discriminator_value: Callable[[Any], str]
) -> None:
    """The three equivalent spellings of a callable discriminator on a model
    field (Field(discriminator=...), Annotated[..., Discriminator(...)], and
    Annotated[..., Field(discriminator=Discriminator(...))]) behave
    identically for both success and error cases."""
    class PetModelField(BaseModel):
        pet: Union[Annotated[animals.cat, Tag('cat')], Annotated[animals.dog, Tag('dog')]] = Field(
            discriminator=Discriminator(get_pet_discriminator_value)
        )
    class PetModelAnnotated(BaseModel):
        pet: Annotated[
            Union[Annotated[animals.cat, Tag('cat')], Annotated[animals.dog, Tag('dog')]],
            Discriminator(get_pet_discriminator_value),
        ]
    class PetModelAnnotatedWithField(BaseModel):
        pet: Annotated[
            Union[Annotated[animals.cat, Tag('cat')], Annotated[animals.dog, Tag('dog')]],
            Field(discriminator=Discriminator(get_pet_discriminator_value)),
        ]
    models = [PetModelField, PetModelAnnotated, PetModelAnnotatedWithField]
    for model in models:
        assert model.model_validate({'pet': {'pet_type': 'cat'}}).pet.pet_type == 'cat'
        assert model.model_validate({'pet': {'pet_kind': 'dog'}}).pet.pet_kind == 'dog'
        assert model(pet=animals.cat()).pet.pet_type == 'cat'
        assert model(pet=animals.dog()).pet.pet_kind == 'dog'
        assert model.model_validate_json('{"pet": {"pet_type":"cat"}}').pet.pet_type == 'cat'
        assert model.model_validate_json('{"pet": {"pet_kind":"dog"}}').pet.pet_kind == 'dog'
        # Unexpected discriminator value for dict
        with pytest.raises(ValidationError) as exc_info:
            model.model_validate({'pet': {'pet_kind': 'fish'}})
        assert exc_info.value.errors(include_url=False) == [
            {
                'ctx': {'discriminator': 'get_discriminator_value()', 'expected_tags': "'cat', 'dog'", 'tag': 'fish'},
                'input': {'pet_kind': 'fish'},
                'loc': ('pet',),
                'msg': "Input tag 'fish' found using get_discriminator_value() does not "
                "match any of the expected tags: 'cat', 'dog'",
                'type': 'union_tag_invalid',
            }
        ]
        # Missing discriminator key for dict
        with pytest.raises(ValidationError) as exc_info:
            model.model_validate({'pet': {'pet_variety': 'lizard'}})
        assert exc_info.value.errors(include_url=False) == [
            {
                'ctx': {'discriminator': 'get_discriminator_value()'},
                'input': {'pet_variety': 'lizard'},
                'loc': ('pet',),
                'msg': 'Unable to extract tag using discriminator get_discriminator_value()',
                'type': 'union_tag_not_found',
            }
        ]
        # Unexpected discriminator value for instance
        with pytest.raises(ValidationError) as exc_info:
            model(pet=animals.fish())
        assert exc_info.value.errors(include_url=False) == [
            {
                'ctx': {'discriminator': 'get_discriminator_value()', 'expected_tags': "'cat', 'dog'", 'tag': 'fish'},
                'input': animals.fish(pet_kind='fish'),
                'loc': ('pet',),
                'msg': "Input tag 'fish' found using get_discriminator_value() does not "
                "match any of the expected tags: 'cat', 'dog'",
                'type': 'union_tag_invalid',
            }
        ]
        # Missing discriminator key for instance
        with pytest.raises(ValidationError) as exc_info:
            model(pet=animals.lizard())
        assert exc_info.value.errors(include_url=False) == [
            {
                'ctx': {'discriminator': 'get_discriminator_value()'},
                'input': animals.lizard(pet_variety='lizard'),
                'loc': ('pet',),
                'msg': 'Unable to extract tag using discriminator get_discriminator_value()',
                'type': 'union_tag_not_found',
            }
        ]
@pytest.mark.thread_unsafe(reason='`pytest.raises()` is thread unsafe')
def test_callable_discriminated_union_recursive():
    """Contrast a recursive smart union (verbose, one error per branch at
    every recursion level) with a callable-discriminator version (a single
    targeted error, including custom error type/message/context)."""
    # Demonstrate that the errors are very verbose without a callable discriminator:
    class Model(BaseModel):
        x: Union[str, 'Model']
    with pytest.raises(ValidationError) as exc_info:
        Model.model_validate({'x': {'x': {'x': 1}}})
    assert exc_info.value.errors(include_url=False) == [
        {'input': {'x': {'x': 1}}, 'loc': ('x', 'str'), 'msg': 'Input should be a valid string', 'type': 'string_type'},
        {
            'input': {'x': 1},
            'loc': ('x', 'Model', 'x', 'str'),
            'msg': 'Input should be a valid string',
            'type': 'string_type',
        },
        {
            'input': 1,
            'loc': ('x', 'Model', 'x', 'Model', 'x', 'str'),
            'msg': 'Input should be a valid string',
            'type': 'string_type',
        },
        {
            'ctx': {'class_name': 'Model'},
            'input': 1,
            'loc': ('x', 'Model', 'x', 'Model', 'x', 'Model'),
            'msg': 'Input should be a valid dictionary or instance of Model',
            'type': 'model_type',
        },
    ]
    with pytest.raises(ValidationError) as exc_info:
        Model.model_validate({'x': {'x': {'x': {}}}})
    assert exc_info.value.errors(include_url=False) == [
        {
            'input': {'x': {'x': {}}},
            'loc': ('x', 'str'),
            'msg': 'Input should be a valid string',
            'type': 'string_type',
        },
        {
            'input': {'x': {}},
            'loc': ('x', 'Model', 'x', 'str'),
            'msg': 'Input should be a valid string',
            'type': 'string_type',
        },
        {
            'input': {},
            'loc': ('x', 'Model', 'x', 'Model', 'x', 'str'),
            'msg': 'Input should be a valid string',
            'type': 'string_type',
        },
        {
            'input': {},
            'loc': ('x', 'Model', 'x', 'Model', 'x', 'Model', 'x'),
            'msg': 'Field required',
            'type': 'missing',
        },
    ]
    # Demonstrate that the errors are less verbose _with_ a callable discriminator:
    def model_x_discriminator(v):
        if isinstance(v, str):
            return 'str'
        if isinstance(v, (dict, BaseModel)):
            return 'model'
    class DiscriminatedModel(BaseModel):
        x: Annotated[
            Union[Annotated[str, Tag('str')], Annotated['DiscriminatedModel', Tag('model')]],
            Discriminator(
                model_x_discriminator,
                custom_error_type='invalid_union_member',
                custom_error_message='Invalid union member',
                custom_error_context={'discriminator': 'str_or_model'},
            ),
        ]
    with pytest.raises(ValidationError) as exc_info:
        DiscriminatedModel.model_validate({'x': {'x': {'x': 1}}})
    assert exc_info.value.errors(include_url=False) == [
        {
            'ctx': {'discriminator': 'str_or_model'},
            'input': 1,
            'loc': ('x', 'model', 'x', 'model', 'x'),
            'msg': 'Invalid union member',
            'type': 'invalid_union_member',
        }
    ]
    with pytest.raises(ValidationError) as exc_info:
        DiscriminatedModel.model_validate({'x': {'x': {'x': {}}}})
    assert exc_info.value.errors(include_url=False) == [
        {
            'input': {},
            'loc': ('x', 'model', 'x', 'model', 'x', 'model', 'x'),
            'msg': 'Field required',
            'type': 'missing',
        }
    ]
    # Demonstrate that the data is still handled properly when valid:
    data = {'x': {'x': {'x': 'a'}}}
    m = DiscriminatedModel.model_validate(data)
    assert m == DiscriminatedModel(x=DiscriminatedModel(x=DiscriminatedModel(x='a')))
    assert m.model_dump() == data
def test_callable_discriminated_union_with_missing_tag() -> None:
    """Every member of a callable-discriminator union must carry a Tag; each
    of the three partially-tagged spellings raises the
    'callable-discriminator-no-tag' user error."""
    def model_x_discriminator(v):
        if isinstance(v, str):
            return 'str'
        if isinstance(v, (dict, BaseModel)):
            return 'model'
    # No tags at all:
    with pytest.raises(PydanticUserError) as exc_info:
        class DiscriminatedModel(BaseModel):
            x: Annotated[
                Union[str, 'DiscriminatedModel'],
                Discriminator(model_x_discriminator),
            ]
    assert exc_info.value.code == 'callable-discriminator-no-tag'
    # Second member untagged:
    with pytest.raises(PydanticUserError) as exc_info:
        class DiscriminatedModel(BaseModel):
            x: Annotated[
                Union[Annotated[str, Tag('str')], 'DiscriminatedModel'],
                Discriminator(model_x_discriminator),
            ]
    assert exc_info.value.code == 'callable-discriminator-no-tag'
    # First member untagged:
    with pytest.raises(PydanticUserError) as exc_info:
        class DiscriminatedModel(BaseModel):
            x: Annotated[
                Union[str, Annotated['DiscriminatedModel', Tag('model')]],
                Discriminator(model_x_discriminator),
            ]
    assert exc_info.value.code == 'callable-discriminator-no-tag'
@pytest.mark.xfail(
    reason='Issue not yet fixed, see: https://github.com/pydantic/pydantic/issues/8271. At the moment, JSON schema gen warns with a PydanticJsonSchemaWarning.'
)
def test_presence_of_discriminator_when_generating_type_adaptor_json_schema_definitions() -> None:
    """xfail: generate_definitions should keep the discriminator object on the
    items schema when the TypeAdapter target is wrapped in Annotated metadata
    (currently lost; see pydantic#8271)."""
    class ItemType(str, Enum):
        ITEM1 = 'item1'
        ITEM2 = 'item2'
    class CreateItem1(BaseModel):
        item_type: Annotated[Literal[ItemType.ITEM1], Field(alias='type')]
        id: int
    class CreateItem2(BaseModel):
        item_type: Annotated[Literal[ItemType.ITEM2], Field(alias='type')]
        id: int
    class CreateObjectDto(BaseModel):
        id: int
        items: list[
            Annotated[
                Union[
                    CreateItem1,
                    CreateItem2,
                ],
                Field(discriminator='item_type'),
            ]
        ]
    adapter = TypeAdapter(
        Annotated[CreateObjectDto, FieldInfo(examples=[{'id': 1, 'items': [{'id': 3, 'type': 'ITEM1'}]}])]
    )
    schema_map, definitions = GenerateJsonSchema().generate_definitions([(adapter, 'validation', adapter.core_schema)])
    assert definitions == {
        'CreateItem1': {
            'properties': {'id': {'title': 'Id', 'type': 'integer'}, 'type': {'const': 'item1', 'title': 'Type'}},
            'required': ['type', 'id'],
            'title': 'CreateItem1',
            'type': 'object',
        },
        'CreateItem2': {
            'properties': {'id': {'title': 'Id', 'type': 'integer'}, 'type': {'const': 'item2', 'title': 'Type'}},
            'required': ['type', 'id'],
            'title': 'CreateItem2',
            'type': 'object',
        },
        'CreateObjectDto': {
            'properties': {
                'id': {'title': 'Id', 'type': 'integer'},
                'items': {
                    'items': {
                        'discriminator': {
                            'mapping': {'item1': '#/$defs/CreateItem1', 'item2': '#/$defs/CreateItem2'},
                            'propertyName': 'type',
                        },
                        'oneOf': [{'$ref': '#/$defs/CreateItem1'}, {'$ref': '#/$defs/CreateItem2'}],
                    },
                    'title': 'Items',
                    'type': 'array',
                },
            },
            'required': ['id', 'items'],
            'title': 'CreateObjectDto',
            'type': 'object',
        },
    }
def test_nested_discriminator() -> None:
"""
The exact details of the JSON schema produced are not necessarily important; the test was added in response to a
regression that caused the inner union to lose its discriminator. Even if the schema changes, the important
thing is that the core schema (and therefore JSON schema) produced has an actual discriminated union in it.
For more context, see: https://github.com/pydantic/pydantic/issues/8688.
"""
class Step_A(BaseModel):
type: Literal['stepA']
count: int
class Step_B(BaseModel):
type: Literal['stepB']
value: float
class MyModel(BaseModel):
type: Literal['mixed']
sub_models: list['SubModel']
steps: Union[Step_A, Step_B] = Field(
default=None,
discriminator='type',
)
class SubModel(MyModel):
type: Literal['mixed']
blending: float
MyModel.model_rebuild()
# insert_assert(MyModel.model_json_schema())
assert MyModel.model_json_schema() == {
'$defs': {
'Step_A': {
'properties': {
'count': {'title': 'Count', 'type': 'integer'},
'type': {'const': 'stepA', 'title': 'Type', 'type': 'string'},
},
'required': ['type', 'count'],
'title': 'Step_A',
'type': 'object',
},
'Step_B': {
'properties': {
'type': {'const': 'stepB', 'title': 'Type', 'type': 'string'},
'value': {'title': 'Value', 'type': 'number'},
},
'required': ['type', 'value'],
'title': 'Step_B',
'type': 'object',
},
'SubModel': {
'properties': {
'blending': {'title': 'Blending', 'type': 'number'},
'steps': {
'default': None,
'discriminator': {
'mapping': {'stepA': '#/$defs/Step_A', 'stepB': '#/$defs/Step_B'},
'propertyName': 'type',
},
'oneOf': [{'$ref': '#/$defs/Step_A'}, {'$ref': '#/$defs/Step_B'}],
'title': 'Steps',
},
'sub_models': {'items': {'$ref': '#/$defs/SubModel'}, 'title': 'Sub Models', 'type': 'array'},
'type': {'const': 'mixed', 'title': 'Type', 'type': 'string'},
},
'required': ['type', 'sub_models', 'blending'],
'title': 'SubModel',
'type': 'object',
},
},
'properties': {
'steps': {
'default': None,
'discriminator': {
'mapping': {'stepA': '#/$defs/Step_A', 'stepB': '#/$defs/Step_B'},
'propertyName': 'type',
},
'oneOf': [{'$ref': '#/$defs/Step_A'}, {'$ref': '#/$defs/Step_B'}],
'title': 'Steps',
},
'sub_models': {'items': {'$ref': '#/$defs/SubModel'}, 'title': 'Sub Models', 'type': 'array'},
'type': {'const': 'mixed', 'title': 'Type', 'type': 'string'},
},
'required': ['type', 'sub_models'],
'title': 'MyModel',
'type': 'object',
}
def test_nested_schema_gen_uses_tagged_union_in_ref() -> None:
class NestedState(BaseModel):
state_type: Literal['nested']
substate: 'AnyState'
# If this type is left out, the model behaves normally again
class LoopState(BaseModel):
state_type: Literal['loop']
substate: 'AnyState'
class LeafState(BaseModel):
state_type: Literal['leaf']
AnyState = Annotated[Union[NestedState, LoopState, LeafState], Field(discriminator='state_type')]
adapter = TypeAdapter(AnyState)
assert adapter.core_schema['schema']['type'] == 'tagged-union'
for definition in adapter.core_schema['definitions']:
if definition['schema']['model_name'] in ['NestedState', 'LoopState']:
assert definition['schema']['fields']['substate']['schema']['type'] == 'tagged-union'
def test_recursive_discriminiated_union_with_typed_dict() -> None:
class Foo(TypedDict):
type: Literal['foo']
x: 'Foobar'
class Bar(TypedDict):
type: Literal['bar']
Foobar = Annotated[Union[Foo, Bar], Field(discriminator='type')]
ta = TypeAdapter(Foobar)
# len of errors should be 1 for each case, bc we're using a tagged union
with pytest.raises(ValidationError) as e:
ta.validate_python({'type': 'wrong'})
assert len(e.value.errors()) == 1
with pytest.raises(ValidationError) as e:
ta.validate_python({'type': 'foo', 'x': {'type': 'wrong'}})
assert len(e.value.errors()) == 1
core_schema = ta.core_schema
assert core_schema['schema']['type'] == 'tagged-union'
for definition in core_schema['definitions']:
if 'Foo' in definition['ref']:
assert definition['fields']['x']['schema']['type'] == 'tagged-union'
def test_recursive_discriminiated_union_with_base_model() -> None:
class Foo(BaseModel):
type: Literal['foo']
x: 'Foobar'
class Bar(BaseModel):
type: Literal['bar']
Foobar = Annotated[Union[Foo, Bar], Field(discriminator='type')]
ta = TypeAdapter(Foobar)
# len of errors should be 1 for each case, bc we're using a tagged union
with pytest.raises(ValidationError) as e:
ta.validate_python({'type': 'wrong'})
assert len(e.value.errors()) == 1
with pytest.raises(ValidationError) as e:
ta.validate_python({'type': 'foo', 'x': {'type': 'wrong'}})
assert len(e.value.errors()) == 1
core_schema = ta.core_schema
assert core_schema['schema']['type'] == 'tagged-union'
for definition in core_schema['definitions']:
if 'Foo' in definition['ref']:
assert definition['schema']['fields']['x']['schema']['type'] == 'tagged-union'
def test_recursive_discriminated_union_with_pydantic_dataclass() -> None:
@pydantic_dataclass
class Foo:
type: Literal['foo']
x: 'Foobar'
@pydantic_dataclass
class Bar:
type: Literal['bar']
Foobar = Annotated[Union[Foo, Bar], Field(discriminator='type')]
ta = TypeAdapter(Foobar)
# len of errors should be 1 for each case, bc we're using a tagged union
with pytest.raises(ValidationError) as e:
ta.validate_python({'type': 'wrong'})
assert len(e.value.errors()) == 1
with pytest.raises(ValidationError) as e:
ta.validate_python({'type': 'foo', 'x': {'type': 'wrong'}})
assert len(e.value.errors()) == 1
core_schema = ta.core_schema
assert core_schema['schema']['type'] == 'tagged-union'
for definition in core_schema['definitions']:
if 'Foo' in definition['ref']:
for field in definition['schema']['fields']:
assert field['schema']['type'] == 'tagged-union' if field['name'] == 'x' else True
def test_discriminated_union_with_nested_dataclass() -> None:
@pydantic_dataclass
class Cat:
type: Literal['cat'] = 'cat'
@pydantic_dataclass
class Dog:
type: Literal['dog'] = 'dog'
@pydantic_dataclass
class NestedDataClass:
animal: Annotated[Union[Cat, Dog], Discriminator('type')]
@pydantic_dataclass
class Root:
data_class: NestedDataClass
ta = TypeAdapter(Root)
assert ta.core_schema['schema']['fields'][0]['schema']['schema']['fields'][0]['schema']['type'] == 'tagged-union'
def test_discriminated_union_with_nested_typed_dicts() -> None:
class Cat(TypedDict):
type: Literal['cat']
class Dog(TypedDict):
type: Literal['dog']
class NestedTypedDict(TypedDict):
animal: Annotated[Union[Cat, Dog], Discriminator('type')]
class Root(TypedDict):
data_class: NestedTypedDict
ta = TypeAdapter(Root)
assert ta.core_schema['fields']['data_class']['schema']['fields']['animal']['schema']['type'] == 'tagged-union'
def test_discriminated_union_with_unsubstituted_type_var() -> None:
T = TypeVar('T')
class Dog(BaseModel, Generic[T]):
type_: Literal['dog']
friends: list['GenericPet']
id: T
class Cat(BaseModel, Generic[T]):
type_: Literal['cat']
friends: list['GenericPet']
id: T
GenericPet = Annotated[Union[Dog[T], Cat[T]], Field(discriminator='type_')]
ta = TypeAdapter(Dog[int])
int_dog = {
'type_': 'dog',
'friends': [{'type_': 'dog', 'friends': [], 'id': 2}, {'type_': 'cat', 'friends': [], 'id': 3}],
'id': 1,
}
assert ta.validate_python(int_dog).id == 1
assert ta.validate_python(int_dog).friends[0].id == 2
assert ta.validate_python(int_dog).friends[1].id == 3
def test_discriminated_union_model_dump_with_nested_class() -> None:
class SomeEnum(str, Enum):
CAT = 'cat'
DOG = 'dog'
class Dog(BaseModel):
type: Literal[SomeEnum.DOG] = SomeEnum.DOG
name: str
class Cat(BaseModel):
type: Literal[SomeEnum.CAT] = SomeEnum.CAT
name: str
class Yard(BaseModel):
pet: Union[Dog, Cat] = Field(discriminator='type')
yard = Yard(pet=Dog(name='Rex'))
yard_dict = yard.model_dump(mode='json')
assert isinstance(yard_dict['pet']['type'], str)
assert not isinstance(yard_dict['pet']['type'], SomeEnum)
assert str(yard_dict['pet']['type']) == 'dog'
@pytest.mark.thread_unsafe(reason='Passes on multithreaded. This needs to be investigated further.')
@pytest.mark.xfail(reason='Waiting for union serialization fixes via https://github.com/pydantic/pydantic/issues/9688.')
def test_discriminated_union_serializer() -> None:
"""Reported via https://github.com/pydantic/pydantic/issues/9590."""
@dataclass
class FooId:
_id: int
@dataclass
class BarId:
_id: int
FooOrBarId = Annotated[
Annotated[FooId, PlainSerializer(lambda v: {'tag': 'foo', '_id': v._id}), Tag('foo')]
| Annotated[BarId, PlainSerializer(lambda v: {'tag': 'bar', '_id': v._id}), Tag('bar')],
Discriminator(lambda v: v['tag']),
]
adapter = TypeAdapter(FooOrBarId)
assert adapter.dump_python(FooId(1)) == {'tag': 'foo', '_id': 1}
assert adapter.dump_python(BarId(2)) == {'tag': 'bar', '_id': 2}
def test_deferred_discriminated_union_meta_key_removed() -> None:
"""A regression encountered after the schema cleaning refactor.
Issue: https://github.com/pydantic/pydantic/issues/11587.
"""
class Test(BaseModel):
disc: Literal['test']
class Base(BaseModel):
root: Test = Field(discriminator='disc')
base_schema = deepcopy(Base.__pydantic_core_schema__)
class Reference(BaseModel):
base: list[Base]
# With the new schema cleaning logic, the core schema of `Base` isn't deepcopied anymore
# when used in `Reference`. We were aware that theoretically, this could lead to issues
# where referenced core schemas could be mutated. This regression was an example of that.
# It happened because when processing deferred discriminators, we forgot to remove the
# `'pydantic_internal_union_discriminator'` meta key from the schemas. The schema cleaning
# logic of `Reference` would then re-apply the deferred discriminator logic for `Base`.
assert Base.__pydantic_core_schema__ == base_schema
def test_tagged_discriminator_type_alias() -> None:
"""https://github.com/pydantic/pydantic/issues/11930"""
class Pie(BaseModel):
pass
class ApplePie(Pie):
fruit: Literal['apple'] = 'apple'
class PumpkinPie(Pie):
filling: Literal['pumpkin'] = 'pumpkin'
def get_discriminator_value(v):
return v.get('fruit', v.get('filling'))
TaggedApplePie = TypeAliasType('TaggedApplePie', Annotated[ApplePie, Tag('apple')])
class ThanksgivingDinner(BaseModel):
dessert: Annotated[
Union[TaggedApplePie, Annotated[PumpkinPie, Tag('pumpkin')]],
Discriminator(get_discriminator_value),
]
inst = ThanksgivingDinner(dessert={'fruit': 'apple'})
assert isinstance(inst.dessert, ApplePie)
def test_discriminated_union_type_alias_type() -> None:
"""https://github.com/pydantic/pydantic/issues/11661
This was fixed by making sure we provide the available definitions
when first trying to apply discriminated unions during core schema
generation (which we forgot to do). Our schema cleaning logic is still
not working correctly when deferred discriminated unions are involved
together with referenceable core schemas that should be inlined. In practice,
I don't know if such a scenario can happen (see the test below --
`test_deferred_discriminated_union_and_references()` for a theoretical example).
"""
class Foo(BaseModel):
type: Literal['foo'] = 'foo'
Disc = TypeAliasType('Disc', Annotated[Foo, Field(discriminator='type')])
class Main(BaseModel):
f: Disc
# Use the JSON Schema to avoid making assertions on the core schema, that
# may be less stable:
assert Main.model_json_schema()['$defs']['Disc']['discriminator'] == {
'mapping': {'foo': '#/$defs/Foo'},
'propertyName': 'type',
}
@pytest.mark.xfail(reason='deferred discriminated union info is lost on core schemas that are inlined.')
def test_deferred_discriminated_union_and_references() -> None:
class Foo(BaseModel):
type: Literal['foo'] = 'foo'
class Bar(BaseModel):
type: Literal['bar'] = 'bar'
gen_schema = GenerateSchema(ConfigWrapper(None))
foo_ref = gen_schema.defs.create_definition_reference_schema(Foo.__pydantic_core_schema__)
bar_ref = gen_schema.defs.create_definition_reference_schema(Bar.__pydantic_core_schema__)
disc_union = core_schema.union_schema(
choices=[foo_ref, bar_ref],
metadata={'pydantic_internal_union_discriminator': 'type'},
ref='disc_union',
)
disc_union_ref = gen_schema.defs.create_definition_reference_schema(disc_union)
final_schema = gen_schema.clean_schema(disc_union_ref)
assert final_schema['type'] == 'tagged-union'
def test_recursive_discriminated_union() -> None:
"""https://github.com/pydantic/pydantic/issues/11978"""
F = TypeVar('F', bound=BaseModel)
class Not(BaseModel, Generic[F]):
operand: F = Field()
class Label(BaseModel):
prop: Literal['label'] = 'label'
def filter_discriminator(v):
if isinstance(v, dict):
if 'not' in v:
return 'not'
else:
return v.get('prop')
if isinstance(v, Not):
return 'not'
else:
return getattr(v, 'prop', None)
ParagraphFilterExpression = Annotated[
Union[
Annotated[Not['ParagraphFilterExpression'], Tag('not')],
Annotated[Label, Tag('label')],
],
Discriminator(filter_discriminator),
]
FieldFilterExpression = Annotated[
Union[
Annotated[Not['FieldFilterExpression'], Tag('not')],
Annotated[Label, Tag('label')],
],
Discriminator(filter_discriminator),
]
class FilterExpression(BaseModel):
field: FieldFilterExpression
paragraph: ParagraphFilterExpression
| FooStrEnum |
python | pytorch__pytorch | torch/testing/_internal/distributed/distributed_test.py | {
"start": 8685,
"end": 9129
} | class ____(nn.Module):
def __init__(self, affine=True):
super().__init__()
self.fc1 = nn.Linear(2, 40, bias=False)
self.bn = nn.BatchNorm1d(4, affine=affine)
self.fc2 = nn.Linear(40, 4, bias=False)
def forward(self, x):
x = torch.reshape(self.fc1(x), (-1, 4, 10))
x = self.bn(x)
x = torch.reshape(x, (-1, 40))
x = self.fc2(x)
return F.softmax(x, dim=1)
| BatchNormNet |
python | facebook__pyre-check | tools/upgrade/commands/support_sqlalchemy.py | {
"start": 1098,
"end": 6080
} | class ____(ErrorSuppressingCommand):
def __init__(
self,
command_arguments: CommandArguments,
*,
local_root: Path,
paths: Sequence[Path],
repository: Repository,
) -> None:
super().__init__(command_arguments, repository=repository)
self._local_root = local_root
self._paths: Optional[Sequence[Path]] = paths if len(paths) > 0 else None
@staticmethod
def from_arguments(
arguments: argparse.Namespace, repository: Repository
) -> "SupportSqlalchemy":
command_arguments = CommandArguments.from_arguments(arguments)
return SupportSqlalchemy(
command_arguments=command_arguments,
local_root=arguments.local_root,
paths=arguments.paths,
repository=repository,
)
@classmethod
def add_arguments(cls, parser: argparse.ArgumentParser) -> None:
super(SupportSqlalchemy, cls).add_arguments(parser)
parser.set_defaults(command=cls.from_arguments)
parser.add_argument(
"-l",
"--local-root",
help="Path to directory with local configuration",
type=path_exists,
required=True,
)
parser.add_argument(
"paths", help="Paths using sqlalchemy", type=path_exists, nargs="*"
)
@staticmethod
def _get_sqlalchemy_errors(
path_wise_errors: Dict[str, List[PyreError]],
filter_paths: Optional[Sequence[Path]],
) -> PathsToErrors:
all_pathwise_sqlalchemy_errors = {
Path(pathname): [error for error in errors if _is_sqlalchemy_error(error)]
for pathname, errors in path_wise_errors.items()
}
nonempty_pathwise_sqlalchemy_errors = {
path: errors
for path, errors in all_pathwise_sqlalchemy_errors.items()
if len(errors) > 0
}
if filter_paths is None:
return nonempty_pathwise_sqlalchemy_errors
return {
path: errors
for path, errors in nonempty_pathwise_sqlalchemy_errors.items()
if path in filter_paths
}
def _annotate_sqlalchemy_files(
self, configuration: Configuration, sqlalchemy_path_wise_errors: PathsToErrors
) -> None:
paths = [str(path) for path in sqlalchemy_path_wise_errors.keys()]
pyre_output = configuration.run_pyre(
arguments=[
"--strict",
"-l",
str(self._local_root),
"--noninteractive",
"infer-v2",
"--in-place",
"--dequalify",
*paths,
],
description="Running `pyre infer-v2`",
should_clean=self._should_clean,
stderr_flag=None,
command_input=None,
)
if pyre_output is None:
raise UserError("Couldn't annotate sqlalchemy files.")
def _import_annotations_from_future(
self, sqlalchemy_path_wise_errors: PathsToErrors
) -> None:
"""We need this because the original sqlalchemy types aren't generic
and will fail at runtime."""
LOG.info("Importing necessary annotations...")
context = CodemodContext()
AddImportsVisitor.add_needed_import(context, "__future__", "annotations")
paths = list(sqlalchemy_path_wise_errors.keys())
for path in paths:
source = libcst.parse_module(path.read_text())
modified_tree = AddImportsVisitor(context).transform_module(source)
path.write_text(modified_tree.code)
@override
def run(self) -> None:
local_configuration_path = self._local_root / ".pyre_configuration.local"
local_configuration = Configuration(local_configuration_path)
unannotated_attribute_errors = local_configuration.get_errors(
only_fix_error_code=MISSING_ATTRIBUTE_ANNOTATION_ERROR_CODE,
strict=True,
should_clean=self._should_clean,
)
sqlalchemy_path_wise_errors = self._get_sqlalchemy_errors(
unannotated_attribute_errors.paths_to_errors, self._paths
)
if len(sqlalchemy_path_wise_errors) == 0:
LOG.warning("No paths with missing annotations. Exiting...")
return
LOG.info("Found errors: %s", sqlalchemy_path_wise_errors)
LOG.info(
"Annotating the following sqlalchemy files: `%s`",
list(sqlalchemy_path_wise_errors.keys()),
)
self._annotate_sqlalchemy_files(
local_configuration, sqlalchemy_path_wise_errors
)
self._import_annotations_from_future(sqlalchemy_path_wise_errors)
self._get_and_suppress_errors(local_configuration)
title = "Suppress errors for {}".format(self._local_root)
self._repository.commit_changes(commit=(not self._no_commit), title=title)
| SupportSqlalchemy |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1212808,
"end": 1213570
} | class ____(sgqlc.types.Type, Node):
"""Represents a 'locked' event on a given issue or pull request."""
__schema__ = github_schema
__field_names__ = ("actor", "created_at", "lock_reason", "lockable")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
"""Identifies the actor who performed the event."""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
lock_reason = sgqlc.types.Field(LockReason, graphql_name="lockReason")
"""Reason that the conversation was locked (optional)."""
lockable = sgqlc.types.Field(sgqlc.types.non_null(Lockable), graphql_name="lockable")
"""Object that was locked."""
| LockedEvent |
python | Pylons__pyramid | tests/test_url.py | {
"start": 44327,
"end": 45597
} | class ____(unittest.TestCase):
def _callFUT(self, path, request, **kw):
from pyramid.url import static_url
return static_url(path, request, **kw)
def _makeRequest(self):
class Request:
def static_url(self, path, **kw):
self.path = path
self.kw = kw
return 'static url'
return Request()
def test_it_abs(self):
request = self._makeRequest()
result = self._callFUT('/foo/bar/abc', request, _app_url='')
self.assertEqual(result, 'static url')
self.assertEqual(request.path, '/foo/bar/abc')
self.assertEqual(request.kw, {'_app_url': ''})
def test_it_absspec(self):
request = self._makeRequest()
result = self._callFUT('foo:abc', request, _anchor='anchor')
self.assertEqual(result, 'static url')
self.assertEqual(request.path, 'foo:abc')
self.assertEqual(request.kw, {'_anchor': 'anchor'})
def test_it_rel(self):
request = self._makeRequest()
result = self._callFUT('abc', request, _app_url='')
self.assertEqual(result, 'static url')
self.assertEqual(request.path, 'tests:abc')
self.assertEqual(request.kw, {'_app_url': ''})
| Test_static_url |
python | getsentry__sentry | tests/sentry/preprod/api/endpoints/test_organization_preprod_artifact_assemble.py | {
"start": 13644,
"end": 40160
} | class ____(APITestCase):
"""Integration tests for the full endpoint - requires database."""
def setUp(self) -> None:
self.organization = self.create_organization(owner=self.user)
with assume_test_silo_mode(SiloMode.CONTROL):
self.token = ApiToken.objects.create(user=self.user, scope_list=["project:write"])
self.project = self.create_project()
self.url = reverse(
"sentry-api-0-assemble-preprod-artifact-files",
args=[self.organization.slug, self.project.slug],
)
self.feature_context = Feature("organizations:preprod-frontend-routes")
self.feature_context.__enter__()
def tearDown(self) -> None:
self.feature_context.__exit__(None, None, None)
super().tearDown()
def test_feature_flag_disabled_returns_403(self) -> None:
"""Test that endpoint returns 404 when feature flag is disabled."""
self.feature_context.__exit__(None, None, None)
try:
content = b"test content"
total_checksum = sha1(content).hexdigest()
response = self.client.post(
self.url,
data={
"checksum": total_checksum,
"chunks": [],
},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 403
finally:
self.feature_context = Feature("organizations:preprod-frontend-routes")
self.feature_context.__enter__()
def test_assemble_json_schema_integration(self) -> None:
"""Integration test for schema validation through the endpoint."""
response = self.client.post(
self.url, data={"lol": "test"}, HTTP_AUTHORIZATION=f"Bearer {self.token.token}"
)
assert response.status_code == 400
checksum = sha1(b"1").hexdigest()
response = self.client.post(
self.url,
data={"checksum": checksum, "chunks": []},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200
assert response.data["state"] == ChunkFileState.NOT_FOUND
def test_assemble_json_schema_invalid_structure(self) -> None:
"""Test that invalid JSON structure is rejected."""
response = self.client.post(
self.url, data={"lol": "test"}, HTTP_AUTHORIZATION=f"Bearer {self.token.token}"
)
assert response.status_code == 400, response.content
def test_assemble_json_schema_invalid_provider(self) -> None:
"""Test that invalid provider is rejected."""
response = self.client.post(
self.url,
data={"checksum": "a" * 40, "chunks": [], "provider": "invalid"},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 400, response.content
assert "Unsupported VCS provider 'invalid'" in response.data["error"]
assert "Supported providers are:" in response.data["error"]
assert "github" in response.data["error"]
def test_assemble_json_schema_missing_checksum(self) -> None:
"""Test that missing checksum field is rejected."""
response = self.client.post(
self.url, data={"chunks": []}, HTTP_AUTHORIZATION=f"Bearer {self.token.token}"
)
assert response.status_code == 400, response.content
def test_assemble_json_schema_invalid_checksum_format(self) -> None:
"""Test that invalid checksum format is rejected."""
response = self.client.post(
self.url,
data={"checksum": "invalid", "chunks": []},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 400, response.content
def test_assemble_json_schema_checksum_wrong_type(self) -> None:
"""Test that non-string checksum is rejected."""
response = self.client.post(
self.url,
data={"checksum": 123, "chunks": []},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 400, response.content
def test_assemble_json_schema_missing_chunks(self) -> None:
"""Test that missing chunks field is rejected."""
checksum = sha1(b"1").hexdigest()
response = self.client.post(
self.url,
data={"checksum": checksum},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 400, response.content
def test_assemble_json_schema_chunks_wrong_type(self) -> None:
"""Test that non-array chunks field is rejected."""
checksum = sha1(b"1").hexdigest()
response = self.client.post(
self.url,
data={"checksum": checksum, "chunks": "not_an_array"},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 400, response.content
def test_assemble_json_schema_chunks_invalid_item_type(self) -> None:
"""Test that non-string items in chunks array are rejected."""
checksum = sha1(b"1").hexdigest()
response = self.client.post(
self.url,
data={"checksum": checksum, "chunks": [123, 456]},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 400, response.content
def test_assemble_json_schema_build_configuration_wrong_type(self) -> None:
"""Test that non-string build_configuration is rejected."""
checksum = sha1(b"1").hexdigest()
response = self.client.post(
self.url,
data={"checksum": checksum, "chunks": [], "build_configuration": 123},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 400, response.content
def test_assemble_json_schema_valid_minimal(self) -> None:
"""Test that valid minimal schema is accepted."""
checksum = sha1(b"1").hexdigest()
response = self.client.post(
self.url,
data={"checksum": checksum, "chunks": []},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200, response.content
assert response.data["state"] == ChunkFileState.NOT_FOUND
def test_assemble_json_schema_optional_fields(self) -> None:
checksum = sha1(b"test content").hexdigest()
response = self.client.post(
self.url,
data={
"checksum": checksum,
"chunks": [],
"build_configuration": "release",
"head_sha": "e" * 40,
"base_sha": "f" * 40,
"provider": "github",
"head_repo_name": "owner/repo",
"base_repo_name": "owner/repo",
"head_ref": "feature/xyz",
"base_ref": "main",
"pr_number": 123,
},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200, response.content
@patch(
"sentry.preprod.api.endpoints.organization_preprod_artifact_assemble.assemble_preprod_artifact"
)
@patch(
"sentry.preprod.api.endpoints.organization_preprod_artifact_assemble.create_preprod_artifact"
)
def test_assemble_basic(
self, mock_create_preprod_artifact: MagicMock, mock_assemble_preprod_artifact: MagicMock
) -> None:
content = b"test preprod artifact content"
total_checksum = sha1(content).hexdigest()
artifact = create_preprod_artifact(
org_id=self.organization.id,
project_id=self.project.id,
checksum=total_checksum,
)
assert artifact is not None
artifact_id = artifact.id
mock_create_preprod_artifact.return_value = artifact
blob = FileBlob.from_file(ContentFile(content))
FileBlobOwner.objects.get_or_create(organization_id=self.organization.id, blob=blob)
response = self.client.post(
self.url,
data={
"checksum": total_checksum,
"chunks": [blob.checksum],
},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200, response.content
assert response.data["state"] == ChunkFileState.CREATED
assert set(response.data["missingChunks"]) == set()
expected_url = (
f"/organizations/{self.organization.slug}/preprod/{self.project.slug}/{artifact_id}"
)
assert expected_url in response.data["artifactUrl"]
mock_create_preprod_artifact.assert_called_once_with(
org_id=self.organization.id,
project_id=self.project.id,
checksum=total_checksum,
build_configuration_name=None,
release_notes=None,
head_sha=None,
base_sha=None,
provider=None,
head_repo_name=None,
base_repo_name=None,
head_ref=None,
base_ref=None,
pr_number=None,
)
mock_assemble_preprod_artifact.apply_async.assert_called_once_with(
kwargs={
"org_id": self.organization.id,
"project_id": self.project.id,
"checksum": total_checksum,
"chunks": [blob.checksum],
"artifact_id": artifact_id,
"build_configuration": None,
}
)
@patch(
"sentry.preprod.api.endpoints.organization_preprod_artifact_assemble.assemble_preprod_artifact"
)
@patch(
"sentry.preprod.api.endpoints.organization_preprod_artifact_assemble.create_preprod_artifact"
)
def test_assemble_with_metadata(
self, mock_create_preprod_artifact: MagicMock, mock_assemble_preprod_artifact: MagicMock
) -> None:
content = b"test preprod artifact with metadata"
total_checksum = sha1(content).hexdigest()
artifact = create_preprod_artifact(
org_id=self.organization.id,
project_id=self.project.id,
checksum=total_checksum,
)
assert artifact is not None
artifact_id = artifact.id
mock_create_preprod_artifact.return_value = artifact
blob = FileBlob.from_file(ContentFile(content))
FileBlobOwner.objects.get_or_create(organization_id=self.organization.id, blob=blob)
response = self.client.post(
self.url,
data={
"checksum": total_checksum,
"chunks": [blob.checksum],
"build_configuration": "release",
"head_sha": "e" * 40,
"base_sha": "f" * 40,
"provider": "github",
"head_repo_name": "owner/repo",
"base_repo_name": "owner/repo",
"head_ref": "feature/xyz",
"base_ref": "main",
"pr_number": 123,
},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200, response.content
assert response.data["state"] == ChunkFileState.CREATED
assert set(response.data["missingChunks"]) == set()
expected_url = (
f"/organizations/{self.organization.slug}/preprod/{self.project.slug}/{artifact_id}"
)
assert expected_url in response.data["artifactUrl"]
mock_create_preprod_artifact.assert_called_once_with(
org_id=self.organization.id,
project_id=self.project.id,
checksum=total_checksum,
build_configuration_name="release",
release_notes=None,
head_sha="e" * 40,
base_sha="f" * 40,
provider="github",
head_repo_name="owner/repo",
base_repo_name="owner/repo",
head_ref="feature/xyz",
base_ref="main",
pr_number=123,
)
mock_assemble_preprod_artifact.apply_async.assert_called_once_with(
kwargs={
"org_id": self.organization.id,
"project_id": self.project.id,
"checksum": total_checksum,
"chunks": [blob.checksum],
"artifact_id": artifact_id,
"build_configuration": "release",
}
)
def test_assemble_with_missing_chunks(self) -> None:
content = b"test content for missing chunks"
total_checksum = sha1(content).hexdigest()
response = self.client.post(
self.url,
data={
"checksum": total_checksum,
"chunks": [total_checksum],
},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200, response.content
assert response.data["state"] == ChunkFileState.NOT_FOUND
assert set(response.data["missingChunks"]) == {total_checksum}
blob = FileBlob.from_file(ContentFile(content))
FileBlobOwner.objects.get_or_create(organization_id=self.organization.id, blob=blob)
response = self.client.post(
self.url,
data={
"checksum": total_checksum,
"chunks": [total_checksum],
},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200, response.content
assert response.data["state"] == ChunkFileState.CREATED
def test_assemble_response(self) -> None:
content = b"test response content"
total_checksum = sha1(content).hexdigest()
blob = FileBlob.from_file(ContentFile(content))
FileBlobOwner.objects.get_or_create(organization_id=self.organization.id, blob=blob)
response = self.client.post(
self.url,
data={
"checksum": total_checksum,
"chunks": [blob.checksum],
},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200, response.content
assert response.data["state"] == ChunkFileState.CREATED
def test_assemble_with_pending_deletion_project(self) -> None:
self.project.status = ObjectStatus.PENDING_DELETION
self.project.save()
content = b"test content"
total_checksum = sha1(content).hexdigest()
blob = FileBlob.from_file(ContentFile(content))
FileBlobOwner.objects.get_or_create(organization_id=self.organization.id, blob=blob)
response = self.client.post(
self.url,
data={
"checksum": total_checksum,
"chunks": [blob.checksum],
},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 404
def test_assemble_org_auth_token(self) -> None:
org2 = self.create_organization(owner=self.user)
content = b"test org auth token content"
total_checksum = sha1(content).hexdigest()
blob = FileBlob.from_file(ContentFile(content))
FileBlobOwner.objects.get_or_create(organization_id=self.organization.id, blob=blob)
with assume_test_silo_mode(SiloMode.CONTROL):
bad_token_str = generate_token(self.organization.slug, "")
OrgAuthToken.objects.create(
organization_id=self.organization.id,
name="token 1",
token_hashed=hash_token(bad_token_str),
token_last_characters="ABCD",
scope_list=[],
date_last_used=None,
)
response = self.client.post(
self.url,
data={
"checksum": total_checksum,
"chunks": [blob.checksum],
},
HTTP_AUTHORIZATION=f"Bearer {bad_token_str}",
)
assert response.status_code == 403
with assume_test_silo_mode(SiloMode.CONTROL):
bad_org_token_str = generate_token(self.organization.slug, "")
OrgAuthToken.objects.create(
organization_id=org2.id,
name="token 1",
token_hashed=hash_token(bad_org_token_str),
token_last_characters="ABCD",
scope_list=[],
date_last_used=None,
)
response = self.client.post(
self.url,
data={
"checksum": total_checksum,
"chunks": [blob.checksum],
},
HTTP_AUTHORIZATION=f"Bearer {bad_org_token_str}",
)
assert response.status_code == 403
with assume_test_silo_mode(SiloMode.CONTROL):
good_token_str = generate_token(self.organization.slug, "")
OrgAuthToken.objects.create(
organization_id=self.organization.id,
name="token 1",
token_hashed=hash_token(good_token_str),
token_last_characters="ABCD",
scope_list=["project:releases"],
date_last_used=None,
)
with outbox_runner():
response = self.client.post(
self.url,
data={
"checksum": total_checksum,
"chunks": [blob.checksum],
},
HTTP_AUTHORIZATION=f"Bearer {good_token_str}",
)
assert response.status_code == 200
with assume_test_silo_mode(SiloMode.CONTROL):
org_token = OrgAuthToken.objects.get(token_hashed=hash_token(good_token_str))
assert org_token.date_last_used is not None
assert org_token.project_last_used_id == self.project.id
def test_poll_request(self) -> None:
checksum = sha1(b"test poll").hexdigest()
response = self.client.post(
self.url,
data={
"checksum": checksum,
"chunks": [],
},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200
assert response.data["state"] == ChunkFileState.NOT_FOUND
assert response.data["missingChunks"] == []
def test_check_existing_assembly_status(self) -> None:
"""Test that endpoint doesn't check existing assembly status - it processes new requests."""
checksum = sha1(b"test existing status").hexdigest()
# Even if assembly status exists, endpoint doesn't check it
set_assemble_status(
AssembleTask.PREPROD_ARTIFACT, self.project.id, checksum, ChunkFileState.CREATED
)
response = self.client.post(
self.url,
data={
"checksum": checksum,
"chunks": [], # No chunks means NOT_FOUND
},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200
# Endpoint returns NOT_FOUND when no chunks are provided, regardless of existing status
assert response.data["state"] == ChunkFileState.NOT_FOUND
assert response.data["missingChunks"] == []
def test_integration_task_sets_status_api_can_read_it(self) -> None:
"""
Test showing that this endpoint doesn't poll for status - it only processes new assembly requests.
This endpoint doesn't check existing assembly status. Instead, it:
1. Checks for missing chunks
2. Creates artifacts and queues assembly tasks
3. Returns NOT_FOUND when no chunks are provided
"""
content = b"test integration content"
total_checksum = sha1(content).hexdigest()
blob = FileBlob.from_file(ContentFile(content))
FileBlobOwner.objects.get_or_create(organization_id=self.organization.id, blob=blob)
# Even if task sets status, this endpoint doesn't read it
set_assemble_status(
AssembleTask.PREPROD_ARTIFACT, self.project.id, total_checksum, ChunkFileState.CREATED
)
response = self.client.post(
self.url,
data={
"checksum": total_checksum,
"chunks": [], # No chunks means NOT_FOUND
},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200
# Endpoint doesn't check existing status, returns NOT_FOUND for empty chunks
assert response.data["state"] == ChunkFileState.NOT_FOUND
assert response.data["missingChunks"] == []
def test_permission_required(self) -> None:
content = b"test permission content"
total_checksum = sha1(content).hexdigest()
response = self.client.post(
self.url,
data={
"checksum": total_checksum,
"chunks": [],
},
)
assert response.status_code == 401
@patch(
"sentry.preprod.api.endpoints.organization_preprod_artifact_assemble.create_preprod_artifact"
)
def test_assemble_create_artifact_failure(
self, mock_create_preprod_artifact: MagicMock
) -> None:
"""Test that endpoint returns error when create_preprod_artifact fails."""
content = b"test preprod artifact content"
total_checksum = sha1(content).hexdigest()
mock_create_preprod_artifact.return_value = None
blob = FileBlob.from_file(ContentFile(content))
FileBlobOwner.objects.get_or_create(organization_id=self.organization.id, blob=blob)
response = self.client.post(
self.url,
data={
"checksum": total_checksum,
"chunks": [blob.checksum],
},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 500, response.content
assert response.data["state"] == ChunkFileState.ERROR
assert response.data["detail"] == "Failed to create preprod artifact row."
mock_create_preprod_artifact.assert_called_once_with(
org_id=self.organization.id,
project_id=self.project.id,
checksum=total_checksum,
build_configuration_name=None,
release_notes=None,
head_sha=None,
base_sha=None,
provider=None,
head_repo_name=None,
base_repo_name=None,
head_ref=None,
base_ref=None,
pr_number=None,
)
def test_assemble_missing_vcs_parameters(self) -> None:
"""Test that providing partial VCS parameters returns a 400 error with specific missing params."""
content = b"test missing vcs params"
total_checksum = sha1(content).hexdigest()
blob = FileBlob.from_file(ContentFile(content))
FileBlobOwner.objects.get_or_create(organization_id=self.organization.id, blob=blob)
# Test missing head_ref
response = self.client.post(
self.url,
data={
"checksum": total_checksum,
"chunks": [blob.checksum],
"head_sha": "e" * 40,
"provider": "github",
"head_repo_name": "owner/repo",
# Missing head_ref
},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 400, response.content
assert "error" in response.data
assert "Missing parameters: head_ref" in response.data["error"]
# Test missing multiple parameters
response = self.client.post(
self.url,
data={
"checksum": total_checksum,
"chunks": [blob.checksum],
"head_sha": "e" * 40,
# Missing provider, head_repo_name, head_ref
},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 400, response.content
assert "error" in response.data
assert "Missing parameters:" in response.data["error"]
assert "head_repo_name" in response.data["error"]
assert "provider" in response.data["error"]
assert "head_ref" in response.data["error"]
def test_assemble_same_head_and_base_sha(self) -> None:
"""Test that providing the same value for head_sha and base_sha returns a 400 error."""
content = b"test same sha"
total_checksum = sha1(content).hexdigest()
blob = FileBlob.from_file(ContentFile(content))
FileBlobOwner.objects.get_or_create(organization_id=self.organization.id, blob=blob)
same_sha = "e" * 40
response = self.client.post(
self.url,
data={
"checksum": total_checksum,
"chunks": [blob.checksum],
"head_sha": same_sha,
"base_sha": same_sha,
"provider": "github",
"head_repo_name": "owner/repo",
"head_ref": "feature/xyz",
},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 400, response.content
assert "error" in response.data
assert "Head SHA and base SHA cannot be the same" in response.data["error"]
assert same_sha in response.data["error"]
def test_assemble_base_sha_without_head_sha(self) -> None:
"""Test that providing base_sha without head_sha returns a 400 error."""
content = b"test base sha without head sha"
total_checksum = sha1(content).hexdigest()
blob = FileBlob.from_file(ContentFile(content))
FileBlobOwner.objects.get_or_create(organization_id=self.organization.id, blob=blob)
response = self.client.post(
self.url,
data={
"checksum": total_checksum,
"chunks": [blob.checksum],
"base_sha": "f" * 40,
# Missing head_sha
},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 400, response.content
assert "error" in response.data
assert "Head SHA is required when base SHA is provided" in response.data["error"]
| ProjectPreprodArtifactAssembleTest |
python | pytorch__pytorch | torch/jit/_check.py | {
"start": 112,
"end": 9772
} | class ____(ast.NodeVisitor):
"""Check the ``__init__`` method of a given ``nn.Module``.
It ensures that all instance-level attributes can be properly initialized.
Specifically, we do type inference based on attribute values...even
if the attribute in question has already been typed using
Python3-style annotations or ``torch.jit.annotate``. This means that
setting an instance-level attribute to ``[]`` (for ``List``),
``{}`` for ``Dict``), or ``None`` (for ``Optional``) isn't enough
information for us to properly initialize that attribute.
An object of this class can walk a given ``nn.Module``'s AST and
determine if it meets our requirements or not.
Known limitations
1. We can only check the AST nodes for certain constructs; we can't
``eval`` arbitrary expressions. This means that function calls,
class instantiations, and complex expressions that resolve to one of
the "empty" values specified above will NOT be flagged as
problematic.
2. We match on string literals, so if the user decides to use a
non-standard import (e.g. `from typing import List as foo`), we
won't catch it.
Example:
.. code-block:: python
class M(torch.nn.Module):
def fn(self):
return []
def __init__(self) -> None:
super().__init__()
self.x: List[int] = []
def forward(self, x: List[int]):
self.x = x
return 1
The above code will pass the ``AttributeTypeIsSupportedChecker``
check since we have a function call in ``__init__``. However,
it will still fail later with the ``RuntimeError`` "Tried to set
nonexistent attribute: x. Did you forget to initialize it in
__init__()?".
Args:
nn_module - The instance of ``torch.nn.Module`` whose
``__init__`` method we wish to check
"""
def check(self, nn_module: torch.nn.Module) -> None:
source_lines = inspect.getsource(nn_module.__class__.__init__)
# Ignore comments no matter the indentation
def is_useless_comment(line):
line = line.strip()
return line.startswith("#") and not line.startswith("# type:")
source_lines = "\n".join(
[l for l in source_lines.split("\n") if not is_useless_comment(l)]
)
# This AST only contains the `__init__` method of the nn.Module
init_ast = ast.parse(textwrap.dedent(source_lines))
# Get items annotated in the class body
if sys.version_info >= (3, 14):
import annotationlib
self.class_level_annotations = list(
annotationlib.get_annotations(
nn_module, format=annotationlib.Format.FORWARDREF
).keys()
)
else:
self.class_level_annotations = list(nn_module.__annotations__.keys())
# Flag for later
self.visiting_class_level_ann = False
self.visit(init_ast)
def _is_empty_container(self, node: ast.AST, ann_type: str) -> bool:
if ann_type == "List":
# Assigning `[]` to a `List` type gives you a Node where
# value=List(elts=[], ctx=Load())
if not isinstance(node, ast.List):
return False
if node.elts:
return False
elif ann_type == "Dict":
# Assigning `{}` to a `Dict` type gives you a Node where
# value=Dict(keys=[], values=[])
if not isinstance(node, ast.Dict):
return False
if node.keys:
return False
elif ann_type == "Optional":
# Assigning `None` to an `Optional` type gives you a
# Node where value=Constant(value=None, kind=None)
if not isinstance(node, ast.Constant):
return False
if node.value: # type: ignore[attr-defined]
return False
return True
def visit_Assign(self, node) -> None:
"""Store assignment state when assigning to a Call Node.
If we're visiting a Call Node (the right-hand side of an
assignment statement), we won't be able to check the variable
that we're assigning to (the left-hand side of an assignment).
Because of this, we need to store this state in visitAssign.
(Luckily, we only have to do this if we're assigning to a Call
Node, i.e. ``torch.jit.annotate``. If we're using normal Python
annotations, we'll be visiting an AnnAssign Node, which has its
target built in.)
"""
try:
if (
isinstance(node.value, ast.Call)
and node.targets[0].attr in self.class_level_annotations
):
self.visiting_class_level_ann = True
except AttributeError:
return
self.generic_visit(node)
self.visiting_class_level_ann = False
def visit_AnnAssign(self, node) -> None:
"""Visit an AnnAssign node in an ``nn.Module``'s ``__init__`` method.
It checks if it conforms to our attribute annotation rules."""
# If we have a local variable
try:
if node.target.value.id != "self":
return
except AttributeError:
return
# If we have an attribute that's already been annotated at the
# class level
if node.target.attr in self.class_level_annotations:
return
# TODO @ansley: add `Union` once landed
# NB: Even though `Tuple` is a "container", we don't want to
# check for it here. `Tuple` functions as an type with an
# "infinite" number of subtypes, in the sense that you can have
# `Tuple[())]`, `Tuple[T1]`, `Tuple[T2]`, `Tuple[T1, T2]`,
# `Tuple[T2, T1]` and so on, and none of these subtypes can be
# used in place of the other. Therefore, assigning an empty
# tuple in `__init__` CORRECTLY means that that variable
# cannot be reassigned later to a non-empty tuple. Same
# deal with `NamedTuple`
containers = {"List", "list", "Dict", "dict", "Optional"}
# If we're not evaluating one of the specified problem types
try:
if node.annotation.value.id not in containers:
return
except AttributeError:
# To evaluate a base type (`str`, `int`, etc.), we would
# have needed to get the name through `node.annotation.id`
# instead of `node.annotation.value.id`. Seems that we're
# not evaluating one of our "containers"
return
# Check if the assigned variable is empty
ann_type = node.annotation.value.id
if not self._is_empty_container(node.value, ann_type):
return
warnings.warn(
"The TorchScript type system doesn't support "
"instance-level annotations on empty non-base "
"types in `__init__`. Instead, either 1) use a "
"type annotation in the class body, or 2) wrap "
"the type in `torch.jit.Attribute`.",
stacklevel=2,
)
def visit_Call(self, node) -> None:
"""Determine if a Call node is 'torch.jit.annotate' in __init__.
Visit a Call node in an ``nn.Module``'s ``__init__``
method and determine if it's ``torch.jit.annotate``. If so,
see if it conforms to our attribute annotation rules.
"""
# If we have an attribute that's already been annotated at the
# class level
if self.visiting_class_level_ann:
return
# If this isn't a call to `torch.jit.annotate`
try:
if (
node.func.value.value.id != "torch"
or node.func.value.attr != "jit"
or node.func.attr != "annotate"
):
self.generic_visit(node)
elif (
node.func.value.value.id != "jit" or node.func.value.attr != "annotate"
):
self.generic_visit(node)
except AttributeError:
# Looks like we didn't even have the right node structure
# to check for `torch.jit.annotate` in the first place
self.generic_visit(node)
# Invariant: we have a `torch.jit.annotate` or a
# `torch.annotate` call
# A Call Node for `torch.jit.annotate` should have an `args`
# list of length 2 where args[0] represents the annotation and
# args[1] represents the actual value
if len(node.args) != 2:
return
if not isinstance(node.args[0], ast.Subscript):
return
# See notes in `visit_AnnAssign` r.e. containers
containers = {"List", "Dict", "Optional"}
try:
ann_type = node.args[0].value.id # type: ignore[attr-defined]
except AttributeError:
return
if ann_type not in containers:
return
# Check if the assigned variable is empty
if not self._is_empty_container(node.args[1], ann_type):
return
warnings.warn(
"The TorchScript type system doesn't support "
"instance-level annotations on empty non-base "
"types in `__init__`. Instead, either 1) use a "
"type annotation in the class body, or 2) wrap "
"the type in `torch.jit.Attribute`.",
stacklevel=2,
)
| AttributeTypeIsSupportedChecker |
python | ethereum__web3.py | tests/integration/go_ethereum/test_goethereum_http.py | {
"start": 1841,
"end": 2693
} | class ____(GoEthereumAdminModuleTest):
@pytest.mark.xfail(
reason="running geth with the --nodiscover flag doesn't allow peer addition"
)
def test_admin_peers(self, w3: "Web3") -> None:
super().test_admin_peers(w3)
def test_admin_start_stop_http(self, w3: "Web3") -> None:
# This test causes all tests after it to fail on CI if it's allowed to run
pytest.xfail(
reason="Only one HTTP endpoint is allowed to be active at any time"
)
super().test_admin_start_stop_http(w3)
def test_admin_start_stop_ws(self, w3: "Web3") -> None:
# This test causes all tests after it to fail on CI if it's allowed to run
pytest.xfail(reason="Only one WS endpoint is allowed to be active at any time")
super().test_admin_start_stop_ws(w3)
| TestGoEthereumAdminModuleTest |
python | django__django | tests/template_tests/test_autoreloader.py | {
"start": 5324,
"end": 6109
} | class ____(SimpleTestCase):
def test_watch_for_template_changes(self):
mock_reloader = mock.MagicMock()
autoreload.watch_for_template_changes(mock_reloader)
self.assertSequenceEqual(
sorted(mock_reloader.watch_dir.call_args_list),
[
mock.call(ROOT / "templates", "**/*"),
],
)
def test_get_template_directories(self):
self.assertSetEqual(
autoreload.get_template_directories(),
{
ROOT / "templates",
},
)
@mock.patch("django.template.loaders.base.Loader.reset")
def test_reset_all_loaders(self, mock_reset):
autoreload.reset_loaders()
self.assertEqual(mock_reset.call_count, 0)
| Jinja2TemplateReloadTests |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 414020,
"end": 415183
} | class ____(sgqlc.types.Interface):
"""Common fields across different project field types"""
__schema__ = github_schema
__field_names__ = ("created_at", "data_type", "database_id", "id", "name", "project", "updated_at")
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
data_type = sgqlc.types.Field(sgqlc.types.non_null(ProjectV2FieldType), graphql_name="dataType")
"""The field's type."""
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
"""Identifies the primary key from the database."""
id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="id")
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
"""The project field's name."""
project = sgqlc.types.Field(sgqlc.types.non_null("ProjectV2"), graphql_name="project")
"""The project that contains this field."""
updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt")
"""Identifies the date and time when the object was last updated."""
| ProjectV2FieldCommon |
python | python-excel__xlwt | xlwt/BIFFRecords.py | {
"start": 72195,
"end": 72810
} | class ____(BiffRecord):
"""
This record specifies the default column width for columns that
do not have a specific width set using the record COLINFO or COLWIDTH.
This record has no effect, if a STANDARDWIDTH record is present in the file.
Record DEFCOLWIDTH, BIFF2-BIFF8:
Offset Size Contents
0 2 Column width in characters, using the width of the zero
character from default font (first FONT record in the file)
"""
_REC_ID = 0x0055
def __init__(self, def_width):
self._rec_data = pack('<H', options, def_width)
| DefColWidthRecord |
python | davidhalter__jedi | test/completion/usages.py | {
"start": 5273,
"end": 7061
} | class ____():
def foo(self):
return
def check(instance):
#< 13 (-5,8), (0,13)
instance.foo()
check(DynamicParam())
# -----------------
# Compiled Objects
# -----------------
import _sre
# TODO reenable this, it's currently not working, because of 2/3
# inconsistencies in typeshed (_sre exists in typeshed/2, but not in
# typeshed/3).
##< 0 (-3,7), (0,0), ('_sre', None, None)
_sre
# -----------------
# on syntax
# -----------------
#< 0
import undefined
# -----------------
# comprehensions
# -----------------
#< 0 (0,0), (2,12)
x = 32
#< 12 (-2,0), (0,12)
[x for x in x]
#< 0 (0,0), (2,1), (2,12)
y = 32
#< 12 (-2,0), (0,1), (0,12)
[y for b in y]
#< 1 (0,1), (0,7)
[x for x in something]
#< 7 (0,1), (0,7)
[x for x in something]
z = 3
#< 1 (0,1), (0,10)
{z:1 for z in something}
#< 10 (0,1), (0,10)
{z:1 for z in something}
#< 8 (0,6), (0, 40)
[[x + nested_loopv2 for x in bar()] for nested_loopv2 in baz()]
#< 25 (0,20), (0, 65)
(("*" if abs(foo(x, nested_loopv1)) else " " for x in bar()) for nested_loopv1 in baz())
def whatever_func():
zzz = 3
if UNDEFINED:
zzz = 5
if UNDEFINED2:
#< (3, 8), (4, 4), (0, 12), (-3, 8), (-5, 4)
zzz
else:
#< (0, 8), (1, 4), (-3, 12), (-6, 8), (-8, 4)
zzz
zzz
# -----------------
# global
# -----------------
def global_usage1():
#< (0, 4), (4, 11), (6, 4), (9, 8), (12, 4)
my_global
def global_definition():
#< (-4, 4), (0, 11), (2, 4), (5, 8), (8, 4)
global my_global
#< 4 (-6, 4), (-2, 11), (0, 4), (3, 8), (6, 4)
my_global = 3
if WHATEVER:
#< 8 (-9, 4), (-5, 11), (-3, 4), (0, 8), (3, 4)
my_global = 4
def global_usage2()
my_global
def not_global(my_global):
my_global
| DynamicParam |
python | openai__openai-python | src/openai/types/image_generate_params.py | {
"start": 4517,
"end": 4885
} | class ____(ImageGenerateParamsBase, total=False):
stream: Optional[Literal[False]]
"""Generate the image in streaming mode.
Defaults to `false`. See the
[Image generation guide](https://platform.openai.com/docs/guides/image-generation)
for more information. This parameter is only supported for `gpt-image-1`.
"""
| ImageGenerateParamsNonStreaming |
python | scrapy__scrapy | tests/AsyncCrawlerProcess/caching_hostname_resolver_ipv6.py | {
"start": 63,
"end": 558
} | class ____(scrapy.Spider):
"""
Finishes without a twisted.internet.error.DNSLookupError exception
"""
name = "caching_hostname_resolver_spider"
start_urls = ["http://[::1]"]
if __name__ == "__main__":
process = AsyncCrawlerProcess(
settings={
"RETRY_ENABLED": False,
"DNS_RESOLVER": "scrapy.resolver.CachingHostnameResolver",
}
)
process.crawl(CachingHostnameResolverSpider)
process.start()
| CachingHostnameResolverSpider |
python | keras-team__keras | keras/src/random/random_test.py | {
"start": 16842,
"end": 19318
} | class ____(testing.TestCase):
"""Test the dtype to verify that the behavior matches JAX."""
INT_DTYPES = [x for x in dtypes.INT_TYPES if x not in ("uint64", "int64")]
FLOAT_DTYPES = [x for x in dtypes.FLOAT_TYPES if x not in ("float64",)]
if backend.backend() == "torch":
INT_DTYPES = [x for x in INT_DTYPES if x not in ("uint16", "uint32")]
@parameterized.named_parameters(named_product(dtype=FLOAT_DTYPES))
def test_normal(self, dtype):
res = random.normal((2, 3), dtype=dtype)
self.assertEqual(standardize_dtype(res.dtype), dtype)
@parameterized.named_parameters(named_product(dtype=INT_DTYPES))
def test_categorical(self, dtype):
logits = np.eye(4) * 1e5 + 1e6
res = random.categorical(logits, 10, dtype=dtype)
self.assertEqual(standardize_dtype(res.dtype), dtype)
@parameterized.named_parameters(named_product(dtype=FLOAT_DTYPES))
def test_uniform(self, dtype):
res = random.uniform((2, 3), dtype=dtype)
self.assertEqual(standardize_dtype(res.dtype), dtype)
@parameterized.named_parameters(named_product(dtype=INT_DTYPES))
def test_randint(self, dtype):
res = random.randint((2, 3), 0, 10, dtype=dtype)
self.assertEqual(standardize_dtype(res.dtype), dtype)
@parameterized.named_parameters(named_product(dtype=FLOAT_DTYPES))
def test_truncated_normal(self, dtype):
res = random.truncated_normal((2, 3), dtype=dtype)
self.assertEqual(standardize_dtype(res.dtype), dtype)
@parameterized.named_parameters(named_product(dtype=FLOAT_DTYPES))
def test_dropout(self, dtype):
x = ops.ones((3, 5), dtype=dtype)
res = random.dropout(x, rate=0.8, seed=0)
self.assertEqual(standardize_dtype(res.dtype), dtype)
@parameterized.named_parameters(named_product(dtype=FLOAT_DTYPES))
def test_gamma(self, dtype):
res = random.gamma((2, 3), 2.0, dtype=dtype)
self.assertEqual(standardize_dtype(res.dtype), dtype)
@parameterized.named_parameters(named_product(dtype=FLOAT_DTYPES))
def test_binomial(self, dtype):
res = random.binomial((2,), 1e5, 0.5, dtype=dtype)
self.assertEqual(standardize_dtype(res.dtype), dtype)
@parameterized.named_parameters(named_product(dtype=FLOAT_DTYPES))
def test_beta(self, dtype):
res = random.beta((2, 3), 2.0, 3.0, dtype=dtype)
self.assertEqual(standardize_dtype(res.dtype), dtype)
| RandomDTypeTest |
python | django__django | django/contrib/auth/validators.py | {
"start": 173,
"end": 481
} | class ____(validators.RegexValidator):
regex = r"^[\w.@+-]+\Z"
message = _(
"Enter a valid username. This value may contain only unaccented lowercase a-z "
"and uppercase A-Z letters, numbers, and @/./+/-/_ characters."
)
flags = re.ASCII
@deconstructible
| ASCIIUsernameValidator |
python | dagster-io__dagster | python_modules/libraries/dagster-dbt/dagster_dbt/components/dbt_project/component.py | {
"start": 19520,
"end": 20599
} | class ____(
create_component_translator_cls(DbtProjectComponent, DagsterDbtTranslator),
ComponentTranslator[DbtProjectComponent],
):
def __init__(
self,
component: DbtProjectComponent,
settings: Optional[DagsterDbtComponentTranslatorSettings],
):
self._component = component
super().__init__(settings)
def get_asset_spec(
self, manifest: Mapping[str, Any], unique_id: str, project: Optional[DbtProject]
) -> dg.AssetSpec:
base_spec = super().get_asset_spec(manifest, unique_id, project)
if self.component.translation is None:
return base_spec
else:
dbt_props = get_node(manifest, unique_id)
return self.component.translation(base_spec, dbt_props)
def get_projects_from_dbt_component(components: Path) -> list[DbtProject]:
project_components = ComponentTree.for_project(components).get_all_components(
of_type=DbtProjectComponent
)
return [component.dbt_project for component in project_components]
| DbtProjectComponentTranslator |
python | explosion__spaCy | spacy/lang/az/__init__.py | {
"start": 221,
"end": 329
} | class ____(Language):
lang = "az"
Defaults = AzerbaijaniDefaults
__all__ = ["Azerbaijani"]
| Azerbaijani |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/orm/dataclass_transforms_one.py | {
"start": 793,
"end": 1315
} | class ____(MappedAsDataclass, Base):
__tablename__ = "ticket_9628"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: Mapped[str] = mapped_column()
d2: Mapped[str] = column_property(data + "Asdf")
d3: Mapped[str] = query_expression(data + "Asdf")
# d2 and d3 are not required, as these have init=False. We omit
# them from dataclass transforms entirely as these are never intended
# to be writeable fields in a 2.0 declarative mapping
t9628 = TestTicket9628(data="asf")
| TestTicket9628 |
python | scrapy__scrapy | tests/test_cmdline_crawl_with_pipeline/test_spider/pipelines.py | {
"start": 0,
"end": 131
} | class ____:
def open_spider(self, spider):
pass
def process_item(self, item):
return item
| TestSpiderPipeline |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/amazon_bestsellers.py | {
"start": 135,
"end": 917
} | class ____(OxylabsBaseReader):
"""
Get data from Amazon Best Sellers pages.
https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/amazon/best-sellers
"""
top_level_header: str = "Bestsellers"
def __init__(self, username: str, password: str, **data) -> None:
super().__init__(username=username, password=password, **data)
@classmethod
def class_name(cls) -> str:
return "OxylabsAmazonBestsellersReader"
def get_response(self, payload: dict[str, Any]) -> Response:
return self.oxylabs_api.amazon.scrape_bestsellers(**payload)
async def aget_response(self, payload: dict[str, Any]) -> Response:
return await self.async_oxylabs_api.amazon.scrape_bestsellers(**payload)
| OxylabsAmazonBestsellersReader |
python | dagster-io__dagster | python_modules/dagster-pipes/dagster_pipes/__init__.py | {
"start": 55606,
"end": 69611
} | class ____:
"""The context for a Dagster Pipes process.
This class is analogous to :py:class:`~dagster.OpExecutionContext` on the Dagster side of the Pipes
connection. It provides access to information such as the asset key(s) and partition key(s) in
scope for the current step. It also provides methods for logging and emitting results that will
be streamed back to Dagster.
This class should not be directly instantiated by the user. Instead it should be initialized by
calling :py:func:`open_dagster_pipes()`, which will return the singleton instance of this class.
After `open_dagster_pipes()` has been called, the singleton instance can also be retrieved by
calling :py:func:`PipesContext.get`.
"""
_instance: ClassVar[Optional["PipesContext"]] = None
@classmethod
def is_initialized(cls) -> bool:
"""bool: Whether the context has been initialized."""
return cls._instance is not None
@classmethod
def set(cls, context: "PipesContext") -> None:
"""Set the singleton instance of the context."""
cls._instance = context
@classmethod
def get(cls) -> "PipesContext":
"""Get the singleton instance of the context. Raises an error if the context has not been initialized."""
if cls._instance is None:
raise Exception(
"PipesContext has not been initialized. You must call `open_dagster_pipes()`."
)
return cls._instance
def __init__(
self,
params_loader: PipesParamsLoader,
context_loader: PipesContextLoader,
message_writer: PipesMessageWriter,
) -> None:
context_params = params_loader.load_context_params()
messages_params = params_loader.load_messages_params()
self._io_stack = ExitStack()
self._data = self._io_stack.enter_context(context_loader.load_context(context_params))
self._message_channel = self._io_stack.enter_context(message_writer.open(messages_params))
opened_payload = message_writer.get_opened_payload()
self._message_channel.write_message(_make_message("opened", opened_payload))
self._logger = _PipesLogger(self)
self._materialized_assets: set[str] = set()
self._closed: bool = False
def __enter__(self) -> "PipesContext":
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
# expected to all be present or absent together
# https://docs.python.org/3/reference/datamodel.html#object.__exit__
if exc_type and exc_value and traceback:
exc = _pipes_exc_from_tb(TracebackException(exc_type, exc_value, traceback))
else:
exc = None
self.close(exc)
def close(
self,
exc: Optional[PipesException] = None,
) -> None:
"""Close the pipes connection. This will flush all buffered messages to the orchestration
process and cause any further attempt to write a message to raise an error. This method is
idempotent-- subsequent calls after the first have no effect.
"""
if not self._closed:
payload = {"exception": exc} if exc else {}
self._message_channel.write_message(_make_message("closed", payload))
self._io_stack.close()
self._closed = True
@property
def is_closed(self) -> bool:
"""bool: Whether the context has been closed."""
return self._closed
def _write_message(self, method: Method, params: Optional[Mapping[str, Any]] = None) -> None:
if self._closed:
raise DagsterPipesError("Cannot send message after pipes context is closed.")
message = _make_message(method, params)
self._message_channel.write_message(message)
# ########################
# ##### PUBLIC API
# ########################
@property
def is_asset_step(self) -> bool:
"""bool: Whether the current step targets assets."""
return self._data["asset_keys"] is not None
@property
def asset_key(self) -> str:
"""str: The AssetKey for the currently scoped asset. Raises an error if 0 or multiple assets
are in scope.
"""
asset_keys = _assert_defined_asset_property(self._data["asset_keys"], "asset_key")
_assert_single_asset(self._data, "asset_key")
return asset_keys[0]
@property
def asset_keys(self) -> Sequence[str]:
"""Sequence[str]: The AssetKeys for the currently scoped assets. Raises an error if no
assets are in scope.
"""
asset_keys = _assert_defined_asset_property(self._data["asset_keys"], "asset_keys")
return asset_keys
@property
def provenance(self) -> Optional[PipesDataProvenance]:
"""Optional[PipesDataProvenance]: The provenance for the currently scoped asset. Raises an
error if 0 or multiple assets are in scope.
"""
provenance_by_asset_key = _assert_defined_asset_property(
self._data["provenance_by_asset_key"], "provenance"
)
_assert_single_asset(self._data, "provenance")
return next(iter(provenance_by_asset_key.values()))
@property
def provenance_by_asset_key(self) -> Mapping[str, Optional[PipesDataProvenance]]:
"""Mapping[str, Optional[PipesDataProvenance]]: Mapping of asset key to provenance for the
currently scoped assets. Raises an error if no assets are in scope.
"""
provenance_by_asset_key = _assert_defined_asset_property(
self._data["provenance_by_asset_key"], "provenance_by_asset_key"
)
return provenance_by_asset_key
@property
def code_version(self) -> Optional[str]:
"""Optional[str]: The code version for the currently scoped asset. Raises an error if 0 or
multiple assets are in scope.
"""
code_version_by_asset_key = _assert_defined_asset_property(
self._data["code_version_by_asset_key"], "code_version"
)
_assert_single_asset(self._data, "code_version")
return next(iter(code_version_by_asset_key.values()))
@property
def code_version_by_asset_key(self) -> Mapping[str, Optional[str]]:
"""Mapping[str, Optional[str]]: Mapping of asset key to code version for the currently
scoped assets. Raises an error if no assets are in scope.
"""
code_version_by_asset_key = _assert_defined_asset_property(
self._data["code_version_by_asset_key"], "code_version_by_asset_key"
)
return code_version_by_asset_key
@property
def is_partition_step(self) -> bool:
"""bool: Whether the current step is scoped to one or more partitions."""
return self._data["partition_key_range"] is not None
@property
def partition_key(self) -> str:
"""str: The partition key for the currently scoped partition. Raises an error if 0 or
multiple partitions are in scope.
"""
partition_key = _assert_defined_partition_property(
self._data["partition_key"], "partition_key"
)
return partition_key
@property
def partition_key_range(self) -> "PipesPartitionKeyRange":
"""PipesPartitionKeyRange: The partition key range for the currently scoped partition or
partitions. Raises an error if no partitions are in scope.
"""
partition_key_range = _assert_defined_partition_property(
self._data["partition_key_range"], "partition_key_range"
)
return partition_key_range
@property
def partition_time_window(self) -> Optional["PipesTimeWindow"]:
"""Optional[PipesTimeWindow]: The partition time window for the currently scoped partition
or partitions. Returns None if partitions in scope are not temporal. Raises an error if no
partitions are in scope.
"""
# None is a valid value for partition_time_window, but we check that a partition key range
# is defined.
_assert_defined_partition_property(
self._data["partition_key_range"], "partition_time_window"
)
return self._data["partition_time_window"]
@property
def run_id(self) -> str:
"""str: The run ID for the currently executing pipeline run."""
return self._data["run_id"]
@property
def job_name(self) -> Optional[str]:
"""Optional[str]: The job name for the currently executing run. Returns None if the run is
not derived from a job.
"""
return self._data["job_name"]
@property
def retry_number(self) -> int:
"""int: The retry number for the currently executing run."""
return self._data["retry_number"]
def get_extra(self, key: str) -> Any:
"""Get the value of an extra provided by the user. Raises an error if the extra is not defined.
Args:
key (str): The key of the extra.
Returns:
Any: The value of the extra.
"""
return _assert_defined_extra(self._data["extras"], key)
@property
def extras(self) -> Mapping[str, Any]:
"""Mapping[str, Any]: Key-value map for all extras provided by the user."""
return self._data["extras"]
# ##### WRITE
def report_asset_materialization(
self,
metadata: Optional[Mapping[str, Union[PipesMetadataRawValue, PipesMetadataValue]]] = None,
data_version: Optional[str] = None,
asset_key: Optional[str] = None,
) -> None:
"""Report to Dagster that an asset has been materialized. Streams a payload containing
materialization information back to Dagster. If no assets are in scope, raises an error.
Args:
metadata (Optional[Mapping[str, Union[PipesMetadataRawValue, PipesMetadataValue]]]):
Metadata for the materialized asset. Defaults to None.
data_version (Optional[str]): The data version for the materialized asset.
Defaults to None.
asset_key (Optional[str]): The asset key for the materialized asset. If only a
single asset is in scope, default to that asset's key. If multiple assets are in scope,
this must be set explicitly or an error will be raised.
"""
asset_key = _resolve_optionally_passed_asset_key(
self._data, asset_key, "report_asset_materialization"
)
if asset_key in self._materialized_assets:
raise DagsterPipesError(
f"Calling `report_asset_materialization` with asset key `{asset_key}` is undefined."
" Asset has already been materialized, so no additional data can be reported"
" for it."
)
metadata = (
_normalize_param_metadata(metadata, "report_asset_materialization", "metadata")
if metadata
else None
)
data_version = _assert_opt_param_type(
data_version, str, "report_asset_materialization", "data_version"
)
self._write_message(
"report_asset_materialization",
{"asset_key": asset_key, "data_version": data_version, "metadata": metadata},
)
self._materialized_assets.add(asset_key)
def report_asset_check(
self,
check_name: str,
passed: bool,
severity: PipesAssetCheckSeverity = "ERROR",
metadata: Optional[Mapping[str, Union[PipesMetadataRawValue, PipesMetadataValue]]] = None,
asset_key: Optional[str] = None,
) -> None:
"""Report to Dagster that an asset check has been performed. Streams a payload containing
check result information back to Dagster. If no assets or associated checks are in scope, raises an error.
Args:
check_name (str): The name of the check.
passed (bool): Whether the check passed.
severity (PipesAssetCheckSeverity): The severity of the check. Defaults to "ERROR".
metadata (Optional[Mapping[str, Union[PipesMetadataRawValue, PipesMetadataValue]]]):
Metadata for the check. Defaults to None.
asset_key (Optional[str]): The asset key for the check. If only a single asset is in
scope, default to that asset's key. If multiple assets are in scope, this must be
set explicitly or an error will be raised.
"""
asset_key = _resolve_optionally_passed_asset_key(
self._data, asset_key, "report_asset_check"
)
check_name = _assert_param_type(check_name, str, "report_asset_check", "check_name")
passed = _assert_param_type(passed, bool, "report_asset_check", "passed")
metadata = (
_normalize_param_metadata(metadata, "report_asset_check", "metadata")
if metadata
else None
)
self._write_message(
"report_asset_check",
{
"asset_key": asset_key,
"check_name": check_name,
"passed": passed,
"metadata": metadata,
"severity": severity,
},
)
def report_custom_message(self, payload: Any):
"""Send a JSON serializable payload back to the orchestration process. Can be retrieved there
using `get_custom_messages`.
Args:
payload (Any): JSON serializable data.
"""
self._write_message("report_custom_message", {"payload": payload})
def log_external_stream(self, stream: str, text: str, extras: Optional[PipesExtras] = None):
self._write_message(
"log_external_stream", {"stream": stream, "text": text, "extras": extras or {}}
)
@property
def log(self) -> logging.Logger:
"""logging.Logger: A logger that streams log messages back to Dagster."""
return self._logger
| PipesContext |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/py_test_callback/package.py | {
"start": 328,
"end": 998
} | class ____(Python):
"""A package for testing stand-alone test methods as a callback."""
homepage = "http://www.example.com"
url = "http://www.example.com/test-callback-1.0.tar.gz"
#: This attribute is used in UI queries that need to know the build
#: system base class
build_system_class = "PyTestCallback"
build_system("testcallback")
version("1.0", "00000000000000000000000000000110")
version("2.0", "00000000000000000000000000000120")
def install(self, spec, prefix):
mkdirp(prefix.bin)
def test_callback(self):
print("PyTestCallback test")
@spack.builder.register_builder("testcallback")
| PyTestCallback |
python | ray-project__ray | python/ray/tests/test_batch_node_provider_unit.py | {
"start": 3619,
"end": 19107
} | class ____:
"""Utility to test BatchingNodeProvider."""
def __init__(self):
self.node_provider = MockBatchingNodeProvider(
provider_config={
DISABLE_LAUNCH_CONFIG_CHECK_KEY: True,
DISABLE_NODE_UPDATERS_KEY: True,
FOREGROUND_NODE_LAUNCH_KEY: True,
},
cluster_name="test-cluster",
)
# Maps node types to expected node counts.
self.expected_node_counts = defaultdict(int)
self.expected_node_counts["head"] = 1
# Tracks how many times we expect a scale request to have been submitted.
self.expected_scale_request_submitted_count = 0
def update(
self, create_node_requests, terminate_nodes_requests, safe_to_scale_flag
):
"""Simulates an autoscaler update with multiple terminate and create calls.
Calls non_terminated_nodes, then create/terminate nodes, then post_process.
Args:
create_node_requests (List[Tuple(str, int)]): List of pairs
(node type, count). Each pair is used in a create_node call that
creates count nodes of the node type.
terminate_nodes_requests (List[Tuple(str, int)]): List of pairs
(node type, count). Each pair is used in a terminate_nodes call
that terminates up to count nodes of the node type.
safe_to_scale_flag (bool): Passed to the node provider to determine # noqa
where provider.safe_to_scale() evaluates to True or False.
"""
self.node_provider.safe_to_scale_flag = safe_to_scale_flag
# Call non_terminated_nodes to refresh internal caches.
# Also validate node provider state.
self.validate_non_terminated_nodes()
# Abort if it's not safe to scale.
# This behavior is tested in the context of an actual autoscaler update in
# test_autoscaler:test_safe_to_scale.
if not self.node_provider.safe_to_scale():
return
# Terminate some nodes.
# Set to track nodes marked for termination during the update.
to_terminate_this_update = set()
for node_type, count in terminate_nodes_requests:
# Each iteration makes a provider.terminate_nodes call.
# Terminate "count" nodes of the given node_type.
# If count is greater than the number of remaining nodes of the type,
# terminate all of the nodes of the type.
to_terminate_this_request = []
for node in self.node_provider._node_data_dict:
if len(to_terminate_this_request) >= count:
break
if (
self.node_provider.node_tags(node)[TAG_RAY_USER_NODE_TYPE]
!= node_type
):
continue
if node in to_terminate_this_update:
continue
to_terminate_this_update.add(node)
to_terminate_this_request.append(node)
self.node_provider.terminate_nodes(to_terminate_this_request)
self.expected_node_counts[node_type] -= len(to_terminate_this_request)
# else: the scale request will not be submitted.
# Create some nodes.
for node_type, count in create_node_requests:
self.node_provider.create_node(
node_config={}, tags={TAG_RAY_USER_NODE_TYPE: node_type}, count=count
)
self.expected_node_counts[node_type] += count
# else: the scale request will not be submitted.
# Scale change is needed exactly when there's something to create or terminate.
assert self.node_provider.scale_change_needed is bool(
create_node_requests or terminate_nodes_requests
)
# Submit the scale request.
self.node_provider.post_process()
# Expect a scale request to be submitted iff we called create or terminate
# at least one.
if create_node_requests or terminate_nodes_requests:
self.expected_scale_request_submitted_count += 1
def validate_non_terminated_nodes(self):
"""Calls non_terminated_nodes and validates output against this test classes's
accumulated expected state.
Tests methods internal_ip, node_tags, non_terminated_nodes of
BatchingNodeProvider.
"""
nodes = self.node_provider.non_terminated_nodes({})
# Get counts of nodes using provider.node_tags to extract node type.
actual_node_counts = defaultdict(int)
for node in nodes:
# Trivial check. Just confirming we can call internal_ip with no issue.
assert isinstance(self.node_provider.internal_ip(node), str)
# Check tag structure.
tags = self.node_provider.node_tags(node)
assert set(tags.keys()) == {
TAG_RAY_USER_NODE_TYPE,
TAG_RAY_NODE_STATUS,
TAG_RAY_NODE_KIND,
}
node_type = tags[TAG_RAY_USER_NODE_TYPE]
node_kind = tags[TAG_RAY_NODE_KIND]
node_status = tags[TAG_RAY_NODE_STATUS]
if node_type == "head":
assert node_kind == NODE_KIND_HEAD
else:
assert node_kind == NODE_KIND_WORKER
# Just by construction of this test:
assert node_status == STATUS_UP_TO_DATE
actual_node_counts[node_type] += 1
# Remove 0 values from expected_node_counts before comparing.
for k, v in copy(self.expected_node_counts).items():
if v == 0:
del self.expected_node_counts[k]
assert actual_node_counts == self.expected_node_counts
# Get node counts again using tag filters.
actual_node_counts_again = {}
for node_type in actual_node_counts:
actual_node_counts_again[node_type] = len(
self.node_provider.non_terminated_nodes(
tag_filters={TAG_RAY_USER_NODE_TYPE: node_type}
)
)
assert actual_node_counts_again == self.expected_node_counts
# Check filtering by node kind.
workers = self.node_provider.non_terminated_nodes(
tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER}
)
heads = self.node_provider.non_terminated_nodes(
tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_HEAD}
)
assert len(heads) == 1
assert set(nodes) == set(workers) | set(heads)
# Check filtering by status.
up_to_date_nodes = self.node_provider.non_terminated_nodes(
tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE}
)
assert set(up_to_date_nodes) == set(nodes)
# Make some assertions about internal structure of the node provider.
expected_node_counts_without_head = copy(self.expected_node_counts)
del expected_node_counts_without_head["head"]
# Desired number of workers immediately after calling non_terminated_nodes is
# current number of workers.
assert (
self.node_provider.scale_request.desired_num_workers
== expected_node_counts_without_head
)
# scale_change_needed should be reset after calling non_terminated_nodes
# (meaning: we've just obtained cluster state and have no indication
# from create_node or terminate_node calls that scale needs to change.)
assert self.node_provider.scale_change_needed is False
# We've submitted the expected number of scale requests:
assert (
self.node_provider._scale_request_submitted_count
== self.expected_scale_request_submitted_count
)
def update_with_random_requests(self):
random_requests = self.generate_random_requests()
self.update(*random_requests)
def generate_random_requests(self):
"""Generates random sequences of create_node and terminate_nodes requests
for the node provider. Generates random safe_to_scale_flag.
"""
num_creates = random.choice(range(100))
num_terminates = random.choice(range(100))
create_node_requests = []
for _ in range(num_creates):
# Choose from 5 worker types.
node_type = random.choice([f"type-{x}" for x in range(5)])
# Create up to 9 workers.
count = random.choice(range(10))
create_node_requests.append((node_type, count))
terminate_nodes_requests = []
for _ in range(num_terminates):
node_type = random.choice([f"type-{x}" for x in range(5)])
# Terminate up to 9 workers.
count = random.choice(range(10))
terminate_nodes_requests.append((node_type, count))
# 50% chance of the update being executed.
safe_to_scale_flag = random.choice([True, False])
return create_node_requests, terminate_nodes_requests, safe_to_scale_flag
def assert_worker_counts(self, expected_worker_counts):
"""Validates worker counts against internal node provider state."""
self.node_provider._assert_worker_counts(expected_worker_counts)
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Not relevant on Windows.")
def test_batching_node_provider_basic():
tester = BatchingNodeProviderTester()
tester.update(
create_node_requests=[
("type-1", 5),
],
terminate_nodes_requests=[],
safe_to_scale_flag=True,
)
tester.assert_worker_counts({"type-1": 5})
assert tester.node_provider._scale_request_submitted_count == 1
tester.update(
create_node_requests=[("type-2", 5), ("type-2", 5)],
terminate_nodes_requests=[("type-1", 2)],
safe_to_scale_flag=True,
)
tester.assert_worker_counts({"type-1": 3, "type-2": 10})
assert tester.node_provider._scale_request_submitted_count == 2
tester.update(
create_node_requests=[],
terminate_nodes_requests=[("type-1", 2), ("type-2", 1), ("type-2", 1)],
safe_to_scale_flag=True,
)
tester.assert_worker_counts({"type-1": 1, "type-2": 8})
assert tester.node_provider._scale_request_submitted_count == 3
tester.update(
create_node_requests=[],
terminate_nodes_requests=[],
safe_to_scale_flag=True,
)
tester.assert_worker_counts({"type-1": 1, "type-2": 8})
# No scale request submitted, since there were no create/terminate calls.
assert tester.node_provider._scale_request_submitted_count == 3
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Not relevant on Windows.")
def test_batching_node_provider_many_requests():
"""Simulate 10 autoscaler updates with randomly generated create/terminate
requests.
"""
tester = BatchingNodeProviderTester()
for _ in range(2):
tester.update_with_random_requests()
# Final check.
tester.validate_non_terminated_nodes()
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Not relevant on Windows.")
def test_terminate_safeguards():
"""Tests the following behaviors:
- the node provider ignores requests to terminate a node twice.
- the node provider ignores requests to terminate an unknown node.
"""
node_provider = MockBatchingNodeProvider(
provider_config={
DISABLE_LAUNCH_CONFIG_CHECK_KEY: True,
DISABLE_NODE_UPDATERS_KEY: True,
FOREGROUND_NODE_LAUNCH_KEY: True,
},
cluster_name="test-cluster",
)
nodes = node_provider.non_terminated_nodes({})
assert len(nodes) == 1
head_node = nodes[0]
node_provider.create_node(
node_config={}, tags={TAG_RAY_USER_NODE_TYPE: "type"}, count=1
)
node_provider.post_process()
nodes = node_provider.non_terminated_nodes({})
assert len(nodes) == 2
worker_node = ""
for node in nodes:
if node == head_node:
continue
else:
worker_node = node
# This node is not in our list.
unknown_node = node + worker_node
node_provider.terminate_node(unknown_node)
node_provider.post_process()
nodes = node_provider.non_terminated_nodes({})
# Terminate request was ignored because the node is unknown.
assert len(nodes) == 2
node_provider.terminate_node(worker_node)
node_provider.terminate_node(worker_node)
node_provider.post_process()
nodes = node_provider.non_terminated_nodes({})
# Second terminate request was ignored.
assert len(nodes) == 1
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Not relevant on Windows.")
def test_terminate_node_in_multihost_replica():
"""Test multi-host replica deletion logic for KubeRay.
Tests manually deleting a node in a multi-host replica
and verifying that the entire replica is scaled down.
Nodes belonging to the same multi-host replica are identified
through a replicaIndex label set by a GKE webhook.
"""
# create 4 TPU nodes with MockBatchingNodeProvider
node_provider = MockBatchingNodeProvider(
provider_config={
DISABLE_LAUNCH_CONFIG_CHECK_KEY: True,
DISABLE_NODE_UPDATERS_KEY: True,
FOREGROUND_NODE_LAUNCH_KEY: True,
},
cluster_name="test-cluster",
)
num_tpu_workers = 4
for i in range(num_tpu_workers):
node_provider._add_node(node_type="TPU", node_kind=NODE_KIND_WORKER)
# Set replica_index in node_data for all workers
workers = node_provider.non_terminated_nodes(
tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER}
)
assert len(workers) == num_tpu_workers
for index, node_id in enumerate(workers):
if index < num_tpu_workers // 2:
node_provider.set_node_replica_index(node_id, "tpu-group-0")
else:
node_provider.set_node_replica_index(node_id, "tpu-group-1")
# Verify RAY_REPLICA_INDEX tag has been set
replicaIndexFilter = {TAG_RAY_REPLICA_INDEX: "tpu-group-0"}
replicaWorkers1 = node_provider.non_terminated_nodes(tag_filters=replicaIndexFilter)
assert len(replicaWorkers1) == num_tpu_workers // 2
replicaIndexFilter[TAG_RAY_REPLICA_INDEX] = "tpu-group-1"
replicaWorkers2 = node_provider.non_terminated_nodes(tag_filters=replicaIndexFilter)
assert len(replicaWorkers2) == num_tpu_workers // 2
# Verify replica_to_nodes mapping has been populated
assert (
len(node_provider.replica_index_to_nodes["tpu-group-0"]) == num_tpu_workers // 2
)
assert (
len(node_provider.replica_index_to_nodes["tpu-group-1"]) == num_tpu_workers // 2
)
worker_0 = replicaWorkers1[0] # tpu-group-0
worker_2 = replicaWorkers2[0] # tpu-group-1
# Manually delete one TPU worker in tpu-group-0
# BatchingNodeProvider should scale down all nodes in the replica
assert worker_0 in node_provider.node_data_dict
node_provider.terminate_node(worker_0)
assert len(node_provider.scale_request.workers_to_delete) == num_tpu_workers // 2
# Scale down the tpu-group-1 replica
assert worker_2 in node_provider.node_data_dict
node_provider.terminate_node(worker_2)
assert len(node_provider.scale_request.workers_to_delete) == num_tpu_workers
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| BatchingNodeProviderTester |
python | gevent__gevent | src/gevent/events.py | {
"start": 7234,
"end": 7849
} | class ____(Interface):
"""
The event emitted when the memory usage threshold is exceeded.
This event is emitted only while memory continues to grow
above the threshold. Only if the condition or stabilized is corrected (memory
usage drops) will the event be emitted in the future.
This event is emitted in the monitor thread.
"""
mem_usage = Attribute("The current process memory usage, in bytes.")
max_allowed = Attribute("The maximum allowed memory usage, in bytes.")
memory_info = Attribute("The tuple of memory usage stats return by psutil.")
| IMemoryUsageThresholdExceeded |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 102996,
"end": 106842
} | class ____(ASTDeclarator):
def __init__(
self, className: ASTNestedName, const: bool, volatile: bool, next: ASTDeclarator
) -> None:
assert className
assert next
self.className = className
self.const = const
self.volatile = volatile
self.next = next
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTDeclaratorMemPtr):
return NotImplemented
return (
self.className == other.className
and self.const == other.const
and self.volatile == other.volatile
and self.next == other.next
)
def __hash__(self) -> int:
return hash((self.className, self.const, self.volatile, self.next))
@property
def name(self) -> ASTNestedName:
return self.next.name
@name.setter
def name(self, name: ASTNestedName) -> None:
self.next.name = name
@property
def isPack(self) -> bool:
return self.next.isPack
@property
def function_params(self) -> list[ASTFunctionParameter]:
return self.next.function_params
@property
def trailingReturn(self) -> ASTType:
return self.next.trailingReturn
def require_space_after_declSpecs(self) -> bool:
return True
def _stringify(self, transform: StringifyTransform) -> str:
res: list[str] = []
res.extend((transform(self.className), '::*'))
if self.volatile:
res.append('volatile')
if self.const:
if self.volatile:
res.append(' ')
res.append('const')
if self.next.require_space_after_declSpecs():
res.append(' ')
res.append(transform(self.next))
return ''.join(res)
def get_modifiers_id(self, version: int) -> str:
if version == 1:
raise NoOldIdError
return self.next.get_modifiers_id(version)
def get_param_id(self, version: int) -> str: # only the parameters (if any)
if version == 1:
raise NoOldIdError
return self.next.get_param_id(version)
def get_ptr_suffix_id(self, version: int) -> str:
if version == 1:
raise NoOldIdError
raise NotImplementedError
return self.next.get_ptr_suffix_id(version) + 'Dp'
def get_type_id(self, version: int, returnTypeId: str) -> str:
assert version >= 2
# ReturnType name::* next, so we are part of the return type of next
next_return_type_id = ''
if self.volatile:
next_return_type_id += 'V'
if self.const:
next_return_type_id += 'K'
next_return_type_id += 'M'
next_return_type_id += self.className.get_id(version)
next_return_type_id += returnTypeId
return self.next.get_type_id(version, next_return_type_id)
def is_function_type(self) -> bool:
return self.next.is_function_type()
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
verify_description_mode(mode)
self.className.describe_signature(signode, 'markType', env, symbol)
signode += addnodes.desc_sig_punctuation('::', '::')
signode += addnodes.desc_sig_punctuation('*', '*')
def _add_anno(signode: TextElement, text: str) -> None:
signode += addnodes.desc_sig_keyword(text, text)
if self.volatile:
_add_anno(signode, 'volatile')
if self.const:
if self.volatile:
signode += addnodes.desc_sig_space()
_add_anno(signode, 'const')
if self.next.require_space_after_declSpecs():
signode += addnodes.desc_sig_space()
self.next.describe_signature(signode, mode, env, symbol)
| ASTDeclaratorMemPtr |
python | kamyu104__LeetCode-Solutions | Python/identify-the-largest-outlier-in-an-array.py | {
"start": 42,
"end": 568
} | class ____(object):
def getLargestOutlier(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = float("-inf")
total = sum(nums)
cnt = collections.defaultdict(int)
for x in nums:
cnt[x] += 1
for x in nums:
if (total-x)%2:
continue
target = (total-x)//2
if target in cnt and (cnt[target]-int(target == x) >= 1):
result = max(result, x)
return result
| Solution |
python | apache__airflow | providers/fab/src/airflow/providers/fab/www/session.py | {
"start": 1904,
"end": 2087
} | class ____(SessionExemptMixin, SqlAlchemySessionInterface):
"""Session interface that exempts some routes and stores session data in the database."""
| AirflowDatabaseSessionInterface |
python | plotly__plotly.py | plotly/graph_objs/layout/_ternary.py | {
"start": 235,
"end": 7494
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout"
_path_str = "layout.ternary"
_valid_props = {"aaxis", "baxis", "bgcolor", "caxis", "domain", "sum", "uirevision"}
@property
def aaxis(self):
"""
The 'aaxis' property is an instance of Aaxis
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.ternary.Aaxis`
- A dict of string/value properties that will be passed
to the Aaxis constructor
Returns
-------
plotly.graph_objs.layout.ternary.Aaxis
"""
return self["aaxis"]
@aaxis.setter
def aaxis(self, val):
self["aaxis"] = val
@property
def baxis(self):
"""
The 'baxis' property is an instance of Baxis
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.ternary.Baxis`
- A dict of string/value properties that will be passed
to the Baxis constructor
Returns
-------
plotly.graph_objs.layout.ternary.Baxis
"""
return self["baxis"]
@baxis.setter
def baxis(self, val):
self["baxis"] = val
@property
def bgcolor(self):
"""
Set the background color of the subplot
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def caxis(self):
"""
The 'caxis' property is an instance of Caxis
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.ternary.Caxis`
- A dict of string/value properties that will be passed
to the Caxis constructor
Returns
-------
plotly.graph_objs.layout.ternary.Caxis
"""
return self["caxis"]
@caxis.setter
def caxis(self, val):
self["caxis"] = val
@property
def domain(self):
"""
The 'domain' property is an instance of Domain
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.ternary.Domain`
- A dict of string/value properties that will be passed
to the Domain constructor
Returns
-------
plotly.graph_objs.layout.ternary.Domain
"""
return self["domain"]
@domain.setter
def domain(self, val):
self["domain"] = val
@property
def sum(self):
"""
The number each triplet should sum to, and the maximum range of
each axis
The 'sum' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["sum"]
@sum.setter
def sum(self, val):
self["sum"] = val
@property
def uirevision(self):
"""
Controls persistence of user-driven changes in axis `min` and
`title`, if not overridden in the individual axes. Defaults to
`layout.uirevision`.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
@property
def _prop_descriptions(self):
return """\
aaxis
:class:`plotly.graph_objects.layout.ternary.Aaxis`
instance or dict with compatible properties
baxis
:class:`plotly.graph_objects.layout.ternary.Baxis`
instance or dict with compatible properties
bgcolor
Set the background color of the subplot
caxis
:class:`plotly.graph_objects.layout.ternary.Caxis`
instance or dict with compatible properties
domain
:class:`plotly.graph_objects.layout.ternary.Domain`
instance or dict with compatible properties
sum
The number each triplet should sum to, and the maximum
range of each axis
uirevision
Controls persistence of user-driven changes in axis
`min` and `title`, if not overridden in the individual
axes. Defaults to `layout.uirevision`.
"""
def __init__(
self,
arg=None,
aaxis=None,
baxis=None,
bgcolor=None,
caxis=None,
domain=None,
sum=None,
uirevision=None,
**kwargs,
):
"""
Construct a new Ternary object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.Ternary`
aaxis
:class:`plotly.graph_objects.layout.ternary.Aaxis`
instance or dict with compatible properties
baxis
:class:`plotly.graph_objects.layout.ternary.Baxis`
instance or dict with compatible properties
bgcolor
Set the background color of the subplot
caxis
:class:`plotly.graph_objects.layout.ternary.Caxis`
instance or dict with compatible properties
domain
:class:`plotly.graph_objects.layout.ternary.Domain`
instance or dict with compatible properties
sum
The number each triplet should sum to, and the maximum
range of each axis
uirevision
Controls persistence of user-driven changes in axis
`min` and `title`, if not overridden in the individual
axes. Defaults to `layout.uirevision`.
Returns
-------
Ternary
"""
super().__init__("ternary")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.Ternary
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Ternary`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("aaxis", arg, aaxis)
self._set_property("baxis", arg, baxis)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("caxis", arg, caxis)
self._set_property("domain", arg, domain)
self._set_property("sum", arg, sum)
self._set_property("uirevision", arg, uirevision)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Ternary |
python | huggingface__transformers | src/transformers/models/x_clip/processing_x_clip.py | {
"start": 700,
"end": 1514
} | class ____(ProcessorMixin):
r"""
Constructs an X-CLIP processor which wraps a VideoMAE image processor and a CLIP tokenizer into a single processor.
[`XCLIPProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`CLIPTokenizerFast`]. See the
[`~XCLIPProcessor.__call__`] and [`~XCLIPProcessor.decode`] for more information.
Args:
image_processor ([`CLIPImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`CLIPTokenizerFast`], *optional*):
The tokenizer is a required input.
"""
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
super().__init__(image_processor, tokenizer)
self.video_processor = self.image_processor
__all__ = ["XCLIPProcessor"]
| XCLIPProcessor |
python | apache__airflow | task-sdk/src/airflow/sdk/api/datamodels/_generated.py | {
"start": 11339,
"end": 11601
} | class ____(RootModel[JsonValue]):
root: Annotated[
JsonValue,
Field(
description="XCom schema with minimal structure for index-based access.",
title="XComSequenceIndexResponse",
),
]
| XComSequenceIndexResponse |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_autofilter05.py | {
"start": 315,
"end": 2615
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("autofilter05.xlsx")
self.set_text_file("autofilter_data.txt")
def test_create_file(self):
"""
Test the creation of a simple XlsxWriter file with an autofilter.
This test corresponds to the following examples/autofilter.pl example:
Example 5. Autofilter with filter for blanks.
"""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
# Set the autofilter.
worksheet.autofilter("A1:D51")
# Add filter criteria.
worksheet.filter_column(0, "x == Blanks")
# Open a text file with autofilter example data.
textfile = open(self.txt_filename)
# Read the headers from the first line of the input file.
headers = textfile.readline().strip("\n").split()
# Write out the headers.
worksheet.write_row("A1", headers)
# Start writing data after the headers.
row = 1
# Read the rest of the text file and write it to the worksheet.
for line in textfile:
# Split the input data based on whitespace.
data = line.strip("\n").split()
# Convert the number data from the text file.
for i, item in enumerate(data):
try:
data[i] = float(item)
except ValueError:
pass
# Simulate a blank cell in the data.
if row == 6:
data[0] = ""
# Get some of the field data.
region = data[0]
# Check for rows that match the filter.
if region == "":
# Row matches the filter, no further action required.
pass
else:
# We need to hide rows that don't match the filter.
worksheet.set_row(row, options={"hidden": True})
# Write out the row data.
worksheet.write_row(row, 0, data)
# Move on to the next worksheet row.
row += 1
textfile.close()
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/path_registry.py | {
"start": 24841,
"end": 26007
} | class ____(_AbstractEntityRegistry):
# for long lived mapper, return dict based caching
# version that creates reference cycles
__slots__ = ("_cache",)
inherit_cache = True
def __init__(
self,
parent: Union[RootRegistry, _PropRegistry],
entity: _InternalEntityType[Any],
):
super().__init__(parent, entity)
self._cache = _ERDict(self)
def pop(self, key: Any, default: Any) -> Any:
return self._cache.pop(key, default)
def _getitem(self, entity: Any) -> Any:
if isinstance(entity, (int, slice)):
return self.path[entity]
elif isinstance(entity, PathToken):
return _TokenRegistry(self, entity)
else:
return self._cache[entity]
if not TYPE_CHECKING:
__getitem__ = _getitem
if TYPE_CHECKING:
def path_is_entity(
path: PathRegistry,
) -> TypeGuard[_AbstractEntityRegistry]: ...
def path_is_property(path: PathRegistry) -> TypeGuard[_PropRegistry]: ...
else:
path_is_entity = operator.attrgetter("is_entity")
path_is_property = operator.attrgetter("is_property")
| _CachingEntityRegistry |
python | getsentry__sentry | src/sentry/uptime/subscriptions/subscriptions.py | {
"start": 3491,
"end": 22831
} | class ____(Exception):
"""
Indicates that the quotes system is unable to allocate a seat for the new
uptime monitor.
"""
result: SeatAssignmentResult
def __init__(self, result: SeatAssignmentResult) -> None:
super().__init__()
self.result = result
def create_uptime_subscription(
url: str,
interval_seconds: int,
timeout_ms: int,
method: str = "GET",
headers: Sequence[tuple[str, str]] | None = None,
body: str | None = None,
trace_sampling: bool = False,
) -> UptimeSubscription:
"""
Creates a new uptime subscription. This creates the row in postgres, and fires a task that will send the config
to the uptime check system.
"""
if headers is None:
headers = []
# We extract the domain and suffix of the url here. This is used to prevent there being too many checks to a single
# domain.
result = extract_domain_parts(url)
subscription = UptimeSubscription.objects.create(
url=url,
url_domain=result.domain,
url_domain_suffix=result.suffix,
interval_seconds=interval_seconds,
timeout_ms=timeout_ms,
status=UptimeSubscription.Status.CREATING.value,
type=UPTIME_SUBSCRIPTION_TYPE,
method=method,
headers=headers, # type: ignore[misc]
body=body,
trace_sampling=trace_sampling,
)
# Associate active regions with this subscription
for region_config in get_active_regions():
UptimeSubscriptionRegion.objects.create(
uptime_subscription=subscription,
region_slug=region_config.slug,
mode=region_config.mode,
)
def commit_tasks():
create_remote_uptime_subscription.delay(subscription.id)
fetch_subscription_rdap_info.delay(subscription.id)
transaction.on_commit(commit_tasks, using=router.db_for_write(UptimeSubscription))
return subscription
def update_uptime_subscription(
subscription: UptimeSubscription,
url: str | NotSet = NOT_SET,
interval_seconds: int | NotSet = NOT_SET,
timeout_ms: int | NotSet = NOT_SET,
method: str | NotSet = NOT_SET,
headers: Sequence[tuple[str, str]] | None | NotSet = NOT_SET,
body: str | None | NotSet = NOT_SET,
trace_sampling: bool | NotSet = NOT_SET,
):
"""
Updates an existing uptime subscription. This updates the row in postgres, and fires a task that will send the
config to the uptime check system.
"""
url = default_if_not_set(subscription.url, url)
# We extract the domain and suffix of the url here. This is used to prevent there being too many checks to a single
# domain.
result = extract_domain_parts(url)
headers = default_if_not_set(subscription.headers, headers)
if headers is None:
headers = []
subscription.update(
status=UptimeSubscription.Status.UPDATING.value,
url=url,
url_domain=result.domain,
url_domain_suffix=result.suffix,
interval_seconds=default_if_not_set(subscription.interval_seconds, interval_seconds),
timeout_ms=default_if_not_set(subscription.timeout_ms, timeout_ms),
method=default_if_not_set(subscription.method, method),
headers=headers,
body=default_if_not_set(subscription.body, body),
trace_sampling=default_if_not_set(subscription.trace_sampling, trace_sampling),
)
# Associate active regions with this subscription
check_and_update_regions(subscription, load_regions_for_uptime_subscription(subscription.id))
def commit_tasks():
update_remote_uptime_subscription.delay(subscription.id)
fetch_subscription_rdap_info.delay(subscription.id)
transaction.on_commit(commit_tasks, using=router.db_for_write(UptimeSubscription))
def delete_uptime_subscription(uptime_subscription: UptimeSubscription):
"""
Deletes an existing uptime subscription. This updates the row in postgres, and fires a task that will send the
deletion to the external system and remove the row once successful.
"""
uptime_subscription.update(status=UptimeSubscription.Status.DELETING.value)
transaction.on_commit(
lambda: delete_remote_uptime_subscription.delay(uptime_subscription.id),
using=router.db_for_write(UptimeSubscription),
)
def create_uptime_detector(
project: Project,
environment: Environment | None,
url: str,
interval_seconds: int,
timeout_ms: int,
method: str = "GET",
headers: Sequence[tuple[str, str]] | None = None,
body: str | None = None,
mode: UptimeMonitorMode = UptimeMonitorMode.MANUAL,
status: int = ObjectStatus.ACTIVE,
name: str = "",
owner: Actor | None = None,
trace_sampling: bool = False,
override_manual_org_limit: bool = False,
recovery_threshold: int = DEFAULT_RECOVERY_THRESHOLD,
downtime_threshold: int = DEFAULT_DOWNTIME_THRESHOLD,
) -> Detector:
"""
Creates an UptimeSubscription and associated Detector
"""
if mode == UptimeMonitorMode.MANUAL:
# Once a user has created a subscription manually, make sure we disable all autodetection, and remove any
# onboarding monitors
if project.organization.get_option("sentry:uptime_autodetection", False):
project.organization.update_option("sentry:uptime_autodetection", False)
for detector in get_auto_monitored_detectors_for_project(
project, modes=[UptimeMonitorMode.AUTO_DETECTED_ONBOARDING]
):
delete_uptime_detector(detector)
if not override_manual_org_limit:
check_uptime_subscription_limit(project.organization_id)
with atomic_transaction(
using=(
router.db_for_write(UptimeSubscription),
router.db_for_write(DataSource),
router.db_for_write(DataCondition),
router.db_for_write(DataConditionGroup),
router.db_for_write(DataSourceDetector),
router.db_for_write(Detector),
)
):
uptime_subscription = create_uptime_subscription(
url=url,
interval_seconds=interval_seconds,
timeout_ms=timeout_ms,
method=method,
headers=headers,
body=body,
trace_sampling=trace_sampling,
)
owner_user_id = None
owner_team_id = None
if owner:
if owner.is_user:
owner_user_id = owner.id
if owner.is_team:
owner_team_id = owner.id
data_source = DataSource.objects.create(
type=DATA_SOURCE_UPTIME_SUBSCRIPTION,
organization=project.organization,
source_id=str(uptime_subscription.id),
)
condition_group = DataConditionGroup.objects.create(
organization=project.organization,
)
DataCondition.objects.create(
comparison=CHECKSTATUS_FAILURE,
type=Condition.EQUAL,
condition_result=DetectorPriorityLevel.HIGH,
condition_group=condition_group,
)
DataCondition.objects.create(
comparison=CHECKSTATUS_SUCCESS,
type=Condition.EQUAL,
condition_result=DetectorPriorityLevel.OK,
condition_group=condition_group,
)
env = environment.name if environment else None
detector = Detector.objects.create(
type=GROUP_TYPE_UPTIME_DOMAIN_CHECK_FAILURE,
project=project,
name=name,
owner_user_id=owner_user_id,
owner_team_id=owner_team_id,
config={
"environment": env,
"mode": mode,
"recovery_threshold": recovery_threshold,
"downtime_threshold": downtime_threshold,
},
workflow_condition_group=condition_group,
)
DataSourceDetector.objects.create(data_source=data_source, detector=detector)
# Don't consume a seat if we're still in onboarding mode
if mode != UptimeMonitorMode.AUTO_DETECTED_ONBOARDING:
# Update status. This may have the side effect of removing or creating a
# remote subscription. When a new monitor is created we will ensure seat
# assignment, which may cause the monitor to be disabled if there are no
# available seat assignments.
match status:
case ObjectStatus.ACTIVE:
try:
enable_uptime_detector(detector, ensure_assignment=True)
except UptimeMonitorNoSeatAvailable:
# No need to do anything if we failed to handle seat
# assignment. The monitor will be created, but not enabled
pass
case ObjectStatus.DISABLED:
disable_uptime_detector(detector)
# Detector may have been updated as part of
# {enable,disable}_uptime_detector
detector.refresh_from_db()
return detector
def update_uptime_detector(
detector: Detector,
environment: Environment | None | NotSet = NOT_SET,
url: str | NotSet = NOT_SET,
interval_seconds: int | NotSet = NOT_SET,
timeout_ms: int | NotSet = NOT_SET,
method: str | NotSet = NOT_SET,
headers: Sequence[tuple[str, str]] | NotSet = NOT_SET,
body: str | None | NotSet = NOT_SET,
name: str | NotSet = NOT_SET,
owner: Actor | None | NotSet = NOT_SET,
trace_sampling: bool | NotSet = NOT_SET,
status: int = ObjectStatus.ACTIVE,
mode: UptimeMonitorMode = UptimeMonitorMode.MANUAL,
ensure_assignment: bool = False,
recovery_threshold: int | NotSet = NOT_SET,
downtime_threshold: int | NotSet = NOT_SET,
):
"""
Updates a uptime detector and its associated uptime subscription.
"""
with atomic_transaction(
using=(
router.db_for_write(UptimeSubscription),
router.db_for_write(Detector),
)
):
uptime_subscription = get_uptime_subscription(detector)
update_uptime_subscription(
uptime_subscription,
url=url,
interval_seconds=interval_seconds,
timeout_ms=timeout_ms,
method=method,
headers=headers,
body=body,
trace_sampling=trace_sampling,
)
owner_user_id = detector.owner_user_id
owner_team_id = detector.owner_team_id
if owner and owner is not NOT_SET:
if owner.is_user:
owner_user_id = owner.id
owner_team_id = None
if owner.is_team:
owner_team_id = owner.id
owner_user_id = None
current_env = detector.config.get("environment")
if current_env:
current_env_obj = Environment.get_or_create(detector.project, current_env)
else:
current_env_obj = None
env = default_if_not_set(current_env_obj, environment)
detector.update(
name=default_if_not_set(detector.name, name),
owner_user_id=owner_user_id,
owner_team_id=owner_team_id,
config={
"mode": mode,
"environment": env.name if env else None,
"recovery_threshold": default_if_not_set(
detector.config["recovery_threshold"],
recovery_threshold,
),
"downtime_threshold": default_if_not_set(
detector.config["downtime_threshold"],
downtime_threshold,
),
},
)
# Don't consume a seat if we're still in onboarding mode
if mode != UptimeMonitorMode.AUTO_DETECTED_ONBOARDING:
# Update status. This may have the side effect of removing or creating a
# remote subscription. Will raise a UptimeMonitorNoSeatAvailable if seat
# assignment fails.
match status:
case ObjectStatus.DISABLED:
disable_uptime_detector(detector)
case ObjectStatus.ACTIVE:
enable_uptime_detector(detector, ensure_assignment=ensure_assignment)
# Detector may have been updated as part of
# {enable,disable}_uptime_detector
detector.refresh_from_db()
def disable_uptime_detector(detector: Detector, skip_quotas: bool = False):
"""
Disables a uptime detector. If the UptimeSubscription no longer has any active
detectors, it will also be disabled.
"""
with atomic_transaction(
using=(
router.db_for_write(UptimeSubscription),
router.db_for_write(Detector),
)
):
uptime_subscription: UptimeSubscription = get_uptime_subscription(detector)
if not detector.enabled:
return
detector_state = detector.detectorstate_set.first()
if detector_state and detector_state.is_triggered:
# Resolve the issue so that we don't see it in the ui anymore
resolve_uptime_issue(detector)
# We set the status back to ok here so that if we re-enable we'll
# start from a good state
detector_state.update(state=DetectorPriorityLevel.OK, is_triggered=False)
cluster = get_cluster()
last_update_key = build_last_update_key(detector)
cluster.delete(last_update_key)
detector.update(enabled=False)
if not skip_quotas:
quotas.backend.disable_seat(DataCategory.UPTIME, detector)
# Are there any other detectors associated to the subscription
# that are still enabled?
has_active_subscription = Detector.objects.filter(
data_sources__source_id=str(uptime_subscription.id),
enabled=True,
status=ObjectStatus.ACTIVE,
).exists()
# All project subscriptions are disabled, we can disable the subscription
# and remove the remote subscription.
if not has_active_subscription:
uptime_subscription.update(status=UptimeSubscription.Status.DISABLED.value)
delete_remote_uptime_subscription.delay(uptime_subscription.id)
def ensure_uptime_seat(detector: Detector) -> None:
"""
Ensures that a billing seat is assigned for the uptime detector.
Raises UptimeMonitorNoSeatAvailable if no seats are available.
"""
outcome = quotas.backend.assign_seat(DataCategory.UPTIME, detector)
if outcome != Outcome.ACCEPTED:
result = quotas.backend.check_assign_seat(DataCategory.UPTIME, detector)
raise UptimeMonitorNoSeatAvailable(result)
def enable_uptime_detector(
detector: Detector, ensure_assignment: bool = False, skip_quotas: bool = False
):
"""
Enable a uptime detector. If the uptime subscription was also disabled it
will be re-activated and the remote subscription will be published.
This method will attempt seat assignment via the quotas system. If There
are no available seats the monitor will be disabled and a
`UptimeMonitorNoSeatAvailable` will be raised.
By default if the detector is already marked as enabled this function is a
no-op. Pass `ensure_assignment=True` to force seat assignment.
"""
if not ensure_assignment and detector.enabled:
return
if not skip_quotas:
try:
ensure_uptime_seat(detector)
except UptimeMonitorNoSeatAvailable:
disable_uptime_detector(detector, skip_quotas=True)
raise
uptime_subscription: UptimeSubscription = get_uptime_subscription(detector)
detector.update(enabled=True)
# The subscription was disabled, it can be re-activated now
if uptime_subscription.status == UptimeSubscription.Status.DISABLED.value:
uptime_subscription.update(status=UptimeSubscription.Status.CREATING.value)
transaction.on_commit(
lambda: create_remote_uptime_subscription.delay(uptime_subscription.id),
using=router.db_for_write(UptimeSubscription),
)
def remove_uptime_seat(detector: Detector):
quotas.backend.remove_seat(DataCategory.UPTIME, detector)
def delete_uptime_detector(detector: Detector):
uptime_subscription = get_uptime_subscription(detector)
remove_uptime_seat(detector)
detector.update(status=ObjectStatus.PENDING_DELETION)
RegionScheduledDeletion.schedule(detector, days=0)
delete_uptime_subscription(uptime_subscription)
def is_url_auto_monitored_for_project(project: Project, url: str) -> bool:
auto_detected_subscription_ids = list(
Detector.objects.filter(
status=ObjectStatus.ACTIVE,
type=GROUP_TYPE_UPTIME_DOMAIN_CHECK_FAILURE,
project=project,
config__mode__in=(
UptimeMonitorMode.AUTO_DETECTED_ONBOARDING.value,
UptimeMonitorMode.AUTO_DETECTED_ACTIVE.value,
),
)
.select_related("data_sources")
.values_list("data_sources__source_id", flat=True)
)
return UptimeSubscription.objects.filter(
id__in=auto_detected_subscription_ids,
url=url,
).exists()
def get_auto_monitored_detectors_for_project(
project: Project,
modes: Sequence[UptimeMonitorMode] | None = None,
) -> list[Detector]:
if modes is None:
modes = [
UptimeMonitorMode.AUTO_DETECTED_ONBOARDING,
UptimeMonitorMode.AUTO_DETECTED_ACTIVE,
]
return list(
Detector.objects.filter(
type=GROUP_TYPE_UPTIME_DOMAIN_CHECK_FAILURE, project=project, config__mode__in=modes
)
)
def check_and_update_regions(
subscription: UptimeSubscription,
regions: list[UptimeSubscriptionRegion],
) -> bool:
"""
This method will check if regions have been added or removed from our region configuration,
and updates regions associated with this uptime subscription to reflect the new state.
"""
subscription_region_modes = {
UptimeRegionWithMode(r.region_slug, UptimeSubscriptionRegion.RegionMode(r.mode))
for r in regions
}
active_regions = set(get_active_regions())
if subscription_region_modes == active_regions:
# Regions haven't changed, exit early.
return False
new_or_updated_regions = active_regions - subscription_region_modes
removed_regions = {srm.slug for srm in subscription_region_modes} - {
ar.slug for ar in active_regions
}
for region in new_or_updated_regions:
UptimeSubscriptionRegion.objects.update_or_create(
uptime_subscription=subscription,
region_slug=region.slug,
defaults={"mode": region.mode},
)
if removed_regions:
for deleted_region in UptimeSubscriptionRegion.objects.filter(
uptime_subscription=subscription, region_slug__in=removed_regions
):
if subscription.subscription_id:
# We need to explicitly send deletes here before we remove the region
send_uptime_config_deletion(
deleted_region.region_slug, subscription.subscription_id
)
deleted_region.delete()
return True
| UptimeMonitorNoSeatAvailable |
python | getsentry__sentry | src/sentry/search/events/fields.py | {
"start": 38928,
"end": 43001
} | class ____(ColumnArg):
measurement_aliases = {
MEASUREMENTS_FRAMES_SLOW_RATE,
MEASUREMENTS_FRAMES_FROZEN_RATE,
MEASUREMENTS_STALL_PERCENTAGE,
}
numeric_array_columns = {
"measurements_value",
"span_op_breakdowns_value",
"spans_exclusive_time",
}
def __init__(
self,
name: str,
allow_array_value: bool | None = False,
spans: bool | None = False,
**kwargs,
):
self.spans = spans
super().__init__(name, **kwargs)
self.allow_array_value = allow_array_value
def _normalize(self, value: str) -> str:
from sentry.snuba.metrics.naming_layer.mri import is_mri
# This method is written in this way so that `get_type` can always call
# this even in child classes where `normalize` have been overridden.
# Shortcutting this for now
# TODO: handle different datasets better here
if self.spans and value in [
"span.duration",
"span.self_time",
"ai.total_tokens.used",
"ai.total_cost",
"cache.item_size",
"http.decoded_response_content_length",
"http.response_content_length",
"http.response_transfer_size",
]:
return value
snuba_column = SEARCH_MAP.get(value)
if not snuba_column and is_measurement(value):
return value
if not snuba_column and is_span_op_breakdown(value):
return value
if not snuba_column and is_mri(value):
return value
match = TYPED_TAG_KEY_RE.search(value)
if match and match.group("type") == "number":
return value
if not snuba_column:
raise InvalidFunctionArgument(f"{value} is not a valid column")
elif snuba_column not in ["time", "timestamp", "duration"]:
raise InvalidFunctionArgument(f"{value} is not a numeric column")
return snuba_column
def normalize(self, value: str, params: ParamsType, combinator: Combinator | None) -> str:
snuba_column = None
if combinator is not None and combinator.validate_argument(value):
snuba_column = value
# `measurement_value` and `span_op_breakdowns_value` are actually an
# array of Float64s. But when used in this context, we always want to
# expand it using `arrayJoin`. The resulting column will be a numeric
# column of type Float64.
if self.allow_array_value:
if value in self.numeric_array_columns:
snuba_column = value
if snuba_column is None:
snuba_column = self._normalize(value)
if self.validate_only:
return value
else:
return snuba_column
def get_type(self, value: str | list[Any]) -> str:
if isinstance(value, str) and value in self.numeric_array_columns:
return "number"
# `measurements.frames_frozen_rate` and `measurements.frames_slow_rate` are aliases
# to a percentage value, since they are expressions rather than columns, we special
# case them here
# TODO: These are no longer expressions with SnQL, this should be removed once the
# migration is done
if isinstance(value, list):
for name in self.measurement_aliases:
field = FIELD_ALIASES[name]
expression = field.get_expression(None)
if expression == value:
return field.result_type
else:
raise AssertionError(f"unreachable: {value}")
if value in self.measurement_aliases:
return "percentage"
snuba_column = self._normalize(value)
if is_duration_measurement(snuba_column) or is_span_op_breakdown(snuba_column):
return "duration"
elif snuba_column == "duration":
return "duration"
elif snuba_column == "timestamp":
return "date"
return "number"
| NumericColumn |
python | readthedocs__readthedocs.org | readthedocs/profiles/views.py | {
"start": 6612,
"end": 6666
} | class ____(TokenMixin, ListView):
pass
| TokenListView |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/data_connector/filesystem_data_connector.py | {
"start": 494,
"end": 574
} | class ____(pydantic.BaseModel):
glob_directive: str = "**/*"
| FilesystemOptions |
python | django__django | tests/modeladmin/test_checks.py | {
"start": 2002,
"end": 4546
} | class ____(CheckTestCase):
def test_not_iterable(self):
class TestModelAdmin(ModelAdmin):
raw_id_fields = 10
self.assertIsInvalid(
TestModelAdmin,
ValidationTestModel,
"The value of 'raw_id_fields' must be a list or tuple.",
"admin.E001",
)
def test_missing_field(self):
class TestModelAdmin(ModelAdmin):
raw_id_fields = ["non_existent_field"]
self.assertIsInvalid(
TestModelAdmin,
ValidationTestModel,
"The value of 'raw_id_fields[0]' refers to 'non_existent_field', "
"which is not a field of 'modeladmin.ValidationTestModel'.",
"admin.E002",
)
def test_invalid_field_type(self):
class TestModelAdmin(ModelAdmin):
raw_id_fields = ("name",)
self.assertIsInvalid(
TestModelAdmin,
ValidationTestModel,
"The value of 'raw_id_fields[0]' must be a foreign key or a "
"many-to-many field.",
"admin.E003",
)
def test_valid_case(self):
class TestModelAdmin(ModelAdmin):
raw_id_fields = ("users",)
self.assertIsValid(TestModelAdmin, ValidationTestModel)
@isolate_apps("modeladmin")
def assertGeneratedDateTimeFieldIsValid(self, *, db_persist):
class TestModel(Model):
date = models.DateTimeField()
date_copy = models.GeneratedField(
expression=F("date"),
output_field=models.DateTimeField(),
db_persist=db_persist,
)
class TestModelAdmin(ModelAdmin):
date_hierarchy = "date_copy"
self.assertIsValid(TestModelAdmin, TestModel)
@skipUnlessDBFeature("supports_stored_generated_columns")
def test_valid_case_stored_generated_field(self):
self.assertGeneratedDateTimeFieldIsValid(db_persist=True)
@skipUnlessDBFeature("supports_virtual_generated_columns")
def test_valid_case_virtual_generated_field(self):
self.assertGeneratedDateTimeFieldIsValid(db_persist=False)
def test_field_attname(self):
class TestModelAdmin(ModelAdmin):
raw_id_fields = ["band_id"]
self.assertIsInvalid(
TestModelAdmin,
ValidationTestModel,
"The value of 'raw_id_fields[0]' refers to 'band_id', which is "
"not a field of 'modeladmin.ValidationTestModel'.",
"admin.E002",
)
| RawIdCheckTests |
python | conda__conda | conda/cli/condarc.py | {
"start": 2402,
"end": 5765
} | class ____:
"""
Groups configuration parameters by their parameter type.
Organizes configuration parameters from a Configuration instance into sequence
and map parameters, handling both regular and plugin parameters separately.
This is primarily used by ConfigurationFile to efficiently determine which operations
are valid for different configuration keys.
"""
def __init__(self, context: Configuration) -> None:
"""
Initialize ParameterTypeGroups by grouping parameters by type.
:param context: Configuration instance containing configuration parameters.
"""
from ..common.iterators import groupby_to_dict as groupby
self._grouped_parameter = groupby(
lambda p: context.describe_parameter(p)["parameter_type"],
context.list_parameters(),
)
# Handle plugin parameters if the context has a plugins attribute
if hasattr(context, "plugins"):
self._plugin_grouped_parameters = groupby(
lambda p: context.plugins.describe_parameter(p)["parameter_type"],
context.plugins.list_parameters(),
)
else:
self._plugin_grouped_parameters = {}
@cached_property
def sequence_parameters(self) -> list[str]:
"""List of sequence parameter names."""
return self._grouped_parameter.get("sequence", [])
@cached_property
def plugin_sequence_parameters(self) -> list[str]:
"""List of plugin sequence parameter names."""
return self._plugin_grouped_parameters.get("sequence", [])
@cached_property
def map_parameters(self) -> list[str]:
"""List of map parameter names."""
return self._grouped_parameter.get("map", [])
@cached_property
def plugin_map_parameters(self) -> list[str]:
"""List of plugin map parameter names."""
return self._plugin_grouped_parameters.get("map", [])
def validate_provided_parameters(
parameters: Sequence[str],
plugin_parameters: Sequence[str],
context: Configuration,
) -> None:
"""
Validate that provided parameters exist in the configuration context.
Compares the provided parameters with the available parameters in the context
and raises an error if any are invalid.
:param parameters: Regular parameter names to validate.
:param plugin_parameters: Plugin parameter names to validate.
:param context: Configuration instance containing available parameters.
:raises ArgumentError: If any provided parameters are not valid.
"""
from ..common.io import dashlist
from ..exceptions import ArgumentError
all_names = context.list_parameters(aliases=True)
# Handle plugin parameters if the context has a plugins attribute
if hasattr(context, "plugins"):
all_plugin_names = context.plugins.list_parameters()
else:
all_plugin_names = []
not_params = set(parameters) - set(all_names)
not_plugin_params = set(plugin_parameters) - set(all_plugin_names)
if not_params or not_plugin_params:
not_plugin_params = {f"plugins.{name}" for name in not_plugin_params}
error_params = not_params | not_plugin_params
raise ArgumentError(
f"Invalid configuration parameters: {dashlist(error_params)}"
)
| ParameterTypeGroups |
python | gevent__gevent | src/gevent/_semaphore.py | {
"start": 1109,
"end": 1331
} | class ____(object):
__slots__ = (
'lock',
)
def __init__(self, lock):
self.lock = lock
def __call__(self, _):
self.lock.release()
_UNSET = object()
_MULTI = object()
| _LockReleaseLink |
python | django-haystack__django-haystack | test_haystack/test_managers.py | {
"start": 1102,
"end": 8826
} | class ____(TestCase):
fixtures = ["bulk_data.json"]
def setUp(self):
super().setUp()
self.search_index = BasicMockModelSearchIndex
# Update the "index".
backend = connections["default"].get_backend()
backend.clear()
backend.update(self.search_index(), MockModel.objects.all())
ui = connections["default"].get_unified_index()
ui.build([BasicMockModelSearchIndex(), BasicAnotherMockModelSearchIndex()])
self.search_queryset = BasicMockModelSearchIndex.objects.all()
def test_queryset(self):
self.assertTrue(isinstance(self.search_queryset, SearchQuerySet))
def test_none(self):
self.assertTrue(
isinstance(self.search_index.objects.none(), EmptySearchQuerySet)
)
def test_filter(self):
sqs = self.search_index.objects.filter(content="foo")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 1)
def test_exclude(self):
sqs = self.search_index.objects.exclude(content="foo")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 1)
def test_filter_and(self):
sqs = self.search_index.objects.filter_and(content="foo")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(sqs.query.query_filter.connector, "AND")
def test_filter_or(self):
sqs = self.search_index.objects.filter_or(content="foo")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(sqs.query.query_filter.connector, "OR")
def test_order_by(self):
sqs = self.search_index.objects.order_by("foo")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertTrue("foo" in sqs.query.order_by)
@unittest.skipUnless(HAVE_GDAL, "Requires gdal library")
def test_order_by_distance(self):
p = Point(1.23, 4.56)
sqs = self.search_index.objects.distance("location", p).order_by("distance")
self.assertTrue(isinstance(sqs, SearchQuerySet))
params = sqs.query.build_params()
self.assertIn("distance_point", params)
self.assertDictEqual(
params["distance_point"], {"field": "location", "point": p}
)
self.assertTupleEqual(params["distance_point"]["point"].coords, (1.23, 4.56))
self.assertListEqual(params["sort_by"], ["distance"])
def test_highlight(self):
sqs = self.search_index.objects.highlight()
self.assertEqual(sqs.query.highlight, True)
def test_boost(self):
sqs = self.search_index.objects.boost("foo", 10)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.boost.keys()), 1)
def test_facets(self):
sqs = self.search_index.objects.facet("foo")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.facets), 1)
@unittest.skipUnless(HAVE_GDAL, "Requires gdal library")
def test_within(self):
# This is a meaningless query but we're just confirming that the manager updates the parameters here:
p1 = Point(-90, -90)
p2 = Point(90, 90)
sqs = self.search_index.objects.within("location", p1, p2)
self.assertTrue(isinstance(sqs, SearchQuerySet))
params = sqs.query.build_params()
self.assertIn("within", params)
self.assertDictEqual(
params["within"], {"field": "location", "point_1": p1, "point_2": p2}
)
@unittest.skipUnless(HAVE_GDAL, "Requires gdal library")
def test_dwithin(self):
p = Point(0, 0)
distance = D(mi=500)
sqs = self.search_index.objects.dwithin("location", p, distance)
self.assertTrue(isinstance(sqs, SearchQuerySet))
params = sqs.query.build_params()
self.assertIn("dwithin", params)
self.assertDictEqual(
params["dwithin"], {"field": "location", "point": p, "distance": distance}
)
@unittest.skipUnless(HAVE_GDAL, "Requires gdal library")
def test_distance(self):
p = Point(0, 0)
sqs = self.search_index.objects.distance("location", p)
self.assertTrue(isinstance(sqs, SearchQuerySet))
params = sqs.query.build_params()
self.assertIn("distance_point", params)
self.assertDictEqual(
params["distance_point"], {"field": "location", "point": p}
)
def test_date_facets(self):
sqs = self.search_index.objects.date_facet(
"foo",
start_date=datetime.date(2008, 2, 25),
end_date=datetime.date(2009, 2, 25),
gap_by="month",
)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.date_facets), 1)
def test_query_facets(self):
sqs = self.search_index.objects.query_facet("foo", "[bar TO *]")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_facets), 1)
def test_narrow(self):
sqs = self.search_index.objects.narrow("content:foo")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertSetEqual(set(["content:foo"]), sqs.query.narrow_queries)
def test_raw_search(self):
self.assertEqual(len(self.search_index.objects.raw_search("foo")), 23)
def test_load_all(self):
# Models with character primary keys.
sqs = self.search_index.objects.all()
sqs.query.backend = CharPKMockSearchBackend("charpk")
results = sqs.load_all().all()
self.assertEqual(len(results._result_cache), 0)
def test_auto_query(self):
sqs = self.search_index.objects.auto_query("test search -stuff")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter),
"<SQ: AND content__content=test search -stuff>",
)
# With keyword argument
sqs = self.search_index.objects.auto_query(
"test search -stuff", fieldname="title"
)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter), "<SQ: AND title__content=test search -stuff>"
)
def test_autocomplete(self):
# Not implemented
pass
def test_count(self):
self.assertEqual(SearchQuerySet().count(), 23)
self.assertEqual(self.search_index.objects.count(), 23)
def test_best_match(self):
self.assertTrue(
isinstance(self.search_index.objects.best_match(), SearchResult)
)
def test_latest(self):
self.assertTrue(
isinstance(self.search_index.objects.latest("pub_date"), SearchResult)
)
def test_more_like_this(self):
mock = MockModel()
mock.id = 1
self.assertEqual(len(self.search_index.objects.more_like_this(mock)), 23)
def test_facet_counts(self):
self.assertEqual(self.search_index.objects.facet_counts(), {})
def spelling_suggestion(self):
# Test the case where spelling support is disabled.
sqs = self.search_index.objects.filter(content="Indx")
self.assertEqual(sqs.spelling_suggestion(), None)
self.assertEqual(sqs.spelling_suggestion(preferred_query=None), None)
def test_values(self):
sqs = self.search_index.objects.auto_query("test").values("id")
self.assertIsInstance(sqs, ValuesSearchQuerySet)
def test_valueslist(self):
sqs = self.search_index.objects.auto_query("test").values_list("id")
self.assertIsInstance(sqs, ValuesListSearchQuerySet)
| ManagerTestCase |
python | django-haystack__django-haystack | test_haystack/solr_tests/test_solr_backend.py | {
"start": 56703,
"end": 58321
} | class ____(TestCase):
def setUp(self):
super().setUp()
# Wipe it clean.
clear_solr_index()
# Stow.
self.old_ui = connections["solr"].get_unified_index()
self.ui = UnifiedIndex()
self.srtsi = SolrRoundTripSearchIndex()
self.ui.build(indexes=[self.srtsi])
connections["solr"]._index = self.ui
self.sb = connections["solr"].get_backend()
self.sqs = SearchQuerySet("solr")
# Fake indexing.
mock = MockModel()
mock.id = 1
self.sb.update(self.srtsi, [mock])
def tearDown(self):
# Restore.
connections["solr"]._index = self.old_ui
super().tearDown()
def test_round_trip(self):
results = self.sqs.filter(id="core.mockmodel.1")
# Sanity check.
self.assertEqual(results.count(), 1)
# Check the individual fields.
result = results[0]
self.assertEqual(result.id, "core.mockmodel.1")
self.assertEqual(result.text, "This is some example text.")
self.assertEqual(result.name, "Mister Pants")
self.assertEqual(result.is_active, True)
self.assertEqual(result.post_count, 25)
self.assertEqual(result.average_rating, 3.6)
self.assertEqual(result.price, "24.99")
self.assertEqual(result.pub_date, datetime.date(2009, 11, 21))
self.assertEqual(result.created, datetime.datetime(2009, 11, 21, 21, 31, 00))
self.assertEqual(result.tags, ["staff", "outdoor", "activist", "scientist"])
self.assertEqual(result.sites, [3, 5, 1])
| LiveSolrRoundTripTestCase |
python | pytorch__pytorch | torch/ao/quantization/pt2e/duplicate_dq_pass.py | {
"start": 1550,
"end": 3129
} | class ____(PassBase):
def call(self, graph_module: torch.fx.GraphModule) -> PassResult:
for node in graph_module.graph.nodes:
if node.op == "call_function" and node.target in _DEQUANTIZE_OPS:
dq_users = _filter_sym_size_users(node)
if len(dq_users) <= 1:
continue
# Do not duplicate dq for dynamic quantization
# Pattern: choose_qparam - getitem - q - dq
q_node = node.args[0]
if q_node.op == "call_function" and q_node.target in _QUANTIZE_OPS:
getitem_node = q_node.args[1]
if (
isinstance(getitem_node, torch.fx.node.Node)
and getitem_node.op == "call_function"
and getitem_node.target is operator.getitem
):
choose_qparam_node = getitem_node.args[0]
if (
isinstance(choose_qparam_node, torch.fx.node.Node)
and choose_qparam_node.op == "call_function"
and choose_qparam_node.target
== torch.ops.quantized_decomposed.choose_qparams.tensor
):
continue
for user in dq_users:
_maybe_duplicate_dq(graph_module, node, user)
graph_module.graph.eliminate_dead_code()
graph_module.recompile()
return PassResult(graph_module, True)
| DuplicateDQPass |
python | pytorch__pytorch | test/dynamo/test_dicts.py | {
"start": 34914,
"end": 38816
} | class ____(LoggingTestCase):
thetype = dict
@make_logging_test(recompiles=True)
def test_popitem(self, records):
d = self.thetype()
d[1] = 2
d[3] = 4
@torch.compile(backend="eager", fullgraph=True)
def fn(x):
k, v = d.popitem()
if k == 3 and v == 4:
return x.sin()
return x.cos()
x = torch.tensor(1.0)
y = fn(x)
# sanity check
self.assertEqual(len(records), 0)
self.assertEqual(y, x.sin())
d[3] = 5
y = fn(x)
self.assertEqual(len(records), 1)
self.assertEqual(y, x.cos())
record = self.getRecord(records, "d")
self.assertIn(
"""d[3] == 4""",
munge_exc(record),
)
@make_logging_test(recompiles=True)
def test_cmp_eq(self, records):
@torch.compile(backend="eager", fullgraph=True)
def fn(x, d1, d2):
if d1 == d2:
return x.sin()
return x.cos()
x = torch.tensor(1.0)
d1 = self.thetype({1: 2, 3: 4})
d2 = self.thetype({1: 2, 5: 6})
y = fn(x, d1, d2)
# sanity check
self.assertEqual(len(records), 0)
self.assertEqual(y, x.cos())
y = fn(x, d1, d1)
self.assertEqual(len(records), 1)
self.assertEqual(y, x.sin())
record = self.getRecord(records, "d2")
self.assertIn(
"""list(dict.keys(d2))""",
munge_exc(record.getMessage()),
)
@make_logging_test(recompiles=True)
def test_cmp_ne(self, records):
@torch.compile(backend="eager", fullgraph=True)
def fn(x, d1, d2):
if d1 == d2:
return x.sin()
return x.cos()
x = torch.tensor(1.0)
d1 = self.thetype({1: 2, 3: 4})
d2 = self.thetype({1: 2, 5: 6})
y = fn(x, d1, d2)
# sanity check
self.assertEqual(len(records), 0)
self.assertEqual(y, x.cos())
y = fn(x, d1, d1)
self.assertEqual(len(records), 1)
self.assertEqual(y, x.sin())
record = self.getRecord(records, "d2")
self.assertIn(
"""list(dict.keys(d2))""",
munge_exc(record.getMessage()),
)
@make_logging_test(recompiles=True)
def test_cmp_or(self, records):
@torch.compile(backend="eager", fullgraph=True)
def fn(x, d1, d2):
d = d1 | d2
if d.get(5, False):
return x.sin()
return x.cos()
x = torch.tensor(1.0)
d1 = self.thetype({1: 2, 3: 4})
d2 = self.thetype({1: 2, 5: 6})
y = fn(x, d1, d2)
# sanity check
self.assertEqual(len(records), 0)
self.assertEqual(y, x.sin())
y = fn(x, d1, d1)
self.assertEqual(len(records), 1)
self.assertEqual(y, x.cos())
record = self.getRecord(records, "d2")
self.assertIn(
"""KeyError on d2[5]""",
munge_exc(record.getMessage()),
)
@make_logging_test(recompiles=True)
def test_cmp_ior(self, records):
@torch.compile(backend="eager", fullgraph=True)
def fn(x, d1, d2):
d2 |= d1
if d2.get(3, False):
return x.sin()
return x.cos()
x = torch.tensor(1.0)
d1 = self.thetype({1: 2, 3: 4})
d2 = self.thetype({1: 2, 5: 6})
d3, d4 = d2.copy(), d2.copy()
y = fn(x, d1, d2)
# sanity check
self.assertEqual(len(records), 0)
self.assertEqual(y, x.sin())
y = fn(x, d3, d4)
self.assertEqual(len(records), 1)
self.assertEqual(y, x.cos())
record = self.getRecord(records, "d1")
self.assertIn(
"""KeyError on d1[3]""",
munge_exc(record.getMessage()),
)
| DictGuardTests |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/schemas.py | {
"start": 30719,
"end": 33168
} | class ____:
# A copy of all forward metadata, but computed on the *dense* tensor forward (after desugaring subclasses)
# So for example, if the user had a model containing two `TwoTensor` inputs,
# Then `SubclassMeta.fw_metadata.input_infos` would have length 4 here.
fw_metadata: ViewAndMutationMeta
# Note: [Computing Subclass Metadata about grad_inputs]
# Given a list of flattened, plain tensor grad_inputs, this tells us how to reconstruct the grad_input subclasses
#
# You might think: why not just assume that all grad_inputs will have the same subclass-ness as the original inputs?
# (AOTAutograd generally assumes other properties, e.g. that grad_outputs are contiguous)
#
# This doesn't really work though. take this example:
#
# def f(DoubleTensor, DenseTensor):
# return DoubleTensor * DenseTensor
#
# In the above example, the .grad field of *both* DoubleTensor and DenseTensor will be a DoubleTensor.
# When we trace out a joint fw-bw graph, we'll end up returning two subclasses for the two grad_inputs.
# This means that our backward graph will return 4 outputs (two dense tensors for each DoubleTensor grad_input)
# and we need to properly store the metadata that tells us how to turn these 4 outputs back into DoubleTensors.
#
# Note that this info **cannot** easily be figured out from ViewAndMutationMeta.
# We can only compute this info by tracing the entire joint and examining the grad_inputs that we computed.
#
# See Note: [AOTAutograd Backward Guards]
# This will also eventually require us to install backward guards,
# in case we made incorrect assumptions about the subclass-ness of our grad_outputs
#
# Optional field because we don't compute for inference graphs
grad_input_metas: Optional[list[Union[PlainTensorMeta, SubclassCreationMeta]]] = (
None
)
def __init__(self) -> None:
# The fields in this class get set after its construction.
pass
# This class exists because:
# - the autograd.Function.forward() in aot autograd returns outputs that might alias inputs
# - we only care about the metadata on those aliases, so we can regenerate them.
# We do not want them to participate in the autograd.Function.
# We do that by wrapping them in an opaque class, so the autograd.Function
# does not know to treat them as tensors.
@dataclass(frozen=True)
| SubclassMeta |
python | openai__openai-python | src/openai/types/responses/web_search_preview_tool.py | {
"start": 917,
"end": 1469
} | class ____(BaseModel):
type: Literal["web_search_preview", "web_search_preview_2025_03_11"]
"""The type of the web search tool.
One of `web_search_preview` or `web_search_preview_2025_03_11`.
"""
search_context_size: Optional[Literal["low", "medium", "high"]] = None
"""High level guidance for the amount of context window space to use for the
search.
One of `low`, `medium`, or `high`. `medium` is the default.
"""
user_location: Optional[UserLocation] = None
"""The user's location."""
| WebSearchPreviewTool |
python | tensorflow__tensorflow | tensorflow/lite/python/lite_v2_test.py | {
"start": 186865,
"end": 191345
} | class ____(lite_v2_test_util.ModelTest):
def _createGraphWithCustomOp(self):
# Create a graph that has one double op.
np.random.seed(0)
saved_model_dir = os.path.join(self.get_temp_dir(), 'double_model')
with ops.Graph().as_default():
with tf.compat.v1.Session() as sess:
in_tensor = tf.compat.v1.placeholder(
shape=[1, 4], dtype=dtypes.float32, name='input'
)
out_tensor = double_op.double(in_tensor)
inputs = {'x': in_tensor}
outputs = {'z': out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
def calibration_gen():
for _ in range(100):
yield [np.random.uniform(-1, 1, size=(1, 4)).astype(np.float32)]
return (saved_model_dir, calibration_gen)
def testCustomOpRegistererByName(self):
"""Test a calibration with custom op registered by name."""
saved_model_dir, calibration_gen = self._createGraphWithCustomOp()
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
converter.optimizations = [lite.Optimize.DEFAULT]
converter.representative_dataset = calibration_gen
converter.allow_custom_ops = True
converter.target_spec._experimental_custom_op_registerers = [
'TF_TestRegisterer'
]
tflite_model = converter.convert()
self.assertTrue(tflite_model)
self.assertGreater(test_registerer.get_num_test_registerer_calls(), 0)
self.assertIn('Double', tflite_test_util.get_ops_list(tflite_model))
# Check the conversion metadata.
metadata = util.get_conversion_metadata(tflite_model)
self.assertIsNotNone(metadata)
self.assertEqual(metadata.options.allowCustomOps, True)
# Check the model works with custom ops.
interp = interpreter.InterpreterWithCustomOps(
model_content=tflite_model, custom_op_registerers=['TF_TestRegisterer']
)
interp.allocate_tensors()
input_details = interp.get_input_details()
test_input = np.array([[0.0, 0.1, 0.2, 0.3]], dtype=np.float32)
interp.set_tensor(input_details[0]['index'], test_input)
interp.invoke()
output_details = interp.get_output_details()
expected_output = np.array([[0.0, 0.2, 0.4, 0.6]], dtype=np.float32)
output_data = interp.get_tensor(output_details[0]['index'])
self.assertArrayNear(expected_output[0], output_data[0], err=1e-2)
def testCustomOpRegistererByFunc(self):
"""Test a calibration with custom op registered by function."""
saved_model_dir, calibration_gen = self._createGraphWithCustomOp()
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
converter.optimizations = [lite.Optimize.DEFAULT]
converter.representative_dataset = calibration_gen
converter.allow_custom_ops = True
converter.target_spec._experimental_custom_op_registerers = [
test_registerer.TF_TestRegisterer
]
tflite_model = converter.convert()
self.assertTrue(tflite_model)
self.assertGreater(test_registerer.get_num_test_registerer_calls(), 0)
self.assertIn('Double', tflite_test_util.get_ops_list(tflite_model))
# Check the model works with custom ops.
interp = interpreter.InterpreterWithCustomOps(
model_content=tflite_model,
custom_op_registerers=[test_registerer.TF_TestRegisterer],
)
interp.allocate_tensors()
input_details = interp.get_input_details()
test_input = np.array([[0.0, 0.1, 0.2, 0.3]], dtype=np.float32)
interp.set_tensor(input_details[0]['index'], test_input)
interp.invoke()
output_details = interp.get_output_details()
expected_output = np.array([[0.0, 0.2, 0.4, 0.6]], dtype=np.float32)
output_data = interp.get_tensor(output_details[0]['index'])
self.assertArrayNear(expected_output[0], output_data[0], err=1e-2)
def testCustomOpRegistererFailure(self):
"""Test a calibration with wrong custom op registerer."""
saved_model_dir, calibration_gen = self._createGraphWithCustomOp()
bogus_name = 'CompletelyBogusRegistererName'
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
converter.optimizations = [lite.Optimize.DEFAULT]
converter.representative_dataset = calibration_gen
converter.allow_custom_ops = True
converter.target_spec._experimental_custom_op_registerers = [bogus_name]
with self.assertRaisesRegex(
ValueError, "Looking up symbol '" + bogus_name + "' failed"
):
converter.convert()
| CalibrateAndQuantizeWithCustomOpTest |
python | fastai__fastai | fastai/optimizer.py | {
"start": 697,
"end": 3731
} | class ____():
"Common functionality between `Optimizer` and `OptimWrapper`"
def all_params(self,
n:slice|int=slice(None), # Extended slicing over the optimizer `param_lists`
with_grad:bool=False # Get all param tuples. If `True` select only those with a gradient
):
res = L((p,pg,self.state[p],hyper) for pg,hyper in zip(self.param_lists[n],self.hypers[n]) for p in pg)
return L(o for o in res if hasattr(o[0], 'grad') and o[0].grad is not None) if with_grad else res
def _set_require_grad(self,
rg:bool, # Requires grad: if `True` sets gradient for parameters, else uses state `state["force_train"]`
p:Tensor, # Parameters to set gradient
pg, # Param groups (unused but needed because unpack *o)
state: dict,
h # Hyperparameter (unused but needed because unpack *o)
):
p.requires_grad_(rg or state.get('force_train', False))
def freeze_to(self,
n:int # Freeze up to `n` layers
):
self.frozen_idx = n if n >= 0 else len(self.param_lists) + n
if self.frozen_idx >= len(self.param_lists):
warn(f"Freezing {self.frozen_idx} groups; model has {len(self.param_lists)}; whole model is frozen.")
for o in self.all_params(slice(n, None)): self._set_require_grad(True, *o)
for o in self.all_params(slice(None, n)): self._set_require_grad(False, *o)
def freeze(self):
assert(len(self.param_lists)>1)
self.freeze_to(-1)
def set_hypers(self, **kwargs): L(kwargs.items()).starmap(self.set_hyper)
def _set_hyper(self,
k, # Hyperparameter key
v # Hyperparameter value
):
for v_,h in zip(v, self.hypers): h[k] = v_
def set_hyper(self,
k, # Hyperparameter key or slice of keys
v # Hyperparameter value or slice of values
):
if isinstance(v, slice):
if v.start: v = even_mults(v.start, v.stop, len(self.param_lists))
else: v = [v.stop/10]*(len(self.param_lists)-1) + [v.stop]
v = L(v, use_list=None)
if len(v)==1: v = v*len(self.param_lists)
assert len(v) == len(self.hypers), f"Trying to set {len(v)} values for {k} but there are {len(self.param_lists)} parameter groups."
self._set_hyper(k, v)
def unfreeze(self): self.freeze_to(0)
@property
def param_groups(self): return [{**{'params': pg}, **hp} for pg,hp in zip(self.param_lists, self.hypers)]
@param_groups.setter
def param_groups(self,
v:dict # List of dicts to set `params` and other hyper parameters
):
for pg,v_ in zip(self.param_lists,v): pg = v_['params']
for hyper,v_ in zip(self.hypers,v):
for k,t in v_.items():
if k != 'params': hyper[k] = t
# %% ../nbs/12_optimizer.ipynb 8
def _update(
state:dict,
new=None # New values to update `state` dict
):
if new is None: return state
if isinstance(new, dict): state.update(new)
return state
# %% ../nbs/12_optimizer.ipynb 10
| _BaseOptimizer |
python | coleifer__peewee | peewee.py | {
"start": 53955,
"end": 54622
} | class ____(Node):
def __init__(self, expr, of=None, nowait=None):
expr = 'FOR UPDATE' if expr is True else expr
if expr.lower().endswith('nowait'):
expr = expr[:-7] # Strip off the "nowait" bit.
nowait = True
self._expr = expr
if of is not None and not isinstance(of, (list, set, tuple)):
of = (of,)
self._of = of
self._nowait = nowait
def __sql__(self, ctx):
ctx.literal(self._expr)
if self._of is not None:
ctx.literal(' OF ').sql(CommaNodeList(self._of))
if self._nowait:
ctx.literal(' NOWAIT')
return ctx
| ForUpdate |
python | kamyu104__LeetCode-Solutions | Python/reverse-integer.py | {
"start": 39,
"end": 849
} | class ____(object):
def reverse(self, x):
"""
:type x: int
:rtype: int
"""
if x < 0:
return -self.reverse(-x)
result = 0
while x:
result = result * 10 + x % 10
x //= 10
return result if result <= 0x7fffffff else 0 # Handle overflow.
def reverse2(self, x):
"""
:type x: int
:rtype: int
"""
if x < 0:
x = int(str(x)[::-1][-1] + str(x)[::-1][:-1])
else:
x = int(str(x)[::-1])
x = 0 if abs(x) > 0x7FFFFFFF else x
return x
def reverse3(self, x):
"""
:type x: int
:rtype: int
"""
s = cmp(x, 0)
r = int(repr(s * x)[::-1])
return s * r * (r < 2 ** 31)
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride1.py | {
"start": 10070,
"end": 10162
} | class ____:
pass
_T2A = TypeVar("_T2A", bound=Foo)
_T2B = TypeVar("_T2B", bound=Foo)
| Foo |
python | apache__airflow | helm-tests/tests/helm_tests/airflow_aux/test_annotations.py | {
"start": 17921,
"end": 18826
} | class ____:
"""Tests Redis Annotations."""
def test_redis_annotations_are_added(self):
# Test Case
values = {"redis": {"annotations": {"example": "redis"}}}
show_only = "templates/redis/redis-statefulset.yaml"
expected_annotations = {"example": "redis"}
k8s_objects = render_chart(
values=values,
show_only=[show_only],
)
# This test relies on the convention that the helm chart puts annotations
# in its own .yaml file, so by specifying `show_only`,
# we should only get a single k8s_object here - the target object that
# we hope to test on.
assert len(k8s_objects) == 1
obj = k8s_objects[0]
for k, v in expected_annotations.items():
assert k in obj["metadata"]["annotations"]
assert v == obj["metadata"]["annotations"][k]
| TestRedisAnnotations |
python | PrefectHQ__prefect | tests/utilities/test_callables.py | {
"start": 402,
"end": 15724
} | class ____:
def test_simple_function_with_no_arguments(self):
def f():
pass
schema = callables.parameter_schema(f)
assert schema.model_dump_for_openapi() == {
"properties": {},
"title": "Parameters",
"type": "object",
"definitions": {},
}
def test_function_with_pydantic_base_model_collisions(self):
# TODO: this test actually fails with pydantic v2 attributes like model_dump
# and friends. We need a new test for these.
def f(
json,
copy,
parse_obj,
parse_raw,
parse_file,
from_orm,
schema,
schema_json,
construct,
validate,
foo,
):
pass
schema = callables.parameter_schema(f)
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {
"json": {"title": "json", "position": 0},
"copy": {"title": "copy", "position": 1},
"parse_obj": {"title": "parse_obj", "position": 2},
"parse_raw": {"title": "parse_raw", "position": 3},
"parse_file": {"title": "parse_file", "position": 4},
"from_orm": {"title": "from_orm", "position": 5},
"schema": {"title": "schema", "position": 6},
"schema_json": {"title": "schema_json", "position": 7},
"construct": {"title": "construct", "position": 8},
"validate": {"title": "validate", "position": 9},
"foo": {"title": "foo", "position": 10},
},
"required": [
"json",
"copy",
"parse_obj",
"parse_raw",
"parse_file",
"from_orm",
"schema",
"schema_json",
"construct",
"validate",
"foo",
],
"definitions": {},
}
def test_function_with_one_required_argument(self):
def f(x):
pass
schema = callables.parameter_schema(f)
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {"x": {"title": "x", "position": 0}},
"required": ["x"],
"definitions": {},
}
def test_function_with_one_optional_argument(self):
def f(x=42):
pass
schema = callables.parameter_schema(f)
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {"x": {"default": 42, "position": 0, "title": "x"}},
"definitions": {},
}
def test_function_with_one_optional_annotated_argument(self):
def f(x: int = 42):
pass
schema = callables.parameter_schema(f)
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {
"x": {
"default": 42,
"position": 0,
"title": "x",
"type": "integer",
}
},
"definitions": {},
}
def test_function_with_two_arguments(self):
def f(x: int, y: float = 5.0):
pass
schema = callables.parameter_schema(f)
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {
"x": {"title": "x", "type": "integer", "position": 0},
"y": {"title": "y", "default": 5.0, "type": "number", "position": 1},
},
"required": ["x"],
"definitions": {},
}
def test_function_with_datetime_arguments(self):
def f(
x: datetime.datetime,
y: DateTime = DateTime(2025, 1, 1, tzinfo=timezone.utc),
z: datetime.timedelta = datetime.timedelta(seconds=5),
):
pass
schema = callables.parameter_schema(f)
expected_schema = {
"title": "Parameters",
"type": "object",
"properties": {
"x": {
"format": "date-time",
"position": 0,
"title": "x",
"type": "string",
},
"y": {
"default": "2025-01-01T00:00:00Z",
"format": "date-time",
"position": 1,
"title": "y",
"type": "string",
},
"z": {
"default": "PT5S",
"format": "duration",
"position": 2,
"title": "z",
"type": "string",
},
},
"required": ["x"],
"definitions": {},
}
assert schema.model_dump_for_openapi() == expected_schema
def test_function_with_enum_argument(self):
class Color(Enum):
RED = "RED"
GREEN = "GREEN"
BLUE = "BLUE"
def f(x: Color = "RED"):
pass
schema = callables.parameter_schema(f)
expected_schema = {
"title": "Parameters",
"type": "object",
"properties": {
"x": {
"$ref": "#/definitions/Color",
"default": "RED",
"position": 0,
"title": "x",
}
},
"definitions": {
"Color": {
"enum": ["RED", "GREEN", "BLUE"],
"title": "Color",
"type": "string",
}
},
}
assert schema.model_dump_for_openapi() == expected_schema
def test_function_with_generic_arguments(self):
def f(
a: List[str],
b: Dict[str, Any],
c: Any,
d: Tuple[int, float],
e: Union[str, bytes, int],
):
pass
schema = callables.parameter_schema(f)
expected_schema = {
"title": "Parameters",
"type": "object",
"properties": {
"a": {
"items": {"type": "string"},
"position": 0,
"title": "a",
"type": "array",
},
"b": {
"additionalProperties": True,
"position": 1,
"title": "b",
"type": "object",
},
"c": {"position": 2, "title": "c"},
"d": {
"maxItems": 2,
"minItems": 2,
"position": 3,
"prefixItems": [{"type": "integer"}, {"type": "number"}],
"title": "d",
"type": "array",
},
"e": {
"anyOf": [
{"type": "string"},
{"format": "binary", "type": "string"},
{"type": "integer"},
],
"position": 4,
"title": "e",
},
},
"required": ["a", "b", "c", "d", "e"],
"definitions": {},
}
assert schema.model_dump_for_openapi() == expected_schema
def test_function_with_user_defined_type(self):
class Foo:
y: int
def f(x: Foo):
pass
schema = callables.parameter_schema(f)
assert schema.model_dump_for_openapi() == {
"definitions": {},
"title": "Parameters",
"type": "object",
"properties": {"x": {"title": "x", "position": 0}},
"required": ["x"],
}
def test_function_with_user_defined_pydantic_model(self):
class Foo(BaseModel):
y: int
z: str
def f(x: Foo):
pass
schema = callables.parameter_schema(f)
assert schema.model_dump_for_openapi() == {
"definitions": {
"Foo": {
"properties": {
"y": {"title": "Y", "type": "integer"},
"z": {"title": "Z", "type": "string"},
},
"required": ["y", "z"],
"title": "Foo",
"type": "object",
}
},
"properties": {
"x": {
"$ref": "#/definitions/Foo",
"title": "x",
"position": 0,
}
},
"required": ["x"],
"title": "Parameters",
"type": "object",
}
def test_function_with_pydantic_model_default_across_v1_and_v2(self):
# this import ensures this test imports the installed version of
# pydantic (not pydantic.v1) and allows us to test that we
# generate consistent schemas across v1 and v2
class Foo(BaseModel):
bar: str
def f(foo: Foo = Foo(bar="baz")): ...
schema = callables.parameter_schema(f)
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {
"foo": {
"$ref": "#/definitions/Foo",
"default": {"bar": "baz"},
"position": 0,
"title": "foo",
}
},
"definitions": {
"Foo": {
"properties": {"bar": {"title": "Bar", "type": "string"}},
"required": ["bar"],
"title": "Foo",
"type": "object",
}
},
}
def test_function_with_complex_args_across_v1_and_v2(self):
# this import ensures this test imports the installed version of
# pydantic (not pydantic.v1) and allows us to test that we
# generate consistent schemas across v1 and v2
class Foo(BaseModel):
bar: str
class Color(Enum):
RED = "RED"
GREEN = "GREEN"
BLUE = "BLUE"
def f(
a: int,
s: list[None],
m: Foo,
i: int = 0,
x: float = 1.0,
model: Foo = Foo(bar="bar"),
pdt: DateTime = DateTime(2025, 1, 1, tzinfo=timezone.utc),
pdate: Date = Date(2025, 1, 1),
pduration: Duration = Duration(seconds=5),
c: Color = Color.BLUE,
): ...
datetime_schema = {
"title": "pdt",
"default": "2025-01-01T00:00:00+00:00",
"position": 6,
"type": "string",
"format": "date-time",
}
duration_schema = {
"title": "pduration",
"default": 5.0,
"position": 8,
"type": "number",
"format": "time-delta",
}
enum_schema = {
"enum": ["RED", "GREEN", "BLUE"],
"title": "Color",
"type": "string",
"description": "An enumeration.",
}
# these overrides represent changes in how pydantic generates schemas in v2
datetime_schema["default"] = "2025-01-01T00:00:00Z"
duration_schema["default"] = "PT5S"
duration_schema["type"] = "string"
duration_schema["format"] = "duration"
enum_schema.pop("description")
schema = callables.parameter_schema(f)
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {
"a": {"position": 0, "title": "a", "type": "integer"},
"s": {
"items": {"type": "null"},
"position": 1,
"title": "s",
"type": "array",
},
"m": {
"$ref": "#/definitions/Foo",
"position": 2,
"title": "m",
},
"i": {"default": 0, "position": 3, "title": "i", "type": "integer"},
"x": {"default": 1.0, "position": 4, "title": "x", "type": "number"},
"model": {
"$ref": "#/definitions/Foo",
"default": {"bar": "bar"},
"position": 5,
"title": "model",
},
"pdt": datetime_schema,
"pdate": {
"title": "pdate",
"default": "2025-01-01",
"position": 7,
"type": "string",
"format": "date",
},
"pduration": duration_schema,
"c": {
"title": "c",
"default": "BLUE",
"position": 9,
"$ref": "#/definitions/Color",
},
},
"required": ["a", "s", "m"],
"definitions": {
"Foo": {
"properties": {"bar": {"title": "Bar", "type": "string"}},
"required": ["bar"],
"title": "Foo",
"type": "object",
},
"Color": enum_schema,
},
}
def test_function_with_secretstr(self):
def f(x: SecretStr):
pass
schema = callables.parameter_schema(f)
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {
"x": {
"title": "x",
"position": 0,
"format": "password",
"type": "string",
"writeOnly": True,
},
},
"required": ["x"],
"definitions": {},
}
@pytest.mark.skipif(
sys.version_info >= (3, 14),
reason="pydantic v1 is not supported in Python 3.14+",
)
def test_function_with_v1_secretstr_from_compat_module(self):
import pydantic.v1 as pydantic
def f(x: pydantic.SecretStr):
pass
schema = callables.parameter_schema(f)
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {
"x": {
"title": "x",
"position": 0,
"type": "string",
"writeOnly": True,
"format": "password",
},
},
"required": ["x"],
"definitions": {},
}
| TestFunctionToSchema |
python | tensorflow__tensorflow | tensorflow/python/keras/saving/saved_model/save_impl.py | {
"start": 24446,
"end": 29182
} | class ____(object):
"""Function that triggers traces of other functions in the same collection."""
def __init__(self, call_collection, call_fn, name, input_signature):
"""Initializes a LayerCall object.
Args:
call_collection: a LayerCallCollection, which contains the other layer
call functions (e.g. call_with_conditional_losses, call). These
functions should be traced with the same arguments.
call_fn: A call function.
name: Name of the call function.
input_signature: Input signature of call_fn (can be None).
"""
self.call_collection = call_collection
self.input_signature = input_signature
self.wrapped_call = def_function.function(
layer_call_wrapper(call_collection, call_fn, name),
input_signature=input_signature)
self.original_layer_call = call_collection.layer_call_method
def _maybe_trace(self, args, kwargs):
# Trigger traces of other call functions + extra training-arg traces.
if tracing_enabled():
self.call_collection.add_trace(*args, **kwargs)
def __call__(self, *args, **kwargs):
self._maybe_trace(args, kwargs)
return self.wrapped_call(*args, **kwargs)
def get_concrete_function(self, *args, **kwargs):
self._maybe_trace(args, kwargs)
return self.wrapped_call.get_concrete_function(*args, **kwargs)
def _wrap_call_and_conditional_losses(layer):
"""Wraps call function that returns a tuple of (outputs, losses).
The losses returned are conditional on the inputs passed to the call function.
Unconditional losses (e.g. weight regularizeration) are wrapped separately.
Args:
layer: a Keras layer object
Returns:
python call function that returns outputs and conditional losses -- excludes
activity regularizer
"""
# Create function that generates both outputs and losses
layer_call = _get_layer_call_method(layer)
def call_and_return_conditional_losses(*args, **kwargs):
"""Returns layer (call_output, conditional losses) tuple."""
call_output = layer_call(*args, **kwargs)
if version_utils.is_v1_layer_or_model(layer):
conditional_losses = layer.get_losses_for(
_filtered_inputs([args, kwargs]))
else:
conditional_losses = [
l for l in layer.losses if not hasattr(l, '_unconditional_loss')
]
return call_output, conditional_losses
return _create_call_fn_decorator(layer, call_and_return_conditional_losses)
def _extract_outputs_from_fn(layer, call_and_return_conditional_losses):
"""Returns a function that returns only call function outputs."""
if isinstance(layer, keras_load.RevivedLayer):
return layer.keras_api.__call__ # pylint: disable=protected-access
def call(inputs, *args, **kwargs):
return call_and_return_conditional_losses(inputs, *args, **kwargs)[0]
return _create_call_fn_decorator(layer, call)
def _append_activity_regularizer_loss(
layer, call_fn_with_losses, activity_regularizer_fn):
"""Appends activity regularizer loss to losses returned by the wrapped fn."""
def fn(inputs, *args, **kwargs):
outputs, losses = call_fn_with_losses(inputs, *args, **kwargs)
losses.append(activity_regularizer_fn(outputs))
return outputs, losses
return _create_call_fn_decorator(layer, fn)
def _create_call_fn_decorator(layer, wrapped_call):
call_fn = _get_layer_call_method(layer)
fn, arg_spec = utils.maybe_add_training_arg(
call_fn, wrapped_call, layer._expects_training_arg, # pylint: disable=protected-access
default_training_value=False)
return tf_decorator.make_decorator(
target=call_fn,
decorator_func=fn,
decorator_argspec=arg_spec)
def _wrap_unconditional_loss(loss_fn, index):
"""Wraps callable/unconditional loss, returning a serializable function."""
# Extract original loss function from partial function
fn = loss_fn.args[0] if isinstance(loss_fn, functools.partial) else loss_fn
if isinstance(fn, def_function.Function):
return fn
else:
return def_function.Function(
fn, 'loss_fn_{}'.format(index), input_signature=[])
def _wrap_activity_regularizer(layer):
"""Wraps the activity regularizer."""
# pylint: disable=protected-access
if isinstance(layer._activity_regularizer, def_function.Function):
return layer._activity_regularizer
return def_function.Function(
layer._activity_regularizer,
'{}_activity_regularizer'.format(layer.name),
input_signature=[
tensor_spec.TensorSpec(None, layer._compute_dtype or K.floatx())
])
# pylint: enable=protected-access
def _get_layer_call_method(layer):
if isinstance(layer.call, (def_function.Function)):
return layer.call.python_function
return layer.call
| LayerCall |
python | viewflow__viewflow | viewflow/workflow/nodes/switch.py | {
"start": 1082,
"end": 2699
} | class ____(Node):
"""
Gateway that selects one of the outgoing node.
Activates first node with matched condition.
Example::
select_responsible_person = (
flow.Switch()
.Case(this.dean_approval, lambda act: a.process.need_dean)
.Case(this.head_approval, lambda act: a.process.need_head)
.Default(this.supervisor_approval)
)
"""
task_type = "SWITCH"
activation_class = SwitchActivation
def __init__(self, **kwargs): # noqa D102
super(Switch, self).__init__(**kwargs)
self._activate_next = []
def _outgoing(self):
for next_node, cond in self._activate_next:
edge_class = "cond_true" if cond else "default"
yield Edge(src=self, dst=next_node, edge_class=edge_class)
def _resolve(self, instance):
super()._resolve(instance)
next_nodes = []
for node, condition in self._activate_next:
node = this.resolve(instance, node)
condition = this.resolve(instance, condition)
next_nodes.append((node, condition))
self._activate_next = next_nodes
@property
def _branches(self):
return self._activate_next
def Case(self, node, cond=None):
"""Node to activate if condition is True.
:param cond: Calable[activation] -> bool
"""
self._activate_next.append((node, cond))
return self
def Default(self, node):
"""Last node to activate if no one other succeed."""
self._activate_next.append((node, None))
return self
| Switch |
python | crytic__slither | slither/tools/upgradeability/__main__.py | {
"start": 7214,
"end": 13249
} | class ____(argparse.Action): # pylint: disable=too-few-public-methods
def __call__(
self,
parser: Any,
args: Any,
values: Optional[Union[str, Sequence[Any]]],
option_string: Any = None,
) -> Any: # pylint: disable=signature-differs
checks = _get_checks()
assert isinstance(values, str)
output_wiki(checks, values)
parser.exit()
def _run_checks(detectors: List[AbstractCheck]) -> List[Dict]:
results_ = [d.check() for d in detectors]
results_ = [r for r in results_ if r]
results = [item for sublist in results_ for item in sublist] # flatten
return results
def _checks_on_contract(
detectors: List[Type[AbstractCheck]], contract: Contract
) -> Tuple[List[Dict], int]:
detectors_ = [
d(logger, contract)
for d in detectors
if (not d.REQUIRE_PROXY and not d.REQUIRE_CONTRACT_V2)
]
return _run_checks(detectors_), len(detectors_)
def _checks_on_contract_update(
detectors: List[Type[AbstractCheck]], contract_v1: Contract, contract_v2: Contract
) -> Tuple[List[Dict], int]:
detectors_ = [
d(logger, contract_v1, contract_v2=contract_v2) for d in detectors if d.REQUIRE_CONTRACT_V2
]
return _run_checks(detectors_), len(detectors_)
def _checks_on_contract_and_proxy(
detectors: List[Type[AbstractCheck]], contract: Contract, proxy: Contract
) -> Tuple[List[Dict], int]:
detectors_ = [d(logger, contract, proxy=proxy) for d in detectors if d.REQUIRE_PROXY]
return _run_checks(detectors_), len(detectors_)
# endregion
###################################################################################
###################################################################################
# region Main
###################################################################################
###################################################################################
# pylint: disable=too-many-statements,too-many-branches,too-many-locals
def main() -> None:
json_results: Dict = {
"proxy-present": False,
"contract_v2-present": False,
"detectors": [],
}
detectors = _get_checks()
args = parse_args(detectors)
detectors_to_run = choose_checks(args, detectors)
v1_filename = vars(args)["contract.sol"]
number_detectors_run = 0
try:
variable1 = Slither(v1_filename, **vars(args))
# Analyze logic contract
v1_name = args.ContractName
v1_contracts = variable1.get_contract_from_name(v1_name)
if len(v1_contracts) != 1:
info = f"Contract {v1_name} not found in {variable1.filename}"
logger.error(red(info))
if args.json:
output_to_json(args.json, str(info), json_results)
return
v1_contract = v1_contracts[0]
detectors_results, number_detectors = _checks_on_contract(detectors_to_run, v1_contract)
json_results["detectors"] += detectors_results
number_detectors_run += number_detectors
# Analyze Proxy
proxy_contract = None
if args.proxy_name:
if args.proxy_filename:
proxy = Slither(args.proxy_filename, **vars(args))
else:
proxy = variable1
proxy_contracts = proxy.get_contract_from_name(args.proxy_name)
if len(proxy_contracts) != 1:
info = f"Proxy {args.proxy_name} not found in {proxy.filename}"
logger.error(red(info))
if args.json:
output_to_json(args.json, str(info), json_results)
return
proxy_contract = proxy_contracts[0]
json_results["proxy-present"] = True
detectors_results, number_detectors = _checks_on_contract_and_proxy(
detectors_to_run, v1_contract, proxy_contract
)
json_results["detectors"] += detectors_results
number_detectors_run += number_detectors
# Analyze new version
if args.new_contract_name:
if args.new_contract_filename:
variable2 = Slither(args.new_contract_filename, **vars(args))
else:
variable2 = variable1
v2_contracts = variable2.get_contract_from_name(args.new_contract_name)
if len(v2_contracts) != 1:
info = (
f"New logic contract {args.new_contract_name} not found in {variable2.filename}"
)
logger.error(red(info))
if args.json:
output_to_json(args.json, str(info), json_results)
return
v2_contract = v2_contracts[0]
json_results["contract_v2-present"] = True
if proxy_contract:
detectors_results, _ = _checks_on_contract_and_proxy(
detectors_to_run, v2_contract, proxy_contract
)
json_results["detectors"] += detectors_results
detectors_results, number_detectors = _checks_on_contract_update(
detectors_to_run, v1_contract, v2_contract
)
json_results["detectors"] += detectors_results
number_detectors_run += number_detectors
# If there is a V2, we run the contract-only check on the V2
detectors_results, number_detectors = _checks_on_contract(detectors_to_run, v2_contract)
json_results["detectors"] += detectors_results
number_detectors_run += number_detectors
to_log = f'{len(json_results["detectors"])} findings, {number_detectors_run} detectors run'
logger.info(to_log)
if args.json:
output_to_json(args.json, None, json_results)
except SlitherException as slither_exception:
logger.error(str(slither_exception))
if args.json:
output_to_json(args.json, str(slither_exception), json_results)
return
# endregion
| OutputWiki |
python | huggingface__transformers | src/transformers/models/speecht5/modeling_speecht5.py | {
"start": 19207,
"end": 20786
} | class ____(nn.Module):
"""Construct the features from raw audio waveform"""
def __init__(self, config):
super().__init__()
if config.feat_extract_norm == "group":
conv_layers = [SpeechT5GroupNormConvLayer(config, layer_id=0)] + [
SpeechT5NoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1)
]
elif config.feat_extract_norm == "layer":
conv_layers = [
SpeechT5LayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)
]
else:
raise ValueError(
f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
)
self.conv_layers = nn.ModuleList(conv_layers)
self.gradient_checkpointing = False
self._requires_grad = True
def _freeze_parameters(self):
for param in self.parameters():
param.requires_grad = False
self._requires_grad = False
def forward(self, input_values):
hidden_states = input_values[:, None]
# make sure hidden_states require grad for gradient_checkpointing
if self._requires_grad and self.training:
hidden_states.requires_grad = True
for conv_layer in self.conv_layers:
hidden_states = conv_layer(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureProjection with Wav2Vec2->SpeechT5
| SpeechT5FeatureEncoder |
python | coleifer__peewee | tests/manytomany.py | {
"start": 7199,
"end": 19171
} | class ____(ModelTestCase):
database = get_in_memory_db()
requires = [User, Note, NoteUserThrough, AltNote, AltThroughModel]
user_to_note = {
'gargie': [1, 2],
'huey': [2, 3],
'mickey': [3, 4],
'zaizee': [4, 5],
}
def setUp(self):
super(TestManyToMany, self).setUp()
for username in sorted(self.user_to_note):
User.create(username=username)
for i in range(5):
Note.create(text='note-%s' % (i + 1))
def test_through_model(self):
self.assertEqual(len(NoteUserThrough._meta.fields), 3)
fields = NoteUserThrough._meta.fields
self.assertEqual(sorted(fields), ['id', 'note', 'user'])
note_field = fields['note']
self.assertEqual(note_field.rel_model, Note)
self.assertFalse(note_field.null)
user_field = fields['user']
self.assertEqual(user_field.rel_model, User)
self.assertFalse(user_field.null)
def _set_data(self):
for username, notes in self.user_to_note.items():
user = User.get(User.username == username)
for note in notes:
NoteUserThrough.create(
note=Note.get(Note.text == 'note-%s' % note),
user=user)
def assertNotes(self, query, expected):
notes = [note.text for note in query]
self.assertEqual(sorted(notes),
['note-%s' % i for i in sorted(expected)])
def assertUsers(self, query, expected):
usernames = [user.username for user in query]
self.assertEqual(sorted(usernames), sorted(expected))
def test_accessor_query(self):
self._set_data()
gargie, huey, mickey, zaizee = User.select().order_by(User.username)
with self.assertQueryCount(1):
self.assertNotes(gargie.notes, [1, 2])
with self.assertQueryCount(1):
self.assertNotes(zaizee.notes, [4, 5])
with self.assertQueryCount(2):
self.assertNotes(User.create(username='x').notes, [])
n1, n2, n3, n4, n5 = Note.select().order_by(Note.text)
with self.assertQueryCount(1):
self.assertUsers(n1.users, ['gargie'])
with self.assertQueryCount(1):
self.assertUsers(n2.users, ['gargie', 'huey'])
with self.assertQueryCount(1):
self.assertUsers(n5.users, ['zaizee'])
with self.assertQueryCount(2):
self.assertUsers(Note.create(text='x').users, [])
def test_prefetch_notes(self):
self._set_data()
for pt in PREFETCH_TYPE.values():
with self.assertQueryCount(3):
gargie, huey, mickey, zaizee = prefetch(
User.select().order_by(User.username),
NoteUserThrough,
Note,
prefetch_type=pt)
with self.assertQueryCount(0):
self.assertNotes(gargie.notes, [1, 2])
with self.assertQueryCount(0):
self.assertNotes(zaizee.notes, [4, 5])
with self.assertQueryCount(2):
self.assertNotes(User.create(username='x').notes, [])
def test_prefetch_users(self):
self._set_data()
for pt in PREFETCH_TYPE.values():
with self.assertQueryCount(3):
n1, n2, n3, n4, n5 = prefetch(
Note.select().order_by(Note.text),
NoteUserThrough,
User,
prefetch_type=pt)
with self.assertQueryCount(0):
self.assertUsers(n1.users, ['gargie'])
with self.assertQueryCount(0):
self.assertUsers(n2.users, ['gargie', 'huey'])
with self.assertQueryCount(0):
self.assertUsers(n5.users, ['zaizee'])
with self.assertQueryCount(2):
self.assertUsers(Note.create(text='x').users, [])
def test_query_filtering(self):
self._set_data()
gargie, huey, mickey, zaizee = User.select().order_by(User.username)
with self.assertQueryCount(1):
notes = gargie.notes.where(Note.text != 'note-2')
self.assertNotes(notes, [1])
def test_set_value(self):
self._set_data()
gargie = User.get(User.username == 'gargie')
huey = User.get(User.username == 'huey')
n1, n2, n3, n4, n5 = Note.select().order_by(Note.text)
with self.assertQueryCount(2):
gargie.notes = n3
self.assertNotes(gargie.notes, [3])
self.assertUsers(n3.users, ['gargie', 'huey', 'mickey'])
self.assertUsers(n1.users, [])
gargie.notes = [n3, n4]
self.assertNotes(gargie.notes, [3, 4])
self.assertUsers(n3.users, ['gargie', 'huey', 'mickey'])
self.assertUsers(n4.users, ['gargie', 'mickey', 'zaizee'])
def test_set_query(self):
huey = User.get(User.username == 'huey')
with self.assertQueryCount(2):
huey.notes = Note.select().where(~Note.text.endswith('4'))
self.assertNotes(huey.notes, [1, 2, 3, 5])
def test_add(self):
gargie = User.get(User.username == 'gargie')
huey = User.get(User.username == 'huey')
n1, n2, n3, n4, n5 = Note.select().order_by(Note.text)
gargie.notes.add([n1, n2])
self.assertNotes(gargie.notes, [1, 2])
self.assertUsers(n1.users, ['gargie'])
self.assertUsers(n2.users, ['gargie'])
for note in [n3, n4, n5]:
self.assertUsers(note.users, [])
with self.assertQueryCount(1):
huey.notes.add(Note.select().where(
fn.substr(Note.text, 6, 1) << ['1', '3', '5']))
self.assertNotes(huey.notes, [1, 3, 5])
self.assertUsers(n1.users, ['gargie', 'huey'])
self.assertUsers(n2.users, ['gargie'])
self.assertUsers(n3.users, ['huey'])
self.assertUsers(n4.users, [])
self.assertUsers(n5.users, ['huey'])
with self.assertQueryCount(1):
gargie.notes.add(n4)
self.assertNotes(gargie.notes, [1, 2, 4])
with self.assertQueryCount(2):
n3.users.add(
User.select().where(User.username != 'gargie'),
clear_existing=True)
self.assertUsers(n3.users, ['huey', 'mickey', 'zaizee'])
def test_add_by_pk(self):
huey = User.get(User.username == 'huey')
n1, n2, n3 = Note.select().order_by(Note.text).limit(3)
huey.notes.add([n1.id, n2.id])
self.assertNotes(huey.notes, [1, 2])
self.assertUsers(n1.users, ['huey'])
self.assertUsers(n2.users, ['huey'])
self.assertUsers(n3.users, [])
def test_unique(self):
n1 = Note.get(Note.text == 'note-1')
huey = User.get(User.username == 'huey')
def add_user(note, user):
with self.assertQueryCount(1):
note.users.add(user)
add_user(n1, huey)
self.assertRaises(IntegrityError, add_user, n1, huey)
add_user(n1, User.get(User.username == 'zaizee'))
self.assertUsers(n1.users, ['huey', 'zaizee'])
def test_remove(self):
self._set_data()
gargie, huey, mickey, zaizee = User.select().order_by(User.username)
n1, n2, n3, n4, n5 = Note.select().order_by(Note.text)
with self.assertQueryCount(1):
gargie.notes.remove([n1, n2, n3])
self.assertNotes(gargie.notes, [])
self.assertNotes(huey.notes, [2, 3])
with self.assertQueryCount(1):
huey.notes.remove(Note.select().where(
Note.text << ['note-2', 'note-4', 'note-5']))
self.assertNotes(huey.notes, [3])
self.assertNotes(mickey.notes, [3, 4])
self.assertNotes(zaizee.notes, [4, 5])
with self.assertQueryCount(1):
n4.users.remove([gargie, mickey])
self.assertUsers(n4.users, ['zaizee'])
with self.assertQueryCount(1):
n5.users.remove(User.select())
self.assertUsers(n5.users, [])
def test_remove_by_id(self):
self._set_data()
gargie, huey = User.select().order_by(User.username).limit(2)
n1, n2, n3, n4 = Note.select().order_by(Note.text).limit(4)
gargie.notes.add([n3, n4])
with self.assertQueryCount(1):
gargie.notes.remove([n1.id, n3.id])
self.assertNotes(gargie.notes, [2, 4])
self.assertNotes(huey.notes, [2, 3])
def test_clear(self):
gargie = User.get(User.username == 'gargie')
huey = User.get(User.username == 'huey')
gargie.notes = Note.select()
huey.notes = Note.select()
self.assertEqual(gargie.notes.count(), 5)
self.assertEqual(huey.notes.count(), 5)
gargie.notes.clear()
self.assertEqual(gargie.notes.count(), 0)
self.assertEqual(huey.notes.count(), 5)
n1 = Note.get(Note.text == 'note-1')
n2 = Note.get(Note.text == 'note-2')
n1.users = User.select()
n2.users = User.select()
self.assertEqual(n1.users.count(), 4)
self.assertEqual(n2.users.count(), 4)
n1.users.clear()
self.assertEqual(n1.users.count(), 0)
self.assertEqual(n2.users.count(), 4)
def test_manual_through(self):
gargie, huey, mickey, zaizee = User.select().order_by(User.username)
alt_notes = []
for i in range(5):
alt_notes.append(AltNote.create(text='note-%s' % (i + 1)))
self.assertNotes(gargie.altnotes, [])
for alt_note in alt_notes:
self.assertUsers(alt_note.users, [])
n1, n2, n3, n4, n5 = alt_notes
# Test adding relationships by setting the descriptor.
gargie.altnotes = [n1, n2]
with self.assertQueryCount(2):
huey.altnotes = AltNote.select().where(
fn.substr(AltNote.text, 6, 1) << ['1', '3', '5'])
mickey.altnotes.add([n1, n4])
with self.assertQueryCount(2):
zaizee.altnotes = AltNote.select()
# Test that the notes were added correctly.
with self.assertQueryCount(1):
self.assertNotes(gargie.altnotes, [1, 2])
with self.assertQueryCount(1):
self.assertNotes(huey.altnotes, [1, 3, 5])
with self.assertQueryCount(1):
self.assertNotes(mickey.altnotes, [1, 4])
with self.assertQueryCount(1):
self.assertNotes(zaizee.altnotes, [1, 2, 3, 4, 5])
# Test removing notes.
with self.assertQueryCount(1):
gargie.altnotes.remove(n1)
self.assertNotes(gargie.altnotes, [2])
with self.assertQueryCount(1):
huey.altnotes.remove([n1, n2, n3])
self.assertNotes(huey.altnotes, [5])
with self.assertQueryCount(1):
sq = (AltNote
.select()
.where(fn.SUBSTR(AltNote.text, 6, 1) << ['1', '2', '4']))
zaizee.altnotes.remove(sq)
self.assertNotes(zaizee.altnotes, [3, 5])
# Test the backside of the relationship.
n1.users = User.select().where(User.username != 'gargie')
with self.assertQueryCount(1):
self.assertUsers(n1.users, ['huey', 'mickey', 'zaizee'])
with self.assertQueryCount(1):
self.assertUsers(n2.users, ['gargie'])
with self.assertQueryCount(1):
self.assertUsers(n3.users, ['zaizee'])
with self.assertQueryCount(1):
self.assertUsers(n4.users, ['mickey'])
with self.assertQueryCount(1):
self.assertUsers(n5.users, ['huey', 'zaizee'])
with self.assertQueryCount(1):
n1.users.remove(User.select())
with self.assertQueryCount(1):
n5.users.remove([gargie, huey])
with self.assertQueryCount(1):
self.assertUsers(n1.users, [])
with self.assertQueryCount(1):
self.assertUsers(n5.users, ['zaizee'])
| TestManyToMany |
python | readthedocs__readthedocs.org | readthedocs/redirects/migrations/0005_allow_to_force_redirects.py | {
"start": 149,
"end": 657
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("redirects", "0004_denormalize-from-url"),
]
operations = [
migrations.AddField(
model_name="redirect",
name="force",
field=models.BooleanField(
default=False,
help_text="Apply the redirect even if the page exists.",
null=True,
verbose_name="Force redirect",
),
),
]
| Migration |
python | walkccc__LeetCode | solutions/2262. Total Appeal of A String/2262.py | {
"start": 0,
"end": 486
} | class ____:
def appealSum(self, s: str) -> int:
ans = 0
# the total appeal of all substrings ending in the index so far
dp = 0
lastSeen = {}
for i, c in enumerate(s):
# the total appeal of all substrings ending in s[i]
# = the total appeal of all substrings ending in s[i - 1]
# + the number of substrings ending in s[i] that contain only this s[i]
dp += i - lastSeen.get(c, -1)
ans += dp
lastSeen[c] = i
return ans
| Solution |
python | gevent__gevent | src/greentest/3.9/test_asyncore.py | {
"start": 891,
"end": 2290
} | class ____:
def __init__(self):
self.error_handled = False
def handle_read_event(self):
raise Exception()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
def handle_error(self):
self.error_handled = True
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):
try:
serv.listen()
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 200
start = time.monotonic()
while n > 0 and time.monotonic() - start < 3.0:
r, w, e = select.select([conn], [], [], 0.1)
if r:
n -= 1
data = conn.recv(10)
# keep everything except for the newline terminator
buf.write(data.replace(b'\n', b''))
if b'\n' in data:
break
time.sleep(0.01)
conn.close()
finally:
serv.close()
evt.set()
def bind_af_aware(sock, addr):
"""Helper function to bind a socket according to its family."""
if HAS_UNIX_SOCKETS and sock.family == socket.AF_UNIX:
# Make sure the path doesn't exist.
support.unlink(addr)
socket_helper.bind_unix_socket(sock, addr)
else:
sock.bind(addr)
| crashingdummy |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/hooks/sns.py | {
"start": 2351,
"end": 6046
} | class ____(AwsBaseHook):
"""
Interact with Amazon Simple Notification Service.
Provide thin wrapper around :external+boto3:py:class:`boto3.client("sns") <SNS.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(self, *args, **kwargs):
super().__init__(client_type="sns", *args, **kwargs)
def publish_to_target(
self,
target_arn: str,
message: str,
subject: str | None = None,
message_attributes: dict | None = None,
message_deduplication_id: str | None = None,
message_group_id: str | None = None,
):
"""
Publish a message to a SNS topic or an endpoint.
.. seealso::
- :external+boto3:py:meth:`SNS.Client.publish`
:param target_arn: either a TopicArn or an EndpointArn
:param message: the default message you want to send
:param subject: subject of message
:param message_attributes: additional attributes to publish for message filtering. This should be
a flat dict; the DataType to be sent depends on the type of the value:
- bytes = Binary
- str = String
- int, float = Number
- iterable = String.Array
:param message_deduplication_id: Every message must have a unique message_deduplication_id.
This parameter applies only to FIFO (first-in-first-out) topics.
:param message_group_id: Tag that specifies that a message belongs to a specific message group.
This parameter applies only to FIFO (first-in-first-out) topics.
"""
return self.get_conn().publish(
**_build_publish_kwargs(
target_arn, message, subject, message_attributes, message_deduplication_id, message_group_id
)
)
async def apublish_to_target(
self,
target_arn: str,
message: str,
subject: str | None = None,
message_attributes: dict | None = None,
message_deduplication_id: str | None = None,
message_group_id: str | None = None,
):
"""
Publish a message to a SNS topic or an endpoint.
.. seealso::
- :external+boto3:py:meth:`SNS.Client.publish`
:param target_arn: either a TopicArn or an EndpointArn
:param message: the default message you want to send
:param subject: subject of message
:param message_attributes: additional attributes to publish for message filtering. This should be
a flat dict; the DataType to be sent depends on the type of the value:
- bytes = Binary
- str = String
- int, float = Number
- iterable = String.Array
:param message_deduplication_id: Every message must have a unique message_deduplication_id.
This parameter applies only to FIFO (first-in-first-out) topics.
:param message_group_id: Tag that specifies that a message belongs to a specific message group.
This parameter applies only to FIFO (first-in-first-out) topics.
"""
async with await self.get_async_conn() as async_client:
return await async_client.publish(
**_build_publish_kwargs(
target_arn,
message,
subject,
message_attributes,
message_deduplication_id,
message_group_id,
)
)
| SnsHook |
python | eth-brownie__brownie | brownie/network/gas/bases.py | {
"start": 2709,
"end": 3343
} | class ____(GasABC):
"""
Abstract base class for simple gas strategies.
Simple gas strategies are called once to provide a gas price
at the time a transaction is broadcasted. Transactions using simple
gas strategies are not automatically rebroadcasted.
Subclass from this ABC to implement your own simple gas strategy.
"""
@abstractmethod
def get_gas_price(self) -> int:
"""
Return the initial gas price for a transaction.
Returns
-------
int
Gas price, given as an integer in wei.
"""
raise NotImplementedError
| SimpleGasStrategy |
python | getsentry__sentry | tests/sentry/rules/processing/test_delayed_processing.py | {
"start": 2590,
"end": 5449
} | class ____(CreateEventTestCase):
def setUp(self) -> None:
super().setUp()
self.project = self.create_project()
self.environment = self.create_environment(project=self.project)
self.event = self.create_event(
self.project.id, FROZEN_TIME, "group-1", self.environment.name
)
self.event_two = self.create_event(
self.project.id, FROZEN_TIME, "group-1", self.environment.name
)
self.expected = {
self.event.event_id: self.event,
self.event_two.event_id: self.event_two,
}
def test_basic(self) -> None:
event_ids: list[str] = [self.event.event_id, self.event_two.event_id]
result = bulk_fetch_events(event_ids, self.project.id)
assert len(result) == len(self.expected)
for expected, fetched in zip(self.expected, result):
assert expected == fetched
def test_empty_list(self) -> None:
event_ids: list[str] = []
result = bulk_fetch_events(event_ids, self.project.id)
assert len(result) == 0
def test_invalid_project(self) -> None:
event_ids: list[str] = [self.event.event_id, self.event_two.event_id]
result = bulk_fetch_events(event_ids, 0)
assert len(result) == 0
def test_with_invalid_event_ids(self) -> None:
event_ids: list[str] = ["-1", "0"]
result = bulk_fetch_events(event_ids, self.project.id)
assert len(result) == 0
def test_event_ids_with_mixed_validity(self) -> None:
event_ids: list[str] = ["-1", self.event.event_id, "0", self.event_two.event_id]
result = bulk_fetch_events(event_ids, self.project.id)
assert len(result) == len(self.expected)
for expected, fetched in zip(self.expected, result):
assert expected == fetched
@patch("sentry.rules.processing.delayed_processing.ConditionalRetryPolicy")
@patch("sentry.rules.processing.delayed_processing.EVENT_LIMIT", 2)
def test_more_than_limit_event_ids(self, mock_retry_policy) -> None:
"""
Test that when the number of event_ids exceeds the EVENT_LIMIT,
batches into groups based on the EVENT_LIMT, and then merges results.
"""
event_ids: list[str] = ["-1", self.event.event_id, "0", self.event_two.event_id]
mock_retry_instance = MagicMock()
mock_retry_policy.return_value = mock_retry_instance
def mock_return_value(lambda_func):
return lambda_func()
mock_retry_instance.side_effect = mock_return_value
results = bulk_fetch_events(event_ids, self.project.id)
assert mock_retry_instance.call_count == 2
assert len(results) == len(self.expected)
for expected, fetched in zip(self.expected, results):
assert expected == fetched
| BulkFetchEventsTest |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 34356,
"end": 34550
} | class ____(RootModel[Union[CanManage, CanManageRun, CanView]]):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(frozen=True)
| PermissionLevelForGroup |
python | openai__openai-python | src/openai/types/beta/thread_create_params.py | {
"start": 2209,
"end": 2417
} | class ____(TypedDict, total=False):
file_id: str
"""The ID of the file to attach to the message."""
tools: Iterable[MessageAttachmentTool]
"""The tools to add this file to."""
| MessageAttachment |
python | django__django | tests/model_forms/tests.py | {
"start": 3956,
"end": 4063
} | class ____(forms.ModelForm):
class Meta:
model = TextFile
fields = "__all__"
| TextFileForm |
python | doocs__leetcode | lcof/面试题64. 求1+2+…+n/Solution.py | {
"start": 0,
"end": 101
} | class ____:
def sumNums(self, n: int) -> int:
return n and (n + self.sumNums(n - 1))
| Solution |
python | getsentry__sentry | src/sentry/models/releases/util.py | {
"start": 787,
"end": 922
} | class ____(
namedtuple("SemverVersion", "major minor patch revision prerelease_case prerelease")
):
pass
@dataclass
| SemverVersion |
python | cython__cython | Cython/Compiler/Code.py | {
"start": 48147,
"end": 50405
} | class ____:
"""Global info about a C string constant held by GlobalState.
"""
# cname string
# text EncodedString or BytesLiteral
# escaped_value str The string value as C code byte sequence.
# py_strings {(identifier, encoding) : PyStringConst}
# c_used boolean Is the plain C string used (or only the Python object?)
def __init__(self, cname, text, byte_string):
self.cname = cname
self.text = text
self.escaped_value = StringEncoding.escape_byte_string(byte_string)
self.py_strings = None
self.c_used = False
def get_py_string_const(self, encoding, identifier=None):
text = self.text
intern: cython.bint
is_unicode: cython.bint
if identifier or encoding is None:
# unicode string
encoding = encoding_key = None
is_unicode = True
else:
# bytes
is_unicode = False
encoding = encoding.lower()
if encoding in ('utf8', 'utf-8', 'ascii', 'usascii', 'us-ascii'):
encoding = None
encoding_key = None
else:
encoding_key = ''.join(find_alphanums(encoding))
if identifier:
intern = True
elif identifier is None:
if isinstance(text, bytes):
intern = bool(possible_bytes_identifier(text))
else:
intern = bool(possible_unicode_identifier(text))
else:
intern = False
key = (intern, is_unicode, encoding_key)
if self.py_strings is None:
self.py_strings = {}
else:
try:
return self.py_strings[key]
except KeyError:
pass
pystring_cname = (
f"{Naming.interned_prefixes['str'] if intern else Naming.py_const_prefix}"
f"{'u' if is_unicode else 'b'}"
f"{'_' + encoding_key if encoding_key else ''}"
f"_{self.cname[len(Naming.const_prefix):]}"
)
py_string = PyStringConst(pystring_cname, encoding, intern, is_unicode)
self.py_strings[key] = py_string
return py_string
| StringConst |
python | qdrant__qdrant-client | qdrant_client/local/async_qdrant_local.py | {
"start": 1024,
"end": 38805
} | class ____(AsyncQdrantBase):
"""
Everything Qdrant server can do, but locally.
Use this implementation to run vector search without running a Qdrant server.
Everything that works with local Qdrant will work with server Qdrant as well.
Use for small-scale data, demos, and tests.
If you need more speed or size, use Qdrant server.
"""
LARGE_DATA_THRESHOLD = 20000
def __init__(self, location: str, force_disable_check_same_thread: bool = False) -> None:
"""
Initialize local Qdrant.
Args:
location: Where to store data. Can be a path to a directory or `:memory:` for in-memory storage.
force_disable_check_same_thread: Disable SQLite check_same_thread check. Use only if you know what you are doing.
"""
super().__init__()
self.force_disable_check_same_thread = force_disable_check_same_thread
self.location = location
self.persistent = location != ":memory:"
self.collections: dict[str, LocalCollection] = {}
self.aliases: dict[str, str] = {}
self._flock_file: Optional[TextIOWrapper] = None
self._load()
self._closed: bool = False
@property
def closed(self) -> bool:
return self._closed
async def close(self, **kwargs: Any) -> None:
self._closed = True
for collection in self.collections.values():
if collection is not None:
collection.close()
else:
show_warning(
message=f"Collection appears to be None before closing. The existing collections are: {list(self.collections.keys())}",
category=UserWarning,
stacklevel=4,
)
try:
if self._flock_file is not None and (not self._flock_file.closed):
portalocker.unlock(self._flock_file)
self._flock_file.close()
except TypeError:
pass
def _load(self) -> None:
deprecated_config_fields = ("init_from",)
if not self.persistent:
return
meta_path = os.path.join(self.location, META_INFO_FILENAME)
if not os.path.exists(meta_path):
os.makedirs(self.location, exist_ok=True)
with open(meta_path, "w") as f:
f.write(json.dumps({"collections": {}, "aliases": {}}))
else:
with open(meta_path, "r") as f:
meta = json.load(f)
for collection_name, config_json in meta["collections"].items():
for key in deprecated_config_fields:
config_json.pop(key, None)
config = rest_models.CreateCollection(**config_json)
collection_path = self._collection_path(collection_name)
collection = LocalCollection(
config,
collection_path,
force_disable_check_same_thread=self.force_disable_check_same_thread,
)
self.collections[collection_name] = collection
if len(collection.ids) > self.LARGE_DATA_THRESHOLD:
show_warning(
f"Local mode is not recommended for collections with more than {self.LARGE_DATA_THRESHOLD:,} points. Collection <{collection_name}> contains {len(collection.ids)} points. Consider using Qdrant in Docker or Qdrant Cloud for better performance with large datasets.",
category=UserWarning,
stacklevel=5,
)
self.aliases = meta["aliases"]
lock_file_path = os.path.join(self.location, ".lock")
if not os.path.exists(lock_file_path):
os.makedirs(self.location, exist_ok=True)
with open(lock_file_path, "w") as f:
f.write("tmp lock file")
self._flock_file = open(lock_file_path, "r+")
try:
portalocker.lock(
self._flock_file,
portalocker.LockFlags.EXCLUSIVE | portalocker.LockFlags.NON_BLOCKING,
)
except portalocker.exceptions.LockException:
raise RuntimeError(
f"Storage folder {self.location} is already accessed by another instance of Qdrant client. If you require concurrent access, use Qdrant server instead."
)
def _save(self) -> None:
if not self.persistent:
return
if self.closed:
raise RuntimeError("QdrantLocal instance is closed. Please create a new instance.")
meta_path = os.path.join(self.location, META_INFO_FILENAME)
with open(meta_path, "w") as f:
f.write(
json.dumps(
{
"collections": {
collection_name: to_dict(collection.config)
for (collection_name, collection) in self.collections.items()
},
"aliases": self.aliases,
}
)
)
def _get_collection(self, collection_name: str) -> LocalCollection:
if self.closed:
raise RuntimeError("QdrantLocal instance is closed. Please create a new instance.")
if collection_name in self.collections:
return self.collections[collection_name]
if collection_name in self.aliases:
return self.collections[self.aliases[collection_name]]
raise ValueError(f"Collection {collection_name} not found")
def search(
self,
collection_name: str,
query_vector: Union[
types.NumpyArray,
Sequence[float],
tuple[str, list[float]],
types.NamedVector,
types.NamedSparseVector,
],
query_filter: Optional[types.Filter] = None,
search_params: Optional[types.SearchParams] = None,
limit: int = 10,
offset: Optional[int] = None,
with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
with_vectors: Union[bool, Sequence[str]] = False,
score_threshold: Optional[float] = None,
**kwargs: Any,
) -> list[types.ScoredPoint]:
collection = self._get_collection(collection_name)
return collection.search(
query_vector=query_vector,
query_filter=query_filter,
limit=limit,
offset=offset,
with_payload=with_payload,
with_vectors=with_vectors,
score_threshold=score_threshold,
)
async def search_matrix_offsets(
self,
collection_name: str,
query_filter: Optional[types.Filter] = None,
limit: int = 3,
sample: int = 10,
using: Optional[str] = None,
**kwargs: Any,
) -> types.SearchMatrixOffsetsResponse:
collection = self._get_collection(collection_name)
return collection.search_matrix_offsets(
query_filter=query_filter, limit=limit, sample=sample, using=using
)
async def search_matrix_pairs(
self,
collection_name: str,
query_filter: Optional[types.Filter] = None,
limit: int = 3,
sample: int = 10,
using: Optional[str] = None,
**kwargs: Any,
) -> types.SearchMatrixPairsResponse:
collection = self._get_collection(collection_name)
return collection.search_matrix_pairs(
query_filter=query_filter, limit=limit, sample=sample, using=using
)
def _resolve_query_input(
self,
collection_name: str,
query: Optional[types.Query],
using: Optional[str],
lookup_from: Optional[types.LookupLocation],
) -> tuple[types.Query, set[types.PointId]]:
"""
Resolves any possible ids into vectors and returns a new query object, along with a set of the mentioned
point ids that should be filtered when searching.
"""
lookup_collection_name = lookup_from.collection if lookup_from else collection_name
collection = self._get_collection(lookup_collection_name)
search_in_vector_name = using if using is not None else DEFAULT_VECTOR_NAME
vector_name = (
lookup_from.vector
if lookup_from is not None and lookup_from.vector is not None
else search_in_vector_name
)
sparse = vector_name in collection.sparse_vectors
multi = vector_name in collection.multivectors
if sparse:
collection_vectors = collection.sparse_vectors
elif multi:
collection_vectors = collection.multivectors
else:
collection_vectors = collection.vectors
mentioned_ids: set[types.PointId] = set()
def input_into_vector(vector_input: types.VectorInput) -> types.VectorInput:
if isinstance(vector_input, get_args(types.PointId)):
if isinstance(vector_input, uuid.UUID):
vector_input = str(vector_input)
point_id = vector_input
if point_id not in collection.ids:
raise ValueError(f"Point {point_id} is not found in the collection")
idx = collection.ids[point_id]
if vector_name in collection_vectors:
vec = collection_vectors[vector_name][idx]
else:
raise ValueError(f"Vector {vector_name} not found")
if isinstance(vec, np.ndarray):
vec = vec.tolist()
if collection_name == lookup_collection_name:
mentioned_ids.add(point_id)
return vec
else:
return vector_input
query = deepcopy(query)
if isinstance(query, rest_models.NearestQuery):
query.nearest = input_into_vector(query.nearest)
elif isinstance(query, rest_models.RecommendQuery):
if query.recommend.negative is not None:
query.recommend.negative = [
input_into_vector(vector_input) for vector_input in query.recommend.negative
]
if query.recommend.positive is not None:
query.recommend.positive = [
input_into_vector(vector_input) for vector_input in query.recommend.positive
]
elif isinstance(query, rest_models.DiscoverQuery):
query.discover.target = input_into_vector(query.discover.target)
pairs = (
query.discover.context
if isinstance(query.discover.context, list)
else [query.discover.context]
)
query.discover.context = [
rest_models.ContextPair(
positive=input_into_vector(pair.positive),
negative=input_into_vector(pair.negative),
)
for pair in pairs
]
elif isinstance(query, rest_models.ContextQuery):
pairs = query.context if isinstance(query.context, list) else [query.context]
query.context = [
rest_models.ContextPair(
positive=input_into_vector(pair.positive),
negative=input_into_vector(pair.negative),
)
for pair in pairs
]
elif isinstance(query, rest_models.OrderByQuery):
pass
elif isinstance(query, rest_models.FusionQuery):
pass
elif isinstance(query, rest_models.RrfQuery):
pass
return (query, mentioned_ids)
def _resolve_prefetches_input(
self,
prefetch: Optional[Union[Sequence[types.Prefetch], types.Prefetch]],
collection_name: str,
) -> list[types.Prefetch]:
if prefetch is None:
return []
if isinstance(prefetch, list) and len(prefetch) == 0:
return []
prefetches = []
if isinstance(prefetch, types.Prefetch):
prefetches = [prefetch]
prefetches.extend(
prefetch.prefetch if isinstance(prefetch.prefetch, list) else [prefetch.prefetch]
)
elif isinstance(prefetch, Sequence):
prefetches = list(prefetch)
return [
self._resolve_prefetch_input(prefetch, collection_name)
for prefetch in prefetches
if prefetch is not None
]
def _resolve_prefetch_input(
self, prefetch: types.Prefetch, collection_name: str
) -> types.Prefetch:
if prefetch.query is None:
return prefetch
prefetch = deepcopy(prefetch)
(query, mentioned_ids) = self._resolve_query_input(
collection_name, prefetch.query, prefetch.using, prefetch.lookup_from
)
prefetch.query = query
prefetch.filter = ignore_mentioned_ids_filter(prefetch.filter, list(mentioned_ids))
prefetch.prefetch = self._resolve_prefetches_input(prefetch.prefetch, collection_name)
return prefetch
async def query_points(
self,
collection_name: str,
query: Optional[types.Query] = None,
using: Optional[str] = None,
prefetch: Union[types.Prefetch, list[types.Prefetch], None] = None,
query_filter: Optional[types.Filter] = None,
search_params: Optional[types.SearchParams] = None,
limit: int = 10,
offset: Optional[int] = None,
with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
with_vectors: Union[bool, Sequence[str]] = False,
score_threshold: Optional[float] = None,
lookup_from: Optional[types.LookupLocation] = None,
**kwargs: Any,
) -> types.QueryResponse:
collection = self._get_collection(collection_name)
if query is not None:
(query, mentioned_ids) = self._resolve_query_input(
collection_name, query, using, lookup_from
)
query_filter = ignore_mentioned_ids_filter(query_filter, list(mentioned_ids))
prefetch = self._resolve_prefetches_input(prefetch, collection_name)
return collection.query_points(
query=query,
prefetch=prefetch,
query_filter=query_filter,
using=using,
score_threshold=score_threshold,
limit=limit,
offset=offset or 0,
with_payload=with_payload,
with_vectors=with_vectors,
)
async def query_batch_points(
self, collection_name: str, requests: Sequence[types.QueryRequest], **kwargs: Any
) -> list[types.QueryResponse]:
return [
await self.query_points(
collection_name=collection_name,
query=request.query,
prefetch=request.prefetch,
query_filter=request.filter,
limit=request.limit or 10,
offset=request.offset,
with_payload=request.with_payload,
with_vectors=request.with_vector,
score_threshold=request.score_threshold,
using=request.using,
lookup_from=request.lookup_from,
)
for request in requests
]
async def query_points_groups(
self,
collection_name: str,
group_by: str,
query: Union[
types.PointId,
list[float],
list[list[float]],
types.SparseVector,
types.Query,
types.NumpyArray,
types.Document,
types.Image,
types.InferenceObject,
None,
] = None,
using: Optional[str] = None,
prefetch: Union[types.Prefetch, list[types.Prefetch], None] = None,
query_filter: Optional[types.Filter] = None,
search_params: Optional[types.SearchParams] = None,
limit: int = 10,
group_size: int = 3,
with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
with_vectors: Union[bool, Sequence[str]] = False,
score_threshold: Optional[float] = None,
with_lookup: Optional[types.WithLookupInterface] = None,
lookup_from: Optional[types.LookupLocation] = None,
**kwargs: Any,
) -> types.GroupsResult:
collection = self._get_collection(collection_name)
if query is not None:
(query, mentioned_ids) = self._resolve_query_input(
collection_name, query, using, lookup_from
)
query_filter = ignore_mentioned_ids_filter(query_filter, list(mentioned_ids))
with_lookup_collection = None
if with_lookup is not None:
if isinstance(with_lookup, str):
with_lookup_collection = self._get_collection(with_lookup)
else:
with_lookup_collection = self._get_collection(with_lookup.collection)
return collection.query_groups(
query=query,
query_filter=query_filter,
using=using,
prefetch=prefetch,
limit=limit,
group_by=group_by,
group_size=group_size,
with_payload=with_payload,
with_vectors=with_vectors,
score_threshold=score_threshold,
with_lookup=with_lookup,
with_lookup_collection=with_lookup_collection,
)
async def scroll(
self,
collection_name: str,
scroll_filter: Optional[types.Filter] = None,
limit: int = 10,
order_by: Optional[types.OrderBy] = None,
offset: Optional[types.PointId] = None,
with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
with_vectors: Union[bool, Sequence[str]] = False,
**kwargs: Any,
) -> tuple[list[types.Record], Optional[types.PointId]]:
collection = self._get_collection(collection_name)
return collection.scroll(
scroll_filter=scroll_filter,
limit=limit,
order_by=order_by,
offset=offset,
with_payload=with_payload,
with_vectors=with_vectors,
)
async def count(
self,
collection_name: str,
count_filter: Optional[types.Filter] = None,
exact: bool = True,
**kwargs: Any,
) -> types.CountResult:
collection = self._get_collection(collection_name)
return collection.count(count_filter=count_filter)
async def facet(
self,
collection_name: str,
key: str,
facet_filter: Optional[types.Filter] = None,
limit: int = 10,
exact: bool = False,
**kwargs: Any,
) -> types.FacetResponse:
collection = self._get_collection(collection_name)
return collection.facet(key=key, facet_filter=facet_filter, limit=limit)
async def upsert(
self,
collection_name: str,
points: types.Points,
update_filter: Optional[types.Filter] = None,
**kwargs: Any,
) -> types.UpdateResult:
collection = self._get_collection(collection_name)
collection.upsert(points, update_filter=update_filter)
return self._default_update_result()
async def update_vectors(
self,
collection_name: str,
points: Sequence[types.PointVectors],
update_filter: Optional[types.Filter] = None,
**kwargs: Any,
) -> types.UpdateResult:
collection = self._get_collection(collection_name)
collection.update_vectors(points, update_filter=update_filter)
return self._default_update_result()
async def delete_vectors(
self,
collection_name: str,
vectors: Sequence[str],
points: types.PointsSelector,
**kwargs: Any,
) -> types.UpdateResult:
collection = self._get_collection(collection_name)
collection.delete_vectors(vectors, points)
return self._default_update_result()
async def retrieve(
self,
collection_name: str,
ids: Sequence[types.PointId],
with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
with_vectors: Union[bool, Sequence[str]] = False,
**kwargs: Any,
) -> list[types.Record]:
collection = self._get_collection(collection_name)
return collection.retrieve(ids, with_payload, with_vectors)
@classmethod
def _default_update_result(cls, operation_id: int = 0) -> types.UpdateResult:
return types.UpdateResult(
operation_id=operation_id, status=rest_models.UpdateStatus.COMPLETED
)
async def delete(
self, collection_name: str, points_selector: types.PointsSelector, **kwargs: Any
) -> types.UpdateResult:
collection = self._get_collection(collection_name)
collection.delete(points_selector)
return self._default_update_result()
async def set_payload(
self,
collection_name: str,
payload: types.Payload,
points: types.PointsSelector,
key: Optional[str] = None,
**kwargs: Any,
) -> types.UpdateResult:
collection = self._get_collection(collection_name)
collection.set_payload(payload=payload, selector=points, key=key)
return self._default_update_result()
async def overwrite_payload(
self,
collection_name: str,
payload: types.Payload,
points: types.PointsSelector,
**kwargs: Any,
) -> types.UpdateResult:
collection = self._get_collection(collection_name)
collection.overwrite_payload(payload=payload, selector=points)
return self._default_update_result()
async def delete_payload(
self,
collection_name: str,
keys: Sequence[str],
points: types.PointsSelector,
**kwargs: Any,
) -> types.UpdateResult:
collection = self._get_collection(collection_name)
collection.delete_payload(keys=keys, selector=points)
return self._default_update_result()
async def clear_payload(
self, collection_name: str, points_selector: types.PointsSelector, **kwargs: Any
) -> types.UpdateResult:
collection = self._get_collection(collection_name)
collection.clear_payload(selector=points_selector)
return self._default_update_result()
async def batch_update_points(
self,
collection_name: str,
update_operations: Sequence[types.UpdateOperation],
**kwargs: Any,
) -> list[types.UpdateResult]:
collection = self._get_collection(collection_name)
collection.batch_update_points(update_operations)
return [self._default_update_result()] * len(update_operations)
async def update_collection_aliases(
self, change_aliases_operations: Sequence[types.AliasOperations], **kwargs: Any
) -> bool:
for operation in change_aliases_operations:
if isinstance(operation, rest_models.CreateAliasOperation):
self._get_collection(operation.create_alias.collection_name)
self.aliases[operation.create_alias.alias_name] = (
operation.create_alias.collection_name
)
elif isinstance(operation, rest_models.DeleteAliasOperation):
self.aliases.pop(operation.delete_alias.alias_name, None)
elif isinstance(operation, rest_models.RenameAliasOperation):
new_name = operation.rename_alias.new_alias_name
old_name = operation.rename_alias.old_alias_name
self.aliases[new_name] = self.aliases.pop(old_name)
else:
raise ValueError(f"Unknown operation: {operation}")
self._save()
return True
async def get_collection_aliases(
self, collection_name: str, **kwargs: Any
) -> types.CollectionsAliasesResponse:
if self.closed:
raise RuntimeError("QdrantLocal instance is closed. Please create a new instance.")
return types.CollectionsAliasesResponse(
aliases=[
rest_models.AliasDescription(alias_name=alias_name, collection_name=name)
for (alias_name, name) in self.aliases.items()
if name == collection_name
]
)
async def get_aliases(self, **kwargs: Any) -> types.CollectionsAliasesResponse:
if self.closed:
raise RuntimeError("QdrantLocal instance is closed. Please create a new instance.")
return types.CollectionsAliasesResponse(
aliases=[
rest_models.AliasDescription(alias_name=alias_name, collection_name=name)
for (alias_name, name) in self.aliases.items()
]
)
async def get_collections(self, **kwargs: Any) -> types.CollectionsResponse:
if self.closed:
raise RuntimeError("QdrantLocal instance is closed. Please create a new instance.")
return types.CollectionsResponse(
collections=[
rest_models.CollectionDescription(name=name)
for (name, _) in self.collections.items()
]
)
async def get_collection(self, collection_name: str, **kwargs: Any) -> types.CollectionInfo:
collection = self._get_collection(collection_name)
return collection.info()
async def collection_exists(self, collection_name: str, **kwargs: Any) -> bool:
try:
self._get_collection(collection_name)
return True
except ValueError:
return False
async def update_collection(
self,
collection_name: str,
sparse_vectors_config: Optional[Mapping[str, types.SparseVectorParams]] = None,
metadata: Optional[types.Payload] = None,
**kwargs: Any,
) -> bool:
_collection = self._get_collection(collection_name)
updated = False
if sparse_vectors_config is not None:
for vector_name, vector_params in sparse_vectors_config.items():
_collection.update_sparse_vectors_config(vector_name, vector_params)
updated = True
if metadata is not None:
if _collection.config.metadata is not None:
_collection.config.metadata.update(metadata)
else:
_collection.config.metadata = deepcopy(metadata)
updated = True
self._save()
return updated
def _collection_path(self, collection_name: str) -> Optional[str]:
if self.persistent:
return os.path.join(self.location, "collection", collection_name)
else:
return None
async def delete_collection(self, collection_name: str, **kwargs: Any) -> bool:
if self.closed:
raise RuntimeError("QdrantLocal instance is closed. Please create a new instance.")
_collection = self.collections.pop(collection_name, None)
del _collection
self.aliases = {
alias_name: name
for (alias_name, name) in self.aliases.items()
if name != collection_name
}
collection_path = self._collection_path(collection_name)
if collection_path is not None:
shutil.rmtree(collection_path, ignore_errors=True)
self._save()
return True
async def create_collection(
self,
collection_name: str,
vectors_config: Optional[
Union[types.VectorParams, Mapping[str, types.VectorParams]]
] = None,
sparse_vectors_config: Optional[Mapping[str, types.SparseVectorParams]] = None,
metadata: Optional[types.Payload] = None,
**kwargs: Any,
) -> bool:
if self.closed:
raise RuntimeError("QdrantLocal instance is closed. Please create a new instance.")
if collection_name in self.collections:
raise ValueError(f"Collection {collection_name} already exists")
collection_path = self._collection_path(collection_name)
if collection_path is not None:
os.makedirs(collection_path, exist_ok=True)
collection = LocalCollection(
rest_models.CreateCollection(
vectors=vectors_config or {},
sparse_vectors=sparse_vectors_config,
metadata=deepcopy(metadata),
),
location=collection_path,
force_disable_check_same_thread=self.force_disable_check_same_thread,
)
self.collections[collection_name] = collection
self._save()
return True
async def recreate_collection(
self,
collection_name: str,
vectors_config: Union[types.VectorParams, Mapping[str, types.VectorParams]],
sparse_vectors_config: Optional[Mapping[str, types.SparseVectorParams]] = None,
metadata: Optional[types.Payload] = None,
**kwargs: Any,
) -> bool:
await self.delete_collection(collection_name)
return await self.create_collection(
collection_name, vectors_config, sparse_vectors_config, metadata=metadata
)
def upload_points(
self,
collection_name: str,
points: Iterable[types.PointStruct],
update_filter: Optional[types.Filter] = None,
**kwargs: Any,
) -> None:
self._upload_points(collection_name, points, update_filter=update_filter)
def _upload_points(
self,
collection_name: str,
points: Iterable[Union[types.PointStruct, types.Record]],
update_filter: Optional[types.Filter] = None,
) -> None:
collection = self._get_collection(collection_name)
collection.upsert(
[
rest_models.PointStruct(
id=point.id, vector=point.vector or {}, payload=point.payload or {}
)
for point in points
],
update_filter=update_filter,
)
def upload_collection(
self,
collection_name: str,
vectors: Union[
dict[str, types.NumpyArray], types.NumpyArray, Iterable[types.VectorStruct]
],
payload: Optional[Iterable[dict[Any, Any]]] = None,
ids: Optional[Iterable[types.PointId]] = None,
update_filter: Optional[types.Filter] = None,
**kwargs: Any,
) -> None:
def uuid_generator() -> Generator[str, None, None]:
while True:
yield str(uuid4())
collection = self._get_collection(collection_name)
if isinstance(vectors, dict) and any(
(isinstance(v, np.ndarray) for v in vectors.values())
):
assert (
len(set([arr.shape[0] for arr in vectors.values()])) == 1
), "Each named vector should have the same number of vectors"
num_vectors = next(iter(vectors.values())).shape[0]
vectors = [
{name: vectors[name][i].tolist() for name in vectors.keys()}
for i in range(num_vectors)
]
collection.upsert(
[
rest_models.PointStruct(
id=str(point_id) if isinstance(point_id, uuid.UUID) else point_id,
vector=(vector.tolist() if isinstance(vector, np.ndarray) else vector) or {},
payload=payload or {},
)
for (point_id, vector, payload) in zip(
ids or uuid_generator(), iter(vectors), payload or itertools.cycle([{}])
)
],
update_filter=update_filter,
)
async def create_payload_index(
self,
collection_name: str,
field_name: str,
field_schema: Optional[types.PayloadSchemaType] = None,
field_type: Optional[types.PayloadSchemaType] = None,
**kwargs: Any,
) -> types.UpdateResult:
show_warning_once(
message="Payload indexes have no effect in the local Qdrant. Please use server Qdrant if you need payload indexes.",
category=UserWarning,
idx="create-local-payload-indexes",
stacklevel=5,
)
return self._default_update_result()
async def delete_payload_index(
self, collection_name: str, field_name: str, **kwargs: Any
) -> types.UpdateResult:
show_warning_once(
message="Payload indexes have no effect in the local Qdrant. Please use server Qdrant if you need payload indexes.",
category=UserWarning,
idx="delete-local-payload-indexes",
stacklevel=5,
)
return self._default_update_result()
async def list_snapshots(
self, collection_name: str, **kwargs: Any
) -> list[types.SnapshotDescription]:
return []
async def create_snapshot(
self, collection_name: str, **kwargs: Any
) -> Optional[types.SnapshotDescription]:
raise NotImplementedError(
"Snapshots are not supported in the local Qdrant. Please use server Qdrant if you need full snapshots."
)
async def delete_snapshot(
self, collection_name: str, snapshot_name: str, **kwargs: Any
) -> bool:
raise NotImplementedError(
"Snapshots are not supported in the local Qdrant. Please use server Qdrant if you need full snapshots."
)
async def list_full_snapshots(self, **kwargs: Any) -> list[types.SnapshotDescription]:
return []
async def create_full_snapshot(self, **kwargs: Any) -> types.SnapshotDescription:
raise NotImplementedError(
"Snapshots are not supported in the local Qdrant. Please use server Qdrant if you need full snapshots."
)
async def delete_full_snapshot(self, snapshot_name: str, **kwargs: Any) -> bool:
raise NotImplementedError(
"Snapshots are not supported in the local Qdrant. Please use server Qdrant if you need full snapshots."
)
async def recover_snapshot(self, collection_name: str, location: str, **kwargs: Any) -> bool:
raise NotImplementedError(
"Snapshots are not supported in the local Qdrant. Please use server Qdrant if you need full snapshots."
)
async def list_shard_snapshots(
self, collection_name: str, shard_id: int, **kwargs: Any
) -> list[types.SnapshotDescription]:
return []
async def create_shard_snapshot(
self, collection_name: str, shard_id: int, **kwargs: Any
) -> Optional[types.SnapshotDescription]:
raise NotImplementedError(
"Snapshots are not supported in the local Qdrant. Please use server Qdrant if you need snapshots."
)
async def delete_shard_snapshot(
self, collection_name: str, shard_id: int, snapshot_name: str, **kwargs: Any
) -> bool:
raise NotImplementedError(
"Snapshots are not supported in the local Qdrant. Please use server Qdrant if you need snapshots."
)
async def recover_shard_snapshot(
self, collection_name: str, shard_id: int, location: str, **kwargs: Any
) -> bool:
raise NotImplementedError(
"Snapshots are not supported in the local Qdrant. Please use server Qdrant if you need snapshots."
)
async def create_shard_key(
self,
collection_name: str,
shard_key: types.ShardKey,
shards_number: Optional[int] = None,
replication_factor: Optional[int] = None,
placement: Optional[list[int]] = None,
**kwargs: Any,
) -> bool:
raise NotImplementedError(
"Sharding is not supported in the local Qdrant. Please use server Qdrant if you need sharding."
)
async def delete_shard_key(
self, collection_name: str, shard_key: types.ShardKey, **kwargs: Any
) -> bool:
raise NotImplementedError(
"Sharding is not supported in the local Qdrant. Please use server Qdrant if you need sharding."
)
async def info(self) -> types.VersionInfo:
version = importlib.metadata.version("qdrant-client")
return rest_models.VersionInfo(
title="qdrant - vector search engine", version=version, commit=None
)
async def cluster_collection_update(
self, collection_name: str, cluster_operation: types.ClusterOperations, **kwargs: Any
) -> bool:
raise NotImplementedError(
"Cluster collection update is not supported in the local Qdrant. Please use server Qdrant if you need a cluster"
)
async def collection_cluster_info(self, collection_name: str) -> types.CollectionClusterInfo:
raise NotImplementedError(
"Collection cluster info is not supported in the local Qdrant. Please use server Qdrant if you need a cluster"
)
async def cluster_status(self) -> types.ClusterStatus:
raise NotImplementedError(
"Cluster status is not supported in the local Qdrant. Please use server Qdrant if you need a cluster"
)
async def recover_current_peer(self) -> bool:
raise NotImplementedError(
"Recover current peer is not supported in the local Qdrant. Please use server Qdrant if you need a cluster"
)
async def remove_peer(self, peer_id: int, **kwargs: Any) -> bool:
raise NotImplementedError(
"Remove peer info is not supported in the local Qdrant. Please use server Qdrant if you need a cluster"
)
| AsyncQdrantLocal |
python | mkdocs__mkdocs | mkdocs/config/config_options.py | {
"start": 15211,
"end": 15349
} | class ____(NamedTuple):
host: str
port: int
def __str__(self) -> str:
return f'{self.host}:{self.port}'
| _IpAddressValue |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/rnn_cell_test.py | {
"start": 72043,
"end": 76849
} | class ____(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
@test_util.run_v1_only("b/124229375")
def testNestedIOLSTMAllRNNContainers(self):
input_size = 5
batch_size = 2
state_size = 6
max_length = 8
sequence_length = [4, 6]
with self.session(graph=ops.Graph()) as sess:
state_saver = TestStateSaver(batch_size, state_size)
single_input = (array_ops.placeholder(
dtypes.float32, shape=(None, input_size)),
array_ops.placeholder(
dtypes.float32, shape=(None, input_size)))
inputs = max_length * [single_input]
inputs_c = (array_ops_stack.stack([input_[0] for input_ in inputs]),
array_ops_stack.stack([input_[1] for input_ in inputs]))
single_input_using_dim = (array_ops.placeholder(
dtypes.float32, shape=(batch_size, input_size)),
array_ops.placeholder(
dtypes.float32,
shape=(batch_size, input_size)))
inputs_using_dim = max_length * [single_input_using_dim]
# Create a cell for the whole test. This is fine because the cell has no
# variables.
cell = NestedRNNCell()
outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
cell,
inputs_c,
dtype=dtypes.float32,
time_major=True,
sequence_length=sequence_length)
outputs_static, state_static = rnn.static_rnn(
cell, inputs, dtype=dtypes.float32, sequence_length=sequence_length)
outputs_bid, state_fw, state_bw = rnn.static_bidirectional_rnn(
cell,
cell,
inputs_using_dim,
dtype=dtypes.float32,
sequence_length=sequence_length)
outputs_sav, state_sav = rnn.static_state_saving_rnn(
cell,
inputs_using_dim,
sequence_length=sequence_length,
state_saver=state_saver,
state_name=("h", "c"))
def _assert_same_shape(input1, input2, double=False):
flat_input1 = nest.flatten(input1)
flat_input2 = nest.flatten(input2)
for inp1, inp2 in zip(flat_input1, flat_input2):
input_shape = inp1.get_shape().as_list()
if double:
input_shape[1] *= 2
self.assertEqual(input_shape, inp2.get_shape().as_list())
_assert_same_shape(inputs_c, outputs_dynamic)
_assert_same_shape(inputs, outputs_static)
_assert_same_shape(inputs_using_dim, outputs_sav)
_assert_same_shape(inputs_using_dim, outputs_bid, double=True)
variables_lib.global_variables_initializer().run()
input_total_size = (batch_size, input_size)
input_value = (np.random.randn(*input_total_size),
np.random.randn(*input_total_size))
outputs_dynamic_v = sess.run(
outputs_dynamic, feed_dict={
single_input: input_value
})
outputs_static_v = sess.run(
outputs_static, feed_dict={
single_input: input_value
})
outputs_sav_v = sess.run(
outputs_sav, feed_dict={
single_input_using_dim: input_value
})
outputs_bid_v = sess.run(
outputs_bid, feed_dict={
single_input_using_dim: input_value
})
self.assertAllEqual(outputs_static_v,
np.transpose(outputs_dynamic_v, (1, 0, 2, 3)))
self.assertAllEqual(outputs_static_v, outputs_sav_v)
outputs_static_array = np.array(outputs_static_v)
outputs_static_array_double = np.concatenate(
(outputs_static_array, outputs_static_array), axis=3)
outputs_bid_array = np.array(outputs_bid_v)
self.assertAllEqual(outputs_static_array_double, outputs_bid_array)
state_dynamic_v = sess.run(
state_dynamic, feed_dict={
single_input: input_value
})
state_static_v = sess.run(
state_static, feed_dict={
single_input: input_value
})
state_bid_fw_v = sess.run(
state_fw, feed_dict={
single_input_using_dim: input_value
})
state_bid_bw_v = sess.run(
state_bw, feed_dict={
single_input_using_dim: input_value
})
state_sav_v = sess.run(
state_sav, feed_dict={
single_input_using_dim: input_value
})
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_sav_v))
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_fw_v))
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_bw_v))
| NestedLSTMTest |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_tasks.py | {
"start": 8623,
"end": 9766
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.tasks.CloudTasksHook")
def test_create_task(self, mock_hook):
mock_hook.return_value.create_task.return_value = TEST_TASK
operator = CloudTasksTaskCreateOperator(
location=LOCATION, queue_name=QUEUE_ID, task=Task(), task_id="id"
)
result = operator.execute(context=mock.MagicMock())
assert result == {
"app_engine_http_request": {"body": "", "headers": {}, "http_method": 0, "relative_uri": ""},
"dispatch_count": 0,
"name": "",
"response_count": 0,
"view": 0,
}
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.create_task.assert_called_once_with(
location=LOCATION,
queue_name=QUEUE_ID,
task=Task(),
project_id=None,
task_name=None,
response_view=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestCloudTasksTaskCreate |
python | davidhalter__jedi | test/completion/inheritance.py | {
"start": 450,
"end": 1069
} | class ____:
class Test2:
def __init__(self):
self.foo_nested = 0
#? ['foo_nested']
self.foo_
#?
self.foo_here
def __init__(self, self2):
self.foo_here = 3
#? ['foo_here', 'foo_in_func']
self.foo_
#? int()
self.foo_here
#?
self.foo_nested
#?
self.foo_not_on_self
#? float()
self.foo_in_func
self2.foo_on_second = ''
def closure():
self.foo_in_func = 4.
def bar(self):
self = 3
self.foo_not_on_self = 3
| Test1 |
python | pydantic__pydantic | tests/mypy/modules/plugin_success.py | {
"start": 405,
"end": 772
} | class ____(BaseModel):
submodel: Optional['SelfReferencingModel']
@property
def prop(self) -> None:
...
SelfReferencingModel.model_rebuild()
model = Model(x=1, y='y')
Model(x=1, y='y', z='z')
model.x = 2
model.model_validate(model)
self_referencing_model = SelfReferencingModel(submodel=SelfReferencingModel(submodel=None))
| SelfReferencingModel |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_optimize09.py | {
"start": 315,
"end": 898
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("optimize09.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(
self.got_filename, {"constant_memory": True, "in_memory": False}
)
worksheet = workbook.add_worksheet()
smiley = "\u263a"
worksheet.write("A1", smiley)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | streamlit__streamlit | lib/streamlit/runtime/scriptrunner/exec_code.py | {
"start": 1333,
"end": 5799
} | class ____: # noqa: N801
"""A context for prepending a directory to sys.path for a second.
Code inspired by IPython:
Source: https://github.com/ipython/ipython/blob/master/IPython/utils/syspathcontext.py#L42
"""
def __init__(self, main_script_path: str) -> None:
self._main_script_path = main_script_path
self._added_path = False
def __repr__(self) -> str:
return util.repr_(self)
def __enter__(self) -> None:
if self._main_script_path not in sys.path:
sys.path.insert(0, self._main_script_path)
self._added_path = True
def __exit__(
self,
typ: type[BaseException] | None,
exc: BaseException | None,
tb: TracebackType | None,
) -> Literal[False]:
if self._added_path:
try:
sys.path.remove(self._main_script_path)
except ValueError:
# It's already removed.
pass
# Returning False causes any exceptions to be re-raised.
return False
def exec_func_with_error_handling(
func: Callable[[], Any], ctx: ScriptRunContext
) -> tuple[
Any | None,
bool,
RerunData | None,
bool,
Exception | None,
]:
"""Execute the passed function wrapped in a try/except block.
This function is called by the script runner to execute the user's script or
fragment reruns, but also for the execution of fragment code in context of a normal
app run. This wrapper ensures that handle_uncaught_exception messages show up in the
correct context.
Parameters
----------
func : callable
The function to execute wrapped in the try/except block.
ctx : ScriptRunContext
The context in which the script is being run.
Returns
-------
tuple
A tuple containing:
- The result of the passed function.
- A boolean indicating whether the script ran without errors (RerunException and
StopException don't count as errors).
- The RerunData instance belonging to a RerunException if the script was
interrupted by a RerunException.
- A boolean indicating whether the script was stopped prematurely (False for
RerunExceptions, True for all other exceptions).
- The uncaught exception if one occurred, None otherwise
"""
run_without_errors = True
# This will be set to a RerunData instance if our execution
# is interrupted by a RerunException.
rerun_exception_data: RerunData | None = None
# If the script stops early, we don't want to remove unseen widgets,
# so we track this to potentially skip session state cleanup later.
premature_stop: bool = False
# The result of the passed function
result: Any | None = None
# The uncaught exception if one occurred, None otherwise
uncaught_exception: Exception | None = None
try:
result = func()
except RerunException as e:
rerun_exception_data = e.rerun_data
# Since the script is about to rerun, we may need to reset our cursors/dg_stack
# so that we write to the right place in the app. For full script runs, this
# needs to happen in case the same thread reruns our script (a different thread
# would automatically come with fresh cursors/dg_stack values). For fragments,
# it doesn't matter either way since the fragment resets these values from its
# snapshot before execution.
ctx.cursors.clear()
context_dg_stack.set(get_default_dg_stack_value())
# Interruption due to a rerun is usually from `st.rerun()`, which
# we want to count as a script completion so triggers reset.
# It is also possible for this to happen if fast reruns is off,
# but this is very rare.
premature_stop = False
except StopException:
# This is thrown when the script executes `st.stop()`.
# We don't have to do anything here.
premature_stop = True
except FragmentHandledException:
run_without_errors = False
premature_stop = True
except Exception as ex:
run_without_errors = False
premature_stop = True
handle_uncaught_app_exception(ex)
uncaught_exception = ex
return (
result,
run_without_errors,
rerun_exception_data,
premature_stop,
uncaught_exception,
)
| modified_sys_path |
python | ipython__ipython | docs/autogen_shortcuts.py | {
"start": 982,
"end": 3064
} | class ____(Filter):
"""Protocol reflecting non-public prompt_toolkit's `_Invert`."""
filter: Filter
conjunctions_labels = {"_AndList": "&", "_OrList": "|"}
ATOMIC_CLASSES = {"Never", "Always", "Condition"}
HUMAN_NAMES_FOR_FILTERS = {
filter_: name for name, filter_ in KEYBINDING_FILTERS.items()
}
def format_filter(
filter_: Union[Filter, _NestedFilter, Condition, _Invert],
is_top_level=True,
skip=None,
) -> str:
"""Create easily readable description of the filter."""
s = filter_.__class__.__name__
if s == "Condition":
func = cast(Condition, filter_).func
if filter_ in HUMAN_NAMES_FOR_FILTERS:
return HUMAN_NAMES_FOR_FILTERS[filter_]
name = func.__name__
if name == "<lambda>":
source = getsource(func)
return source.split("=")[0].strip()
return func.__name__
elif s == "_Invert":
operand = cast(_Invert, filter_).filter
if operand.__class__.__name__ in ATOMIC_CLASSES:
return f"~{format_filter(operand, is_top_level=False)}"
return f"~({format_filter(operand, is_top_level=False)})"
elif s in conjunctions_labels:
filters = cast(_NestedFilter, filter_).filters
if filter_ in HUMAN_NAMES_FOR_FILTERS:
return HUMAN_NAMES_FOR_FILTERS[filter_]
conjunction = conjunctions_labels[s]
glue = f" {conjunction} "
result = glue.join(format_filter(x, is_top_level=False) for x in filters)
if len(filters) > 1 and not is_top_level:
result = f"({result})"
return result
elif s in ["Never", "Always"]:
return s.lower()
elif s == "PassThrough":
return "pass_through"
else:
raise ValueError(f"Unknown filter type: {filter_}")
def sentencize(s) -> str:
"""Extract first sentence"""
s = re.split(r"\.\W", s.replace("\n", " ").strip())
s = s[0] if len(s) else ""
if not s.endswith("."):
s += "."
try:
return " ".join(s.split())
except AttributeError:
return s
| _Invert |
python | pytorch__pytorch | test/functorch/test_ac_knapsack.py | {
"start": 13801,
"end": 16657
} | class ____(TestCase):
def setUp(self):
# (memory, runtime, max_memory, expected_runtime, expected_saved, expected_recomputable)
self.test_cases = [
([2, 3, 2, 4, 1], [1, 2, 1, 3, 2], 5, 5.0, [3, 4], [2, 1, 0]),
([1, 1, 1], [1, 2, 3], 3, 6.0, [0, 1, 2], []),
([10, 20, 30], [1, 2, 3], 5, 0.0, [], [2, 1, 0]),
([1, 2, 3], [10, 20, 30], 1, 10.0, [0], [2, 1]),
([1, 1, 1], [2, 2, 2], 2, 4.0, [0, 1], [2]),
([0, 2, 3], [5, 2, 3], 5, 10.0, [0, 1, 2], []),
([1, 2, 3], [0, 2, 3], 3, 3.0, [2], [0, 1]),
([100, 200, 300], [1000, 2000, 3000], 500, 5000.0, [1, 2], [0]),
([0.5, 1.5, 2.0], [1.0, 2.0, 3.0], 2.0, 3.0, [1, 0], [2]),
([], [], 10, 0.0, [], []),
([1, 2, 3], [1, 2, 3], 0, 0.0, [], [2, 1, 0]),
([0, 0, 0], [1, 2, 3], 0, 6.0, [0, 1, 2], []),
([1, 2, 3], [0, 0, 0], 6, 0.0, [], [2, 1, 0]),
]
def _run_knapsack_and_check(
self,
func,
memory,
runtime,
max_memory,
expected_runtime,
expected_saved,
expected_recomputable,
):
result_runtime, result_saved, result_recomputable = func(
memory, runtime, max_memory
)
self.assertEqual(result_runtime, expected_runtime)
self.assertEqual(sorted(result_saved), sorted(expected_saved))
self.assertEqual(sorted(result_recomputable), sorted(expected_recomputable))
def test_dp_knapsack(self):
for i, (
memory,
runtime,
max_memory,
expected_runtime,
expected_saved,
expected_recomputable,
) in enumerate(self.test_cases):
with self.subTest(f"dp_knapsack_case_{i}"):
self._run_knapsack_and_check(
dp_knapsack,
memory,
runtime,
max_memory,
expected_runtime,
expected_saved,
expected_recomputable,
)
def test_dp_knapsack_sliding_hirschberg(self):
for i, (
memory,
runtime,
max_memory,
expected_runtime,
expected_saved,
expected_recomputable,
) in enumerate(self.test_cases):
with self.subTest(f"dp_knapsack_sliding_hirschberg_case_{i}"):
self._run_knapsack_and_check(
dp_knapsack_sliding_hirschberg,
memory,
runtime,
max_memory,
expected_runtime,
expected_saved,
expected_recomputable,
)
if __name__ == "__main__":
run_tests()
| TestActivationCheckpointingKnapsack |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/dynamic.py | {
"start": 1561,
"end": 2371
} | class ____(WriteOnlyHistory[_T]):
def __init__(
self,
attr: _DynamicAttributeImpl,
state: InstanceState[_T],
passive: PassiveFlag,
apply_to: Optional[DynamicCollectionHistory[_T]] = None,
) -> None:
if apply_to:
coll = AppenderQuery(attr, state).autoflush(False)
self.unchanged_items = util.OrderedIdentitySet(coll)
self.added_items = apply_to.added_items
self.deleted_items = apply_to.deleted_items
self._reconcile_collection = True
else:
self.deleted_items = util.OrderedIdentitySet()
self.added_items = util.OrderedIdentitySet()
self.unchanged_items = util.OrderedIdentitySet()
self._reconcile_collection = False
| DynamicCollectionHistory |
python | pexpect__pexpect | tests/test_pxssh.py | {
"start": 882,
"end": 12126
} | class ____(SSHTestBase):
def test_fake_ssh(self):
ssh = pxssh.pxssh()
#ssh.logfile_read = sys.stdout # DEBUG
ssh.login('server', 'me', password='s3cret')
ssh.sendline('ping')
ssh.expect('pong', timeout=10)
assert ssh.prompt(timeout=10)
ssh.logout()
def test_wrong_pw(self):
ssh = pxssh.pxssh()
try:
ssh.login('server', 'me', password='wr0ng')
except pxssh.ExceptionPxssh:
pass
else:
assert False, 'Password should have been refused'
def test_failed_set_unique_prompt(self):
ssh = pxssh.pxssh()
ssh.set_unique_prompt = lambda: False
try:
ssh.login('server', 'me', password='s3cret',
auto_prompt_reset=True)
except pxssh.ExceptionPxssh:
pass
else:
assert False, 'should have raised exception, pxssh.ExceptionPxssh'
def test_connection_refused(self):
ssh = pxssh.pxssh()
try:
ssh.login('noserver', 'me', password='s3cret')
except pxssh.ExceptionPxssh:
pass
else:
assert False, 'should have raised exception, pxssh.ExceptionPxssh'
def test_ssh_tunnel_string(self):
ssh = pxssh.pxssh(debug_command_string=True)
tunnels = { 'local': ['2424:localhost:22'],'remote': ['2525:localhost:22'],
'dynamic': [8888] }
confirmation_strings = 0
confirmation_array = ['-R 2525:localhost:22','-L 2424:localhost:22','-D 8888']
string = ssh.login('server', 'me', password='s3cret', ssh_tunnels=tunnels)
for confirmation in confirmation_array:
if confirmation in string:
confirmation_strings+=1
if confirmation_strings!=len(confirmation_array):
assert False, 'String generated from tunneling is incorrect.'
def test_remote_ssh_tunnel_string(self):
ssh = pxssh.pxssh(debug_command_string=True)
tunnels = { 'local': ['2424:localhost:22'],'remote': ['2525:localhost:22'],
'dynamic': [8888] }
confirmation_strings = 0
confirmation_array = ['-R 2525:localhost:22','-L 2424:localhost:22','-D 8888']
string = ssh.login('server', 'me', password='s3cret', ssh_tunnels=tunnels, spawn_local_ssh=False)
for confirmation in confirmation_array:
if confirmation in string:
confirmation_strings+=1
if confirmation_strings!=len(confirmation_array):
assert False, 'String generated from remote tunneling is incorrect.'
def test_ssh_config_passing_string(self):
ssh = pxssh.pxssh(debug_command_string=True)
temp_file = tempfile.NamedTemporaryFile()
config_path = temp_file.name
string = ssh.login('server', 'me', password='s3cret', spawn_local_ssh=False, ssh_config=config_path)
if not '-F '+config_path in string:
assert False, 'String generated from SSH config passing is incorrect.'
def test_username_or_ssh_config(self):
try:
ssh = pxssh.pxssh(debug_command_string=True)
temp_file = tempfile.NamedTemporaryFile()
config_path = temp_file.name
string = ssh.login('server')
raise AssertionError('Should have failed due to missing username and missing ssh_config.')
except TypeError:
pass
def test_ssh_config_user(self):
ssh = pxssh.pxssh(debug_command_string=True)
temp_file = tempfile.NamedTemporaryFile()
config_path = temp_file.name
temp_file.write(b'HosT server\n'
b'UsEr me\n'
b'hOSt not-server\n')
temp_file.seek(0)
string = ssh.login('server', ssh_config=config_path)
def test_ssh_config_no_username_empty_config(self):
ssh = pxssh.pxssh(debug_command_string=True)
temp_file = tempfile.NamedTemporaryFile()
config_path = temp_file.name
try:
string = ssh.login('server', ssh_config=config_path)
raise AssertionError('Should have failed due to no Host.')
except TypeError:
pass
def test_ssh_config_wrong_Host(self):
ssh = pxssh.pxssh(debug_command_string=True)
temp_file = tempfile.NamedTemporaryFile()
config_path = temp_file.name
temp_file.write(b'Host not-server\n'
b'Host also-not-server\n')
temp_file.seek(0)
try:
string = ssh.login('server', ssh_config=config_path)
raise AssertionError('Should have failed due to no matching Host.')
except TypeError:
pass
def test_ssh_config_no_user(self):
ssh = pxssh.pxssh(debug_command_string=True)
temp_file = tempfile.NamedTemporaryFile()
config_path = temp_file.name
temp_file.write(b'Host server\n'
b'Host not-server\n')
temp_file.seek(0)
try:
string = ssh.login('server', ssh_config=config_path)
raise AssertionError('Should have failed due to no user.')
except TypeError:
pass
def test_ssh_config_empty_user(self):
ssh = pxssh.pxssh(debug_command_string=True)
temp_file = tempfile.NamedTemporaryFile()
config_path = temp_file.name
temp_file.write(b'Host server\n'
b'user \n'
b'Host not-server\n')
temp_file.seek(0)
try:
string = ssh.login('server', ssh_config=config_path)
raise AssertionError('Should have failed due to empty user.')
except TypeError:
pass
def test_ssh_key_string(self):
ssh = pxssh.pxssh(debug_command_string=True)
confirmation_strings = 0
confirmation_array = [' -A']
string = ssh.login('server', 'me', password='s3cret', ssh_key=True)
for confirmation in confirmation_array:
if confirmation in string:
confirmation_strings+=1
if confirmation_strings!=len(confirmation_array):
assert False, 'String generated from forcing the SSH agent sock is incorrect.'
confirmation_strings = 0
temp_file = tempfile.NamedTemporaryFile()
ssh_key = temp_file.name
confirmation_array = [' -i '+ssh_key]
string = ssh.login('server', 'me', password='s3cret', ssh_key=ssh_key)
for confirmation in confirmation_array:
if confirmation in string:
confirmation_strings+=1
if confirmation_strings!=len(confirmation_array):
assert False, 'String generated from adding an SSH key is incorrect.'
def test_custom_ssh_cmd_debug(self):
ssh = pxssh.pxssh(debug_command_string=True)
cipher_string = '-c aes128-ctr,aes192-ctr,aes256-ctr,arcfour256,arcfour128,' \
+ 'aes128-cbc,3des-cbc,blowfish-cbc,cast128-cbc,aes192-cbc,' \
+ 'aes256-cbc,arcfour'
confirmation_strings = 0
confirmation_array = [cipher_string, '-2']
string = ssh.login('server', 'me', password='s3cret', cmd='ssh ' + cipher_string + ' -2')
for confirmation in confirmation_array:
if confirmation in string:
confirmation_strings+=1
if confirmation_strings!=len(confirmation_array):
assert False, 'String generated for custom ssh client command is incorrect.'
def test_custom_ssh_cmd_debug(self):
ssh = pxssh.pxssh(debug_command_string=True)
cipher_string = '-c aes128-ctr,aes192-ctr,aes256-ctr,arcfour256,arcfour128,' \
+ 'aes128-cbc,3des-cbc,blowfish-cbc,cast128-cbc,aes192-cbc,' \
+ 'aes256-cbc,arcfour'
confirmation_strings = 0
confirmation_array = [cipher_string, '-2']
string = ssh.login('server', 'me', password='s3cret', cmd='ssh ' + cipher_string + ' -2')
for confirmation in confirmation_array:
if confirmation in string:
confirmation_strings+=1
if confirmation_strings!=len(confirmation_array):
assert False, 'String generated for custom ssh client command is incorrect.'
def test_failed_custom_ssh_cmd_debug(self):
ssh = pxssh.pxssh(debug_command_string=True)
cipher_string = '-c invalid_cipher'
confirmation_strings = 0
confirmation_array = [cipher_string, '-2']
string = ssh.login('server', 'me', password='s3cret', cmd='ssh ' + cipher_string + ' -2')
for confirmation in confirmation_array:
if confirmation in string:
confirmation_strings+=1
if confirmation_strings!=len(confirmation_array):
assert False, 'String generated for custom ssh client command is incorrect.'
def test_custom_ssh_cmd(self):
try:
ssh = pxssh.pxssh()
cipher_string = '-c aes128-ctr,aes192-ctr,aes256-ctr,arcfour256,arcfour128,' \
+ 'aes128-cbc,3des-cbc,blowfish-cbc,cast128-cbc,aes192-cbc,' \
+ 'aes256-cbc,arcfour'
result = ssh.login('server', 'me', password='s3cret', cmd='ssh ' + cipher_string + ' -2')
ssh.PROMPT = r'Closed connection'
ssh.sendline('exit')
ssh.prompt(timeout=5)
string = str(ssh.before) + str(ssh.after)
if 'Closed connection' not in string:
assert False, 'should have logged into Mock SSH client and exited'
except pxssh.ExceptionPxssh as e:
assert False, 'should not have raised exception, pxssh.ExceptionPxssh'
else:
pass
def test_failed_custom_ssh_cmd(self):
try:
ssh = pxssh.pxssh()
cipher_string = '-c invalid_cipher'
result = ssh.login('server', 'me', password='s3cret', cmd='ssh ' + cipher_string + ' -2')
ssh.PROMPT = r'Closed connection'
ssh.sendline('exit')
ssh.prompt(timeout=5)
string = str(ssh.before) + str(ssh.after)
if 'Closed connection' not in string:
assert False, 'should not have completed logging into Mock SSH client and exited'
except pxssh.ExceptionPxssh as e:
pass
else:
assert False, 'should have raised exception, pxssh.ExceptionPxssh'
def test_login_bash(self):
ssh = pxssh.pxssh()
result = ssh.login('server bash', 'me', password='s3cret')
ssh.sendline('ping')
ssh.expect('pong', timeout=10)
assert ssh.prompt(timeout=10)
ssh.logout()
def test_login_zsh(self):
ssh = pxssh.pxssh()
result = ssh.login('server zsh', 'me', password='s3cret')
ssh.sendline('ping')
ssh.expect('pong', timeout=10)
assert ssh.prompt(timeout=10)
ssh.logout()
def test_login_tcsh(self):
ssh = pxssh.pxssh()
result = ssh.login('server tcsh', 'me', password='s3cret')
ssh.sendline('ping')
ssh.expect('pong', timeout=10)
assert ssh.prompt(timeout=10)
ssh.logout()
if __name__ == '__main__':
unittest.main()
| PxsshTestCase |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_M.py | {
"start": 2352,
"end": 3571
} | class ____(Benchmark):
r"""
Meyer [1]_ objective function.
..[1] https://www.itl.nist.gov/div898/strd/nls/data/mgh10.shtml
TODO NIST regression standard
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([0., 100., 100.],
[1, 1000., 500.]))
self.global_optimum = [[5.6096364710e-3, 6.1813463463e3,
3.4522363462e2]]
self.fglob = 8.7945855171e1
self.a = asarray([3.478E+04, 2.861E+04, 2.365E+04, 1.963E+04, 1.637E+04,
1.372E+04, 1.154E+04, 9.744E+03, 8.261E+03, 7.030E+03,
6.005E+03, 5.147E+03, 4.427E+03, 3.820E+03, 3.307E+03,
2.872E+03])
self.b = asarray([5.000E+01, 5.500E+01, 6.000E+01, 6.500E+01, 7.000E+01,
7.500E+01, 8.000E+01, 8.500E+01, 9.000E+01, 9.500E+01,
1.000E+02, 1.050E+02, 1.100E+02, 1.150E+02, 1.200E+02,
1.250E+02])
def fun(self, x, *args):
self.nfev += 1
vec = x[0] * exp(x[1] / (self.b + x[2]))
return sum((self.a - vec) ** 2)
| Meyer |
python | lepture__authlib | authlib/oauth1/rfc5849/errors.py | {
"start": 1693,
"end": 1850
} | class ____(OAuth1Error):
error = "invalid_token"
description = 'Invalid or expired "oauth_token" in parameters'
status_code = 401
| InvalidTokenError |
python | realpython__materials | solid-principles-python/file_manager_srp.py | {
"start": 909,
"end": 1270
} | class ____:
def __init__(self, filename):
self.path = Path(filename)
def compress(self):
with ZipFile(self.path.with_suffix(".zip"), mode="w") as archive:
archive.write(self.path)
def decompress(self):
with ZipFile(self.path.with_suffix(".zip"), mode="r") as archive:
archive.extractall()
| ZipFileManager |
python | getsentry__sentry | tests/sentry/workflow_engine/buffer/test_batch_client.py | {
"start": 9276,
"end": 12033
} | class ____:
@pytest.fixture
def mock_buffer(self):
"""Create a mock buffer for testing."""
return Mock(spec=RedisHashSortedSetBuffer)
@pytest.fixture
def project_client(self, mock_buffer):
"""Create a ProjectDelayedWorkflowClient with mocked buffer."""
return DelayedWorkflowClient(buf=mock_buffer).for_project(123)
def test_filters_without_batch_key(self, project_client):
"""Test filters generation without batch key."""
filters = project_client._filters(batch_key=None)
assert filters == {"project_id": 123}
def test_filters_with_batch_key(self, project_client):
"""Test filters generation with batch key."""
filters = project_client._filters(batch_key="test-batch")
assert filters == {"project_id": 123, "batch_key": "test-batch"}
def test_delete_hash_fields(self, project_client, mock_buffer):
"""Test deleting specific fields from workflow hash."""
fields = ["field1", "field2"]
project_client.delete_hash_fields(batch_key=None, fields=fields)
from sentry.workflow_engine.models import Workflow
mock_buffer.delete_hash.assert_called_once_with(
model=Workflow, filters={"project_id": 123}, fields=fields
)
def test_get_hash_length(self, project_client, mock_buffer):
"""Test getting hash length."""
mock_buffer.get_hash_length.return_value = 5
result = project_client.get_hash_length(batch_key=None)
from sentry.workflow_engine.models import Workflow
mock_buffer.get_hash_length.assert_called_once_with(
model=Workflow, filters={"project_id": 123}
)
assert result == 5
def test_get_hash_data(self, project_client, mock_buffer):
"""Test fetching hash data."""
expected_data = {"key1": "value1", "key2": "value2"}
mock_buffer.get_hash.return_value = expected_data
result = project_client.get_hash_data(batch_key="test-batch")
from sentry.workflow_engine.models import Workflow
mock_buffer.get_hash.assert_called_once_with(
model=Workflow, filters={"project_id": 123, "batch_key": "test-batch"}
)
assert result == expected_data
def test_push_to_hash(self, project_client, mock_buffer):
"""Test pushing data to hash in bulk."""
data = {"key1": "value1", "key2": "value2"}
project_client.push_to_hash(batch_key="test-batch", data=data)
from sentry.workflow_engine.models import Workflow
mock_buffer.push_to_hash_bulk.assert_called_once_with(
model=Workflow, filters={"project_id": 123, "batch_key": "test-batch"}, data=data
)
| TestProjectDelayedWorkflowClient |
python | pydata__xarray | xarray/core/_aggregations.py | {
"start": 285932,
"end": 337564
} | class ____:
_obj: DataArray
def reduce(
self,
func: Callable[..., Any],
dim: Dims = None,
*,
axis: int | Sequence[int] | None = None,
keep_attrs: bool | None = None,
keepdims: bool = False,
**kwargs: Any,
) -> DataArray:
raise NotImplementedError()
def _flox_reduce(
self,
dim: Dims,
**kwargs: Any,
) -> DataArray:
raise NotImplementedError()
def count(
self,
dim: Dims = None,
*,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
"""
Reduce this DataArray's data by applying ``count`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``count`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : DataArray
New DataArray with ``count`` applied to its data and the
indicated dimension(s) removed
See Also
--------
pandas.DataFrame.count
dask.dataframe.DataFrame.count
DataArray.count
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> da
<xarray.DataArray (time: 6)> Size: 48B
array([ 1., 2., 3., 0., 2., nan])
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
>>> da.resample(time="3ME").count()
<xarray.DataArray (time: 3)> Size: 24B
array([1, 3, 1])
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
"""
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="count",
dim=dim,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.count,
dim=dim,
keep_attrs=keep_attrs,
**kwargs,
)
def all(
self,
dim: Dims = None,
*,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
"""
Reduce this DataArray's data by applying ``all`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``all`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : DataArray
New DataArray with ``all`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.all
dask.array.all
DataArray.all
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Examples
--------
>>> da = xr.DataArray(
... np.array([True, True, True, True, True, False], dtype=bool),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> da
<xarray.DataArray (time: 6)> Size: 6B
array([ True, True, True, True, True, False])
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
>>> da.resample(time="3ME").all()
<xarray.DataArray (time: 3)> Size: 3B
array([ True, True, False])
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
"""
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="all",
dim=dim,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.array_all,
dim=dim,
keep_attrs=keep_attrs,
**kwargs,
)
def any(
self,
dim: Dims = None,
*,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
"""
Reduce this DataArray's data by applying ``any`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``any`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : DataArray
New DataArray with ``any`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.any
dask.array.any
DataArray.any
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Examples
--------
>>> da = xr.DataArray(
... np.array([True, True, True, True, True, False], dtype=bool),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> da
<xarray.DataArray (time: 6)> Size: 6B
array([ True, True, True, True, True, False])
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
>>> da.resample(time="3ME").any()
<xarray.DataArray (time: 3)> Size: 3B
array([ True, True, True])
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
"""
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="any",
dim=dim,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.array_any,
dim=dim,
keep_attrs=keep_attrs,
**kwargs,
)
def max(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
"""
Reduce this DataArray's data by applying ``max`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``max`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : DataArray
New DataArray with ``max`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.max
dask.array.max
DataArray.max
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> da
<xarray.DataArray (time: 6)> Size: 48B
array([ 1., 2., 3., 0., 2., nan])
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
>>> da.resample(time="3ME").max()
<xarray.DataArray (time: 3)> Size: 24B
array([1., 3., 2.])
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Use ``skipna`` to control whether NaNs are ignored.
>>> da.resample(time="3ME").max(skipna=False)
<xarray.DataArray (time: 3)> Size: 24B
array([ 1., 3., nan])
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
"""
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="max",
dim=dim,
skipna=skipna,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.max,
dim=dim,
skipna=skipna,
keep_attrs=keep_attrs,
**kwargs,
)
def min(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
"""
Reduce this DataArray's data by applying ``min`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``min`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : DataArray
New DataArray with ``min`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.min
dask.array.min
DataArray.min
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> da
<xarray.DataArray (time: 6)> Size: 48B
array([ 1., 2., 3., 0., 2., nan])
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
>>> da.resample(time="3ME").min()
<xarray.DataArray (time: 3)> Size: 24B
array([1., 0., 2.])
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Use ``skipna`` to control whether NaNs are ignored.
>>> da.resample(time="3ME").min(skipna=False)
<xarray.DataArray (time: 3)> Size: 24B
array([ 1., 0., nan])
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
"""
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="min",
dim=dim,
skipna=skipna,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.min,
dim=dim,
skipna=skipna,
keep_attrs=keep_attrs,
**kwargs,
)
def mean(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
"""
Reduce this DataArray's data by applying ``mean`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``mean`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : DataArray
New DataArray with ``mean`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.mean
dask.array.mean
DataArray.mean
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> da
<xarray.DataArray (time: 6)> Size: 48B
array([ 1., 2., 3., 0., 2., nan])
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
>>> da.resample(time="3ME").mean()
<xarray.DataArray (time: 3)> Size: 24B
array([1. , 1.66666667, 2. ])
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Use ``skipna`` to control whether NaNs are ignored.
>>> da.resample(time="3ME").mean(skipna=False)
<xarray.DataArray (time: 3)> Size: 24B
array([1. , 1.66666667, nan])
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
"""
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="mean",
dim=dim,
skipna=skipna,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.mean,
dim=dim,
skipna=skipna,
keep_attrs=keep_attrs,
**kwargs,
)
def prod(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
min_count: int | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
"""
Reduce this DataArray's data by applying ``prod`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
min_count : int or None, optional
The required number of valid values to perform the operation. If
fewer than min_count non-NA values are present the result will be
NA. Only used if skipna is set to True or defaults to True for the
array's dtype. Changed in version 0.17.0: if specified on an integer
array and skipna=True, the result will be a float array.
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``prod`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : DataArray
New DataArray with ``prod`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.prod
dask.array.prod
DataArray.prod
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> da
<xarray.DataArray (time: 6)> Size: 48B
array([ 1., 2., 3., 0., 2., nan])
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
>>> da.resample(time="3ME").prod()
<xarray.DataArray (time: 3)> Size: 24B
array([1., 0., 2.])
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Use ``skipna`` to control whether NaNs are ignored.
>>> da.resample(time="3ME").prod(skipna=False)
<xarray.DataArray (time: 3)> Size: 24B
array([ 1., 0., nan])
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Specify ``min_count`` for finer control over when NaNs are ignored.
>>> da.resample(time="3ME").prod(skipna=True, min_count=2)
<xarray.DataArray (time: 3)> Size: 24B
array([nan, 0., nan])
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
"""
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="prod",
dim=dim,
skipna=skipna,
min_count=min_count,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.prod,
dim=dim,
skipna=skipna,
min_count=min_count,
keep_attrs=keep_attrs,
**kwargs,
)
def sum(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
min_count: int | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
"""
Reduce this DataArray's data by applying ``sum`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
min_count : int or None, optional
The required number of valid values to perform the operation. If
fewer than min_count non-NA values are present the result will be
NA. Only used if skipna is set to True or defaults to True for the
array's dtype. Changed in version 0.17.0: if specified on an integer
array and skipna=True, the result will be a float array.
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``sum`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : DataArray
New DataArray with ``sum`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.sum
dask.array.sum
DataArray.sum
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> da
<xarray.DataArray (time: 6)> Size: 48B
array([ 1., 2., 3., 0., 2., nan])
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
>>> da.resample(time="3ME").sum()
<xarray.DataArray (time: 3)> Size: 24B
array([1., 5., 2.])
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Use ``skipna`` to control whether NaNs are ignored.
>>> da.resample(time="3ME").sum(skipna=False)
<xarray.DataArray (time: 3)> Size: 24B
array([ 1., 5., nan])
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Specify ``min_count`` for finer control over when NaNs are ignored.
>>> da.resample(time="3ME").sum(skipna=True, min_count=2)
<xarray.DataArray (time: 3)> Size: 24B
array([nan, 5., nan])
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
"""
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="sum",
dim=dim,
skipna=skipna,
min_count=min_count,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.sum,
dim=dim,
skipna=skipna,
min_count=min_count,
keep_attrs=keep_attrs,
**kwargs,
)
def std(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
ddof: int = 0,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
"""
Reduce this DataArray's data by applying ``std`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
ddof : int, default: 0
“Delta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``,
where ``N`` represents the number of elements.
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``std`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : DataArray
New DataArray with ``std`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.std
dask.array.std
DataArray.std
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> da
<xarray.DataArray (time: 6)> Size: 48B
array([ 1., 2., 3., 0., 2., nan])
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
>>> da.resample(time="3ME").std()
<xarray.DataArray (time: 3)> Size: 24B
array([0. , 1.24721913, 0. ])
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Use ``skipna`` to control whether NaNs are ignored.
>>> da.resample(time="3ME").std(skipna=False)
<xarray.DataArray (time: 3)> Size: 24B
array([0. , 1.24721913, nan])
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Specify ``ddof=1`` for an unbiased estimate.
>>> da.resample(time="3ME").std(skipna=True, ddof=1)
<xarray.DataArray (time: 3)> Size: 24B
array([ nan, 1.52752523, nan])
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
"""
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="std",
dim=dim,
skipna=skipna,
ddof=ddof,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.std,
dim=dim,
skipna=skipna,
ddof=ddof,
keep_attrs=keep_attrs,
**kwargs,
)
def var(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
ddof: int = 0,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
"""
Reduce this DataArray's data by applying ``var`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
ddof : int, default: 0
“Delta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``,
where ``N`` represents the number of elements.
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``var`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : DataArray
New DataArray with ``var`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.var
dask.array.var
DataArray.var
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> da
<xarray.DataArray (time: 6)> Size: 48B
array([ 1., 2., 3., 0., 2., nan])
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
>>> da.resample(time="3ME").var()
<xarray.DataArray (time: 3)> Size: 24B
array([0. , 1.55555556, 0. ])
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Use ``skipna`` to control whether NaNs are ignored.
>>> da.resample(time="3ME").var(skipna=False)
<xarray.DataArray (time: 3)> Size: 24B
array([0. , 1.55555556, nan])
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Specify ``ddof=1`` for an unbiased estimate.
>>> da.resample(time="3ME").var(skipna=True, ddof=1)
<xarray.DataArray (time: 3)> Size: 24B
array([ nan, 2.33333333, nan])
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
"""
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="var",
dim=dim,
skipna=skipna,
ddof=ddof,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.var,
dim=dim,
skipna=skipna,
ddof=ddof,
keep_attrs=keep_attrs,
**kwargs,
)
def median(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
"""
Reduce this DataArray's data by applying ``median`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``median`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : DataArray
New DataArray with ``median`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.median
dask.array.median
DataArray.median
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> da
<xarray.DataArray (time: 6)> Size: 48B
array([ 1., 2., 3., 0., 2., nan])
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
>>> da.resample(time="3ME").median()
<xarray.DataArray (time: 3)> Size: 24B
array([1., 2., 2.])
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Use ``skipna`` to control whether NaNs are ignored.
>>> da.resample(time="3ME").median(skipna=False)
<xarray.DataArray (time: 3)> Size: 24B
array([ 1., 2., nan])
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
"""
return self.reduce(
duck_array_ops.median,
dim=dim,
skipna=skipna,
keep_attrs=keep_attrs,
**kwargs,
)
def cumsum(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
"""
Reduce this DataArray's data by applying ``cumsum`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``cumsum``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``cumsum`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : DataArray
New DataArray with ``cumsum`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.cumsum
dask.array.cumsum
DataArray.cumsum
DataArray.cumulative
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Non-numeric variables will be removed prior to reducing.
Note that the methods on the ``cumulative`` method are more performant (with numbagg installed)
and better supported. ``cumsum`` and ``cumprod`` may be deprecated
in the future.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> da
<xarray.DataArray (time: 6)> Size: 48B
array([ 1., 2., 3., 0., 2., nan])
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
>>> da.resample(time="3ME").cumsum()
<xarray.DataArray (time: 6)> Size: 48B
array([1., 2., 5., 5., 2., 2.])
Coordinates:
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Dimensions without coordinates: time
Use ``skipna`` to control whether NaNs are ignored.
>>> da.resample(time="3ME").cumsum(skipna=False)
<xarray.DataArray (time: 6)> Size: 48B
array([ 1., 2., 5., 5., 2., nan])
Coordinates:
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Dimensions without coordinates: time
"""
return self.reduce(
duck_array_ops.cumsum,
dim=dim,
skipna=skipna,
keep_attrs=keep_attrs,
**kwargs,
)
def cumprod(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
"""
Reduce this DataArray's data by applying ``cumprod`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``cumprod``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``cumprod`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : DataArray
New DataArray with ``cumprod`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.cumprod
dask.array.cumprod
DataArray.cumprod
DataArray.cumulative
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Non-numeric variables will be removed prior to reducing.
Note that the methods on the ``cumulative`` method are more performant (with numbagg installed)
and better supported. ``cumsum`` and ``cumprod`` may be deprecated
in the future.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> da
<xarray.DataArray (time: 6)> Size: 48B
array([ 1., 2., 3., 0., 2., nan])
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
>>> da.resample(time="3ME").cumprod()
<xarray.DataArray (time: 6)> Size: 48B
array([1., 2., 6., 0., 2., 2.])
Coordinates:
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Dimensions without coordinates: time
Use ``skipna`` to control whether NaNs are ignored.
>>> da.resample(time="3ME").cumprod(skipna=False)
<xarray.DataArray (time: 6)> Size: 48B
array([ 1., 2., 6., 0., 2., nan])
Coordinates:
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Dimensions without coordinates: time
"""
return self.reduce(
duck_array_ops.cumprod,
dim=dim,
skipna=skipna,
keep_attrs=keep_attrs,
**kwargs,
)
| DataArrayResampleAggregations |
python | pytest-dev__pytest-asyncio | pytest_asyncio/plugin.py | {
"start": 16909,
"end": 17277
} | class ____(PytestAsyncioFunction):
"""
Pytest item that is a coroutine or an asynchronous generator
decorated with staticmethod
"""
@staticmethod
def _can_substitute(item: Function) -> bool:
func = item.obj
return isinstance(func, staticmethod) and _is_coroutine_or_asyncgen(
func.__func__
)
| AsyncStaticMethod |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/publish/pipeline.py | {
"start": 4434,
"end": 6630
} | class ____(Step):
context: PublishConnectorContext
title = "Upload connector dependencies list to GCS."
key_prefix = "connector_dependencies"
async def _run(self, built_containers_per_platform: Dict[Platform, Container]) -> StepResult:
assert self.context.connector.language in [
ConnectorLanguage.PYTHON,
ConnectorLanguage.LOW_CODE,
], "This step can only run for Python connectors."
built_container = built_containers_per_platform[LOCAL_BUILD_PLATFORM]
pip_freeze_output = await built_container.with_exec(["pip", "freeze"]).stdout()
dependencies = [
{"package_name": line.split("==")[0], "version": line.split("==")[1]} for line in pip_freeze_output.splitlines() if "==" in line
]
connector_technical_name = self.context.connector.technical_name
connector_version = self.context.metadata["dockerImageTag"]
dependencies_metadata = ConnectorDependenciesMetadata(
connector_technical_name=connector_technical_name,
connector_repository=self.context.metadata["dockerRepository"],
connector_version=connector_version,
connector_definition_id=self.context.metadata["definitionId"],
dependencies=dependencies,
).json()
file = (
(await self.context.get_connector_dir())
.with_new_file("dependencies.json", contents=dependencies_metadata)
.file("dependencies.json")
)
key = f"{self.key_prefix}/{connector_technical_name}/{connector_version}/dependencies.json"
exit_code, stdout, stderr = await upload_to_gcs(
self.context.dagger_client,
file,
key,
self.context.metadata_bucket_name,
self.context.metadata_service_gcs_credentials,
flags=['--cache-control="no-cache"'],
)
if exit_code != 0:
return StepResult(step=self, status=StepStatus.FAILURE, stdout=stdout, stderr=stderr)
return StepResult(step=self, status=StepStatus.SUCCESS, stdout="Uploaded connector dependencies to metadata service bucket.")
| UploadDependenciesToMetadataService |
python | tensorflow__tensorflow | tensorflow/python/debug/cli/debugger_cli_common_test.py | {
"start": 22465,
"end": 25206
} | class ____(test_util.TensorFlowTestCase):
def setUp(self):
self._orig_screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"])
def testRegexFindWithoutExistingFontAttrSegs(self):
new_screen_output = debugger_cli_common.regex_find(self._orig_screen_output,
"are", "yellow")
self.assertEqual(2, len(new_screen_output.font_attr_segs))
self.assertEqual([(6, 9, "yellow")], new_screen_output.font_attr_segs[0])
self.assertEqual([(8, 11, "yellow")], new_screen_output.font_attr_segs[1])
# Check field in annotations carrying a list of matching line indices.
self.assertEqual([0, 1], new_screen_output.annotations[
debugger_cli_common.REGEX_MATCH_LINES_KEY])
def testRegexFindWithExistingFontAttrSegs(self):
# Add a font attribute segment first.
self._orig_screen_output.font_attr_segs[0] = [(9, 12, "red")]
self.assertEqual(1, len(self._orig_screen_output.font_attr_segs))
new_screen_output = debugger_cli_common.regex_find(self._orig_screen_output,
"are", "yellow")
self.assertEqual(2, len(new_screen_output.font_attr_segs))
self.assertEqual([(6, 9, "yellow"), (9, 12, "red")],
new_screen_output.font_attr_segs[0])
self.assertEqual([0, 1], new_screen_output.annotations[
debugger_cli_common.REGEX_MATCH_LINES_KEY])
def testRegexFindWithNoMatches(self):
new_screen_output = debugger_cli_common.regex_find(self._orig_screen_output,
"infrared", "yellow")
self.assertEqual({}, new_screen_output.font_attr_segs)
self.assertEqual([], new_screen_output.annotations[
debugger_cli_common.REGEX_MATCH_LINES_KEY])
def testInvalidRegex(self):
with self.assertRaisesRegex(ValueError, "Invalid regular expression"):
debugger_cli_common.regex_find(self._orig_screen_output, "[", "yellow")
def testRegexFindOnPrependedLinesWorks(self):
rich_lines = debugger_cli_common.RichTextLines(["Violets are blue"])
rich_lines.prepend(["Roses are red"])
searched_rich_lines = debugger_cli_common.regex_find(
rich_lines, "red", "bold")
self.assertEqual(
{0: [(10, 13, "bold")]}, searched_rich_lines.font_attr_segs)
rich_lines = debugger_cli_common.RichTextLines(["Violets are blue"])
rich_lines.prepend(["A poem"], font_attr_segs=[(0, 1, "underline")])
searched_rich_lines = debugger_cli_common.regex_find(
rich_lines, "poem", "italic")
self.assertEqual(
{0: [(0, 1, "underline"), (2, 6, "italic")]},
searched_rich_lines.font_attr_segs)
| RegexFindTest |
python | pdm-project__pdm | src/pdm/cli/commands/publish/repository.py | {
"start": 1110,
"end": 6965
} | class ____:
def __init__(self, project: Project, config: RepositoryConfig) -> None:
self.url = cast(str, config.url)
self.session = project.environment._build_session([config])
self._credentials_to_save: tuple[str, str, str] | None = None
self.ui = project.core.ui
username, password = self._ensure_credentials(config.username, config.password)
self.session.auth = (username, password)
def _ensure_credentials(self, username: str | None, password: str | None) -> tuple[str, str]:
from pdm.models.auth import keyring
parsed_url = urlparse(self.url)
netloc = parsed_url.netloc
if username and password:
return username, password
if password:
return "__token__", password
if parsed_url.username is not None and parsed_url.password is not None:
return parsed_url.username, parsed_url.password
if keyring.enabled:
auth = keyring.get_auth_info(self.url, username)
if auth is not None:
return auth
token = self._get_pypi_token_via_oidc()
if token is not None:
return "__token__", token
if not termui.is_interactive():
raise PdmUsageError("Username and password are required")
username, password, save = self._prompt_for_credentials(netloc, username)
if save and keyring.enabled and termui.confirm("Save credentials to keyring?"):
self._credentials_to_save = (netloc, username, password)
return username, password
def _get_pypi_token_via_oidc(self) -> str | None:
self.ui.echo("Getting PyPI token via OIDC...")
try:
parsed_url = urlparse(self.url)
audience_url = urlunparse(parsed_url._replace(path="/_/oidc/audience"))
resp = self.session.get(audience_url)
resp.raise_for_status()
audience = cast(str, resp.json()["audience"])
oidc_token = detect_credential(audience)
if oidc_token is None:
self.ui.echo(
"This platform is not supported for trusted publishing via OIDC",
err=True,
)
return None
mint_token_url = urlunparse(parsed_url._replace(path="/_/oidc/mint-token"))
resp = self.session.post(mint_token_url, json={"token": oidc_token})
resp.raise_for_status()
token = resp.json()["token"]
except AmbientCredentialError as e:
self.ui.echo(f"Unable to detect OIDC token for CI platform: {e}", err=True)
return None
except httpx.HTTPError:
self.ui.echo("Failed to get PyPI token via OIDC", err=True)
return None
else:
if os.getenv("GITHUB_ACTIONS"):
# tell GitHub Actions to mask the token in any console logs
print(f"::add-mask::{token}")
return token
def _prompt_for_credentials(self, service: str, username: str | None) -> tuple[str, str, bool]:
from pdm.models.auth import keyring
if keyring.enabled:
cred = keyring.get_auth_info(service, username)
if cred is not None:
return cred[0], cred[1], False
if username is None:
username = termui.ask("[primary]Username")
password = termui.ask("[primary]Password", password=True)
return username, password, True
def _save_credentials(self, service: str, username: str, password: str) -> None:
from pdm.models.auth import keyring
self.ui.echo("Saving credentials to keyring")
keyring.save_auth_info(service, username, password)
def get_release_urls(self, packages: list[PackageFile]) -> Iterable[str]:
if self.url.startswith(DEFAULT_REPOSITORIES["pypi"].rstrip("/")):
base = "https://pypi.org/"
elif self.url.startswith(DEFAULT_REPOSITORIES["testpypi"].rstrip("/")):
base = "https://test.pypi.org/"
else:
return set()
return {f"{base}project/{package.metadata['name']}/{package.metadata['version']}/" for package in packages}
def upload(self, package: PackageFile) -> httpx.Response:
data_fields = package.metadata_dict
data_fields.update(
{
":action": "file_upload",
"protocol_version": "1",
}
)
with self.ui.make_progress(
" [progress.percentage]{task.percentage:>3.0f}%",
BarColumn(),
DownloadColumn(),
"•",
TimeRemainingColumn(
compact=True,
elapsed_when_finished=True,
),
"•",
TransferSpeedColumn(),
) as progress:
progress.console.print(f"Uploading [success]{package.base_filename}")
with open(package.filename, "rb") as fp:
file_fields = [("content", (package.base_filename, fp, "application/octet-stream"))]
def on_upload(monitor: CallbackWrapperStream) -> None:
progress.update(job, completed=monitor.bytes_read)
request = self.session.build_request("POST", self.url, data=data_fields, files=file_fields)
stream = cast("MultipartStream", request.stream)
request.stream = CallbackWrapperStream(stream, on_upload)
job = progress.add_task("", total=stream.get_content_length())
resp = self.session.send(request, follow_redirects=False)
if not resp.is_error and self._credentials_to_save is not None:
self._save_credentials(*self._credentials_to_save)
self._credentials_to_save = None
return resp
| Repository |
python | walkccc__LeetCode | solutions/3440. Reschedule Meetings for Maximum Free Time II/3440.py | {
"start": 0,
"end": 1026
} | class ____:
def maxFreeTime(
self,
eventTime: int,
startTime: list[int],
endTime: list[int]
) -> int:
n = len(startTime)
gaps = ([startTime[0]] +
[startTime[i] - endTime[i - 1] for i in range(1, len(startTime))] +
[eventTime - endTime[-1]])
ans = 0
maxLeft = [gaps[0]] + [0] * n # maxLeft[i] := max(gaps[0..i])
maxRight = [0] * n + [gaps[n]] # maxRight[i] := max(gaps[i..n])
for i in range(1, n + 1):
maxLeft[i] = max(gaps[i], maxLeft[i - 1])
for i in range(n - 1, -1, -1):
maxRight[i] = max(gaps[i], maxRight[i + 1])
for i, (start, end) in enumerate(zip(startTime, endTime)):
currMeetingTime = end - start
adjacentGapsSum = gaps[i] + gaps[i + 1]
canMoveMeeting = currMeetingTime <= max(
maxLeft[i - 1] if i > 0 else 0,
maxRight[i + 2] if i + 2 < n + 1 else 0
)
ans = max(ans, adjacentGapsSum +
(currMeetingTime if canMoveMeeting else 0))
return ans
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.