language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | joke2k__faker | tests/providers/test_person.py | {
"start": 46666,
"end": 47229
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("ne_NP")
Faker.seed(0)
def test_names(self):
name = self.fake.name().split()
assert all(isinstance(n, str) for n in name)
# name should always be 2-3 words. If 3, first word
# should be a prefix.
assert name[-2] in NeProvider.first_names
assert name[-1] in NeProvider.last_names
prefixes = NeProvider.prefixes_male + NeProvider.prefixes_female
if len(name) == 3:
assert name[0] in prefixes
| TestNeNP |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0142_update_dj_simple_history.py | {
"start": 149,
"end": 1428
} | class ____(migrations.Migration):
safe = Safe.before_deploy()
dependencies = [
("projects", "0141_create_addonsconfig"),
]
operations = [
migrations.AlterModelOptions(
name="historicaladdonsconfig",
options={
"get_latest_by": ("history_date", "history_id"),
"ordering": ("-history_date", "-history_id"),
"verbose_name": "historical addons config",
"verbose_name_plural": "historical addons configs",
},
),
migrations.AlterModelOptions(
name="historicalproject",
options={
"get_latest_by": ("history_date", "history_id"),
"ordering": ("-history_date", "-history_id"),
"verbose_name": "historical project",
"verbose_name_plural": "historical projects",
},
),
migrations.AlterField(
model_name="historicaladdonsconfig",
name="history_date",
field=models.DateTimeField(db_index=True),
),
migrations.AlterField(
model_name="historicalproject",
name="history_date",
field=models.DateTimeField(db_index=True),
),
]
| Migration |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/experimental/io.py | {
"start": 11342,
"end": 18596
} | class ____(IR):
"""Sink a dataframe in streaming mode."""
__slots__ = ("executor_options", "sink")
_non_child = ("schema", "sink", "executor_options")
sink: Sink
executor_options: StreamingExecutor
def __init__(
self,
schema: Schema,
sink: Sink,
executor_options: StreamingExecutor,
df: IR,
):
self.schema = schema
self.sink = sink
self.executor_options = executor_options
self.children = (df,)
def get_hashable(self) -> Hashable:
"""Hashable representation of the node."""
return (type(self), self.sink, *self.children)
@lower_ir_node.register(Sink)
def _(
ir: Sink, rec: LowerIRTransformer
) -> tuple[StreamingSink, MutableMapping[IR, PartitionInfo]]:
child, partition_info = rec(ir.children[0])
executor_options = rec.state["config_options"].executor
assert executor_options.name == "streaming", (
"'in-memory' executor not supported in 'lower_ir_node'"
)
# TODO: Support cloud storage
if Path(ir.path).exists() and executor_options.sink_to_directory:
raise NotImplementedError(
"Writing to an existing path is not supported when sinking "
"to a directory. If you are using the 'distributed' scheduler, "
"please remove the target directory before calling 'collect'. "
)
new_node = StreamingSink(
ir.schema,
ir.reconstruct([child]),
executor_options,
child,
)
partition_info[new_node] = partition_info[child]
return new_node, partition_info
def _prepare_sink_directory(path: str) -> None:
"""Prepare for a multi-partition sink."""
# TODO: Support cloud storage
Path(path).mkdir(parents=True)
def _sink_to_directory(
schema: Schema,
kind: str,
path: str,
parquet_options: ParquetOptions,
options: dict[str, Any],
df: DataFrame,
ready: None,
context: IRExecutionContext,
) -> DataFrame:
"""Sink a partition to a new file."""
return Sink.do_evaluate(
schema, kind, path, parquet_options, options, df, context=context
)
def _sink_to_parquet_file(
path: str,
options: dict[str, Any],
finalize: bool, # noqa: FBT001
writer: plc.io.parquet.ChunkedParquetWriter | None,
df: DataFrame,
) -> plc.io.parquet.ChunkedParquetWriter | DataFrame:
"""Sink a partition to an open Parquet file."""
# Set up a new chunked Parquet writer if necessary.
if writer is None:
metadata = Sink._make_parquet_metadata(df)
sink = plc.io.types.SinkInfo([path])
builder = Sink._apply_parquet_writer_options(
plc.io.parquet.ChunkedParquetWriterOptions.builder(sink), options
)
writer_options = builder.metadata(metadata).build()
writer = plc.io.parquet.ChunkedParquetWriter.from_options(
writer_options, stream=df.stream
)
# Append to the open Parquet file.
assert isinstance(writer, plc.io.parquet.ChunkedParquetWriter), (
"ChunkedParquetWriter is required."
)
writer.write(df.table)
# Finalize or return active writer.
if finalize:
writer.close([])
return df
else:
return writer
def _sink_to_file(
kind: str,
path: str,
options: dict[str, Any],
finalize: bool, # noqa: FBT001
writer_state: Any,
df: DataFrame,
) -> Any:
"""Sink a partition to an open file."""
if kind == "Parquet":
# Parquet writer will pass along a
# ChunkedParquetWriter "writer state".
return _sink_to_parquet_file(
path,
options,
finalize,
writer_state,
df,
)
elif kind == "Csv":
use_options = options.copy()
if writer_state is None:
mode = "wb"
else:
mode = "ab"
use_options["include_header"] = False
with Path.open(Path(path), mode) as f:
# Path.open returns IO[Any] but SinkInfo needs more specific IO types
sink = plc.io.types.SinkInfo([f]) # type: ignore[arg-type]
Sink._write_csv(sink, use_options, df)
elif kind == "Json":
mode = "wb" if writer_state is None else "ab"
with Path.open(Path(path), mode) as f:
# Path.open returns IO[Any] but SinkInfo needs more specific IO types
sink = plc.io.types.SinkInfo([f]) # type: ignore[arg-type]
Sink._write_json(sink, df)
else: # pragma: no cover; Shouldn't get here.
raise NotImplementedError(f"{kind} not yet supported in _sink_to_file")
# Default return type is bool | DataFrame.
# We only return a DataFrame for the final sink task.
# The other tasks return a "ready" signal of True.
return df if finalize else True
def _file_sink_graph(
ir: StreamingSink,
partition_info: MutableMapping[IR, PartitionInfo],
context: IRExecutionContext,
) -> MutableMapping[Any, Any]:
"""Sink to a single file."""
name = get_key_name(ir)
count = partition_info[ir].count
child_name = get_key_name(ir.children[0])
sink = ir.sink
if count == 1:
return {
(name, 0): (
partial(sink.do_evaluate, context=context),
*sink._non_child_args,
(child_name, 0),
)
}
sink_name = get_key_name(sink)
graph: MutableMapping[Any, Any] = {
(sink_name, i): (
_sink_to_file,
sink.kind,
sink.path,
sink.options,
i == count - 1, # Whether to finalize
None if i == 0 else (sink_name, i - 1), # Writer state
(child_name, i),
)
for i in range(count)
}
# Make sure final tasks point to empty DataFrame output
graph.update({(name, i): (sink_name, count - 1) for i in range(count)})
return graph
def _directory_sink_graph(
ir: StreamingSink,
partition_info: MutableMapping[IR, PartitionInfo],
context: IRExecutionContext,
) -> MutableMapping[Any, Any]:
"""Sink to a directory of files."""
name = get_key_name(ir)
count = partition_info[ir].count
child_name = get_key_name(ir.children[0])
sink = ir.sink
setup_name = f"setup-{name}"
suffix = sink.kind.lower()
width = math.ceil(math.log10(count))
graph: MutableMapping[Any, Any] = {
(name, i): (
_sink_to_directory,
sink.schema,
sink.kind,
f"{sink.path}/part.{str(i).zfill(width)}.{suffix}",
sink.parquet_options,
sink.options,
(child_name, i),
setup_name,
context,
)
for i in range(count)
}
graph[setup_name] = (_prepare_sink_directory, sink.path)
return graph
@generate_ir_tasks.register(StreamingSink)
def _(
ir: StreamingSink,
partition_info: MutableMapping[IR, PartitionInfo],
context: IRExecutionContext,
) -> MutableMapping[Any, Any]:
if ir.executor_options.sink_to_directory:
return _directory_sink_graph(ir, partition_info, context=context)
else:
return _file_sink_graph(ir, partition_info, context=context)
| StreamingSink |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_gap02.py | {
"start": 315,
"end": 1569
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_gap02.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file with bar gap/overlap."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [45470464, 45472000]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
"overlap": -100,
"gap": 0,
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | readthedocs__readthedocs.org | readthedocs/projects/forms.py | {
"start": 6621,
"end": 6728
} | class ____(forms.Form):
"""Get the import backend."""
backend = forms.CharField()
| ProjectBackendForm |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 33717,
"end": 33944
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("ONE_DAY", "ONE_MONTH", "ONE_WEEK", "SIX_MONTHS", "THREE_DAYS")
| RepositoryInteractionLimitExpiry |
python | doocs__leetcode | lcof/面试题43. 1~n整数中1出现的次数/Solution.py | {
"start": 0,
"end": 473
} | class ____:
def countDigitOne(self, n: int) -> int:
@cache
def dfs(pos, cnt, limit):
if pos < 0:
return cnt
up = a[pos] if limit else 9
ans = 0
for i in range(up + 1):
ans += dfs(pos - 1, cnt + (i == 1), limit and i == up)
return ans
a = []
while n:
a.append(n % 10)
n //= 10
return dfs(len(a) - 1, 0, True)
| Solution |
python | automl__auto-sklearn | autosklearn/ensembles/abstract_ensemble.py | {
"start": 4468,
"end": 4916
} | class ____(AbstractEnsemble):
@property
@abstractmethod
def pareto_set(self) -> Sequence[AbstractEnsemble]:
"""Get a sequence on ensembles that are on the pareto front
Raises
------
SklearnNotFittedError
If ``fit`` has not been called and the pareto set does not exist yet
Returns
-------
Sequence[AbstractEnsemble]
"""
...
| AbstractMultiObjectiveEnsemble |
python | kamyu104__LeetCode-Solutions | Python/restore-ip-addresses.py | {
"start": 55,
"end": 1019
} | class ____(object):
# @param s, a string
# @return a list of strings
def restoreIpAddresses(self, s):
result = []
self.restoreIpAddressesRecur(result, s, 0, "", 0)
return result
def restoreIpAddressesRecur(self, result, s, start, current, dots):
# pruning to improve performance
if (4 - dots) * 3 < len(s) - start or (4 - dots) > len(s) - start:
return
if start == len(s) and dots == 4:
result.append(current[:-1])
else:
for i in xrange(start, start + 3):
if len(s) > i and self.isValid(s[start:i + 1]):
current += s[start:i + 1] + '.'
self.restoreIpAddressesRecur(result, s, i + 1, current, dots + 1)
current = current[:-(i - start + 2)]
def isValid(self, s):
if len(s) == 0 or (s[0] == '0' and s != "0"):
return False
return int(s) < 256
| Solution |
python | python-openxml__python-docx | src/docx/oxml/xmlchemy.py | {
"start": 17317,
"end": 18354
} | class ____(_BaseChildElement):
"""Defines a required child element for MetaOxmlElement."""
def __init__(self, nsptagname: str):
super(OneAndOnlyOne, self).__init__(nsptagname, ())
def populate_class_members(self, element_cls: MetaOxmlElement, prop_name: str) -> None:
"""Add the appropriate methods to `element_cls`."""
super(OneAndOnlyOne, self).populate_class_members(element_cls, prop_name)
self._add_getter()
@property
def _getter(self):
"""Return a function object suitable for the "get" side of the property
descriptor."""
def get_child_element(obj: BaseOxmlElement):
child = obj.find(qn(self._nsptagname))
if child is None:
raise InvalidXmlError(
"required ``<%s>`` child element not present" % self._nsptagname
)
return child
get_child_element.__doc__ = "Required ``<%s>`` child element." % self._nsptagname
return get_child_element
| OneAndOnlyOne |
python | kamyu104__LeetCode-Solutions | Python/check-if-there-is-a-valid-parentheses-string-path.py | {
"start": 83,
"end": 626
} | class ____(object):
def hasValidPath(self, grid):
"""
:type grid: List[List[str]]
:rtype: bool
"""
if (len(grid)+len(grid[0])-1)%2:
return False
dp = [0]*(len(grid[0])+1)
for i in xrange(len(grid)):
dp[0] = int(not i)
for j in xrange(len(grid[0])):
dp[j+1] = (dp[j]|dp[j+1])<<1 if grid[i][j] == '(' else (dp[j]|dp[j+1])>>1
return dp[-1]&1
# Time: O(m * n)
# Space: O(n)
# dp, optimized from solution1 (wrong answer)
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/_typing.py | {
"start": 3027,
"end": 13260
} | class ____(Protocol):
"""protocol for Engine/Connection-like objects that have dialect
attribute.
"""
@property
def dialect(self) -> Dialect: ...
# match column types that are not ORM entities
_NOT_ENTITY = TypeVar(
"_NOT_ENTITY",
int,
str,
bool,
"datetime",
"date",
"time",
"timedelta",
"UUID",
float,
"Decimal",
)
_StarOrOne = Literal["*", 1]
_MAYBE_ENTITY = TypeVar(
"_MAYBE_ENTITY",
roles.ColumnsClauseRole,
_StarOrOne,
Type[Any],
Inspectable[_HasClauseElement[Any]],
_HasClauseElement[Any],
)
# convention:
# XYZArgument - something that the end user is passing to a public API method
# XYZElement - the internal representation that we use for the thing.
# the coercions system is responsible for converting from XYZArgument to
# XYZElement.
_TextCoercedExpressionArgument = Union[
str,
"TextClause",
"ColumnElement[_T]",
_HasClauseElement[_T],
roles.ExpressionElementRole[_T],
]
_ColumnsClauseArgument = Union[
roles.TypedColumnsClauseRole[_T],
roles.ColumnsClauseRole,
"SQLCoreOperations[_T]",
_StarOrOne,
Type[_T],
Inspectable[_HasClauseElement[_T]],
_HasClauseElement[_T],
]
"""open-ended SELECT columns clause argument.
Includes column expressions, tables, ORM mapped entities, a few literal values.
This type is used for lists of columns / entities to be returned in result
sets; select(...), insert().returning(...), etc.
"""
_TypedColumnClauseArgument = Union[
roles.TypedColumnsClauseRole[_T],
"SQLCoreOperations[_T]",
Type[_T],
]
_T0 = TypeVar("_T0", bound=Any)
_T1 = TypeVar("_T1", bound=Any)
_T2 = TypeVar("_T2", bound=Any)
_T3 = TypeVar("_T3", bound=Any)
_T4 = TypeVar("_T4", bound=Any)
_T5 = TypeVar("_T5", bound=Any)
_T6 = TypeVar("_T6", bound=Any)
_T7 = TypeVar("_T7", bound=Any)
_T8 = TypeVar("_T8", bound=Any)
_T9 = TypeVar("_T9", bound=Any)
_ColumnExpressionArgument = Union[
"ColumnElement[_T]",
_HasClauseElement[_T],
"SQLCoreOperations[_T]",
roles.ExpressionElementRole[_T],
roles.TypedColumnsClauseRole[_T],
Callable[[], "ColumnElement[_T]"],
"LambdaElement",
]
"See docs in public alias ColumnExpressionArgument."
ColumnExpressionArgument: TypeAlias = _ColumnExpressionArgument[_T]
"""Narrower "column expression" argument.
This type is used for all the other "column" kinds of expressions that
typically represent a single SQL column expression, not a set of columns the
way a table or ORM entity does.
This includes ColumnElement, or ORM-mapped attributes that will have a
``__clause_element__()`` method, it also has the ExpressionElementRole
overall which brings in the TextClause object also.
.. versionadded:: 2.0.13
"""
_ColumnExpressionOrLiteralArgument = Union[Any, _ColumnExpressionArgument[_T]]
_ColumnExpressionOrStrLabelArgument = Union[str, _ColumnExpressionArgument[_T]]
_ByArgument = Union[
Iterable[_ColumnExpressionOrStrLabelArgument[Any]],
_ColumnExpressionOrStrLabelArgument[Any],
]
"""Used for keyword-based ``order_by`` and ``partition_by`` parameters."""
_InfoType = Dict[Any, Any]
"""the .info dictionary accepted and used throughout Core /ORM"""
_FromClauseArgument = Union[
roles.FromClauseRole,
Type[Any],
Inspectable[_HasClauseElement[Any]],
_HasClauseElement[Any],
]
"""A FROM clause, like we would send to select().select_from().
Also accommodates ORM entities and related constructs.
"""
_JoinTargetArgument = Union[_FromClauseArgument, roles.JoinTargetRole]
"""target for join() builds on _FromClauseArgument to include additional
join target roles such as those which come from the ORM.
"""
_OnClauseArgument = Union[_ColumnExpressionArgument[Any], roles.OnClauseRole]
"""target for an ON clause, includes additional roles such as those which
come from the ORM.
"""
_SelectStatementForCompoundArgument = Union[
"Select[Unpack[_Ts]]",
"CompoundSelect[Unpack[_Ts]]",
roles.CompoundElementRole,
]
"""SELECT statement acceptable by ``union()`` and other SQL set operations"""
_DMLColumnArgument = Union[
str,
_HasClauseElement[Any],
roles.DMLColumnRole,
"SQLCoreOperations[Any]",
]
"""A DML column expression. This is a "key" inside of insert().values(),
update().values(), and related.
These are usually strings or SQL table columns.
There's also edge cases like JSON expression assignment, which we would want
the DMLColumnRole to be able to accommodate.
"""
_DMLOnlyColumnArgument = Union[
_HasClauseElement[_T],
roles.DMLColumnRole,
"SQLCoreOperations[_T]",
]
_DMLKey = TypeVar("_DMLKey", bound=_DMLColumnArgument)
_DMLColumnKeyMapping = Mapping[_DMLKey, Any]
_DDLColumnArgument = Union[str, "Column[Any]", roles.DDLConstraintColumnRole]
"""DDL column.
used for :class:`.PrimaryKeyConstraint`, :class:`.UniqueConstraint`, etc.
"""
_DDLColumnReferenceArgument = _DDLColumnArgument
_DMLTableArgument = Union[
"TableClause",
"Join",
"Alias",
"CTE",
Type[Any],
Inspectable[_HasClauseElement[Any]],
_HasClauseElement[Any],
]
_PropagateAttrsType = util.immutabledict[str, Any]
_TypeEngineArgument = Union[Type["TypeEngine[_T]"], "TypeEngine[_T]"]
_EquivalentColumnMap = Dict["ColumnElement[Any]", Set["ColumnElement[Any]"]]
_LimitOffsetType = Union[int, _ColumnExpressionArgument[int], None]
_AutoIncrementType = Union[bool, Literal["auto", "ignore_fk"]]
_CreateDropBind = Union["Engine", "Connection", "MockConnection"]
if TYPE_CHECKING:
def is_sql_compiler(c: Compiled) -> TypeGuard[SQLCompiler]: ...
def is_ddl_compiler(c: Compiled) -> TypeGuard[DDLCompiler]: ...
def is_named_from_clause(
t: FromClauseRole,
) -> TypeGuard[NamedFromClause]: ...
def is_column_element(
c: ClauseElement,
) -> TypeGuard[ColumnElement[Any]]: ...
def is_keyed_column_element(
c: ClauseElement,
) -> TypeGuard[KeyedColumnElement[Any]]: ...
def is_text_clause(c: ClauseElement) -> TypeGuard[TextClause]: ...
def is_from_clause(c: ClauseElement) -> TypeGuard[FromClause]: ...
def is_tuple_type(t: TypeEngine[Any]) -> TypeGuard[TupleType]: ...
def is_table_value_type(
t: TypeEngine[Any],
) -> TypeGuard[TableValueType]: ...
def is_selectable(t: Any) -> TypeGuard[Selectable]: ...
def is_select_base(
t: Union[Executable, ReturnsRows],
) -> TypeGuard[SelectBase]: ...
def is_select_statement(
t: Union[Executable, ReturnsRows],
) -> TypeGuard[Select[Unpack[TupleAny]]]: ...
def is_table(t: FromClause) -> TypeGuard[TableClause]: ...
def is_subquery(t: FromClause) -> TypeGuard[Subquery]: ...
def is_dml(c: ClauseElement) -> TypeGuard[UpdateBase]: ...
else:
is_sql_compiler = operator.attrgetter("is_sql")
is_ddl_compiler = operator.attrgetter("is_ddl")
is_named_from_clause = operator.attrgetter("named_with_column")
is_column_element = operator.attrgetter("_is_column_element")
is_keyed_column_element = operator.attrgetter("_is_keyed_column_element")
is_text_clause = operator.attrgetter("_is_text_clause")
is_from_clause = operator.attrgetter("_is_from_clause")
is_tuple_type = operator.attrgetter("_is_tuple_type")
is_table_value_type = operator.attrgetter("_is_table_value")
is_selectable = operator.attrgetter("is_selectable")
is_select_base = operator.attrgetter("_is_select_base")
is_select_statement = operator.attrgetter("_is_select_statement")
is_table = operator.attrgetter("_is_table")
is_subquery = operator.attrgetter("_is_subquery")
is_dml = operator.attrgetter("is_dml")
def has_schema_attr(t: FromClauseRole) -> TypeGuard[TableClause]:
return hasattr(t, "schema")
def is_quoted_name(s: str) -> TypeGuard[quoted_name]:
return hasattr(s, "quote")
def is_has_clause_element(s: object) -> TypeGuard[_HasClauseElement[Any]]:
return hasattr(s, "__clause_element__")
def is_insert_update(c: ClauseElement) -> TypeGuard[ValuesBase]:
return c.is_dml and (c.is_insert or c.is_update) # type: ignore
def _no_kw() -> exc.ArgumentError:
return exc.ArgumentError(
"Additional keyword arguments are not accepted by this "
"function/method. The presence of **kw is for pep-484 typing purposes"
)
def _unexpected_kw(methname: str, kw: Dict[str, Any]) -> NoReturn:
k = list(kw)[0]
raise TypeError(f"{methname} got an unexpected keyword argument '{k}'")
@overload
def Nullable(
val: "SQLCoreOperations[_T]",
) -> "SQLCoreOperations[Optional[_T]]": ...
@overload
def Nullable(
val: roles.ExpressionElementRole[_T],
) -> roles.ExpressionElementRole[Optional[_T]]: ...
@overload
def Nullable(val: Type[_T]) -> Type[Optional[_T]]: ...
def Nullable(
val: _TypedColumnClauseArgument[_T],
) -> _TypedColumnClauseArgument[Optional[_T]]:
"""Types a column or ORM class as nullable.
This can be used in select and other contexts to express that the value of
a column can be null, for example due to an outer join::
stmt1 = select(A, Nullable(B)).outerjoin(A.bs)
stmt2 = select(A.data, Nullable(B.data)).outerjoin(A.bs)
At runtime this method returns the input unchanged.
.. versionadded:: 2.0.20
"""
return val
@overload
def NotNullable(
val: "SQLCoreOperations[Optional[_T]]",
) -> "SQLCoreOperations[_T]": ...
@overload
def NotNullable(
val: roles.ExpressionElementRole[Optional[_T]],
) -> roles.ExpressionElementRole[_T]: ...
@overload
def NotNullable(val: Type[Optional[_T]]) -> Type[_T]: ...
@overload
def NotNullable(val: Optional[Type[_T]]) -> Type[_T]: ...
def NotNullable(
val: Union[_TypedColumnClauseArgument[Optional[_T]], Optional[Type[_T]]],
) -> _TypedColumnClauseArgument[_T]:
"""Types a column or ORM class as not nullable.
This can be used in select and other contexts to express that the value of
a column cannot be null, for example due to a where condition on a
nullable column::
stmt = select(NotNullable(A.value)).where(A.value.is_not(None))
At runtime this method returns the input unchanged.
.. versionadded:: 2.0.20
"""
return val # type: ignore
| _HasDialect |
python | doocs__leetcode | lcp/LCP 68. 美观的花束/Solution.py | {
"start": 0,
"end": 362
} | class ____:
def beautifulBouquet(self, flowers: List[int], cnt: int) -> int:
mod = 10**9 + 7
d = Counter()
ans = j = 0
for i, x in enumerate(flowers):
d[x] += 1
while d[x] > cnt:
d[flowers[j]] -= 1
j += 1
ans = (ans + i - j + 1) % mod
return ans
| Solution |
python | kamyu104__LeetCode-Solutions | Python/next-permutation.py | {
"start": 29,
"end": 673
} | class ____(object):
def nextPermutation(self, nums):
"""
:type nums: List[int]
:rtype: None Do not return anything, modify nums in-place instead.
"""
k, l = -1, 0
for i in reversed(xrange(len(nums)-1)):
if nums[i] < nums[i+1]:
k = i
break
else:
nums.reverse()
return
for i in reversed(xrange(k+1, len(nums))):
if nums[i] > nums[k]:
l = i
break
nums[k], nums[l] = nums[l], nums[k]
nums[k+1:] = nums[:k:-1]
# Time: O(n)
# Space: O(1)
| Solution |
python | getsentry__sentry | tests/sentry/integrations/github/test_integration.py | {
"start": 3901,
"end": 4036
} | class ____(IssueTrackingPlugin2):
slug = "github"
name = "GitHub Mock Plugin"
conf_key = slug
@control_silo_test
| GitHubPlugin |
python | walkccc__LeetCode | solutions/887. Super Egg Drop/887-3.py | {
"start": 0,
"end": 330
} | class ____:
def superEggDrop(self, k: int, n: int) -> int:
moves = 0
dp = [[0] * (k + 1) for _ in range(n + 1)]
while dp[moves][k] < n:
moves += 1
for eggs in range(1, k + 1):
dp[moves][eggs] = (dp[moves - 1][eggs - 1] +
dp[moves - 1][eggs] + 1)
return moves
| Solution |
python | google__jax | jax/experimental/jax2tf/tests/flax_models/seq2seq_lstm.py | {
"start": 2922,
"end": 4069
} | class ____(nn.Module):
"""DecoderLSTM Module wrapped in a lifted scan transform.
Attributes:
teacher_force: See docstring on Seq2seq module.
vocab_size: Size of the vocabulary.
"""
teacher_force: bool
vocab_size: int
@functools.partial(
nn.scan,
variable_broadcast='params',
in_axes=1,
out_axes=1,
split_rngs={'params': False, 'lstm': True})
@nn.compact
def __call__(self, carry: tuple[Array, Array], x: Array) -> Array:
"""Applies the DecoderLSTM model."""
lstm_state, last_prediction = carry
if not self.teacher_force:
x = last_prediction
lstm_state, y = nn.LSTMCell(features=lstm_state[0].shape[-1])(lstm_state, x)
logits = nn.Dense(features=self.vocab_size)(y)
# Sample the predicted token using a categorical distribution over the
# logits.
categorical_rng = self.make_rng('lstm')
predicted_token = jax.random.categorical(categorical_rng, logits)
# Convert to one-hot encoding.
prediction = jax.nn.one_hot(
predicted_token, self.vocab_size, dtype=jnp.float32)
return (lstm_state, prediction), (logits, prediction)
| DecoderLSTM |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/conditional1.py | {
"start": 1515,
"end": 1583
} | class ____(type):
def __bool__(self) -> int:
return 1
| Meta |
python | langchain-ai__langchain | libs/core/tests/unit_tests/runnables/test_tracing_interops.py | {
"start": 8664,
"end": 16837
} | class ____:
@pytest.fixture(autouse=True)
def _setup(self) -> None:
self.tracer = _create_tracer_with_mocked_client()
@staticmethod
def _create_parent(
other_thing: Callable[
[int], Generator[int, None, None] | AsyncGenerator[int, None]
],
) -> RunnableLambda:
@RunnableLambda
def my_child_function(a: int) -> int:
return a + 2
parallel = RunnableParallel(
chain_result=my_child_function.with_config(tags=["atag"]),
other_thing=other_thing,
)
def before(x: int) -> int:
return x
def after(x: dict) -> int:
return x["chain_result"]
sequence = before | parallel | after
if isasyncgenfunction(other_thing):
@RunnableLambda # type: ignore[arg-type]
async def parent(a: int) -> int:
return await sequence.ainvoke(a)
else:
@RunnableLambda
def parent(a: int) -> int:
return sequence.invoke(a)
return parent
def _check_posts(self) -> None:
posts = _get_posts(self.tracer.client)
name_order = [
"parent",
"RunnableSequence",
"before",
"RunnableParallel<chain_result,other_thing>",
["my_child_function", "other_thing"],
"after",
]
expected_parents = {
"parent": None,
"RunnableSequence": "parent",
"before": "RunnableSequence",
"RunnableParallel<chain_result,other_thing>": "RunnableSequence",
"my_child_function": "RunnableParallel<chain_result,other_thing>",
"other_thing": "RunnableParallel<chain_result,other_thing>",
"after": "RunnableSequence",
}
assert len(posts) == sum(
1 if isinstance(n, str) else len(n) for n in name_order
)
prev_dotted_order = None
dotted_order_map = {}
id_map = {}
parent_id_map = {}
i = 0
for name in name_order:
if isinstance(name, list):
for n in name:
matching_post = next(
p for p in posts[i : i + len(name)] if p["name"] == n
)
assert matching_post
dotted_order = matching_post["dotted_order"]
if prev_dotted_order is not None:
assert dotted_order > prev_dotted_order
dotted_order_map[n] = dotted_order
id_map[n] = matching_post["id"]
parent_id_map[n] = matching_post.get("parent_run_id")
i += len(name)
continue
assert posts[i]["name"] == name
dotted_order = posts[i]["dotted_order"]
if prev_dotted_order is not None and not str(
expected_parents[name] # type: ignore[index]
).startswith("RunnableParallel"):
assert dotted_order > prev_dotted_order, (
f"{name} not after {name_order[i - 1]}"
)
prev_dotted_order = dotted_order
if name in dotted_order_map:
msg = f"Duplicate name {name}"
raise ValueError(msg)
dotted_order_map[name] = dotted_order
id_map[name] = posts[i]["id"]
parent_id_map[name] = posts[i].get("parent_run_id")
i += 1
# Now check the dotted orders
for name, parent_ in expected_parents.items():
dotted_order = dotted_order_map[name]
if parent_ is not None:
parent_dotted_order = dotted_order_map[parent_]
assert dotted_order.startswith(parent_dotted_order), (
f"{name}, {parent_dotted_order} not in {dotted_order}"
)
assert str(parent_id_map[name]) == str(id_map[parent_])
else:
assert dotted_order.split(".")[0] == dotted_order
@pytest.mark.parametrize(
"method",
[
lambda parent, cb: parent.invoke(1, {"callbacks": cb}),
lambda parent, cb: list(parent.stream(1, {"callbacks": cb}))[-1],
lambda parent, cb: parent.batch([1], {"callbacks": cb})[0],
],
ids=["invoke", "stream", "batch"],
)
def test_sync(
self, method: Callable[[RunnableLambda, list[BaseCallbackHandler]], int]
) -> None:
def other_thing(_: int) -> Generator[int, None, None]:
yield 1
parent = self._create_parent(other_thing)
# Now run the chain and check the resulting posts
assert method(parent, [self.tracer]) == 3
self._check_posts()
@staticmethod
async def ainvoke(parent: RunnableLambda, cb: list[BaseCallbackHandler]) -> int:
return await parent.ainvoke(1, {"callbacks": cb})
@staticmethod
async def astream(parent: RunnableLambda, cb: list[BaseCallbackHandler]) -> int:
return [res async for res in parent.astream(1, {"callbacks": cb})][-1]
@staticmethod
async def abatch(parent: RunnableLambda, cb: list[BaseCallbackHandler]) -> int:
return (await parent.abatch([1], {"callbacks": cb}))[0]
@pytest.mark.skipif(
sys.version_info < (3, 11), reason="Asyncio context vars require Python 3.11+"
)
@pytest.mark.parametrize("method", [ainvoke, astream, abatch])
async def test_async(
self,
method: Callable[
[RunnableLambda, list[BaseCallbackHandler]], Coroutine[Any, Any, int]
],
) -> None:
async def other_thing(_: int) -> AsyncGenerator[int, None]:
yield 1
parent = self._create_parent(other_thing)
# Now run the chain and check the resulting posts
assert await method(parent, [self.tracer]) == 3
self._check_posts()
@pytest.mark.parametrize("parent_type", ["ls", "lc"])
def test_tree_is_constructed(parent_type: Literal["ls", "lc"]) -> None:
mock_session = MagicMock()
mock_client_ = Client(
session=mock_session, api_key="test", auto_batch_tracing=False
)
@traceable
def kitten(x: str) -> str:
return x
@RunnableLambda
def grandchild(x: str) -> str:
return kitten(x)
@RunnableLambda
def child(x: str) -> str:
return grandchild.invoke(x)
rid = uuid.uuid4()
with tracing_context(
client=mock_client_,
enabled=True,
metadata={"some_foo": "some_bar"},
tags=["afoo"],
):
collected: dict[str, RunTree] = {}
def collect_run(run: RunTree) -> None:
collected[str(run.id)] = run
if parent_type == "ls":
@traceable
def parent() -> str:
return child.invoke("foo")
assert (
parent(langsmith_extra={"on_end": collect_run, "run_id": rid}) == "foo"
)
assert collected
else:
@RunnableLambda
def parent(_: Any) -> str:
return child.invoke("foo")
tracer = LangChainTracer()
tracer._persist_run = collect_run # type: ignore[method-assign]
assert parent.invoke(..., {"run_id": rid, "callbacks": [tracer]}) == "foo" # type: ignore[attr-defined]
run = collected.get(str(rid))
assert run is not None
assert run.name == "parent"
assert run.child_runs
child_run = run.child_runs[0]
assert child_run.name == "child"
assert child_run.child_runs
grandchild_run = child_run.child_runs[0]
assert grandchild_run.name == "grandchild"
assert grandchild_run.child_runs
assert grandchild_run.metadata.get("some_foo") == "some_bar"
assert "afoo" in grandchild_run.tags # type: ignore[operator]
kitten_run = grandchild_run.child_runs[0]
assert kitten_run.name == "kitten"
assert not kitten_run.child_runs
assert kitten_run.metadata.get("some_foo") == "some_bar"
assert "afoo" in kitten_run.tags # type: ignore[operator]
| TestRunnableSequenceParallelTraceNesting |
python | apache__airflow | airflow-core/src/airflow/models/dagwarning.py | {
"start": 3567,
"end": 3831
} | class ____(str, Enum):
"""
Enum for DAG warning types.
This is the set of allowable values for the ``warning_type`` field
in the DagWarning model.
"""
ASSET_CONFLICT = "asset conflict"
NONEXISTENT_POOL = "non-existent pool"
| DagWarningType |
python | tensorflow__tensorflow | tensorflow/compiler/tests/quantized_ops_test.py | {
"start": 1186,
"end": 1767
} | class ____(xla_test.XLATestCase):
# Verify that quantized types can be clustered by XLA.
def testQuantizedTypeRoundtrip(self):
with self.session() as session:
for dtype in self.quantized_tf_types:
in_values = np.array([1, 2, 3, 4, 5, 6])
expected = [[1, 2], [3, 4], [5, 6]]
with self.test_scope():
p = array_ops.placeholder(dtype=dtypes.int32)
x = math_ops.cast(p, dtype)
x = array_ops.reshape(x, [3, 2])
value = session.run(x, {p: in_values})
self.assertAllEqual(value, expected)
| QuantizedOpsTest |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_spans_performance.py | {
"start": 11704,
"end": 14884
} | class ____(OrganizationEventsSpansEndpointBase):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
def get(self, request: Request, organization: Organization) -> Response:
serializer = SpanSerializer(data=request.GET)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
serialized = serializer.validated_data
span = serialized["span"]
def get_event_stats(
query_columns: list[str],
query: str,
snuba_params: SnubaParams,
rollup: int,
zerofill_results: bool,
comparison_delta: timedelta | None = None,
) -> SnubaTSResult:
with sentry_sdk.start_span(op="discover.discover", name="timeseries.filter_transform"):
builder = TimeseriesQueryBuilder(
Dataset.Discover,
{},
rollup,
snuba_params=snuba_params,
query=query,
selected_columns=query_columns,
config=QueryBuilderConfig(
functions_acl=["array_join", "percentileArray", "sumArray"],
),
)
span_op_column = builder.resolve_function("array_join(spans_op)")
span_group_column = builder.resolve_function("array_join(spans_group)")
# Adding spans.op and spans.group to the group by because
# We need them in the query to help the array join optimizer
# in snuba take effect but the TimeseriesQueryBuilder
# removes all non aggregates from the select clause.
builder.groupby.extend([span_op_column, span_group_column])
builder.add_conditions(
[
Condition(
Function("tuple", [span_op_column, span_group_column]),
Op.IN,
Function("tuple", [Function("tuple", [span.op, span.group])]),
),
]
)
snql_query = builder.get_snql_query()
results = raw_snql_query(
snql_query, "api.organization-events-spans-performance-stats"
)
with sentry_sdk.start_span(op="discover.discover", name="timeseries.transform_results"):
result = discover.zerofill(
results["data"],
snuba_params.start_date,
snuba_params.end_date,
rollup,
["time"],
)
return SnubaTSResult(
{"data": result}, snuba_params.start_date, snuba_params.end_date, rollup
)
return Response(
self.get_event_stats_data(
request,
organization,
get_event_stats,
query_column="sumArray(spans_exclusive_time)",
),
status=200,
)
@dataclasses.dataclass(frozen=True)
| OrganizationEventsSpansStatsEndpoint |
python | ApeWorX__ape | tests/functional/geth/test_provider.py | {
"start": 32544,
"end": 37814
} | class ____:
"""
Tests targeting the process-starter directly.
"""
@pytest.fixture
def ignore_bin_check(self, mocker):
# Trick py- into thinking reth is available even when it isn't.
is_exec_check_patch = mocker.patch("geth.wrapper.is_executable_available")
is_exec_check_patch.return_value = True
@geth_process_test
def test_from_uri_http(self, data_folder):
geth_dev = GethDevProcess.from_uri("http://localhost:6799", data_folder)
kwargs = geth_dev.geth_kwargs
assert kwargs["rpc_addr"] == "localhost"
assert kwargs["rpc_port"] == "6799"
assert kwargs["ws_enabled"] is False
assert kwargs.get("ws_api") is None
assert kwargs.get("ws_addr") is None
assert kwargs.get("ws_port") is None
@geth_process_test
def test_from_uri_ws(self, data_folder):
geth_dev = GethDevProcess.from_uri("ws://localhost:6799", data_folder)
kwargs = geth_dev.geth_kwargs
assert kwargs.get("rpc_addr") is None
assert kwargs["ws_enabled"] is True
assert kwargs["ws_addr"] == "localhost"
assert kwargs["ws_port"] == "6799"
@geth_process_test
def test_from_uri_ipc(self, data_folder):
geth_dev = GethDevProcess.from_uri("path/to/geth.ipc", data_folder)
kwargs = geth_dev.geth_kwargs
assert kwargs["ipc_path"] == "path/to/geth.ipc"
assert kwargs.get("ws_api") is None
assert kwargs.get("ws_addr") is None
assert kwargs.get("rpc_addr") is None
@geth_process_test
def test_block_period(self, data_folder):
geth_dev = GethDevProcess.from_uri(
"path/to/geth.ipc",
data_folder,
block_time=1,
generate_accounts=False,
initialize_chain=False,
)
assert geth_dev.geth_kwargs["dev_period"] == "1"
@geth_process_test
def test_is_rpc_ready_false(self, mocker, data_folder):
"""
Both Geth and Reth nodes raise simple URLError when the node is not running.
"""
urlopen_patch = mocker.patch("ape_node.provider.urlopen")
urlopen_patch.side_effect = URLError("Unable to connect")
geth_dev = GethDevProcess.from_uri("path/to/geth.ipc", data_folder)
assert not geth_dev.is_rpc_ready
@geth_process_test
def test_is_rpc_ready_true_geth(self, mocker, data_folder):
"""
Geth has no error when the RPC is ready.
"""
urlopen_patch = mocker.patch("ape_node.provider.urlopen")
urlopen_patch.return_value = None
geth_dev = GethDevProcess.from_uri("path/to/geth.ipc", data_folder)
assert geth_dev.is_rpc_ready
@geth_process_test
def test_is_rpc_ready_true_reth(self, mocker, data_folder):
"""
Reth raises HTTPError("Method not found") when the RPC is ready.
"""
urlopen_patch = mocker.patch("ape_node.provider.urlopen")
urlopen_patch.side_effect = HTTPError("127.0.0.1", 404, "method not found", 0, 0) # type: ignore
geth_dev = GethDevProcess.from_uri("path/to/geth.ipc", data_folder)
assert geth_dev.is_rpc_ready
@geth_process_test
def test_command_reth(self, mocker, data_folder, ignore_bin_check):
"""
Showing we get usable kwargs for a reth --dev node.
"""
reth_dev = GethDevProcess.from_uri(
"path/to/reth.ipc", data_folder, executable=["reth", "node"], verify_bin=False
)
actual = reth_dev.command
assert "reth" in actual
assert "node" in actual
assert "--http.port" in actual
assert "--dev" in actual
# Geth only
assert "localhost" not in actual
assert "--maxpeers" not in actual
assert "--password" not in actual
assert "--nodiscover" not in actual
assert "--networkid" not in actual
@geth_process_test
def test_ipc_path_geth(self, data_folder):
geth_dev = GethDevProcess.from_uri("path/to/geth.ipc", data_folder)
assert geth_dev.ipc_path.endswith("geth.ipc")
assert geth_dev.geth_kwargs["ipc_path"].endswith("geth.ipc")
@geth_process_test
def test_ipc_path_reth(self, data_folder, ignore_bin_check):
reth_dev = GethDevProcess.from_uri(
"path/to/reth.ipc", data_folder, executable=["reth", "node"], verify_bin=False
)
assert reth_dev.ipc_path.endswith("reth.ipc")
assert reth_dev.geth_kwargs["ipc_path"].endswith("reth.ipc")
@geth_process_test
def test_rpc_api_geth(self, data_folder):
geth_dev = GethDevProcess.from_uri("path/to/geth.ipc", data_folder)
actual = set(geth_dev.geth_kwargs["rpc_api"].split(","))
expected = {"admin", "debug", "eth", "net", "txpool", "web3"}
assert actual == expected
@geth_process_test
def test_rpc_api_reth(self, data_folder, ignore_bin_check):
reth_dev = GethDevProcess.from_uri(
"path/to/reth.ipc", data_folder, executable=["reth", "node"], verify_bin=False
)
actual = set(reth_dev.geth_kwargs["rpc_api"].split(","))
expected = {"admin", "debug", "eth", "net", "txpool", "web3", "mev"}
assert actual == expected
| TestGethDevProcess |
python | getsentry__sentry | src/sentry/api/authentication.py | {
"start": 9223,
"end": 10357
} | class ____(QuietBasicAuthentication):
token_name = b"basic"
def accepts_auth(self, auth: list[bytes]) -> bool:
return bool(auth) and auth[0].lower() == self.token_name
def authenticate_credentials(self, userid, password, request=None):
# We don't use request, but it needs to be passed through to DRF 3.7+.
if password:
return None
key: ApiKeyReplica | ApiKey
if SiloMode.get_current_mode() == SiloMode.REGION:
key_replica = ApiKeyReplica.objects.filter(key=userid).last()
if key_replica is None:
raise AuthenticationFailed("API key is not valid")
else:
key = key_replica
else:
try:
key = ApiKey.objects.get_from_cache(key=userid)
except ApiKey.DoesNotExist:
raise AuthenticationFailed("API key is not valid")
if not key.is_active:
raise AuthenticationFailed("Key is disabled")
return self.transform_auth(None, key, "api_key")
@AuthenticationSiloLimit(SiloMode.CONTROL, SiloMode.REGION)
| ApiKeyAuthentication |
python | tiangolo__fastapi | fastapi/openapi/models.py | {
"start": 12265,
"end": 12397
} | class ____(Enum):
apiKey = "apiKey"
http = "http"
oauth2 = "oauth2"
openIdConnect = "openIdConnect"
| SecuritySchemeType |
python | kamyu104__LeetCode-Solutions | Python/reorder-routes-to-make-all-paths-lead-to-the-city-zero.py | {
"start": 762,
"end": 1433
} | class ____(object):
def minReorder(self, n, connections):
"""
:type n: int
:type connections: List[List[int]]
:rtype: int
"""
def dfs(n, lookup, graph, parent, u):
result = (parent*n+u in lookup)
for v in graph[u]:
if v == parent:
continue
result += dfs(n, lookup, graph, u, v)
return result
lookup, graph = set(), collections.defaultdict(list)
for u, v in connections:
lookup.add(u*n+v)
graph[v].append(u)
graph[u].append(v)
return dfs(n, lookup, graph, -1, 0)
| Solution2 |
python | django__django | django/db/models/functions/datetime.py | {
"start": 4549,
"end": 4602
} | class ____(Extract):
lookup_name = "day"
| ExtractDay |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/private1.py | {
"start": 489,
"end": 659
} | class ____(TestClass):
def blah(self):
return self._prot1
def blah2(self):
# This should generate an error
return self.__priv1
| TestSubclass |
python | django-extensions__django-extensions | django_extensions/collision_resolvers.py | {
"start": 1360,
"end": 2393
} | class ____(LegacyCR, metaclass=ABCMeta):
APP_PRIORITIES = None # type: List[str]
def resolve_collisions(self, namespace):
assert self.APP_PRIORITIES is not None, (
"You must define APP_PRIORITIES in your resolver class!"
)
result = {}
for name, models in namespace.items():
if len(models) > 0:
sorted_models = self._sort_models_depending_on_priorities(models)
result[name] = sorted_models[0][1]
return result
def _sort_models_depending_on_priorities(self, models): # type: (List[str]) -> List[Tuple[int, str]]
models_with_priorities = []
for model in models:
try:
app_name, _ = self.get_app_name_and_model(model)
position = self.APP_PRIORITIES.index(app_name)
except (ImportError, ValueError):
position = sys.maxsize
models_with_priorities.append((position, model))
return sorted(models_with_priorities)
| AppsOrderCR |
python | django__django | tests/csrf_tests/tests.py | {
"start": 52862,
"end": 58130
} | class ____(CsrfViewMiddlewareTestMixin, SimpleTestCase):
"""
CSRF tests with CSRF_USE_SESSIONS=True.
"""
def _set_csrf_cookie(self, req, cookie):
req.session[CSRF_SESSION_KEY] = cookie
def _read_csrf_cookie(self, req, resp=None):
"""
Return the CSRF cookie as a string, or False if no cookie is present.
"""
if CSRF_SESSION_KEY not in req.session:
return False
return req.session[CSRF_SESSION_KEY]
def _get_cookies_set(self, req, resp):
return req.session._cookies_set
def test_no_session_on_request(self):
msg = (
"CSRF_USE_SESSIONS is enabled, but request.session is not set. "
"SessionMiddleware must appear before CsrfViewMiddleware in MIDDLEWARE."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
mw = CsrfViewMiddleware(lambda req: HttpResponse())
mw.process_request(HttpRequest())
def test_masked_unmasked_combinations(self):
"""
Masked and unmasked tokens are allowed both as POST and as the
X-CSRFToken header.
"""
cases = [
# Bare secrets are not allowed when CSRF_USE_SESSIONS=True.
(MASKED_TEST_SECRET1, TEST_SECRET, None),
(MASKED_TEST_SECRET1, MASKED_TEST_SECRET2, None),
(MASKED_TEST_SECRET1, None, TEST_SECRET),
(MASKED_TEST_SECRET1, None, MASKED_TEST_SECRET2),
]
for args in cases:
with self.subTest(args=args):
cookie, post_token, meta_token = args
req = self._get_POST_csrf_cookie_request(
cookie=cookie,
post_token=post_token,
meta_token=meta_token,
)
mw = CsrfViewMiddleware(token_view)
mw.process_request(req)
resp = mw.process_view(req, token_view, (), {})
self.assertIsNone(resp)
def test_process_response_get_token_used(self):
"""The ensure_csrf_cookie() decorator works without middleware."""
req = self._get_request()
ensure_csrf_cookie_view(req)
csrf_cookie = self._read_csrf_cookie(req)
self.assertTrue(csrf_cookie)
def test_session_modify(self):
"""The session isn't saved if the CSRF cookie is unchanged."""
req = self._get_request()
mw = CsrfViewMiddleware(ensure_csrf_cookie_view)
mw.process_view(req, ensure_csrf_cookie_view, (), {})
mw(req)
csrf_cookie = self._read_csrf_cookie(req)
self.assertTrue(csrf_cookie)
req.session.modified = False
mw.process_view(req, ensure_csrf_cookie_view, (), {})
mw(req)
self.assertFalse(req.session.modified)
def test_ensures_csrf_cookie_with_middleware(self):
"""
The ensure_csrf_cookie() decorator works with the CsrfViewMiddleware
enabled.
"""
req = self._get_request()
mw = CsrfViewMiddleware(ensure_csrf_cookie_view)
mw.process_view(req, ensure_csrf_cookie_view, (), {})
mw(req)
csrf_cookie = self._read_csrf_cookie(req)
self.assertTrue(csrf_cookie)
@override_settings(
ALLOWED_HOSTS=["www.example.com"],
SESSION_COOKIE_DOMAIN=".example.com",
USE_X_FORWARDED_PORT=True,
DEBUG=True,
)
def test_https_good_referer_behind_proxy(self):
"""
A POST HTTPS request is accepted when USE_X_FORWARDED_PORT=True.
"""
self._test_https_good_referer_behind_proxy()
@override_settings(
ALLOWED_HOSTS=["www.example.com"], SESSION_COOKIE_DOMAIN=".example.com"
)
def test_https_good_referer_matches_cookie_domain(self):
"""
A POST HTTPS request with a good referer should be accepted from a
subdomain that's allowed by SESSION_COOKIE_DOMAIN.
"""
self._test_https_good_referer_matches_cookie_domain()
@override_settings(
ALLOWED_HOSTS=["www.example.com"], SESSION_COOKIE_DOMAIN=".example.com"
)
def test_https_good_referer_matches_cookie_domain_with_different_port(self):
"""
A POST HTTPS request with a good referer should be accepted from a
subdomain that's allowed by SESSION_COOKIE_DOMAIN and a non-443 port.
"""
self._test_https_good_referer_matches_cookie_domain_with_different_port()
@override_settings(SESSION_COOKIE_DOMAIN=".example.com", DEBUG=True)
def test_https_reject_insecure_referer(self):
"""
A POST HTTPS request from an insecure referer should be rejected.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META["HTTP_REFERER"] = "http://example.com/"
req.META["SERVER_PORT"] = "443"
mw = CsrfViewMiddleware(post_form_view)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(
response,
"Referer checking failed - Referer is insecure while host is secure.",
status_code=403,
)
@override_settings(ROOT_URLCONF="csrf_tests.csrf_token_error_handler_urls", DEBUG=False)
| CsrfViewMiddlewareUseSessionsTests |
python | pytorch__pytorch | torch/testing/_internal/distributed/rpc/jit/rpc_test.py | {
"start": 10359,
"end": 10509
} | class ____:
def __init__(self, a: int):
self.a = a
def get_value(self) -> int:
return self.a
@torch.jit.interface
| MyScriptClass |
python | dask__distributed | distributed/batched.py | {
"start": 363,
"end": 7341
} | class ____:
"""Batch messages in batches on a stream
This takes an IOStream and an interval (in ms) and ensures that we send no
more than one message every interval milliseconds. We send lists of
messages.
Batching several messages at once helps performance when sending
a myriad of tiny messages.
Examples
--------
>>> stream = await connect(address)
>>> bstream = BatchedSend(interval='10 ms')
>>> bstream.start(stream)
>>> bstream.send('Hello,')
>>> bstream.send('world!')
On the other side, the recipient will get a message like the following::
['Hello,', 'world!']
"""
# XXX why doesn't BatchedSend follow either the IOStream or Comm API?
def __init__(self, interval, loop=None, serializers=None):
# XXX is the loop arg useful?
self.loop = loop or IOLoop.current()
self.interval = parse_timedelta(interval, default="ms")
self.waker = locks.Event()
self.stopped = locks.Event()
self.please_stop = False
self.buffer = []
self.comm = None
self.message_count = 0
self.batch_count = 0
self.byte_count = 0
self.next_deadline = None
self.recent_message_log = deque(
maxlen=dask.config.get("distributed.admin.low-level-log-length")
)
self.serializers = serializers
self._consecutive_failures = 0
def start(self, comm):
self.comm = comm
self.loop.add_callback(self._background_send)
def closed(self):
return self.comm and self.comm.closed()
def __repr__(self):
if self.closed():
return "<BatchedSend: closed>"
else:
return "<BatchedSend: %d in buffer>" % len(self.buffer)
__str__ = __repr__
@gen.coroutine
def _background_send(self):
while not self.please_stop:
try:
yield self.waker.wait(self.next_deadline)
self.waker.clear()
except gen.TimeoutError:
pass
if not self.buffer:
# Nothing to send
self.next_deadline = None
continue
if self.next_deadline is not None and time() < self.next_deadline:
# Send interval not expired yet
continue
payload, self.buffer = self.buffer, []
self.batch_count += 1
self.next_deadline = time() + self.interval
try:
# NOTE: Since `BatchedSend` doesn't have a handle on the running
# `_background_send` coroutine, the only thing with a reference to this
# coroutine is the event loop itself. If the event loop stops while
# we're waiting on a `write`, the `_background_send` coroutine object
# may be garbage collected. If that happens, the `yield coro` will raise
# `GeneratorExit`. But because this is an old-school `gen.coroutine`,
# and we're using `yield` and not `await`, the `write` coroutine object
# will not actually have been awaited, and it will remain sitting around
# for someone to retrieve it. At interpreter exit, this will warn
# something like `RuntimeWarning: coroutine 'TCP.write' was never
# awaited`. By using the `closing` contextmanager, the `write` coroutine
# object is always cleaned up, even if `yield` raises `GeneratorExit`.
with contextlib.closing(
self.comm.write(
payload, serializers=self.serializers, on_error="raise"
)
) as coro:
nbytes = yield coro
if nbytes < 1e6:
self.recent_message_log.append(payload)
else:
self.recent_message_log.append("large-message")
self.byte_count += nbytes
except CommClosedError:
logger.info("Batched Comm Closed %r", self.comm, exc_info=True)
break
except Exception:
# We cannot safely retry self.comm.write, as we have no idea
# what (if anything) was actually written to the underlying stream.
# Re-writing messages could result in complete garbage (e.g. if a frame
# header has been written, but not the frame payload), therefore
# the only safe thing to do here is to abort the stream without
# any attempt to re-try `write`.
logger.exception("Error in batched write")
break
finally:
payload = None # lose ref
else:
# nobreak. We've been gracefully closed.
self.stopped.set()
return
# If we've reached here, it means `break` was hit above and
# there was an exception when using `comm`.
# We can't close gracefully via `.close()` since we can't send messages.
# So we just abort.
# This means that any messages in our buffer our lost.
# To propagate exceptions, we rely on subsequent `BatchedSend.send`
# calls to raise CommClosedErrors.
self.stopped.set()
self.abort()
def send(self, *msgs: Any) -> None:
"""Schedule a message for sending to the other side
This completes quickly and synchronously
"""
if self.comm is not None and self.comm.closed():
raise CommClosedError(f"Comm {self.comm!r} already closed.")
self.message_count += len(msgs)
self.buffer.extend(msgs)
# Avoid spurious wakeups if possible
if self.next_deadline is None:
self.waker.set()
@gen.coroutine
def close(self, timeout=None):
"""Flush existing messages and then close comm
If set, raises `tornado.util.TimeoutError` after a timeout.
"""
if self.comm is None:
return
self.please_stop = True
self.waker.set()
yield self.stopped.wait(timeout=timeout)
if not self.comm.closed():
try:
if self.buffer:
self.buffer, payload = [], self.buffer
# See note in `_background_send` for explanation of `closing`.
with contextlib.closing(
self.comm.write(
payload, serializers=self.serializers, on_error="raise"
)
) as coro:
yield coro
except CommClosedError:
pass
yield self.comm.close()
def abort(self):
if self.comm is None:
return
self.please_stop = True
self.buffer = []
self.waker.set()
if not self.comm.closed():
self.comm.abort()
| BatchedSend |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType28.py | {
"start": 2819,
"end": 2930
} | class ____(Contra_TA[Co_TA[Contra_TA[T_contra]]]): ...
# This should generate an error.
| CoToContraToContra_WithTA |
python | MongoEngine__mongoengine | docs/code/tumblelog.py | {
"start": 302,
"end": 574
} | class ____(Document):
title = StringField(max_length=120, required=True)
author = ReferenceField(User)
tags = ListField(StringField(max_length=30))
comments = ListField(EmbeddedDocumentField(Comment))
# bugfix
meta = {"allow_inheritance": True}
| Post |
python | pytorch__pytorch | benchmarks/tensorexpr/concat.py | {
"start": 2153,
"end": 4203
} | class ____(benchmark.Benchmark):
def __init__(self, mode, device, dtype, I1_D1, I1_D2, I2_D1, I2_D2, concat_dim):
super().__init__(mode, device, dtype)
self.I1_D1 = I1_D1
self.I1_D2 = I1_D2
self.I2_D1 = I2_D1
self.I2_D2 = I2_D2
self.concat_dim = concat_dim
self.input1 = self.randn(
[I1_D1, I1_D2], device=device, dtype=dtype, requires_grad=self.requires_grad
)
self.input2 = self.randn(
[I2_D1, I2_D2], device=device, dtype=dtype, requires_grad=self.requires_grad
)
self.inputs = [self.input1, self.input2]
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_cat_wo_conditionals(True)
def forward(self, input1, input2):
x1 = self.add(input1, 0.00001)
x2 = self.add(input2, 0.00001)
y = self.cat((x1, x2), dim=self.concat_dim)
z = self.relu(y)
return z
def reference(self):
return np.concatenate(
(self.numpy(self.input1), self.numpy(self.input2)),
axis=self.concat_dim,
)
def config(self):
return [self.I1_D1, self.I1_D2, self.I2_D1, self.I2_D2, self.concat_dim]
@staticmethod
def module():
return "concatGraphOpt"
def memory_workload(self):
if self.mode == "fwd":
sol_count = 1 + 1
algorithmic_count = 3 + 1
else:
sol_count = (1 + 1) + (1 + 1)
algorithmic_count = (3 + 1) + (3 + 1)
buffer_size = self.I1_D1 * self.I1_D2 + self.I2_D1 * self.I2_D2
return {
"sol": buffer_size * sol_count,
"algorithmic": buffer_size * algorithmic_count,
}
@staticmethod
def default_configs():
return [
[1 << 13, 1060, 1 << 13, 1040, 1],
[1 << 13, 2000, 1 << 13, 1074, 1],
[1 << 15, 1060, 1 << 15, 2670, 1],
[1 << 15, 5120, 1 << 15, 2512, 1],
]
benchmark.register_benchmark_class(ConcatGraphOptBench)
| ConcatGraphOptBench |
python | simplejson__simplejson | simplejson/tests/test_decode.py | {
"start": 213,
"end": 434
} | class ____(binary_type):
def decode(self, encoding=None):
return "bad decode"
def __str__(self):
return "bad __str__"
def __bytes__(self):
return b("bad __bytes__")
| MisbehavingBytesSubtype |
python | Textualize__textual | examples/theme_sandbox.py | {
"start": 1989,
"end": 2169
} | class ____(OptionList):
def on_mount(self) -> None:
self.add_options(
[Option(name, id=name) for name in self.app.available_themes.keys()]
)
| ThemeList |
python | mlflow__mlflow | tests/pyfunc/sample_code/code_with_dependencies.py | {
"start": 582,
"end": 1241
} | class ____(PythonModel):
def _call_retriever(self, id):
return f"Retriever called with ID: {id}. Output: 42."
@mlflow.trace
def predict(self, context, model_input):
return f"Input: {model_input}. {self._call_retriever(model_input)}"
@mlflow.trace
def predict_stream(self, context, model_input, params=None):
yield f"Input: {model_input}. {self._call_retriever(model_input)}"
model = MyModelWithTrace() if test_trace else MyModel()
set_model(model)
set_retriever_schema(
primary_key="primary-key",
text_column="text-column",
doc_uri="doc-uri",
other_columns=["column1", "column2"],
)
| MyModelWithTrace |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 1050479,
"end": 1053076
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"check_suite",
"created_at",
"database_id",
"deployment_reviews",
"pending_deployment_requests",
"resource_path",
"run_number",
"updated_at",
"url",
"workflow",
)
check_suite = sgqlc.types.Field(
sgqlc.types.non_null(CheckSuite), graphql_name="checkSuite"
)
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
deployment_reviews = sgqlc.types.Field(
sgqlc.types.non_null(DeploymentReviewConnection),
graphql_name="deploymentReviews",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
pending_deployment_requests = sgqlc.types.Field(
sgqlc.types.non_null(DeploymentRequestConnection),
graphql_name="pendingDeploymentRequests",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
resource_path = sgqlc.types.Field(
sgqlc.types.non_null(URI), graphql_name="resourcePath"
)
run_number = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="runNumber")
updated_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="updatedAt"
)
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
workflow = sgqlc.types.Field(
sgqlc.types.non_null(Workflow), graphql_name="workflow"
)
########################################################################
# Unions
########################################################################
| WorkflowRun |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/mutate_self.py | {
"start": 318,
"end": 2307
} | class ____:
def __init__(self) -> None:
self.foo: str = ""
self.bar: str = ""
def foo_sink(self) -> None:
_test_sink(self.foo)
def bar_sink(self) -> None:
_test_sink(self.bar)
def mutates_foo(self) -> None:
self.foo = _test_source()
def mutates_foo_with_hop(self) -> None:
self.mutates_foo()
def isssue_mutates_foo(self) -> None:
self.mutates_foo()
self.foo_sink() # Issue.
self.bar_sink() # Not an issue.
def issue_mutates_foo_with_hop(self) -> None:
self.mutates_foo_with_hop()
self.foo_sink() # Issue.
self.bar_sink() # Not an issue.
def mutates_foo_and_returns(self) -> "Base":
self.foo = _test_source()
return self
def issue_mutates_and_returns(self) -> None:
self.mutates_foo_and_returns().foo_sink() # Issue.
# pyre-ignore[47]: Self is a valid type
def mutates_foo_self_annotation(self: Self) -> None:
self.foo = _test_source()
def issue_mutates_foo_self_annotation(self) -> None:
self.mutates_foo_self_annotation()
self.foo_sink() # Issue.
self.bar_sink() # Not an issue.
def mutates_foo_self_typevar(self: MySelf) -> None:
self.foo = _test_source()
def issue_mutates_foo_self_typevar(self) -> None:
self.mutates_foo_self_typevar()
self.foo_sink() # Issue.
self.bar_sink() # Not an issue.
def issue_mutates_foo_instance() -> None:
b = Base()
b.mutates_foo()
b.foo_sink() # Issue.
b.bar_sink() # Not an issue.
def issue_mutates_foo_and_returns_instance() -> None:
Base().mutates_foo_and_returns().foo_sink() # Issue.
def free_function_mutates_self(self: Base) -> None:
self.foo = _test_source()
def issue_free_function() -> None:
b = Base()
free_function_mutates_self(b)
b.foo_sink() # TODO(T171333442): Issue, currently a false negative.
b.bar_sink() # Not an issue.
| Base |
python | pypa__setuptools | setuptools/_vendor/backports/tarfile/__init__.py | {
"start": 23403,
"end": 23763
} | class ____(io.BufferedReader):
def __init__(self, tarfile, tarinfo):
fileobj = _FileInFile(tarfile.fileobj, tarinfo.offset_data,
tarinfo.size, tarinfo.name, tarinfo.sparse)
super().__init__(fileobj)
#class ExFileObject
#-----------------------------
# extraction filters (PEP 706)
#-----------------------------
| ExFileObject |
python | celery__celery | celery/beat.py | {
"start": 2126,
"end": 6520
} | class ____:
"""An entry in the scheduler.
Arguments:
name (str): see :attr:`name`.
schedule (~celery.schedules.schedule): see :attr:`schedule`.
args (Tuple): see :attr:`args`.
kwargs (Dict): see :attr:`kwargs`.
options (Dict): see :attr:`options`.
last_run_at (~datetime.datetime): see :attr:`last_run_at`.
total_run_count (int): see :attr:`total_run_count`.
relative (bool): Is the time relative to when the server starts?
"""
#: The task name
name = None
#: The schedule (:class:`~celery.schedules.schedule`)
schedule = None
#: Positional arguments to apply.
args = None
#: Keyword arguments to apply.
kwargs = None
#: Task execution options.
options = None
#: The time and date of when this task was last scheduled.
last_run_at = None
#: Total number of times this task has been scheduled.
total_run_count = 0
def __init__(self, name=None, task=None, last_run_at=None,
total_run_count=None, schedule=None, args=(), kwargs=None,
options=None, relative=False, app=None):
self.app = app
self.name = name
self.task = task
self.args = args
self.kwargs = kwargs if kwargs else {}
self.options = options if options else {}
self.schedule = maybe_schedule(schedule, relative, app=self.app)
self.last_run_at = last_run_at or self.default_now()
self.total_run_count = total_run_count or 0
def default_now(self):
return self.schedule.now() if self.schedule else self.app.now()
_default_now = default_now # compat
def _next_instance(self, last_run_at=None):
"""Return new instance, with date and count fields updated."""
return self.__class__(**dict(
self,
last_run_at=last_run_at or self.default_now(),
total_run_count=self.total_run_count + 1,
))
__next__ = next = _next_instance # for 2to3
def __reduce__(self):
return self.__class__, (
self.name, self.task, self.last_run_at, self.total_run_count,
self.schedule, self.args, self.kwargs, self.options,
)
def update(self, other):
"""Update values from another entry.
Will only update "editable" fields:
``task``, ``schedule``, ``args``, ``kwargs``, ``options``.
"""
self.__dict__.update({
'task': other.task, 'schedule': other.schedule,
'args': other.args, 'kwargs': other.kwargs,
'options': other.options,
})
def is_due(self):
"""See :meth:`~celery.schedules.schedule.is_due`."""
return self.schedule.is_due(self.last_run_at)
def __iter__(self):
return iter(vars(self).items())
def __repr__(self):
return '<{name}: {0.name} {call} {0.schedule}'.format(
self,
call=reprcall(self.task, self.args or (), self.kwargs or {}),
name=type(self).__name__,
)
def __lt__(self, other):
if isinstance(other, ScheduleEntry):
# How the object is ordered doesn't really matter, as
# in the scheduler heap, the order is decided by the
# preceding members of the tuple ``(time, priority, entry)``.
#
# If all that's left to order on is the entry then it can
# just as well be random.
return id(self) < id(other)
return NotImplemented
def editable_fields_equal(self, other):
for attr in ('task', 'args', 'kwargs', 'options', 'schedule'):
if getattr(self, attr) != getattr(other, attr):
return False
return True
def __eq__(self, other):
"""Test schedule entries equality.
Will only compare "editable" fields:
``task``, ``schedule``, ``args``, ``kwargs``, ``options``.
"""
return self.editable_fields_equal(other)
def _evaluate_entry_args(entry_args):
if not entry_args:
return []
return [
v() if isinstance(v, BeatLazyFunc) else v
for v in entry_args
]
def _evaluate_entry_kwargs(entry_kwargs):
if not entry_kwargs:
return {}
return {
k: v() if isinstance(v, BeatLazyFunc) else v
for k, v in entry_kwargs.items()
}
| ScheduleEntry |
python | wandb__wandb | wandb/sdk/lib/auth/auth.py | {
"start": 547,
"end": 1149
} | class ____(abc.ABC):
"""Credentials that give access to a W&B server."""
@abc.abstractmethod
def __init__(self, *, host: str | HostUrl) -> None:
if isinstance(host, str):
host = HostUrl(host)
self._host = host
@property
def host(self) -> HostUrl:
"""The W&B server for which the credentials are valid."""
return self._host
@final
@override
def __repr__(self) -> str:
return f"<{type(self).__name__} host={self.host.url!r}>"
@final
@override
def __str__(self) -> str:
return repr(self)
@final
| Auth |
python | great-expectations__great_expectations | great_expectations/core/partitioners.py | {
"start": 519,
"end": 744
} | class ____(pydantic.BaseModel):
column_name: str
sort_ascending: bool = True
method_name: Literal["partition_on_year_and_month_and_day"] = (
"partition_on_year_and_month_and_day"
)
| ColumnPartitionerDaily |
python | kamyu104__LeetCode-Solutions | Python/closest-binary-search-tree-value.py | {
"start": 29,
"end": 600
} | class ____(object):
def closestValue(self, root, target):
"""
:type root: TreeNode
:type target: float
:rtype: int
"""
gap = float("inf")
closest = float("inf")
while root:
if abs(root.val - target) < gap:
gap = abs(root.val - target)
closest = root.val
if target == root.val:
break
elif target < root.val:
root = root.left
else:
root = root.right
return closest
| Solution |
python | Textualize__textual | src/textual/containers.py | {
"start": 4850,
"end": 5109
} | class ____(Widget):
"""An expanding container with horizontal layout and no scrollbars."""
DEFAULT_CSS = """
Horizontal {
width: 1fr;
height: 1fr;
layout: horizontal;
overflow: hidden hidden;
}
"""
| Horizontal |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli_tests/cli_tests/api_tests/secret_tests/test_business_logic.py | {
"start": 8948,
"end": 16641
} | class ____:
"""Test processing of secret data structures.
This class tests any pure functions that process the raw GraphQL responses
into our domain models and data model creation.
"""
def test_secret_creation_with_all_scopes(self, snapshot):
"""Test creating secrets with all possible scope combinations."""
secrets = []
# Test different scope combinations
scope_combinations = [
{"full_deployment_scope": True, "all_branch_deployments_scope": False},
{"full_deployment_scope": False, "all_branch_deployments_scope": True},
{
"full_deployment_scope": False,
"all_branch_deployments_scope": False,
"specific_branch_deployment_scope": "main",
},
{"local_deployment_scope": True},
{
"full_deployment_scope": True,
"all_branch_deployments_scope": True,
"local_deployment_scope": True,
},
]
for i, scopes in enumerate(scope_combinations):
secret = DgApiSecret(
id=f"scope-test-secret-{i}",
secretName=f"scope_test_secret_{i}",
secretValue=f"test_value_{i}",
locationNames=[f"location_{i}"],
fullDeploymentScope=scopes.get("full_deployment_scope", False),
allBranchDeploymentsScope=scopes.get("all_branch_deployments_scope", False),
specificBranchDeploymentScope=scopes.get("specific_branch_deployment_scope", None),
localDeploymentScope=scopes.get("local_deployment_scope", False),
canViewSecretValue=True,
canEditSecret=True,
updatedBy=DgApiUpdatedBy(email=f"user{i}@company.com"),
updateTimestamp=datetime(2022, 1, 1, 14, 20, i),
)
secrets.append(secret)
secret_list = DgApiSecretList(items=secrets, total=len(secrets))
# Test JSON serialization works correctly for all scopes
result = secret_list.model_dump_json(indent=2)
import json
parsed = json.loads(result)
snapshot.assert_match(parsed)
def test_secret_updated_by_handling(self):
"""Test secret updated_by entry creation and access."""
secret = DgApiSecret(
id="updated-by-test-secret",
secretName="updated_by_test",
secretValue="test_value",
locationNames=["test_location"],
fullDeploymentScope=True,
allBranchDeploymentsScope=False,
specificBranchDeploymentScope=None,
localDeploymentScope=False,
canViewSecretValue=True,
canEditSecret=True,
updatedBy=DgApiUpdatedBy(email="test@company.com"),
updateTimestamp=datetime(2022, 1, 1, 14, 20, 0),
)
assert secret.updated_by is not None
assert secret.updated_by.email == "test@company.com"
assert secret.update_timestamp == datetime(2022, 1, 1, 14, 20, 0)
def test_secret_list_total_count(self):
"""Test that SecretList properly tracks total count."""
secrets = [
DgApiSecret(
id=f"secret-{i}",
secretName=f"secret_{i}",
secretValue=f"value_{i}",
locationNames=[],
fullDeploymentScope=True,
allBranchDeploymentsScope=False,
specificBranchDeploymentScope=None,
localDeploymentScope=False,
canViewSecretValue=True,
canEditSecret=True,
updatedBy=None,
updateTimestamp=None,
)
for i in range(3)
]
secret_list = DgApiSecretList(
items=secrets, total=10
) # Total could be different from items length (pagination)
assert len(secret_list.items) == 3
assert secret_list.total == 10
def test_secret_scopes_input_to_dict(self):
"""Test SecretScopesInput conversion to dict."""
# Test with all fields
scopes_input = DgApiSecretScopesInput(
fullDeploymentScope=True,
allBranchDeploymentsScope=False,
specificBranchDeploymentScope="main",
localDeploymentScope=True,
)
result = scopes_input.to_dict()
expected = {
"fullDeploymentScope": True,
"allBranchDeploymentsScope": False,
"specificBranchDeploymentScope": "main",
"localDeploymentScope": True,
}
assert result == expected
# Test with None values (should be excluded)
scopes_input_sparse = DgApiSecretScopesInput(
fullDeploymentScope=True,
allBranchDeploymentsScope=None,
specificBranchDeploymentScope=None,
localDeploymentScope=False,
)
result_sparse = scopes_input_sparse.to_dict()
expected_sparse = {
"fullDeploymentScope": True,
"localDeploymentScope": False,
}
assert result_sparse == expected_sparse
def test_secret_permissions_combinations(self):
"""Test secret with various permission combinations."""
permission_combos = [
{"can_view_secret_value": True, "can_edit_secret": True},
{"can_view_secret_value": True, "can_edit_secret": False},
{"can_view_secret_value": False, "can_edit_secret": False},
{"can_view_secret_value": False, "can_edit_secret": True}, # Unusual but possible
]
for i, perms in enumerate(permission_combos):
secret = DgApiSecret(
id=f"perm-test-secret-{i}",
secretName=f"permission_test_{i}",
secretValue="test_value" if perms["can_view_secret_value"] else None,
locationNames=[],
fullDeploymentScope=True,
allBranchDeploymentsScope=False,
specificBranchDeploymentScope=None,
localDeploymentScope=False,
canViewSecretValue=perms["can_view_secret_value"],
canEditSecret=perms["can_edit_secret"],
updatedBy=None,
updateTimestamp=None,
)
assert secret.can_view_secret_value == perms["can_view_secret_value"]
assert secret.can_edit_secret == perms["can_edit_secret"]
def test_secret_value_hiding_in_json_format(self):
"""Test that secret values are properly hidden in JSON output when requested."""
secret = DgApiSecret(
id="value-hiding-test",
secretName="secret_with_value",
secretValue="super_secret_password",
locationNames=["secure_app"],
fullDeploymentScope=True,
allBranchDeploymentsScope=False,
specificBranchDeploymentScope=None,
localDeploymentScope=False,
canViewSecretValue=True,
canEditSecret=True,
updatedBy=None,
updateTimestamp=None,
)
# Test with show_value=False (default) - should hide value
result_hidden = format_secret(secret, as_json=True, show_value=False)
import json
parsed_hidden = json.loads(result_hidden)
assert parsed_hidden["value"] == "<hidden>"
# Test with show_value=True - should show actual value
result_shown = format_secret(secret, as_json=True, show_value=True)
parsed_shown = json.loads(result_shown)
assert parsed_shown["value"] == "super_secret_password"
| TestSecretDataProcessing |
python | sympy__sympy | sympy/core/tests/test_expr.py | {
"start": 7747,
"end": 27314
} | class ____(Basic):
'''Represents a Basic subclass that does not support arithmetic operations'''
pass
def test_cooperative_operations():
'''Tests that Expr uses binary operations cooperatively.
In particular it should be possible for non-Expr classes to override
binary operators like +, - etc when used with Expr instances. This should
work for non-Expr classes whether they are Basic subclasses or not. Also
non-Expr classes that do not define binary operators with Expr should give
TypeError.
'''
# A bunch of instances of Expr subclasses
exprs = [
Expr(),
S.Zero,
S.One,
S.Infinity,
S.NegativeInfinity,
S.ComplexInfinity,
S.Half,
Float(0.5),
Integer(2),
Symbol('x'),
Mul(2, Symbol('x')),
Add(2, Symbol('x')),
Pow(2, Symbol('x')),
]
for e in exprs:
# Test that these classes can override arithmetic operations in
# combination with various Expr types.
for ne in [NonBasic(), NonExpr()]:
results = [
(ne + e, ('+', ne, e)),
(e + ne, ('+', e, ne)),
(ne - e, ('-', ne, e)),
(e - ne, ('-', e, ne)),
(ne * e, ('*', ne, e)),
(e * ne, ('*', e, ne)),
(ne / e, ('/', ne, e)),
(e / ne, ('/', e, ne)),
(ne // e, ('//', ne, e)),
(e // ne, ('//', e, ne)),
(ne % e, ('%', ne, e)),
(e % ne, ('%', e, ne)),
(divmod(ne, e), ('divmod', ne, e)),
(divmod(e, ne), ('divmod', e, ne)),
(ne ** e, ('**', ne, e)),
(e ** ne, ('**', e, ne)),
(e < ne, ('>', ne, e)),
(ne < e, ('<', ne, e)),
(e > ne, ('<', ne, e)),
(ne > e, ('>', ne, e)),
(e <= ne, ('>=', ne, e)),
(ne <= e, ('<=', ne, e)),
(e >= ne, ('<=', ne, e)),
(ne >= e, ('>=', ne, e)),
]
for res, args in results:
assert type(res) is SpecialOp and res.args == args
# These classes do not support binary operators with Expr. Every
# operation should raise in combination with any of the Expr types.
for na in [NonArithmetic(), object()]:
raises(TypeError, lambda : e + na)
raises(TypeError, lambda : na + e)
raises(TypeError, lambda : e - na)
raises(TypeError, lambda : na - e)
raises(TypeError, lambda : e * na)
raises(TypeError, lambda : na * e)
raises(TypeError, lambda : e / na)
raises(TypeError, lambda : na / e)
raises(TypeError, lambda : e // na)
raises(TypeError, lambda : na // e)
raises(TypeError, lambda : e % na)
raises(TypeError, lambda : na % e)
raises(TypeError, lambda : divmod(e, na))
raises(TypeError, lambda : divmod(na, e))
raises(TypeError, lambda : e ** na)
raises(TypeError, lambda : na ** e)
raises(TypeError, lambda : e > na)
raises(TypeError, lambda : na > e)
raises(TypeError, lambda : e < na)
raises(TypeError, lambda : na < e)
raises(TypeError, lambda : e >= na)
raises(TypeError, lambda : na >= e)
raises(TypeError, lambda : e <= na)
raises(TypeError, lambda : na <= e)
def test_relational():
from sympy.core.relational import Lt
assert (pi < 3) is S.false
assert (pi <= 3) is S.false
assert (pi > 3) is S.true
assert (pi >= 3) is S.true
assert (-pi < 3) is S.true
assert (-pi <= 3) is S.true
assert (-pi > 3) is S.false
assert (-pi >= 3) is S.false
r = Symbol('r', real=True)
assert (r - 2 < r - 3) is S.false
assert Lt(x + I, x + I + 2).func == Lt # issue 8288
def test_relational_assumptions():
m1 = Symbol("m1", nonnegative=False)
m2 = Symbol("m2", positive=False)
m3 = Symbol("m3", nonpositive=False)
m4 = Symbol("m4", negative=False)
assert (m1 < 0) == Lt(m1, 0)
assert (m2 <= 0) == Le(m2, 0)
assert (m3 > 0) == Gt(m3, 0)
assert (m4 >= 0) == Ge(m4, 0)
m1 = Symbol("m1", nonnegative=False, real=True)
m2 = Symbol("m2", positive=False, real=True)
m3 = Symbol("m3", nonpositive=False, real=True)
m4 = Symbol("m4", negative=False, real=True)
assert (m1 < 0) is S.true
assert (m2 <= 0) is S.true
assert (m3 > 0) is S.true
assert (m4 >= 0) is S.true
m1 = Symbol("m1", negative=True)
m2 = Symbol("m2", nonpositive=True)
m3 = Symbol("m3", positive=True)
m4 = Symbol("m4", nonnegative=True)
assert (m1 < 0) is S.true
assert (m2 <= 0) is S.true
assert (m3 > 0) is S.true
assert (m4 >= 0) is S.true
m1 = Symbol("m1", negative=False, real=True)
m2 = Symbol("m2", nonpositive=False, real=True)
m3 = Symbol("m3", positive=False, real=True)
m4 = Symbol("m4", nonnegative=False, real=True)
assert (m1 < 0) is S.false
assert (m2 <= 0) is S.false
assert (m3 > 0) is S.false
assert (m4 >= 0) is S.false
# See https://github.com/sympy/sympy/issues/17708
#def test_relational_noncommutative():
# from sympy import Lt, Gt, Le, Ge
# A, B = symbols('A,B', commutative=False)
# assert (A < B) == Lt(A, B)
# assert (A <= B) == Le(A, B)
# assert (A > B) == Gt(A, B)
# assert (A >= B) == Ge(A, B)
def test_basic_nostr():
for obj in basic_objs:
raises(TypeError, lambda: obj + '1')
raises(TypeError, lambda: obj - '1')
if obj == 2:
assert obj * '1' == '11'
else:
raises(TypeError, lambda: obj * '1')
raises(TypeError, lambda: obj / '1')
raises(TypeError, lambda: obj ** '1')
def test_series_expansion_for_uniform_order():
assert (1/x + y + x).series(x, 0, 0) == 1/x + O(1, x)
assert (1/x + y + x).series(x, 0, 1) == 1/x + y + O(x)
assert (1/x + 1 + x).series(x, 0, 0) == 1/x + O(1, x)
assert (1/x + 1 + x).series(x, 0, 1) == 1/x + 1 + O(x)
assert (1/x + x).series(x, 0, 0) == 1/x + O(1, x)
assert (1/x + y + y*x + x).series(x, 0, 0) == 1/x + O(1, x)
assert (1/x + y + y*x + x).series(x, 0, 1) == 1/x + y + O(x)
def test_leadterm():
assert (3 + 2*x**(log(3)/log(2) - 1)).leadterm(x) == (3, 0)
assert (1/x**2 + 1 + x + x**2).leadterm(x)[1] == -2
assert (1/x + 1 + x + x**2).leadterm(x)[1] == -1
assert (x**2 + 1/x).leadterm(x)[1] == -1
assert (1 + x**2).leadterm(x)[1] == 0
assert (x + 1).leadterm(x)[1] == 0
assert (x + x**2).leadterm(x)[1] == 1
assert (x**2).leadterm(x)[1] == 2
def test_as_leading_term():
assert (3 + 2*x**(log(3)/log(2) - 1)).as_leading_term(x) == 3
assert (1/x**2 + 1 + x + x**2).as_leading_term(x) == 1/x**2
assert (1/x + 1 + x + x**2).as_leading_term(x) == 1/x
assert (x**2 + 1/x).as_leading_term(x) == 1/x
assert (1 + x**2).as_leading_term(x) == 1
assert (x + 1).as_leading_term(x) == 1
assert (x + x**2).as_leading_term(x) == x
assert (x**2).as_leading_term(x) == x**2
assert (x + oo).as_leading_term(x) is oo
raises(ValueError, lambda: (x + 1).as_leading_term(1))
# https://github.com/sympy/sympy/issues/21177
e = -3*x + (x + Rational(3, 2) - sqrt(3)*S.ImaginaryUnit/2)**2\
- Rational(3, 2) + 3*sqrt(3)*S.ImaginaryUnit/2
assert e.as_leading_term(x) == -sqrt(3)*I*x
# https://github.com/sympy/sympy/issues/21245
e = 1 - x - x**2
d = (1 + sqrt(5))/2
assert e.subs(x, y + 1/d).as_leading_term(y) == \
(-40*y - 16*sqrt(5)*y)/(16 + 8*sqrt(5))
# https://github.com/sympy/sympy/issues/26991
assert sinh(tanh(3/(100*x))).as_leading_term(x, cdir = 1) == sinh(1)
def test_leadterm2():
assert (x*cos(1)*cos(1 + sin(1)) + sin(1 + sin(1))).leadterm(x) == \
(sin(1 + sin(1)), 0)
def test_leadterm3():
assert (y + z + x).leadterm(x) == (y + z, 0)
def test_as_leading_term2():
assert (x*cos(1)*cos(1 + sin(1)) + sin(1 + sin(1))).as_leading_term(x) == \
sin(1 + sin(1))
def test_as_leading_term3():
assert (2 + pi + x).as_leading_term(x) == 2 + pi
assert (2*x + pi*x + x**2).as_leading_term(x) == 2*x + pi*x
def test_as_leading_term4():
# see issue 6843
n = Symbol('n', integer=True, positive=True)
r = -n**3/(2*n**2 + 4*n + 2) - n**2/(n**2 + 2*n + 1) + \
n**2/(n + 1) - n/(2*n**2 + 4*n + 2) + n/(n*x + x) + 2*n/(n + 1) - \
1 + 1/(n*x + x) + 1/(n + 1) - 1/x
assert r.as_leading_term(x).cancel() == n/2
def test_as_leading_term_stub():
class foo(DefinedFunction):
pass
assert foo(1/x).as_leading_term(x) == foo(1/x)
assert foo(1).as_leading_term(x) == foo(1)
raises(NotImplementedError, lambda: foo(x).as_leading_term(x))
def test_as_leading_term_deriv_integral():
# related to issue 11313
assert Derivative(x ** 3, x).as_leading_term(x) == 3*x**2
assert Derivative(x ** 3, y).as_leading_term(x) == 0
assert Integral(x ** 3, x).as_leading_term(x) == x**4/4
assert Integral(x ** 3, y).as_leading_term(x) == y*x**3
assert Derivative(exp(x), x).as_leading_term(x) == 1
assert Derivative(log(x), x).as_leading_term(x) == (1/x).as_leading_term(x)
def test_atoms():
assert x.atoms() == {x}
assert (1 + x).atoms() == {x, S.One}
assert (1 + 2*cos(x)).atoms(Symbol) == {x}
assert (1 + 2*cos(x)).atoms(Symbol, Number) == {S.One, S(2), x}
assert (2*(x**(y**x))).atoms() == {S(2), x, y}
assert S.Half.atoms() == {S.Half}
assert S.Half.atoms(Symbol) == set()
assert sin(oo).atoms(oo) == set()
assert Poly(0, x).atoms() == {S.Zero, x}
assert Poly(1, x).atoms() == {S.One, x}
assert Poly(x, x).atoms() == {x}
assert Poly(x, x, y).atoms() == {x, y}
assert Poly(x + y, x, y).atoms() == {x, y}
assert Poly(x + y, x, y, z).atoms() == {x, y, z}
assert Poly(x + y*t, x, y, z).atoms() == {t, x, y, z}
assert (I*pi).atoms(NumberSymbol) == {pi}
assert (I*pi).atoms(NumberSymbol, I) == \
(I*pi).atoms(I, NumberSymbol) == {pi, I}
assert exp(exp(x)).atoms(exp) == {exp(exp(x)), exp(x)}
assert (1 + x*(2 + y) + exp(3 + z)).atoms(Add) == \
{1 + x*(2 + y) + exp(3 + z), 2 + y, 3 + z}
# issue 6132
e = (f(x) + sin(x) + 2)
assert e.atoms(AppliedUndef) == \
{f(x)}
assert e.atoms(AppliedUndef, Function) == \
{f(x), sin(x)}
assert e.atoms(Function) == \
{f(x), sin(x)}
assert e.atoms(AppliedUndef, Number) == \
{f(x), S(2)}
assert e.atoms(Function, Number) == \
{S(2), sin(x), f(x)}
def test_is_polynomial():
k = Symbol('k', nonnegative=True, integer=True)
assert Rational(2).is_polynomial(x, y, z) is True
assert (S.Pi).is_polynomial(x, y, z) is True
assert x.is_polynomial(x) is True
assert x.is_polynomial(y) is True
assert (x**2).is_polynomial(x) is True
assert (x**2).is_polynomial(y) is True
assert (x**(-2)).is_polynomial(x) is False
assert (x**(-2)).is_polynomial(y) is True
assert (2**x).is_polynomial(x) is False
assert (2**x).is_polynomial(y) is True
assert (x**k).is_polynomial(x) is False
assert (x**k).is_polynomial(k) is False
assert (x**x).is_polynomial(x) is False
assert (k**k).is_polynomial(k) is False
assert (k**x).is_polynomial(k) is False
assert (x**(-k)).is_polynomial(x) is False
assert ((2*x)**k).is_polynomial(x) is False
assert (x**2 + 3*x - 8).is_polynomial(x) is True
assert (x**2 + 3*x - 8).is_polynomial(y) is True
assert (x**2 + 3*x - 8).is_polynomial() is True
assert sqrt(x).is_polynomial(x) is False
assert (sqrt(x)**3).is_polynomial(x) is False
assert (x**2 + 3*x*sqrt(y) - 8).is_polynomial(x) is True
assert (x**2 + 3*x*sqrt(y) - 8).is_polynomial(y) is False
assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(2)).is_polynomial() is True
assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(x)).is_polynomial() is False
assert (
(x**2)*(y**2) + x*(y**2) + y*x + exp(2)).is_polynomial(x, y) is True
assert (
(x**2)*(y**2) + x*(y**2) + y*x + exp(x)).is_polynomial(x, y) is False
assert (1/f(x) + 1).is_polynomial(f(x)) is False
def test_is_rational_function():
assert Integer(1).is_rational_function() is True
assert Integer(1).is_rational_function(x) is True
assert Rational(17, 54).is_rational_function() is True
assert Rational(17, 54).is_rational_function(x) is True
assert (12/x).is_rational_function() is True
assert (12/x).is_rational_function(x) is True
assert (x/y).is_rational_function() is True
assert (x/y).is_rational_function(x) is True
assert (x/y).is_rational_function(x, y) is True
assert (x**2 + 1/x/y).is_rational_function() is True
assert (x**2 + 1/x/y).is_rational_function(x) is True
assert (x**2 + 1/x/y).is_rational_function(x, y) is True
assert (sin(y)/x).is_rational_function() is False
assert (sin(y)/x).is_rational_function(y) is False
assert (sin(y)/x).is_rational_function(x) is True
assert (sin(y)/x).is_rational_function(x, y) is False
for i in _illegal:
assert not i.is_rational_function()
for d in (1, x):
assert not (i/d).is_rational_function()
def test_is_meromorphic():
f = a/x**2 + b + x + c*x**2
assert f.is_meromorphic(x, 0) is True
assert f.is_meromorphic(x, 1) is True
assert f.is_meromorphic(x, zoo) is True
g = 3 + 2*x**(log(3)/log(2) - 1)
assert g.is_meromorphic(x, 0) is False
assert g.is_meromorphic(x, 1) is True
assert g.is_meromorphic(x, zoo) is False
n = Symbol('n', integer=True)
e = sin(1/x)**n*x
assert e.is_meromorphic(x, 0) is False
assert e.is_meromorphic(x, 1) is True
assert e.is_meromorphic(x, zoo) is False
e = log(x)**pi
assert e.is_meromorphic(x, 0) is False
assert e.is_meromorphic(x, 1) is False
assert e.is_meromorphic(x, 2) is True
assert e.is_meromorphic(x, zoo) is False
assert (log(x)**a).is_meromorphic(x, 0) is False
assert (log(x)**a).is_meromorphic(x, 1) is False
assert (a**log(x)).is_meromorphic(x, 0) is None
assert (3**log(x)).is_meromorphic(x, 0) is False
assert (3**log(x)).is_meromorphic(x, 1) is True
def test_is_algebraic_expr():
assert sqrt(3).is_algebraic_expr(x) is True
assert sqrt(3).is_algebraic_expr() is True
eq = ((1 + x**2)/(1 - y**2))**(S.One/3)
assert eq.is_algebraic_expr(x) is True
assert eq.is_algebraic_expr(y) is True
assert (sqrt(x) + y**(S(2)/3)).is_algebraic_expr(x) is True
assert (sqrt(x) + y**(S(2)/3)).is_algebraic_expr(y) is True
assert (sqrt(x) + y**(S(2)/3)).is_algebraic_expr() is True
assert (cos(y)/sqrt(x)).is_algebraic_expr() is False
assert (cos(y)/sqrt(x)).is_algebraic_expr(x) is True
assert (cos(y)/sqrt(x)).is_algebraic_expr(y) is False
assert (cos(y)/sqrt(x)).is_algebraic_expr(x, y) is False
def test_SAGE1():
#see https://github.com/sympy/sympy/issues/3346
class MyInt:
def _sympy_(self):
return Integer(5)
m = MyInt()
e = Rational(2)*m
assert e == 10
raises(TypeError, lambda: Rational(2)*MyInt)
def test_SAGE2():
class MyInt:
def __int__(self):
return 5
assert sympify(MyInt()) == 5
e = Rational(2)*MyInt()
assert e == 10
raises(TypeError, lambda: Rational(2)*MyInt)
def test_SAGE3():
class MySymbol:
def __rmul__(self, other):
return ('mys', other, self)
o = MySymbol()
e = x*o
assert e == ('mys', x, o)
def test_len():
e = x*y
assert len(e.args) == 2
e = x + y + z
assert len(e.args) == 3
def test_doit():
a = Integral(x**2, x)
assert isinstance(a.doit(), Integral) is False
assert isinstance(a.doit(integrals=True), Integral) is False
assert isinstance(a.doit(integrals=False), Integral) is True
assert (2*Integral(x, x)).doit() == x**2
def test_attribute_error():
raises(AttributeError, lambda: x.cos())
raises(AttributeError, lambda: x.sin())
raises(AttributeError, lambda: x.exp())
def test_args():
assert (x*y).args in ((x, y), (y, x))
assert (x + y).args in ((x, y), (y, x))
assert (x*y + 1).args in ((x*y, 1), (1, x*y))
assert sin(x*y).args == (x*y,)
assert sin(x*y).args[0] == x*y
assert (x**y).args == (x, y)
assert (x**y).args[0] == x
assert (x**y).args[1] == y
def test_noncommutative_expand_issue_3757():
A, B, C = symbols('A,B,C', commutative=False)
assert A*B - B*A != 0
assert (A*(A + B)*B).expand() == A**2*B + A*B**2
assert (A*(A + B + C)*B).expand() == A**2*B + A*B**2 + A*C*B
def test_as_numer_denom():
a, b, c = symbols('a, b, c')
assert nan.as_numer_denom() == (nan, 1)
assert oo.as_numer_denom() == (oo, 1)
assert (-oo).as_numer_denom() == (-oo, 1)
assert zoo.as_numer_denom() == (zoo, 1)
assert (-zoo).as_numer_denom() == (zoo, 1)
assert x.as_numer_denom() == (x, 1)
assert (1/x).as_numer_denom() == (1, x)
assert (x/y).as_numer_denom() == (x, y)
assert (x/2).as_numer_denom() == (x, 2)
assert (x*y/z).as_numer_denom() == (x*y, z)
assert (x/(y*z)).as_numer_denom() == (x, y*z)
assert S.Half.as_numer_denom() == (1, 2)
assert (1/y**2).as_numer_denom() == (1, y**2)
assert (x/y**2).as_numer_denom() == (x, y**2)
assert ((x**2 + 1)/y).as_numer_denom() == (x**2 + 1, y)
assert (x*(y + 1)/y**7).as_numer_denom() == (x*(y + 1), y**7)
assert (x**-2).as_numer_denom() == (1, x**2)
assert (a/x + b/2/x + c/3/x).as_numer_denom() == \
(6*a + 3*b + 2*c, 6*x)
assert (a/x + b/2/x + c/3/y).as_numer_denom() == \
(2*c*x + y*(6*a + 3*b), 6*x*y)
assert (a/x + b/2/x + c/.5/x).as_numer_denom() == \
(2*a + b + 4.0*c, 2*x)
# this should take no more than a few seconds
assert int(log(Add(*[Dummy()/i/x for i in range(1, 705)]
).as_numer_denom()[1]/x).n(4)) == 705
for i in [S.Infinity, S.NegativeInfinity, S.ComplexInfinity]:
assert (i + x/3).as_numer_denom() == \
(x + i, 3)
assert (S.Infinity + x/3 + y/4).as_numer_denom() == \
(4*x + 3*y + S.Infinity, 12)
assert (oo*x + zoo*y).as_numer_denom() == \
(zoo*y + oo*x, 1)
A, B, C = symbols('A,B,C', commutative=False)
assert (A*B*C**-1).as_numer_denom() == (A*B*C**-1, 1)
assert (A*B*C**-1/x).as_numer_denom() == (A*B*C**-1, x)
assert (C**-1*A*B).as_numer_denom() == (C**-1*A*B, 1)
assert (C**-1*A*B/x).as_numer_denom() == (C**-1*A*B, x)
assert ((A*B*C)**-1).as_numer_denom() == ((A*B*C)**-1, 1)
assert ((A*B*C)**-1/x).as_numer_denom() == ((A*B*C)**-1, x)
# the following morphs from Add to Mul during processing
assert Add(0, (x + y)/z/-2, evaluate=False).as_numer_denom(
) == (-x - y, 2*z)
def test_trunc():
import math
x, y = symbols('x y')
assert math.trunc(2) == 2
assert math.trunc(4.57) == 4
assert math.trunc(-5.79) == -5
assert math.trunc(pi) == 3
assert math.trunc(log(7)) == 1
assert math.trunc(exp(5)) == 148
assert math.trunc(cos(pi)) == -1
assert math.trunc(sin(5)) == 0
raises(TypeError, lambda: math.trunc(x))
raises(TypeError, lambda: math.trunc(x + y**2))
raises(TypeError, lambda: math.trunc(oo))
| NonArithmetic |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/snap/execution_plan_snapshot.py | {
"start": 8069,
"end": 9200
} | class ____(
NamedTuple(
"_ExecutionStepInputSnap",
[
("name", str),
("dagster_type_key", str),
("upstream_output_handles", Sequence[StepOutputHandle]),
("source", Optional[StepInputSourceUnion]),
],
)
):
def __new__(
cls,
name: str,
dagster_type_key: str,
upstream_output_handles: Sequence[StepOutputHandle],
source: Optional[StepInputSourceUnion] = None,
):
return super().__new__(
cls,
check.str_param(name, "name"),
check.str_param(dagster_type_key, "dagster_type_key"),
check.sequence_param(
upstream_output_handles, "upstream_output_handles", of_type=StepOutputHandle
),
check.opt_inst_param(source, "source", StepInputSourceUnion.__args__), # type: ignore
)
@property
def upstream_step_keys(self):
return [output_handle.step_key for output_handle in self.upstream_output_handles]
@whitelist_for_serdes(storage_field_names={"node_handle": "solid_handle"})
| ExecutionStepInputSnap |
python | django__django | tests/db_functions/models.py | {
"start": 1723,
"end": 1931
} | class ____(models.Model):
big = models.BigIntegerField(null=True, blank=True)
normal = models.IntegerField(null=True, blank=True)
small = models.SmallIntegerField(null=True, blank=True)
| IntegerModel |
python | rapidsai__cudf | python/cudf/cudf/core/udf/strings_typing.py | {
"start": 1292,
"end": 1893
} | class ____(models.StructModel):
# from string_view.hpp:
_members = (
# const char* _data{}
# Pointer to device memory contain char array for this string
("data", types.CPointer(types.char)),
# size_type _bytes{};
# Number of bytes in _data for this string
("bytes", size_type),
# mutable size_type _length{};
# Number of characters in this string (computed)
("length", size_type),
)
def __init__(self, dmm, fe_type):
super().__init__(dmm, fe_type, self._members)
@register_model(UDFString)
| stringview_model |
python | cherrypy__cherrypy | cherrypy/test/benchmark.py | {
"start": 3161,
"end": 3864
} | class ____:
"""A null HTTP request class, returning 200 and an empty body."""
def __init__(self, local, remote, scheme='http'):
"""Initialize a null request instance."""
def close(self):
"""Close the null request."""
def run(self, method, path, query_string, protocol, headers, rfile):
"""Construct an HTTP response."""
cherrypy.response.status = '200 OK'
cherrypy.response.header_list = [
('Content-Type', 'text/html'),
('Server', 'Null CherryPy'),
('Date', httputil.HTTPDate()),
('Content-Length', '0'),
]
cherrypy.response.body = ['']
return cherrypy.response
| NullRequest |
python | scipy__scipy | scipy/stats/tests/test_qmc.py | {
"start": 40734,
"end": 45937
} | class ____:
def test_NormalQMC(self):
# d = 1
engine = qmc.MultivariateNormalQMC(mean=np.zeros(1))
samples = engine.random()
assert_equal(samples.shape, (1, 1))
samples = engine.random(n=5)
assert_equal(samples.shape, (5, 1))
# d = 2
engine = qmc.MultivariateNormalQMC(mean=np.zeros(2))
samples = engine.random()
assert_equal(samples.shape, (1, 2))
samples = engine.random(n=5)
assert_equal(samples.shape, (5, 2))
def test_NormalQMCInvTransform(self):
# d = 1
engine = qmc.MultivariateNormalQMC(
mean=np.zeros(1), inv_transform=True)
samples = engine.random()
assert_equal(samples.shape, (1, 1))
samples = engine.random(n=5)
assert_equal(samples.shape, (5, 1))
# d = 2
engine = qmc.MultivariateNormalQMC(
mean=np.zeros(2), inv_transform=True)
samples = engine.random()
assert_equal(samples.shape, (1, 2))
samples = engine.random(n=5)
assert_equal(samples.shape, (5, 2))
def test_NormalQMCSeeded(self):
# test even dimension
rng = np.random.default_rng(274600237797326520096085022671371676017)
# preserve use of legacy keyword during SPEC 7 transition
engine = qmc.MultivariateNormalQMC(
mean=np.zeros(2), inv_transform=False, seed=rng)
samples = engine.random(n=2)
samples_expected = np.array([[-0.932001, -0.522923],
[-1.477655, 0.846851]])
assert_allclose(samples, samples_expected, atol=1e-4)
# test odd dimension
rng = np.random.default_rng(274600237797326520096085022671371676017)
engine = qmc.MultivariateNormalQMC(
mean=np.zeros(3), inv_transform=False, rng=rng)
samples = engine.random(n=2)
samples_expected = np.array([[-0.932001, -0.522923, 0.036578],
[-1.778011, 0.912428, -0.065421]])
assert_allclose(samples, samples_expected, atol=1e-4)
# same test with another engine
rng = np.random.default_rng(274600237797326520096085022671371676017)
base_engine = qmc.Sobol(4, scramble=True, rng=rng)
engine = qmc.MultivariateNormalQMC(
mean=np.zeros(3), inv_transform=False,
engine=base_engine, rng=rng
)
samples = engine.random(n=2)
samples_expected = np.array([[-0.932001, -0.522923, 0.036578],
[-1.778011, 0.912428, -0.065421]])
assert_allclose(samples, samples_expected, atol=1e-4)
def test_NormalQMCSeededInvTransform(self):
# test even dimension
rng = np.random.default_rng(288527772707286126646493545351112463929)
engine = qmc.MultivariateNormalQMC(
mean=np.zeros(2), rng=rng, inv_transform=True)
samples = engine.random(n=2)
samples_expected = np.array([[-0.913237, -0.964026],
[0.255904, 0.003068]])
assert_allclose(samples, samples_expected, atol=1e-4)
# test odd dimension
rng = np.random.default_rng(288527772707286126646493545351112463929)
engine = qmc.MultivariateNormalQMC(
mean=np.zeros(3), rng=rng, inv_transform=True)
samples = engine.random(n=2)
samples_expected = np.array([[-0.913237, -0.964026, 0.355501],
[0.699261, 2.90213 , -0.6418]])
assert_allclose(samples, samples_expected, atol=1e-4)
def test_other_engine(self):
for d in (0, 1, 2):
base_engine = qmc.Sobol(d=d, scramble=False)
engine = qmc.MultivariateNormalQMC(mean=np.zeros(d),
engine=base_engine,
inv_transform=True)
samples = engine.random()
assert_equal(samples.shape, (1, d))
def test_NormalQMCShapiro(self):
rng = np.random.default_rng(13242)
engine = qmc.MultivariateNormalQMC(mean=np.zeros(2), rng=rng)
samples = engine.random(n=256)
assert all(np.abs(samples.mean(axis=0)) < 1e-2)
assert all(np.abs(samples.std(axis=0) - 1) < 1e-2)
# perform Shapiro-Wilk test for normality
for i in (0, 1):
_, pval = shapiro(samples[:, i])
assert pval > 0.9
# make sure samples are uncorrelated
cov = np.cov(samples.transpose())
assert np.abs(cov[0, 1]) < 1e-2
def test_NormalQMCShapiroInvTransform(self):
rng = np.random.default_rng(32344554)
engine = qmc.MultivariateNormalQMC(
mean=np.zeros(2), inv_transform=True, rng=rng)
samples = engine.random(n=256)
assert all(np.abs(samples.mean(axis=0)) < 1e-2)
assert all(np.abs(samples.std(axis=0) - 1) < 1e-2)
# perform Shapiro-Wilk test for normality
for i in (0, 1):
_, pval = shapiro(samples[:, i])
assert pval > 0.9
# make sure samples are uncorrelated
cov = np.cov(samples.transpose())
assert np.abs(cov[0, 1]) < 1e-2
| TestNormalQMC |
python | django__django | django/contrib/auth/forms.py | {
"start": 7515,
"end": 8839
} | class ____(SetPasswordMixin, forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
This is the documented base class for customizing the user creation form.
It should be kept mostly unchanged to ensure consistency and compatibility.
"""
password1, password2 = SetPasswordMixin.create_password_fields()
class Meta:
model = User
fields = ("username",)
field_classes = {"username": UsernameField}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self._meta.model.USERNAME_FIELD in self.fields:
self.fields[self._meta.model.USERNAME_FIELD].widget.attrs[
"autofocus"
] = True
def clean(self):
self.validate_passwords()
return super().clean()
def _post_clean(self):
super()._post_clean()
# Validate the password after self.instance is updated with form data
# by super().
self.validate_password_for_user(self.instance)
def save(self, commit=True):
user = super().save(commit=False)
user = self.set_password_and_save(user, commit=commit)
if commit and hasattr(self, "save_m2m"):
self.save_m2m()
return user
| BaseUserCreationForm |
python | huggingface__transformers | src/transformers/models/gemma3n/modular_gemma3n.py | {
"start": 32917,
"end": 34412
} | class ____(PaliGemmaCausalLMOutputWithPast):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.text_config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder after projecting last hidden state.
audio_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
audio_hidden_states of the model produced by the audio encoder and after projecting the last hidden state.
"""
audio_hidden_states: Optional[torch.FloatTensor] = None
| Gemma3nCausalLMOutputWithPast |
python | django__django | django/contrib/gis/forms/fields.py | {
"start": 4324,
"end": 4393
} | class ____(GeometryField):
geom_type = "LINESTRING"
| LineStringField |
python | walkccc__LeetCode | solutions/1675. Minimize Deviation in Array/1675.py | {
"start": 0,
"end": 467
} | class ____:
def minimumDeviation(self, nums: list[int]) -> int:
ans = math.inf
mn = math.inf
maxHeap = []
for num in nums:
evenNum = num if num % 2 == 0 else num * 2
heapq.heappush(maxHeap, -evenNum)
mn = min(mn, evenNum)
while maxHeap[0] % 2 == 0:
mx = -heapq.heappop(maxHeap)
ans = min(ans, mx - mn)
mn = min(mn, mx // 2)
heapq.heappush(maxHeap, -mx // 2)
return min(ans, -maxHeap[0] - mn)
| Solution |
python | walkccc__LeetCode | solutions/3171. Find Subarray With Bitwise AND Closest to K/3171.py | {
"start": 0,
"end": 571
} | class ____:
# Similar to 1521. Find a Value of a Mysterious Function Closest to Target
def minimumDifference(self, nums: list[int], k: int) -> int:
ans = math.inf
dp = set() # all the values of subarrays that end in the current number
for num in nums:
# Extend each subarray that ends in the dpious number. Due to
# monotonicity of the OR operation, the size of `next_set` will be at most
# bin(num).count('1') + 1.
dp = {num} | {val | num for val in dp}
ans = min(ans, min(abs(k - val) for val in dp))
return ans
| Solution |
python | huggingface__transformers | tests/models/maskformer/test_modeling_maskformer.py | {
"start": 7741,
"end": 20687
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
pipeline_model_mapping = (
{"image-feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
is_encoder_decoder = False
test_missing_keys = False
zero_init_hidden_state = True
test_torch_exportable = True
def setUp(self):
self.model_tester = MaskFormerModelTester(self)
self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = copy.deepcopy(inputs_dict)
if return_labels:
if model_class == MaskFormerForInstanceSegmentation:
inputs_dict["mask_labels"] = torch.zeros(
(
self.model_tester.batch_size,
self.model_tester.num_labels,
self.model_tester.min_size,
self.model_tester.max_size,
),
dtype=torch.float32,
device=torch_device,
)
inputs_dict["class_labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_labels), dtype=torch.long, device=torch_device
)
return inputs_dict
def test_config(self):
self.config_tester.run_common_tests()
def test_maskformer_model(self):
config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)
def test_maskformer_instance_segmentation_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)
@unittest.skip(reason="MaskFormer does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="MaskFormer is not a generative model")
def test_generate_without_input_ids(self):
pass
@unittest.skip(reason="MaskFormer does not use token embeddings")
def test_resize_tokens_embeddings(self):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
)
def test_multi_gpu_data_parallel_forward(self):
pass
@slow
def test_model_from_pretrained(self):
for model_name in ["facebook/maskformer-swin-small-coco"]:
model = MaskFormerModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_model_with_labels(self):
size = (self.model_tester.min_size,) * 2
inputs = {
"pixel_values": torch.randn((2, 3, *size), device=torch_device),
"mask_labels": torch.randn((2, 10, *size), device=torch_device),
"class_labels": torch.zeros(2, 10, device=torch_device).long(),
}
model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
outputs = model(**inputs)
self.assertTrue(outputs.loss is not None)
def test_hidden_states_output(self):
config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# Check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
# encoder_hidden_states, pixel_decoder_hidden_states, transformer_decoder_hidden_states, hidden_states
added_hidden_states = 4
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
def test_retain_grad_hidden_states_attentions(self):
# only MaskFormerForInstanceSegmentation has the loss
model_class = self.all_model_classes[1]
config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
config.output_hidden_states = True
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.train()
outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
encoder_hidden_states = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
attentions = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=True)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
def test_forward_auxiliary_loss(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.use_auxiliary_loss = True
config.output_auxiliary_logits = True
config.output_hidden_states = True
# only test for object detection and segmentation model
for model_class in self.all_model_classes[1:]:
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
outputs = model(**inputs)
self.assertIsNotNone(outputs.auxiliary_logits)
self.assertEqual(len(outputs.auxiliary_logits), self.model_tester.num_channels - 1)
def test_batching_equivalence(self):
def equivalence(tensor1, tensor2):
return 1.0 - F.cosine_similarity(tensor1.float().flatten(), tensor2.float().flatten(), dim=0, eps=0).max()
def recursive_check(batched_object, single_row_object, model_name, key):
if isinstance(batched_object, (list, tuple)):
for batched_object_value, single_row_object_value in zip(batched_object, single_row_object):
recursive_check(batched_object_value, single_row_object_value, model_name, key)
elif batched_object is None:
return
else:
batched_row = batched_object[:1]
self.assertFalse(
torch.isnan(batched_row).any(), f"Batched output has `nan` in {model_name} for key={key}"
)
self.assertFalse(
torch.isinf(batched_row).any(), f"Batched output has `inf` in {model_name} for key={key}"
)
self.assertFalse(
torch.isnan(single_row_object).any(), f"Single row output has `nan` in {model_name} for key={key}"
)
self.assertFalse(
torch.isinf(single_row_object).any(), f"Single row output has `inf` in {model_name} for key={key}"
)
self.assertTrue(
(equivalence(batched_row, single_row_object)) <= 1e-03,
msg=(
f"Batched and Single row outputs are not equal in {model_name} for key={key}. "
f"Difference={equivalence(batched_row, single_row_object)}."
),
)
config, batched_input = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
config.output_hidden_states = True
model_name = model_class.__name__
batched_input_prepared = self._prepare_for_class(batched_input, model_class)
model = model_class(config).to(torch_device).eval()
batch_size = self.model_tester.batch_size
single_row_input = {}
for key, value in batched_input_prepared.items():
single_batch_shape = value.shape[0] // batch_size
single_row_input[key] = value[:single_batch_shape]
with torch.no_grad():
model_batched_output = model(**batched_input_prepared)
model_row_output = model(**single_row_input)
for key in model_batched_output:
# remove the first zero-init queries to decoder, otherwise cos_similarity = `nan`
# no need to check all hidden_states, already checked separately each one
if key == "transformer_decoder_hidden_states":
model_batched_output[key] = model_batched_output[key][1:]
model_row_output[key] = model_row_output[key][1:]
elif key == "hidden_states":
continue
recursive_check(model_batched_output[key], model_row_output[key], model_name, key)
@require_timm
def test_backbone_selection(self):
config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
config.backbone_config = None
config.backbone_kwargs = {"out_indices": [1, 2, 3]}
config.use_pretrained_backbone = True
# Load a timm backbone
# We can't load transformer checkpoint with timm backbone, as we can't specify features_only and out_indices
config.backbone = "resnet18"
config.use_timm_backbone = True
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device).eval()
if model.__class__.__name__ == "MaskFormerModel":
self.assertEqual(model.pixel_level_module.encoder.out_indices, [1, 2, 3])
elif model.__class__.__name__ == "MaskFormerForUniversalSegmentation":
self.assertEqual(model.model.pixel_level_module.encoder.out_indices, [1, 2, 3])
# Load a HF backbone
config.backbone = "microsoft/resnet-18"
config.use_timm_backbone = False
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device).eval()
if model.__class__.__name__ == "MaskFormerModel":
self.assertEqual(model.pixel_level_module.encoder.out_indices, [1, 2, 3])
elif model.__class__.__name__ == "MaskFormerForUniversalSegmentation":
self.assertEqual(model.model.pixel_level_module.encoder.out_indices, [1, 2, 3])
TOLERANCE = 2e-4
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_vision
@slow
| MaskFormerModelTest |
python | getsentry__sentry | src/sentry/rules/history/backends/postgres.py | {
"start": 1341,
"end": 1449
} | class ____(Subquery):
def get_group_by_cols(self, alias=None) -> list:
return []
| NoGroupBySubquery |
python | jazzband__django-model-utils | tests/models.py | {
"start": 13531,
"end": 13787
} | class ____(TimeStampedModel, StatusModel):
STATUS = Choices(
("active", _("active")),
("deleted", _("deleted")),
("on_hold", _("on hold")),
)
test_field = models.PositiveSmallIntegerField(default=0)
| TimeStampWithStatusModel |
python | getsentry__sentry | tests/sentry/api/serializers/test_base.py | {
"start": 275,
"end": 390
} | class ____(Serializer):
def serialize(self, obj, attrs, user, **kw):
return {"kw": kw}
| VariadicSerializer |
python | doocs__leetcode | solution/2100-2199/2185.Counting Words With a Given Prefix/Solution.py | {
"start": 0,
"end": 133
} | class ____:
def prefixCount(self, words: List[str], pref: str) -> int:
return sum(w.startswith(pref) for w in words)
| Solution |
python | getsentry__sentry | tests/sentry/api/serializers/test_group_tombstone.py | {
"start": 253,
"end": 1328
} | class ____(TestCase):
def test_simple(self) -> None:
user = self.create_user("foo@example.com")
rpc_user = user_service.get_many(filter={"user_ids": [user.id]})[0]
self.login_as(user=user)
org = self.create_organization(owner=rpc_user)
project = self.create_project(organization=org, name="CoolProj")
group = self.create_group(project=project)
tombstone = GroupTombstone.objects.create(
project_id=group.project_id,
level=group.level,
message=group.message,
culprit=group.culprit,
data=group.data,
actor_id=rpc_user.id,
previous_group_id=group.id,
)
GroupHash.objects.create(
project=group.project, hash="x" * 32, group=group, group_tombstone_id=tombstone.id
)
result = serialize(tombstone, rpc_user)
assert result["message"] == group.message
assert result["culprit"] == group.culprit
assert result["actor"]["email"] == "foo@example.com"
| GroupTombstoneSerializerTest |
python | django__django | tests/custom_managers/models.py | {
"start": 5014,
"end": 5146
} | class ____(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(top_speed__gt=150)
| FastCarManager |
python | numba__numba | numba/tests/test_errorhandling.py | {
"start": 13682,
"end": 14139
} | class ____(SerialMixin, unittest.TestCase):
def test_bound_function_error_string(self):
# See PR #5952
def foo(x):
x.max(-1)
with override_config('DEVELOPER_MODE', 1):
with self.assertRaises(errors.TypingError) as raises:
njit("void(int64[:,:])")(foo)
excstr = str(raises.exception)
self.assertIn("too many positional arguments", excstr)
| TestDeveloperSpecificErrorMessages |
python | automl__auto-sklearn | autosklearn/metalearning/metafeatures/metafeatures.py | {
"start": 7209,
"end": 8018
} | class ____(MetaFeature):
def _calculate(self, X, y, logger, feat_type):
missing = helper_functions.get_value("MissingValues")
num_missing = missing.sum(axis=0)
return float(np.sum([1 if num > 0 else 0 for num in num_missing]))
def _calculate_sparse(self, X, y, logger, feat_type):
missing = helper_functions.get_value("MissingValues")
new_missing = missing.tocsc()
num_missing = [
np.sum(new_missing.data[new_missing.indptr[i] : new_missing.indptr[i + 1]])
for i in range(missing.shape[1])
]
return float(np.sum([1 if num > 0 else 0 for num in num_missing]))
@metafeatures.define(
"PercentageOfFeaturesWithMissingValues",
dependency="NumberOfFeaturesWithMissingValues",
)
| NumberOfFeaturesWithMissingValues |
python | getsentry__sentry | src/sentry/models/dynamicsampling.py | {
"start": 2895,
"end": 14061
} | class ____(Model):
"""
This represents a custom dynamic sampling rule that is created by the user based
on a query (a.k.a. investigation rule).
"""
__relocation_scope__ = RelocationScope.Organization
date_added = models.DateTimeField(default=timezone.now)
organization = FlexibleForeignKey("sentry.Organization", on_delete=models.CASCADE)
projects = models.ManyToManyField(
"sentry.Project",
related_name="custom_dynamic_sampling_rules",
through=CustomDynamicSamplingRuleProject,
)
is_active = models.BooleanField(default=True)
is_org_level = models.BooleanField(default=False)
rule_id = models.IntegerField(default=0)
condition = models.TextField()
sample_rate = models.FloatField(default=0.0)
start_date = models.DateTimeField(default=timezone.now)
end_date = models.DateTimeField()
num_samples = models.IntegerField()
condition_hash = models.CharField(max_length=40)
# the raw query field from the request
query = models.TextField(null=True)
created_by_id = HybridCloudForeignKey("sentry.User", on_delete="CASCADE", null=True, blank=True)
notification_sent = models.BooleanField(null=True, blank=True)
@property
def external_rule_id(self) -> int:
"""
Returns the external rule id
For external users, i.e. Relay, we need to shift the ids since the slot we
have allocated starts at the offset specified in RESERVED_IDS.
"""
return self.rule_id + CUSTOM_RULE_START
class Meta:
app_label = "sentry"
db_table = "sentry_customdynamicsamplingrule"
indexes = [
# get active rules for an organization
models.Index(fields=["organization"], name="org_idx", condition=Q(is_active=True)),
# get expired rules (that are still marked as active)
models.Index(fields=["end_date"], name="end_date_idx", condition=Q(is_active=True)),
# find active rules for a condition
models.Index(
fields=["condition_hash"], name="condition_hash_idx", condition=Q(is_active=True)
),
]
@staticmethod
def get_rule_for_org(
condition: Any,
organization_id: int,
project_ids: Sequence[int],
) -> CustomDynamicSamplingRule | None:
"""
Returns an active rule for the given condition and organization if it exists otherwise None
Note: There should not be more than one active rule for a given condition and organization
This function doesn't verify this condition, it just returns the first one.
"""
rule_hash = get_rule_hash(condition, project_ids)
rules = CustomDynamicSamplingRule.objects.filter(
organization_id=organization_id,
condition_hash=rule_hash,
is_active=True,
end_date__gt=timezone.now(),
)[:1]
return rules[0] if rules else None
@staticmethod
def update_or_create(
condition: Any,
start: datetime,
end: datetime,
project_ids: Sequence[int],
organization_id: int,
num_samples: int,
sample_rate: float,
query: str,
created_by_id: int | None = None,
) -> CustomDynamicSamplingRule:
from sentry.models.organization import Organization
from sentry.models.project import Project
with transaction.atomic(router.db_for_write(CustomDynamicSamplingRule)):
# check if rule already exists for this organization
existing_rule = CustomDynamicSamplingRule.get_rule_for_org(
condition, organization_id, project_ids
)
if existing_rule is not None:
# we already have an active rule for this condition and this organization
# update the expiration date and ensure that our projects are included
existing_rule.end_date = max(end, existing_rule.end_date)
existing_rule.num_samples = max(num_samples, existing_rule.num_samples)
existing_rule.sample_rate = max(sample_rate, existing_rule.sample_rate)
# for org rules we don't need to do anything with the projects
existing_rule.save()
return existing_rule
else:
projects = Project.objects.get_many_from_cache(project_ids)
projects = list(projects)
organization = Organization.objects.get_from_cache(id=organization_id)
if CustomDynamicSamplingRule.per_project_limit_reached(projects, organization):
raise TooManyRules()
# create a new rule
rule_hash = get_rule_hash(condition, project_ids)
is_org_level = len(project_ids) == 0
condition_str = json.dumps(condition)
rule = CustomDynamicSamplingRule.objects.create(
organization_id=organization_id,
condition=condition_str,
sample_rate=sample_rate,
start_date=start,
end_date=end,
num_samples=num_samples,
condition_hash=rule_hash,
is_active=True,
is_org_level=is_org_level,
query=query,
notification_sent=False,
created_by_id=created_by_id,
)
rule.save()
# now try to assign a rule id
id = rule.assign_rule_id()
if id > MAX_CUSTOM_RULES:
# we have too many rules, delete this one
rule.delete()
raise TooManyRules()
# set the projects if not org level
for project in projects:
CustomDynamicSamplingRuleProject.objects.create(
custom_dynamic_sampling_rule=rule, project=project
)
return rule
def assign_rule_id(self) -> int:
"""
Assigns the smallest rule id that is not taken in the
current organization.
"""
if self.id is None:
raise ValueError("Cannot assign rule id to unsaved object")
if self.rule_id != 0:
raise ValueError("Cannot assign rule id to object that already has a rule id")
now = timezone.now()
base_qs = CustomDynamicSamplingRule.objects.filter(
organization_id=self.organization.id, end_date__gt=now, is_active=True
)
# We want to find the smallest free rule id. We do this by self-joining with rule_id + 1 and excluding the existing rule_ids.
# We then order by rule_id_plus_one and take the first value.
# This also works for the first rule, as it is pre-initialized with 0, and will thus end up with 1.
new_rule_id_subquery = Subquery(
base_qs.annotate(rule_id_plus_one=F("rule_id") + 1)
.exclude(rule_id_plus_one__in=base_qs.values_list("rule_id", flat=True))
.order_by("rule_id_plus_one")
.values("rule_id_plus_one")[:1]
)
max_rule_id = base_qs.aggregate(Max("rule_id"))["rule_id__max"] or 0
fallback_value = Value(max_rule_id + 1, output_field=IntegerField())
safe_new_rule_id = Coalesce(new_rule_id_subquery, fallback_value)
# Update this instance with the new rule_id
CustomDynamicSamplingRule.objects.filter(id=self.id).update(rule_id=safe_new_rule_id)
self.refresh_from_db()
return self.rule_id
@staticmethod
def deactivate_old_rules() -> None:
"""
Deactivates all rules expired rules (this is just an optimization to remove old rules from indexes).
This should be called periodically to clean up old rules (it is not necessary to call it for correctness,
just for performance)
"""
CustomDynamicSamplingRule.objects.filter(
# give it a minute grace period to make sure we don't deactivate rules that are still active
end_date__lt=timezone.now()
- timedelta(minutes=1),
).update(is_active=False)
@staticmethod
def get_project_rules(
project: Project,
) -> Sequence[CustomDynamicSamplingRule]:
"""
Returns all active project rules
"""
now = timezone.now()
# org rules ( apply to all projects in the org)
org_rules = CustomDynamicSamplingRule.objects.filter(
is_active=True,
is_org_level=True,
organization=project.organization,
end_date__gt=now,
start_date__lt=now,
)[: MAX_CUSTOM_RULES_PER_PROJECT + 1]
# project rules
project_rules = CustomDynamicSamplingRule.objects.filter(
is_active=True,
projects__in=[project],
end_date__gt=now,
start_date__lt=now,
)[: MAX_CUSTOM_RULES_PER_PROJECT + 1]
rules = list(project_rules.union(org_rules)[: MAX_CUSTOM_RULES_PER_PROJECT + 1])
if len(rules) > MAX_CUSTOM_RULES_PER_PROJECT:
metrics.incr("dynamic_sampling.custom_rules.overflow")
return rules[:MAX_CUSTOM_RULES_PER_PROJECT]
@staticmethod
def deactivate_expired_rules() -> None:
"""
Deactivates all rules that have expired
"""
CustomDynamicSamplingRule.objects.filter(
end_date__lt=timezone.now(), is_active=True
).update(is_active=False)
@staticmethod
def num_active_rules_for_project(project: Project) -> int:
"""
Returns the number of active rules for the given project
"""
now = timezone.now()
num_org_rules = CustomDynamicSamplingRule.objects.filter(
is_active=True,
is_org_level=True,
organization=project.organization,
end_date__gt=now,
start_date__lte=now,
).count()
num_proj_rules = CustomDynamicSamplingRule.objects.filter(
is_active=True,
is_org_level=False,
projects__in=[project],
end_date__gt=now,
start_date__lte=now,
).count()
return num_proj_rules + num_org_rules
@staticmethod
def per_project_limit_reached(projects: Sequence[Project], organization: Organization) -> bool:
"""
Returns True if the rule limit is reached for any of the given projects (or all
the projects in the organization if org level rule)
"""
projects = list(projects)
if len(projects) == 0:
# an org rule check all the org projects
org_projects = organization.project_set.filter(status=ObjectStatus.ACTIVE)
projects = list(org_projects)
for project in projects:
num_rules = CustomDynamicSamplingRule.num_active_rules_for_project(project)
if num_rules >= MAX_CUSTOM_RULES_PER_PROJECT:
return True
return False
| CustomDynamicSamplingRule |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_pattern07.py | {
"start": 315,
"end": 3539
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_pattern07.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [110902272, 110756608]
data = [
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
worksheet.write_column("D1", data[3])
worksheet.write_column("E1", data[4])
worksheet.write_column("F1", data[5])
worksheet.write_column("G1", data[6])
worksheet.write_column("H1", data[7])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$3",
"pattern": {
"pattern": "percent_40",
"fg_color": "#C00000",
"bg_color": "#FFFFFF",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$B$1:$B$3",
"pattern": {
"pattern": "percent_90",
"fg_color": "#FF0000",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$C$1:$C$3",
"pattern": {
"pattern": "wide_upward_diagonal",
"fg_color": "#FFC000",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$D$1:$D$3",
"pattern": {
"pattern": "dark_horizontal",
"fg_color": "#FFFF00",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$E$1:$E$3",
"pattern": {
"pattern": "large_confetti",
"fg_color": "#92D050",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$F$1:$F$3",
"pattern": {
"pattern": "plaid",
"fg_color": "#00B050",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$G$1:$G$3",
"pattern": {
"pattern": "sphere",
"fg_color": "#00B0F0",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$H$1:$H$3",
"pattern": {
"pattern": "solid_diamond",
"fg_color": "#0070C0",
},
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_gcs.py | {
"start": 14236,
"end": 23471
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.gcs.TemporaryDirectory")
@mock.patch("airflow.providers.google.cloud.operators.gcs.subprocess")
@mock.patch("airflow.providers.google.cloud.operators.gcs.GCSHook")
def test_execute(self, mock_hook, mock_subprocess, mock_tempdir):
source_bucket = TEST_BUCKET
source_prefix = "source_prefix"
source_gcp_conn_id = ""
destination_bucket = TEST_BUCKET + "_dest"
destination_prefix = "destination_prefix"
destination_gcp_conn_id = ""
transform_script = "script.py"
source = "source"
destination = "destination"
file1 = "file1"
file2 = "file2"
timespan_start = datetime(2015, 2, 1, 15, 16, 17, 345, tzinfo=timezone.utc)
timespan_end = timespan_start + timedelta(hours=1)
mock_ti = mock.Mock()
context = dict(
logical_date=timespan_start,
data_interval_start=timespan_start,
data_interval_end=timespan_end,
ti=mock_ti,
task=mock.MagicMock(),
)
mock_tempdir.return_value.__enter__.side_effect = [source, destination]
mock_hook.return_value.list_by_timespan.return_value = [
f"{source_prefix}/{file1}",
f"{source_prefix}/{file2}",
]
mock_proc = mock.MagicMock()
mock_proc.returncode = 0
mock_proc.stdout.readline = lambda: b""
mock_proc.wait.return_value = None
mock_popen = mock.MagicMock()
mock_popen.return_value.__enter__.return_value = mock_proc
mock_subprocess.Popen = mock_popen
mock_subprocess.PIPE = "pipe"
mock_subprocess.STDOUT = "stdout"
op = GCSTimeSpanFileTransformOperator(
task_id=TASK_ID,
source_bucket=source_bucket,
source_prefix=source_prefix,
source_gcp_conn_id=source_gcp_conn_id,
destination_bucket=destination_bucket,
destination_prefix=destination_prefix,
destination_gcp_conn_id=destination_gcp_conn_id,
transform_script=transform_script,
)
with mock.patch.object(Path, "glob") as path_glob:
path_glob.return_value.__iter__.return_value = [
Path(f"{destination}/{file1}"),
Path(f"{destination}/{file2}"),
]
op.execute(context=context)
mock_hook.return_value.list_by_timespan.assert_called_once_with(
bucket_name=source_bucket,
timespan_start=timespan_start,
timespan_end=timespan_end,
prefix=source_prefix,
)
mock_hook.return_value.download.assert_has_calls(
[
mock.call(
bucket_name=source_bucket,
object_name=f"{source_prefix}/{file1}",
filename=f"{source}/{source_prefix}/{file1}",
chunk_size=None,
num_max_attempts=1,
),
mock.call(
bucket_name=source_bucket,
object_name=f"{source_prefix}/{file2}",
filename=f"{source}/{source_prefix}/{file2}",
chunk_size=None,
num_max_attempts=1,
),
]
)
mock_subprocess.Popen.assert_called_once_with(
args=[
transform_script,
source,
destination,
timespan_start.replace(microsecond=0).isoformat(),
timespan_end.replace(microsecond=0).isoformat(),
],
stdout="pipe",
stderr="stdout",
close_fds=True,
)
mock_hook.return_value.upload.assert_has_calls(
[
mock.call(
bucket_name=destination_bucket,
filename=f"{destination}/{file1}",
object_name=f"{destination_prefix}/{file1}",
chunk_size=None,
num_max_attempts=1,
),
mock.call(
bucket_name=destination_bucket,
filename=f"{destination}/{file2}",
object_name=f"{destination_prefix}/{file2}",
chunk_size=None,
num_max_attempts=1,
),
]
)
@pytest.mark.parametrize(
("source_prefix", "dest_prefix", "inputs", "outputs"),
(
(
None,
None,
[Dataset(f"gs://{TEST_BUCKET}", "/")],
[Dataset(f"gs://{TEST_BUCKET}_dest", "/")],
),
(
None,
"dest_pre/",
[Dataset(f"gs://{TEST_BUCKET}", "/")],
[Dataset(f"gs://{TEST_BUCKET}_dest", "dest_pre")],
),
(
"source_pre/",
None,
[Dataset(f"gs://{TEST_BUCKET}", "source_pre")],
[Dataset(f"gs://{TEST_BUCKET}_dest", "/")],
),
(
"source_pre/",
"dest_pre/",
[Dataset(f"gs://{TEST_BUCKET}", "source_pre")],
[Dataset(f"gs://{TEST_BUCKET}_dest", "dest_pre")],
),
(
"source_pre",
"dest_pre",
[Dataset(f"gs://{TEST_BUCKET}", "/")],
[Dataset(f"gs://{TEST_BUCKET}_dest", "/")],
),
(
"dir1/source_pre",
"dir2/dest_pre",
[Dataset(f"gs://{TEST_BUCKET}", "dir1")],
[Dataset(f"gs://{TEST_BUCKET}_dest", "dir2")],
),
(
"",
"/",
[Dataset(f"gs://{TEST_BUCKET}", "/")],
[Dataset(f"gs://{TEST_BUCKET}_dest", "/")],
),
(
"source/a.txt",
"target/",
[Dataset(f"gs://{TEST_BUCKET}", "source/a.txt")],
[Dataset(f"gs://{TEST_BUCKET}_dest", "target")],
),
),
ids=(
"no prefixes",
"dest prefix only",
"source prefix only",
"both with ending slash",
"both without ending slash",
"both as directory with prefix",
"both empty or root",
"source prefix is file path",
),
)
@mock.patch("airflow.providers.google.cloud.operators.gcs.TemporaryDirectory")
@mock.patch("airflow.providers.google.cloud.operators.gcs.subprocess")
@mock.patch("airflow.providers.google.cloud.operators.gcs.GCSHook")
def test_get_openlineage_facets_on_complete(
self, mock_hook, mock_subprocess, mock_tempdir, source_prefix, dest_prefix, inputs, outputs
):
source_bucket = TEST_BUCKET
destination_bucket = TEST_BUCKET + "_dest"
destination = "destination"
file1 = "file1"
file2 = "file2"
timespan_start = datetime(2015, 2, 1, 15, 16, 17, 345, tzinfo=timezone.utc)
timespan_end = timespan_start + timedelta(hours=1)
context = dict(
logical_date=timespan_start,
data_interval_start=timespan_start,
data_interval_end=timespan_end,
ti=mock.Mock(),
task=mock.MagicMock(),
)
mock_tempdir.return_value.__enter__.side_effect = ["source", destination]
mock_hook.return_value.list_by_timespan.return_value = [
f"{source_prefix or ''}{file1}",
f"{source_prefix or ''}{file2}",
]
mock_proc = mock.MagicMock()
mock_proc.returncode = 0
mock_proc.stdout.readline = lambda: b""
mock_proc.wait.return_value = None
mock_popen = mock.MagicMock()
mock_popen.return_value.__enter__.return_value = mock_proc
mock_subprocess.Popen = mock_popen
mock_subprocess.PIPE = "pipe"
mock_subprocess.STDOUT = "stdout"
op = GCSTimeSpanFileTransformOperator(
task_id=TASK_ID,
source_bucket=source_bucket,
source_prefix=source_prefix,
source_gcp_conn_id="",
destination_bucket=destination_bucket,
destination_prefix=dest_prefix,
destination_gcp_conn_id="",
transform_script="script.py",
)
with mock.patch.object(Path, "glob") as path_glob:
path_glob.return_value.__iter__.return_value = [
Path(f"{destination}/{file1}"),
Path(f"{destination}/{file2}"),
]
op.execute(context=context)
lineage = op.get_openlineage_facets_on_complete(None)
assert len(lineage.inputs) == len(inputs)
assert len(lineage.outputs) == len(outputs)
assert all(element in lineage.inputs for element in inputs)
assert all(element in inputs for element in lineage.inputs)
assert all(element in lineage.outputs for element in outputs)
assert all(element in outputs for element in lineage.outputs)
| TestGCSTimeSpanFileTransformOperator |
python | getsentry__sentry | tests/sentry/utils/kvstore/test_common.py | {
"start": 269,
"end": 4103
} | class ____(Generic[K, V]):
store: KVStorage[K, V]
keys: Iterator[K]
values: Iterator[V]
@property
def items(self) -> Iterator[tuple[K, V]]:
return zip(self.keys, self.values)
@pytest.fixture(params=["bigtable", "cache/default", "memory", "memory+cachewrapper", "redis"])
def properties(request) -> Properties:
if request.param == "bigtable":
from tests.sentry.utils.kvstore.test_bigtable import create_store
return Properties(
create_store(request),
keys=(f"{i}" for i in itertools.count()),
values=(f"{i}".encode() for i in itertools.count()),
)
elif request.param.startswith("cache/"):
from sentry.utils.kvstore.cache import CacheKVStorage
# XXX: Currently only testing against the default cache is supported
# because testing against the Redis cache requires complex mocking of
# global state in ``sentry.utils.redis``.
[backend_label] = request.param.split("/")[1:]
if backend_label == "default":
from sentry.cache import default_cache as cache
else:
raise AssertionError("unknown cache backend label")
return Properties(
CacheKVStorage(cache),
keys=(f"kvstore/{i}" for i in itertools.count()),
values=itertools.count(),
)
elif request.param == "memory":
from sentry.utils.kvstore.memory import MemoryKVStorage
return Properties(
MemoryKVStorage(),
keys=itertools.count(),
values=itertools.count(),
)
elif request.param == "redis":
from redis import Redis
from sentry.utils.kvstore.redis import RedisKVStorage
return Properties(
RedisKVStorage(Redis(db=6)),
keys=(f"kvstore/{i}" for i in itertools.count()),
values=(f"{i}".encode() for i in itertools.count()),
)
elif request.param == "memory+cachewrapper":
from sentry.utils.kvstore.cache import CacheKeyWrapper
from sentry.utils.kvstore.memory import MemoryKVStorage
return Properties(
CacheKeyWrapper(MemoryKVStorage()),
keys=map(str, itertools.count()),
values=itertools.count(),
)
else:
raise AssertionError("unknown kvstore label")
def test_single_key_operations(properties: Properties) -> None:
store = properties.store
key, value = next(properties.keys), next(properties.values)
# Test setting a key with no prior value.
store.set(key, value)
assert store.get(key) == value
# Test overwriting a key with a prior value.
new_value = next(properties.values)
store.set(key, new_value)
assert store.get(key) == new_value
# Test overwriting a key with a new TTL.
new_value = next(properties.values)
store.set(key, new_value, ttl=timedelta(seconds=30))
assert store.get(key) == new_value
# Test deleting an existing key.
store.delete(key)
assert store.get(key) is None
# Test reading a missing key.
missing_key = next(properties.keys)
assert store.get(missing_key) is None
# Test deleting a missing key.
store.delete(missing_key)
def test_multiple_key_operations(properties: Properties) -> None:
store = properties.store
items = dict(itertools.islice(properties.items, 10))
for key, value in items.items():
store.set(key, value)
missing_keys = set(itertools.islice(properties.keys, 5))
all_keys = list(items.keys() | missing_keys)
# Test reading a combination of present and missing keys.
assert dict(store.get_many(all_keys)) == items
# Test deleting a combination of present and missing keys.
store.delete_many(all_keys)
assert dict(store.get_many(all_keys)) == {}
| Properties |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 67324,
"end": 67531
} | class ____(_PrintableStructure):
_fields_ = [
('isLicensed', c_uint8),
('licenseExpiry', c_nvmlVgpuLicenseExpiry_t),
('currentState', c_uint),
]
| c_nvmlVgpuLicenseInfo_t |
python | plotly__plotly.py | plotly/graph_objs/scattersmith/_stream.py | {
"start": 233,
"end": 3536
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattersmith"
_path_str = "scattersmith.stream"
_valid_props = {"maxpoints", "token"}
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattersmith.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super().__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattersmith.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattersmith.Stream`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("maxpoints", arg, maxpoints)
self._set_property("token", arg, token)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Stream |
python | django__django | tests/test_runner/tests.py | {
"start": 34150,
"end": 38046
} | class ____(unittest.TestCase):
def test_run_checks_raises(self):
"""
Teardown functions are run when run_checks() raises SystemCheckError.
"""
with (
mock.patch("django.test.runner.DiscoverRunner.setup_test_environment"),
mock.patch("django.test.runner.DiscoverRunner.setup_databases"),
mock.patch("django.test.runner.DiscoverRunner.build_suite"),
mock.patch(
"django.test.runner.DiscoverRunner.run_checks",
side_effect=SystemCheckError,
),
mock.patch(
"django.test.runner.DiscoverRunner.teardown_databases"
) as teardown_databases,
mock.patch(
"django.test.runner.DiscoverRunner.teardown_test_environment"
) as teardown_test_environment,
):
runner = DiscoverRunner(verbosity=0, interactive=False)
with self.assertRaises(SystemCheckError):
runner.run_tests(
["test_runner_apps.sample.tests_sample.TestDjangoTestCase"]
)
self.assertTrue(teardown_databases.called)
self.assertTrue(teardown_test_environment.called)
def test_run_checks_raises_and_teardown_raises(self):
"""
SystemCheckError is surfaced when run_checks() raises SystemCheckError
and teardown databases() raises ValueError.
"""
with (
mock.patch("django.test.runner.DiscoverRunner.setup_test_environment"),
mock.patch("django.test.runner.DiscoverRunner.setup_databases"),
mock.patch("django.test.runner.DiscoverRunner.build_suite"),
mock.patch(
"django.test.runner.DiscoverRunner.run_checks",
side_effect=SystemCheckError,
),
mock.patch(
"django.test.runner.DiscoverRunner.teardown_databases",
side_effect=ValueError,
) as teardown_databases,
mock.patch(
"django.test.runner.DiscoverRunner.teardown_test_environment"
) as teardown_test_environment,
):
runner = DiscoverRunner(verbosity=0, interactive=False)
with self.assertRaises(SystemCheckError):
runner.run_tests(
["test_runner_apps.sample.tests_sample.TestDjangoTestCase"]
)
self.assertTrue(teardown_databases.called)
self.assertFalse(teardown_test_environment.called)
def test_run_checks_passes_and_teardown_raises(self):
"""
Exceptions on teardown are surfaced if no exceptions happen during
run_checks().
"""
with (
mock.patch("django.test.runner.DiscoverRunner.setup_test_environment"),
mock.patch("django.test.runner.DiscoverRunner.setup_databases"),
mock.patch("django.test.runner.DiscoverRunner.build_suite"),
mock.patch("django.test.runner.DiscoverRunner.run_checks"),
mock.patch(
"django.test.runner.DiscoverRunner.teardown_databases",
side_effect=ValueError,
) as teardown_databases,
mock.patch(
"django.test.runner.DiscoverRunner.teardown_test_environment"
) as teardown_test_environment,
):
runner = DiscoverRunner(verbosity=0, interactive=False)
with self.assertRaises(ValueError):
# Suppress the output when running TestDjangoTestCase.
with mock.patch("sys.stderr"):
runner.run_tests(
["test_runner_apps.sample.tests_sample.TestDjangoTestCase"]
)
self.assertTrue(teardown_databases.called)
self.assertFalse(teardown_test_environment.called)
| RunTestsExceptionHandlingTests |
python | wireservice__csvkit | tests/utils.py | {
"start": 3370,
"end": 3708
} | class ____:
def test_invalid_column(self):
args = getattr(self, 'columns_args', []) + ['-c', '0', 'examples/dummy.csv']
output_file = io.StringIO()
utility = self.Utility(args, output_file)
with self.assertRaises(ColumnIdentifierError):
utility.run()
output_file.close()
| ColumnsTests |
python | fluentpython__example-code | 12-inheritance/diamond.py | {
"start": 60,
"end": 123
} | class ____(A):
def pong(self):
print('pong:', self)
| B |
python | django__django | tests/staticfiles_tests/test_finders.py | {
"start": 2600,
"end": 4362
} | class ____(SimpleTestCase):
"""
A few misc finder tests.
"""
def test_get_finder(self):
self.assertIsInstance(
finders.get_finder("django.contrib.staticfiles.finders.FileSystemFinder"),
finders.FileSystemFinder,
)
def test_get_finder_bad_classname(self):
with self.assertRaises(ImportError):
finders.get_finder("django.contrib.staticfiles.finders.FooBarFinder")
def test_get_finder_bad_module(self):
with self.assertRaises(ImportError):
finders.get_finder("foo.bar.FooBarFinder")
def test_cache(self):
finders.get_finder.cache_clear()
for n in range(10):
finders.get_finder("django.contrib.staticfiles.finders.FileSystemFinder")
cache_info = finders.get_finder.cache_info()
self.assertEqual(cache_info.hits, 9)
self.assertEqual(cache_info.currsize, 1)
def test_searched_locations(self):
finders.find("spam")
self.assertEqual(
finders.searched_locations,
[os.path.join(TEST_ROOT, "project", "documents")],
)
def test_searched_locations_find_all(self):
finders.find("spam", find_all=True)
self.assertEqual(
finders.searched_locations,
[os.path.join(TEST_ROOT, "project", "documents")],
)
@override_settings(MEDIA_ROOT="")
def test_location_empty(self):
msg = (
"The storage backend of the staticfiles finder "
"<class 'django.contrib.staticfiles.finders.DefaultStorageFinder'> "
"doesn't have a valid location."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
finders.DefaultStorageFinder()
| TestMiscFinder |
python | tensorflow__tensorflow | tensorflow/python/ops/math_ops_test.py | {
"start": 6757,
"end": 10292
} | class ____(test_util.TensorFlowTestCase):
def testReduceLogSumExp(self):
for dtype in [np.float16, np.float32, np.double]:
x_np = np.random.rand(5, 5).astype(dtype)
with test_util.use_gpu():
y_tf_np = math_ops.reduce_logsumexp(x_np)
y_np = np.log(np.sum(np.exp(x_np)))
self.assertAllClose(y_tf_np, y_np)
def testReductionIndices(self):
for dtype in [np.float16, np.float32, np.double]:
x_np = np.random.rand(5, 5).astype(dtype)
with test_util.use_gpu():
y_tf = math_ops.reduce_logsumexp(x_np, axis=[0])
y_np = np.log(np.sum(np.exp(x_np), axis=0))
self.assertShapeEqual(y_np, y_tf)
y_tf_np = self.evaluate(y_tf)
self.assertAllClose(y_tf_np, y_np)
def testReductionIndices2(self):
for dtype in [np.float16, np.float32, np.double]:
x_np = np.random.rand(5, 5).astype(dtype)
with test_util.use_gpu():
y_tf = math_ops.reduce_logsumexp(x_np, axis=0)
y_np = np.log(np.sum(np.exp(x_np), axis=0))
self.assertShapeEqual(y_np, y_tf)
y_tf_np = self.evaluate(y_tf)
self.assertAllClose(y_tf_np, y_np)
def testKeepDims(self):
for dtype in [np.float16, np.float32, np.double]:
x_np = np.random.rand(5, 5).astype(dtype)
with test_util.use_gpu():
y_tf_np = math_ops.reduce_logsumexp(x_np, keepdims=True)
self.assertEqual(y_tf_np.shape.rank, x_np.ndim)
y_np = np.log(np.sum(np.exp(x_np), keepdims=True))
self.assertAllClose(y_tf_np, y_np)
def testOverflow(self):
x = [1000, 1001, 1002, 1003]
for dtype in [np.float16, np.float32, np.double]:
x_np = np.array(x, dtype=dtype)
max_np = np.max(x_np)
with self.assertRaisesRegex(RuntimeWarning,
"overflow encountered in exp"):
out = np.log(np.sum(np.exp(x_np)))
if out == np.inf:
raise RuntimeWarning("overflow encountered in exp")
with test_util.use_gpu():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf_np = math_ops.reduce_logsumexp(x_tf)
y_np = np.log(np.sum(np.exp(x_np - max_np))) + max_np
self.assertAllClose(y_tf_np, y_np)
def testUnderflow(self):
x = [-1000, -1001, -1002, -1003]
for dtype in [np.float16, np.float32, np.double]:
x_np = np.array(x, dtype=dtype)
max_np = np.max(x_np)
with self.assertRaisesRegex(RuntimeWarning,
"divide by zero encountered in log"):
out = np.log(np.sum(np.exp(x_np)))
if out == -np.inf:
raise RuntimeWarning("divide by zero encountered in log")
with test_util.use_gpu():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf_np = math_ops.reduce_logsumexp(x_tf)
y_np = np.log(np.sum(np.exp(x_np - max_np))) + max_np
self.assertAllClose(y_tf_np, y_np)
def testInfinity(self):
with test_util.use_gpu():
res = math_ops.reduce_logsumexp(-np.inf)
self.assertEqual(-np.inf, self.evaluate(res))
def testRaggedTensor(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.double]:
x_rt = ragged_factory_ops.constant([[1, 2], [], [3, 4, 5]], dtype=dtype)
x_np = np.array(self.evaluate(x_rt.flat_values))
with test_util.use_gpu():
y_rt = math_ops.reduce_logsumexp(x_rt)
y_np = np.log(np.sum(np.exp(x_np - np.max(x_np)))) + np.max(x_np)
self.assertAllClose(y_rt, y_np)
@test_util.run_all_in_graph_and_eager_modes
| LogSumExpTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol1.py | {
"start": 232,
"end": 296
} | class ____(Protocol[T_co]):
def content(self) -> T_co: ...
| Box |
python | huggingface__transformers | src/transformers/models/bridgetower/modeling_bridgetower.py | {
"start": 11619,
"end": 14047
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.embeddings = BridgeTowerVisionEmbeddings(config)
self.ln_pre = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.transformer = BridgeTowerTransformer(config)
self.ln_post = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.share_layernorm = config.share_layernorm
if not config.share_layernorm:
self.ln_separate = nn.ModuleList(
[nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) for _ in range(config.num_hidden_layers)]
)
def forward(
self,
pixel_values: torch.Tensor,
attention_mask,
interpolate_pos_encoding: bool = False,
):
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding)
hidden_states = self.ln_pre(hidden_states)
# NLD -> LND
hidden_states = hidden_states.permute(1, 0, 2)
hidden_states = self.transformer(hidden_states, attention_mask)
# shape = [num_hidden_layers, hidden_size, *, grid ** 2]
hidden_states = torch.stack(hidden_states, dim=0)
# shape = [num_hidden_layers, *, hidden_size, grid ** 2]
hidden_states = hidden_states.permute(0, 2, 1, 3)
if self.share_layernorm:
hidden_states = self.ln_post(hidden_states)
else:
hidden_states_stack = []
for hidden_states, ln in zip(hidden_states, self.ln_separate):
hidden_states = ln(hidden_states)
hidden_states_stack.append(hidden_states)
# shape = [num_hidden_layers, *, hidden_size, grid ** 2]
hidden_states = torch.stack(hidden_states_stack, dim=0)
return hidden_states
def forward_pre(
self,
pixel_values: torch.Tensor,
interpolate_pos_encoding: bool = False,
):
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
hidden_states = self.ln_pre(hidden_states)
# NLD -> LND
hidden_states = hidden_states.permute(1, 0, 2)
return hidden_states
def forward_post(self, hidden_state: torch.Tensor):
visual_output_post = hidden_state.permute(1, 0, 2)
visual_output_post = self.ln_post(visual_output_post)
return visual_output_post
| BridgeTowerVisionTransformer |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/security.py | {
"start": 7529,
"end": 7767
} | class ____(PermittedDagFilter):
"""A parameter that filters the permitted XComs for the user."""
def to_orm(self, select: Select) -> Select:
return select.where(XComModel.dag_id.in_(self.value or set()))
| PermittedXComFilter |
python | kamyu104__LeetCode-Solutions | Python/strictly-palindromic-number.py | {
"start": 36,
"end": 187
} | class ____(object):
def isStrictlyPalindromic(self, n):
"""
:type n: int
:rtype: bool
"""
return False
| Solution |
python | encode__django-rest-framework | rest_framework/fields.py | {
"start": 57661,
"end": 58488
} | class ____(FileField):
default_error_messages = {
'invalid_image': _(
'Upload a valid image. The file you uploaded was either not an image or a corrupted image.'
),
}
def __init__(self, **kwargs):
self._DjangoImageField = kwargs.pop('_DjangoImageField', DjangoImageField)
super().__init__(**kwargs)
def to_internal_value(self, data):
# Image validation is a bit grungy, so we'll just outright
# defer to Django's implementation so we don't need to
# consider it, or treat PIL as a test dependency.
file_object = super().to_internal_value(data)
django_field = self._DjangoImageField()
django_field.error_messages = self.error_messages
return django_field.clean(file_object)
# Composite field types...
| ImageField |
python | numba__numba | numba/tests/test_errorhandling.py | {
"start": 1954,
"end": 6366
} | class ____(unittest.TestCase):
def test_use_of_exception_for_flow_control(self):
# constant inference uses exceptions with no Loc specified to determine
# flow control, this asserts that the construction of the lowering
# error context handler works in the case of an exception with no Loc
# specified. See issue #3135.
@njit
def fn(x):
return 10**x
a = np.array([1.0],dtype=np.float64)
fn(a) # should not raise
def test_commented_func_definition_is_not_a_definition(self):
# See issue #4056, the commented def should not be found as the
# definition for reporting purposes when creating the synthetic
# traceback because it is commented! Use of def in docstring would also
# cause this issue hence is tested.
def foo_commented():
#def commented_definition()
raise Exception('test_string')
def foo_docstring():
""" def docstring containing def might match function definition!"""
raise Exception('test_string')
for func in (foo_commented, foo_docstring):
with self.assertRaises(Exception) as raises:
func()
self.assertIn("test_string", str(raises.exception))
def test_use_of_ir_unknown_loc(self):
# for context see # 3390
class TestPipeline(CompilerBase):
def define_pipelines(self):
name = 'bad_DCE_pipeline'
pm = PassManager(name)
pm.add_pass(TranslateByteCode, "analyzing bytecode")
pm.add_pass(FixupArgs, "fix up args")
pm.add_pass(IRProcessing, "processing IR")
# remove dead before type inference so that the Arg node is
# removed and the location of the arg cannot be found
pm.add_pass(DeadCodeElimination, "DCE")
# typing
pm.add_pass(NopythonTypeInference, "nopython frontend")
pm.add_pass(NativeLowering, "native lowering")
pm.add_pass(NoPythonBackend, "nopython mode backend")
pm.finalize()
return [pm]
@njit(pipeline_class=TestPipeline)
def f(a):
return 0
with self.assertRaises(errors.TypingError) as raises:
f(iter([1,2])) # use a type that Numba doesn't recognize
expected = 'File "unknown location", line 0:'
self.assertIn(expected, str(raises.exception))
def check_write_to_globals(self, func):
with self.assertRaises(errors.TypingError) as raises:
func()
expected = ["The use of a", "in globals, is not supported as globals"]
for ex in expected:
self.assertIn(ex, str(raises.exception))
def test_handling_of_write_to_reflected_global(self):
from numba.tests.errorhandling_usecases import global_reflected_write
self.check_write_to_globals(njit(global_reflected_write))
def test_handling_of_write_to_typed_dict_global(self):
from numba.tests.errorhandling_usecases import global_dict_write
self.check_write_to_globals(njit(global_dict_write))
@skip_parfors_unsupported
def test_handling_forgotten_numba_internal_import(self):
@njit(parallel=True)
def foo():
for i in prange(10): # noqa: F821 prange is not imported
pass
with self.assertRaises(errors.TypingError) as raises:
foo()
expected = ("'prange' looks like a Numba internal function, "
"has it been imported")
self.assertIn(expected, str(raises.exception))
def test_handling_unsupported_generator_expression(self):
def foo():
(x for x in range(10))
expected = "The use of yield in a closure is unsupported."
for dec in jit(forceobj=True), njit:
with self.assertRaises(errors.UnsupportedError) as raises:
dec(foo)()
self.assertIn(expected, str(raises.exception))
def test_handling_undefined_variable(self):
@njit
def foo():
return a # noqa: F821
expected = "NameError: name 'a' is not defined"
with self.assertRaises(errors.TypingError) as raises:
foo()
self.assertIn(expected, str(raises.exception))
| TestMiscErrorHandling |
python | PrefectHQ__prefect | tests/test_flow_engine.py | {
"start": 53217,
"end": 57260
} | class ____:
async def test_generator_flow(self):
"""
Test for generator behavior including StopIteration
"""
@flow
def g():
yield 1
yield 2
gen = g()
assert next(gen) == 1
assert next(gen) == 2
with pytest.raises(StopIteration):
next(gen)
async def test_generator_flow_requires_return_type_result(self):
@flow
def g():
yield 1
with pytest.raises(
ValueError, match="The return_type for a generator flow must be 'result'"
):
for i in g(return_state=True):
pass
async def test_generator_flow_states(self, prefect_client: PrefectClient):
"""
Test for generator behavior including StopIteration
"""
@flow
def g():
yield FlowRunContext.get().flow_run.id
yield 2
gen = g()
tr_id = next(gen)
tr = await prefect_client.read_flow_run(tr_id)
assert tr.state.is_running()
# exhaust the generator
for _ in gen:
pass
tr = await prefect_client.read_flow_run(tr_id)
assert tr.state.is_completed()
async def test_generator_flow_with_return(self):
"""
If a generator returns, the return value is trapped
in its StopIteration error
"""
@flow
def g():
yield 1
return 2
gen = g()
assert next(gen) == 1
with pytest.raises(StopIteration) as exc_info:
next(gen)
assert exc_info.value.value == 2
async def test_generator_flow_with_exception(self):
@flow
def g():
yield 1
raise ValueError("xyz")
gen = g()
assert next(gen) == 1
with pytest.raises(ValueError, match="xyz"):
next(gen)
async def test_generator_flow_with_exception_is_failed(
self, prefect_client: PrefectClient, events_pipeline
):
@task
def g():
yield TaskRunContext.get().task_run.id
raise ValueError("xyz")
gen = g()
tr_id = next(gen)
with pytest.raises(ValueError, match="xyz"):
next(gen)
await events_pipeline.process_events()
tr = await prefect_client.read_task_run(tr_id)
assert tr.state.is_failed()
async def test_generator_retries(self):
"""
Test that a generator can retry and will re-emit its events
"""
@flow(retries=2)
def g():
yield 1
yield 2
raise ValueError()
values = []
try:
for v in g():
values.append(v)
except ValueError:
pass
assert values == [1, 2, 1, 2, 1, 2]
async def test_generator_timeout(self):
"""
Test that a generator can timeout
"""
@flow(timeout_seconds=0.1)
def g():
yield 1
time.sleep(2)
yield 2
values = []
with pytest.raises(TimeoutError):
for v in g():
values.append(v)
assert values == [1]
async def test_generator_doesnt_retry_on_generator_exception(self):
"""
Test that a generator doesn't retry for normal generator exceptions like StopIteration
"""
@flow(retries=2)
def g():
yield 1
yield 2
values = []
try:
for v in g():
values.append(v)
except ValueError:
pass
assert values == [1, 2]
async def test_with_default_pydantic_model_dict_params(self):
class TheModel(pydantic.BaseModel):
x: list[int]
@flow
async def g(required: str, model: TheModel = {"x": [1, 2, 3]}): # type: ignore
for i in model.x:
yield i
assert [i async for i in g("hello")] == [1, 2, 3]
| TestGenerators |
python | pypa__pip | src/pip/_vendor/pkg_resources/__init__.py | {
"start": 16696,
"end": 17663
} | class ____(Protocol):
def has_metadata(self, name: str) -> bool:
"""Does the package's distribution contain the named metadata?"""
def get_metadata(self, name: str) -> str:
"""The named metadata resource as a string"""
def get_metadata_lines(self, name: str) -> Iterator[str]:
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(self, name: str) -> bool:
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(self, name: str) -> list[str]:
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(self, script_name: str, namespace: dict[str, Any]) -> None:
"""Execute the named script in the supplied namespace dictionary"""
| IMetadataProvider |
python | realpython__materials | python-class/animals.py | {
"start": 621,
"end": 703
} | class ____(Fish):
def swim(self):
print("The salmon is swimming")
| Salmon |
python | pypa__pip | src/pip/_vendor/rich/bar.py | {
"start": 502,
"end": 3231
} | class ____(JupyterMixin):
"""Renders a solid block bar.
Args:
size (float): Value for the end of the bar.
begin (float): Begin point (between 0 and size, inclusive).
end (float): End point (between 0 and size, inclusive).
width (int, optional): Width of the bar, or ``None`` for maximum width. Defaults to None.
color (Union[Color, str], optional): Color of the bar. Defaults to "default".
bgcolor (Union[Color, str], optional): Color of bar background. Defaults to "default".
"""
def __init__(
self,
size: float,
begin: float,
end: float,
*,
width: Optional[int] = None,
color: Union[Color, str] = "default",
bgcolor: Union[Color, str] = "default",
):
self.size = size
self.begin = max(begin, 0)
self.end = min(end, size)
self.width = width
self.style = Style(color=color, bgcolor=bgcolor)
def __repr__(self) -> str:
return f"Bar({self.size}, {self.begin}, {self.end})"
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
width = min(
self.width if self.width is not None else options.max_width,
options.max_width,
)
if self.begin >= self.end:
yield Segment(" " * width, self.style)
yield Segment.line()
return
prefix_complete_eights = int(width * 8 * self.begin / self.size)
prefix_bar_count = prefix_complete_eights // 8
prefix_eights_count = prefix_complete_eights % 8
body_complete_eights = int(width * 8 * self.end / self.size)
body_bar_count = body_complete_eights // 8
body_eights_count = body_complete_eights % 8
# When start and end fall into the same cell, we ideally should render
# a symbol that's "center-aligned", but there is no good symbol in Unicode.
# In this case, we fall back to right-aligned block symbol for simplicity.
prefix = " " * prefix_bar_count
if prefix_eights_count:
prefix += BEGIN_BLOCK_ELEMENTS[prefix_eights_count]
body = FULL_BLOCK * body_bar_count
if body_eights_count:
body += END_BLOCK_ELEMENTS[body_eights_count]
suffix = " " * (width - len(body))
yield Segment(prefix + body[len(prefix) :] + suffix, self.style)
yield Segment.line()
def __rich_measure__(
self, console: Console, options: ConsoleOptions
) -> Measurement:
return (
Measurement(self.width, self.width)
if self.width is not None
else Measurement(4, options.max_width)
)
| Bar |
python | pytorch__pytorch | torch/_inductor/codegen/common.py | {
"start": 49184,
"end": 49290
} | class ____(NamedTuple):
inner_name: str
other_names: list[str]
@dataclasses.dataclass
| InplacedBuffer |
python | apache__airflow | providers/google/tests/unit/google/cloud/links/test_dataplex.py | {
"start": 5343,
"end": 6433
} | class ____:
@pytest.mark.db_test
def test_get_link(self, create_task_instance_of_operator, session, mock_supervisor_comms):
expected_url = EXPECTED_DATAPLEX_TASKS_LINK
link = DataplexTasksLink()
ti = create_task_instance_of_operator(
DataplexListTasksOperator,
dag_id="test_link_dag",
task_id="test_link_task",
region=TEST_LOCATION,
lake_id=TEST_LAKE_ID,
project_id=TEST_PROJECT_ID,
)
session.add(ti)
session.commit()
link.persist(context={"ti": ti, "task": ti.task})
if AIRFLOW_V_3_0_PLUS and mock_supervisor_comms:
mock_supervisor_comms.send.return_value = XComResult(
key="key",
value={
"project_id": ti.task.project_id,
"lake_id": ti.task.lake_id,
"region": ti.task.region,
},
)
actual_url = link.get_link(operator=ti.task, ti_key=ti.key)
assert actual_url == expected_url
| TestDataplexTasksLink |
python | pydata__xarray | xarray/backends/zarr.py | {
"start": 61244,
"end": 72986
} | class ____(BackendEntrypoint):
"""
Backend for ".zarr" files based on the zarr package.
For more information about the underlying library, visit:
https://zarr.readthedocs.io/en/stable
See Also
--------
backends.ZarrStore
"""
description = "Open zarr files (.zarr) using zarr in Xarray"
url = "https://docs.xarray.dev/en/stable/generated/xarray.backends.ZarrBackendEntrypoint.html"
supports_groups = True
def guess_can_open(self, filename_or_obj: T_PathFileOrDataStore) -> bool:
if isinstance(filename_or_obj, str | os.PathLike):
# allow a trailing slash to account for an autocomplete
# adding it.
_, ext = os.path.splitext(str(filename_or_obj).rstrip("/"))
return ext in [".zarr"]
return False
def open_dataset(
self,
filename_or_obj: T_PathFileOrDataStore,
*,
mask_and_scale=True,
decode_times=True,
concat_characters=True,
decode_coords=True,
drop_variables: str | Iterable[str] | None = None,
use_cftime=None,
decode_timedelta=None,
group=None,
mode="r",
synchronizer=None,
consolidated=None,
chunk_store=None,
storage_options=None,
zarr_version=None,
zarr_format=None,
store=None,
engine=None,
use_zarr_fill_value_as_mask=None,
cache_members: bool = True,
) -> Dataset:
filename_or_obj = _normalize_path(filename_or_obj)
if not store:
store = ZarrStore.open_group(
filename_or_obj,
group=group,
mode=mode,
synchronizer=synchronizer,
consolidated=consolidated,
consolidate_on_close=False,
chunk_store=chunk_store,
storage_options=storage_options,
zarr_version=zarr_version,
use_zarr_fill_value_as_mask=None,
zarr_format=zarr_format,
cache_members=cache_members,
)
store_entrypoint = StoreBackendEntrypoint()
with close_on_error(store):
ds = store_entrypoint.open_dataset(
store,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
concat_characters=concat_characters,
decode_coords=decode_coords,
drop_variables=drop_variables,
use_cftime=use_cftime,
decode_timedelta=decode_timedelta,
)
return ds
def open_datatree(
self,
filename_or_obj: T_PathFileOrDataStore,
*,
mask_and_scale=True,
decode_times=True,
concat_characters=True,
decode_coords=True,
drop_variables: str | Iterable[str] | None = None,
use_cftime=None,
decode_timedelta=None,
group: str | None = None,
mode="r",
synchronizer=None,
consolidated=None,
chunk_store=None,
storage_options=None,
zarr_version=None,
zarr_format=None,
) -> DataTree:
filename_or_obj = _normalize_path(filename_or_obj)
groups_dict = self.open_groups_as_dict(
filename_or_obj=filename_or_obj,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
concat_characters=concat_characters,
decode_coords=decode_coords,
drop_variables=drop_variables,
use_cftime=use_cftime,
decode_timedelta=decode_timedelta,
group=group,
mode=mode,
synchronizer=synchronizer,
consolidated=consolidated,
chunk_store=chunk_store,
storage_options=storage_options,
zarr_version=zarr_version,
zarr_format=zarr_format,
)
return datatree_from_dict_with_io_cleanup(groups_dict)
def open_groups_as_dict(
self,
filename_or_obj: T_PathFileOrDataStore,
*,
mask_and_scale=True,
decode_times=True,
concat_characters=True,
decode_coords=True,
drop_variables: str | Iterable[str] | None = None,
use_cftime=None,
decode_timedelta=None,
group: str | None = None,
mode="r",
synchronizer=None,
consolidated=None,
chunk_store=None,
storage_options=None,
zarr_version=None,
zarr_format=None,
) -> dict[str, Dataset]:
filename_or_obj = _normalize_path(filename_or_obj)
# Check for a group and make it a parent if it exists
if group:
parent = str(NodePath("/") / NodePath(group))
else:
parent = str(NodePath("/"))
stores = ZarrStore.open_store(
filename_or_obj,
group=parent,
mode=mode,
synchronizer=synchronizer,
consolidated=consolidated,
consolidate_on_close=False,
chunk_store=chunk_store,
storage_options=storage_options,
zarr_version=zarr_version,
zarr_format=zarr_format,
)
groups_dict = {}
for path_group, store in stores.items():
store_entrypoint = StoreBackendEntrypoint()
with close_on_error(store):
group_ds = store_entrypoint.open_dataset(
store,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
concat_characters=concat_characters,
decode_coords=decode_coords,
drop_variables=drop_variables,
use_cftime=use_cftime,
decode_timedelta=decode_timedelta,
)
if group:
group_name = str(NodePath(path_group).relative_to(parent))
else:
group_name = str(NodePath(path_group))
groups_dict[group_name] = group_ds
return groups_dict
def _iter_zarr_groups(root: ZarrGroup, parent: str = "/") -> Iterable[str]:
parent_nodepath = NodePath(parent)
yield str(parent_nodepath)
for path, group in root.groups():
gpath = parent_nodepath / path
yield from _iter_zarr_groups(group, parent=str(gpath))
def _get_open_params(
store,
mode,
synchronizer,
group,
consolidated,
consolidate_on_close,
chunk_store,
storage_options,
zarr_version,
use_zarr_fill_value_as_mask,
zarr_format,
):
if TYPE_CHECKING:
import zarr
else:
zarr = attempt_import("zarr")
# zarr doesn't support pathlib.Path objects yet. zarr-python#601
if isinstance(store, os.PathLike):
store = os.fspath(store)
open_kwargs = dict(
# mode='a-' is a handcrafted xarray specialty
mode="a" if mode == "a-" else mode,
synchronizer=synchronizer,
path=group,
)
open_kwargs["storage_options"] = storage_options
zarr_format = _handle_zarr_version_or_format(
zarr_version=zarr_version, zarr_format=zarr_format
)
if _zarr_v3():
open_kwargs["zarr_format"] = zarr_format
else:
open_kwargs["zarr_version"] = zarr_format
if chunk_store is not None:
open_kwargs["chunk_store"] = chunk_store
if consolidated is None:
consolidated = False
if _zarr_v3():
# TODO: replace AssertionError after https://github.com/zarr-developers/zarr-python/issues/2821 is resolved
missing_exc = AssertionError
else:
missing_exc = zarr.errors.GroupNotFoundError
if _zarr_v3():
# zarr 3.0.8 and earlier did not support this property - it was effectively assumed true
if not getattr(store, "supports_consolidated_metadata", True):
consolidated = consolidate_on_close = False
if consolidated in [None, True]:
# open the root of the store, in case there is metadata consolidated there
group = open_kwargs.pop("path")
if consolidated:
# TODO: an option to pass the metadata_key keyword
zarr_root_group = zarr.open_consolidated(store, **open_kwargs)
elif consolidated is None:
# same but with more error handling in case no consolidated metadata found
try:
zarr_root_group = zarr.open_consolidated(store, **open_kwargs)
except (ValueError, KeyError):
# ValueError in zarr-python 3.x, KeyError in 2.x.
try:
zarr_root_group = zarr.open_group(store, **open_kwargs)
emit_user_level_warning(
"Failed to open Zarr store with consolidated metadata, "
"but successfully read with non-consolidated metadata. "
"This is typically much slower for opening a dataset. "
"To silence this warning, consider:\n"
"1. Consolidating metadata in this existing store with "
"zarr.consolidate_metadata().\n"
"2. Explicitly setting consolidated=False, to avoid trying "
"to read consolidate metadata, or\n"
"3. Explicitly setting consolidated=True, to raise an "
"error in this case instead of falling back to try "
"reading non-consolidated metadata.",
RuntimeWarning,
)
except missing_exc as err:
raise FileNotFoundError(
f"No such file or directory: '{store}'"
) from err
# but the user should still receive a DataTree whose root is the group they asked for
if group and group != "/":
zarr_group = zarr_root_group[group.removeprefix("/")]
else:
zarr_group = zarr_root_group
else:
if _zarr_v3():
# we have determined that we don't want to use consolidated metadata
# so we set that to False to avoid trying to read it
open_kwargs["use_consolidated"] = False
zarr_group = zarr.open_group(store, **open_kwargs)
close_store_on_close = zarr_group.store is not store
# we use this to determine how to handle fill_value
is_zarr_v3_format = _zarr_v3() and zarr_group.metadata.zarr_format == 3
if use_zarr_fill_value_as_mask is None:
if is_zarr_v3_format:
# for new data, we use a better default
use_zarr_fill_value_as_mask = False
else:
# this was the default for v2 and should apply to most existing Zarr data
use_zarr_fill_value_as_mask = True
return (
zarr_group,
consolidate_on_close,
close_store_on_close,
use_zarr_fill_value_as_mask,
)
def _handle_zarr_version_or_format(
*, zarr_version: ZarrFormat | None, zarr_format: ZarrFormat | None
) -> ZarrFormat | None:
"""handle the deprecated zarr_version kwarg and return zarr_format"""
if (
zarr_format is not None
and zarr_version is not None
and zarr_format != zarr_version
):
raise ValueError(
f"zarr_format {zarr_format} does not match zarr_version {zarr_version}, please only set one"
)
if zarr_version is not None:
emit_user_level_warning(
"zarr_version is deprecated, use zarr_format", FutureWarning
)
return zarr_version
return zarr_format
BACKEND_ENTRYPOINTS["zarr"] = ("zarr", ZarrBackendEntrypoint)
| ZarrBackendEntrypoint |
python | pyinstaller__pyinstaller | PyInstaller/lib/modulegraph/modulegraph.py | {
"start": 22799,
"end": 23279
} | class ____ (BadModule):
def __init__(self, relative_path, from_name):
identifier = relative_path
if relative_path.endswith('.'):
identifier += from_name
else:
identifier += '.' + from_name
super(InvalidRelativeImport, self).__init__(identifier)
self.relative_path = relative_path
self.from_name = from_name
def infoTuple(self):
return (self.relative_path, self.from_name)
| InvalidRelativeImport |
python | astropy__astropy | astropy/io/ascii/fixedwidth.py | {
"start": 15649,
"end": 17292
} | class ____(FixedWidth):
"""Fixed width table which has two header lines.
The first header line defines the column names and the second implicitly
defines the column positions.
Examples::
# Typical case with column extent defined by ---- under column names.
col1 col2 <== header_start = 0
----- ------------ <== position_line = 1, position_char = "-"
1 bee flies <== data_start = 2
2 fish swims
# Pretty-printed table
+------+------------+
| Col1 | Col2 |
+------+------------+
| 1.2 | "hello" |
| 2.4 | there world|
+------+------------+
See the :ref:`astropy:fixed_width_gallery` for specific usage examples.
"""
_format_name = "fixed_width_two_line"
_description = "Fixed width with second header line"
data_class = FixedWidthTwoLineData
header_class = FixedWidthTwoLineHeader
def __init__(
self,
position_line=None,
position_char="-",
delimiter_pad=None,
bookend=False,
header_rows=None,
):
if len(position_char) != 1:
raise ValueError(
f'Position_char="{position_char}" must be a single character'
)
super().__init__(
delimiter_pad=delimiter_pad, bookend=bookend, header_rows=header_rows
)
if position_line is None:
position_line = len(self.header.header_rows)
self.header.position_line = position_line
self.header.position_char = position_char
self.data.start_line = position_line + 1
| FixedWidthTwoLine |
python | readthedocs__readthedocs.org | readthedocs/core/views/__init__.py | {
"start": 1041,
"end": 1430
} | class ____(View):
"""Conditionally redirect to dashboard or login page."""
def get(self, request, *args, **kwargs):
# Redirect to user dashboard for logged in users
if request.user.is_authenticated:
return redirect(reverse("projects_dashboard"))
# Redirect to login page if unauthed
return redirect(reverse("account_login"))
| HomepageView |
python | walkccc__LeetCode | solutions/1115. Print FooBar Alternately/1115.py | {
"start": 34,
"end": 504
} | class ____:
def __init__(self, n):
self.n = n
self.fooSemaphore = Semaphore(1)
self.barSemaphore = Semaphore(0)
def foo(self, printFoo: 'Callable[[], None]') -> None:
for _ in range(self.n):
self.fooSemaphore.acquire()
printFoo()
self.barSemaphore.release()
def bar(self, printBar: 'Callable[[], None]') -> None:
for _ in range(self.n):
self.barSemaphore.acquire()
printBar()
self.fooSemaphore.release()
| FooBar |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 84371,
"end": 84971
} | class ____(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
| BasicRDSTest |
python | openai__openai-python | examples/parsing_stream.py | {
"start": 159,
"end": 1036
} | class ____(BaseModel):
steps: List[Step]
final_answer: str
client = OpenAI()
with client.chat.completions.stream(
model="gpt-4o-2024-08-06",
messages=[
{"role": "system", "content": "You are a helpful math tutor."},
{"role": "user", "content": "solve 8x + 31 = 2"},
],
response_format=MathResponse,
) as stream:
for event in stream:
if event.type == "content.delta":
print(event.delta, end="", flush=True)
elif event.type == "content.done":
print("\n")
if event.parsed is not None:
print(f"answer: {event.parsed.final_answer}")
elif event.type == "refusal.delta":
print(event.delta, end="", flush=True)
elif event.type == "refusal.done":
print()
print("---------------")
rich.print(stream.get_final_completion())
| MathResponse |
python | walkccc__LeetCode | solutions/1328. Break a Palindrome/1328.py | {
"start": 0,
"end": 308
} | class ____:
def breakPalindrome(self, palindrome: str) -> str:
if len(palindrome) == 1:
return ''
ans = list(palindrome)
for i in range(len(palindrome) // 2):
if palindrome[i] != 'a':
ans[i] = 'a'
return ''.join(ans)
ans[-1] = 'b'
return ''.join(ans)
| Solution |
python | MongoEngine__mongoengine | tests/fields/test_fields.py | {
"start": 891,
"end": 61269
} | class ____(MongoDBTestCase):
def test_constructor_set_historical_behavior_is_kept(self):
class MyDoc(Document):
oid = ObjectIdField()
doc = MyDoc()
doc.oid = str(ObjectId())
assert isinstance(doc.oid, str)
# not modified on save (historical behavior)
doc.save()
assert isinstance(doc.oid, str)
# reloading goes through constructor so it is expected to go through to_python
doc.reload()
assert isinstance(doc.oid, ObjectId)
def test_constructor_set_list_field_historical_behavior_is_kept(self):
# Although the behavior is not consistent between regular field and a ListField
# This is the historical behavior so we must make sure we don't modify it (unless consciously done of course)
class MyOIDSDoc(Document):
oids = ListField(ObjectIdField())
# constructor goes through to_python so casting occurs
doc = MyOIDSDoc(oids=[str(ObjectId())])
assert isinstance(doc.oids[0], ObjectId)
# constructor goes through to_python so casting occurs
doc = MyOIDSDoc()
doc.oids = [str(ObjectId())]
assert isinstance(doc.oids[0], str)
doc.save()
assert isinstance(doc.oids[0], str)
# reloading goes through constructor so it is expected to go through to_python
# and cast
doc.reload()
assert isinstance(doc.oids[0], ObjectId)
def test_default_values_nothing_set(self):
"""Ensure that default field values are used when creating
a document.
"""
class Person(Document):
name = StringField()
age = IntField(default=30, required=False)
userid = StringField(default=lambda: "test", required=True)
created = DateTimeField(default=datetime.datetime.utcnow)
day = DateField(default=datetime.date.today)
person = Person(name="Ross")
# Confirm saving now would store values
data_to_be_saved = sorted(person.to_mongo().keys())
assert data_to_be_saved == ["age", "created", "day", "name", "userid"]
assert person.validate() is None
assert person.name == person.name
assert person.age == person.age
assert person.userid == person.userid
assert person.created == person.created
assert person.day == person.day
assert person._data["name"] == person.name
assert person._data["age"] == person.age
assert person._data["userid"] == person.userid
assert person._data["created"] == person.created
assert person._data["day"] == person.day
# Confirm introspection changes nothing
data_to_be_saved = sorted(person.to_mongo().keys())
assert data_to_be_saved == ["age", "created", "day", "name", "userid"]
def test_custom_field_validation_raise_deprecated_error_when_validation_return_something(
self,
):
# Covers introduction of a breaking change in the validation parameter (0.18)
def _not_empty(z):
return bool(z)
class Person(Document):
name = StringField(validation=_not_empty)
Person.drop_collection()
error = (
"validation argument for `name` must not return anything, "
"it should raise a ValidationError if validation fails"
)
with pytest.raises(DeprecatedError) as exc_info:
Person(name="").validate()
assert str(exc_info.value) == error
with pytest.raises(DeprecatedError) as exc_info:
Person(name="").save()
assert str(exc_info.value) == error
def test_custom_field_validation_raise_validation_error(self):
def _not_empty(z):
if not z:
raise ValidationError("cantbeempty")
class Person(Document):
name = StringField(validation=_not_empty)
Person.drop_collection()
with pytest.raises(ValidationError) as exc_info:
Person(name="").validate()
assert "ValidationError (Person:None) (cantbeempty: ['name'])" == str(
exc_info.value
)
Person(name="garbage").validate()
Person(name="garbage").save()
def test_default_values_set_to_None(self):
"""Ensure that default field values are used even when
we explicitly initialize the doc with None values.
"""
class Person(Document):
name = StringField()
age = IntField(default=30, required=False)
userid = StringField(default=lambda: "test", required=True)
created = DateTimeField(default=datetime.datetime.utcnow)
# Trying setting values to None
person = Person(name=None, age=None, userid=None, created=None)
# Confirm saving now would store values
data_to_be_saved = sorted(person.to_mongo().keys())
assert data_to_be_saved == ["age", "created", "userid"]
assert person.validate() is None
assert person.name == person.name
assert person.age == person.age
assert person.userid == person.userid
assert person.created == person.created
assert person._data["name"] == person.name
assert person._data["age"] == person.age
assert person._data["userid"] == person.userid
assert person._data["created"] == person.created
# Confirm introspection changes nothing
data_to_be_saved = sorted(person.to_mongo().keys())
assert data_to_be_saved == ["age", "created", "userid"]
def test_default_values_when_setting_to_None(self):
"""Ensure that default field values are used when creating
a document.
"""
class Person(Document):
name = StringField()
age = IntField(default=30, required=False)
userid = StringField(default=lambda: "test", required=True)
created = DateTimeField(default=datetime.datetime.utcnow)
person = Person()
person.name = None
person.age = None
person.userid = None
person.created = None
# Confirm saving now would store values
data_to_be_saved = sorted(person.to_mongo().keys())
assert data_to_be_saved == ["age", "created", "userid"]
assert person.validate() is None
assert person.name is None
assert person.age == 30
assert person.userid == "test"
assert isinstance(person.created, datetime.datetime)
assert person._data["name"] == person.name
assert person._data["age"] == person.age
assert person._data["userid"] == person.userid
assert person._data["created"] == person.created
# Confirm introspection changes nothing
data_to_be_saved = sorted(person.to_mongo().keys())
assert data_to_be_saved == ["age", "created", "userid"]
def test_default_value_is_not_used_when_changing_value_to_empty_list_for_strict_doc(
self,
):
"""List field with default can be set to the empty list (strict)"""
# Issue #1733
class Doc(Document):
x = ListField(IntField(), default=lambda: [42])
doc = Doc(x=[1]).save()
doc.x = []
doc.save()
reloaded = Doc.objects.get(id=doc.id)
assert reloaded.x == []
def test_default_value_is_not_used_when_changing_value_to_empty_list_for_dyn_doc(
self,
):
"""List field with default can be set to the empty list (dynamic)"""
# Issue #1733
class Doc(DynamicDocument):
x = ListField(IntField(), default=lambda: [42])
doc = Doc(x=[1]).save()
doc.x = []
doc.y = 2 # Was triggering the bug
doc.save()
reloaded = Doc.objects.get(id=doc.id)
assert reloaded.x == []
def test_default_values_when_deleting_value(self):
"""Ensure that default field values are used after non-default
values are explicitly deleted.
"""
class Person(Document):
name = StringField()
age = IntField(default=30, required=False)
userid = StringField(default=lambda: "test", required=True)
created = DateTimeField(default=datetime.datetime.utcnow)
person = Person(
name="Ross",
age=50,
userid="different",
created=datetime.datetime(2014, 6, 12),
)
del person.name
del person.age
del person.userid
del person.created
data_to_be_saved = sorted(person.to_mongo().keys())
assert data_to_be_saved == ["age", "created", "userid"]
assert person.validate() is None
assert person.name is None
assert person.age == 30
assert person.userid == "test"
assert isinstance(person.created, datetime.datetime)
assert person.created != datetime.datetime(2014, 6, 12)
assert person._data["name"] == person.name
assert person._data["age"] == person.age
assert person._data["userid"] == person.userid
assert person._data["created"] == person.created
# Confirm introspection changes nothing
data_to_be_saved = sorted(person.to_mongo().keys())
assert data_to_be_saved == ["age", "created", "userid"]
def test_required_values(self):
"""Ensure that required field constraints are enforced."""
class Person(Document):
name = StringField(required=True)
age = IntField(required=True)
userid = StringField()
person = Person(name="Test User")
with pytest.raises(ValidationError):
person.validate()
person = Person(age=30)
with pytest.raises(ValidationError):
person.validate()
def test_not_required_handles_none_in_update(self):
"""Ensure that every fields should accept None if required is
False.
"""
class HandleNoneFields(Document):
str_fld = StringField()
int_fld = IntField()
flt_fld = FloatField()
comp_dt_fld = ComplexDateTimeField()
HandleNoneFields.drop_collection()
doc = HandleNoneFields()
doc.str_fld = "spam ham egg"
doc.int_fld = 42
doc.flt_fld = 4.2
doc.com_dt_fld = datetime.datetime.utcnow()
doc.save()
res = HandleNoneFields.objects(id=doc.id).update(
set__str_fld=None,
set__int_fld=None,
set__flt_fld=None,
set__comp_dt_fld=None,
)
assert res == 1
# Retrieve data from db and verify it.
ret = HandleNoneFields.objects.all()[0]
assert ret.str_fld is None
assert ret.int_fld is None
assert ret.flt_fld is None
assert ret.comp_dt_fld is None
def test_not_required_handles_none_from_database(self):
"""Ensure that every field can handle null values from the
database.
"""
class HandleNoneFields(Document):
str_fld = StringField(required=True)
int_fld = IntField(required=True)
flt_fld = FloatField(required=True)
comp_dt_fld = ComplexDateTimeField(required=True)
HandleNoneFields.drop_collection()
doc = HandleNoneFields()
doc.str_fld = "spam ham egg"
doc.int_fld = 42
doc.flt_fld = 4.2
doc.comp_dt_fld = datetime.datetime.utcnow()
doc.save()
# Unset all the fields
HandleNoneFields._get_collection().update_one(
{"_id": doc.id},
{"$unset": {"str_fld": 1, "int_fld": 1, "flt_fld": 1, "comp_dt_fld": 1}},
)
# Retrieve data from db and verify it.
ret = HandleNoneFields.objects.first()
assert ret.str_fld is None
assert ret.int_fld is None
assert ret.flt_fld is None
assert ret.comp_dt_fld is None
# Retrieved object shouldn't pass validation when a re-save is
# attempted.
with pytest.raises(ValidationError):
ret.validate()
def test_default_id_validation_as_objectid(self):
"""Ensure that invalid values cannot be assigned to an
ObjectIdField.
"""
class Person(Document):
name = StringField()
person = Person(name="Test User")
assert person.id is None
person.id = 47
with pytest.raises(ValidationError):
person.validate()
person.id = "abc"
with pytest.raises(ValidationError):
person.validate()
person.id = str(ObjectId())
person.validate()
def test_db_field_validation(self):
"""Ensure that db_field doesn't accept invalid values."""
# dot in the name
with pytest.raises(ValueError):
class User(Document):
name = StringField(db_field="user.name")
# name starting with $
with pytest.raises(ValueError):
class UserX1(Document):
name = StringField(db_field="$name")
# name containing a null character
with pytest.raises(ValueError):
class UserX2(Document):
name = StringField(db_field="name\0")
def test_list_validation(self):
"""Ensure that a list field only accepts lists with valid elements."""
access_level_choices = (
("a", "Administration"),
("b", "Manager"),
("c", "Staff"),
)
class User(Document):
pass
class Comment(EmbeddedDocument):
content = StringField()
class BlogPost(Document):
content = StringField()
comments = ListField(EmbeddedDocumentField(Comment))
tags = ListField(StringField())
authors = ListField(ReferenceField(User))
authors_as_lazy = ListField(LazyReferenceField(User))
generic = ListField(GenericReferenceField())
generic_as_lazy = ListField(GenericLazyReferenceField())
access_list = ListField(choices=access_level_choices, display_sep=", ")
User.drop_collection()
BlogPost.drop_collection()
post = BlogPost(content="Went for a walk today...")
post.validate()
post.tags = "fun"
with pytest.raises(ValidationError):
post.validate()
post.tags = [1, 2]
with pytest.raises(ValidationError):
post.validate()
post.tags = ["fun", "leisure"]
post.validate()
post.tags = ("fun", "leisure")
post.validate()
post.access_list = "a,b"
with pytest.raises(ValidationError):
post.validate()
post.access_list = ["c", "d"]
with pytest.raises(ValidationError):
post.validate()
post.access_list = ["a", "b"]
post.validate()
assert post.get_access_list_display() == "Administration, Manager"
post.comments = ["a"]
with pytest.raises(ValidationError):
post.validate()
post.comments = "yay"
with pytest.raises(ValidationError):
post.validate()
comments = [Comment(content="Good for you"), Comment(content="Yay.")]
post.comments = comments
post.validate()
post.authors = [Comment()]
with pytest.raises(ValidationError):
post.validate()
post.authors = [User()]
with pytest.raises(ValidationError):
post.validate()
user = User()
user.save()
post.authors = [user]
post.validate()
post.authors_as_lazy = [Comment()]
with pytest.raises(ValidationError):
post.validate()
post.authors_as_lazy = [User()]
with pytest.raises(ValidationError):
post.validate()
post.authors_as_lazy = [user]
post.validate()
post.generic = [1, 2]
with pytest.raises(ValidationError):
post.validate()
post.generic = [User(), Comment()]
with pytest.raises(ValidationError):
post.validate()
post.generic = [Comment()]
with pytest.raises(ValidationError):
post.validate()
post.generic = [user]
post.validate()
post.generic_as_lazy = [1, 2]
with pytest.raises(ValidationError):
post.validate()
post.generic_as_lazy = [User(), Comment()]
with pytest.raises(ValidationError):
post.validate()
post.generic_as_lazy = [Comment()]
with pytest.raises(ValidationError):
post.validate()
post.generic_as_lazy = [user]
post.validate()
def test_sorted_list_sorting(self):
"""Ensure that a sorted list field properly sorts values."""
class Comment(EmbeddedDocument):
order = IntField()
content = StringField()
class BlogPost(Document):
content = StringField()
comments = SortedListField(EmbeddedDocumentField(Comment), ordering="order")
tags = SortedListField(StringField())
BlogPost.drop_collection()
post = BlogPost(content="Went for a walk today...")
post.save()
post.tags = ["leisure", "fun"]
post.save()
post.reload()
assert post.tags == ["fun", "leisure"]
comment1 = Comment(content="Good for you", order=1)
comment2 = Comment(content="Yay.", order=0)
comments = [comment1, comment2]
post.comments = comments
post.save()
post.reload()
assert post.comments[0].content == comment2.content
assert post.comments[1].content == comment1.content
post.comments[0].order = 2
post.save()
post.reload()
assert post.comments[0].content == comment1.content
assert post.comments[1].content == comment2.content
def test_reverse_list_sorting(self):
"""Ensure that a reverse sorted list field properly sorts values"""
class Category(EmbeddedDocument):
count = IntField()
name = StringField()
class CategoryList(Document):
categories = SortedListField(
EmbeddedDocumentField(Category), ordering="count", reverse=True
)
name = StringField()
CategoryList.drop_collection()
catlist = CategoryList(name="Top categories")
cat1 = Category(name="posts", count=10)
cat2 = Category(name="food", count=100)
cat3 = Category(name="drink", count=40)
catlist.categories = [cat1, cat2, cat3]
catlist.save()
catlist.reload()
assert catlist.categories[0].name == cat2.name
assert catlist.categories[1].name == cat3.name
assert catlist.categories[2].name == cat1.name
def test_list_field(self):
"""Ensure that list types work as expected."""
class BlogPost(Document):
info = ListField()
BlogPost.drop_collection()
post = BlogPost()
post.info = "my post"
with pytest.raises(ValidationError):
post.validate()
post.info = {"title": "test"}
with pytest.raises(ValidationError):
post.validate()
post.info = ["test"]
post.save()
post = BlogPost()
post.info = [{"test": "test"}]
post.save()
post = BlogPost()
post.info = [{"test": 3}]
post.save()
assert BlogPost.objects.count() == 3
assert BlogPost.objects.filter(info__exact="test").count() == 1
assert BlogPost.objects.filter(info__0__test="test").count() == 1
# Confirm handles non strings or non existing keys
assert BlogPost.objects.filter(info__0__test__exact="5").count() == 0
assert BlogPost.objects.filter(info__100__test__exact="test").count() == 0
# test queries by list
post = BlogPost()
post.info = ["1", "2"]
post.save()
post = BlogPost.objects(info=["1", "2"]).get()
post.info += ["3", "4"]
post.save()
assert BlogPost.objects(info=["1", "2", "3", "4"]).count() == 1
post = BlogPost.objects(info=["1", "2", "3", "4"]).get()
post.info *= 2
post.save()
assert (
BlogPost.objects(info=["1", "2", "3", "4", "1", "2", "3", "4"]).count() == 1
)
    def test_list_field_manipulative_operators(self):
        """Ensure that ListField works with standard list operators that manipulate the list.

        Each operator section mutates ``post.info``, checks the in-memory
        result, then saves and reloads to confirm the mutation persisted.
        """
        class BlogPost(Document):
            ref = StringField()
            info = ListField(StringField())
        BlogPost.drop_collection()
        post = BlogPost()
        post.ref = "1234"
        post.info = ["0", "1", "2", "3", "4", "5"]
        post.save()
        # Restore (and persist) the canonical six-element list between sections.
        def reset_post():
            post.info = ["0", "1", "2", "3", "4", "5"]
            post.save()
        # '__add__(listB)'
        # listA+listB
        # operator.add(listA, listB)
        reset_post()
        temp = ["a", "b"]
        post.info = post.info + temp
        assert post.info == ["0", "1", "2", "3", "4", "5", "a", "b"]
        post.save()
        post.reload()
        assert post.info == ["0", "1", "2", "3", "4", "5", "a", "b"]
        # '__delitem__(index)'
        # aka 'del list[index]'
        # aka 'operator.delitem(list, index)'
        reset_post()
        del post.info[2]  # del from middle ('2')
        assert post.info == ["0", "1", "3", "4", "5"]
        post.save()
        post.reload()
        assert post.info == ["0", "1", "3", "4", "5"]
        # '__delitem__(slice(i, j))'
        # aka 'del list[i:j]'
        # aka 'operator.delitem(list, slice(i,j))'
        reset_post()
        del post.info[1:3]  # removes '1', '2'
        assert post.info == ["0", "3", "4", "5"]
        post.save()
        post.reload()
        assert post.info == ["0", "3", "4", "5"]
        # '__iadd__'
        # aka 'list += list'
        reset_post()
        temp = ["a", "b"]
        post.info += temp
        assert post.info == ["0", "1", "2", "3", "4", "5", "a", "b"]
        post.save()
        post.reload()
        assert post.info == ["0", "1", "2", "3", "4", "5", "a", "b"]
        # '__imul__'
        # aka 'list *= number'
        reset_post()
        post.info *= 2
        assert post.info == ["0", "1", "2", "3", "4", "5", "0", "1", "2", "3", "4", "5"]
        post.save()
        post.reload()
        assert post.info == ["0", "1", "2", "3", "4", "5", "0", "1", "2", "3", "4", "5"]
        # '__mul__'
        # aka 'listA*listB'
        reset_post()
        post.info = post.info * 2
        assert post.info == ["0", "1", "2", "3", "4", "5", "0", "1", "2", "3", "4", "5"]
        post.save()
        post.reload()
        assert post.info == ["0", "1", "2", "3", "4", "5", "0", "1", "2", "3", "4", "5"]
        # '__rmul__'
        # aka 'listB*listA'
        reset_post()
        post.info = 2 * post.info
        assert post.info == ["0", "1", "2", "3", "4", "5", "0", "1", "2", "3", "4", "5"]
        post.save()
        post.reload()
        assert post.info == ["0", "1", "2", "3", "4", "5", "0", "1", "2", "3", "4", "5"]
        # '__setitem__(index, value)'
        # aka 'list[index]=value'
        # aka 'setitem(list, value)'
        reset_post()
        post.info[4] = "a"
        assert post.info == ["0", "1", "2", "3", "a", "5"]
        post.save()
        post.reload()
        assert post.info == ["0", "1", "2", "3", "a", "5"]
        # __setitem__(index, value) with a negative index
        reset_post()
        post.info[-2] = "a"
        assert post.info == ["0", "1", "2", "3", "a", "5"]
        post.save()
        post.reload()
        assert post.info == ["0", "1", "2", "3", "a", "5"]
        # '__setitem__(slice(i, j), listB)'
        # aka 'listA[i:j] = listB'
        # aka 'setitem(listA, slice(i, j), listB)'
        # NOTE: the replacement list may be longer than the replaced slice.
        reset_post()
        post.info[1:3] = ["h", "e", "l", "l", "o"]
        assert post.info == ["0", "h", "e", "l", "l", "o", "3", "4", "5"]
        post.save()
        post.reload()
        assert post.info == ["0", "h", "e", "l", "l", "o", "3", "4", "5"]
        # '__setitem__(slice(i, j), listB)' with negative i and j
        reset_post()
        post.info[-5:-3] = ["h", "e", "l", "l", "o"]
        assert post.info == ["0", "h", "e", "l", "l", "o", "3", "4", "5"]
        post.save()
        post.reload()
        assert post.info == ["0", "h", "e", "l", "l", "o", "3", "4", "5"]
        # --- plain list methods (non-operator mutators) ---
        # 'append'
        reset_post()
        post.info.append("h")
        assert post.info == ["0", "1", "2", "3", "4", "5", "h"]
        post.save()
        post.reload()
        assert post.info == ["0", "1", "2", "3", "4", "5", "h"]
        # 'extend'
        reset_post()
        post.info.extend(["h", "e", "l", "l", "o"])
        assert post.info == ["0", "1", "2", "3", "4", "5", "h", "e", "l", "l", "o"]
        post.save()
        post.reload()
        assert post.info == ["0", "1", "2", "3", "4", "5", "h", "e", "l", "l", "o"]
        # 'pop' (with and without an index; 'insert' is not covered here)
        reset_post()
        x = post.info.pop(2)
        y = post.info.pop()
        assert post.info == ["0", "1", "3", "4"]
        assert x == "2"
        assert y == "5"
        post.save()
        post.reload()
        assert post.info == ["0", "1", "3", "4"]
        # 'remove'
        reset_post()
        post.info.remove("2")
        assert post.info == ["0", "1", "3", "4", "5"]
        post.save()
        post.reload()
        assert post.info == ["0", "1", "3", "4", "5"]
        # 'reverse'
        reset_post()
        post.info.reverse()
        assert post.info == ["5", "4", "3", "2", "1", "0"]
        post.save()
        post.reload()
        assert post.info == ["5", "4", "3", "2", "1", "0"]
        # 'sort': though this operator method does manipulate the list, it is
        # tested in the 'test_list_field_lexicograpic_operators' function
def test_list_field_invalid_operators(self):
class BlogPost(Document):
ref = StringField()
info = ListField(StringField())
post = BlogPost()
post.ref = "1234"
post.info = ["0", "1", "2", "3", "4", "5"]
# '__hash__'
# aka 'hash(list)'
with pytest.raises(TypeError):
hash(post.info)
def test_list_field_lexicographic_operators(self):
"""Ensure that ListField works with standard list operators that
do lexigraphic ordering.
"""
class BlogPost(Document):
ref = StringField()
text_info = ListField(StringField())
oid_info = ListField(ObjectIdField())
bool_info = ListField(BooleanField())
BlogPost.drop_collection()
blogSmall = BlogPost(ref="small")
blogSmall.text_info = ["a", "a", "a"]
blogSmall.bool_info = [False, False]
blogSmall.save()
blogSmall.reload()
blogLargeA = BlogPost(ref="big")
blogLargeA.text_info = ["a", "z", "j"]
blogLargeA.bool_info = [False, True]
blogLargeA.save()
blogLargeA.reload()
blogLargeB = BlogPost(ref="big2")
blogLargeB.text_info = ["a", "z", "j"]
blogLargeB.oid_info = [
"54495ad94c934721ede76f90",
"54495ad94c934721ede76d23",
"54495ad94c934721ede76d00",
]
blogLargeB.bool_info = [False, True]
blogLargeB.save()
blogLargeB.reload()
# '__eq__' aka '=='
assert blogLargeA.text_info == blogLargeB.text_info
assert blogLargeA.bool_info == blogLargeB.bool_info
# '__ge__' aka '>='
assert blogLargeA.text_info >= blogSmall.text_info
assert blogLargeA.text_info >= blogLargeB.text_info
assert blogLargeA.bool_info >= blogSmall.bool_info
assert blogLargeA.bool_info >= blogLargeB.bool_info
# '__gt__' aka '>'
assert blogLargeA.text_info >= blogSmall.text_info
assert blogLargeA.bool_info >= blogSmall.bool_info
# '__le__' aka '<='
assert blogSmall.text_info <= blogLargeB.text_info
assert blogLargeA.text_info <= blogLargeB.text_info
assert blogSmall.bool_info <= blogLargeB.bool_info
assert blogLargeA.bool_info <= blogLargeB.bool_info
# '__lt__' aka '<'
assert blogSmall.text_info < blogLargeB.text_info
assert blogSmall.bool_info < blogLargeB.bool_info
# '__ne__' aka '!='
assert blogSmall.text_info != blogLargeB.text_info
assert blogSmall.bool_info != blogLargeB.bool_info
# 'sort'
blogLargeB.bool_info = [True, False, True, False]
blogLargeB.text_info.sort()
blogLargeB.oid_info.sort()
blogLargeB.bool_info.sort()
sorted_target_list = [
ObjectId("54495ad94c934721ede76d00"),
ObjectId("54495ad94c934721ede76d23"),
ObjectId("54495ad94c934721ede76f90"),
]
assert blogLargeB.text_info == ["a", "j", "z"]
assert blogLargeB.oid_info == sorted_target_list
assert blogLargeB.bool_info == [False, False, True, True]
blogLargeB.save()
blogLargeB.reload()
assert blogLargeB.text_info == ["a", "j", "z"]
assert blogLargeB.oid_info == sorted_target_list
assert blogLargeB.bool_info == [False, False, True, True]
    def test_list_assignment(self):
        """Ensure that list field element assignment and slicing work."""
        class BlogPost(Document):
            info = ListField()
        BlogPost.drop_collection()
        post = BlogPost()
        post.info = ["e1", "e2", 3, "4", 5]
        post.save()
        # Single-element assignment by positive index.
        post.info[0] = 1
        post.save()
        post.reload()
        assert post.info[0] == 1
        # Slice assignment with matching length.
        post.info[1:3] = ["n2", "n3"]
        post.save()
        post.reload()
        assert post.info == [1, "n2", "n3", "4", 5]
        # Single-element assignment by negative index.
        post.info[-1] = "n5"
        post.save()
        post.reload()
        assert post.info == [1, "n2", "n3", "4", "n5"]
        post.info[-2] = 4
        post.save()
        post.reload()
        assert post.info == [1, "n2", "n3", 4, "n5"]
        # Slice assignment that shrinks the list.
        post.info[1:-1] = [2]
        post.save()
        post.reload()
        assert post.info == [1, 2, "n5"]
        # Slice assignment that grows the list again.
        post.info[:-1] = [1, "n2", "n3", 4]
        post.save()
        post.reload()
        assert post.info == [1, "n2", "n3", 4, "n5"]
        # Mixed negative/positive slice bounds.
        post.info[-4:3] = [2, 3]
        post.save()
        post.reload()
        assert post.info == [1, 2, 3, 4, "n5"]
def test_list_field_passed_in_value(self):
class Foo(Document):
bars = ListField(ReferenceField("Bar"))
class Bar(Document):
text = StringField()
bar = Bar(text="hi")
bar.save()
foo = Foo(bars=[])
foo.bars.append(bar)
assert repr(foo.bars) == "[<Bar: Bar object>]"
def test_list_field_strict(self):
"""Ensure that list field handles validation if provided
a strict field type.
"""
class Simple(Document):
mapping = ListField(field=IntField())
Simple.drop_collection()
e = Simple()
e.mapping = [1]
e.save()
# try creating an invalid mapping
with pytest.raises(ValidationError):
e.mapping = ["abc"]
e.save()
def test_list_field_max_length(self):
"""Ensure ListField's max_length is respected."""
class Foo(Document):
items = ListField(IntField(), max_length=5)
foo = Foo()
for i in range(1, 7):
foo.items.append(i)
if i < 6:
foo.save()
else:
with pytest.raises(ValidationError) as exc_info:
foo.save()
assert "List is too long" in str(exc_info.value)
def test_list_field_max_length_set_operator(self):
"""Ensure ListField's max_length is respected for a "set" operator."""
class Foo(Document):
items = ListField(IntField(), max_length=3)
foo = Foo.objects.create(items=[1, 2, 3])
with pytest.raises(ValidationError) as exc_info:
foo.modify(set__items=[1, 2, 3, 4])
assert "List is too long" in str(exc_info.value)
def test_list_field_rejects_strings(self):
"""Strings aren't valid list field data types."""
class Simple(Document):
mapping = ListField()
Simple.drop_collection()
e = Simple()
e.mapping = "hello world"
with pytest.raises(ValidationError):
e.save()
def test_complex_field_required(self):
"""Ensure required cant be None / Empty."""
class Simple(Document):
mapping = ListField(required=True)
Simple.drop_collection()
e = Simple()
e.mapping = []
with pytest.raises(ValidationError):
e.save()
class Simple(Document):
mapping = DictField(required=True)
Simple.drop_collection()
e = Simple()
e.mapping = {}
with pytest.raises(ValidationError):
e.save()
def test_complex_field_same_value_not_changed(self):
"""If a complex field is set to the same value, it should not
be marked as changed.
"""
class Simple(Document):
mapping = ListField()
Simple.drop_collection()
e = Simple().save()
e.mapping = []
assert e._changed_fields == []
class Simple(Document):
mapping = DictField()
Simple.drop_collection()
e = Simple().save()
e.mapping = {}
assert e._changed_fields == []
def test_slice_marks_field_as_changed(self):
class Simple(Document):
widgets = ListField()
simple = Simple(widgets=[1, 2, 3, 4]).save()
simple.widgets[:3] = []
assert ["widgets"] == simple._changed_fields
simple.save()
simple = simple.reload()
assert simple.widgets == [4]
def test_del_slice_marks_field_as_changed(self):
class Simple(Document):
widgets = ListField()
simple = Simple(widgets=[1, 2, 3, 4]).save()
del simple.widgets[:3]
assert ["widgets"] == simple._changed_fields
simple.save()
simple = simple.reload()
assert simple.widgets == [4]
def test_list_field_with_negative_indices(self):
class Simple(Document):
widgets = ListField()
simple = Simple(widgets=[1, 2, 3, 4]).save()
simple.widgets[-1] = 5
assert ["widgets.3"] == simple._changed_fields
simple.save()
simple = simple.reload()
assert simple.widgets == [1, 2, 3, 5]
    def test_list_field_complex(self):
        """Ensure that the list fields can handle the complex types."""
        class SettingBase(EmbeddedDocument):
            meta = {"allow_inheritance": True}
        class StringSetting(SettingBase):
            value = StringField()
        class IntegerSetting(SettingBase):
            value = IntField()
        class Simple(Document):
            mapping = ListField()
        Simple.drop_collection()
        e = Simple()
        # An untyped ListField can mix embedded documents and raw dicts
        # (including dicts that nest further embedded documents and lists).
        e.mapping.append(StringSetting(value="foo"))
        e.mapping.append(IntegerSetting(value=42))
        e.mapping.append(
            {
                "number": 1,
                "string": "Hi!",
                "float": 1.001,
                "complex": IntegerSetting(value=42),
                "list": [IntegerSetting(value=42), StringSetting(value="foo")],
            }
        )
        e.save()
        # Each element is rehydrated to its concrete subclass on fetch.
        e2 = Simple.objects.get(id=e.id)
        assert isinstance(e2.mapping[0], StringSetting)
        assert isinstance(e2.mapping[1], IntegerSetting)
        # Test querying by positional index (mapping__1) and nested paths.
        assert Simple.objects.filter(mapping__1__value=42).count() == 1
        assert Simple.objects.filter(mapping__2__number=1).count() == 1
        assert Simple.objects.filter(mapping__2__complex__value=42).count() == 1
        assert Simple.objects.filter(mapping__2__list__0__value=42).count() == 1
        assert Simple.objects.filter(mapping__2__list__1__value="foo").count() == 1
        # Confirm can update individual elements (top-level and nested).
        Simple.objects().update(set__mapping__1=IntegerSetting(value=10))
        assert Simple.objects.filter(mapping__1__value=10).count() == 1
        Simple.objects().update(set__mapping__2__list__1=StringSetting(value="Boo"))
        assert Simple.objects.filter(mapping__2__list__1__value="foo").count() == 0
        assert Simple.objects.filter(mapping__2__list__1__value="Boo").count() == 1
def test_embedded_db_field(self):
class Embedded(EmbeddedDocument):
number = IntField(default=0, db_field="i")
class Test(Document):
embedded = EmbeddedDocumentField(Embedded, db_field="x")
Test.drop_collection()
test = Test()
test.embedded = Embedded(number=1)
test.save()
Test.objects.update_one(inc__embedded__number=1)
test = Test.objects.get()
assert test.embedded.number == 2
doc = self.db.test.find_one()
assert doc["x"]["i"] == 2
def test_double_embedded_db_field(self):
"""Make sure multiple layers of embedded docs resolve db fields
properly and can be initialized using dicts.
"""
class C(EmbeddedDocument):
txt = StringField()
class B(EmbeddedDocument):
c = EmbeddedDocumentField(C, db_field="fc")
class A(Document):
b = EmbeddedDocumentField(B, db_field="fb")
a = A(b=B(c=C(txt="hi")))
a.validate()
a = A(b={"c": {"txt": "hi"}})
a.validate()
    def test_double_embedded_db_field_from_son(self):
        """Make sure multiple layers of embedded docs resolve db fields
        from SON properly.
        """
        class C(EmbeddedDocument):
            txt = StringField()
        class B(EmbeddedDocument):
            c = EmbeddedDocumentField(C, db_field="fc")
        class A(Document):
            b = EmbeddedDocumentField(B, db_field="fb")
        # Deserialize straight from raw SON keyed by the db_field aliases.
        a = A._from_son(SON([("fb", SON([("fc", SON([("txt", "hi")]))]))]))
        # Python attribute names resolve through each aliased layer.
        assert a.b.c.txt == "hi"
@pytest.mark.xfail(
reason="Using a string reference in an EmbeddedDocumentField does not work if the class isnt registerd yet",
raises=NotRegistered,
)
def test_embedded_document_field_cant_reference_using_a_str_if_it_does_not_exist_yet(
self,
):
class MyDoc2(Document):
emb = EmbeddedDocumentField("MyFunkyDoc123")
class MyFunkyDoc123(EmbeddedDocument):
name = StringField()
def test_embedded_document_validation(self):
"""Ensure that invalid embedded documents cannot be assigned to
embedded document fields.
"""
class Comment(EmbeddedDocument):
content = StringField()
class PersonPreferences(EmbeddedDocument):
food = StringField(required=True)
number = IntField()
class Person(Document):
name = StringField()
preferences = EmbeddedDocumentField(PersonPreferences)
Person.drop_collection()
person = Person(name="Test User")
person.preferences = "My Preferences"
with pytest.raises(ValidationError):
person.validate()
# Check that only the right embedded doc works
person.preferences = Comment(content="Nice blog post...")
with pytest.raises(ValidationError):
person.validate()
# Check that the embedded doc is valid
person.preferences = PersonPreferences()
with pytest.raises(ValidationError):
person.validate()
person.preferences = PersonPreferences(food="Cheese", number=47)
assert person.preferences.food == "Cheese"
person.validate()
def test_embedded_document_inheritance(self):
"""Ensure that subclasses of embedded documents may be provided
to EmbeddedDocumentFields of the superclass' type.
"""
class User(EmbeddedDocument):
name = StringField()
meta = {"allow_inheritance": True}
class PowerUser(User):
power = IntField()
class BlogPost(Document):
content = StringField()
author = EmbeddedDocumentField(User)
BlogPost.drop_collection()
post = BlogPost(content="What I did today...")
post.author = PowerUser(name="Test User", power=47)
post.save()
assert 47 == BlogPost.objects.first().author.power
def test_embedded_document_inheritance_with_list(self):
"""Ensure that nested list of subclassed embedded documents is
handled correctly.
"""
class Group(EmbeddedDocument):
name = StringField()
content = ListField(StringField())
class Basedoc(Document):
groups = ListField(EmbeddedDocumentField(Group))
meta = {"abstract": True}
class User(Basedoc):
doctype = StringField(require=True, default="userdata")
User.drop_collection()
content = ["la", "le", "lu"]
group = Group(name="foo", content=content)
foobar = User(groups=[group])
foobar.save()
assert content == User.objects.first().groups[0].content
    def test_reference_miss(self):
        """Ensure an exception is raised when dereferencing an unknown
        document.
        """
        class Foo(Document):
            pass
        class Bar(Document):
            ref = ReferenceField(Foo)
            generic_ref = GenericReferenceField()
        Foo.drop_collection()
        Bar.drop_collection()
        foo = Foo().save()
        bar = Bar(ref=foo, generic_ref=foo).save()
        # Reference is no longer valid
        foo.delete()
        # Accessing either reference now dereferences a missing document.
        bar = Bar.objects.get()
        with pytest.raises(DoesNotExist):
            bar.ref
        with pytest.raises(DoesNotExist):
            bar.generic_ref
        # When auto_dereference is disabled, there is no trouble returning DBRef
        # (re-fetch a fresh copy first).
        bar = Bar.objects.get()
        expected = foo.to_dbref()
        bar._fields["ref"].set_auto_dereferencing(False)
        assert bar.ref == expected
        bar._fields["generic_ref"].set_auto_dereferencing(False)
        assert bar.generic_ref == {"_ref": expected, "_cls": "Foo"}
def test_list_item_dereference(self):
"""Ensure that DBRef items in ListFields are dereferenced."""
class User(Document):
name = StringField()
class Group(Document):
members = ListField(ReferenceField(User))
User.drop_collection()
Group.drop_collection()
user1 = User(name="user1")
user1.save()
user2 = User(name="user2")
user2.save()
group = Group(members=[user1, user2])
group.save()
group_obj = Group.objects.first()
assert group_obj.members[0].name == user1.name
assert group_obj.members[1].name == user2.name
def test_recursive_reference(self):
"""Ensure that ReferenceFields can reference their own documents."""
class Employee(Document):
name = StringField()
boss = ReferenceField("self")
friends = ListField(ReferenceField("self"))
Employee.drop_collection()
bill = Employee(name="Bill Lumbergh")
bill.save()
michael = Employee(name="Michael Bolton")
michael.save()
samir = Employee(name="Samir Nagheenanajar")
samir.save()
friends = [michael, samir]
peter = Employee(name="Peter Gibbons", boss=bill, friends=friends)
peter.save()
peter = Employee.objects.with_id(peter.id)
assert peter.boss == bill
assert peter.friends == friends
    def test_recursive_embedding(self):
        """Ensure that EmbeddedDocumentFields can contain their own documents."""
        class TreeNode(EmbeddedDocument):
            name = StringField()
            children = ListField(EmbeddedDocumentField("self"))
        class Tree(Document):
            name = StringField()
            children = ListField(EmbeddedDocumentField("TreeNode"))
        Tree.drop_collection()
        # Build a two-level tree: Tree -> Child 1 -> Child 2.
        tree = Tree(name="Tree")
        first_child = TreeNode(name="Child 1")
        tree.children.append(first_child)
        second_child = TreeNode(name="Child 2")
        first_child.children.append(second_child)
        tree.save()
        tree = Tree.objects.first()
        assert len(tree.children) == 1
        assert len(tree.children[0].children) == 1
        # Append a sibling at the nested level and persist.
        third_child = TreeNode(name="Child 3")
        tree.children[0].children.append(third_child)
        tree.save()
        assert len(tree.children) == 1
        assert tree.children[0].name == first_child.name
        assert tree.children[0].children[0].name == second_child.name
        assert tree.children[0].children[1].name == third_child.name
        # Test updating
        tree.children[0].name = "I am Child 1"
        tree.children[0].children[0].name = "I am Child 2"
        tree.children[0].children[1].name = "I am Child 3"
        tree.save()
        assert tree.children[0].name == "I am Child 1"
        assert tree.children[0].children[0].name == "I am Child 2"
        assert tree.children[0].children[1].name == "I am Child 3"
        # Test removal
        assert len(tree.children[0].children) == 2
        del tree.children[0].children[1]
        tree.save()
        assert len(tree.children[0].children) == 1
        tree.children[0].children.pop(0)
        tree.save()
        assert len(tree.children[0].children) == 0
        assert tree.children[0].children == []
        # Test re-insertion after emptying the nested list.
        tree.children[0].children.insert(0, third_child)
        tree.children[0].children.insert(0, second_child)
        tree.save()
        assert len(tree.children[0].children) == 2
        assert tree.children[0].children[0].name == second_child.name
        assert tree.children[0].children[1].name == third_child.name
def test_drop_abstract_document(self):
"""Ensure that an abstract document cannot be dropped given it
has no underlying collection.
"""
class AbstractDoc(Document):
name = StringField()
meta = {"abstract": True}
with pytest.raises(OperationError):
AbstractDoc.drop_collection()
def test_reference_class_with_abstract_parent(self):
"""Ensure that a class with an abstract parent can be referenced."""
class Sibling(Document):
name = StringField()
meta = {"abstract": True}
class Sister(Sibling):
pass
class Brother(Sibling):
sibling = ReferenceField(Sibling)
Sister.drop_collection()
Brother.drop_collection()
sister = Sister(name="Alice")
sister.save()
brother = Brother(name="Bob", sibling=sister)
brother.save()
assert Brother.objects[0].sibling.name == sister.name
def test_reference_abstract_class(self):
"""Ensure that an abstract class instance cannot be used in the
reference of that abstract class.
"""
class Sibling(Document):
name = StringField()
meta = {"abstract": True}
class Sister(Sibling):
pass
class Brother(Sibling):
sibling = ReferenceField(Sibling)
Sister.drop_collection()
Brother.drop_collection()
sister = Sibling(name="Alice")
brother = Brother(name="Bob", sibling=sister)
with pytest.raises(ValidationError):
brother.save()
def test_abstract_reference_base_type(self):
"""Ensure that an an abstract reference fails validation when given a
Document that does not inherit from the abstract type.
"""
class Sibling(Document):
name = StringField()
meta = {"abstract": True}
class Brother(Sibling):
sibling = ReferenceField(Sibling)
class Mother(Document):
name = StringField()
Brother.drop_collection()
Mother.drop_collection()
mother = Mother(name="Carol")
mother.save()
brother = Brother(name="Bob", sibling=mother)
with pytest.raises(ValidationError):
brother.save()
def test_choices_allow_using_sets_as_choices(self):
"""Ensure that sets can be used when setting choices"""
class Shirt(Document):
size = StringField(choices={"M", "L"})
Shirt(size="M").validate()
def test_choices_validation_allow_no_value(self):
"""Ensure that .validate passes and no value was provided
for a field setup with choices
"""
class Shirt(Document):
size = StringField(choices=("S", "M"))
shirt = Shirt()
shirt.validate()
def test_choices_validation_accept_possible_value(self):
"""Ensure that value is in a container of allowed values."""
class Shirt(Document):
size = StringField(choices=("S", "M"))
shirt = Shirt(size="S")
shirt.validate()
def test_choices_validation_reject_unknown_value(self):
"""Ensure that unallowed value are rejected upon validation"""
class Shirt(Document):
size = StringField(choices=("S", "M"))
shirt = Shirt(size="XS")
with pytest.raises(ValidationError):
shirt.validate()
    def test_choices_get_field_display(self):
        """Test dynamic helper for returning the display value of a choices
        field.
        """
        class Shirt(Document):
            size = StringField(
                max_length=3,
                choices=(
                    ("S", "Small"),
                    ("M", "Medium"),
                    ("L", "Large"),
                    ("XL", "Extra Large"),
                    ("XXL", "Extra Extra Large"),
                ),
            )
            style = StringField(
                max_length=3,
                choices=(("S", "Small"), ("B", "Baggy"), ("W", "Wide")),
                default="W",
            )
        Shirt.drop_collection()
        shirt1 = Shirt()
        shirt2 = Shirt()
        # Make sure get_<field>_display returns the default value (or None)
        assert shirt1.get_size_display() is None
        assert shirt1.get_style_display() == "Wide"
        # A set value maps to its human-readable label.
        shirt1.size = "XXL"
        shirt1.style = "B"
        shirt2.size = "M"
        shirt2.style = "S"
        assert shirt1.get_size_display() == "Extra Extra Large"
        assert shirt1.get_style_display() == "Baggy"
        assert shirt2.get_size_display() == "Medium"
        assert shirt2.get_style_display() == "Small"
        # Set as Z - an invalid choice
        # The display helper echoes an unknown value verbatim, but
        # validation still rejects it.
        shirt1.size = "Z"
        shirt1.style = "Z"
        assert shirt1.get_size_display() == "Z"
        assert shirt1.get_style_display() == "Z"
        with pytest.raises(ValidationError):
            shirt1.validate()
def test_simple_choices_validation(self):
"""Ensure that value is in a container of allowed values."""
class Shirt(Document):
size = StringField(max_length=3, choices=("S", "M", "L", "XL", "XXL"))
Shirt.drop_collection()
shirt = Shirt()
shirt.validate()
shirt.size = "S"
shirt.validate()
shirt.size = "XS"
with pytest.raises(ValidationError):
shirt.validate()
    def test_simple_choices_get_field_display(self):
        """Test dynamic helper for returning the display value of a choices
        field.

        With flat (non-pair) choices the display value is the raw value.
        """
        class Shirt(Document):
            size = StringField(max_length=3, choices=("S", "M", "L", "XL", "XXL"))
            style = StringField(
                max_length=3, choices=("Small", "Baggy", "wide"), default="Small"
            )
        Shirt.drop_collection()
        shirt = Shirt()
        # Unset -> None; defaulted -> the default value.
        assert shirt.get_size_display() is None
        assert shirt.get_style_display() == "Small"
        shirt.size = "XXL"
        shirt.style = "Baggy"
        assert shirt.get_size_display() == "XXL"
        assert shirt.get_style_display() == "Baggy"
        # Set as Z - an invalid choice
        # The helper echoes the raw value, but validation still rejects it.
        shirt.size = "Z"
        shirt.style = "Z"
        assert shirt.get_size_display() == "Z"
        assert shirt.get_style_display() == "Z"
        with pytest.raises(ValidationError):
            shirt.validate()
def test_simple_choices_validation_invalid_value(self):
"""Ensure that error messages are correct."""
SIZES = ("S", "M", "L", "XL", "XXL")
COLORS = (("R", "Red"), ("B", "Blue"))
SIZE_MESSAGE = "Value must be one of ('S', 'M', 'L', 'XL', 'XXL')"
COLOR_MESSAGE = "Value must be one of ['R', 'B']"
class Shirt(Document):
size = StringField(max_length=3, choices=SIZES)
color = StringField(max_length=1, choices=COLORS)
Shirt.drop_collection()
shirt = Shirt()
shirt.validate()
shirt.size = "S"
shirt.color = "R"
shirt.validate()
shirt.size = "XS"
shirt.color = "G"
try:
shirt.validate()
except ValidationError as error:
# get the validation rules
error_dict = error.to_dict()
assert error_dict["size"] == SIZE_MESSAGE
assert error_dict["color"] == COLOR_MESSAGE
def test_recursive_validation(self):
"""Ensure that a validation result to_dict is available."""
class Author(EmbeddedDocument):
name = StringField(required=True)
class Comment(EmbeddedDocument):
author = EmbeddedDocumentField(Author, required=True)
content = StringField(required=True)
class Post(Document):
title = StringField(required=True)
comments = ListField(EmbeddedDocumentField(Comment))
bob = Author(name="Bob")
post = Post(title="hello world")
post.comments.append(Comment(content="hello", author=bob))
post.comments.append(Comment(author=bob))
with pytest.raises(ValidationError):
post.validate()
try:
post.validate()
except ValidationError as error:
# ValidationError.errors property
assert hasattr(error, "errors")
assert isinstance(error.errors, dict)
assert "comments" in error.errors
assert 1 in error.errors["comments"]
assert isinstance(error.errors["comments"][1]["content"], ValidationError)
# ValidationError.schema property
error_dict = error.to_dict()
assert isinstance(error_dict, dict)
assert "comments" in error_dict
assert 1 in error_dict["comments"]
assert "content" in error_dict["comments"][1]
assert error_dict["comments"][1]["content"] == "Field is required"
post.comments[1].content = "here we go"
post.validate()
def test_tuples_as_tuples(self):
"""Ensure that tuples remain tuples when they are inside
a ComplexBaseField.
"""
class SomeField(BaseField):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def to_mongo(self, value):
return value
def to_python(self, value):
return tuple(value)
class TestDoc(Document):
items = ListField(SomeField())
TestDoc.drop_collection()
tuples = [(100, "Testing")]
doc = TestDoc()
doc.items = tuples
doc.save()
x = TestDoc.objects().get()
assert x is not None
assert len(x.items) == 1
assert tuple(x.items[0]) in tuples
assert x.items[0] in tuples
def test_dynamic_fields_class(self):
class Doc2(Document):
field_1 = StringField(db_field="f")
class Doc(Document):
my_id = IntField(primary_key=True)
embed_me = DynamicField(db_field="e")
field_x = StringField(db_field="x")
Doc.drop_collection()
Doc2.drop_collection()
doc2 = Doc2(field_1="hello")
doc = Doc(my_id=1, embed_me=doc2, field_x="x")
with pytest.raises(OperationError):
doc.save()
doc2.save()
doc.save()
doc = Doc.objects.get()
assert doc.embed_me.field_1 == "hello"
def test_dynamic_fields_embedded_class(self):
class Embed(EmbeddedDocument):
field_1 = StringField(db_field="f")
class Doc(Document):
my_id = IntField(primary_key=True)
embed_me = DynamicField(db_field="e")
field_x = StringField(db_field="x")
Doc.drop_collection()
Doc(my_id=1, embed_me=Embed(field_1="hello"), field_x="x").save()
doc = Doc.objects.get()
assert doc.embed_me.field_1 == "hello"
    def test_dynamicfield_dump_document(self):
        """Ensure a DynamicField can handle another document's dump."""
        class Doc(Document):
            field = DynamicField()
        class ToEmbed(Document):
            id = IntField(primary_key=True, default=1)
            recursive = DynamicField()
        class ToEmbedParent(Document):
            id = IntField(primary_key=True, default=1)
            recursive = DynamicField()
            meta = {"allow_inheritance": True}
        class ToEmbedChild(ToEmbedParent):
            pass
        # Store a saved document (which itself dynamically embeds another
        # saved document) inside a DynamicField and round-trip it.
        to_embed_recursive = ToEmbed(id=1).save()
        to_embed = ToEmbed(id=2, recursive=to_embed_recursive).save()
        doc = Doc(field=to_embed)
        doc.save()
        assert isinstance(doc.field, ToEmbed)
        assert doc.field == to_embed
        # Same thing with a Document with a _cls field
        to_embed_recursive = ToEmbedChild(id=1).save()
        to_embed_child = ToEmbedChild(id=2, recursive=to_embed_recursive).save()
        doc = Doc(field=to_embed_child)
        doc.save()
        assert isinstance(doc.field, ToEmbedChild)
        assert doc.field == to_embed_child
def test_cls_field(self):
class Animal(Document):
meta = {"allow_inheritance": True}
class Fish(Animal):
pass
class Mammal(Animal):
pass
class Dog(Mammal):
pass
class Human(Mammal):
pass
Animal.objects.delete()
Dog().save()
Fish().save()
Human().save()
assert (
Animal.objects(_cls__in=["Animal.Mammal.Dog", "Animal.Fish"]).count() == 2
)
assert Animal.objects(_cls__in=["Animal.Fish.Guppy"]).count() == 0
def test_sparse_field(self):
class Doc(Document):
name = StringField(required=False, unique=True, sparse=True)
# This would raise an exception in a non-sparse unique index
Doc().save()
Doc().save()
def test_undefined_field_exception(self):
"""Tests if a `FieldDoesNotExist` exception is raised when
trying to instantiate a document with a field that's not
defined.
"""
class Doc(Document):
foo = StringField()
with pytest.raises(FieldDoesNotExist):
Doc(bar="test")
def test_undefined_field_exception_with_strict(self):
"""Tests if a `FieldDoesNotExist` exception is raised when
trying to instantiate a document with a field that's not
defined, even when strict is set to False.
"""
class Doc(Document):
foo = StringField()
meta = {"strict": False}
with pytest.raises(FieldDoesNotExist):
Doc(bar="test")
def test_undefined_field_works_no_confusion_with_db_field(self):
class Doc(Document):
foo = StringField(db_field="bar")
with pytest.raises(FieldDoesNotExist):
Doc(bar="test")
| TestField |
python | pikepdf__pikepdf | tests/test_canvas.py | {
"start": 4142,
"end": 7835
} | class ____:
def test_init(self):
builder = ContentStreamBuilder()
assert isinstance(builder.build(), bytes)
def test_append(self):
builder = ContentStreamBuilder()
builder.push()
assert builder.build() == b'q\n'
def test_extend(self):
builder1 = ContentStreamBuilder()
builder2 = ContentStreamBuilder()
builder2.push()
builder1.extend(builder2)
assert builder1.build() == b'q\n'
# Table of (builder method, positional args, expected PDF operator).
# Several methods appear multiple times to cover int, float, and Decimal
# operand types.
@pytest.mark.parametrize(
    'method,args,operator',
    [
        # Graphics state save/restore and transformation matrix
        (ContentStreamBuilder.push, (), 'q'),
        (ContentStreamBuilder.pop, (), 'Q'),
        (ContentStreamBuilder.cm, (Matrix(),), 'cm'),
        # Marked-content sections
        (
            ContentStreamBuilder.begin_marked_content_proplist,
            (Name.Test, 42),
            'BDC',
        ),
        (ContentStreamBuilder.end_marked_content, (), 'EMC'),
        (ContentStreamBuilder.begin_marked_content, (Name.Foo,), 'BMC'),
        # Text object and text-state operators
        (ContentStreamBuilder.begin_text, (), 'BT'),
        (ContentStreamBuilder.end_text, (), 'ET'),
        (ContentStreamBuilder.set_text_font, (Name.Test, 12), 'Tf'),
        (ContentStreamBuilder.set_text_font, (Name.Test, 12.5), 'Tf'),
        (ContentStreamBuilder.set_text_font, (Name.Test, Decimal('12.5')), 'Tf'),
        (ContentStreamBuilder.set_text_char_spacing, (1,), 'Tc'),
        (ContentStreamBuilder.set_text_char_spacing, (0.5,), 'Tc'),
        (ContentStreamBuilder.set_text_char_spacing, (Decimal('1'),), 'Tc'),
        (ContentStreamBuilder.set_text_word_spacing, (1,), 'Tw'),
        (ContentStreamBuilder.set_text_word_spacing, (0.5,), 'Tw'),
        (ContentStreamBuilder.set_text_word_spacing, (Decimal('1'),), 'Tw'),
        (ContentStreamBuilder.set_text_leading, (13,), 'TL'),
        (ContentStreamBuilder.set_text_leading, (13.5,), 'TL'),
        (ContentStreamBuilder.set_text_leading, (Decimal('13.5'),), 'TL'),
        (ContentStreamBuilder.set_text_matrix, (Matrix(),), "Tm"),
        (ContentStreamBuilder.set_text_rendering, (3,), "Tr"),
        (ContentStreamBuilder.set_text_horizontal_scaling, (100.0,), "Tz"),
        # Text-showing operators
        (
            ContentStreamBuilder.show_text_with_kerning,
            (b'A', 120, b'W', 120, b'A', 95, b'Y'),
            "TJ",
        ),
        (ContentStreamBuilder.show_text_line, (b'hello world',), "'"),
        (
            ContentStreamBuilder.show_text_line_with_spacing,
            (b'hello world', 0.25, 0.25),
            '"',
        ),
        (ContentStreamBuilder.move_cursor, (1, 2), "Td"),
        (ContentStreamBuilder.move_cursor_new_line, (), "T*"),
        # Path construction and painting
        (ContentStreamBuilder.stroke_and_close, (), "s"),
        (ContentStreamBuilder.fill, (), "f"),
        (ContentStreamBuilder.append_rectangle, (10, 10, 40, 40), "re"),
        # Color, line width, and dash pattern
        (ContentStreamBuilder.set_stroke_color, (1, 0, 1), "RG"),
        (ContentStreamBuilder.set_fill_color, (0, 1, 0), "rg"),
        (ContentStreamBuilder.set_line_width, (5,), "w"),
        (ContentStreamBuilder.line, (1, 2, 3, 4), "l"),
        (ContentStreamBuilder.set_dashes, (), "d"),
        (ContentStreamBuilder.set_dashes, (1,), "d"),
        (ContentStreamBuilder.set_dashes, ([1, 2], 1), "d"),
        # External object invocation
        (ContentStreamBuilder.draw_xobject, (Name.X,), "Do"),
    ],
)
def test_operators(self, method, operator, args):
    """Each builder method must end the stream with its PDF operator
    followed by a newline.
    """
    builder = ContentStreamBuilder()
    method(builder, *args)
    assert builder.build().endswith(Operator(operator).unparse() + b'\n')
| TestContentStreamBuilder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.