language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
doocs__leetcode
|
solution/2100-2199/2100.Find Good Days to Rob the Bank/Solution.py
|
{
"start": 0,
"end": 527
}
|
class ____:
def goodDaysToRobBank(self, security: List[int], time: int) -> List[int]:
n = len(security)
if n <= time * 2:
return []
left, right = [0] * n, [0] * n
for i in range(1, n):
if security[i] <= security[i - 1]:
left[i] = left[i - 1] + 1
for i in range(n - 2, -1, -1):
if security[i] <= security[i + 1]:
right[i] = right[i + 1] + 1
return [i for i in range(n) if time <= min(left[i], right[i])]
|
Solution
|
python
|
django-guardian__django-guardian
|
example_project/articles/models.py
|
{
"start": 795,
"end": 932
}
|
class ____(UserObjectPermissionBase):
content_object = models.ForeignKey(Article, on_delete=models.CASCADE)
|
ArticleUserObjectPermission
|
python
|
xlwings__xlwings
|
tests/test_table.py
|
{
"start": 120,
"end": 4968
}
|
class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.book = xw.Book()
cls.sheet = cls.book.sheets[0]
cls.sheet["A1"].value = [["a", "b"], [1, 2]]
cls.test_table = cls.sheet.tables.add(source=cls.sheet["A1"].expand())
@classmethod
def tearDownClass(cls):
cls.book.close()
def test_add_table_no_name(self):
self.assertEqual(self.sheet.tables[0].name, "Table1")
def test_add_table_with_name(self):
self.sheet["A4"].value = [["a", "b"], [1, 2]]
self.sheet.tables.add(source=self.sheet["A4"].expand(), name="AABBCC")
self.assertEqual(self.sheet.tables["AABBCC"].name, "AABBCC")
def test_data_body_range(self):
self.assertEqual(self.test_table.data_body_range, self.sheet["A2:B2"])
def test_display_name(self):
origin_display_name = self.test_table.display_name
self.test_table.display_name = "ABCDE"
self.assertEqual(self.test_table.display_name, "ABCDE")
self.test_table.display_name = origin_display_name
def test_header_row_range(self):
self.assertEqual(self.test_table.header_row_range, self.sheet["A1:B1"])
self.test_table.show_headers = False
self.assertIsNone(self.test_table.header_row_range)
self.test_table.show_headers = True
def test_insert_row_range(self):
table = self.sheet.tables.add(self.sheet["A10"])
self.assertEqual(table.insert_row_range, self.sheet["A11"])
def test_insert_row_range_none(self):
self.assertIsNone(self.test_table.insert_row_range)
def test_name(self):
original_name = self.test_table.name
self.test_table.name = "XYZ"
self.assertEqual(self.test_table.name, "XYZ")
self.assertEqual(self.sheet.tables["XYZ"].name, "XYZ")
self.test_table.name = original_name
def test_parent(self):
self.assertEqual(self.test_table.parent, self.sheet)
def test_show_autofilter(self):
self.assertTrue(self.test_table.show_autofilter)
self.test_table.show_autofilter = False
self.assertFalse(self.test_table.show_autofilter)
self.test_table.show_autofilter = True
def test_show_headers(self):
self.assertTrue(self.test_table.show_headers)
self.test_table.show_headers = False
self.assertFalse(self.test_table.show_headers)
self.test_table.show_headers = True
def test_show_table_style_columns_stripes(self):
self.assertFalse(self.test_table.show_table_style_column_stripes)
self.test_table.show_table_style_column_stripes = True
self.assertTrue(self.test_table.show_table_style_column_stripes)
self.test_table.show_table_style_column_stripes = False
def test_show_table_style_first_column(self):
self.assertFalse(self.test_table.show_table_style_first_column)
self.test_table.show_table_style_first_column = True
self.assertTrue(self.test_table.show_table_style_first_column)
self.test_table.show_table_style_first_column = False
def test_show_table_style_last_column(self):
self.assertFalse(self.test_table.show_table_style_last_column)
self.test_table.show_table_style_last_column = True
self.assertTrue(self.test_table.show_table_style_last_column)
self.test_table.show_table_style_last_column = False
def test_show_table_style_row_stripes(self):
self.assertTrue(self.test_table.show_table_style_row_stripes)
self.test_table.show_table_style_row_stripes = False
self.assertFalse(self.test_table.show_table_style_row_stripes)
self.test_table.show_table_style_row_stripes = True
def test_show_totals(self):
self.assertFalse(self.test_table.show_totals)
self.test_table.show_totals = True
self.assertTrue(self.test_table.show_totals)
self.test_table.show_totals = False
def test_table_style(self):
self.assertEqual(self.test_table.table_style, "TableStyleMedium2")
self.test_table.table_style = "TableStyleMedium1"
self.assertEqual(self.test_table.table_style, "TableStyleMedium1")
self.test_table.table_style = "TableStyleMedium2"
def test_totals_row_range(self):
self.assertIsNone(self.test_table.totals_row_range)
self.test_table.show_totals = True
self.assertEqual(self.test_table.totals_row_range, self.sheet["A3:B3"])
self.test_table.show_totals = False
def test_resize(self):
self.assertEqual(self.test_table.range.address, "$A$1:$B$2")
self.test_table.resize(self.sheet["A1:C3"])
self.assertEqual(self.test_table.range.address, "$A$1:$C$3")
self.test_table.resize(self.sheet["$A$1:$B$2"])
self.assertEqual(self.test_table.range.address, "$A$1:$B$2")
|
TestTable
|
python
|
ray-project__ray
|
python/ray/data/expressions.py
|
{
"start": 31412,
"end": 36964
}
|
class ____(Expr):
"""Expression that represents all columns from the input.
This is a special expression used in projections to indicate that
all existing columns should be preserved at this position in the output.
It's typically used internally by operations like with_column() and
rename_columns() to maintain existing columns.
Example:
When with_column("new_col", expr) is called, it creates:
Project(exprs=[star(), expr.alias("new_col")])
This means: keep all existing columns, then add/overwrite "new_col"
"""
# TODO: Add UnresolvedExpr. Both StarExpr and UnresolvedExpr won't have a defined data_type.
data_type: DataType = field(default_factory=lambda: DataType(object), init=False)
def structurally_equals(self, other: Any) -> bool:
return isinstance(other, StarExpr)
@PublicAPI(stability="beta")
def col(name: str) -> ColumnExpr:
"""
Reference an existing column by name.
This is the primary way to reference columns in expressions.
The returned expression will extract values from the specified
column when evaluated.
Args:
name: The name of the column to reference
Returns:
A ColumnExpr that references the specified column
Example:
>>> from ray.data.expressions import col
>>> # Reference columns in an expression
>>> expr = col("price") * col("quantity")
>>>
>>> # Use with Dataset.with_column()
>>> import ray
>>> ds = ray.data.from_items([{"price": 10, "quantity": 2}])
>>> ds = ds.with_column("total", col("price") * col("quantity"))
"""
return ColumnExpr(name)
@PublicAPI(stability="beta")
def lit(value: Any) -> LiteralExpr:
"""
Create a literal expression from a constant value.
This creates an expression that represents a constant scalar value.
The value will be broadcast to all rows when the expression is evaluated.
Args:
value: The constant value to represent. Can be any Python object
(int, float, str, bool, etc.)
Returns:
A LiteralExpr containing the specified value
Example:
>>> from ray.data.expressions import col, lit
>>> # Create literals of different types
>>> five = lit(5)
>>> pi = lit(3.14159)
>>> name = lit("Alice")
>>> flag = lit(True)
>>>
>>> # Use in expressions
>>> expr = col("age") + lit(1) # Add 1 to age column
>>>
>>> # Use with Dataset.with_column()
>>> import ray
>>> ds = ray.data.from_items([{"age": 25}, {"age": 30}])
>>> ds = ds.with_column("age_plus_one", col("age") + lit(1))
"""
return LiteralExpr(value)
# TODO remove
@DeveloperAPI(stability="alpha")
def star() -> StarExpr:
"""
References all input columns from the input.
This is a special expression used in projections to preserve all
existing columns. It's typically used with operations that want to
add or modify columns while keeping the rest.
Returns:
A StarExpr that represents all input columns.
"""
return StarExpr()
@PublicAPI(stability="alpha")
def download(uri_column_name: str) -> DownloadExpr:
"""
Create a download expression that downloads content from URIs.
This creates an expression that will download bytes from URIs stored in
a specified column. When evaluated, it will fetch the content from each URI
and return the downloaded bytes.
Args:
uri_column_name: The name of the column containing URIs to download from
Returns:
A DownloadExpr that will download content from the specified URI column
Example:
>>> from ray.data.expressions import download
>>> import ray
>>> # Create dataset with URIs
>>> ds = ray.data.from_items([
... {"uri": "s3://bucket/file1.jpg", "id": "1"},
... {"uri": "s3://bucket/file2.jpg", "id": "2"}
... ])
>>> # Add downloaded bytes column
>>> ds_with_bytes = ds.with_column("bytes", download("uri"))
"""
return DownloadExpr(uri_column_name=uri_column_name)
# ──────────────────────────────────────
# Public API for evaluation
# ──────────────────────────────────────
# Note: Implementation details are in _expression_evaluator.py
# Re-export eval_expr for public use
__all__ = [
"Operation",
"Expr",
"ColumnExpr",
"LiteralExpr",
"BinaryExpr",
"UnaryExpr",
"UDFExpr",
"DownloadExpr",
"AliasExpr",
"StarExpr",
"pyarrow_udf",
"udf",
"col",
"lit",
"download",
"star",
"_ListNamespace",
"_StringNamespace",
"_StructNamespace",
"_DatetimeNamespace",
]
def __getattr__(name: str):
"""Lazy import of namespace classes to avoid circular imports."""
if name == "_ListNamespace":
from ray.data.namespace_expressions.list_namespace import _ListNamespace
return _ListNamespace
elif name == "_StringNamespace":
from ray.data.namespace_expressions.string_namespace import _StringNamespace
return _StringNamespace
elif name == "_StructNamespace":
from ray.data.namespace_expressions.struct_namespace import _StructNamespace
return _StructNamespace
elif name == "_DatetimeNamespace":
from ray.data.namespace_expressions.dt_namespace import _DatetimeNamespace
return _DatetimeNamespace
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
|
StarExpr
|
python
|
sphinx-doc__sphinx
|
sphinx/addnodes.py
|
{
"start": 2972,
"end": 3358
}
|
class ____(nodes.Element, not_smartquotable):
"""Helper base class for injecting a fixed list of classes.
Use as the first base class.
"""
classes: list[str] = []
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self['classes'].extend(self.classes)
# Top-level nodes
#################
|
_desc_classes_injector
|
python
|
numpy__numpy
|
numpy/_core/tests/test_unicode.py
|
{
"start": 5456,
"end": 5611
}
|
class ____(CreateValues):
"""Check the creation of valued arrays (size 2, UCS4 values)"""
ulen = 2
ucs_value = ucs4_value
|
TestCreateValues_2_UCS4
|
python
|
astropy__astropy
|
astropy/extern/configobj/configobj.py
|
{
"start": 11977,
"end": 13202
}
|
class ____(InterpolationEngine):
"""Behaves like string.Template."""
_cookie = '$'
_delimiter = '$'
_KEYCRE = re.compile(r"""
\$(?:
(?P<escaped>\$) | # Two $ signs
(?P<named>[_a-z][_a-z0-9]*) | # $name format
{(?P<braced>[^}]*)} # ${name} format
)
""", re.IGNORECASE | re.VERBOSE)
def _parse_match(self, match):
# Valid name (in or out of braces): fetch value from section
key = match.group('named') or match.group('braced')
if key is not None:
value, section = self._fetch(key)
return key, value, section
# Escaped delimiter (e.g., $$): return single delimiter
if match.group('escaped') is not None:
# Return None for key and section to indicate it's time to stop
return None, self._delimiter, None
# Anything else: ignore completely, just return it unchanged
return None, match.group(), None
interpolation_engines = {
'configparser': ConfigParserInterpolation,
'template': TemplateInterpolation,
}
def __newobj__(cls, *args):
# Hack for pickle
return cls.__new__(cls, *args)
|
TemplateInterpolation
|
python
|
google__pytype
|
pytype/abstract/_classes.py
|
{
"start": 41020,
"end": 47466
}
|
class ____(ParameterizedClass, mixin.HasSlots): # pytype: disable=signature-mismatch
"""The class of a heterogeneous tuple.
The formal_type_parameters attribute stores the types of the individual tuple
elements under their indices and the overall element type under "T". So for
Tuple[str, int]
formal_type_parameters is
{0: str, 1: int, T: str or int}.
Note that we can't store the individual types as a mixin.PythonConstant as we
do for Tuple, since we can't evaluate type parameters during initialization.
"""
def __init__(
self,
base_cls,
formal_type_parameters,
ctx: "context.Context",
template=None,
) -> None:
super().__init__(base_cls, formal_type_parameters, ctx, template)
mixin.HasSlots.init_mixin(self)
self.set_native_slot("__getitem__", self.getitem_slot)
self.set_native_slot("__add__", self.add_slot)
if isinstance(
self._formal_type_parameters, abstract_utils.LazyFormalTypeParameters
):
num_parameters = len(self._formal_type_parameters.template)
else:
num_parameters = len(self._formal_type_parameters)
# We subtract one to account for "T".
self.tuple_length = num_parameters - 1
self._instance = None
self._instance_cache = {}
self.slots = () # tuples don't have any writable attributes
def __repr__(self) -> str:
return f"TupleClass({self.formal_type_parameters})"
def compute_mro(self) -> tuple[_base.BaseValue, ...]:
# ParameterizedClass removes the base PyTDClass(tuple) from the mro; add it
# back here so that isinstance(tuple) checks work.
return (self,) + self.base_cls.mro
def get_formal_type_parameters(self) -> dict[Any, _base.BaseValue]:
return {
abstract_utils.full_type_name(
self, abstract_utils.T
): self.formal_type_parameters[abstract_utils.T]
}
def _new_instance(
self,
container: (
_instance_base.SimpleValue | abstract_utils.DummyContainer | None
),
node: cfg.CFGNode,
args: function.Args | None,
) -> _instances.Tuple:
del args # unused
if self._instance:
return self._instance
key = (container, node)
if key in self._instance_cache:
return self._instance_cache[key]
content = []
for i in range(self.tuple_length):
p = self.formal_type_parameters[i]
if container is abstract_utils.DUMMY_CONTAINER or (
isinstance(container, _instance_base.SimpleValue)
and isinstance(p, _abstract.TypeParameter)
and p.full_name in container.all_template_names
):
content.append(p.instantiate(self.ctx.root_node, container))
else:
content.append(p.instantiate(self.ctx.root_node))
# Note that we intentionally don't set self._instance to the new tuple,
# since the tuple will create and register itself with a fresh TupleClass.
instance = _instances.Tuple(tuple(content), self.ctx)
self._instance_cache[key] = instance
return instance
def instantiate(
self,
node: cfg.CFGNode,
container: (
_instance_base.SimpleValue | abstract_utils.DummyContainer | None
) = None,
) -> cfg.Variable:
return self._new_instance(container, node, None).to_variable(node)
def _instantiate_index(self, node: cfg.CFGNode, index: int) -> cfg.Variable:
if self._instance:
return self._instance.pyval[index]
else:
index %= self.tuple_length # fixes negative indices
return self.formal_type_parameters[index].instantiate(node)
def register_instance(self, instance: _instance_base.Instance) -> None:
# A TupleClass can never have more than one registered instance because the
# only direct instances of TupleClass are Tuple objects, which create their
# own class upon instantiation. We store the instance in order to track
# changes in the types of the elements (see TupleTest.testMutableItem).
assert not self._instance
self._instance = instance
def getitem_slot(
self, node: cfg.CFGNode, index_var: cfg.Variable
) -> tuple[cfg.CFGNode, cfg.Variable]:
"""Implementation of tuple.__getitem__."""
try:
index = self.ctx.convert.value_to_constant(
abstract_utils.get_atomic_value(index_var), (int, slice)
)
except abstract_utils.ConversionError:
pass
else:
if isinstance(index, slice):
if self._instance:
slice_content = self._instance.pyval[index]
return node, self.ctx.convert.build_tuple(node, slice_content)
else:
# Constructing the tuple directly is faster than calling call_pytd.
instance = _instance_base.Instance(
self.ctx.convert.tuple_type, self.ctx
)
contained_type = self.ctx.vm.init_class(
node, self.formal_type_parameters[abstract_utils.T]
)
instance.merge_instance_type_parameter(
node, abstract_utils.T, contained_type
)
return node, instance.to_variable(node)
if -self.tuple_length <= index < self.tuple_length:
# Index out of bounds is not a pytype error because of the high
# likelihood of false positives, e.g.,
# tup = []
# idx = 0
# if idx < len(tup):
# tup[idx]
return node, self._instantiate_index(node, index)
return self.call_pytd(
node, "__getitem__", self.instantiate(node), index_var
)
def get_special_attribute(
self, node: cfg.CFGNode, name: str, valself: cfg.Variable | None
) -> cfg.Variable | None:
if (
valself
and not abstract_utils.equivalent_to(valself, self)
and name in self._slots
):
return mixin.HasSlots.get_special_attribute(self, node, name, valself)
return super().get_special_attribute(node, name, valself)
def add_slot(
self, node: cfg.CFGNode, other_var: cfg.Variable
) -> tuple[cfg.CFGNode, cfg.Variable]:
"""Implementation of tuple.__add__."""
try:
other = abstract_utils.get_atomic_value(other_var)
except abstract_utils.ConversionError:
pass
else:
if self._instance and isinstance(other, _abstract.Tuple):
pyval = self._instance.pyval + other.pyval
ret = _instances.Tuple(pyval, self.ctx)
return node, ret.to_variable(node)
return self.call_pytd(node, "__add__", self.instantiate(node), other_var)
|
TupleClass
|
python
|
pydantic__pydantic
|
pydantic/v1/errors.py
|
{
"start": 7225,
"end": 7308
}
|
class ____(PydanticTypeError):
msg_template = 'value is not a valid set'
|
SetError
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/dialects/mssql/pyodbc.py
|
{
"start": 20285,
"end": 22358
}
|
class ____(MSExecutionContext):
_embedded_scope_identity = False
def pre_exec(self):
"""where appropriate, issue "select scope_identity()" in the same
statement.
Background on why "scope_identity()" is preferable to "@@identity":
https://msdn.microsoft.com/en-us/library/ms190315.aspx
Background on why we attempt to embed "scope_identity()" into the same
statement as the INSERT:
https://code.google.com/p/pyodbc/wiki/FAQs#How_do_I_retrieve_autogenerated/identity_values?
"""
super().pre_exec()
# don't embed the scope_identity select into an
# "INSERT .. DEFAULT VALUES"
if (
self._select_lastrowid
and self.dialect.use_scope_identity
and len(self.parameters[0])
):
self._embedded_scope_identity = True
self.statement += "; select scope_identity()"
def post_exec(self):
if self._embedded_scope_identity:
# Fetch the last inserted id from the manipulated statement
# We may have to skip over a number of result sets with
# no data (due to triggers, etc.)
while True:
try:
# fetchall() ensures the cursor is consumed
# without closing it (FreeTDS particularly)
rows = self.cursor.fetchall()
except self.dialect.dbapi.Error:
# no way around this - nextset() consumes the previous set
# so we need to just keep flipping
self.cursor.nextset()
else:
if not rows:
# async adapter drivers just return None here
self.cursor.nextset()
continue
row = rows[0]
break
self._lastrowid = int(row[0])
self.cursor_fetch_strategy = _cursor._NO_CURSOR_DML
else:
super().post_exec()
|
MSExecutionContext_pyodbc
|
python
|
pytorch__pytorch
|
torch/distributed/checkpoint/storage.py
|
{
"start": 511,
"end": 5543
}
|
class ____(abc.ABC):
"""
Interface used by ``save_state_dict`` to write to storage.
One StorageWriter instance acts as both the coordinator and the follower
in a distributed checkpoint. As part of initialization, each instance
is told its role.
A subclass should expect the following sequence of calls.
0) (all ranks) set checkpoint_id if users pass a valid checkpoint_id.
1) (all ranks) set_up_storage_writer()
2) (all ranks) prepare_local_plan()
3) (coordinator) prepare_global_plan()
4) (all ranks) write_data()
5) (coordinator) finish()
"""
@abc.abstractmethod
def reset(self, checkpoint_id: Union[str, os.PathLike, None] = None) -> None:
"""
Calls to indicates a brand new checkpoint write is going to happen.
A checkpoint_id may be present if users set the checkpoint_id for
this checkpoint write. The meaning of the checkpiont_id is
storage-dependent. It can be a path to a folder/file or a key for
a key-value storage.
Args:
checkpoint_id (Union[str, os.PathLike, None]):
The ID of this checkpoint instance. The meaning of the checkpoint_id
depends on the storage. It can be a path to a folder or to a file.
It can also be a key if the storage is a key-value store.
(Default: ``None``)
"""
...
@abc.abstractmethod
def set_up_storage_writer(
self, is_coordinator: bool, *args: Any, **kwargs: Any
) -> None:
"""
Initialize this instance.
Args:
is_coordinator (bool): Whether this instance is responsible for coordinating
the checkpoint.
"""
@abc.abstractmethod
def prepare_local_plan(self, plan: SavePlan) -> SavePlan:
"""
Perform storage-specific local planning.
While this method can produce a completely different plan, the recommended
way is to store storage specific data in SavePlan::storage_data.
Args:
plan (SavePlan): The local plan from the ``SavePlanner`` in use.
Returns:
A transformed ``SavePlan`` after storage local planning
"""
@abc.abstractmethod
def prepare_global_plan(self, plans: list[SavePlan]) -> list[SavePlan]:
"""
Perform centralized planning of storage.
This method is only called on the coordinator instance.
While this method can produce a completely different plan, the preferred
way is to store storage specific data in SavePlan::storage_data.
Args:
plans: A list of ``SavePlan`` instances, one for each rank.
Returns:
A list of transformed ``SavePlan`` after storage global planning
"""
@abc.abstractmethod
def write_data(
self, plan: SavePlan, planner: SavePlanner
) -> Future[list[WriteResult]]:
"""
Write all items from ``plan`` using ``planner`` to resolve the data.
A subclass should call ``SavePlanner::resolve_data`` on each item
from the plan to get access to the underlying object to write.
Subclasses should lazily call `resolve_data` as it can allocate memory.
In case of tensors, make following assumptions:
- They might be on any device, including not matching the one on ``WriteItem::tensor_data``
- They might be views or not contiguous. Only the projection needs to be saved.
Args:
plan (SavePlan): The save plan to execute.
planner (SavePlanner): Planner object to be used to resolve items to data.
Returns:
A future that completes to a list of WriteResult
"""
@abc.abstractmethod
def finish(self, metadata: Metadata, results: list[list[WriteResult]]) -> None:
"""
Write the metadata and marks the current checkpoint as successful.
The actual format/schema used for serializing `metadata` is an
implementation detail. The only requirement is that it's recoverable
in to the same object graph.
Args:
metadata (Metadata): metadata for the new checkpoint
results: A list of WriteResults from all ranks.
Returns:
None
"""
@classmethod
@abc.abstractmethod
def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool:
"""
Check if the given checkpoint_id is supported by the storage. This allow
us to enable automatic storage selection.
"""
...
def storage_meta(self) -> Optional[StorageMeta]:
"""
Return the storage-specific metadata. This is used to store additional information
in a checkpoint that can be useful for providing request-level observability. StorageMeta
is passed to the ``SavePlanner`` during save calls. Returns None by default.
TODO: provide an example
"""
return None
|
StorageWriter
|
python
|
apache__airflow
|
providers/alibaba/tests/unit/alibaba/cloud/hooks/test_oss.py
|
{
"start": 1260,
"end": 7393
}
|
class ____:
def setup_method(self):
with mock.patch(
OSS_STRING.format("OSSHook.__init__"),
new=mock_oss_hook_default_project_id,
):
self.hook = OSSHook(oss_conn_id=MOCK_OSS_CONN_ID)
def test_parse_oss_url(self):
parsed = self.hook.parse_oss_url(f"oss://{MOCK_BUCKET_NAME}/this/is/not/a-real-key.txt")
assert parsed == (MOCK_BUCKET_NAME, "this/is/not/a-real-key.txt"), "Incorrect parsing of the oss url"
def test_parse_oss_object_directory(self):
parsed = self.hook.parse_oss_url(f"oss://{MOCK_BUCKET_NAME}/this/is/not/a-real-oss-directory/")
assert parsed == (
MOCK_BUCKET_NAME,
"this/is/not/a-real-oss-directory/",
), "Incorrect parsing of the oss url"
@mock.patch(OSS_STRING.format("oss2"))
def test_get_credential(self, mock_oss2):
self.hook.get_credential()
mock_oss2.Auth.assert_called_once_with("mock_access_key_id", "mock_access_key_secret")
@mock.patch(OSS_STRING.format("OSSHook.get_credential"))
@mock.patch(OSS_STRING.format("oss2"))
def test_get_bucket(self, mock_oss2, mock_get_credential):
self.hook.get_bucket("mock_bucket_name")
mock_get_credential.assert_called_once_with()
mock_oss2.Bucket.assert_called_once_with(
mock_get_credential.return_value, "https://oss-mock_region.aliyuncs.com", MOCK_BUCKET_NAME
)
@mock.patch(OSS_STRING.format("OSSHook.get_bucket"))
def test_object_exist(self, mock_service):
# Given
mock_bucket = mock_service.return_value
exists_method = mock_bucket.object_exists
exists_method.return_value = True
# When
res = self.hook.object_exists(MOCK_KEY, MOCK_BUCKET_NAME)
# Then
assert res is True
mock_service.assert_called_once_with(MOCK_BUCKET_NAME)
exists_method.assert_called_once_with(MOCK_KEY)
@mock.patch(OSS_STRING.format("OSSHook.get_bucket"))
def test_load_string(self, mock_service):
self.hook.load_string(MOCK_KEY, MOCK_CONTENT, MOCK_BUCKET_NAME)
mock_service.assert_called_once_with(MOCK_BUCKET_NAME)
mock_service.return_value.put_object.assert_called_once_with(MOCK_KEY, MOCK_CONTENT)
@mock.patch(OSS_STRING.format("OSSHook.get_bucket"))
def test_upload_local_file(self, mock_service):
self.hook.upload_local_file(MOCK_KEY, MOCK_FILE_PATH, MOCK_BUCKET_NAME)
mock_service.assert_called_once_with(MOCK_BUCKET_NAME)
mock_service.return_value.put_object_from_file.assert_called_once_with(MOCK_KEY, MOCK_FILE_PATH)
@mock.patch(OSS_STRING.format("OSSHook.get_bucket"))
def test_download_file(self, mock_service):
self.hook.download_file(MOCK_KEY, MOCK_FILE_PATH, MOCK_BUCKET_NAME)
mock_service.assert_called_once_with(MOCK_BUCKET_NAME)
mock_service.return_value.get_object_to_file.assert_called_once_with(MOCK_KEY, MOCK_FILE_PATH)
@mock.patch(OSS_STRING.format("OSSHook.get_bucket"))
def test_delete_object(self, mock_service):
self.hook.delete_object(MOCK_KEY, MOCK_BUCKET_NAME)
mock_service.assert_called_once_with(MOCK_BUCKET_NAME)
mock_service.return_value.delete_object.assert_called_once_with(MOCK_KEY)
@mock.patch(OSS_STRING.format("OSSHook.get_bucket"))
def test_delete_objects(self, mock_service):
self.hook.delete_objects(MOCK_KEYS, MOCK_BUCKET_NAME)
mock_service.assert_called_once_with(MOCK_BUCKET_NAME)
mock_service.return_value.batch_delete_objects.assert_called_once_with(MOCK_KEYS)
@mock.patch(OSS_STRING.format("OSSHook.get_bucket"))
def test_delete_bucket(self, mock_service):
self.hook.delete_bucket(MOCK_BUCKET_NAME)
mock_service.assert_called_once_with(MOCK_BUCKET_NAME)
mock_service.return_value.delete_bucket.assert_called_once_with()
@mock.patch(OSS_STRING.format("OSSHook.get_bucket"))
def test_create_bucket(self, mock_service):
self.hook.create_bucket(MOCK_BUCKET_NAME)
mock_service.assert_called_once_with(MOCK_BUCKET_NAME)
mock_service.return_value.create_bucket.assert_called_once_with()
@mock.patch(OSS_STRING.format("OSSHook.get_bucket"))
def test_append_string(self, mock_service):
self.hook.append_string(MOCK_BUCKET_NAME, MOCK_CONTENT, MOCK_KEY, 0)
mock_service.assert_called_once_with(MOCK_BUCKET_NAME)
mock_service.return_value.append_object.assert_called_once_with(MOCK_KEY, 0, MOCK_CONTENT)
@mock.patch(OSS_STRING.format("OSSHook.get_bucket"))
def test_read_key(self, mock_service):
# Given
mock_service.return_value.get_object.return_value.read.return_value.decode.return_value = MOCK_CONTENT
# When
res = self.hook.read_key(MOCK_BUCKET_NAME, MOCK_KEY)
# Then
assert res == MOCK_CONTENT
mock_service.assert_called_once_with(MOCK_BUCKET_NAME)
mock_service.return_value.get_object.assert_called_once_with(MOCK_KEY)
mock_service.return_value.get_object.return_value.read.assert_called_once_with()
mock_service.return_value.get_object.return_value.read.return_value.decode.assert_called_once_with(
"utf-8"
)
@mock.patch(OSS_STRING.format("OSSHook.get_bucket"))
def test_head_key(self, mock_service):
self.hook.head_key(MOCK_BUCKET_NAME, MOCK_KEY)
mock_service.assert_called_once_with(MOCK_BUCKET_NAME)
mock_service.return_value.head_object.assert_called_once_with(MOCK_KEY)
@mock.patch(OSS_STRING.format("OSSHook.get_bucket"))
def test_key_exists(self, mock_service):
# When
mock_service.return_value.object_exists.return_value = True
# Given
res = self.hook.key_exist(MOCK_BUCKET_NAME, MOCK_KEY)
# Then
assert res is True
mock_service.assert_called_once_with(MOCK_BUCKET_NAME)
mock_service.return_value.object_exists.assert_called_once_with(MOCK_KEY)
def test_get_default_region(self):
assert self.hook.get_default_region() == "mock_region"
|
TestOSSHook
|
python
|
pandas-dev__pandas
|
pandas/tests/indexing/test_loc.py
|
{
"start": 8917,
"end": 59817
}
|
class ____:
# Tests for loc that do not depend on subclassing Base
def test_loc_npstr(self):
# GH#45580
df = DataFrame(index=date_range("2021", "2022"))
result = df.loc[np.array(["2021/6/1"])[0] :]
expected = df.iloc[151:]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"msg, key",
[
(r"Period\('2019', 'Y-DEC'\), 'foo', 'bar'", (Period(2019), "foo", "bar")),
(r"Period\('2019', 'Y-DEC'\), 'y1', 'bar'", (Period(2019), "y1", "bar")),
(r"Period\('2019', 'Y-DEC'\), 'foo', 'z1'", (Period(2019), "foo", "z1")),
(
r"Period\('2018', 'Y-DEC'\), Period\('2016', 'Y-DEC'\), 'bar'",
(Period(2018), Period(2016), "bar"),
),
(r"Period\('2018', 'Y-DEC'\), 'foo', 'y1'", (Period(2018), "foo", "y1")),
(
r"Period\('2017', 'Y-DEC'\), 'foo', Period\('2015', 'Y-DEC'\)",
(Period(2017), "foo", Period(2015)),
),
(r"Period\('2017', 'Y-DEC'\), 'z1', 'bar'", (Period(2017), "z1", "bar")),
],
)
def test_contains_raise_error_if_period_index_is_in_multi_index(self, msg, key):
# GH#20684
"""
parse_datetime_string_with_reso return parameter if type not matched.
PeriodIndex.get_loc takes returned value from parse_datetime_string_with_reso
as a tuple.
If first argument is Period and a tuple has 3 items,
process go on not raise exception
"""
df = DataFrame(
{
"A": [Period(2019), "x1", "x2"],
"B": [Period(2018), Period(2016), "y1"],
"C": [Period(2017), "z1", Period(2015)],
"V1": [1, 2, 3],
"V2": [10, 20, 30],
}
).set_index(["A", "B", "C"])
with pytest.raises(KeyError, match=msg):
df.loc[key]
def test_loc_getitem_missing_unicode_key(self):
df = DataFrame({"a": [1]})
with pytest.raises(KeyError, match="\u05d0"):
df.loc[:, "\u05d0"] # should not raise UnicodeEncodeError
def test_loc_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.default_rng(2).random((20, 5)),
index=["ABCDE"[x % 5] for x in range(20)],
)
expected = df.loc["A", 0]
result = df.loc[:, 0].loc["A"]
tm.assert_series_equal(result, expected)
def test_loc_getitem_dups2(self):
# GH4726
# dup indexing with iloc/loc
df = DataFrame(
[[1, 2, "foo", "bar", Timestamp("20130101")]],
columns=["a", "a", "a", "a", "a"],
index=[1],
)
expected = Series(
[1, 2, "foo", "bar", Timestamp("20130101")],
index=["a", "a", "a", "a", "a"],
name=1,
)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.loc[1]
tm.assert_series_equal(result, expected)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{
"me": list("rttti"),
"foo": list("aaade"),
"bar": np.arange(5, dtype="float64") * 1.34 + 2,
"bar2": np.arange(5, dtype="float64") * -0.34 + 2,
}
).set_index("me")
indexer = (
"r",
["bar", "bar2"],
)
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = (
"r",
"bar",
)
df = df_orig.copy()
df.loc[indexer] *= 2.0
assert df.loc[indexer] == 2.0 * df_orig.loc[indexer]
indexer = (
"t",
["bar", "bar2"],
)
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
    def test_loc_setitem_slice(self):
        # GH10503
        # assigning the same type should not change the type
        df1 = DataFrame({"a": [0, 1, 1], "b": Series([100, 200, 300], dtype="uint32")})
        ix = df1["a"] == 1
        newb1 = df1.loc[ix, "b"] + 1
        df1.loc[ix, "b"] = newb1
        expected = DataFrame(
            {"a": [0, 1, 1], "b": Series([100, 201, 301], dtype="uint32")}
        )
        tm.assert_frame_equal(df1, expected)

        # assigning a new type should get the inferred type
        df2 = DataFrame({"a": [0, 1, 1], "b": [100, 200, 300]}, dtype="uint64")
        # NOTE(review): mask is built from df1, not df2 — values happen to be
        # identical so the test is unaffected; confirm against upstream intent
        ix = df1["a"] == 1
        newb2 = df2.loc[ix, "b"]
        with pytest.raises(TypeError, match="Invalid value"):
            df1.loc[ix, "b"] = newb2

    def test_loc_setitem_dtype(self):
        # GH31340
        # setting float32 values into float64 columns keeps float64 (in-place)
        df = DataFrame({"id": ["A"], "a": [1.2], "b": [0.0], "c": [-2.5]})
        cols = ["a", "b", "c"]
        df.loc[:, cols] = df.loc[:, cols].astype("float32")

        # pre-2.0 this setting would swap in new arrays, in 2.0 it is correctly
        # in-place, consistent with non-split-path
        expected = DataFrame(
            {
                "id": ["A"],
                "a": np.array([1.2], dtype="float64"),
                "b": np.array([0.0], dtype="float64"),
                "c": np.array([-2.5], dtype="float64"),
            }
        )  # id is inferred as object

        tm.assert_frame_equal(df, expected)

    def test_getitem_label_list_with_missing(self):
        # any missing label in a list indexer raises KeyError
        s = Series(range(3), index=["a", "b", "c"])

        # consistency
        with pytest.raises(KeyError, match="not in index"):
            s[["a", "d"]]

        s = Series(range(3))
        with pytest.raises(KeyError, match="not in index"):
            s[[0, 3]]

    @pytest.mark.parametrize("index", [[True, False], [True, False, True, False]])
    def test_loc_getitem_bool_diff_len(self, index):
        # GH26658
        # a boolean mask whose length differs from the object must raise
        s = Series([1, 2, 3])
        msg = f"Boolean index has wrong length: {len(index)} instead of {len(s)}"
        with pytest.raises(IndexError, match=msg):
            s.loc[index]

    def test_loc_getitem_int_slice(self):
        # TODO: test something here?
        pass
    def test_loc_to_fail(self):
        # GH3449
        # integer keys against a string-labeled frame must KeyError
        df = DataFrame(
            np.random.default_rng(2).random((3, 3)),
            index=["a", "b", "c"],
            columns=["e", "f", "g"],
        )

        msg = (
            rf"\"None of \[Index\(\[1, 2\], dtype='{np.dtype(int)}'\)\] are "
            r"in the \[columns\]\""
        )
        with pytest.raises(KeyError, match=msg):
            df.loc[[1, 2], [1, 2]]

    def test_loc_to_fail2(self):
        # GH 7496
        # loc should not fallback

        s = Series(dtype=object)
        s.loc[1] = 1
        s.loc["a"] = 2

        with pytest.raises(KeyError, match=r"^-1$"):
            s.loc[-1]

        msg = (
            rf"\"None of \[Index\(\[-1, -2\], dtype='{np.dtype(int)}'\)\] are "
            r"in the \[index\]\""
        )
        with pytest.raises(KeyError, match=msg):
            s.loc[[-1, -2]]

        msg = r"\"None of \[Index\(\['4'\], dtype='object'\)\] are in the \[index\]\""
        with pytest.raises(KeyError, match=msg):
            s.loc[Index(["4"], dtype=object)]

        # once -1 exists, only the still-missing -2 should be reported
        s.loc[-1] = 3
        with pytest.raises(KeyError, match="not in index"):
            s.loc[[-1, -2]]
        s["a"] = 2
        msg = (
            rf"\"None of \[Index\(\[-2\], dtype='{np.dtype(int)}'\)\] are "
            r"in the \[index\]\""
        )
        with pytest.raises(KeyError, match=msg):
            s.loc[[-2]]

        del s["a"]

        # setting with an entirely-missing list key also raises
        with pytest.raises(KeyError, match=msg):
            s.loc[[-2]] = 0

    def test_loc_to_fail3(self):
        # inconsistency between .loc[values] and .loc[values,:]
        # GH 7999
        df = DataFrame([["a"], ["b"]], index=[1, 2], columns=["value"])

        msg = (
            rf"\"None of \[Index\(\[3\], dtype='{np.dtype(int)}'\)\] are "
            r"in the \[index\]\""
        )
        with pytest.raises(KeyError, match=msg):
            df.loc[[3], :]

        with pytest.raises(KeyError, match=msg):
            df.loc[[3]]

    def test_loc_getitem_list_with_fail(self):
        # 15747
        # should KeyError if *any* missing labels
        s = Series([1, 2, 3])

        s.loc[[2]]  # all present: no error

        msg = "None of [RangeIndex(start=3, stop=4, step=1)] are in the [index]"
        with pytest.raises(KeyError, match=re.escape(msg)):
            s.loc[[3]]

        # a non-match and a match
        with pytest.raises(KeyError, match="not in index"):
            s.loc[[2, 3]]
    def test_loc_index(self):
        # gh-17131
        # a boolean index should index like a boolean numpy array

        df = DataFrame(
            np.random.default_rng(2).random(size=(5, 10)),
            index=["alpha_0", "alpha_1", "alpha_2", "beta_0", "beta_1"],
        )

        mask = df.index.map(lambda x: "alpha" in x)
        expected = df.loc[np.array(mask)]

        # Index of bools, raw ndarray, and BooleanArray all select the same rows
        result = df.loc[mask]
        tm.assert_frame_equal(result, expected)

        result = df.loc[mask.values]
        tm.assert_frame_equal(result, expected)

        result = df.loc[pd.array(mask, dtype="boolean")]
        tm.assert_frame_equal(result, expected)

    def test_loc_general(self):
        df = DataFrame(
            np.random.default_rng(2).random((4, 4)),
            columns=["A", "B", "C", "D"],
            index=["A", "B", "C", "D"],
        )

        # want this to work: label slice on columns chained with iloc on rows
        result = df.loc[:, "A":"B"].iloc[0:2, :]
        assert (result.columns == ["A", "B"]).all()
        assert (result.index == ["A", "B"]).all()

        # mixed type: single-row selection of mixed dtypes yields object Series
        result = DataFrame({"a": [Timestamp("20130101")], "b": [1]}).iloc[0]
        expected = Series([Timestamp("20130101"), 1], index=["a", "b"], name=0)
        tm.assert_series_equal(result, expected)
        assert result.dtype == object

    @pytest.fixture
    def frame_for_consistency(self):
        # shared fixture for the setitem-consistency tests below:
        # one datetime64 column and one int64 column
        return DataFrame(
            {
                "date": date_range("2000-01-01", "2000-01-5"),
                "val": Series(range(5), dtype=np.int64),
            }
        )
    @pytest.mark.parametrize(
        "val",
        [0, np.array(0, dtype=np.int64), np.array([0, 0, 0, 0, 0], dtype=np.int64)],
    )
    def test_loc_setitem_consistency(self, frame_for_consistency, val):
        # GH 6149
        # coerce similarly for setitem and loc when rows have a null-slice
        # (int into datetime64 column is now an error rather than a coercion)
        df = frame_for_consistency.copy()
        with pytest.raises(TypeError, match="Invalid value"):
            df.loc[:, "date"] = val

    def test_loc_setitem_consistency_dt64_to_str(self, frame_for_consistency):
        # GH 6149
        # coerce similarly for setitem and loc when rows have a null-slice
        df = frame_for_consistency.copy()
        with pytest.raises(TypeError, match="Invalid value"):
            df.loc[:, "date"] = "foo"

    def test_loc_setitem_consistency_dt64_to_float(self, frame_for_consistency):
        # GH 6149
        # coerce similarly for setitem and loc when rows have a null-slice
        df = frame_for_consistency.copy()
        with pytest.raises(TypeError, match="Invalid value"):
            df.loc[:, "date"] = 1.0

    def test_loc_setitem_consistency_single_row(self):
        # GH 15494
        # setting on frame with single row
        df = DataFrame({"date": Series([Timestamp("20180101")])})
        with pytest.raises(TypeError, match="Invalid value"):
            df.loc[:, "date"] = "string"

    def test_loc_setitem_consistency_empty(self):
        # empty (essentially noops)
        # before the enforcement of #45333 in 2.0, the loc.setitem here would
        # change the dtype of df.x to int64
        expected = DataFrame(columns=["x", "y"])
        df = DataFrame(columns=["x", "y"])
        with tm.assert_produces_warning(None):
            df.loc[:, "x"] = 1
        tm.assert_frame_equal(df, expected)

        # setting with setitem swaps in a new array, so changes the dtype
        df = DataFrame(columns=["x", "y"])
        df["x"] = 1
        expected["x"] = expected["x"].astype(np.int64)
        tm.assert_frame_equal(df, expected)

    def test_loc_setitem_consistency_slice_column_len(self, using_infer_string):
        # .loc[:,column] setting with slice == len of the column
        # GH10408
        levels = [
            ["Region_1"] * 4,
            ["Site_1", "Site_1", "Site_2", "Site_2"],
            [3987227376, 3980680971, 3977723249, 3977723089],
        ]
        mi = MultiIndex.from_arrays(levels, names=["Region", "Site", "RespondentID"])

        clevels = [
            ["Respondent", "Respondent", "Respondent", "OtherCat", "OtherCat"],
            ["Something", "StartDate", "EndDate", "Yes/No", "SomethingElse"],
        ]
        cols = MultiIndex.from_arrays(clevels, names=["Level_0", "Level_1"])

        values = [
            ["A", "5/25/2015 10:59", "5/25/2015 11:22", "Yes", np.nan],
            ["A", "5/21/2015 9:40", "5/21/2015 9:52", "Yes", "Yes"],
            ["A", "5/20/2015 8:27", "5/20/2015 8:41", "Yes", np.nan],
            ["A", "5/20/2015 8:33", "5/20/2015 9:09", "Yes", "No"],
        ]
        df = DataFrame(values, index=mi, columns=cols)

        # under infer_string, writing datetimes into str columns is rejected
        ctx = contextlib.nullcontext()
        if using_infer_string:
            ctx = pytest.raises(TypeError, match="Invalid value")

        with ctx:
            df.loc[:, ("Respondent", "StartDate")] = to_datetime(
                df.loc[:, ("Respondent", "StartDate")]
            )
        with ctx:
            df.loc[:, ("Respondent", "EndDate")] = to_datetime(
                df.loc[:, ("Respondent", "EndDate")]
            )

        if using_infer_string:
            # infer-objects won't infer stuff anymore
            return

        df = df.infer_objects()

        # Adding a new key
        df.loc[:, ("Respondent", "Duration")] = (
            df.loc[:, ("Respondent", "EndDate")]
            - df.loc[:, ("Respondent", "StartDate")]
        )

        # timedelta64[m] -> float, so this cannot be done inplace, so
        # no warning
        with pytest.raises(TypeError, match="Invalid value"):
            df.loc[:, ("Respondent", "Duration")] = df.loc[
                :, ("Respondent", "Duration")
            ] / Timedelta(60_000_000_000)
    @pytest.mark.parametrize("unit", ["Y", "M", "D", "h", "m", "s", "ms", "us"])
    def test_loc_assign_non_ns_datetime(self, unit):
        # GH 27395, non-ns dtype assignment via .loc should work
        # and return the same result when using simple assignment
        df = DataFrame(
            {
                "timestamp": [
                    np.datetime64("2017-02-11 12:41:29"),
                    np.datetime64("1991-11-07 04:22:37"),
                ]
            }
        )

        # write the same non-nanosecond values via .loc and via __setitem__
        df.loc[:, unit] = df.loc[:, "timestamp"].values.astype(f"datetime64[{unit}]")
        df["expected"] = df.loc[:, "timestamp"].values.astype(f"datetime64[{unit}]")
        expected = Series(df.loc[:, "expected"], name=unit)
        tm.assert_series_equal(df.loc[:, unit], expected)

    def test_loc_modify_datetime(self):
        # see gh-28837
        df = DataFrame.from_dict(
            {"date": [1485264372711, 1485265925110, 1540215845888, 1540282121025]}
        )

        df["date_dt"] = to_datetime(df["date"], unit="ms", cache=True).dt.as_unit("us")

        # full-column copy, then partial overwrite of two rows via .loc
        df.loc[:, "date_dt_cp"] = df.loc[:, "date_dt"]
        df.loc[[2, 3], "date_dt_cp"] = df.loc[[2, 3], "date_dt"]

        expected = DataFrame(
            [
                [1485264372711, "2017-01-24 13:26:12.711", "2017-01-24 13:26:12.711"],
                [1485265925110, "2017-01-24 13:52:05.110", "2017-01-24 13:52:05.110"],
                [1540215845888, "2018-10-22 13:44:05.888", "2018-10-22 13:44:05.888"],
                [1540282121025, "2018-10-23 08:08:41.025", "2018-10-23 08:08:41.025"],
            ],
            columns=["date", "date_dt", "date_dt_cp"],
        )

        columns = ["date_dt", "date_dt_cp"]
        expected[columns] = expected[columns].apply(to_datetime)

        tm.assert_frame_equal(df, expected)
    @pytest.mark.parametrize("has_ref", [True, False])
    def test_loc_setitem_frame_with_reindex(self, has_ref):
        # GH#6254 setting issue
        df = DataFrame(index=[3, 5, 4], columns=["A"], dtype=float)
        if has_ref:
            # keep a second reference alive to exercise the Copy-on-Write path
            view = df[:]  # noqa: F841
        df.loc[[4, 3, 5], "A"] = np.array([1, 2, 3], dtype="int64")

        # setting integer values into a float dataframe with loc is inplace,
        # so we retain float dtype
        ser = Series([2, 3, 1], index=[3, 5, 4], dtype=float)
        expected = DataFrame({"A": ser})
        tm.assert_frame_equal(df, expected)

    def test_loc_setitem_frame_with_reindex_mixed(self):
        # GH#40480
        df = DataFrame(index=[3, 5, 4], columns=["A", "B"], dtype=float)
        df["B"] = "string"
        df.loc[[4, 3, 5], "A"] = np.array([1, 2, 3], dtype="int64")
        ser = Series([2, 3, 1], index=[3, 5, 4], dtype="int64")
        # pre-2.0 this setting swapped in a new array, now it is inplace
        # consistent with non-split-path
        expected = DataFrame({"A": ser.astype(float)})
        expected["B"] = "string"
        tm.assert_frame_equal(df, expected)

    def test_loc_setitem_frame_with_inverted_slice(self):
        # GH#40480
        df = DataFrame(index=[1, 2, 3], columns=["A", "B"], dtype=float)
        df["B"] = "string"
        # decreasing slice(3, 0, -1) selects labels 3, 2, 1 in that order
        df.loc[slice(3, 0, -1), "A"] = np.array([1, 2, 3], dtype="int64")
        # pre-2.0 this setting swapped in a new array, now it is inplace
        # consistent with non-split-path
        expected = DataFrame({"A": [3.0, 2.0, 1.0], "B": "string"}, index=[1, 2, 3])
        tm.assert_frame_equal(df, expected)

    @pytest.mark.parametrize("has_ref", [True, False])
    def test_loc_setitem_empty_frame(self, has_ref):
        # GH#6252 setting with an empty frame
        keys1 = ["@" + str(i) for i in range(5)]
        val1 = np.arange(5, dtype="int64")

        keys2 = ["@" + str(i) for i in range(4)]
        val2 = np.arange(4, dtype="int64")

        index = list(set(keys1).union(keys2))
        df = DataFrame(index=index)
        df["A"] = np.nan
        if has_ref:
            view = df[:]  # noqa: F841
        df.loc[keys1, "A"] = val1

        df["B"] = np.nan
        df.loc[keys2, "B"] = val2

        # Because df["A"] was initialized as float64, setting values into it
        # is inplace, so that dtype is retained
        sera = Series(val1, index=keys1, dtype=np.float64)
        serb = Series(val2, index=keys2)
        expected = DataFrame({"A": sera, "B": serb}, columns=Index(["A", "B"])).reindex(
            index=index
        )
        tm.assert_frame_equal(df, expected)

    @pytest.mark.parametrize("has_ref", [True, False])
    def test_loc_setitem_frame(self, has_ref):
        df = DataFrame(
            np.random.default_rng(2).standard_normal((4, 4)),
            index=list("abcd"),
            columns=list("ABCD"),
        )
        if has_ref:
            view = df[:]  # noqa: F841

        result = df.iloc[0, 0]

        # scalar set via labels is observable via both .loc and .iloc
        df.loc["a", "A"] = 1
        result = df.loc["a", "A"]
        assert result == 1
        result = df.iloc[0, 0]
        assert result == 1

        df.loc[:, "B":"D"] = 0
        expected = df.loc[:, "B":"D"]
        result = df.iloc[:, 1:]
        tm.assert_frame_equal(result, expected)
    def test_loc_setitem_frame_nan_int_coercion_invalid(self):
        # GH 8669
        # invalid coercion of nan -> int
        # (an all-False mask means no rows are actually set, so df is unchanged)
        df = DataFrame({"A": [1, 2, 3], "B": np.nan})
        df.loc[df.B > df.A, "B"] = df.A
        expected = DataFrame({"A": [1, 2, 3], "B": np.nan})
        tm.assert_frame_equal(df, expected)

    def test_loc_setitem_frame_mixed_labels(self):
        # GH 6546
        # setting with mixed labels (int and str column labels side by side)
        df = DataFrame({1: [1, 2], 2: [3, 4], "a": ["a", "b"]})

        result = df.loc[0, [1, 2]]
        expected = Series(
            [1, 3], index=Index([1, 2], dtype=object), dtype="int64", name=0
        )
        tm.assert_series_equal(result, expected)

        expected = DataFrame({1: [5, 2], 2: [6, 4], "a": ["a", "b"]})
        df.loc[0, [1, 2]] = [5, 6]
        tm.assert_frame_equal(df, expected)

    def test_loc_setitem_frame_multiples(self):
        # multiple setting
        df = DataFrame(
            {"A": ["foo", "bar", "baz"], "B": Series(range(3), dtype=np.int64)}
        )
        # rhs index is re-labeled so it aligns with the target rows
        rhs = df.loc[1:2]
        rhs.index = df.index[0:2]
        df.loc[0:1] = rhs
        expected = DataFrame(
            {"A": ["bar", "baz", "baz"], "B": Series([1, 2, 2], dtype=np.int64)}
        )
        tm.assert_frame_equal(df, expected)

        # multiple setting with frame on rhs (with M8)
        df = DataFrame(
            {
                "date": date_range("2000-01-01", "2000-01-5", unit="ns"),
                "val": Series(range(5), dtype=np.int64),
            }
        )
        expected = DataFrame(
            {
                "date": [
                    Timestamp("20000101"),
                    Timestamp("20000102"),
                    Timestamp("20000101"),
                    Timestamp("20000102"),
                    Timestamp("20000103"),
                ],
                "val": Series([0, 1, 0, 1, 2], dtype=np.int64),
            }
        )
        expected["date"] = expected["date"].astype("M8[ns]")
        rhs = df.loc[0:2]
        rhs.index = df.index[2:5]
        df.loc[2:4] = rhs
        tm.assert_frame_equal(df, expected)
    @pytest.mark.parametrize(
        "indexer", [["A"], slice(None, "A", None), np.array(["A"])]
    )
    @pytest.mark.parametrize("value", [["Z"], np.array(["Z"])])
    def test_loc_setitem_with_scalar_index(self, indexer, value):
        # GH #19474
        # assigning like "df.loc[0, ['A']] = ['Z']" should be evaluated
        # elementwisely, not using "setter('A', ['Z'])".

        # Set object dtype to avoid upcast when setting 'Z'
        df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"]).astype({"A": object})
        df.loc[0, indexer] = value
        result = df.loc[0, "A"]

        assert is_scalar(result) and result == "Z"

    @pytest.mark.parametrize(
        "index,box,expected",
        [
            (
                ([0, 2], ["A", "B", "C", "D"]),
                7,
                DataFrame(
                    [[7, 7, 7, 7], [3, 4, np.nan, np.nan], [7, 7, 7, 7]],
                    columns=["A", "B", "C", "D"],
                ),
            ),
            (
                (1, ["C", "D"]),
                [7, 8],
                DataFrame(
                    [[1, 2, np.nan, np.nan], [3, 4, 7, 8], [5, 6, np.nan, np.nan]],
                    columns=["A", "B", "C", "D"],
                ),
            ),
            (
                (1, ["A", "B", "C"]),
                np.array([7, 8, 9], dtype=np.int64),
                DataFrame(
                    [[1, 2, np.nan], [7, 8, 9], [5, 6, np.nan]], columns=["A", "B", "C"]
                ),
            ),
            (
                (slice(1, 3, None), ["B", "C", "D"]),
                [[7, 8, 9], [10, 11, 12]],
                DataFrame(
                    [[1, 2, np.nan, np.nan], [3, 7, 8, 9], [5, 10, 11, 12]],
                    columns=["A", "B", "C", "D"],
                ),
            ),
            (
                (slice(1, 3, None), ["C", "A", "D"]),
                np.array([[7, 8, 9], [10, 11, 12]], dtype=np.int64),
                DataFrame(
                    [[1, 2, np.nan, np.nan], [8, 4, 7, 9], [11, 6, 10, 12]],
                    columns=["A", "B", "C", "D"],
                ),
            ),
            (
                (slice(None, None, None), ["A", "C"]),
                DataFrame([[7, 8], [9, 10], [11, 12]], columns=["A", "C"]),
                DataFrame(
                    [[7, 2, 8], [9, 4, 10], [11, 6, 12]], columns=["A", "B", "C"]
                ),
            ),
        ],
    )
    def test_loc_setitem_missing_columns(self, index, box, expected):
        # GH 29334
        # setting via .loc with column labels that don't exist yet should
        # enlarge the frame, filling untouched cells with NaN
        df = DataFrame([[1, 2], [3, 4], [5, 6]], columns=["A", "B"])

        df.loc[index] = box
        tm.assert_frame_equal(df, expected)
    def test_loc_coercion(self):
        # GH#12411
        # row selection must not change the tz-aware datetime dtype
        df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
        expected = df.dtypes

        result = df.iloc[[0]]
        tm.assert_series_equal(result.dtypes, expected)

        result = df.iloc[[1]]
        tm.assert_series_equal(result.dtypes, expected)

    def test_loc_coercion2(self):
        # GH#12045
        # out-of-ns-bounds datetime (year 1012) keeps its dtype on selection
        df = DataFrame({"date": [datetime(2012, 1, 1), datetime(1012, 1, 2)]})
        expected = df.dtypes

        result = df.iloc[[0]]
        tm.assert_series_equal(result.dtypes, expected)

        result = df.iloc[[1]]
        tm.assert_series_equal(result.dtypes, expected)

    def test_loc_coercion3(self):
        # GH#11594
        # slicing an object column of mostly-None keeps dtype
        df = DataFrame({"text": ["some words"] + [None] * 9})
        expected = df.dtypes

        result = df.iloc[0:2]
        tm.assert_series_equal(result.dtypes, expected)

        result = df.iloc[3:]
        tm.assert_series_equal(result.dtypes, expected)

    def test_setitem_new_key_tz(self, indexer_sl):
        # GH#12862 should not raise on assigning the second value
        vals = [
            to_datetime(42).tz_localize("UTC"),
            to_datetime(666).tz_localize("UTC"),
        ]
        expected = Series(vals, index=Index(["foo", "bar"]))

        ser = Series(dtype=object)
        indexer_sl(ser)["foo"] = vals[0]
        indexer_sl(ser)["bar"] = vals[1]

        tm.assert_series_equal(ser, expected)

    def test_loc_non_unique(self):
        # GH3659
        # non-unique indexer with loc slice
        # https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs

        # these are going to raise because the we are non monotonic
        df = DataFrame(
            {"A": [1, 2, 3, 4, 5, 6], "B": [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3]
        )
        msg = "'Cannot get left slice bound for non-unique label: 1'"
        with pytest.raises(KeyError, match=msg):
            df.loc[1:]
        msg = "'Cannot get left slice bound for non-unique label: 0'"
        with pytest.raises(KeyError, match=msg):
            df.loc[0:]
        msg = "'Cannot get left slice bound for non-unique label: 1'"
        with pytest.raises(KeyError, match=msg):
            df.loc[1:2]

        # monotonic are ok
        df = DataFrame(
            {"A": [1, 2, 3, 4, 5, 6], "B": [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3]
        ).sort_index(axis=0)
        result = df.loc[1:]
        expected = DataFrame({"A": [2, 4, 5, 6], "B": [4, 6, 7, 8]}, index=[1, 1, 2, 3])
        tm.assert_frame_equal(result, expected)

        result = df.loc[0:]
        tm.assert_frame_equal(result, df)

        result = df.loc[1:2]
        expected = DataFrame({"A": [2, 4, 5], "B": [4, 6, 7]}, index=[1, 1, 2])
        tm.assert_frame_equal(result, expected)
    @pytest.mark.arm_slow
    @pytest.mark.slow
    @pytest.mark.parametrize("length, l2", [[900, 100], [900000, 100000]])
    def test_loc_non_unique_memory_error(self, length, l2):
        # GH 4280
        # non_unique index with a large selection triggers a memory error

        columns = list("ABCDEFG")

        # `length` unique rows followed by `l2` rows all labeled 0
        df = pd.concat(
            [
                DataFrame(
                    np.random.default_rng(2).standard_normal((length, len(columns))),
                    index=np.arange(length),
                    columns=columns,
                ),
                DataFrame(np.ones((l2, len(columns))), index=[0] * l2, columns=columns),
            ]
        )

        assert df.index.is_unique is False

        mask = np.arange(l2)
        result = df.loc[mask]
        expected = pd.concat(
            [
                df.take([0]),
                DataFrame(
                    np.ones((len(mask), len(columns))),
                    index=[0] * len(mask),
                    columns=columns,
                ),
                df.take(mask[1:]),
            ]
        )
        tm.assert_frame_equal(result, expected)

    def test_loc_name(self):
        # GH 3880
        # index name must survive list-based row selection
        df = DataFrame([[1, 1], [1, 1]])
        df.index.name = "index_name"
        result = df.iloc[[0, 1]].index.name
        assert result == "index_name"

        result = df.loc[[0, 1]].index.name
        assert result == "index_name"

    def test_loc_empty_list_indexer_is_ok(self):
        df = DataFrame(
            np.ones((5, 2)),
            index=Index([f"i-{i}" for i in range(5)], name="a"),
            columns=Index([f"i-{i}" for i in range(2)], name="a"),
        )
        # vertical empty
        tm.assert_frame_equal(
            df.loc[:, []], df.iloc[:, :0], check_index_type=True, check_column_type=True
        )
        # horizontal empty
        tm.assert_frame_equal(
            df.loc[[], :], df.iloc[:0, :], check_index_type=True, check_column_type=True
        )
        # horizontal empty
        tm.assert_frame_equal(
            df.loc[[]], df.iloc[:0, :], check_index_type=True, check_column_type=True
        )
    def test_identity_slice_returns_new_object(self):
        # GH13873
        # identity slices give a new (shallow-copied) object, not the original
        original_df = DataFrame({"a": [1, 2, 3]})
        sliced_df = original_df.loc[:]
        assert sliced_df is not original_df
        assert original_df[:] is not original_df
        assert original_df.loc[:, :] is not original_df

        # should be a shallow copy
        assert np.shares_memory(original_df["a"]._values, sliced_df["a"]._values)

        # Setting using .loc[:, "a"] sets inplace so alters both sliced and orig
        # depending on CoW
        original_df.loc[:, "a"] = [4, 4, 4]
        assert (sliced_df["a"] == [1, 2, 3]).all()

        # These should not return copies
        df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
        assert df[0] is not df.loc[:, 0]

        # Same tests for Series
        original_series = Series([1, 2, 3, 4, 5, 6])
        sliced_series = original_series.loc[:]
        assert sliced_series is not original_series
        assert original_series[:] is not original_series

        original_series[:3] = [7, 8, 9]
        assert all(sliced_series[:3] == [1, 2, 3])

    def test_loc_copy_vs_view(self, request):
        # GH 15631
        # mutating a .loc selection must not write back to the parent frame
        x = DataFrame(zip(range(3), range(3)), columns=["a", "b"])

        y = x.copy()
        q = y.loc[:, "a"]
        q += 2

        tm.assert_frame_equal(x, y)

        z = x.copy()
        q = z.loc[x.index, "a"]
        q += 2

        tm.assert_frame_equal(x, z)

    def test_loc_uint64(self):
        # GH20722
        # Test whether loc accept uint64 max value as index.
        umax = np.iinfo("uint64").max
        ser = Series([1, 2], index=[umax - 1, umax])

        result = ser.loc[umax - 1]
        expected = ser.iloc[0]
        assert result == expected

        result = ser.loc[[umax - 1]]
        expected = ser.iloc[[0]]
        tm.assert_series_equal(result, expected)

        result = ser.loc[[umax - 1, umax]]
        tm.assert_series_equal(result, ser)

    def test_loc_uint64_disallow_negative(self):
        # GH#41775
        umax = np.iinfo("uint64").max
        ser = Series([1, 2], index=[umax - 1, umax])

        with pytest.raises(KeyError, match="-1"):
            # don't wrap around
            ser.loc[-1]

        with pytest.raises(KeyError, match="-1"):
            # don't wrap around
            ser.loc[[-1]]
    def test_loc_setitem_empty_append_expands_rows(self):
        # GH6173, various appends to an empty dataframe

        data = [1, 2, 3]
        expected = DataFrame(
            {"x": data, "y": np.array([np.nan] * len(data), dtype=object)}
        )

        # appends to fit length of data
        df = DataFrame(columns=["x", "y"])
        df.loc[:, "x"] = data
        tm.assert_frame_equal(df, expected)

    def test_loc_setitem_empty_append_expands_rows_mixed_dtype(self):
        # GH#37932 same as test_loc_setitem_empty_append_expands_rows
        # but with mixed dtype so we go through take_split_path
        data = [1, 2, 3]
        expected = DataFrame(
            {"x": data, "y": np.array([np.nan] * len(data), dtype=object)}
        )

        df = DataFrame(columns=["x", "y"])
        df["x"] = df["x"].astype(np.int64)
        df.loc[:, "x"] = data
        tm.assert_frame_equal(df, expected)

    def test_loc_setitem_empty_append_single_value(self):
        # only appends one value
        expected = DataFrame({"x": [1.0], "y": [np.nan]})
        df = DataFrame(columns=["x", "y"], dtype=float)
        df.loc[0, "x"] = expected.loc[0, "x"]
        tm.assert_frame_equal(df, expected)

    def test_loc_setitem_empty_append_raises(self):
        # GH6173, various appends to an empty dataframe

        data = [1, 2]
        df = DataFrame(columns=["x", "y"])
        df.index = df.index.astype(np.int64)
        msg = r"None of .*Index.* are in the \[index\]"
        with pytest.raises(KeyError, match=msg):
            df.loc[[0, 1], "x"] = data

        # list value of length 2 can't be broadcast into a 0-row slice target
        msg = "setting an array element with a sequence."
        with pytest.raises(ValueError, match=msg):
            df.loc[0:2, "x"] = data
    def test_indexing_zerodim_np_array(self):
        # GH24924
        # a 0-d ndarray key should behave like the scalar it wraps
        df = DataFrame([[1, 2], [3, 4]])
        result = df.loc[np.array(0)]
        s = Series([1, 2], name=0)
        tm.assert_series_equal(result, s)
def test_series_indexing_zerodim_np_array(self):
# GH24924
s = Series([1, 2])
result = s.loc[np.array(0)]
assert result == 1
def test_loc_reverse_assignment(self):
# GH26939
data = [1, 2, 3, 4, 5, 6] + [None] * 4
expected = Series(data, index=range(2010, 2020))
result = Series(index=range(2010, 2020), dtype=np.float64)
result.loc[2015:2010:-1] = [6, 5, 4, 3, 2, 1]
tm.assert_series_equal(result, expected)
    def test_loc_setitem_str_to_small_float_conversion_type(self, using_infer_string):
        # GH#20388
        # very small floats round-trip through their string repr

        col_data = [str(np.random.default_rng(2).random() * 1e-12) for _ in range(5)]
        result = DataFrame(col_data, columns=["A"])
        expected = DataFrame(col_data, columns=["A"])
        tm.assert_frame_equal(result, expected)

        # assigning with loc/iloc attempts to set the values inplace, which
        # in this case is successful
        if using_infer_string:
            with pytest.raises(TypeError, match="Invalid value"):
                result.loc[result.index, "A"] = [float(x) for x in col_data]
        else:
            result.loc[result.index, "A"] = [float(x) for x in col_data]
            expected = DataFrame(col_data, columns=["A"], dtype=float).astype(object)
            tm.assert_frame_equal(result, expected)

        # assigning the entire column using __setitem__ swaps in the new array
        # GH#???
        result["A"] = [float(x) for x in col_data]
        expected = DataFrame(col_data, columns=["A"], dtype=float)
        tm.assert_frame_equal(result, expected)

    def test_loc_getitem_time_object(self, frame_or_series):
        # selecting with a datetime.time key picks all rows at that wall-clock time
        rng = date_range("1/1/2000", "1/5/2000", freq="5min")
        mask = (rng.hour == 9) & (rng.minute == 30)

        obj = DataFrame(
            np.random.default_rng(2).standard_normal((len(rng), 3)), index=rng
        )
        obj = tm.get_obj(obj, frame_or_series)

        result = obj.loc[time(9, 30)]
        exp = obj.loc[mask]
        tm.assert_equal(result, exp)

        chunk = obj.loc["1/4/2000":]
        result = chunk.loc[time(9, 30)]
        expected = result[-1:]

        # Without resetting the freqs, these are 5 min and 1440 min, respectively
        result.index = result.index._with_freq(None)
        expected.index = expected.index._with_freq(None)
        tm.assert_equal(result, expected)
    @pytest.mark.parametrize("spmatrix_t", ["coo_matrix", "csc_matrix", "csr_matrix"])
    @pytest.mark.parametrize("dtype", [np.complex128, np.float64, np.int64, bool])
    def test_loc_getitem_range_from_spmatrix(self, spmatrix_t, dtype):
        sp_sparse = pytest.importorskip("scipy.sparse")

        spmatrix_t = getattr(sp_sparse, spmatrix_t)

        # The bug is triggered by a sparse matrix with purely sparse columns. So the
        # recipe below generates a rectangular matrix of dimension (5, 7) where all the
        # diagonal cells are ones, meaning the last two columns are purely sparse.
        rows, cols = 5, 7
        spmatrix = spmatrix_t(np.eye(rows, cols, dtype=dtype), dtype=dtype)
        df = DataFrame.sparse.from_spmatrix(spmatrix)

        # regression test for GH#34526
        itr_idx = range(2, rows)
        result = np.nan_to_num(df.loc[itr_idx].values)
        expected = spmatrix.toarray()[itr_idx]
        tm.assert_numpy_array_equal(result, expected)

        # regression test for GH#34540
        result = df.loc[itr_idx].dtypes.values
        expected = np.full(cols, SparseDtype(dtype))
        tm.assert_numpy_array_equal(result, expected)

    def test_loc_getitem_listlike_all_retains_sparse(self):
        # list selection that covers every row keeps the sparse dtype
        df = DataFrame({"A": pd.array([0, 0], dtype=SparseDtype("int64"))})
        result = df.loc[[0, 1]]
        tm.assert_frame_equal(result, df)

    def test_loc_getitem_sparse_frame(self):
        # GH34687
        sp_sparse = pytest.importorskip("scipy.sparse")

        df = DataFrame.sparse.from_spmatrix(sp_sparse.eye(5, dtype=np.int64))
        result = df.loc[range(2)]
        expected = DataFrame(
            [[1, 0, 0, 0, 0], [0, 1, 0, 0, 0]],
            dtype=SparseDtype(np.int64),
        )
        tm.assert_frame_equal(result, expected)

        # chained .loc selections also stay sparse
        result = df.loc[range(2)].loc[range(1)]
        expected = DataFrame([[1, 0, 0, 0, 0]], dtype=SparseDtype(np.int64))
        tm.assert_frame_equal(result, expected)

    def test_loc_getitem_sparse_series(self):
        # GH34687
        s = Series([1.0, 0.0, 0.0, 0.0, 0.0], dtype=SparseDtype("float64", 0.0))

        result = s.loc[range(2)]
        expected = Series([1.0, 0.0], dtype=SparseDtype("float64", 0.0))
        tm.assert_series_equal(result, expected)

        result = s.loc[range(3)].loc[range(2)]
        expected = Series([1.0, 0.0], dtype=SparseDtype("float64", 0.0))
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("indexer", ["loc", "iloc"])
    def test_getitem_single_row_sparse_df(self, indexer):
        # GH#46406
        # one-row selection out of a sparse frame keeps SparseDtype
        df = DataFrame([[1.0, 0.0, 1.5], [0.0, 2.0, 0.0]], dtype=SparseDtype(float))
        result = getattr(df, indexer)[0]
        expected = Series([1.0, 0.0, 1.5], dtype=SparseDtype(float), name=0)

        tm.assert_series_equal(result, expected)
    @pytest.mark.parametrize("key_type", [iter, np.array, Series, Index])
    def test_loc_getitem_iterable(self, float_frame, key_type):
        # any iterable of labels is treated the same as a plain list
        idx = key_type(["A", "B", "C"])
        result = float_frame.loc[:, idx]
        expected = float_frame.loc[:, ["A", "B", "C"]]
        tm.assert_frame_equal(result, expected)

    def test_loc_getitem_timedelta_0seconds(self):
        # GH#10583
        # string "0s" should parse the same as Timedelta("0s") in a slice
        df = DataFrame(np.random.default_rng(2).normal(size=(10, 4)))
        df.index = timedelta_range(start="0s", periods=10, freq="s")
        expected = df.loc[Timedelta("0s") :, :]
        result = df.loc["0s":, :]
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize("val,expected", [(2**63 - 1, 1), (2**63, 2)])
    def test_loc_getitem_uint64_scalar(self, val, expected):
        # see GH#19399
        # labels just above int64 max must resolve without wrap-around
        df = DataFrame([1, 2], index=[2**63 - 1, 2**63])
        result = df.loc[val]

        expected = Series([expected])
        expected.name = val
        tm.assert_series_equal(result, expected)

    def test_loc_setitem_int_label_with_float_index(self, float_numpy_dtype):
        # note labels are floats
        # the int key 1 matches the float label 1.0, not a position
        dtype = float_numpy_dtype
        ser = Series(["a", "b", "c"], index=Index([0, 0.5, 1], dtype=dtype))
        expected = ser.copy()

        ser.loc[1] = "zoo"
        expected.iloc[2] = "zoo"

        tm.assert_series_equal(ser, expected)

    @pytest.mark.parametrize(
        "indexer, expected",
        [
            # The test name is a misnomer in the 0 case as df.index[indexer]
            # is a scalar.
            (0, [20, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
            (slice(4, 8), [0, 1, 2, 3, 20, 20, 20, 20, 8, 9]),
            ([3, 5], [0, 1, 2, 20, 4, 20, 6, 7, 8, 9]),
        ],
    )
    def test_loc_setitem_listlike_with_timedelta64index(self, indexer, expected):
        # GH#16637
        tdi = to_timedelta(range(10), unit="s")
        df = DataFrame({"x": range(10)}, dtype="int64", index=tdi)

        df.loc[df.index[indexer], "x"] = 20

        expected = DataFrame(
            expected,
            index=tdi,
            columns=["x"],
            dtype="int64",
        )

        tm.assert_frame_equal(expected, df)
    def test_loc_setitem_categorical_values_partial_column_slice(self):
        # Assigning a Category to parts of an int/... column uses the values of
        # the Categorical
        df = DataFrame({"a": [1, 1, 1, 1, 1], "b": list("aaaaa")})
        with pytest.raises(TypeError, match="Invalid value"):
            # categorical values into an int column: rejected
            df.loc[1:2, "a"] = Categorical(["b", "b"], categories=["a", "b"])
            df.loc[2:3, "b"] = Categorical(["b", "b"], categories=["a", "b"])

    def test_loc_setitem_single_row_categorical(self, using_infer_string):
        # GH#25495
        df = DataFrame({"Alpha": ["a"], "Numeric": [0]})
        categories = Categorical(df["Alpha"], categories=["a", "b", "c"])

        # pre-2.0 this swapped in a new array, in 2.0 it operates inplace,
        # consistent with non-split-path
        df.loc[:, "Alpha"] = categories

        result = df["Alpha"]
        expected = Series(categories, index=df.index, name="Alpha").astype(
            object if not using_infer_string else "str"
        )
        tm.assert_series_equal(result, expected)

        # double-check that the non-loc setting retains categoricalness
        df["Alpha"] = categories
        tm.assert_series_equal(df["Alpha"], Series(categories, name="Alpha"))

    def test_loc_setitem_datetime_coercion(self):
        # GH#1048
        df = DataFrame({"c": [Timestamp("2010-10-01")] * 3})
        # np.datetime64 is coerced into the existing datetime64 column
        df.loc[0:1, "c"] = np.datetime64("2008-08-08")
        assert Timestamp("2008-08-08") == df.loc[0, "c"]
        assert Timestamp("2008-08-08") == df.loc[1, "c"]
        # datetime.date, however, is rejected
        with pytest.raises(TypeError, match="Invalid value"):
            df.loc[2, "c"] = date(2005, 5, 5)

    @pytest.mark.parametrize("idxer", ["var", ["var"]])
    def test_loc_setitem_datetimeindex_tz(self, idxer, tz_naive_fixture):
        # GH#11365
        tz = tz_naive_fixture
        idx = date_range(start="2015-07-12", periods=3, freq="h", tz=tz)
        expected = DataFrame(1.2, index=idx, columns=["var"])
        # if result started off with object dtype, then the .loc.__setitem__
        # below would retain object dtype
        result = DataFrame(index=idx, columns=["var"], dtype=np.float64)
        if idxer == "var":
            with pytest.raises(TypeError, match="Invalid value"):
                result.loc[:, idxer] = expected
        else:
            # See https://github.com/pandas-dev/pandas/issues/56223
            result.loc[:, idxer] = expected
            tm.assert_frame_equal(result, expected)

    def test_loc_setitem_time_key(self):
        index = date_range("2012-01-01", "2012-01-05", freq="30min")
        df = DataFrame(
            np.random.default_rng(2).standard_normal((len(index), 5)), index=index
        )
        akey = time(12, 0, 0)
        bkey = slice(time(13, 0, 0), time(14, 0, 0))
        # positional rows that correspond to the time keys above
        ainds = [24, 72, 120, 168]
        binds = [26, 27, 28, 74, 75, 76, 122, 123, 124, 170, 171, 172]

        result = df.copy()
        result.loc[akey] = 0
        result = result.loc[akey]
        expected = df.loc[akey].copy()
        expected.loc[:] = 0
        tm.assert_frame_equal(result, expected)

        # writing the original rows back restores the frame exactly
        result = df.copy()
        result.loc[akey] = 0
        result.loc[akey] = df.iloc[ainds]
        tm.assert_frame_equal(result, df)

        result = df.copy()
        result.loc[bkey] = 0
        result = result.loc[bkey]
        expected = df.loc[bkey].copy()
        expected.loc[:] = 0
        tm.assert_frame_equal(result, expected)

        result = df.copy()
        result.loc[bkey] = 0
        result.loc[bkey] = df.iloc[binds]
        tm.assert_frame_equal(result, df)
    @pytest.mark.parametrize("key", ["A", ["A"], ("A", slice(None))])
    def test_loc_setitem_unsorted_multiindex_columns(self, key):
        # GH#38601
        # setting all "A"-level columns works whether or not columns are lexsorted
        mi = MultiIndex.from_tuples([("A", 4), ("B", "3"), ("A", "2")])
        df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=mi)
        obj = df.copy()
        obj.loc[:, key] = np.zeros((2, 2), dtype="int64")
        expected = DataFrame([[0, 2, 0], [0, 5, 0]], columns=mi)
        tm.assert_frame_equal(obj, expected)

        df = df.sort_index(axis=1)
        df.loc[:, key] = np.zeros((2, 2), dtype="int64")
        expected = expected.sort_index(axis=1)
        tm.assert_frame_equal(df, expected)

    def test_loc_setitem_uint_drop(self, any_int_numpy_dtype):
        # see GH#18311
        # assigning series.loc[0] = 4 changed series.dtype to int
        series = Series([1, 2, 3], dtype=any_int_numpy_dtype)
        series.loc[0] = 4
        expected = Series([4, 2, 3], dtype=any_int_numpy_dtype)
        tm.assert_series_equal(series, expected)

    def test_loc_setitem_td64_non_nano(self):
        # GH#14155
        # np.timedelta64 with non-ns resolution is still accepted by .loc
        ser = Series(10 * [np.timedelta64(10, "m")])
        ser.loc[[1, 2, 3]] = np.timedelta64(20, "m")
        expected = Series(10 * [np.timedelta64(10, "m")])
        expected.loc[[1, 2, 3]] = Timedelta(np.timedelta64(20, "m"))
        tm.assert_series_equal(ser, expected)

    def test_loc_setitem_2d_to_1d_raises(self):
        data = np.random.default_rng(2).standard_normal((2, 2))
        # float64 dtype to avoid upcast when trying to set float data
        ser = Series(range(2), dtype="float64")

        msg = "setting an array element with a sequence."
        with pytest.raises(ValueError, match=msg):
            ser.loc[range(2)] = data

        with pytest.raises(ValueError, match=msg):
            ser.loc[:] = data

    def test_loc_getitem_interval_index(self):
        # GH#19977
        # a scalar inside an interval selects that interval's row
        index = pd.interval_range(start=0, periods=3)
        df = DataFrame(
            [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=index, columns=["A", "B", "C"]
        )

        expected = 1
        result = df.loc[0.5, "A"]
        tm.assert_almost_equal(result, expected)
def test_loc_getitem_interval_index2(self):
# GH#19977
index = pd.interval_range(start=0, periods=3, closed="both")
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=index, columns=["A", "B", "C"]
)
index_exp = pd.interval_range(start=0, periods=2, freq=1, closed="both")
expected = Series([1, 4], index=index_exp, name="A")
result = df.loc[1, "A"]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tpl", [(1,), (1, 2)])
def test_loc_getitem_index_single_double_tuples(self, tpl):
# GH#20991
idx = Index(
[(1,), (1, 2)],
name="A",
tupleize_cols=False,
)
df = DataFrame(index=idx)
result = df.loc[[tpl]]
idx = Index([tpl], name="A", tupleize_cols=False)
expected = DataFrame(index=idx)
tm.assert_frame_equal(result, expected)
def test_loc_getitem_index_namedtuple(self):
IndexType = namedtuple("IndexType", ["a", "b"])
idx1 = IndexType("foo", "bar")
idx2 = IndexType("baz", "bof")
index = Index([idx1, idx2], name="composite_index", tupleize_cols=False)
df = DataFrame([(1, 2), (3, 4)], index=index, columns=["A", "B"])
result = df.loc[IndexType("foo", "bar")]["A"]
assert result == 1
def test_loc_setitem_single_column_mixed(self, using_infer_string):
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 3)),
index=["a", "b", "c", "d", "e"],
columns=["foo", "bar", "baz"],
)
df["str"] = "qux"
df.loc[df.index[::2], "str"] = np.nan
expected = Series(
[np.nan, "qux", np.nan, "qux", np.nan],
dtype=object if not using_infer_string else "str",
).values
tm.assert_almost_equal(df["str"].values, expected)
def test_loc_setitem_cast2(self):
# GH#7704
# dtype conversion on setting
df = DataFrame(np.random.default_rng(2).random((30, 3)), columns=tuple("ABC"))
df["event"] = np.nan
with pytest.raises(TypeError, match="Invalid value"):
df.loc[10, "event"] = "foo"
def test_loc_setitem_cast3(self):
# Test that data type is preserved . GH#5782
df = DataFrame({"one": np.arange(6, dtype=np.int8)})
df.loc[1, "one"] = 6
assert df.dtypes.one == np.dtype(np.int8)
df.one = np.int8(7)
assert df.dtypes.one == np.dtype(np.int8)
def test_loc_setitem_range_key(self, frame_or_series):
# GH#45479 don't treat range key as positional
obj = frame_or_series(range(5), index=[3, 4, 1, 0, 2])
values = [9, 10, 11]
if obj.ndim == 2:
values = [[9], [10], [11]]
obj.loc[range(3)] = values
expected = frame_or_series([0, 1, 10, 9, 11], index=obj.index)
tm.assert_equal(obj, expected)
def test_loc_setitem_numpy_frame_categorical_value(self):
# GH#52927
df = DataFrame({"a": [1, 1, 1, 1, 1], "b": ["a", "a", "a", "a", "a"]})
df.loc[1:2, "a"] = Categorical([2, 2], categories=[1, 2])
expected = DataFrame({"a": [1, 2, 2, 1, 1], "b": ["a", "a", "a", "a", "a"]})
tm.assert_frame_equal(df, expected)
def test_loc_with_nat_in_tzaware_index(self):
# GH#54409
timestamp = to_datetime("2023-01-01", utc=True)
df = DataFrame(
{
"index": Series([pd.NaT, timestamp]),
"value": Series([0, 1]),
}
).set_index("index")
# Works fine when mixing NaT and valid values
result = df.loc[
Series([pd.NaT, timestamp, timestamp], dtype=df.index.dtype),
"value",
]
expected = [0, 1, 1]
assert result.tolist() == expected
# Regression check: all-NaT lookup should return [0], not raise
result = df.loc[
Series([pd.NaT], dtype=df.index.dtype),
"value",
]
assert result.tolist() == [0]
|
TestLocBaseIndependent
|
python
|
TheAlgorithms__Python
|
machine_learning/polynomial_regression.py
|
{
"start": 1346,
"end": 7650
}
|
class ____:
__slots__ = "degree", "params"
def __init__(self, degree: int) -> None:
"""
@raises ValueError: if the polynomial degree is negative
"""
if degree < 0:
raise ValueError("Polynomial degree must be non-negative")
self.degree = degree
self.params = None
@staticmethod
def _design_matrix(data: np.ndarray, degree: int) -> np.ndarray:
"""
Constructs a polynomial regression design matrix for the given input data. For
input data x = (x₁, x₂, ..., xₙ) and polynomial degree m, the design matrix is
the Vandermonde matrix
|1 x₁ x₁² ⋯ x₁ᵐ|
X = |1 x₂ x₂² ⋯ x₂ᵐ|
|⋮ ⋮ ⋮ ⋱ ⋮ |
|1 xₙ xₙ² ⋯ xₙᵐ|
Reference: https://en.wikipedia.org/wiki/Vandermonde_matrix
@param data: the input predictor values x, either for model fitting or for
prediction
@param degree: the polynomial degree m
@returns: the Vandermonde matrix X (see above)
@raises ValueError: if input data is not N x 1
>>> x = np.array([0, 1, 2])
>>> PolynomialRegression._design_matrix(x, degree=0)
array([[1],
[1],
[1]])
>>> PolynomialRegression._design_matrix(x, degree=1)
array([[1, 0],
[1, 1],
[1, 2]])
>>> PolynomialRegression._design_matrix(x, degree=2)
array([[1, 0, 0],
[1, 1, 1],
[1, 2, 4]])
>>> PolynomialRegression._design_matrix(x, degree=3)
array([[1, 0, 0, 0],
[1, 1, 1, 1],
[1, 2, 4, 8]])
>>> PolynomialRegression._design_matrix(np.array([[0, 0], [0 , 0]]), degree=3)
Traceback (most recent call last):
...
ValueError: Data must have dimensions N x 1
"""
_rows, *remaining = data.shape
if remaining:
raise ValueError("Data must have dimensions N x 1")
return np.vander(data, N=degree + 1, increasing=True)
def fit(self, x_train: np.ndarray, y_train: np.ndarray) -> None:
"""
Computes the polynomial regression model parameters using ordinary least squares
(OLS) estimation:
β = (XᵀX)⁻¹Xᵀy = X⁺y
where X⁺ denotes the Moore-Penrose pseudoinverse of the design matrix X. This
function computes X⁺ using singular value decomposition (SVD).
References:
- https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse
- https://en.wikipedia.org/wiki/Singular_value_decomposition
- https://en.wikipedia.org/wiki/Multicollinearity
@param x_train: the predictor values x for model fitting
@param y_train: the response values y for model fitting
@raises ArithmeticError: if X isn't full rank, then XᵀX is singular and β
doesn't exist
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
>>> y = x**3 - 2 * x**2 + 3 * x - 5
>>> poly_reg = PolynomialRegression(degree=3)
>>> poly_reg.fit(x, y)
>>> poly_reg.params
array([-5., 3., -2., 1.])
>>> poly_reg = PolynomialRegression(degree=20)
>>> poly_reg.fit(x, y)
Traceback (most recent call last):
...
ArithmeticError: Design matrix is not full rank, can't compute coefficients
Make sure errors don't grow too large:
>>> coefs = np.array([-250, 50, -2, 36, 20, -12, 10, 2, -1, -15, 1])
>>> y = PolynomialRegression._design_matrix(x, len(coefs) - 1) @ coefs
>>> poly_reg = PolynomialRegression(degree=len(coefs) - 1)
>>> poly_reg.fit(x, y)
>>> np.allclose(poly_reg.params, coefs, atol=10e-3)
True
"""
X = PolynomialRegression._design_matrix(x_train, self.degree) # noqa: N806
_, cols = X.shape
if np.linalg.matrix_rank(X) < cols:
raise ArithmeticError(
"Design matrix is not full rank, can't compute coefficients"
)
# np.linalg.pinv() computes the Moore-Penrose pseudoinverse using SVD
self.params = np.linalg.pinv(X) @ y_train
def predict(self, data: np.ndarray) -> np.ndarray:
"""
Computes the predicted response values y for the given input data by
constructing the design matrix X and evaluating y = Xβ.
@param data: the predictor values x for prediction
@returns: the predicted response values y = Xβ
@raises ArithmeticError: if this function is called before the model
parameters are fit
>>> x = np.array([0, 1, 2, 3, 4])
>>> y = x**3 - 2 * x**2 + 3 * x - 5
>>> poly_reg = PolynomialRegression(degree=3)
>>> poly_reg.fit(x, y)
>>> poly_reg.predict(np.array([-1]))
array([-11.])
>>> poly_reg.predict(np.array([-2]))
array([-27.])
>>> poly_reg.predict(np.array([6]))
array([157.])
>>> PolynomialRegression(degree=3).predict(x)
Traceback (most recent call last):
...
ArithmeticError: Predictor hasn't been fit yet
"""
if self.params is None:
raise ArithmeticError("Predictor hasn't been fit yet")
return PolynomialRegression._design_matrix(data, self.degree) @ self.params
def main() -> None:
"""
Fit a polynomial regression model to predict fuel efficiency using seaborn's mpg
dataset
>>> pass # Placeholder, function is only for demo purposes
"""
import seaborn as sns
mpg_data = sns.load_dataset("mpg")
poly_reg = PolynomialRegression(degree=2)
poly_reg.fit(mpg_data.weight, mpg_data.mpg)
weight_sorted = np.sort(mpg_data.weight)
predictions = poly_reg.predict(weight_sorted)
plt.scatter(mpg_data.weight, mpg_data.mpg, color="gray", alpha=0.5)
plt.plot(weight_sorted, predictions, color="red", linewidth=3)
plt.title("Predicting Fuel Efficiency Using Polynomial Regression")
plt.xlabel("Weight (lbs)")
plt.ylabel("Fuel Efficiency (mpg)")
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
|
PolynomialRegression
|
python
|
kamyu104__LeetCode-Solutions
|
Python/power-of-heroes.py
|
{
"start": 59,
"end": 381
}
|
class ____(object):
def sumOfPower(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
MOD = 10**9+7
nums.sort()
result = dp = 0
for x in nums:
result = (result+(x**2)*(dp+x))%MOD
dp = (dp+(dp+x))%MOD
return result
|
Solution
|
python
|
getsentry__sentry
|
src/sentry/db/models/fields/bounded.py
|
{
"start": 1011,
"end": 1764
}
|
class ____(models.IntegerField):
"""
This type allows storing a full unsigned `u32` value by manually wrapping it around,
so it is stored as a signed `i32` value in the database.
"""
MIN_VALUE = 0
MAX_VALUE = U32_MAX
def get_prep_value(self, value: int) -> int:
if value:
value = int(value)
assert self.MIN_VALUE <= value <= self.MAX_VALUE
if value > I32_MAX:
value = value - 2**32
return super().get_prep_value(value)
def from_db_value(self, value: int | None, expression, connection) -> int | None:
if value is None:
return None
if value < 0:
return value + 2**32
return value
|
WrappingU32IntegerField
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/marketing_platform/operators/search_ads.py
|
{
"start": 6257,
"end": 8246
}
|
class ____(_GoogleSearchAdsBaseOperator):
"""
Retrieve metadata for resource(s) or field(s) by the query syntax.
.. seealso:
For API documentation check:
https://developers.google.com/search-ads/reporting/api/reference/rest/v0/searchAds360Fields/search
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleSearchAdsSearchFieldsOperator`
:param query: The query string to execute.
:param page_token: Token of the page to retrieve. If not specified, the first page of results will be
returned. Use the value obtained from `next_page_token` in the previous response
in order to request the next page of results.
:param page_size: Number of elements to retrieve in a single page. When too large a page is requested,
the server may decide to further limit the number of returned resources.
Default 10000.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param api_version: The version of the API that will be requested for example 'v0'.
"""
template_fields: Sequence[str] = (
*_GoogleSearchAdsBaseOperator.template_fields,
"page_token",
"page_size",
)
def __init__(
self,
*,
query: str,
page_token: str | None = None,
page_size: int = 10000,
**kwargs,
):
super().__init__(**kwargs)
self.query = query
self.page_token = page_token
self.page_size = page_size
def execute(self, context: Context) -> Any:
self.log.info("Retrieving the metadata for %s", self.query)
response = self.hook.search_fields(
query=self.query,
page_token=self.page_token,
page_size=self.page_size,
)
self.log.info("Num of fields retrieved, #%d", len(response["results"]))
return response
|
GoogleSearchAdsSearchFieldsOperator
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/image_ops/decode_raw_op_test.py
|
{
"start": 984,
"end": 4869
}
|
class ____(test.TestCase):
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
for dtype in [dtypes.bool, dtypes.int8, dtypes.uint8, dtypes.int16,
dtypes.uint16, dtypes.int32, dtypes.int64, dtypes.float16,
dtypes.float32, dtypes.float64, dtypes.complex64,
dtypes.complex128]:
in_bytes = array_ops.placeholder(dtypes.string, shape=[None])
decode = parsing_ops.decode_raw(in_bytes, dtype)
self.assertEqual([None, None], decode.get_shape().as_list())
def testToUint8(self):
self.assertAllEqual(
[[ord("A")], [ord("a")]],
parsing_ops.decode_raw(["A", "a"], dtypes.uint8))
self.assertAllEqual(
[[ord("w"), ord("e"), ord("r")], [ord("X"), ord("Y"), ord("Z")]],
parsing_ops.decode_raw(["wer", "XYZ"], dtypes.uint8))
with self.assertRaisesOpError(
"DecodeRaw requires input strings to all be the same size, but "
"element 1 has size 5 != 6"):
self.evaluate(parsing_ops.decode_raw(["short", "longer"], dtypes.uint8))
def testToInt16(self):
self.assertAllEqual(
[[ord("A") + ord("a") * 256, ord("B") + ord("C") * 256]],
parsing_ops.decode_raw(["AaBC"], dtypes.uint16))
with self.assertRaisesOpError(
"Input to DecodeRaw has length 3 that is not a multiple of 2, the "
"size of int16"):
self.evaluate(parsing_ops.decode_raw(["123", "456"], dtypes.int16))
def testEndianness(self):
self.assertAllEqual(
[[0x04030201]],
parsing_ops.decode_raw(
["\x01\x02\x03\x04"], dtypes.int32, little_endian=True))
self.assertAllEqual(
[[0x01020304]],
parsing_ops.decode_raw(
["\x01\x02\x03\x04"], dtypes.int32, little_endian=False))
self.assertAllEqual([[1 + 2j]],
parsing_ops.decode_raw([b"\x00\x00\x80?\x00\x00\x00@"],
dtypes.complex64,
little_endian=True))
self.assertAllEqual([[1 + 2j]],
parsing_ops.decode_raw([b"?\x80\x00\x00@\x00\x00\x00"],
dtypes.complex64,
little_endian=False))
def testToFloat16(self):
result = np.matrix([[1, -2, -3, 4]], dtype="<f2")
self.assertAllEqual(
result, parsing_ops.decode_raw([result.tobytes()], dtypes.float16))
def testToBool(self):
result = np.matrix([[True, False, False, True]], dtype="<b1")
self.assertAllEqual(result,
parsing_ops.decode_raw([result.tobytes()], dtypes.bool))
def testToComplex64(self):
result = np.matrix([[1 + 1j, 2 - 2j, -3 + 3j, -4 - 4j]], dtype="<c8")
self.assertAllEqual(
result, parsing_ops.decode_raw([result.tobytes()], dtypes.complex64))
def testToComplex128(self):
result = np.matrix([[1 + 1j, 2 - 2j, -3 + 3j, -4 - 4j]], dtype="<c16")
self.assertAllEqual(
result, parsing_ops.decode_raw([result.tobytes()], dtypes.complex128))
def testEmptyStringInput(self):
for num_inputs in range(3):
result = parsing_ops.decode_raw([""] * num_inputs, dtypes.float16)
self.assertEqual((num_inputs, 0), self.evaluate(result).shape)
def testToUInt16(self):
# Use FF/EE/DD/CC so that decoded value is higher than 32768 for uint16
self.assertAllEqual(
[[0xFF + 0xEE * 256, 0xDD + 0xCC * 256]],
parsing_ops.decode_raw([b"\xFF\xEE\xDD\xCC"], dtypes.uint16))
with self.assertRaisesOpError(
"Input to DecodeRaw has length 3 that is not a multiple of 2, the "
"size of uint16"):
self.evaluate(parsing_ops.decode_raw(["123", "456"], dtypes.uint16))
if __name__ == "__main__":
test.main()
|
DecodeRawOpTest
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typedDict9.py
|
{
"start": 531,
"end": 597
}
|
class ____(TypedDict):
y: str
z: Literal[""] | Inner3
|
Outer2
|
python
|
pytorch__pytorch
|
test/distributed/tensor/test_utils.py
|
{
"start": 18575,
"end": 22402
}
|
class ____(TestCase):
def test_compute_global_tensor_info_unsupported_placement(self):
class MockDeviceMesh:
def size(self, x):
return x
class FakePlacement(Placement):
pass
device_mesh: Any = MockDeviceMesh()
local_tensor = torch.tensor([1])
with self.assertRaises(RuntimeError):
compute_global_tensor_info(local_tensor, device_mesh, [FakePlacement()])
def test_compute_global_tensor_info_non_shard_placements(self):
class MockDeviceMesh:
def size(self, x):
return x
device_mesh: Any = MockDeviceMesh()
local_tensor = torch.tensor([[1], [2]])
global_size, global_stride = compute_global_tensor_info(
local_tensor, device_mesh, [Replicate(), Partial()]
)
self.assertEqual(global_size, local_tensor.size())
self.assertEqual(global_stride, local_tensor.stride())
def test_compute_global_tensor_info_shard_placement(self):
class MockDeviceMesh:
def size(self, dim):
return dim + 2
device_mesh: Any = MockDeviceMesh()
local_tensor = torch.tensor([[[1], [2], [3]], [[4], [5], [6]]])
global_size, global_stride = compute_global_tensor_info(
local_tensor, device_mesh, [Shard(0), Shard(1), Shard(2)]
)
self.assertEqual(
global_size, [(i + 2) * x for (i, x) in enumerate(local_tensor.size())]
)
self.assertEqual(global_stride[0], local_tensor.stride()[0] * 3 * 4)
self.assertEqual(global_stride[1], local_tensor.stride()[1])
self.assertEqual(global_stride[2], local_tensor.stride()[2] * 3)
def test_compute_tensor_info(self):
from torch.testing._internal.distributed.fake_pg import FakeStore
world_size = 256
fake_store = FakeStore()
torch.distributed.init_process_group(
"fake", store=fake_store, rank=0, world_size=world_size
)
mesh = torch.distributed.device_mesh.init_device_mesh(
"cpu",
(8, 8, 4),
mesh_dim_names=(
"dp",
"tp",
"cp",
),
)
assert world_size == mesh.shape[0] * mesh.shape[1] * mesh.shape[2]
# Add Partial() when we are allowed to redistribute to it
options = [Shard(0), Shard(1), Shard(2), Replicate()]
all_placements = [tuple(p) for p in itertools.product(options, repeat=3)]
for placements in all_placements:
local_tensor = torch.empty_strided(
(4, 4, 4),
(16, 4, 1),
)
local_dt = DTensor.from_local(local_tensor, mesh, placements)
global_shape, global_stride = compute_global_tensor_info(
local_tensor, mesh, placements
)
global_dt = local_dt.redistribute(mesh, [Replicate()] * mesh.ndim)
self.assertEqual(global_shape, global_dt.size())
self.assertEqual(global_stride, global_dt.stride())
global_tensor = torch.empty_strided(
global_shape,
global_stride,
)
new_local_shape, new_local_stride = compute_local_tensor_info(
global_tensor,
mesh,
placements,
)
self.assertEqual(new_local_shape, local_tensor.size())
self.assertEqual(new_local_stride, local_tensor.stride())
new_local_dt = global_dt.redistribute(mesh, placements)
self.assertEqual(new_local_shape, new_local_dt.to_local().size())
self.assertEqual(new_local_stride, new_local_dt.to_local().stride())
torch.distributed.destroy_process_group()
|
UtilSingleDeviceTest
|
python
|
google__jax
|
jax/_src/core.py
|
{
"start": 113570,
"end": 113957
}
|
class ____:
def __init__(self, obj):
self.id = id(obj)
def __repr__(self):
return f'<axis {hex(self.id)}>'
def __hash__(self):
return hash(self.id)
def __eq__(self, other):
return type(other) is _TempAxisName and self.id == other.id
def __lt__(self, other):
return type(other) is _TempAxisName and self.id < other.id
@dataclass(frozen=True)
|
_TempAxisName
|
python
|
kamyu104__LeetCode-Solutions
|
Python/product-of-two-run-length-encoded-arrays.py
|
{
"start": 33,
"end": 885
}
|
class ____(object):
def findRLEArray(self, encoded1, encoded2):
"""
:type encoded1: List[List[int]]
:type encoded2: List[List[int]]
:rtype: List[List[int]]
"""
result = []
i = j = remain1 = remain2 = 0
while (remain1 or i < len(encoded1)) and (remain2 or j < len(encoded2)):
if not remain1:
remain1 = encoded1[i][1]
i += 1
if not remain2:
remain2 = encoded2[j][1]
j += 1
cnt = min(remain1, remain2)
remain1 -= cnt
remain2 -= cnt
if result and result[-1][0] == encoded1[i-1][0]*encoded2[j-1][0]:
result[-1][1] += cnt
else:
result.append([encoded1[i-1][0]*encoded2[j-1][0], cnt])
return result
|
Solution
|
python
|
pytorch__pytorch
|
torch/utils/_contextlib.py
|
{
"start": 4819,
"end": 5915
}
|
class ____:
"""Allow a context manager to be used as a decorator."""
def __call__(self, orig_func: F) -> F:
if inspect.isclass(orig_func):
warnings.warn(
"Decorating classes is deprecated and will be disabled in "
"future versions. You should only decorate functions or methods. "
"To preserve the current behavior of class decoration, you can "
"directly decorate the `__init__` method and nothing else.",
FutureWarning,
stacklevel=2,
)
func = cast(F, lambda *args, **kwargs: orig_func(*args, **kwargs))
else:
func = orig_func
return cast(F, context_decorator(self.clone, func))
def __enter__(self) -> None:
raise NotImplementedError
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
raise NotImplementedError
def clone(self):
# override this method if your children class takes __init__ parameters
return self.__class__()
|
_DecoratorContextManager
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI052.py
|
{
"start": 3708,
"end": 3750
}
|
class ____(Enum):
FOO = 0
BAR = 1
|
Foo
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/discogs/provider.py
|
{
"start": 433,
"end": 696
}
|
class ____(OAuthProvider):
id = "discogs"
name = "discogs"
account_class = DiscogsAccount
oauth_adapter_class = DiscogsOAuthAdapter
def extract_uid(self, data):
return str(data["id"])
provider_classes = [DiscogsProvider]
|
DiscogsProvider
|
python
|
getsentry__sentry
|
src/sentry/integrations/bitbucket/webhook.py
|
{
"start": 2220,
"end": 2431
}
|
class ____(SentryAPIException):
status_code = 400
code = f"{PROVIDER_NAME}.webhook.unsupported-signature-method"
message = "Signature method is not supported"
|
WebhookUnsupportedSignatureMethodException
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/losses.py
|
{
"start": 31273,
"end": 33323
}
|
class ____(LossFunctionWrapper):
"""Computes the squared hinge loss between `y_true` and `y_pred`.
`loss = square(maximum(1 - y_true * y_pred, 0))`
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Standalone usage:
>>> y_true = [[0., 1.], [0., 0.]]
>>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
>>> # Using 'auto'/'sum_over_batch_size' reduction type.
>>> h = tf.keras.losses.SquaredHinge()
>>> h(y_true, y_pred).numpy()
1.86
>>> # Calling with 'sample_weight'.
>>> h(y_true, y_pred, sample_weight=[1, 0]).numpy()
0.73
>>> # Using 'sum' reduction type.
>>> h = tf.keras.losses.SquaredHinge(
... reduction=tf.keras.losses.Reduction.SUM)
>>> h(y_true, y_pred).numpy()
3.72
>>> # Using 'none' reduction type.
>>> h = tf.keras.losses.SquaredHinge(
... reduction=tf.keras.losses.Reduction.NONE)
>>> h(y_true, y_pred).numpy()
array([1.46, 2.26], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', loss=tf.keras.losses.SquaredHinge())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='squared_hinge'):
"""Initializes `SquaredHinge` instance.
Args:
reduction: Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the instance. Defaults to 'squared_hinge'.
"""
super().__init__(squared_hinge, name=name, reduction=reduction)
|
SquaredHinge
|
python
|
huggingface__transformers
|
src/transformers/models/modernbert/modeling_modernbert.py
|
{
"start": 60067,
"end": 64414
}
|
class ____(ModernBertPreTrainedModel):
def __init__(self, config: ModernBertConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.model = ModernBertModel(config)
self.head = ModernBertPredictionHead(config)
self.drop = torch.nn.Dropout(config.classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
sliding_window_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
start_positions: Optional[torch.Tensor] = None,
end_positions: Optional[torch.Tensor] = None,
indices: Optional[torch.Tensor] = None,
cu_seqlens: Optional[torch.Tensor] = None,
max_seqlen: Optional[int] = None,
batch_size: Optional[int] = None,
seq_len: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]:
r"""
sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
perform global attention, while the rest perform local attention. This mask is used to avoid attending to
far-away tokens in the local attention layers when not using Flash Attention.
indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
max_seqlen (`int`, *optional*):
Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors.
batch_size (`int`, *optional*):
Batch size of the input sequences. Used to pad the output tensors.
seq_len (`int`, *optional*):
Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
self._maybe_set_compile()
outputs = self.model(
input_ids,
attention_mask=attention_mask,
sliding_window_mask=sliding_window_mask,
position_ids=position_ids,
indices=indices,
cu_seqlens=cu_seqlens,
max_seqlen=max_seqlen,
batch_size=batch_size,
seq_len=seq_len,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden_state = outputs[0]
last_hidden_state = self.head(last_hidden_state)
last_hidden_state = self.drop(last_hidden_state)
logits = self.classifier(last_hidden_state)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
loss = None
if start_positions is not None and end_positions is not None:
loss = self.loss_function(start_logits, end_logits, start_positions, end_positions, **kwargs)
if not return_dict:
output = (start_logits, end_logits) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return QuestionAnsweringModelOutput(
loss=loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring(
custom_intro="""
The ModernBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.
"""
)
|
ModernBertForQuestionAnswering
|
python
|
apache__airflow
|
airflow-core/src/airflow/api_fastapi/execution_api/versions/v2025_08_10.py
|
{
"start": 1128,
"end": 1912
}
|
class ____(VersionChange):
"""Add the `state` field to DagRun model and `/dag-runs/{dag_id}/previous` endpoint."""
description = __doc__
instructions_to_migrate_to_previous_version = (
schema(DagRun).field("state").didnt_exist,
endpoint("/dag-runs/{dag_id}/previous", ["GET"]).didnt_exist,
)
@convert_response_to_previous_version_for(TIRunContext) # type: ignore[arg-type]
def remove_state_from_dag_run(response: ResponseInfo) -> None: # type: ignore[misc]
"""Remove the `state` field from the dag_run object when converting to the previous version."""
if "dag_run" in response.body and isinstance(response.body["dag_run"], dict):
response.body["dag_run"].pop("state", None)
|
AddDagRunStateFieldAndPreviousEndpoint
|
python
|
tensorflow__tensorflow
|
tensorflow/python/distribute/v1/input_lib.py
|
{
"start": 13079,
"end": 14666
}
|
class ____(input_lib._SingleWorkerDatasetIteratorBase): # pylint: disable=protected-access
"""Iterator for a single DistributedDatasetV1 instance."""
def _make_iterator(self):
"""Make appropriate iterator on the dataset."""
with ops.device(self._worker):
if self._options is not None:
self._iterator = multi_device_iterator_ops.MultiDeviceIterator(
self._dataset,
self._devices,
max_buffer_size=self._options.experimental_per_replica_buffer_size,
prefetch_buffer_size=self._options
.experimental_per_replica_buffer_size)
else:
self._iterator = multi_device_iterator_ops.MultiDeviceIterator(
self._dataset,
self._devices,
)
def initialize(self):
"""Initialize underlying iterator.
In eager execution, this simply recreates the underlying iterator.
In graph execution, it returns the initializer ops for the underlying
iterator.
Returns:
A list of any initializer ops that should be run.
"""
if ops.executing_eagerly_outside_functions():
self._iterator._eager_reset() # pylint: disable=protected-access
return []
else:
return [self._iterator.initializer]
@property
def output_classes(self):
return dataset_ops.get_legacy_output_classes(self._iterator)
@property
def output_shapes(self):
return dataset_ops.get_legacy_output_shapes(self._iterator)
@property
def output_types(self):
return dataset_ops.get_legacy_output_types(self._iterator)
|
_SingleWorkerDatasetIterator
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/pandas_vb_common.py
|
{
"start": 1329,
"end": 1740
}
|
class ____:
"""
Base class for IO benchmarks
"""
fname = None
def remove(self, f):
"""Remove created files"""
try:
os.remove(f)
except OSError:
# On Windows, attempting to remove a file that is in use
# causes an exception to be raised
pass
def teardown(self, *args, **kwargs):
self.remove(self.fname)
|
BaseIO
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/roots/mutation.py
|
{
"start": 8612,
"end": 8939
}
|
class ____(graphene.ObjectType):
"""Output indicating that a run failed to terminate."""
run = graphene.NonNull(GrapheneRun)
message = graphene.NonNull(graphene.String)
class Meta:
interfaces = (GrapheneTerminatePipelineExecutionFailure,)
name = "TerminateRunFailure"
|
GrapheneTerminateRunFailure
|
python
|
astropy__astropy
|
astropy/io/ascii/tdat.py
|
{
"start": 25311,
"end": 26881
}
|
class ____(core.TableOutputter):
"""
Output the table as an astropy.table.Table object.
"""
def __call__(self, cols, meta):
"""
READ: Override the default outputter.
TDAT files may (optionally) specify which field lines should be used as
the primary index and secondary indices Astropy tables support adding
indices after creation. This overwrite adds labeled indices on read.
"""
# Sets col.data to numpy array and col.type to io.ascii Type class (e.g.
# FloatType) for each col.
self._convert_vals(cols)
t_cols = [
np.ma.MaskedArray(x.data, mask=x.mask)
if hasattr(x, "mask") and np.any(x.mask)
else x.data
for x in cols
]
out = core.Table(t_cols, names=[x.name for x in cols], meta=meta["table"])
indices = []
for col, out_col in zip(cols, out.columns.values()):
for attr in ("format", "unit", "description"):
if hasattr(col, attr):
setattr(out_col, attr, getattr(col, attr))
if hasattr(col, "meta"):
out_col.meta.update(col.meta)
if "index" in col.meta:
if col.meta["index"] == "key":
indices.insert(0, col.name)
else:
indices.append(col.name)
# Add indices, if specified
if len(indices) > 0:
for name in indices:
out.add_index(name)
return out
|
TdatOutputter
|
python
|
pydantic__pydantic
|
pydantic/networks.py
|
{
"start": 27609,
"end": 28128
}
|
class ____(_BaseMultiHostUrl):
"""A type that will accept any NATS DSN.
NATS is a connective technology built for the ever increasingly hyper-connected world.
It is a single technology that enables applications to securely communicate across
any combination of cloud vendors, on-premise, edge, web and mobile, and devices.
More: https://nats.io
"""
_constraints = UrlConstraints(
allowed_schemes=['nats', 'tls', 'ws', 'wss'], default_host='localhost', default_port=4222
)
|
NatsDsn
|
python
|
Lightning-AI__lightning
|
tests/tests_pytorch/plugins/precision/test_double.py
|
{
"start": 944,
"end": 1298
}
|
class ____(Dataset):
def __init__(self, size, length):
self.len = length
self.float_data = torch.randn(length, size)
self.int_data = torch.randint(10, (length, 1))
def __getitem__(self, index):
return self.float_data[index], self.int_data[index]
def __len__(self):
return self.len
|
RandomFloatIntDataset
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-gcp/prefect_gcp/models/cloud_run_v2.py
|
{
"start": 527,
"end": 8210
}
|
class ____(BaseModel):
"""
JobV2 is a data model for a job that will be run on Cloud Run with the V2 API.
"""
name: str
uid: str
generation: str
labels: Dict[str, str] = Field(default_factory=dict)
annotations: Dict[str, str] = Field(default_factory=dict)
createTime: str
updateTime: str
deleteTime: Optional[str] = Field(None)
expireTime: Optional[str] = Field(None)
creator: Optional[str] = Field(None)
lastModifier: Optional[str] = Field(None)
client: Optional[str] = Field(None)
clientVersion: Optional[str] = Field(None)
launchStage: Literal[
"ALPHA",
"BETA",
"GA",
"DEPRECATED",
"EARLY_ACCESS",
"PRELAUNCH",
"UNIMPLEMENTED",
"LAUNCH_TAG_UNSPECIFIED",
]
binaryAuthorization: Dict = Field(default_factory=dict)
template: Dict = Field(default_factory=dict)
observedGeneration: Optional[str] = Field(None)
terminalCondition: Dict = Field(default_factory=dict)
conditions: List[Dict] = Field(default_factory=list)
executionCount: int
latestCreatedExecution: Dict = Field(default_factory=dict)
reconciling: bool = Field(False)
satisfiesPzs: bool = Field(False)
etag: Optional[str] = Field(None)
def is_ready(self) -> bool:
"""
Check if the job is ready to run.
Returns:
Whether the job is ready to run.
"""
ready_condition = self.get_ready_condition()
if self._is_missing_container(ready_condition=ready_condition):
raise Exception(f"{ready_condition.get('message')}")
return ready_condition.get("state") == "CONDITION_SUCCEEDED"
def get_ready_condition(self) -> Dict:
"""
Get the ready condition for the job.
Returns:
The ready condition for the job.
"""
if self.terminalCondition.get("type") == "Ready":
return self.terminalCondition
return {}
@classmethod
def get(
cls,
cr_client: Resource,
project: str,
location: str,
job_name: str,
):
"""
Get a job from Cloud Run with the V2 API.
Args:
cr_client: The base client needed for interacting with GCP
Cloud Run V2 API.
project: The GCP project ID.
location: The GCP region.
job_name: The name of the job to get.
"""
# noinspection PyUnresolvedReferences
request = cr_client.jobs().get(
name=f"projects/{project}/locations/{location}/jobs/{job_name}",
)
response = request.execute()
return cls(
name=response["name"],
uid=response["uid"],
generation=response["generation"],
labels=response.get("labels", {}),
annotations=response.get("annotations", {}),
createTime=response["createTime"],
updateTime=response["updateTime"],
deleteTime=response.get("deleteTime"),
expireTime=response.get("expireTime"),
creator=response.get("creator"),
lastModifier=response.get("lastModifier"),
client=response.get("client"),
clientVersion=response.get("clientVersion"),
launchStage=response.get("launchStage", "GA"),
binaryAuthorization=response.get("binaryAuthorization", {}),
template=response.get("template"),
observedGeneration=response.get("observedGeneration"),
terminalCondition=response.get("terminalCondition", {}),
conditions=response.get("conditions", []),
executionCount=response.get("executionCount", 0),
latestCreatedExecution=response["latestCreatedExecution"],
reconciling=response.get("reconciling", False),
satisfiesPzs=response.get("satisfiesPzs", False),
etag=response["etag"],
)
@staticmethod
def create(
cr_client: Resource,
project: str,
location: str,
job_id: str,
body: Dict,
) -> Dict:
"""
Create a job on Cloud Run with the V2 API.
Args:
cr_client: The base client needed for interacting with GCP
Cloud Run V2 API.
project: The GCP project ID.
location: The GCP region.
job_id: The ID of the job to create.
body: The job body.
Returns:
The response from the Cloud Run V2 API.
"""
# noinspection PyUnresolvedReferences
request = cr_client.jobs().create(
parent=f"projects/{project}/locations/{location}",
jobId=job_id,
body=body,
)
response = request.execute()
return response
@staticmethod
def delete(
cr_client: Resource,
project: str,
location: str,
job_name: str,
) -> Dict:
"""
Delete a job on Cloud Run with the V2 API.
Args:
cr_client (Resource): The base client needed for interacting with GCP
Cloud Run V2 API.
project: The GCP project ID.
location: The GCP region.
job_name: The name of the job to delete.
Returns:
Dict: The response from the Cloud Run V2 API.
"""
# noinspection PyUnresolvedReferences
list_executions_request = (
cr_client.jobs()
.executions()
.list(
parent=f"projects/{project}/locations/{location}/jobs/{job_name}",
)
)
list_executions_response = list_executions_request.execute()
for execution_to_delete in list_executions_response.get("executions", []):
# noinspection PyUnresolvedReferences
delete_execution_request = (
cr_client.jobs()
.executions()
.delete(
name=execution_to_delete["name"],
)
)
delete_execution_request.execute()
# Sleep 3 seconds so that the execution is deleted before deleting the job
time.sleep(3)
# noinspection PyUnresolvedReferences
request = cr_client.jobs().delete(
name=f"projects/{project}/locations/{location}/jobs/{job_name}",
)
response = request.execute()
return response
@staticmethod
def run(
cr_client: Resource,
project: str,
location: str,
job_name: str,
):
"""
Run a job on Cloud Run with the V2 API.
Args:
cr_client: The base client needed for interacting with GCP
Cloud Run V2 API.
project: The GCP project ID.
location: The GCP region.
job_name: The name of the job to run.
"""
# noinspection PyUnresolvedReferences
request = cr_client.jobs().run(
name=f"projects/{project}/locations/{location}/jobs/{job_name}",
)
response = request.execute()
return response
@staticmethod
def _is_missing_container(ready_condition: Dict) -> bool:
"""
Check if the job is missing a container.
Args:
ready_condition: The ready condition for the job.
Returns:
Whether the job is missing a container.
"""
if (
ready_condition.get("state") == "CONTAINER_FAILED"
and ready_condition.get("reason") == "ContainerMissing"
):
return True
return False
|
JobV2
|
python
|
numba__numba
|
numba/cuda/stubs.py
|
{
"start": 4835,
"end": 5131
}
|
class ____(Stub):
'''
match_any_sync(mask, value)
Nvvm intrinsic for performing a compare and broadcast across a warp.
Returns a mask of threads that have same value as the given value from
within the masked warp.
'''
_description_ = '<match_any_sync()>'
|
match_any_sync
|
python
|
readthedocs__readthedocs.org
|
readthedocs/integrations/migrations/0009_migrate_headers_data.py
|
{
"start": 972,
"end": 1178
}
|
class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("integrations", "0008_add_new_jsonfields"),
]
operations = [migrations.RunPython(forwards_func)]
|
Migration
|
python
|
apache__airflow
|
providers/apache/pinot/tests/unit/apache/pinot/hooks/test_pinot.py
|
{
"start": 14885,
"end": 16256
}
|
class ____:
def setup_method(self):
self.conn = conn = mock.MagicMock()
self.conn.host = "host"
self.conn.port = "1000"
self.conn.conn_type = "http"
self.conn.login = "user"
self.conn.password = "pwd"
self.conn.extra_dejson = {"endpoint": "query/sql"}
self.cur = mock.MagicMock(rowcount=0)
self.conn.cursor.return_value = self.cur
self.conn.__enter__.return_value = self.cur
self.conn.__exit__.return_value = None
class TestPinotDBApiHook(PinotDbApiHook):
def get_conn(self):
return conn
def get_connection(self, conn_id):
return conn
self.db_hook = TestPinotDBApiHook
def test_get_uri_with_auth(self):
"""
Test on getting a pinot connection uri
"""
db_hook = self.db_hook()
assert db_hook.get_uri() == "http://user:pwd@host:1000/query/sql"
def test_get_conn_with_auth(self):
"""
Test on getting a pinot connection
"""
conn = self.db_hook().get_conn()
assert conn.host == "host"
assert conn.port == "1000"
assert conn.login == "user"
assert conn.password == "pwd"
assert conn.conn_type == "http"
assert conn.extra_dejson.get("endpoint") == "query/sql"
|
TestPinotDbApiHookWithAuth
|
python
|
SmileyChris__easy-thumbnails
|
easy_thumbnails/tests/apps.py
|
{
"start": 36,
"end": 150
}
|
class ____(AppConfig):
name = 'easy_thumbnails.tests'
label = 'easy_thumbnails_tests'
|
EasyThumbnailsTestConfig
|
python
|
django__django
|
tests/check_framework/test_security.py
|
{
"start": 7441,
"end": 8281
}
|
class ____(SimpleTestCase):
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_SECONDS=0,
)
def test_no_sts(self):
"""
Warn if SECURE_HSTS_SECONDS isn't > 0.
"""
self.assertEqual(base.check_sts(None), [base.W004])
@override_settings(MIDDLEWARE=[], SECURE_HSTS_SECONDS=0)
def test_no_sts_no_middleware(self):
"""
Don't warn if SECURE_HSTS_SECONDS isn't > 0 and SecurityMiddleware
isn't installed.
"""
self.assertEqual(base.check_sts(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_SECONDS=3600,
)
def test_with_sts(self):
self.assertEqual(base.check_sts(None), [])
|
CheckStrictTransportSecurityTest
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/suite/transfers/local_to_drive.py
|
{
"start": 1263,
"end": 5850
}
|
class ____(BaseOperator):
"""
Upload a list of files to a Google Drive folder.
This operator uploads a list of local files to a Google Drive folder.
The local files can optionally be deleted after upload.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:LocalFilesystemToGoogleDriveOperator`
:param local_paths: Python list of local file paths
:param drive_folder: path of the Drive folder, if *folder_id* is given,
*drive_folder* is a sub path of the folder.
:param gcp_conn_id: Airflow Connection ID for GCP.
:param delete: Should the local files be deleted after upload?
:param ignore_if_missing: If *True*, don't fail even if some files can't be
uploaded.
:param chunk_size: File will be uploaded in chunks of this many bytes. Only
used when *resumable* is set to *True*. Pass in a value of -1 if the
file is to be uploaded as a single chunk. Note that Google App Engine
has a 5MB limit on request size, so you should never set your chunk size
larger than 5MB, or to -1.
:param resumable: True if this is a resumable upload. False means upload
in a single request.
:param impersonation_chain: Optional service account to impersonate using
short-term credentials, or chained list of accounts required to get the
access token of the last account in the list, which will be impersonated
in the request. If set as a string, the account must grant the
originating account the Service Account Token Creator IAM role. If set
as a sequence, the identities from the list must grant Service Account
Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account
:param folder_id: The base/root folder id for each local path in the Drive
folder.
:param show_full_target_path: If true then it reveals full available file
path in the logs.
:return: Remote file ids after upload.
"""
template_fields = (
"local_paths",
"drive_folder",
)
def __init__(
self,
local_paths: Sequence[Path] | Sequence[str],
drive_folder: Path | str,
gcp_conn_id: str = "google_cloud_default",
delete: bool = False,
ignore_if_missing: bool = False,
chunk_size: int = 100 * 1024 * 1024,
resumable: bool = False,
impersonation_chain: str | Sequence[str] | None = None,
folder_id: str = "root",
show_full_target_path: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.local_paths = local_paths
self.drive_folder = drive_folder
self.gcp_conn_id = gcp_conn_id
self.delete = delete
self.ignore_if_missing = ignore_if_missing
self.chunk_size = chunk_size
self.resumable = resumable
self.impersonation_chain = impersonation_chain
self.folder_id = folder_id
self.show_full_target_path = show_full_target_path
def execute(self, context: Context) -> list[str]:
hook = GoogleDriveHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
remote_file_ids = []
for local_path in self.local_paths:
self.log.info("Uploading file to Google Drive: %s", local_path)
try:
remote_file_id = hook.upload_file(
local_location=str(local_path),
remote_location=str(Path(self.drive_folder) / Path(local_path).name),
chunk_size=self.chunk_size,
resumable=self.resumable,
folder_id=self.folder_id,
show_full_target_path=self.show_full_target_path,
)
remote_file_ids.append(remote_file_id)
if self.delete:
os.remove(local_path)
self.log.info("Deleted local file: %s", local_path)
except FileNotFoundError:
self.log.warning("File can't be found: %s", local_path)
except OSError:
self.log.warning("An OSError occurred for file: %s", local_path)
if not self.ignore_if_missing and len(remote_file_ids) < len(self.local_paths):
raise AirflowFailException("Some files couldn't be uploaded")
return remote_file_ids
|
LocalFilesystemToGoogleDriveOperator
|
python
|
Netflix__metaflow
|
metaflow/plugins/env_escape/client_modules.py
|
{
"start": 277,
"end": 4304
}
|
class ____(object):
def __init__(self, loader, prefix, exports, client):
self._loader = loader
self._prefix = prefix
self._client = client
is_match = re.compile(
r"^%s\.([a-zA-Z_][a-zA-Z0-9_]*)$" % prefix.replace(".", r"\.") # noqa W605
)
self._exports = {}
self._aliases = exports.get("aliases", [])
for k in ("classes", "functions", "values"):
result = []
for item in exports.get(k, []):
m = is_match.match(item)
if m:
result.append(m.group(1))
self._exports[k] = result
result = []
for item, _ in exports.get("exceptions", []):
m = is_match.match(item)
if m:
result.append(m.group(1))
self._exports["exceptions"] = result
def __getattr__(self, name):
if name == "__loader__":
return self._loader
if name == "__spec__":
return importlib.util.spec_from_loader(self._prefix, self._loader)
if name in ("__name__", "__package__"):
return self._prefix
if name in ("__file__", "__path__"):
return self._client.name
# Make the name canonical because the prefix is also canonical.
name = get_canonical_name(self._prefix + "." + name, self._aliases)[
len(self._prefix) + 1 :
]
if name in self._exports["classes"] or name in self._exports["exceptions"]:
# We load classes and exceptions lazily
return self._client.get_local_class("%s.%s" % (self._prefix, name))
elif name in self._exports["functions"]:
# TODO: Grab doc back from the remote side like in _make_method
def func(*args, **kwargs):
return self._client.stub_request(
None, OP_CALLFUNC, "%s.%s" % (self._prefix, name), *args, **kwargs
)
func.__name__ = name
func.__doc__ = "Unknown (TODO)"
return func
elif name in self._exports["values"]:
return self._client.stub_request(
None, OP_GETVAL, "%s.%s" % (self._prefix, name)
)
else:
# Try to see if this is a submodule that we can load
m = None
try:
submodule_name = ".".join([self._prefix, name])
m = importlib.import_module(submodule_name)
except ImportError:
pass
if m is None:
raise AttributeError(
"module '%s' has no attribute '%s' -- contact the author of the "
"configuration if this is something "
"you expect to work (support may be added if it exists in the "
"original library)" % (self._prefix, name)
)
return m
def __setattr__(self, name, value):
if name in (
"package",
"__spec__",
"_loader",
"_prefix",
"_client",
"_exports",
"_exception_classes",
"_aliases",
):
object.__setattr__(self, name, value)
return
if isinstance(value, _WrappedModule):
# This is when a module sets itself as an attribute of another
# module when loading
object.__setattr__(self, name, value)
return
# Make the name canonical because the prefix is also canonical.
name = get_canonical_name(self._prefix + "." + name, self._aliases)[
len(self._prefix) + 1 :
]
if name in self._exports["values"]:
self._client.stub_request(
None, OP_SETVAL, "%s.%s" % (self._prefix, name), value
)
elif name in self._exports["classes"] or name in self._exports["functions"]:
raise ValueError
else:
raise AttributeError(name)
|
_WrappedModule
|
python
|
kamyu104__LeetCode-Solutions
|
Python/satisfiability-of-equality-equations.py
|
{
"start": 1141,
"end": 2338
}
|
class ____(object):
def equationsPossible(self, equations):
"""
:type equations: List[str]
:rtype: bool
"""
graph = [[] for _ in xrange(26)]
for eqn in equations:
x = ord(eqn[0]) - ord('a')
y = ord(eqn[3]) - ord('a')
if eqn[1] == '!':
if x == y:
return False
else:
graph[x].append(y)
graph[y].append(x)
color = [None]*26
c = 0
for i in xrange(26):
if color[i] is not None:
continue
c += 1
stack = [i]
while stack:
node = stack.pop()
for nei in graph[node]:
if color[nei] is not None:
continue
color[nei] = c
stack.append(nei)
for eqn in equations:
if eqn[1] != '!':
continue
x = ord(eqn[0]) - ord('a')
y = ord(eqn[3]) - ord('a')
if color[x] is not None and \
color[x] == color[y]:
return False
return True
|
Solution2
|
python
|
getsentry__sentry
|
tests/sentry/services/test_organization_actions.py
|
{
"start": 9610,
"end": 11401
}
|
class ____(TestCase):
def test_slug_under_size_limit(self) -> None:
slug = generate_deterministic_organization_slug(
desired_slug_base="santry", desired_org_name="santry", owning_user_id=42
)
assert slug == "santry-095a9012d"
def test_slug_above_size_limit(self) -> None:
slug = generate_deterministic_organization_slug(
desired_slug_base="areallylongsentryorgnamethatiswaytoolong",
desired_org_name="santry",
owning_user_id=42,
)
assert len(slug) == 30
assert slug == "areallylongsentryorg-945bda148"
def test_slug_with_mixed_casing(self) -> None:
slug = generate_deterministic_organization_slug(
desired_slug_base="A mixed CASING str",
desired_org_name="santry",
owning_user_id=42,
)
assert slug == "a-mixed-casing-str-9e9173167"
def test_slug_with_unicode_chars(self) -> None:
unicoded_str = "Sí Señtry 😅"
slug = generate_deterministic_organization_slug(
desired_slug_base=unicoded_str, desired_org_name=unicoded_str, owning_user_id=42
)
assert slug == "si-sentry-3471b1b85"
def test_slug_with_0_length(self) -> None:
unicoded_str = "😅"
slug = generate_deterministic_organization_slug(
desired_slug_base=unicoded_str, desired_org_name=unicoded_str, owning_user_id=42
)
random_slug_regex = re.compile(r"^[a-f0-9]{10}-[a-f0-9]{9}")
assert random_slug_regex.match(slug)
slug = generate_deterministic_organization_slug(
desired_slug_base="", desired_org_name=unicoded_str, owning_user_id=42
)
assert random_slug_regex.match(slug)
|
TestGenerateDeterministicOrganizationSlug
|
python
|
automl__auto-sklearn
|
test/test_pipeline/components/classification/test_multinomial_nb.py
|
{
"start": 272,
"end": 1832
}
|
class ____(BaseClassificationComponentTest):
__test__ = True
res = dict()
res["default_iris"] = 0.97999999999999998
res["iris_n_calls"] = None
res["default_iris_iterative"] = 0.97999999999999998
res["default_iris_proba"] = 0.5865733413579101
res["default_iris_sparse"] = 0.54
res["default_digits"] = 0.89496053430479661
res["digits_n_calls"] = None
res["default_digits_iterative"] = 0.89496053430479661
res["default_digits_binary"] = 0.98967820279295693
res["default_digits_multilabel"] = 0.70484946987667163
res["default_digits_multilabel_proba"] = 0.80324074074074081
sk_mod = sklearn.naive_bayes.MultinomialNB
module = MultinomialNB
def test_default_configuration_negative_values(self):
# Custon preprocessing test to check if clipping to zero works
X_train, Y_train, X_test, Y_test = get_dataset(dataset="digits")
ss = sklearn.preprocessing.StandardScaler()
X_train = ss.fit_transform(X_train)
configuration_space = MultinomialNB.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
cls = MultinomialNB(
random_state=1,
**{
hp_name: default[hp_name]
for hp_name in default
if default[hp_name] is not None
},
)
cls = cls.fit(X_train, Y_train)
prediction = cls.predict(X_test)
self.assertAlmostEqual(np.nanmean(prediction == Y_test), 0.88888888888888884)
|
MultinomialNBComponentTest
|
python
|
spack__spack
|
lib/spack/spack/vendor/pyrsistent/_field_common.py
|
{
"start": 5667,
"end": 11569
}
|
class ____(TypeError):
"""
Raised when trying to assign a value with a type that doesn't match the declared type.
Attributes:
source_class -- The class of the record
field -- Field name
expected_types -- Types allowed for the field
actual_type -- The non matching type
"""
def __init__(self, source_class, field, expected_types, actual_type, *args, **kwargs):
super(PTypeError, self).__init__(*args, **kwargs)
self.source_class = source_class
self.field = field
self.expected_types = expected_types
self.actual_type = actual_type
SEQ_FIELD_TYPE_SUFFIXES = {
CheckedPVector: "PVector",
CheckedPSet: "PSet",
}
# Global dictionary to hold auto-generated field types: used for unpickling
_seq_field_types = {}
def _restore_seq_field_pickle(checked_class, item_type, data):
"""Unpickling function for auto-generated PVec/PSet field types."""
type_ = _seq_field_types[checked_class, item_type]
return _restore_pickle(type_, data)
def _types_to_names(types):
"""Convert a tuple of types to a human-readable string."""
return "".join(get_type(typ).__name__.capitalize() for typ in types)
def _make_seq_field_type(checked_class, item_type):
"""Create a subclass of the given checked class with the given item type."""
type_ = _seq_field_types.get((checked_class, item_type))
if type_ is not None:
return type_
class TheType(checked_class):
__type__ = item_type
def __reduce__(self):
return (_restore_seq_field_pickle,
(checked_class, item_type, list(self)))
suffix = SEQ_FIELD_TYPE_SUFFIXES[checked_class]
TheType.__name__ = _types_to_names(TheType._checked_types) + suffix
_seq_field_types[checked_class, item_type] = TheType
return TheType
def _sequence_field(checked_class, item_type, optional, initial):
"""
Create checked field for either ``PSet`` or ``PVector``.
:param checked_class: ``CheckedPSet`` or ``CheckedPVector``.
:param item_type: The required type for the items in the set.
:param optional: If true, ``None`` can be used as a value for
this field.
:param initial: Initial value to pass to factory.
:return: A ``field`` containing a checked class.
"""
TheType = _make_seq_field_type(checked_class, item_type)
if optional:
def factory(argument, _factory_fields=None, ignore_extra=False):
if argument is None:
return None
else:
return TheType.create(argument, _factory_fields=_factory_fields, ignore_extra=ignore_extra)
else:
factory = TheType.create
return field(type=optional_type(TheType) if optional else TheType,
factory=factory, mandatory=True,
initial=factory(initial))
def pset_field(item_type, optional=False, initial=()):
"""
Create checked ``PSet`` field.
:param item_type: The required type for the items in the set.
:param optional: If true, ``None`` can be used as a value for
this field.
:param initial: Initial value to pass to factory if no value is given
for the field.
:return: A ``field`` containing a ``CheckedPSet`` of the given type.
"""
return _sequence_field(CheckedPSet, item_type, optional,
initial)
def pvector_field(item_type, optional=False, initial=()):
"""
Create checked ``PVector`` field.
:param item_type: The required type for the items in the vector.
:param optional: If true, ``None`` can be used as a value for
this field.
:param initial: Initial value to pass to factory if no value is given
for the field.
:return: A ``field`` containing a ``CheckedPVector`` of the given type.
"""
return _sequence_field(CheckedPVector, item_type, optional,
initial)
_valid = lambda item: (True, "")
# Global dictionary to hold auto-generated field types: used for unpickling
_pmap_field_types = {}
def _restore_pmap_field_pickle(key_type, value_type, data):
"""Unpickling function for auto-generated PMap field types."""
type_ = _pmap_field_types[key_type, value_type]
return _restore_pickle(type_, data)
def _make_pmap_field_type(key_type, value_type):
"""Create a subclass of CheckedPMap with the given key and value types."""
type_ = _pmap_field_types.get((key_type, value_type))
if type_ is not None:
return type_
class TheMap(CheckedPMap):
__key_type__ = key_type
__value_type__ = value_type
def __reduce__(self):
return (_restore_pmap_field_pickle,
(self.__key_type__, self.__value_type__, dict(self)))
TheMap.__name__ = "{0}To{1}PMap".format(
_types_to_names(TheMap._checked_key_types),
_types_to_names(TheMap._checked_value_types))
_pmap_field_types[key_type, value_type] = TheMap
return TheMap
def pmap_field(key_type, value_type, optional=False, invariant=PFIELD_NO_INVARIANT):
"""
Create a checked ``PMap`` field.
:param key: The required type for the keys of the map.
:param value: The required type for the values of the map.
:param optional: If true, ``None`` can be used as a value for
this field.
:param invariant: Pass-through to ``field``.
:return: A ``field`` containing a ``CheckedPMap``.
"""
TheMap = _make_pmap_field_type(key_type, value_type)
if optional:
def factory(argument):
if argument is None:
return None
else:
return TheMap.create(argument)
else:
factory = TheMap.create
return field(mandatory=True, initial=TheMap(),
type=optional_type(TheMap) if optional else TheMap,
factory=factory, invariant=invariant)
|
PTypeError
|
python
|
gevent__gevent
|
src/gevent/tests/known_failures.py
|
{
"start": 4249,
"end": 4454
}
|
class ____(_Action):
__slots__ = ()
def __init__(self, reason='', when=ALWAYS, ignore_coverage=NEVER):
_Action.__init__(self, reason, run_alone=when, ignore_coverage=ignore_coverage)
|
RunAlone
|
python
|
pypa__installer
|
tests/test_core.py
|
{
"start": 1051,
"end": 3286
}
|
class ____(WheelSource):
def __init__(self, *, distribution, version, regular_files, dist_info_files):
super().__init__(distribution, version)
self.dist_info_files = {
file: textwrap.dedent(content.decode("utf-8"))
for file, content in dist_info_files.items()
}
self.regular_files = {
file: textwrap.dedent(content.decode("utf-8")).encode("utf-8")
for file, content in regular_files.items()
}
# Compute RECORD file.
_records = [record for record, _, _ in self.get_contents()]
self.dist_info_files["RECORD"] = "\n".join(
sorted(
",".join([file, "sha256=" + hash_, str(size)])
for file, hash_, size in _records
)
)
@property
def dist_info_filenames(self):
return list(self.dist_info_files)
def read_dist_info(self, filename):
return self.dist_info_files[filename]
def validate_record(self) -> None:
# Skip validation since the logic is different.
return
def get_contents(self):
# Sort for deterministic behaviour for Python versions that do not preserve
# insertion order for dictionaries.
for file, content in sorted(self.regular_files.items()):
hashed, size = hash_and_size(content)
record = (file, f"sha256={hashed}", str(size))
with BytesIO(content) as stream:
yield record, stream, False
# Sort for deterministic behaviour for Python versions that do not preserve
# insertion order for dictionaries.
for file, text in sorted(self.dist_info_files.items()):
content = text.encode("utf-8")
hashed, size = hash_and_size(content)
record = (
self.dist_info_dir + "/" + file,
f"sha256={hashed}",
str(size),
)
with BytesIO(content) as stream:
yield record, stream, False
# --------------------------------------------------------------------------------------
# Actual Tests
# --------------------------------------------------------------------------------------
|
FakeWheelSource
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_image12.py
|
{
"start": 315,
"end": 916
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("image12.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_row(1, 75)
worksheet.set_column("C:C", 32)
worksheet.insert_image("C2", self.image_dir + "logo.png")
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/datasets.py
|
{
"start": 204945,
"end": 212694
}
|
class ____(Response):
"""
Response of datasets.get_versions endpoint.
:param versions: List of versions
:type versions: Sequence[Version]
"""
_service = "datasets"
_action = "get_versions"
_version = "2.23"
_schema = {
"definitions": {
"stat_count": {
"properties": {
"count": {
"description": "Item name",
"type": ["integer", "null"],
},
"name": {
"description": "Number of appearances",
"type": ["string", "null"],
},
},
"type": "object",
},
"statistics": {
"properties": {
"content_types": {
"items": {
"$ref": "#/definitions/stat_count",
"description": (
"List of content type counts for the version (e.g.\n 'image/jpeg',"
" 'image/png', 'video/mp4')"
),
},
"type": ["array", "null"],
},
"frames": {
"items": {
"$ref": "#/definitions/stat_count",
"description": (
"List of frame counts, indicating the\n type of frames included in"
" the version (annotated/"
),
},
"type": ["array", "null"],
},
"labels": {
"items": {
"$ref": "#/definitions/stat_count",
"description": (
"List of labels' counts,\n indicating the categories included in the"
" version"
),
},
"type": ["array", "null"],
},
},
"type": "object",
},
"version": {
"properties": {
"comment": {
"description": "Version comment",
"type": ["string", "null"],
},
"committed": {
"description": "Commit time",
"format": "date-time",
"type": ["string", "null"],
},
"committed_frames_ts": {
"description": "Timestamp of last committed frame",
"type": ["number", "null"],
},
"committed_rois_ts": {
"description": "Timestamp of last committed ROI",
"type": ["number", "null"],
},
"company": {
"description": "Company ID",
"type": ["string", "null"],
},
"created": {
"description": "Version creation time (UTC) ",
"format": "date-time",
"type": ["string", "null"],
},
"dataset": {
"description": "Datset ID",
"type": ["string", "null"],
},
"es_index": {
"description": "Name of elasticsearch index",
"type": ["string", "null"],
},
"id": {"description": "Version ID", "type": ["string", "null"]},
"last_frames_update": {
"description": "Last time version was created, committed or frames were updated or saved",
"format": "date-time",
"type": ["string", "null"],
},
"metadata": {
"additionalProperties": True,
"description": "User-provided metadata",
"type": ["object", "null"],
},
"name": {
"description": "Version name",
"type": ["string", "null"],
},
"parent": {
"description": "Version parent ID",
"type": ["string", "null"],
},
"published": {
"description": "Publish time",
"format": "date-time",
"type": ["string", "null"],
},
"stats": {
"description": "Version statistics",
"oneOf": [
{"$ref": "#/definitions/statistics"},
{"type": "null"},
],
},
"status": {
"description": "Version status",
"oneOf": [
{"$ref": "#/definitions/version_status_enum"},
{"type": "null"},
],
},
"system_tags": {
"description": (
"List of system tags. This field is reserved for system use, please don't use it."
),
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "List of user-defined tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"task": {
"description": "Task ID of the task which created the version",
"type": ["string", "null"],
},
"user": {
"description": "Associated user ID",
"type": ["string", "null"],
},
},
"type": "object",
},
"version_status_enum": {
"enum": ["draft", "committing", "committed", "published"],
"type": "string",
},
},
"properties": {
"versions": {
"description": "List of versions",
"items": {"$ref": "#/definitions/version"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, versions=None, **kwargs):
super(GetVersionsResponse, self).__init__(**kwargs)
self.versions = versions
@schema_property("versions")
def versions(self):
return self._property_versions
@versions.setter
def versions(self, value):
if value is None:
self._property_versions = None
return
self.assert_isinstance(value, "versions", (list, tuple))
if any(isinstance(v, dict) for v in value):
value = [Version.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "versions", Version, is_array=True)
self._property_versions = value
|
GetVersionsResponse
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1537755,
"end": 1538849
}
|
class ____(sgqlc.types.Type, Node):
"""Represents an 'unmarked_as_duplicate' event on a given issue or
pull request.
"""
__schema__ = github_schema
__field_names__ = ("actor", "canonical", "created_at", "duplicate", "is_cross_repository")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
"""Identifies the actor who performed the event."""
canonical = sgqlc.types.Field("IssueOrPullRequest", graphql_name="canonical")
"""The authoritative issue or pull request which has been duplicated
by another.
"""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
duplicate = sgqlc.types.Field("IssueOrPullRequest", graphql_name="duplicate")
"""The issue or pull request which has been marked as a duplicate of
another.
"""
is_cross_repository = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isCrossRepository")
"""Canonical and duplicate belong to different repositories."""
|
UnmarkedAsDuplicateEvent
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/genericType32.py
|
{
"start": 286,
"end": 370
}
|
class ____(Foo[T]): ...
def func(x: Contra[Foo[int]]):
v: Contra[Bar[int]] = x
|
Bar
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_default_format05.py
|
{
"start": 315,
"end": 1137
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("default_format05.xlsx")
def test_create_file(self):
"""Test the creation of a file with user defined default format"""
workbook = Workbook(
self.got_filename,
{
"default_format_properties": {
"font_name": "MS Gothic",
"font_size": 11,
},
"default_row_height": 18,
"default_column_width": 72,
},
)
worksheet = workbook.add_worksheet()
worksheet.insert_image("E9", self.image_dir + "red.png")
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
sphinx-doc__sphinx
|
sphinx/ext/linkcode.py
|
{
"start": 872,
"end": 3040
}
|
class ____(SphinxError):
category = 'linkcode error'
def doctree_read(app: Sphinx, doctree: Node) -> None:
env = app.env
resolve_target = getattr(env.config, 'linkcode_resolve', None)
if not callable(env.config.linkcode_resolve):
msg = 'Function `linkcode_resolve` is not given in conf.py'
raise LinkcodeError(msg)
assert resolve_target is not None # for mypy
# By default, the linkcode extension will only inject references
# for an ``html`` builder. If a builder wishes to support managing
# references generated by linkcode as well, it can define the
# ``supported_linkcode`` attribute.
node_only_expr = getattr(app.builder, 'supported_linkcode', 'html')
for objnode in list(doctree.findall(addnodes.desc)):
domain = objnode.get('domain')
uris: set[str] = set()
for signode in objnode:
if not isinstance(signode, addnodes.desc_signature):
continue
# Convert signode to a specified format
info = {}
for key in _DOMAIN_KEYS.get(domain, ()):
value = signode.get(key)
if not value:
value = ''
info[key] = value
if not info:
continue
# Call user code to resolve the link
uri = resolve_target(domain, info)
if not uri:
# no source
continue
if uri in uris or not uri:
# only one link per name, please
continue
uris.add(uri)
inline = nodes.inline('', _('[source]'), classes=['viewcode-link'])
onlynode = addnodes.only(expr=node_only_expr)
onlynode += nodes.reference('', '', inline, internal=False, refuri=uri)
signode += onlynode
def setup(app: Sphinx) -> ExtensionMetadata:
app.connect('doctree-read', doctree_read)
app.add_config_value(
'linkcode_resolve', None, '', types=frozenset({FunctionType, NoneType})
)
return {
'version': sphinx.__display_version__,
'parallel_read_safe': True,
}
|
LinkcodeError
|
python
|
zarr-developers__zarr-python
|
src/zarr/codecs/sharding.py
|
{
"start": 7434,
"end": 25286
}
|
class ____(
ArrayBytesCodec, ArrayBytesCodecPartialDecodeMixin, ArrayBytesCodecPartialEncodeMixin
):
"""Sharding codec"""
chunk_shape: tuple[int, ...]
codecs: tuple[Codec, ...]
index_codecs: tuple[Codec, ...]
index_location: ShardingCodecIndexLocation = ShardingCodecIndexLocation.end
def __init__(
self,
*,
chunk_shape: ShapeLike,
codecs: Iterable[Codec | dict[str, JSON]] = (BytesCodec(),),
index_codecs: Iterable[Codec | dict[str, JSON]] = (BytesCodec(), Crc32cCodec()),
index_location: ShardingCodecIndexLocation | str = ShardingCodecIndexLocation.end,
) -> None:
chunk_shape_parsed = parse_shapelike(chunk_shape)
codecs_parsed = parse_codecs(codecs)
index_codecs_parsed = parse_codecs(index_codecs)
index_location_parsed = parse_index_location(index_location)
object.__setattr__(self, "chunk_shape", chunk_shape_parsed)
object.__setattr__(self, "codecs", codecs_parsed)
object.__setattr__(self, "index_codecs", index_codecs_parsed)
object.__setattr__(self, "index_location", index_location_parsed)
# Use instance-local lru_cache to avoid memory leaks
# numpy void scalars are not hashable, which means an array spec with a fill value that is
# a numpy void scalar will break the lru_cache. This is commented for now but should be
# fixed. See https://github.com/zarr-developers/zarr-python/issues/3054
# object.__setattr__(self, "_get_chunk_spec", lru_cache()(self._get_chunk_spec))
object.__setattr__(self, "_get_index_chunk_spec", lru_cache()(self._get_index_chunk_spec))
object.__setattr__(self, "_get_chunks_per_shard", lru_cache()(self._get_chunks_per_shard))
# todo: typedict return type
def __getstate__(self) -> dict[str, Any]:
return self.to_dict()
def __setstate__(self, state: dict[str, Any]) -> None:
config = state["configuration"]
object.__setattr__(self, "chunk_shape", parse_shapelike(config["chunk_shape"]))
object.__setattr__(self, "codecs", parse_codecs(config["codecs"]))
object.__setattr__(self, "index_codecs", parse_codecs(config["index_codecs"]))
object.__setattr__(self, "index_location", parse_index_location(config["index_location"]))
# Use instance-local lru_cache to avoid memory leaks
# object.__setattr__(self, "_get_chunk_spec", lru_cache()(self._get_chunk_spec))
object.__setattr__(self, "_get_index_chunk_spec", lru_cache()(self._get_index_chunk_spec))
object.__setattr__(self, "_get_chunks_per_shard", lru_cache()(self._get_chunks_per_shard))
@classmethod
def from_dict(cls, data: dict[str, JSON]) -> Self:
_, configuration_parsed = parse_named_configuration(data, "sharding_indexed")
return cls(**configuration_parsed) # type: ignore[arg-type]
@property
def codec_pipeline(self) -> CodecPipeline:
return get_pipeline_class().from_codecs(self.codecs)
def to_dict(self) -> dict[str, JSON]:
return {
"name": "sharding_indexed",
"configuration": {
"chunk_shape": self.chunk_shape,
"codecs": tuple(s.to_dict() for s in self.codecs),
"index_codecs": tuple(s.to_dict() for s in self.index_codecs),
"index_location": self.index_location.value,
},
}
def evolve_from_array_spec(self, array_spec: ArraySpec) -> Self:
shard_spec = self._get_chunk_spec(array_spec)
evolved_codecs = tuple(c.evolve_from_array_spec(array_spec=shard_spec) for c in self.codecs)
if evolved_codecs != self.codecs:
return replace(self, codecs=evolved_codecs)
return self
def validate(
self,
*,
shape: tuple[int, ...],
dtype: ZDType[TBaseDType, TBaseScalar],
chunk_grid: ChunkGrid,
) -> None:
if len(self.chunk_shape) != len(shape):
raise ValueError(
"The shard's `chunk_shape` and array's `shape` need to have the same number of dimensions."
)
if not isinstance(chunk_grid, RegularChunkGrid):
raise TypeError("Sharding is only compatible with regular chunk grids.")
if not all(
s % c == 0
for s, c in zip(
chunk_grid.chunk_shape,
self.chunk_shape,
strict=False,
)
):
raise ValueError(
f"The array's `chunk_shape` (got {chunk_grid.chunk_shape}) "
f"needs to be divisible by the shard's inner `chunk_shape` (got {self.chunk_shape})."
)
async def _decode_single(
self,
shard_bytes: Buffer,
shard_spec: ArraySpec,
) -> NDBuffer:
shard_shape = shard_spec.shape
chunk_shape = self.chunk_shape
chunks_per_shard = self._get_chunks_per_shard(shard_spec)
chunk_spec = self._get_chunk_spec(shard_spec)
indexer = BasicIndexer(
tuple(slice(0, s) for s in shard_shape),
shape=shard_shape,
chunk_grid=RegularChunkGrid(chunk_shape=chunk_shape),
)
# setup output array
out = chunk_spec.prototype.nd_buffer.empty(
shape=shard_shape,
dtype=shard_spec.dtype.to_native_dtype(),
order=shard_spec.order,
)
shard_dict = await _ShardReader.from_bytes(shard_bytes, self, chunks_per_shard)
if shard_dict.index.is_all_empty():
out.fill(shard_spec.fill_value)
return out
# decoding chunks and writing them into the output buffer
await self.codec_pipeline.read(
[
(
_ShardingByteGetter(shard_dict, chunk_coords),
chunk_spec,
chunk_selection,
out_selection,
is_complete_shard,
)
for chunk_coords, chunk_selection, out_selection, is_complete_shard in indexer
],
out,
)
return out
async def _decode_partial_single(
self,
byte_getter: ByteGetter,
selection: SelectorTuple,
shard_spec: ArraySpec,
) -> NDBuffer | None:
shard_shape = shard_spec.shape
chunk_shape = self.chunk_shape
chunks_per_shard = self._get_chunks_per_shard(shard_spec)
chunk_spec = self._get_chunk_spec(shard_spec)
indexer = get_indexer(
selection,
shape=shard_shape,
chunk_grid=RegularChunkGrid(chunk_shape=chunk_shape),
)
# setup output array
out = shard_spec.prototype.nd_buffer.empty(
shape=indexer.shape,
dtype=shard_spec.dtype.to_native_dtype(),
order=shard_spec.order,
)
indexed_chunks = list(indexer)
all_chunk_coords = {chunk_coords for chunk_coords, *_ in indexed_chunks}
# reading bytes of all requested chunks
shard_dict: ShardMapping = {}
if self._is_total_shard(all_chunk_coords, chunks_per_shard):
# read entire shard
shard_dict_maybe = await self._load_full_shard_maybe(
byte_getter=byte_getter,
prototype=chunk_spec.prototype,
chunks_per_shard=chunks_per_shard,
)
if shard_dict_maybe is None:
return None
shard_dict = shard_dict_maybe
else:
# read some chunks within the shard
shard_index = await self._load_shard_index_maybe(byte_getter, chunks_per_shard)
if shard_index is None:
return None
shard_dict = {}
for chunk_coords in all_chunk_coords:
chunk_byte_slice = shard_index.get_chunk_slice(chunk_coords)
if chunk_byte_slice:
chunk_bytes = await byte_getter.get(
prototype=chunk_spec.prototype,
byte_range=RangeByteRequest(chunk_byte_slice[0], chunk_byte_slice[1]),
)
if chunk_bytes:
shard_dict[chunk_coords] = chunk_bytes
# decoding chunks and writing them into the output buffer
await self.codec_pipeline.read(
[
(
_ShardingByteGetter(shard_dict, chunk_coords),
chunk_spec,
chunk_selection,
out_selection,
is_complete_shard,
)
for chunk_coords, chunk_selection, out_selection, is_complete_shard in indexer
],
out,
)
if hasattr(indexer, "sel_shape"):
return out.reshape(indexer.sel_shape)
else:
return out
async def _encode_single(
self,
shard_array: NDBuffer,
shard_spec: ArraySpec,
) -> Buffer | None:
shard_shape = shard_spec.shape
chunk_shape = self.chunk_shape
chunks_per_shard = self._get_chunks_per_shard(shard_spec)
chunk_spec = self._get_chunk_spec(shard_spec)
indexer = list(
BasicIndexer(
tuple(slice(0, s) for s in shard_shape),
shape=shard_shape,
chunk_grid=RegularChunkGrid(chunk_shape=chunk_shape),
)
)
shard_builder = dict.fromkeys(morton_order_iter(chunks_per_shard))
await self.codec_pipeline.write(
[
(
_ShardingByteSetter(shard_builder, chunk_coords),
chunk_spec,
chunk_selection,
out_selection,
is_complete_shard,
)
for chunk_coords, chunk_selection, out_selection, is_complete_shard in indexer
],
shard_array,
)
return await self._encode_shard_dict(
shard_builder,
chunks_per_shard=chunks_per_shard,
buffer_prototype=default_buffer_prototype(),
)
async def _encode_partial_single(
self,
byte_setter: ByteSetter,
shard_array: NDBuffer,
selection: SelectorTuple,
shard_spec: ArraySpec,
) -> None:
shard_shape = shard_spec.shape
chunk_shape = self.chunk_shape
chunks_per_shard = self._get_chunks_per_shard(shard_spec)
chunk_spec = self._get_chunk_spec(shard_spec)
shard_reader = await self._load_full_shard_maybe(
byte_getter=byte_setter,
prototype=chunk_spec.prototype,
chunks_per_shard=chunks_per_shard,
)
shard_reader = shard_reader or _ShardReader.create_empty(chunks_per_shard)
shard_dict = {k: shard_reader.get(k) for k in morton_order_iter(chunks_per_shard)}
indexer = list(
get_indexer(
selection, shape=shard_shape, chunk_grid=RegularChunkGrid(chunk_shape=chunk_shape)
)
)
await self.codec_pipeline.write(
[
(
_ShardingByteSetter(shard_dict, chunk_coords),
chunk_spec,
chunk_selection,
out_selection,
is_complete_shard,
)
for chunk_coords, chunk_selection, out_selection, is_complete_shard in indexer
],
shard_array,
)
buf = await self._encode_shard_dict(
shard_dict,
chunks_per_shard=chunks_per_shard,
buffer_prototype=default_buffer_prototype(),
)
if buf is None:
await byte_setter.delete()
else:
await byte_setter.set(buf)
async def _encode_shard_dict(
self,
map: ShardMapping,
chunks_per_shard: tuple[int, ...],
buffer_prototype: BufferPrototype,
) -> Buffer | None:
index = _ShardIndex.create_empty(chunks_per_shard)
buffers = []
template = buffer_prototype.buffer.create_zero_length()
chunk_start = 0
for chunk_coords in morton_order_iter(chunks_per_shard):
value = map.get(chunk_coords)
if value is None:
continue
if len(value) == 0:
continue
chunk_length = len(value)
buffers.append(value)
index.set_chunk_slice(chunk_coords, slice(chunk_start, chunk_start + chunk_length))
chunk_start += chunk_length
if len(buffers) == 0:
return None
index_bytes = await self._encode_shard_index(index)
if self.index_location == ShardingCodecIndexLocation.start:
empty_chunks_mask = index.offsets_and_lengths[..., 0] == MAX_UINT_64
index.offsets_and_lengths[~empty_chunks_mask, 0] += len(index_bytes)
index_bytes = await self._encode_shard_index(
index
) # encode again with corrected offsets
buffers.insert(0, index_bytes)
else:
buffers.append(index_bytes)
return template.combine(buffers)
def _is_total_shard(
self, all_chunk_coords: set[tuple[int, ...]], chunks_per_shard: tuple[int, ...]
) -> bool:
return len(all_chunk_coords) == product(chunks_per_shard) and all(
chunk_coords in all_chunk_coords for chunk_coords in c_order_iter(chunks_per_shard)
)
async def _decode_shard_index(
self, index_bytes: Buffer, chunks_per_shard: tuple[int, ...]
) -> _ShardIndex:
index_array = next(
iter(
await get_pipeline_class()
.from_codecs(self.index_codecs)
.decode(
[(index_bytes, self._get_index_chunk_spec(chunks_per_shard))],
)
)
)
assert index_array is not None
return _ShardIndex(index_array.as_numpy_array())
async def _encode_shard_index(self, index: _ShardIndex) -> Buffer:
index_bytes = next(
iter(
await get_pipeline_class()
.from_codecs(self.index_codecs)
.encode(
[
(
get_ndbuffer_class().from_numpy_array(index.offsets_and_lengths),
self._get_index_chunk_spec(index.chunks_per_shard),
)
],
)
)
)
assert index_bytes is not None
assert isinstance(index_bytes, Buffer)
return index_bytes
def _shard_index_size(self, chunks_per_shard: tuple[int, ...]) -> int:
return (
get_pipeline_class()
.from_codecs(self.index_codecs)
.compute_encoded_size(
16 * product(chunks_per_shard), self._get_index_chunk_spec(chunks_per_shard)
)
)
def _get_index_chunk_spec(self, chunks_per_shard: tuple[int, ...]) -> ArraySpec:
return ArraySpec(
shape=chunks_per_shard + (2,),
dtype=UInt64(endianness="little"),
fill_value=MAX_UINT_64,
config=ArrayConfig(
order="C", write_empty_chunks=False
), # Note: this is hard-coded for simplicity -- it is not surfaced into user code,
prototype=default_buffer_prototype(),
)
def _get_chunk_spec(self, shard_spec: ArraySpec) -> ArraySpec:
return ArraySpec(
shape=self.chunk_shape,
dtype=shard_spec.dtype,
fill_value=shard_spec.fill_value,
config=shard_spec.config,
prototype=shard_spec.prototype,
)
def _get_chunks_per_shard(self, shard_spec: ArraySpec) -> tuple[int, ...]:
return tuple(
s // c
for s, c in zip(
shard_spec.shape,
self.chunk_shape,
strict=False,
)
)
async def _load_shard_index_maybe(
self, byte_getter: ByteGetter, chunks_per_shard: tuple[int, ...]
) -> _ShardIndex | None:
shard_index_size = self._shard_index_size(chunks_per_shard)
if self.index_location == ShardingCodecIndexLocation.start:
index_bytes = await byte_getter.get(
prototype=numpy_buffer_prototype(),
byte_range=RangeByteRequest(0, shard_index_size),
)
else:
index_bytes = await byte_getter.get(
prototype=numpy_buffer_prototype(), byte_range=SuffixByteRequest(shard_index_size)
)
if index_bytes is not None:
return await self._decode_shard_index(index_bytes, chunks_per_shard)
return None
async def _load_shard_index(
self, byte_getter: ByteGetter, chunks_per_shard: tuple[int, ...]
) -> _ShardIndex:
return (
await self._load_shard_index_maybe(byte_getter, chunks_per_shard)
) or _ShardIndex.create_empty(chunks_per_shard)
async def _load_full_shard_maybe(
self, byte_getter: ByteGetter, prototype: BufferPrototype, chunks_per_shard: tuple[int, ...]
) -> _ShardReader | None:
shard_bytes = await byte_getter.get(prototype=prototype)
return (
await _ShardReader.from_bytes(shard_bytes, self, chunks_per_shard)
if shard_bytes
else None
)
def compute_encoded_size(self, input_byte_length: int, shard_spec: ArraySpec) -> int:
chunks_per_shard = self._get_chunks_per_shard(shard_spec)
return input_byte_length + self._shard_index_size(chunks_per_shard)
|
ShardingCodec
|
python
|
OmkarPathak__pygorithm
|
tests/test_geometry.py
|
{
"start": 22527,
"end": 39892
}
|
class ____(unittest.TestCase):
def setUp(self):
random.seed()
def test_constructor_standard(self):
poly = polygon2.Polygon2([ vector2.Vector2(0, 1),
vector2.Vector2(1, 1),
vector2.Vector2(1, 0),
vector2.Vector2(0, 0) ])
self.assertEqual(4, len(poly.points))
self.assertEqual(4, len(poly.lines))
self.assertEqual(2, len(poly.normals))
self.assertEqual(0, poly.points[0].x)
self.assertEqual(1, poly.points[0].y)
self.assertEqual(1, poly.points[1].x)
self.assertEqual(1, poly.points[1].y)
self.assertEqual(1, poly.points[2].x)
self.assertEqual(0, poly.points[2].y)
self.assertEqual(0, poly.points[3].x)
self.assertEqual(0, poly.points[3].y)
self.assertEqual(0, poly.lines[0].start.x)
self.assertEqual(1, poly.lines[0].start.y)
self.assertEqual(1, poly.lines[0].end.x)
self.assertEqual(1, poly.lines[0].end.y)
self.assertEqual(1, poly.lines[1].start.x)
self.assertEqual(1, poly.lines[1].start.y)
self.assertEqual(1, poly.lines[1].end.x)
self.assertEqual(0, poly.lines[1].end.y)
self.assertEqual(1, poly.lines[2].start.x)
self.assertEqual(0, poly.lines[2].start.y)
self.assertEqual(0, poly.lines[2].end.x)
self.assertEqual(0, poly.lines[2].end.y)
self.assertEqual(0, poly.lines[3].start.x)
self.assertEqual(0, poly.lines[3].start.y)
self.assertEqual(0, poly.lines[3].end.x)
self.assertEqual(1, poly.lines[3].end.y)
self.assertIsNotNone(next((vec for vec in poly.normals if vec.y == 0), None))
self.assertIsNotNone(next((vec for vec in poly.normals if vec.x == 0), None))
self.assertAlmostEqual(0.5, poly.center.x)
self.assertAlmostEqual(0.5, poly.center.y)
poly2 = polygon2.Polygon2([ (0, 1), (1, 1), (1, 0), (0, 0) ])
self.assertEqual(4, len(poly2.points))
self.assertEqual(4, len(poly2.lines))
self.assertEqual(2, len(poly2.normals))
with self.assertRaises(StopIteration):
next(i for i in range(4) if poly.points[i].x != poly2.points[i].x or poly.points[i].y != poly2.points[i].y)
def test_constructor_repeated(self):
with self.assertRaises(ValueError):
poly = polygon2.Polygon2([ (0, 1), (1, 1), (1, 0), (0, 0), (0, 1) ])
def test_constructor_two_points(self):
with self.assertRaises(ValueError):
poly = polygon2.Polygon2([ (0, 1), (1, 1) ])
def test_constructor_not_convex(self):
with self.assertRaises(ValueError):
poly = polygon2.Polygon2([ (0, 1), (0.5, 0.8), (1, 1), (1, 0), (0, 0) ])
def test_constructor_not_clockwise(self):
with self.assertRaises(ValueError):
poly = polygon2.Polygon2([ (0, 0), (1, 0), (1, 1), (0, 1) ])
def test_from_regular(self):
diamond = polygon2.Polygon2.from_regular(4, 1.414213562373095)
self.assertAlmostEqual(2, diamond.points[0].x)
self.assertAlmostEqual(1, diamond.points[0].y)
self.assertAlmostEqual(1, diamond.points[1].x)
self.assertAlmostEqual(0, diamond.points[1].y)
self.assertAlmostEqual(0, diamond.points[2].x)
self.assertAlmostEqual(1, diamond.points[2].y)
self.assertAlmostEqual(1, diamond.points[3].x)
self.assertAlmostEqual(2, diamond.points[3].y)
diamond_shifted = polygon2.Polygon2.from_regular(4, 1.414213562373095, center = vector2.Vector2(0, 0))
for i in range(4):
self.assertAlmostEqual(diamond.points[i].x, diamond_shifted.points[i].x + 1)
self.assertAlmostEqual(diamond.points[i].y, diamond_shifted.points[i].y + 1)
square = polygon2.Polygon2.from_regular(4, 1, math.pi / 4)
self.assertAlmostEqual(1, square.points[0].x)
self.assertAlmostEqual(1, square.points[0].y)
self.assertAlmostEqual(1, square.points[1].x)
self.assertAlmostEqual(0, square.points[1].y)
self.assertAlmostEqual(0, square.points[2].x)
self.assertAlmostEqual(0, square.points[2].y)
self.assertAlmostEqual(0, square.points[3].x)
self.assertAlmostEqual(1, square.points[3].y)
square2 = polygon2.Polygon2.from_regular(4, 1, start_degs = 45)
for i in range(4):
self.assertAlmostEqual(square.points[i].x, square2.points[i].x)
self.assertAlmostEqual(square.points[i].y, square2.points[i].y)
def test_from_regular_center(self):
for i in range(3, 13):
_poly = polygon2.Polygon2.from_regular(i, 1)
foundx0 = False
foundy0 = False
for p in _poly.points:
if math.isclose(p.x, 0, abs_tol=1e-07):
foundx0 = True
if foundy0:
break
if math.isclose(p.y, 0, abs_tol=1e-07):
foundy0 = True
if foundx0:
break
helpmsg = "\ni={}\nfoundx0={}, foundy0={}, center={}\nrepr={}\n\nstr={}".format(i, foundx0, foundy0, _poly.center, repr(_poly), str(_poly))
self.assertTrue(foundx0, msg=helpmsg)
self.assertTrue(foundy0, msg=helpmsg)
def test_from_rotated(self):
# isos triangle
# weighted total = (0 + 1 + 2, 0 + 1 + 1) = (3, 2)
# center = (1, 2/3)
triangle = polygon2.Polygon2([ (0, 0), (1, 1), (2, 1) ])
triangle_rot = polygon2.Polygon2.from_rotated(triangle, math.pi / 4)
# example of how to calculate:
# shift so you rotate about origin (subtract center)
# (0, 0) - (1, 2/3) = (-1, -2/3)
# rotate 45 degrees clockwise = (-1 * cos(45) - (-2/3) * sin(45), (-2/3) * cos(45) + (-1) * sin(45)) = (-0.23570226039, -1.17851130198)
# shift back (add center): (0.76429773961, -0.51184463531)
self.assertAlmostEqual(0.76429773961, triangle_rot.points[0].x, msg='original={}\n\nrotated={}'.format(triangle, triangle_rot))
self.assertAlmostEqual(-0.51184463531, triangle_rot.points[0].y, msg='original={}\n\nrotated={}'.format(triangle, triangle_rot))
self.assertAlmostEqual(0.76429773960, triangle_rot.points[1].x, msg='original={}\n\nrotated={}'.format(triangle, triangle_rot))
self.assertAlmostEqual(0.90236892706, triangle_rot.points[1].y, msg='original={}\n\nrotated={}'.format(triangle, triangle_rot))
self.assertAlmostEqual(1.47140452079, triangle_rot.points[2].x, msg='original={}\n\nrotated={}'.format(triangle, triangle_rot))
self.assertAlmostEqual(1.60947570825, triangle_rot.points[2].y, msg='original={}\n\nrotated={}'.format(triangle, triangle_rot))
self.assertAlmostEqual(1, triangle_rot.center.x, msg='original={}\n\nrotated={}'.format(triangle, triangle_rot))
self.assertAlmostEqual(0.66666666667, triangle_rot.center.y, msg='original={}\n\nrotated={}'.format(triangle, triangle_rot))
def test_area(self):
# https://www.calculatorsoup.com/calculators/geometry-plane/polygon.php helpful for checking
poly = polygon2.Polygon2.from_regular(4, 1)
self.assertAlmostEqual(1, poly.area)
poly2 = polygon2.Polygon2.from_regular(4, 2)
self.assertAlmostEqual(4, poly2.area)
poly3 = polygon2.Polygon2.from_regular(8, 3.7)
self.assertAlmostEqual(66.1011673, poly3.area, msg=str(poly3))
poly4 = polygon2.Polygon2([ (0, 0), (1, 1), (2, 1) ])
self.assertAlmostEqual(0.5, poly4.area)
poly5 = polygon2.Polygon2([ (0, 0), (1, 1), (2, 1), (1, -0.25) ])
self.assertAlmostEqual(1.25, poly5.area)
def _proj_onto_axis_fuzzer(self, points, axis, expected):
for i in range(3):
offset = vector2.Vector2(random.uniform(-1000, 1000), random.uniform(-1000, 1000))
new_points = []
for pt in points:
new_points.append(pt - offset)
new_poly = polygon2.Polygon2(new_points)
proj = polygon2.Polygon2.project_onto_axis(new_poly, offset, axis)
help_msg = "points={}, axis={}, expected={} proj={} [offset = {}, new_points={}]".format(points, axis, expected, proj, offset, new_points)
self.assertAlmostEqual(expected.min, proj.min, help_msg)
self.assertAlmostEqual(expected.max, proj.max, help_msg)
def test_project_onto_axis(self):
poly = polygon2.Polygon2.from_regular(4, 1, math.pi / 4)
_axis = vector2.Vector2(0, 1)
self._proj_onto_axis_fuzzer(poly.points, _axis, axisall.AxisAlignedLine(_axis, 0, 1))
_axis2 = vector2.Vector2(1, 0)
self._proj_onto_axis_fuzzer(poly.points, _axis2, axisall.AxisAlignedLine(_axis2, 0, 1))
_axis3 = vector2.Vector2(0.70710678118, 0.70710678118)
self._proj_onto_axis_fuzzer(poly.points, _axis3, axisall.AxisAlignedLine(_axis3, 0, 1.41421356236))
def _contains_point_fuzzer(self, points, point, expected_edge, expected_contains):
for i in range(3):
offset = vector2.Vector2(random.uniform(-1000, 1000), random.uniform(-1000, 1000))
new_points = []
for pt in points:
new_points.append(pt - offset)
new_poly = polygon2.Polygon2(new_points)
edge, cont = polygon2.Polygon2.contains_point(new_poly, offset, point)
help_msg = "points={}, point={}, offset={}, expected_edge={}, expected_contains={}, edge={}, contains={}".format(points, point, repr(offset), expected_edge, expected_contains, edge, cont)
self.assertEqual(expected_edge, edge, msg=help_msg)
self.assertEqual(expected_contains, cont, msg=help_msg)
def test_contains_point_regressions(self):
# the fuzzer actually caught an error. put them in here to ensure they don't
# come back. The first issue was math.isclose without abs_tol on values close
# to 0 is too strict
poly = polygon2.Polygon2([ (2, 3), (3, 5), (5, 4), (3, 2) ])
regression_tests = [ (poly.points, vector2.Vector2(4, 3), True, False, vector2.Vector2(-509.47088031477625, 57.99699262312129)) ]
for regression in regression_tests:
points = regression[0]
point = regression[1]
expected_edge = regression[2]
expected_contains = regression[3]
offset = regression[4]
new_points = []
for pt in points:
new_points.append(pt - offset)
new_poly = polygon2.Polygon2(new_points)
edge, cont = polygon2.Polygon2.contains_point(new_poly, offset, point)
help_msg = "regression failed.\n\npoints={}, point={}, offset={}, expected_edge={}, expected_contains={}, edge={}, contains={}".format(points, point, offset, expected_edge, expected_contains, edge, cont)
self.assertEqual(expected_edge, edge, msg=help_msg)
self.assertEqual(expected_contains, cont, msg=help_msg)
def test_contains_point_false(self):
poly = polygon2.Polygon2([ (1, 1), (2, 3), (4, 0) ])
self._contains_point_fuzzer(poly.points, vector2.Vector2(1, 2), False, False)
self._contains_point_fuzzer(poly.points, vector2.Vector2(4, 2), False, False)
self._contains_point_fuzzer(poly.points, vector2.Vector2(3, 0), False, False)
def test_contains_point_edge(self):
poly = polygon2.Polygon2([ (2, 3), (3, 5), (5, 4), (3, 2) ])
self._contains_point_fuzzer(poly.points, vector2.Vector2(4, 3), True, False)
self._contains_point_fuzzer(poly.points, vector2.Vector2(2.5, 2.5), True, False)
self._contains_point_fuzzer(poly.points, vector2.Vector2(4, 4.5), True, False)
def test_contains_point_contained(self):
poly = polygon2.Polygon2([ (-3, -6), (-2, -3), (2, -2), (0, -5) ])
self._contains_point_fuzzer(poly.points, vector2.Vector2(-1, -4), False, True)
self._contains_point_fuzzer(poly.points, vector2.Vector2(-1, -5), False, True)
self._contains_point_fuzzer(poly.points, vector2.Vector2(1, -3), False, True)
def _find_intersection_fuzzer(self, points1, points2, exp_touching, exp_overlap, exp_mtv):
if type(points1) != list:
points1 = points1.points
if type(points2) != list:
points2 = points2.points
for i in range(3):
offset1 = vector2.Vector2(random.uniform(-1000, 1000), random.uniform(-1000, 1000))
offset2 = vector2.Vector2(random.uniform(-1000, 1000), random.uniform(-1000, 1000))
new_points1 = []
for pt in points1:
new_points1.append(pt - offset1)
new_points2 = []
for pt in points2:
new_points2.append(pt - offset2)
new_poly1 = polygon2.Polygon2(new_points1)
new_poly2 = polygon2.Polygon2(new_points2)
touch, overlap, mtv = polygon2.Polygon2.find_intersection(new_poly1, new_poly2, offset1, offset2, True)
_invtouch, _invoverlap, _invmtv = polygon2.Polygon2.find_intersection(new_poly2, new_poly1, offset2, offset1, True)
help_msg = "\n\npoints1={}, points2={}, offset1={}, offset2={}\n\nexp_touching={}, " \
"exp_overlap={}, exp_mtv={}\n\ntouch={}, overlap={}, mtv={}\n\n" \
"_invtouch={}, _invoverlap={}, _invmtv={}\n\n" \
"orig_poly1={}\n\n" \
"orig_poly2={}\n\n".format(points1, points2, offset1,
offset2, exp_touching, exp_overlap, exp_mtv, touch, overlap, mtv,
_invtouch, _invoverlap, _invmtv, polygon2.Polygon2(points1),
polygon2.Polygon2(points2))
self.assertEqual(exp_touching, touch, msg=help_msg)
self.assertEqual(exp_overlap, overlap, msg=help_msg)
self.assertEqual(exp_touching, _invtouch, msg=help_msg)
self.assertEqual(exp_overlap, _invoverlap, msg=help_msg)
if exp_mtv is not None:
self.assertIsNotNone(mtv, msg=help_msg)
exp_mult_x = exp_mtv[0] * exp_mtv[1].x
exp_mult_y = exp_mtv[0] * exp_mtv[1].y
mult_x = mtv[0] * mtv[1].x
mult_y = mtv[0] * mtv[1].y
self.assertAlmostEqual(exp_mult_x, mult_x, msg=help_msg)
self.assertAlmostEqual(exp_mult_y, mult_y, msg=help_msg)
self.assertIsNotNone(_invmtv, msg=help_msg)
inv_mult_x = _invmtv[0] * _invmtv[1].x
inv_mult_y = _invmtv[0] * _invmtv[1].y
self.assertAlmostEqual(-exp_mult_x, inv_mult_x, msg=help_msg)
self.assertAlmostEqual(-exp_mult_y, inv_mult_y, msg=help_msg)
else:
self.assertIsNone(mtv, msg=help_msg)
self.assertIsNone(_invmtv, msg=help_msg)
_touch, _overlap, _mtv = polygon2.Polygon2.find_intersection(new_poly1, new_poly2, offset1, offset2, False)
self.assertEqual(exp_touching, _touch, msg=help_msg)
self.assertEqual(exp_overlap, _overlap, msg=help_msg)
self.assertIsNone(_mtv, msg=help_msg)
def test_find_intersection_false(self):
poly1 = polygon2.Polygon2([ (0, 1), (0, 3), (5, 3), (5, 1) ])
poly2 = polygon2.Polygon2([ (3, 4), (2, 6), (7, 5) ])
poly3 = polygon2.Polygon2([ (6, 2), (9, 3), (9, 1) ])
self._find_intersection_fuzzer(poly1, poly2, False, False, None)
self._find_intersection_fuzzer(poly1, poly3, False, False, None)
self._find_intersection_fuzzer(poly2, poly3, False, False, None)
def test_find_intersection_touching(self):
poly1 = polygon2.Polygon2([ (3, 3), (3, 6), (7, 5), (5, 3) ])
poly2 = polygon2.Polygon2([ (4, 3), (8, 2), (6, -1) ])
poly3 = polygon2.Polygon2([ (5, 5.5), (1, 6.5), (3, 7), (7, 6) ])
self._find_intersection_fuzzer(poly1, poly2, True, False, None)
self._find_intersection_fuzzer(poly1, poly3, True, False, None)
def test_find_intersection_overlapping(self):
poly1 = polygon2.Polygon2([ (2, 1), (4, 3), (6, 3), (6, 1) ])
poly2 = polygon2.Polygon2([ (5, 2.5), (5, 5), (7, 5) ])
poly3 = polygon2.Polygon2([ (1, 3), (3, 3), (3, 1), (1, 1) ])
self._find_intersection_fuzzer(poly1, poly2, False, True, (0.5, vector2.Vector2(0, -1)))
self._find_intersection_fuzzer(poly1, poly3, False, True, (0.70710678118, vector2.Vector2(0.70710678118, -0.70710678118)))
|
TestPolygon
|
python
|
pytorch__pytorch
|
test/inductor/test_quantization.py
|
{
"start": 727,
"end": 1200
}
|
class ____(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = torch.nn.Linear(1, 64)
self.fc2 = torch.nn.Linear(64, 64)
self.fc3 = torch.nn.Linear(64, 64)
self.fc4 = torch.nn.Linear(64, 1)
def forward(self, x):
x = torch.relu(self.fc1(x))
tanh_x = torch.tanh(x)
x = torch.relu(self.fc2(x))
x = torch.relu(self.fc3(tanh_x))
x = self.fc4(x)
return x
|
FeedforwardNN
|
python
|
sqlalchemy__sqlalchemy
|
test/dialect/oracle/test_types.py
|
{
"start": 2254,
"end": 9280
}
|
class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = oracle.OracleDialect()
def test_no_clobs_for_string_params(self):
"""test that simple string params get a DBAPI type of
VARCHAR, not CLOB. This is to prevent setinputsizes
from setting up cx_oracle.CLOBs on
string-based bind params [ticket:793]."""
class FakeDBAPI:
def __getattr__(self, attr):
return attr
dialect = oracle.OracleDialect()
dbapi = FakeDBAPI()
b = bindparam("foo", "hello world!")
eq_(b.type.dialect_impl(dialect).get_dbapi_type(dbapi), "STRING")
b = bindparam("foo", "hello world!")
eq_(b.type.dialect_impl(dialect).get_dbapi_type(dbapi), "STRING")
def test_long(self):
self.assert_compile(oracle.LONG(), "LONG")
@testing.combinations(
(Date(), cx_oracle._CXOracleDate),
(oracle.OracleRaw(), cx_oracle._OracleRaw),
(String(), String),
(VARCHAR(), cx_oracle._OracleString),
(DATE(), cx_oracle._CXOracleDate),
(oracle.DATE(), oracle.DATE),
(String(50), cx_oracle._OracleString),
(Unicode(), cx_oracle._OracleUnicodeStringCHAR),
(Text(), cx_oracle._OracleText),
(UnicodeText(), cx_oracle._OracleUnicodeTextCLOB),
(CHAR(), cx_oracle._OracleChar),
(NCHAR(), cx_oracle._OracleNChar),
(NVARCHAR(), cx_oracle._OracleUnicodeStringNCHAR),
(oracle.RAW(50), cx_oracle._OracleRaw),
argnames="start, test",
)
@testing.combinations(cx_oracle, oracledb, argnames="module")
def test_type_adapt(self, start, test, module):
dialect = module.dialect()
assert isinstance(
start.dialect_impl(dialect), test
), "wanted %r got %r" % (test, start.dialect_impl(dialect))
@testing.combinations(
(String(), String),
(VARCHAR(), cx_oracle._OracleString),
(String(50), cx_oracle._OracleString),
(Unicode(), cx_oracle._OracleUnicodeStringNCHAR),
(Text(), cx_oracle._OracleText),
(UnicodeText(), cx_oracle._OracleUnicodeTextNCLOB),
(NCHAR(), cx_oracle._OracleNChar),
(NVARCHAR(), cx_oracle._OracleUnicodeStringNCHAR),
argnames="start, test",
)
@testing.combinations(cx_oracle, oracledb, argnames="module")
def test_type_adapt_nchar(self, start, test, module):
dialect = module.dialect(use_nchar_for_unicode=True)
assert isinstance(
start.dialect_impl(dialect), test
), "wanted %r got %r" % (test, start.dialect_impl(dialect))
def test_raw_compile(self):
self.assert_compile(oracle.RAW(), "RAW")
self.assert_compile(oracle.RAW(35), "RAW(35)")
def test_char_length(self):
self.assert_compile(VARCHAR(50), "VARCHAR(50 CHAR)")
oracle8dialect = oracle.dialect()
oracle8dialect.server_version_info = (8, 0)
self.assert_compile(VARCHAR(50), "VARCHAR(50)", dialect=oracle8dialect)
self.assert_compile(NVARCHAR(50), "NVARCHAR2(50)")
self.assert_compile(CHAR(50), "CHAR(50)")
@testing.combinations(
(String(50), "VARCHAR2(50 CHAR)"),
(Unicode(50), "VARCHAR2(50 CHAR)"),
(NVARCHAR(50), "NVARCHAR2(50)"),
(VARCHAR(50), "VARCHAR(50 CHAR)"),
(oracle.NVARCHAR2(50), "NVARCHAR2(50)"),
(oracle.VARCHAR2(50), "VARCHAR2(50 CHAR)"),
(String(), "VARCHAR2"),
(Unicode(), "VARCHAR2"),
(NVARCHAR(), "NVARCHAR2"),
(VARCHAR(), "VARCHAR"),
(oracle.NVARCHAR2(), "NVARCHAR2"),
(oracle.VARCHAR2(), "VARCHAR2"),
)
def test_varchar_types(self, typ, exp):
dialect = oracle.dialect()
self.assert_compile(typ, exp, dialect=dialect)
@testing.combinations(
(String(50), "VARCHAR2(50 CHAR)"),
(Unicode(50), "NVARCHAR2(50)"),
(NVARCHAR(50), "NVARCHAR2(50)"),
(VARCHAR(50), "VARCHAR(50 CHAR)"),
(oracle.NVARCHAR2(50), "NVARCHAR2(50)"),
(oracle.VARCHAR2(50), "VARCHAR2(50 CHAR)"),
(String(), "VARCHAR2"),
(Unicode(), "NVARCHAR2"),
(NVARCHAR(), "NVARCHAR2"),
(VARCHAR(), "VARCHAR"),
(oracle.NVARCHAR2(), "NVARCHAR2"),
(oracle.VARCHAR2(), "VARCHAR2"),
)
def test_varchar_use_nchar_types(self, typ, exp):
dialect = oracle.dialect(use_nchar_for_unicode=True)
self.assert_compile(typ, exp, dialect=dialect)
@testing.combinations(
(oracle.INTERVAL(), "INTERVAL DAY TO SECOND"),
(oracle.INTERVAL(day_precision=3), "INTERVAL DAY(3) TO SECOND"),
(oracle.INTERVAL(second_precision=5), "INTERVAL DAY TO SECOND(5)"),
(
oracle.INTERVAL(day_precision=2, second_precision=5),
"INTERVAL DAY(2) TO SECOND(5)",
),
(
sqltypes.Interval(day_precision=9, second_precision=3),
"INTERVAL DAY(9) TO SECOND(3)",
),
)
def test_interval(self, type_, expected):
self.assert_compile(type_, expected)
def test_interval_coercion_literal(self):
expr = column("bar", oracle.INTERVAL) == datetime.timedelta(days=1)
eq_(expr.right.type._type_affinity, sqltypes.Interval)
@testing.combinations(
("sa", sqltypes.Float(), "FLOAT"),
("sa", sqltypes.Double(), "DOUBLE PRECISION"),
("sa", sqltypes.FLOAT(), "FLOAT"),
("sa", sqltypes.REAL(), "REAL"),
("sa", sqltypes.DOUBLE(), "DOUBLE"),
("sa", sqltypes.DOUBLE_PRECISION(), "DOUBLE PRECISION"),
("oracle", oracle.FLOAT(), "FLOAT"),
("oracle", oracle.DOUBLE_PRECISION(), "DOUBLE PRECISION"),
("oracle", oracle.REAL(), "REAL"),
("oracle", oracle.BINARY_DOUBLE(), "BINARY_DOUBLE"),
("oracle", oracle.BINARY_FLOAT(), "BINARY_FLOAT"),
id_="ira",
)
def test_float_type_compile(self, type_, sql_text):
self.assert_compile(type_, sql_text)
@testing.combinations(
(
text("select :parameter from dual").bindparams(
parameter=datetime.timedelta(days=2)
),
"select NUMTODSINTERVAL(172800.0, 'SECOND') from dual",
),
(
text("SELECT :parameter from dual").bindparams(
parameter=datetime.timedelta(days=1, minutes=3, seconds=4)
),
"SELECT NUMTODSINTERVAL(86584.0, 'SECOND') from dual",
),
(
text("select :parameter - :parameter2 from dual").bindparams(
parameter=datetime.timedelta(days=1, minutes=3, seconds=4),
parameter2=datetime.timedelta(days=0, minutes=1, seconds=4),
),
(
"select NUMTODSINTERVAL(86584.0, 'SECOND') - "
"NUMTODSINTERVAL(64.0, 'SECOND') from dual"
),
),
)
def test_interval_literal_processor(self, type_, expected):
self.assert_compile(type_, expected, literal_binds=True)
|
DialectTypesTest
|
python
|
huggingface__transformers
|
tests/models/sam2/test_processor_sam2.py
|
{
"start": 1034,
"end": 4595
}
|
class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = Sam2Processor
def prepare_image_inputs(self):
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
"""
image_inputs = torch.randint(0, 256, size=(1, 3, 30, 400), dtype=torch.uint8)
# image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def prepare_mask_inputs(self):
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
"""
mask_inputs = torch.randint(0, 256, size=(1, 30, 400), dtype=torch.uint8)
# mask_inputs = [Image.fromarray(x) for x in mask_inputs]
return mask_inputs
def test_image_processor_no_masks(self):
image_processor = self.get_component("image_processor")
processor = self.get_processor()
image_input = self.prepare_image_inputs()
input_feat_extract = image_processor(image_input)
input_processor = processor(images=image_input)
for key in input_feat_extract.keys():
if key == "pixel_values":
for input_feat_extract_item, input_processor_item in zip(
input_feat_extract[key], input_processor[key]
):
np.testing.assert_array_equal(input_feat_extract_item, input_processor_item)
else:
self.assertEqual(input_feat_extract[key], input_processor[key])
for image in input_feat_extract.pixel_values:
self.assertEqual(image.shape, (3, 1024, 1024))
for original_size in input_feat_extract.original_sizes:
np.testing.assert_array_equal(original_size, np.array([30, 400]))
def test_image_processor_with_masks(self):
image_processor = self.get_component("image_processor")
processor = self.get_processor()
image_input = self.prepare_image_inputs()
mask_input = self.prepare_mask_inputs()
input_feat_extract = image_processor(images=image_input, segmentation_maps=mask_input, return_tensors="pt")
input_processor = processor(images=image_input, segmentation_maps=mask_input, return_tensors="pt")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
for label in input_feat_extract.labels:
self.assertEqual(label.shape, (256, 256))
@require_torch
def test_post_process_masks(self):
processor = self.get_processor()
dummy_masks = [torch.ones((1, 3, 5, 5))]
original_sizes = [[1764, 2646]]
masks = processor.post_process_masks(dummy_masks, original_sizes)
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
masks = processor.post_process_masks(dummy_masks, torch.tensor(original_sizes))
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
# should also work with np
dummy_masks = [np.ones((1, 3, 5, 5))]
masks = processor.post_process_masks(dummy_masks, np.array(original_sizes))
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
dummy_masks = [[1, 0], [0, 1]]
with self.assertRaises(TypeError):
masks = processor.post_process_masks(dummy_masks, np.array(original_sizes))
|
Sam2ProcessorTest
|
python
|
sphinx-doc__sphinx
|
tests/test_ext_napoleon/test_ext_napoleon.py
|
{
"start": 2590,
"end": 3888
}
|
class ____:
def test_unknown_app_type(self) -> None:
setup(object()) # type: ignore[arg-type]
def test_add_config_values(self) -> None:
app = mock.Mock(Sphinx)
setup(app)
for name, _default, _rebuild, _types in Config._config_values:
has_config = False
for method_name, args, _kwargs in app.method_calls:
if method_name == 'add_config_value' and args[0] == name:
has_config = True
if not has_config:
pytest.fail('Config value was not added to app %s' % name)
has_process_docstring = False
has_skip_member = False
for method_name, args, _kwargs in app.method_calls:
if method_name == 'connect':
if (
args[0] == 'autodoc-process-docstring'
and args[1] == _process_docstring
):
has_process_docstring = True
elif args[0] == 'autodoc-skip-member' and args[1] == _skip_member:
has_skip_member = True
if not has_process_docstring:
pytest.fail('autodoc-process-docstring never connected')
if not has_skip_member:
pytest.fail('autodoc-skip-member never connected')
|
TestSetup
|
python
|
cython__cython
|
Cython/Compiler/PyrexTypes.py
|
{
"start": 81307,
"end": 84221
}
|
class ____(CIntLike, CNumericType):
is_int = 1
typedef_flag = 0
exception_value = -1
def get_to_py_type_conversion(self):
if self.rank < list(rank_to_type_name).index('int'):
# This assumes sizeof(short) < sizeof(int)
return "PyLong_FromLong"
# PyLong_From[Unsigned]Long[Long]
SignWord = "" if self.signed else "Unsigned"
TypeName = "Long"
if self.rank >= list(rank_to_type_name).index('PY_LONG_LONG'):
TypeName = "LongLong"
return f"PyLong_From{SignWord}{TypeName}"
def assignable_from_resolved_type(self, src_type):
return src_type.is_int or src_type.is_enum or src_type is error_type
def overflow_check_binop(self, binop, env, const_rhs=False):
env.use_utility_code(UtilityCode.load("Common", "Overflow.c"))
type = self.empty_declaration_code()
name = self.specialization_name()
if binop == "lshift":
env.use_utility_code(TempitaUtilityCode.load_cached(
"LeftShift", "Overflow.c",
context={'TYPE': type, 'NAME': name, 'SIGNED': self.signed}))
else:
if const_rhs:
binop += "_const"
if type in ('int', 'long', 'long long'):
env.use_utility_code(TempitaUtilityCode.load_cached(
"BaseCaseSigned", "Overflow.c",
context={'INT': type, 'NAME': name}))
elif type in ('unsigned int', 'unsigned long', 'unsigned long long'):
env.use_utility_code(TempitaUtilityCode.load_cached(
"BaseCaseUnsigned", "Overflow.c",
context={'UINT': type, 'NAME': name}))
elif self.rank <= 1:
# sizeof(short) < sizeof(int)
return "__Pyx_%s_%s_no_overflow" % (binop, name)
else:
_load_overflow_base(env)
env.use_utility_code(TempitaUtilityCode.load_cached(
"SizeCheck", "Overflow.c",
context={'TYPE': type, 'NAME': name}))
env.use_utility_code(TempitaUtilityCode.load_cached(
"Binop", "Overflow.c",
context={'TYPE': type, 'NAME': name, 'BINOP': binop}))
return "__Pyx_%s_%s_checking_overflow" % (binop, name)
def _load_overflow_base(env):
env.use_utility_code(UtilityCode.load("Common", "Overflow.c"))
for type in ('int', 'long', 'long long'):
env.use_utility_code(TempitaUtilityCode.load_cached(
"BaseCaseSigned", "Overflow.c",
context={'INT': type, 'NAME': type.replace(' ', '_')}))
for type in ('unsigned int', 'unsigned long', 'unsigned long long'):
env.use_utility_code(TempitaUtilityCode.load_cached(
"BaseCaseUnsigned", "Overflow.c",
context={'UINT': type, 'NAME': type.replace(' ', '_')}))
|
CIntType
|
python
|
django-mptt__django-mptt
|
tests/myapp/tests.py
|
{
"start": 17607,
"end": 17665
}
|
class ____(TreeTestCase):
pass
|
IntraTreeMovementTestCase
|
python
|
ansible__ansible
|
test/units/playbook/test_base.py
|
{
"start": 8992,
"end": 9343
}
|
class ____(base.Base):
test_attr_parent_string = FieldAttribute(isa='string', default='A string attr for a class that may be a parent for testing')
def __init__(self):
super(ExampleParentBaseSubClass, self).__init__()
self._dep_chain = None
def get_dep_chain(self):
return self._dep_chain
|
ExampleParentBaseSubClass
|
python
|
pytorch__pytorch
|
torch/_inductor/cpu_vec_isa.py
|
{
"start": 12073,
"end": 16673
}
|
class ____(VecISA):
_bit_width = 0
_macro = [""]
_arch_flags = ""
_dtype_nelements = {}
def __str__(self) -> str:
return "INVALID_VEC_ISA"
def __bool__(self) -> bool: # type: ignore[override]
return False
__hash__: Callable[[VecISA], Any] = VecISA.__hash__ # type: ignore[assignment]
def x86_isa_checker() -> list[str]:
supported_isa: list[str] = []
def _check_and_append_supported_isa(
dest: list[str], isa_supported: bool, isa_name: str
) -> None:
if isa_supported:
dest.append(isa_name)
Arch = platform.machine()
"""
Arch value is x86_64 on Linux, and the value is AMD64 on Windows.
"""
if Arch != "x86_64" and Arch != "AMD64":
return supported_isa
avx2 = torch.cpu._is_avx2_supported()
avx512 = torch.cpu._is_avx512_supported()
amx_tile = torch.cpu._is_amx_tile_supported()
_check_and_append_supported_isa(supported_isa, avx2, "avx2")
_check_and_append_supported_isa(supported_isa, avx512, "avx512")
_check_and_append_supported_isa(supported_isa, amx_tile, "amx_tile")
return supported_isa
invalid_vec_isa = InvalidVecISA()
supported_vec_isa_list = [
VecAMX(),
VecAVX512(),
VecAVX2(),
VecNEON(),
VecSVE256(),
]
def get_isa_from_cpu_capability(
capability: Union[str, None],
vec_isa_list: list[VecISA],
invalid_vec_isa: InvalidVecISA,
):
# AMX setting is not supported in eager
# VecAMX will be prioritized for selection when setting ATEN_CPU_CAPABILITY to avx512
# TODO add sve256 support
capability_to_isa_str = {
"default": "INVALID_VEC_ISA",
"zvector": "zvector",
"vsx": "vsx",
"avx2": "avx2",
"avx512": "avx512",
}
if capability in capability_to_isa_str:
# pyrefly: ignore [index-error]
isa_str = capability_to_isa_str[capability]
if isa_str == "INVALID_VEC_ISA":
return invalid_vec_isa
for vec_isa in vec_isa_list:
if isa_str in str(vec_isa):
return vec_isa
if capability:
warnings.warn(f"ignoring invalid value for ATEN_CPU_CAPABILITY {capability}")
return vec_isa_list[0]
# Cache the cpuinfo to avoid I/O overhead. Meanwhile, the cpuinfo content
# might have too much redundant content that is useless for ISA check. Hence,
# we only cache some key isa information.
@functools.cache
def valid_vec_isa_list() -> list[VecISA]:
isa_list: list[VecISA] = []
if sys.platform == "darwin" and platform.processor() == "arm":
isa_list.append(VecNEON())
if sys.platform not in ["linux", "win32"]:
return isa_list
arch = platform.machine()
if arch == "s390x":
with open("/proc/cpuinfo") as _cpu_info:
while True:
line = _cpu_info.readline()
if not line:
break
# process line
featuresmatch = re.match(r"^features\s*:\s*(.*)$", line)
if featuresmatch:
for group in featuresmatch.groups():
if re.search(r"[\^ ]+vxe[\$ ]+", group):
isa_list.append(VecZVECTOR())
break
elif arch == "ppc64le":
isa_list.append(VecVSX())
elif arch == "aarch64":
if torch.backends.cpu.get_cpu_capability() == "SVE256":
isa_list.append(VecSVE256())
else:
isa_list.append(VecNEON())
elif arch in ["x86_64", "AMD64"]:
"""
arch value is x86_64 on Linux, and the value is AMD64 on Windows.
"""
_cpu_supported_x86_isa = x86_isa_checker()
isa_list.extend(
isa
for isa in supported_vec_isa_list
if all(flag in _cpu_supported_x86_isa for flag in str(isa).split()) and isa
)
return isa_list
def pick_vec_isa() -> VecISA:
if config.is_fbcode() and (platform.machine() in ["x86_64", "AMD64"]):
return VecAVX2()
_valid_vec_isa_list: list[VecISA] = valid_vec_isa_list()
if not _valid_vec_isa_list:
return invalid_vec_isa
# If the simdlen is None, set simdlen based on the environment ATEN_CPU_CAPABILITY
# to control CPU vec ISA
if config.cpp.simdlen is None:
return get_isa_from_cpu_capability(
os.getenv("ATEN_CPU_CAPABILITY"), _valid_vec_isa_list, invalid_vec_isa
)
for isa in _valid_vec_isa_list:
if config.cpp.simdlen == isa.bit_width():
return isa
return invalid_vec_isa
|
InvalidVecISA
|
python
|
tiangolo__fastapi
|
tests/test_security_openid_connect_optional.py
|
{
"start": 298,
"end": 2488
}
|
class ____(BaseModel):
username: str
def get_current_user(oauth_header: Optional[str] = Security(oid)):
if oauth_header is None:
return None
user = User(username=oauth_header)
return user
@app.get("/users/me")
def read_current_user(current_user: Optional[User] = Depends(get_current_user)):
if current_user is None:
return {"msg": "Create an account first"}
return current_user
client = TestClient(app)
def test_security_oauth2():
response = client.get("/users/me", headers={"Authorization": "Bearer footokenbar"})
assert response.status_code == 200, response.text
assert response.json() == {"username": "Bearer footokenbar"}
def test_security_oauth2_password_other_header():
response = client.get("/users/me", headers={"Authorization": "Other footokenbar"})
assert response.status_code == 200, response.text
assert response.json() == {"username": "Other footokenbar"}
def test_security_oauth2_password_bearer_no_header():
response = client.get("/users/me")
assert response.status_code == 200, response.text
assert response.json() == {"msg": "Create an account first"}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/users/me": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
"summary": "Read Current User",
"operationId": "read_current_user_users_me_get",
"security": [{"OpenIdConnect": []}],
}
}
},
"components": {
"securitySchemes": {
"OpenIdConnect": {
"type": "openIdConnect",
"openIdConnectUrl": "/openid",
}
}
},
}
|
User
|
python
|
facebook__pyre-check
|
client/commands/initialize.py
|
{
"start": 817,
"end": 9241
}
|
class ____(Exception):
pass
def _create_source_directory_element(source: str) -> Union[str, Dict[str, str]]:
if source == ".":
return source
if not Path(source).is_dir():
raise InitializationException(f"No directory found at `{source}`.")
# Imports are likely relative to the parent of the package.
package_root = find_parent_directory_containing_file(Path(source), "__init__.py")
if package_root is not None:
return {
"import_root": os.path.relpath(str(package_root.parent), "."),
"source": source,
}
else:
return source
def _check_configuration_file_location(
configuration_path: Path, current_directory: Path, global_root: Optional[Path]
) -> None:
if os.path.isfile(configuration_path):
if global_root:
error = (
"Local configurations must be created in subdirectories of "
+ f"`{str(current_directory)}` as it already contains a "
+ "`.pyre_configuration`."
)
else:
error = (
"A pyre configuration already exists at "
+ f"`{str(configuration_path)}`."
)
raise InitializationException(error)
local_configuration_path = current_directory / LOCAL_CONFIGURATION_FILE
if local_configuration_path.is_file():
raise InitializationException(
"A local pyre configuration already exists at "
+ f"`{str(local_configuration_path)}`."
)
def _get_local_configuration(
current_directory: Path, buck_root: Optional[Path]
) -> Dict[str, Any]:
configuration: Dict[str, Any] = {}
using_targets = log.get_yes_no_input("Is your project built with Buck?")
if using_targets:
targets = log.get_input(
"Which buck target(s) should pyre analyze?\n"
+ " Default: Analyze all targets under the configuration (assume fbcode).\n"
+ " (Ex. `fbcode//target:a, fbsource//target/b/...`)\n"
).strip()
if len(targets) == 0:
if buck_root:
root = current_directory.relative_to(buck_root)
configuration["targets"] = [f"fbcode//{str(root)}/..."]
else:
raise InitializationException(
"No `.buckconfig` found with which to create a default target."
)
else:
configuration["targets"] = [target.strip() for target in targets.split(",")]
else:
source_directories = log.get_input(
"Which directory(ies) should pyre analyze?\n"
)
configuration["source_directories"] = [
directory.strip() for directory in source_directories.split(",")
]
# TODO(T132432706): Ask for oncall in global configuration, but not in OSS.
oncall = log.get_input("What oncall is responsible for this project?\n").strip()
if oncall:
configuration["oncall"] = oncall
return configuration
def _create_watchman_configuration() -> None:
watchman_configuration_path = os.path.abspath(".watchmanconfig")
watchman_path = shutil.which("watchman")
if watchman_path is not None and log.get_yes_no_input(
"Also initialize watchman in the current directory?"
):
try:
if not os.path.isfile(watchman_configuration_path):
with open(watchman_configuration_path, "w+") as configuration_file:
configuration_file.write("{}\n")
LOG.warning(
"Created basic `.watchmanconfig` at "
+ f"{watchman_configuration_path}"
)
subprocess.run(
[watchman_path, "watch-project", "."],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
LOG.warning("Current directory is being watched by `watchman`.")
except IsADirectoryError:
LOG.warning(f"Unable to write to {watchman_configuration_path}.")
except subprocess.CalledProcessError:
LOG.warning("Failed to run `watchman watch-project .`.")
def _get_configuration(
taint_models_directory_required: Optional[bool] = False,
) -> Dict[str, Any]:
configuration: Dict[str, Any] = {}
_create_watchman_configuration()
binary_path = shutil.which(BINARY_NAME)
if binary_path is None:
binary_path = shutil.which(
os.path.join(os.path.dirname(sys.argv[0]), BINARY_NAME)
)
if binary_path is None:
binary_path = os.path.abspath(
log.get_input(f"No `{BINARY_NAME}` found, enter the path manually: ")
)
if not os.path.isfile(binary_path):
raise InitializationException(
f"Unable to locate binary at `{binary_path}`."
)
configuration["binary"] = binary_path
else:
LOG.info(f"Binary found at `{binary_path}`")
typeshed: Optional[Path] = find_typeshed()
if typeshed is None:
typeshed = Path(
log.get_input("Unable to locate typeshed, please enter its root: ")
).resolve()
if not typeshed.is_dir():
raise InitializationException(
f"No typeshed directory found at `{typeshed}`."
)
configuration["typeshed"] = str(typeshed)
else:
LOG.info(f"Typeshed found at `{typeshed}``")
if taint_models_directory_required:
taint_models_paths = find_taint_models_directories()
if taint_models_paths is None:
taint_models_paths = [
Path(
log.get_input(
"Unable to find taint models directory, please enter its root: "
)
).resolve()
]
configuration["taint_models_path"] = [str(path) for path in taint_models_paths]
source_directory_input = log.get_optional_input(
"Which directory(ies) should pyre analyze?", "."
)
source_directory_paths = [
directory.strip() for directory in source_directory_input.split(",")
]
configuration["source_directories"] = [
_create_source_directory_element(path) for path in source_directory_paths
]
LOG.info(
"Pyre will automatically add typed pacakages installed on your system "
"as type checking dependencies, according to PEP561. You can edit the "
"configuration file if you want to change the behavior. "
)
configuration["site_package_search_strategy"] = "pep561"
return configuration
def get_configuration_and_path(
taint_models_directory_required: Optional[bool] = False,
) -> Tuple[Dict[str, Any], Path]:
global_root: Optional[Path] = find_global_root(Path("."))
buck_root: Optional[Path] = find_parent_directory_containing_file(
Path("."), ".buckconfig"
)
current_directory: Path = Path(os.getcwd())
configuration_path = current_directory / JSON_CONFIGURATION_FILE
_check_configuration_file_location(
configuration_path, current_directory, global_root
)
local_configuration_path = current_directory / LOCAL_CONFIGURATION_FILE
if global_root:
configuration_path = local_configuration_path
configuration = _get_local_configuration(current_directory, buck_root)
else:
configuration = _get_configuration(taint_models_directory_required)
return configuration, configuration_path
def write_configuration(
configuration: Dict[str, Any], configuration_path: Path
) -> None:
with open(configuration_path, "w+") as configuration_file:
json.dump(configuration, configuration_file, sort_keys=True, indent=2)
configuration_file.write("\n")
def run() -> commands.ExitCode:
try:
configuration, configuration_path = get_configuration_and_path()
write_configuration(configuration, configuration_path)
LOG.log(
log.SUCCESS,
"Successfully initialized pyre!\n"
+ f" You can view the configuration at `{configuration_path}`.\n"
+ " You can now run the type checker with `pyre`.",
)
return commands.ExitCode.SUCCESS
except InitializationException as error:
LOG.error(f"{error}")
return commands.ExitCode.FAILURE
|
InitializationException
|
python
|
tensorflow__tensorflow
|
tensorflow/lite/python/lite_flex_test.py
|
{
"start": 1864,
"end": 5472
}
|
class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
def testFlexMode(self, enable_mlir):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(shape=[1, 4], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])
converter.experimental_new_converter = enable_mlir
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check the model works with TensorFlow ops.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
test_input = np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], test_input)
interpreter.invoke()
output_details = interpreter.get_output_details()
expected_output = np.array([[2.0, 4.0, 6.0, 8.0]], dtype=np.float32)
output_data = interpreter.get_tensor(output_details[0]['index'])
self.assertTrue((expected_output == output_data).all())
def testFlexWithAutomaticPassThrough(self):
# Create a graph that has one L2Loss op.
with ops.Graph().as_default():
with session.Session() as sess:
in_tensor = array_ops.placeholder(
shape=[4], dtype=dtypes.float32, name='input')
out_tensor = nn_ops.l2_loss(in_tensor)
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])
converter._experimental_allow_all_select_tf_ops = True
tflite_model = converter.convert()
self.assertTrue(tflite_model)
self.assertIn('FlexL2Loss', tflite_test_util.get_ops_list(tflite_model))
def testDeprecatedFlags(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(shape=[1, 4], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.target_ops = set([lite.OpsSet.SELECT_TF_OPS])
# Ensure `target_ops` is set to the correct value after flag deprecation.
self.assertEqual(converter.target_ops, set([lite.OpsSet.SELECT_TF_OPS]))
self.assertEqual(converter.target_spec.supported_ops,
set([lite.OpsSet.SELECT_TF_OPS]))
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check the model works with TensorFlow ops.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
test_input = np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], test_input)
interpreter.invoke()
output_details = interpreter.get_output_details()
expected_output = np.array([[2.0, 4.0, 6.0, 8.0]], dtype=np.float32)
output_data = interpreter.get_tensor(output_details[0]['index'])
self.assertTrue((expected_output == output_data).all())
|
FromSessionTest
|
python
|
python-markdown__markdown
|
tests/test_syntax/extensions/test_code_hilite.py
|
{
"start": 29738,
"end": 29922
}
|
class ____(treeprocessors.Treeprocessor):
def run(self, root: etree.Element):
pre = etree.SubElement(root, 'pre')
etree.SubElement(pre, 'code')
|
_AddCodeTagTreeprocessor
|
python
|
chardet__chardet
|
chardet/chardistribution.py
|
{
"start": 4663,
"end": 5429
}
|
class ____(CharDistributionAnalysis):
def __init__(self) -> None:
super().__init__()
self._char_to_freq_order = EUCTW_CHAR_TO_FREQ_ORDER
self._table_size = EUCTW_TABLE_SIZE
self.typical_distribution_ratio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, byte_str: Union[bytes, bytearray]) -> int: # type: ignore[reportIncompatibleMethodOverride]
# for euc-TW encoding, we are interested
# first byte range: 0xc4 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = byte_str[0]
if first_char >= 0xC4:
return 94 * (first_char - 0xC4) + byte_str[1] - 0xA1
return -1
|
EUCTWDistributionAnalysis
|
python
|
huggingface__transformers
|
src/transformers/models/align/processing_align.py
|
{
"start": 975,
"end": 2358
}
|
class ____(ProcessorMixin):
r"""
Constructs an ALIGN processor which wraps [`EfficientNetImageProcessor`] and
[`BertTokenizer`]/[`BertTokenizerFast`] into a single processor that inherits both the image processor and
tokenizer functionalities. See the [`~AlignProcessor.__call__`] and [`~OwlViTProcessor.decode`] for more
information.
The preferred way of passing kwargs is as a dictionary per modality, see usage example below.
```python
from transformers import AlignProcessor
from PIL import Image
model_id = "kakaobrain/align-base"
processor = AlignProcessor.from_pretrained(model_id)
processor(
images=your_pil_image,
text=["What is that?"],
images_kwargs = {"crop_size": {"height": 224, "width": 224}},
text_kwargs = {"padding": "do_not_pad"},
common_kwargs = {"return_tensors": "pt"},
)
```
Args:
image_processor ([`EfficientNetImageProcessor`]):
The image processor is a required input.
tokenizer ([`BertTokenizer`, `BertTokenizerFast`]):
The tokenizer is a required input.
"""
valid_processor_kwargs = AlignProcessorKwargs
def __init__(self, image_processor, tokenizer):
super().__init__(image_processor, tokenizer)
__all__ = ["AlignProcessor"]
|
AlignProcessor
|
python
|
django__django
|
tests/auth_tests/test_validators.py
|
{
"start": 7648,
"end": 11907
}
|
class ____(TestCase):
def test_validate(self):
user = User.objects.create_user(
username="testclient",
password="password",
email="testclient@example.com",
first_name="Test",
last_name="Client",
)
expected_error = "The password is too similar to the %s."
self.assertIsNone(UserAttributeSimilarityValidator().validate("testclient"))
with self.assertRaises(ValidationError) as cm:
UserAttributeSimilarityValidator().validate("testclient", user=user)
self.assertEqual(cm.exception.messages, [expected_error % "username"])
self.assertEqual(cm.exception.error_list[0].code, "password_too_similar")
with self.assertRaises(ValidationError) as cm:
UserAttributeSimilarityValidator().validate("example.com", user=user)
self.assertEqual(cm.exception.messages, [expected_error % "email address"])
with self.assertRaises(ValidationError) as cm:
UserAttributeSimilarityValidator(
user_attributes=["first_name"],
max_similarity=0.3,
).validate("testclient", user=user)
self.assertEqual(cm.exception.messages, [expected_error % "first name"])
# max_similarity=1 doesn't allow passwords that are identical to the
# attribute's value.
with self.assertRaises(ValidationError) as cm:
UserAttributeSimilarityValidator(
user_attributes=["first_name"],
max_similarity=1,
).validate(user.first_name, user=user)
self.assertEqual(cm.exception.messages, [expected_error % "first name"])
# Very low max_similarity is rejected.
msg = "max_similarity must be at least 0.1"
with self.assertRaisesMessage(ValueError, msg):
UserAttributeSimilarityValidator(max_similarity=0.09)
# Passes validation.
self.assertIsNone(
UserAttributeSimilarityValidator(user_attributes=["first_name"]).validate(
"testclient", user=user
)
)
@isolate_apps("auth_tests")
def test_validate_property(self):
class TestUser(models.Model):
pass
@property
def username(self):
return "foobar"
with self.assertRaises(ValidationError) as cm:
UserAttributeSimilarityValidator().validate("foobar", user=TestUser())
self.assertEqual(
cm.exception.messages, ["The password is too similar to the username."]
)
def test_help_text(self):
self.assertEqual(
UserAttributeSimilarityValidator().get_help_text(),
"Your password can’t be too similar to your other personal information.",
)
def test_custom_error(self):
class CustomUserAttributeSimilarityValidator(UserAttributeSimilarityValidator):
def get_error_message(self):
return "The password is too close to the %(verbose_name)s."
user = User.objects.create_user(
username="testclient",
password="password",
email="testclient@example.com",
first_name="Test",
last_name="Client",
)
expected_error = "The password is too close to the %s."
with self.assertRaisesMessage(ValidationError, expected_error % "username"):
CustomUserAttributeSimilarityValidator().validate("testclient", user=user)
def test_custom_error_verbose_name_not_used(self):
class CustomUserAttributeSimilarityValidator(UserAttributeSimilarityValidator):
def get_error_message(self):
return "The password is too close to a user attribute."
user = User.objects.create_user(
username="testclient",
password="password",
email="testclient@example.com",
first_name="Test",
last_name="Client",
)
expected_error = "The password is too close to a user attribute."
with self.assertRaisesMessage(ValidationError, expected_error):
CustomUserAttributeSimilarityValidator().validate("testclient", user=user)
|
UserAttributeSimilarityValidatorTest
|
python
|
ApeWorX__ape
|
src/ape/api/address.py
|
{
"start": 9569,
"end": 10215
}
|
class ____(BaseAddress):
"""
A generic blockchain address.
Typically, this is used when we do not know the contract type at a given address,
or to refer to an EOA the user doesn't personally control.
"""
def __init__(self, address: AddressType):
self._address = address
@property
def address(self) -> AddressType:
"""
The raw address type.
Returns:
:class:`~ape.types.address.AddressType`: An alias to
`ChecksumAddress <https://eth-typing.readthedocs.io/en/latest/types.html#checksumaddress>`__. # noqa: E501
"""
return self._address
|
Address
|
python
|
dask__distributed
|
distributed/objects.py
|
{
"start": 496,
"end": 682
}
|
class ____(dict):
"""A dictionary of all keys and which workers have that key."""
def _repr_html_(self):
return get_template("who_has.html.j2").render(who_has=self)
|
WhoHas
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_grpc/impl.py
|
{
"start": 3260,
"end": 21868
}
|
class ____:
"""Sentinel passed over multiprocessing Queue when launch is successful in subprocess."""
def _report_run_failed_if_not_finished(
instance: DagsterInstance, run_id: str
) -> Generator[DagsterEvent, None, None]:
check.inst_param(instance, "instance", DagsterInstance)
dagster_run = instance.get_run_by_id(run_id)
if dagster_run and (not dagster_run.is_finished):
yield instance.report_run_failed(dagster_run)
def core_execute_run(
recon_job: ReconstructableJob,
dagster_run: DagsterRun,
instance: DagsterInstance,
inject_env_vars: bool,
resume_from_failure: bool = False,
) -> Generator[DagsterEvent, None, None]:
check.inst_param(recon_job, "recon_job", ReconstructableJob)
check.inst_param(dagster_run, "dagster_run", DagsterRun)
check.inst_param(instance, "instance", DagsterInstance)
if inject_env_vars:
try:
location_name = (
dagster_run.remote_job_origin.location_name
if dagster_run.remote_job_origin
else None
)
instance.inject_env_vars(location_name)
except Exception:
yield instance.report_engine_event(
"Error while loading environment variables.",
dagster_run,
EngineEventData.engine_error(serializable_error_info_from_exc_info(sys.exc_info())),
)
yield from _report_run_failed_if_not_finished(instance, dagster_run.run_id)
raise
# try to load the pipeline definition early
try:
# add in cached metadata to load repository more efficiently
if dagster_run.has_repository_load_data:
execution_plan_snapshot = instance.get_execution_plan_snapshot(
check.not_none(dagster_run.execution_plan_snapshot_id)
)
recon_job = recon_job.with_repository_load_data(
execution_plan_snapshot.repository_load_data,
)
recon_job.get_definition()
except Exception:
yield instance.report_engine_event(
"Could not load job definition.",
dagster_run,
EngineEventData.engine_error(serializable_error_info_from_exc_info(sys.exc_info())),
)
yield from _report_run_failed_if_not_finished(instance, dagster_run.run_id)
raise
# Reload the run to verify that its status didn't change while the pipeline was loaded
dagster_run = check.not_none(
instance.get_run_by_id(dagster_run.run_id),
f"Job run with id '{dagster_run.run_id}' was deleted after the run worker started.",
)
try:
yield from execute_run_iterator(
recon_job, dagster_run, instance, resume_from_failure=resume_from_failure
)
except (KeyboardInterrupt, DagsterExecutionInterruptedError):
yield from _report_run_failed_if_not_finished(instance, dagster_run.run_id)
yield instance.report_engine_event(
message="Run execution terminated by interrupt",
dagster_run=dagster_run,
)
raise
except Exception:
yield instance.report_engine_event(
"An exception was thrown during execution that is likely a framework error, "
"rather than an error in user code.",
dagster_run,
EngineEventData.engine_error(serializable_error_info_from_exc_info(sys.exc_info())),
)
yield from _report_run_failed_if_not_finished(instance, dagster_run.run_id)
raise
@contextmanager
def _instance_from_ref_for_dynamic_partitions(
instance_ref: Optional[InstanceRef], partitions_def: PartitionsDefinition
) -> Iterator[Optional[DagsterInstance]]:
# Certain gRPC servers do not have access to the instance, so we only attempt to instantiate
# the instance when necessary for dynamic partitions: https://github.com/dagster-io/dagster/issues/12440
with (
DagsterInstance.from_ref(instance_ref)
if (instance_ref and (_partitions_def_contains_dynamic_partitions_def(partitions_def)))
else nullcontext()
) as instance:
yield instance
def _run_in_subprocess(
serialized_execute_run_args: str,
recon_pipeline: ReconstructableJob,
termination_event: Any,
subprocess_status_handler,
run_event_handler,
) -> None:
done_event = threading.Event()
start_termination_thread(termination_event, done_event)
exit_stack = ExitStack()
try:
execute_run_args = deserialize_value(serialized_execute_run_args, ExecuteExternalJobArgs)
instance_ref = check.not_none(execute_run_args.instance_ref)
instance = exit_stack.enter_context(DagsterInstance.from_ref(instance_ref))
dagster_run = instance.get_run_by_id(execute_run_args.run_id)
if not dagster_run:
raise DagsterRunNotFoundError(
f"gRPC server could not load run {execute_run_args.run_id} in order to execute it. Make sure that"
" the gRPC server has access to your run storage.",
invalid_run_id=execute_run_args.run_id,
)
pid = os.getpid()
except:
serializable_error_info = serializable_error_info_from_exc_info(sys.exc_info())
event = IPCErrorMessage(
serializable_error_info=serializable_error_info,
message=f"Error during RPC setup for executing run: {serializable_error_info.message}",
)
subprocess_status_handler(event)
subprocess_status_handler(RunInSubprocessComplete())
exit_stack.close()
# set events to stop the termination thread on exit
done_event.set()
termination_event.set()
return
subprocess_status_handler(StartRunInSubprocessSuccessful())
run_event_handler(
instance.report_engine_event(
f"Started process for run (pid: {pid}).",
dagster_run,
EngineEventData.in_process(pid),
)
)
# This is so nasty but seemingly unavoidable
# https://amir.rachum.com/blog/2017/03/03/generator-cleanup/
closed = False
try:
for event in core_execute_run(recon_pipeline, dagster_run, instance, inject_env_vars=False):
run_event_handler(event)
except GeneratorExit:
closed = True
raise
except:
# Relies on core_execute_run logging all exceptions to the event log before raising
pass
finally:
if not closed:
run_event_handler(
instance.report_engine_event(
f"Process for run exited (pid: {pid}).",
dagster_run,
)
)
subprocess_status_handler(RunInSubprocessComplete())
exit_stack.close()
# set events to stop the termination thread on exit
done_event.set()
termination_event.set()
def start_run_in_subprocess(
serialized_execute_run_args, recon_pipeline, event_queue, termination_event
):
with capture_interrupts():
_run_in_subprocess(
serialized_execute_run_args,
recon_pipeline,
termination_event,
subprocess_status_handler=event_queue.put,
run_event_handler=lambda x: None,
)
def get_external_pipeline_subset_result(
repo_def: RepositoryDefinition,
recon_repo: ReconstructableRepository,
job_name: str,
op_selection: Optional[Sequence[str]],
asset_selection: Optional[AbstractSet[AssetKey]],
asset_check_selection: Optional[AbstractSet[AssetCheckKey]],
include_parent_snapshot: bool,
):
try:
definition = repo_def.get_maybe_subset_job_def(
job_name,
op_selection=op_selection,
asset_selection=asset_selection,
asset_check_selection=asset_check_selection,
)
job_data_snap = JobDataSnap.from_job_def(
definition, include_parent_snapshot=include_parent_snapshot
)
return RemoteJobSubsetResult(
success=True,
job_data_snap=job_data_snap,
repository_python_origin=recon_repo.get_python_origin(),
)
except Exception:
return RemoteJobSubsetResult(
success=False, error=serializable_error_info_from_exc_info(sys.exc_info())
)
def get_external_schedule_execution(
repo_def: RepositoryDefinition,
instance_ref: Optional[InstanceRef],
schedule_name: str,
scheduled_execution_timestamp: Optional[float],
scheduled_execution_timezone: Optional[str],
log_key: Optional[Sequence[str]],
) -> Union["ScheduleExecutionData", ScheduleExecutionErrorSnap]:
from dagster._core.execution.resources_init import get_transitive_required_resource_keys
try:
schedule_def = repo_def.get_schedule_def(schedule_name)
scheduled_execution_time = (
datetime_from_timestamp(
scheduled_execution_timestamp,
tz=scheduled_execution_timezone or "UTC",
)
if scheduled_execution_timestamp
else None
)
required_resource_keys = get_transitive_required_resource_keys(
schedule_def.required_resource_keys, repo_def.get_top_level_resources()
)
resources_to_build = {
k: v
for k, v in repo_def.get_top_level_resources().items()
if k in required_resource_keys
}
with ScheduleEvaluationContext(
instance_ref,
scheduled_execution_time,
log_key,
repo_def.name,
schedule_name,
resources=resources_to_build,
repository_def=repo_def,
) as schedule_context:
with user_code_error_boundary(
ScheduleExecutionError,
lambda: (
f"Error occurred during the execution function for schedule {schedule_def.name}"
),
):
return schedule_def.evaluate_tick(schedule_context)
except Exception:
return ScheduleExecutionErrorSnap(
error=serializable_error_info_from_exc_info(sys.exc_info())
)
def get_external_sensor_execution(
repo_def: RepositoryDefinition,
code_location_origin: CodeLocationOrigin,
instance_ref: Optional[InstanceRef],
sensor_name: str,
last_tick_completion_timestamp: Optional[float],
last_run_key: Optional[str],
cursor: Optional[str],
log_key: Optional[Sequence[str]],
last_sensor_start_timestamp: Optional[float],
) -> Union["SensorExecutionData", SensorExecutionErrorSnap]:
from dagster._core.execution.resources_init import get_transitive_required_resource_keys
try:
sensor_def = repo_def.get_sensor_def(sensor_name)
required_resource_keys = get_transitive_required_resource_keys(
sensor_def.required_resource_keys, repo_def.get_top_level_resources()
)
resources_to_build = {
k: v
for k, v in repo_def.get_top_level_resources().items()
if k in required_resource_keys
}
with SensorEvaluationContext(
instance_ref,
last_tick_completion_time=last_tick_completion_timestamp,
last_run_key=last_run_key,
cursor=cursor,
log_key=log_key,
repository_name=repo_def.name,
repository_def=repo_def,
sensor_name=sensor_name,
resources=resources_to_build,
last_sensor_start_time=last_sensor_start_timestamp,
code_location_origin=code_location_origin,
) as sensor_context:
with user_code_error_boundary(
SensorExecutionError,
lambda: (
f"Error occurred during the execution of evaluation_fn for sensor {sensor_def.name}"
),
):
return sensor_def.evaluate_tick(sensor_context)
except Exception:
return SensorExecutionErrorSnap(error=serializable_error_info_from_exc_info(sys.exc_info()))
def _partitions_def_contains_dynamic_partitions_def(partitions_def: PartitionsDefinition) -> bool:
if isinstance(partitions_def, DynamicPartitionsDefinition):
return True
if isinstance(partitions_def, MultiPartitionsDefinition):
return any(
_partitions_def_contains_dynamic_partitions_def(dimension.partitions_def)
for dimension in partitions_def.partitions_defs
)
return False
def _get_job_partitions_and_config_for_partition_set_name(
repo_def: RepositoryDefinition,
partition_set_name: str,
) -> tuple[JobDefinition, PartitionsDefinition, PartitionedConfig]:
job_name = job_name_for_partition_set_snap_name(partition_set_name)
job_def = repo_def.get_job(job_name)
assert job_def.partitions_def and job_def.partitioned_config, (
f"Job {job_def.name} corresponding to external partition set {partition_set_name} does not"
" have a partitions_def"
)
return job_def, job_def.partitions_def, job_def.partitioned_config
def get_partition_config(
repo_def: RepositoryDefinition,
job_name: str,
partition_key: str,
instance_ref: Optional[InstanceRef] = None,
) -> Union[PartitionConfigSnap, PartitionExecutionErrorSnap]:
try:
job_def = repo_def.get_job(job_name)
with user_code_error_boundary(
PartitionExecutionError,
lambda: (
"Error occurred during the evaluation of the `run_config_for_partition`"
f" function for job {job_name}"
),
):
run_config = job_def.get_run_config_for_partition_key(partition_key)
return PartitionConfigSnap(name=partition_key, run_config=run_config)
except Exception:
return PartitionExecutionErrorSnap(
error=serializable_error_info_from_exc_info(sys.exc_info())
)
def get_partition_names(
repo_def: RepositoryDefinition, job_name: str
) -> Union[PartitionNamesSnap, PartitionExecutionErrorSnap]:
try:
job_def = repo_def.get_job(job_name)
with user_code_error_boundary(
PartitionExecutionError,
lambda: (
"Error occurred during the execution of the partition generation function for"
f" partitioned config on job '{job_def.name}'"
),
):
return PartitionNamesSnap(
partition_names=job_def.get_partition_keys(selected_asset_keys=None)
)
except Exception:
return PartitionExecutionErrorSnap(
error=serializable_error_info_from_exc_info(sys.exc_info())
)
def get_partition_tags(
repo_def: RepositoryDefinition,
job_name: str,
partition_name: str,
instance_ref: Optional[InstanceRef] = None,
) -> Union[PartitionTagsSnap, PartitionExecutionErrorSnap]:
try:
job_def = repo_def.get_job(job_name)
with user_code_error_boundary(
PartitionExecutionError,
lambda: (
"Error occurred during the evaluation of the `tags_for_partition` function for"
f" partitioned config on job '{job_def.name}'"
),
):
tags = job_def.get_tags_for_partition_key(partition_name, selected_asset_keys=None)
return PartitionTagsSnap(name=partition_name, tags=tags)
except Exception:
return PartitionExecutionErrorSnap(
error=serializable_error_info_from_exc_info(sys.exc_info())
)
def get_external_execution_plan_snapshot(
repo_def: RepositoryDefinition,
job_name: str,
args: ExecutionPlanSnapshotArgs,
):
job_def = repo_def.get_maybe_subset_job_def(
job_name,
op_selection=args.op_selection,
asset_selection=args.asset_selection,
asset_check_selection=args.asset_check_selection,
)
return snapshot_from_execution_plan(
create_execution_plan(
job_def,
run_config=args.run_config,
step_keys_to_execute=args.step_keys_to_execute,
known_state=args.known_state,
instance_ref=args.instance_ref,
repository_load_data=repo_def.repository_load_data,
),
args.job_snapshot_id,
)
def get_partition_set_execution_param_data(
repo_def: RepositoryDefinition,
partition_set_name: str,
partition_names: Sequence[str],
instance_ref: Optional[InstanceRef] = None,
) -> Union[PartitionSetExecutionParamSnap, PartitionExecutionErrorSnap]:
(
job_def,
partitions_def,
partitioned_config,
) = _get_job_partitions_and_config_for_partition_set_name(repo_def, partition_set_name)
try:
partition_data = []
for key in partition_names:
def _error_message_fn(partition_name: str):
return lambda: (
"Error occurred during the partition config and tag generation for"
f" '{partition_name}' in partitioned config on job '{job_def.name}'"
)
with user_code_error_boundary(PartitionExecutionError, _error_message_fn(key)):
run_config = partitioned_config.get_run_config_for_partition_key(key)
tags = partitioned_config.get_tags_for_partition_key(key, job_name=job_def.name)
partition_data.append(
PartitionExecutionParamSnap(
name=key,
tags=tags,
run_config=run_config,
)
)
return PartitionSetExecutionParamSnap(partition_data=partition_data)
except Exception:
return PartitionExecutionErrorSnap(
error=serializable_error_info_from_exc_info(sys.exc_info())
)
def get_notebook_data(notebook_path):
check.str_param(notebook_path, "notebook_path")
if not notebook_path.endswith(".ipynb"):
raise Exception(
"unexpected file extension for notebooks. Please provide a path that ends with"
" '.ipynb'."
)
requested_path = os.path.abspath(notebook_path)
working_dir = os.path.abspath(os.getcwd())
common_prefix = os.path.commonpath([requested_path, working_dir])
if common_prefix != working_dir:
raise Exception(
"Access denied. Notebook path must be within the current working directory."
)
with open(requested_path, "rb") as f:
content = f.read()
return content
|
StartRunInSubprocessSuccessful
|
python
|
dagster-io__dagster
|
python_modules/dagster-test/dagster_test/components/simple_pipes_script_asset.py
|
{
"start": 854,
"end": 939
}
|
class ____(Model):
asset_key: str
filename: str
|
SimplePipesScriptComponentModel
|
python
|
pytorch__pytorch
|
test/dynamo/test_functions.py
|
{
"start": 81134,
"end": 81737
}
|
class ____(torch.nn.Module):
def forward(self, L_lambda0_keywords_y_: "f32[2, 2]"):
l_lambda0_keywords_y_ = L_lambda0_keywords_y_
mul: "f32[2, 2]" = l_lambda0_keywords_y_ * l_lambda0_keywords_y_
mul_1: "f32[2, 2]" = l_lambda0_keywords_y_ * l_lambda0_keywords_y_; l_lambda0_keywords_y_ = None
mul_2: "f32[2, 2]" = torch.mul(mul, mul_1); mul = mul_1 = None
return (mul_2,)
""",
)
else:
self.assertExpectedInline(
normalize_gm(backend.graphs[0].print_readable(print_output=False)),
"""\
|
GraphModule
|
python
|
django__django
|
tests/generic_relations/tests.py
|
{
"start": 35404,
"end": 37754
}
|
class ____(TestCase):
def test_default_behavior(self):
"""
The default for for_concrete_model should be True
"""
base = ForConcreteModelModel()
base.obj = rel = ProxyRelatedModel.objects.create()
base.save()
base = ForConcreteModelModel.objects.get(pk=base.pk)
rel = ConcreteRelatedModel.objects.get(pk=rel.pk)
self.assertEqual(base.obj, rel)
def test_works_normally(self):
"""
When for_concrete_model is False, we should still be able to get
an instance of the concrete class.
"""
base = ForProxyModelModel()
base.obj = rel = ConcreteRelatedModel.objects.create()
base.save()
base = ForProxyModelModel.objects.get(pk=base.pk)
self.assertEqual(base.obj, rel)
def test_proxy_is_returned(self):
"""
Instances of the proxy should be returned when
for_concrete_model is False.
"""
base = ForProxyModelModel()
base.obj = ProxyRelatedModel.objects.create()
base.save()
base = ForProxyModelModel.objects.get(pk=base.pk)
self.assertIsInstance(base.obj, ProxyRelatedModel)
def test_query(self):
base = ForProxyModelModel()
base.obj = rel = ConcreteRelatedModel.objects.create()
base.save()
self.assertEqual(rel, ConcreteRelatedModel.objects.get(bases__id=base.id))
def test_query_proxy(self):
base = ForProxyModelModel()
base.obj = rel = ProxyRelatedModel.objects.create()
base.save()
self.assertEqual(rel, ProxyRelatedModel.objects.get(bases__id=base.id))
def test_generic_relation(self):
base = ForProxyModelModel()
base.obj = ProxyRelatedModel.objects.create()
base.save()
base = ForProxyModelModel.objects.get(pk=base.pk)
rel = ProxyRelatedModel.objects.get(pk=base.obj.pk)
self.assertEqual(base, rel.bases.get())
def test_generic_relation_set(self):
base = ForProxyModelModel()
base.obj = ConcreteRelatedModel.objects.create()
base.save()
newrel = ConcreteRelatedModel.objects.create()
newrel.bases.set([base])
newrel = ConcreteRelatedModel.objects.get(pk=newrel.pk)
self.assertEqual(base, newrel.bases.get())
|
ProxyRelatedModelTest
|
python
|
joke2k__faker
|
tests/providers/test_date_time.py
|
{
"start": 33739,
"end": 34095
}
|
class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("es_ES")
Faker.seed(0)
def test_day(self):
day = self.fake.day_of_week()
assert day in EsEsProvider.DAY_NAMES.values()
def test_month(self):
month = self.fake.month_name()
assert month in EsEsProvider.MONTH_NAMES.values()
|
TestEsEs
|
python
|
kamyu104__LeetCode-Solutions
|
Python/lru-cache.py
|
{
"start": 1620,
"end": 2359
}
|
class ____(object):
def __init__(self, capacity):
self.list = LinkedList()
self.dict = {}
self.capacity = capacity
def get(self, key):
if key not in self.dict:
return -1
val = self.dict[key].val
self.__update(key, val)
return val
def put(self, key, val):
if key not in self.dict and len(self.dict) == self.capacity:
del self.dict[self.list.head.key]
self.list.delete(self.list.head)
self.__update(key, val)
def __update(self, key, val):
if key in self.dict:
self.list.delete(self.dict[key])
node = ListNode(key, val)
self.list.insert(node)
self.dict[key] = node
|
LRUCache2
|
python
|
ray-project__ray
|
doc/source/serve/doc_code/faker.py
|
{
"start": 0,
"end": 196
}
|
class ____:
"""Mock Faker class to test fake_email_creator.py.
Meant to mock https://github.com/joke2k/faker package.
"""
def email(self) -> str:
return "fake@fake.com"
|
Faker
|
python
|
pytorch__pytorch
|
test/custom_backend/backend.py
|
{
"start": 1057,
"end": 1939
}
|
class ____(torch.nn.Module):
"""
Simple model used for testing that to_backend API supports saving, loading,
and executing in C++.
"""
def forward(self, a, b):
return (a + b, a - b)
def main():
parser = argparse.ArgumentParser(description="Lower a Module to a custom backend")
parser.add_argument("--export-module-to", required=True)
options = parser.parse_args()
# Load the library containing the custom backend.
library_path = get_custom_backend_library_path()
torch.ops.load_library(library_path)
assert library_path in torch.ops.loaded_libraries
# Lower an instance of Model to the custom backend and export it
# to the specified location.
lowered_module = to_custom_backend(torch.jit.script(Model()))
torch.jit.save(lowered_module, options.export_module_to)
if __name__ == "__main__":
main()
|
Model
|
python
|
scipy__scipy
|
benchmarks/benchmarks/stats.py
|
{
"start": 25426,
"end": 25960
}
|
class ____(Benchmark):
param_names = ['d', 'radius', 'ncandidates', 'n']
params = [
[1, 3, 5],
[0.2, 0.1, 0.05],
[30, 60, 120],
[30, 100, 300]
]
def setup(self, d, radius, ncandidates, n):
self.rng = np.random.default_rng(168525179735951991038384544)
def time_poisson_disk(self, d, radius, ncandidates, n):
seq = stats.qmc.PoissonDisk(d, radius=radius, ncandidates=ncandidates,
seed=self.rng)
seq.random(n)
|
BenchPoissonDisk
|
python
|
huggingface__transformers
|
tests/models/conditional_detr/test_modeling_conditional_detr.py
|
{
"start": 21566,
"end": 25082
}
|
class ____(unittest.TestCase):
@cached_property
def default_image_processor(self):
return (
ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
if is_vision_available()
else None
)
def test_inference_no_head(self):
model = ConditionalDetrModel.from_pretrained("microsoft/conditional-detr-resnet-50").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
encoding = image_processor(images=image, return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = model(**encoding)
expected_shape = torch.Size((1, 300, 256))
self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor(
[
[0.4223, 0.7474, 0.8760],
[0.6397, -0.2727, 0.7126],
[-0.3089, 0.7643, 0.9529],
]
).to(torch_device)
torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=2e-4, atol=2e-4)
def test_inference_object_detection_head(self):
model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50").to(
torch_device
)
image_processor = self.default_image_processor
image = prepare_img()
encoding = image_processor(images=image, return_tensors="pt").to(torch_device)
pixel_values = encoding["pixel_values"].to(torch_device)
pixel_mask = encoding["pixel_mask"].to(torch_device)
with torch.no_grad():
outputs = model(pixel_values, pixel_mask)
# verify logits + box predictions
expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels))
self.assertEqual(outputs.logits.shape, expected_shape_logits)
expected_slice_logits = torch.tensor(
[
[-10.4371, -5.7565, -8.6765],
[-10.5413, -5.8700, -8.0589],
[-10.6824, -6.3477, -8.3927],
]
).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :3, :3], expected_slice_logits, rtol=2e-4, atol=2e-4)
expected_shape_boxes = torch.Size((1, model.config.num_queries, 4))
self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes)
expected_slice_boxes = torch.tensor(
[
[0.7733, 0.6576, 0.4496],
[0.5171, 0.1184, 0.9095],
[0.8846, 0.5647, 0.2486],
]
).to(torch_device)
torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=2e-4, atol=2e-4)
# verify postprocessing
results = image_processor.post_process_object_detection(
outputs, threshold=0.3, target_sizes=[image.size[::-1]]
)[0]
expected_scores = torch.tensor([0.8330, 0.8315, 0.8039, 0.6829, 0.5354]).to(torch_device)
expected_labels = [75, 17, 17, 75, 63]
expected_slice_boxes = torch.tensor([38.3089, 72.1023, 177.6292, 118.4514]).to(torch_device)
self.assertEqual(len(results["scores"]), 5)
torch.testing.assert_close(results["scores"], expected_scores, rtol=2e-4, atol=2e-4)
self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
torch.testing.assert_close(results["boxes"][0, :], expected_slice_boxes)
|
ConditionalDetrModelIntegrationTests
|
python
|
getsentry__sentry
|
src/sentry/seer/sentry_data_models.py
|
{
"start": 2466,
"end": 2579
}
|
class ____(BaseModel):
transaction_name: str
project_id: int
issues: list[IssueDetails]
|
TransactionIssues
|
python
|
spack__spack
|
lib/spack/spack/cmd/common/arguments.py
|
{
"start": 5492,
"end": 5869
}
|
class ____(argparse.Action):
"""Creates a flag of valid dependency types from a deptype argument."""
def __call__(self, parser, namespace, values, option_string=None):
if not values or values == "all":
deptype = dt.ALL
else:
deptype = dt.canonicalize(values.split(","))
setattr(namespace, self.dest, deptype)
|
DeptypeAction
|
python
|
doocs__leetcode
|
lcof2/剑指 Offer II 101. 分割等和子串/Solution2.py
|
{
"start": 0,
"end": 434
}
|
class ____:
def canPartition(self, nums: List[int]) -> bool:
s = sum(nums)
if s % 2 != 0:
return False
m, n = len(nums), (s >> 1) + 1
dp = [False] * n
dp[0] = True
if nums[0] < n:
dp[nums[0]] = True
for i in range(1, m):
for j in range(n - 1, nums[i] - 1, -1):
dp[j] = dp[j] or dp[j - nums[i]]
return dp[-1]
|
Solution
|
python
|
doocs__leetcode
|
solution/3300-3399/3387.Maximize Amount After Two Days of Conversions/Solution.py
|
{
"start": 0,
"end": 853
}
|
class ____:
def maxAmount(
self,
initialCurrency: str,
pairs1: List[List[str]],
rates1: List[float],
pairs2: List[List[str]],
rates2: List[float],
) -> float:
d1 = self.build(pairs1, rates1, initialCurrency)
d2 = self.build(pairs2, rates2, initialCurrency)
return max(d1.get(a, 0) / r2 for a, r2 in d2.items())
def build(
self, pairs: List[List[str]], rates: List[float], init: str
) -> Dict[str, float]:
def dfs(a: str, v: float):
d[a] = v
for b, r in g[a]:
if b not in d:
dfs(b, v * r)
g = defaultdict(list)
for (a, b), r in zip(pairs, rates):
g[a].append((b, r))
g[b].append((a, 1 / r))
d = {}
dfs(init, 1)
return d
|
Solution
|
python
|
pandas-dev__pandas
|
pandas/tests/extension/base/constructors.py
|
{
"start": 189,
"end": 5623
}
|
class ____:
def test_from_sequence_from_cls(self, data):
result = type(data)._from_sequence(data, dtype=data.dtype)
tm.assert_extension_array_equal(result, data)
data = data[:0]
result = type(data)._from_sequence(data, dtype=data.dtype)
tm.assert_extension_array_equal(result, data)
def test_array_from_scalars(self, data):
scalars = [data[0], data[1], data[2]]
result = data._from_sequence(scalars, dtype=data.dtype)
assert isinstance(result, type(data))
def test_series_constructor(self, data):
result = pd.Series(data, copy=False)
assert result.dtype == data.dtype
assert len(result) == len(data)
if hasattr(result._mgr, "blocks"):
assert isinstance(result._mgr.blocks[0], EABackedBlock)
assert result._mgr.array is data
# Series[EA] is unboxed / boxed correctly
result2 = pd.Series(result)
assert result2.dtype == data.dtype
if hasattr(result._mgr, "blocks"):
assert isinstance(result2._mgr.blocks[0], EABackedBlock)
def test_series_constructor_no_data_with_index(self, dtype, na_value):
result = pd.Series(index=[1, 2, 3], dtype=dtype)
expected = pd.Series([na_value] * 3, index=[1, 2, 3], dtype=dtype)
tm.assert_series_equal(result, expected)
# GH 33559 - empty index
result = pd.Series(index=[], dtype=dtype)
expected = pd.Series([], index=pd.Index([], dtype="object"), dtype=dtype)
tm.assert_series_equal(result, expected)
def test_series_constructor_scalar_na_with_index(self, dtype, na_value):
result = pd.Series(na_value, index=[1, 2, 3], dtype=dtype)
expected = pd.Series([na_value] * 3, index=[1, 2, 3], dtype=dtype)
tm.assert_series_equal(result, expected)
def test_series_constructor_scalar_with_index(self, data, dtype):
scalar = data[0]
result = pd.Series(scalar, index=[1, 2, 3], dtype=dtype)
expected = pd.Series([scalar] * 3, index=[1, 2, 3], dtype=dtype)
tm.assert_series_equal(result, expected)
result = pd.Series(scalar, index=["foo"], dtype=dtype)
expected = pd.Series([scalar], index=["foo"], dtype=dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("from_series", [True, False])
def test_dataframe_constructor_from_dict(self, data, from_series):
if from_series:
data = pd.Series(data)
result = pd.DataFrame({"A": data})
assert result.dtypes["A"] == data.dtype
assert result.shape == (len(data), 1)
if hasattr(result._mgr, "blocks"):
assert isinstance(result._mgr.blocks[0], EABackedBlock)
assert isinstance(result._mgr.blocks[0].values, ExtensionArray)
def test_dataframe_from_series(self, data):
result = pd.DataFrame(pd.Series(data))
assert result.dtypes[0] == data.dtype
assert result.shape == (len(data), 1)
if hasattr(result._mgr, "blocks"):
assert isinstance(result._mgr.blocks[0], EABackedBlock)
assert isinstance(result._mgr.blocks[0].values, ExtensionArray)
def test_series_given_mismatched_index_raises(self, data):
msg = r"Length of values \(3\) does not match length of index \(5\)"
with pytest.raises(ValueError, match=msg):
pd.Series(data[:3], index=[0, 1, 2, 3, 4])
def test_from_dtype(self, data):
# construct from our dtype & string dtype
dtype = data.dtype
expected = pd.Series(data)
result = pd.Series(list(data), dtype=dtype)
tm.assert_series_equal(result, expected)
result = pd.Series(list(data), dtype=str(dtype))
tm.assert_series_equal(result, expected)
# gh-30280
expected = pd.DataFrame(data).astype(dtype)
result = pd.DataFrame(list(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
result = pd.DataFrame(list(data), dtype=str(dtype))
tm.assert_frame_equal(result, expected)
def test_pandas_array(self, data):
# pd.array(extension_array) should be idempotent...
result = pd.array(data)
tm.assert_extension_array_equal(result, data)
def test_pandas_array_dtype(self, data):
# ... but specifying dtype will override idempotency
result = pd.array(data, dtype=np.dtype(object))
expected = pd.arrays.NumpyExtensionArray(np.asarray(data, dtype=object))
tm.assert_equal(result, expected)
def test_construct_empty_dataframe(self, dtype):
# GH 33623
result = pd.DataFrame(columns=["a"], dtype=dtype)
expected = pd.DataFrame(
{"a": pd.array([], dtype=dtype)}, index=pd.RangeIndex(0)
)
tm.assert_frame_equal(result, expected)
def test_empty(self, dtype):
cls = dtype.construct_array_type()
result = cls._empty((4,), dtype=dtype)
assert isinstance(result, cls)
assert result.dtype == dtype
assert result.shape == (4,)
# GH#19600 method on ExtensionDtype
result2 = dtype.empty((4,))
assert isinstance(result2, cls)
assert result2.dtype == dtype
assert result2.shape == (4,)
result2 = dtype.empty(4)
assert isinstance(result2, cls)
assert result2.dtype == dtype
assert result2.shape == (4,)
|
BaseConstructorsTests
|
python
|
plotly__plotly.py
|
plotly/graph_objs/scatter3d/_hoverlabel.py
|
{
"start": 233,
"end": 11255
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatter3d"
_path_str = "scatter3d.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
"showarrow",
}
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
more two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `align`.
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatter3d.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.scatter3d.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`namelength`.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
@property
def showarrow(self):
"""
Sets whether or not to show the hover label arrow/triangle
pointing to the data point.
The 'showarrow' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showarrow"]
@showarrow.setter
def showarrow(self, val):
self["showarrow"] = val
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
showarrow=None,
**kwargs,
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatter3d.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
Returns
-------
Hoverlabel
"""
super().__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatter3d.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.Hoverlabel`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("align", arg, align)
self._set_property("alignsrc", arg, alignsrc)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bgcolorsrc", arg, bgcolorsrc)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("bordercolorsrc", arg, bordercolorsrc)
self._set_property("font", arg, font)
self._set_property("namelength", arg, namelength)
self._set_property("namelengthsrc", arg, namelengthsrc)
self._set_property("showarrow", arg, showarrow)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Hoverlabel
|
python
|
redis__redis-py
|
tests/test_connection_pool.py
|
{
"start": 9190,
"end": 15172
}
|
class ____:
def test_hostname(self):
pool = redis.ConnectionPool.from_url("redis://my.host")
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {"host": "my.host"}
def test_quoted_hostname(self):
pool = redis.ConnectionPool.from_url("redis://my %2F host %2B%3D+")
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {"host": "my / host +=+"}
def test_port(self):
pool = redis.ConnectionPool.from_url("redis://localhost:6380")
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {"host": "localhost", "port": 6380}
@skip_if_server_version_lt("6.0.0")
def test_username(self):
pool = redis.ConnectionPool.from_url("redis://myuser:@localhost")
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {"host": "localhost", "username": "myuser"}
@skip_if_server_version_lt("6.0.0")
def test_quoted_username(self):
pool = redis.ConnectionPool.from_url(
"redis://%2Fmyuser%2F%2B name%3D%24+:@localhost"
)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
"host": "localhost",
"username": "/myuser/+ name=$+",
}
def test_password(self):
pool = redis.ConnectionPool.from_url("redis://:mypassword@localhost")
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {"host": "localhost", "password": "mypassword"}
def test_quoted_password(self):
pool = redis.ConnectionPool.from_url(
"redis://:%2Fmypass%2F%2B word%3D%24+@localhost"
)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
"host": "localhost",
"password": "/mypass/+ word=$+",
}
@skip_if_server_version_lt("6.0.0")
def test_username_and_password(self):
pool = redis.ConnectionPool.from_url("redis://myuser:mypass@localhost")
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
"host": "localhost",
"username": "myuser",
"password": "mypass",
}
def test_db_as_argument(self):
pool = redis.ConnectionPool.from_url("redis://localhost", db=1)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {"host": "localhost", "db": 1}
def test_db_in_path(self):
pool = redis.ConnectionPool.from_url("redis://localhost/2", db=1)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {"host": "localhost", "db": 2}
def test_db_in_querystring(self):
pool = redis.ConnectionPool.from_url("redis://localhost/2?db=3", db=1)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {"host": "localhost", "db": 3}
def test_extra_typed_querystring_options(self):
pool = redis.ConnectionPool.from_url(
"redis://localhost/2?socket_timeout=20&socket_connect_timeout=10"
"&socket_keepalive=&retry_on_timeout=Yes&max_connections=10"
)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
"host": "localhost",
"db": 2,
"socket_timeout": 20.0,
"socket_connect_timeout": 10.0,
"retry_on_timeout": True,
}
assert pool.max_connections == 10
def test_boolean_parsing(self):
for expected, value in (
(None, None),
(None, ""),
(False, 0),
(False, "0"),
(False, "f"),
(False, "F"),
(False, "False"),
(False, "n"),
(False, "N"),
(False, "No"),
(True, 1),
(True, "1"),
(True, "y"),
(True, "Y"),
(True, "Yes"),
):
assert expected is to_bool(value)
def test_client_name_in_querystring(self):
pool = redis.ConnectionPool.from_url("redis://location?client_name=test-client")
assert pool.connection_kwargs["client_name"] == "test-client"
def test_invalid_extra_typed_querystring_options(self):
with pytest.raises(ValueError):
redis.ConnectionPool.from_url(
"redis://localhost/2?socket_timeout=_&socket_connect_timeout=abc"
)
def test_extra_querystring_options(self):
pool = redis.ConnectionPool.from_url("redis://localhost?a=1&b=2")
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {"host": "localhost", "a": "1", "b": "2"}
def test_calling_from_subclass_returns_correct_instance(self):
pool = redis.BlockingConnectionPool.from_url("redis://localhost")
assert isinstance(pool, redis.BlockingConnectionPool)
def test_client_creates_connection_pool(self):
r = redis.Redis.from_url("redis://myhost")
assert r.connection_pool.connection_class == redis.Connection
assert r.connection_pool.connection_kwargs == {"host": "myhost"}
def test_invalid_scheme_raises_error(self):
with pytest.raises(ValueError) as cm:
redis.ConnectionPool.from_url("localhost")
assert str(cm.value) == (
"Redis URL must specify one of the following schemes "
"(redis://, rediss://, unix://)"
)
def test_invalid_scheme_raises_error_when_double_slash_missing(self):
with pytest.raises(ValueError) as cm:
redis.ConnectionPool.from_url("redis:foo.bar.com:12345")
assert str(cm.value) == (
"Redis URL must specify one of the following schemes "
"(redis://, rediss://, unix://)"
)
|
TestConnectionPoolURLParsing
|
python
|
google__jax
|
tests/state_test.py
|
{
"start": 42045,
"end": 43499
}
|
class ____(NamedTuple):
vmap_index_param: VmappableIndexParam
bat_ref: np.ndarray
bat_val: np.ndarray
bat_idxs: tuple[np.ndarray, ...]
@hps.composite
def set_vmap_params(draw):
vmap_index_param: VmappableIndexParam = draw(vmappable_index_params(
op_type="swap"))
bat_ref = draw(hnp.arrays(np.float32, vmap_index_param.bat_ref_shape))
bat_idx_shapes_ = iter(vmap_index_param.bat_non_slice_idx_shapes)
bat_idxs = tuple(
draw(index_arrays(size, next(bat_idx_shapes_)))
for size, indexed in zip(
vmap_index_param.index_param.ref_shape,
vmap_index_param.index_param.indexed_dims)
if indexed)
assert next(bat_idx_shapes_, None) is None
bat_val = draw(hnp.arrays(np.float32, vmap_index_param.bat_slice_shape))
return SetVmapParams(vmap_index_param, bat_ref, bat_val, bat_idxs)
Indexer = tuple[Union[int, slice, np.ndarray]]
def _unpack_idx(idx: Indexer
) -> tuple[Sequence[int | np.ndarray], Sequence[bool]]:
indexed_dims = [type(i) != slice for i in idx]
non_slice_idx = [i for i, b in zip(idx, indexed_dims) if b]
return non_slice_idx, indexed_dims
def _pack_idx(non_slice_idx: Sequence[int | np.ndarray],
indexed_dims: Sequence[bool]) -> Indexer:
idx_ = iter(non_slice_idx)
idx = tuple(next(idx_) if b else slice(None) for b in indexed_dims)
assert next(idx_, None) is None
return idx
@jtu.thread_unsafe_test_class(condition=not jtu.hypothesis_is_thread_safe())
|
SetVmapParams
|
python
|
getsentry__sentry
|
tests/sentry/integrations/vercel/test_integration.py
|
{
"start": 1160,
"end": 20892
}
|
class ____(IntegrationTestCase):
provider = VercelIntegrationProvider
# Vercel Variables
project_id = "Qme9NXBpguaRxcXssZ1NWHVaM98MAL6PHDXUs1jPrgiM8H"
team_id = "my_team_id"
config_id = "my_config_id"
def assert_setup_flow(self, is_team=False, multi_config_org=None, no_name=False):
responses.reset()
access_json = {
"user_id": "my_user_id",
"access_token": "my_access_token",
"installation_id": self.config_id,
}
if is_team:
team_query = f"teamId={self.team_id}"
access_json["team_id"] = self.team_id
responses.add(
responses.GET,
f"{VercelClient.base_url}{VercelClient.GET_TEAM_URL % self.team_id}?{team_query}",
json={"name": "My Team Name", "slug": "my_team_slug"},
)
else:
team_query = ""
name = None if no_name else "My Name"
responses.add(
responses.GET,
f"{VercelClient.base_url}{VercelClient.GET_USER_URL}",
json={"user": {"name": name, "username": "my_user_name"}},
)
responses.add(
responses.POST, VercelIdentityProvider.oauth_access_token_url, json=access_json
)
responses.add(
responses.GET,
f"{VercelClient.base_url}{VercelClient.GET_PROJECTS_URL}?limit={VercelClient.pagination_limit}&{team_query}",
json={"projects": [], "pagination": {"count": 0, "next": None}},
)
params = {
"configurationId": "config_id",
"code": "oauth-code",
"next": "https://example.com",
}
self.pipeline.bind_state("user_id", self.user.id)
# TODO: Should use the setup path since we /configure instead
resp = self.client.get(self.setup_path, params)
mock_request = responses.calls[0].request
req_params = parse_qs(mock_request.body)
assert req_params["grant_type"] == ["authorization_code"]
assert req_params["code"] == ["oauth-code"]
assert req_params["redirect_uri"] == ["http://testserver/extensions/vercel/configure/"]
assert req_params["client_id"] == ["vercel-client-id"]
assert req_params["client_secret"] == ["vercel-client-secret"]
assert resp.status_code == 200
self.assertDialogSuccess(resp)
integration = Integration.objects.get(provider=self.provider.key)
external_id = self.team_id if is_team else "my_user_id"
name = "My Team Name" if is_team else "my_user_name" if no_name else "My Name"
installation_type = "team" if is_team else "user"
assert integration.external_id == external_id
assert integration.name == name
assert integration.metadata == {
"access_token": "my_access_token",
"installation_id": self.config_id,
"installation_type": installation_type,
}
assert OrganizationIntegration.objects.get(
integration=integration, organization_id=self.organization.id
)
assert SentryAppInstallationForProvider.objects.get(
organization_id=self.organization.id, provider="vercel"
)
@responses.activate
def test_team_flow(self) -> None:
self.assert_setup_flow(is_team=True)
@responses.activate
def test_user_flow(self) -> None:
self.assert_setup_flow(is_team=False)
@responses.activate
def test_no_name(self) -> None:
self.assert_setup_flow(no_name=True)
@responses.activate
def test_use_existing_installation(self) -> None:
sentry_app = self.create_internal_integration(
webhook_url=None,
name="Vercel Internal Integration",
organization=self.organization,
)
sentry_app_installation = SentryAppInstallation.objects.get(sentry_app=sentry_app)
SentryAppInstallationForProvider.objects.create(
organization_id=self.organization.id,
provider="vercel",
sentry_app_installation=sentry_app_installation,
)
self.assert_setup_flow(is_team=False)
assert SentryAppInstallation.objects.count() == 1
@responses.activate
def test_update_organization_config(self) -> None:
"""Test that Vercel environment variables are created"""
with self.tasks():
self.assert_setup_flow()
org = self.organization
project_id = self.project.id
with assume_test_silo_mode(SiloMode.REGION):
project_key = ProjectKey.get_default(project=Project.objects.get(id=project_id))
enabled_dsn = project_key.get_dsn(public=True)
integration_endpoint = project_key.integration_endpoint
public_key = project_key.public_key
sentry_auth_token = SentryAppInstallationToken.objects.get_token(org.id, "vercel")
env_var_map = {
"SENTRY_ORG": {
"type": "encrypted",
"value": org.slug,
"target": ["production", "preview"],
},
"SENTRY_PROJECT": {
"type": "encrypted",
"value": self.project.slug,
"target": ["production", "preview"],
},
"SENTRY_DSN": {
"type": "encrypted",
"value": enabled_dsn,
"target": [
"production",
"preview",
"development",
],
},
"SENTRY_AUTH_TOKEN": {
"type": "encrypted",
"value": sentry_auth_token,
"target": ["production", "preview"],
},
"VERCEL_GIT_COMMIT_SHA": {
"type": "system",
"value": "VERCEL_GIT_COMMIT_SHA",
"target": ["production", "preview"],
},
"SENTRY_VERCEL_LOG_DRAIN_URL": {
"type": "encrypted",
"value": f"{integration_endpoint}vercel/logs/",
"target": ["production", "preview"],
},
"SENTRY_OTLP_TRACES_URL": {
"type": "encrypted",
"value": f"{integration_endpoint}otlp/v1/traces",
"target": ["production", "preview"],
},
"SENTRY_PUBLIC_KEY": {
"type": "encrypted",
"value": public_key,
"target": ["production", "preview"],
},
}
# mock get_project API call
responses.add(
responses.GET,
f"{VercelClient.base_url}{VercelClient.GET_PROJECT_URL % self.project_id}",
json={"link": {"type": "github"}, "framework": "nextjs"},
)
# mock create the env vars
for env_var, details in env_var_map.items():
responses.add(
responses.POST,
f"{VercelClient.base_url}{VercelClient.CREATE_ENV_VAR_URL % self.project_id}",
json={
"key": env_var,
"value": details["value"],
"target": details["target"],
"type": details["type"],
},
)
integration = Integration.objects.get(provider=self.provider.key)
installation = integration.get_installation(org.id)
org_integration = OrganizationIntegration.objects.get(
organization_id=org.id, integration_id=integration.id
)
assert org_integration.config == {}
data = {"project_mappings": [[project_id, self.project_id]]}
installation.update_organization_config(data)
org_integration = OrganizationIntegration.objects.get(
organization_id=org.id, integration_id=integration.id
)
assert org_integration.config == {"project_mappings": [[project_id, self.project_id]]}
# assert the env vars were created correctly
req_params = orjson.loads(responses.calls[5].request.body)
assert req_params["key"] == "SENTRY_ORG"
assert req_params["value"] == org.slug
assert req_params["target"] == ["production", "preview"]
assert req_params["type"] == "encrypted"
req_params = orjson.loads(responses.calls[6].request.body)
assert req_params["key"] == "SENTRY_PROJECT"
assert req_params["value"] == self.project.slug
assert req_params["target"] == ["production", "preview"]
assert req_params["type"] == "encrypted"
req_params = orjson.loads(responses.calls[7].request.body)
assert req_params["key"] == "NEXT_PUBLIC_SENTRY_DSN"
assert req_params["value"] == enabled_dsn
assert req_params["target"] == ["production", "preview", "development"]
assert req_params["type"] == "encrypted"
req_params = orjson.loads(responses.calls[8].request.body)
assert req_params["key"] == "SENTRY_AUTH_TOKEN"
assert req_params["target"] == ["production", "preview"]
assert req_params["type"] == "encrypted"
req_params = orjson.loads(responses.calls[9].request.body)
assert req_params["key"] == "VERCEL_GIT_COMMIT_SHA"
assert req_params["value"] == "VERCEL_GIT_COMMIT_SHA"
assert req_params["target"] == ["production", "preview"]
assert req_params["type"] == "system"
req_params = orjson.loads(responses.calls[10].request.body)
assert req_params["key"] == "SENTRY_VERCEL_LOG_DRAIN_URL"
assert req_params["value"] == f"{integration_endpoint}vercel/logs/"
assert req_params["target"] == ["production", "preview"]
assert req_params["type"] == "encrypted"
req_params = orjson.loads(responses.calls[11].request.body)
assert req_params["key"] == "SENTRY_OTLP_TRACES_URL"
assert req_params["value"] == f"{integration_endpoint}otlp/v1/traces"
assert req_params["target"] == ["production", "preview"]
assert req_params["type"] == "encrypted"
req_params = orjson.loads(responses.calls[12].request.body)
assert req_params["key"] == "SENTRY_PUBLIC_KEY"
assert req_params["value"] == public_key
assert req_params["target"] == ["production", "preview"]
assert req_params["type"] == "encrypted"
@responses.activate
def test_update_org_config_vars_exist(self) -> None:
"""Test the case wherein the secret and env vars already exist"""
with self.tasks():
self.assert_setup_flow()
org = self.organization
project_id = self.project.id
with assume_test_silo_mode(SiloMode.REGION):
project_key = ProjectKey.get_default(project=Project.objects.get(id=project_id))
enabled_dsn = project_key.get_dsn(public=True)
integration_endpoint = project_key.integration_endpoint
public_key = project_key.public_key
sentry_auth_token = SentryAppInstallationToken.objects.get_token(org.id, "vercel")
env_var_map = {
"SENTRY_ORG": {
"type": "encrypted",
"value": org.slug,
"target": ["production", "preview"],
},
"SENTRY_PROJECT": {
"type": "encrypted",
"value": self.project.slug,
"target": ["production", "preview"],
},
"SENTRY_DSN": {
"type": "encrypted",
"value": enabled_dsn,
"target": [
"production",
"preview",
"development",
],
},
"SENTRY_AUTH_TOKEN": {
"type": "encrypted",
"value": sentry_auth_token,
"target": ["production", "preview"],
},
"VERCEL_GIT_COMMIT_SHA": {
"type": "system",
"value": "VERCEL_GIT_COMMIT_SHA",
"target": ["production", "preview"],
},
"SENTRY_VERCEL_LOG_DRAIN_URL": {
"type": "encrypted",
"value": f"{integration_endpoint}vercel/logs/",
"target": ["production", "preview"],
},
"SENTRY_OTLP_TRACES_URL": {
"type": "encrypted",
"value": f"{integration_endpoint}otlp/v1/traces",
"target": ["production", "preview"],
},
"SENTRY_PUBLIC_KEY": {
"type": "encrypted",
"value": public_key,
"target": ["production", "preview"],
},
}
# mock get_project API call
responses.add(
responses.GET,
f"{VercelClient.base_url}{VercelClient.GET_PROJECT_URL % self.project_id}",
json={"link": {"type": "github"}, "framework": "gatsby"},
)
# mock update env vars
count = 0
for env_var, details in env_var_map.items():
# mock try to create env var
responses.add(
responses.POST,
f"{VercelClient.base_url}{VercelClient.CREATE_ENV_VAR_URL % self.project_id}",
json={"error": {"code": "ENV_ALREADY_EXISTS"}},
status=400,
)
# mock get env var
responses.add(
responses.GET,
f"{VercelClient.base_url}{VercelClient.GET_ENV_VAR_URL % self.project_id}",
json={"envs": [{"id": count, "key": env_var}]},
)
# mock update env var
responses.add(
responses.PATCH,
f"{VercelClient.base_url}{VercelClient.UPDATE_ENV_VAR_URL % (self.project_id, count)}",
json={
"key": env_var,
"value": details["value"],
"target": details["target"],
"type": details["type"],
},
)
count += 1
data = {"project_mappings": [[project_id, self.project_id]]}
integration = Integration.objects.get(provider=self.provider.key)
installation = integration.get_installation(org.id)
org_integration = OrganizationIntegration.objects.get(
organization_id=org.id, integration_id=integration.id
)
assert org_integration.config == {}
installation.update_organization_config(data)
org_integration = OrganizationIntegration.objects.get(
organization_id=org.id, integration_id=integration.id
)
assert org_integration.config == {"project_mappings": [[project_id, self.project_id]]}
req_params = orjson.loads(responses.calls[5].request.body)
assert req_params["key"] == "SENTRY_ORG"
assert req_params["value"] == org.slug
assert req_params["target"] == ["production", "preview"]
assert req_params["type"] == "encrypted"
req_params = orjson.loads(responses.calls[8].request.body)
assert req_params["key"] == "SENTRY_PROJECT"
assert req_params["value"] == self.project.slug
assert req_params["target"] == ["production", "preview"]
assert req_params["type"] == "encrypted"
req_params = orjson.loads(responses.calls[11].request.body)
assert req_params["key"] == "SENTRY_DSN"
assert req_params["value"] == enabled_dsn
assert req_params["target"] == ["production", "preview", "development"]
assert req_params["type"] == "encrypted"
req_params = orjson.loads(responses.calls[14].request.body)
assert req_params["key"] == "SENTRY_AUTH_TOKEN"
assert req_params["target"] == ["production", "preview"]
assert req_params["type"] == "encrypted"
req_params = orjson.loads(responses.calls[17].request.body)
assert req_params["key"] == "VERCEL_GIT_COMMIT_SHA"
assert req_params["value"] == "VERCEL_GIT_COMMIT_SHA"
assert req_params["target"] == ["production", "preview"]
assert req_params["type"] == "system"
req_params = orjson.loads(responses.calls[20].request.body)
assert req_params["key"] == "SENTRY_VERCEL_LOG_DRAIN_URL"
assert req_params["value"] == f"{integration_endpoint}vercel/logs/"
assert req_params["target"] == ["production", "preview"]
assert req_params["type"] == "encrypted"
req_params = orjson.loads(responses.calls[23].request.body)
assert req_params["key"] == "SENTRY_OTLP_TRACES_URL"
assert req_params["value"] == f"{integration_endpoint}otlp/v1/traces"
assert req_params["target"] == ["production", "preview"]
assert req_params["type"] == "encrypted"
req_params = orjson.loads(responses.calls[26].request.body)
assert req_params["key"] == "SENTRY_PUBLIC_KEY"
assert req_params["value"] == public_key
assert req_params["target"] == ["production", "preview"]
assert req_params["type"] == "encrypted"
@responses.activate
def test_upgrade_org_config_no_dsn(self) -> None:
"""Test that the function doesn't progress if there is no active DSN"""
with self.tasks():
self.assert_setup_flow()
project_id = self.project.id
org = self.organization
data = {"project_mappings": [[project_id, self.project_id]]}
integration = Integration.objects.get(provider=self.provider.key)
with assume_test_silo_mode(SiloMode.REGION):
installation = integration.get_installation(org.id)
with assume_test_silo_mode(SiloMode.REGION):
dsn = ProjectKey.get_default(project=Project.objects.get(id=project_id))
dsn.update(id=dsn.id, status=ProjectKeyStatus.INACTIVE)
with pytest.raises(ValidationError):
installation.update_organization_config(data)
@responses.activate
def test_get_dynamic_display_information(self) -> None:
with self.tasks():
self.assert_setup_flow()
integration = Integration.objects.get(provider=self.provider.key)
installation = integration.get_installation(self.organization.id)
dynamic_display_info = installation.get_dynamic_display_information()
assert dynamic_display_info is not None
instructions = dynamic_display_info["configure_integration"]["instructions"]
assert len(instructions) == 1
assert "configure your repositories." in instructions[0]
@responses.activate
def test_uninstall(self) -> None:
    """Deleting the integration via the API only schedules deletion.

    The org integration is marked PENDING_DELETION and a ScheduledDeletion
    is created; the integration row itself survives until the Vercel
    webhook confirms the uninstall.
    """
    with self.tasks():
        self.assert_setup_flow()

    # Stub Vercel's uninstall endpoint so the outbound DELETE succeeds.
    responses.add(
        responses.DELETE,
        f"{VercelClient.base_url}{VercelClient.UNINSTALL % self.config_id}",
        json={},
    )

    integration = Integration.objects.get(provider=self.provider.key)
    path = f"/api/0/organizations/{self.organization.slug}/integrations/{integration.id}/"
    response = self.client.delete(path, format="json")
    assert response.status_code == 204

    # deleting the integration only happens when we get the Vercel webhook
    integration = Integration.objects.get(provider=self.provider.key)
    org_integration = OrganizationIntegration.objects.get(
        integration_id=integration.id, organization_id=self.organization.id
    )
    assert org_integration.status == ObjectStatus.PENDING_DELETION
    assert ScheduledDeletion.objects.filter(
        model_name="OrganizationIntegration", object_id=org_integration.id
    ).exists()
|
VercelIntegrationTest
|
python
|
chroma-core__chroma
|
chromadb/quota/simple_quota_enforcer/__init__.py
|
{
"start": 396,
"end": 1495
}
|
class ____(QuotaEnforcer):
    """
    A naive implementation of a quota enforcer that allows all requests.
    """

    def __init__(self, system: System) -> None:
        # Nothing to configure; defer entirely to the base component setup.
        super().__init__(system)

    @override
    def set_context(self, context: Dict[str, Any]) -> None:
        # No-op: this enforcer keeps no per-request context.
        pass

    @override
    def enforce(
        self,
        action: Action,
        tenant: str,
        metadatas: Optional[Metadatas] = None,
        documents: Optional[Documents] = None,
        embeddings: Optional[Embeddings] = None,
        uris: Optional[URIs] = None,
        ids: Optional[IDs] = None,
        name: Optional[str] = None,
        new_name: Optional[str] = None,
        metadata: Optional[CollectionMetadata] = None,
        new_metadata: Optional[CollectionMetadata] = None,
        limit: Optional[int] = None,
        where: Optional[Where] = None,
        where_document: Optional[WhereDocument] = None,
        n_results: Optional[int] = None,
        query_embeddings: Optional[Embeddings] = None,
        collection_id: Optional[UUID] = None,
    ) -> None:
        # No-op: every request is permitted regardless of action or payload;
        # real enforcers would inspect the arguments and raise on violation.
        pass
|
SimpleQuotaEnforcer
|
python
|
kamyu104__LeetCode-Solutions
|
Python/patching-array.py
|
{
"start": 1333,
"end": 1748
}
|
class ____(object):
def minPatches(self, nums, n):
    """
    :type nums: List[int]
    :type n: int
    :rtype: int

    Greedy: maintain `reach`, the smallest sum NOT yet representable
    (every value in [1, reach) is covered). Consume existing numbers
    while they extend coverage; otherwise patch with `reach` itself,
    which doubles the covered range.
    """
    added = 0
    reach = 1
    idx = 0
    count = len(nums)
    while reach <= n:
        if idx < count and nums[idx] <= reach:
            # nums[idx] fits inside the covered range, extending it for free.
            reach += nums[idx]
            idx += 1
        else:
            # Patch with `reach`: coverage becomes [1, 2*reach).
            reach <<= 1
            added += 1
    return added
|
Solution3
|
python
|
rushter__MLAlgorithms
|
mla/knn.py
|
{
"start": 151,
"end": 1682
}
|
class ____(BaseEstimator):
    def __init__(self, k=5, distance_func=euclidean):
        """Base class for Nearest neighbors classifier and regressor.

        Parameters
        ----------
        k : int, default 5
            The number of neighbors to take into account. If 0, all the
            training examples are used.
        distance_func : function, default euclidean distance
            A distance function taking two arguments. Any function from
            scipy.spatial.distance will do.
        """
        # k == 0 means "use every training example": storing None makes
        # the later slice neighbors[:None] return the whole list.
        self.k = k if k != 0 else None
        self.distance_func = distance_func

    def aggregate(self, neighbors_targets):
        """Combine neighbor targets into one prediction; implemented by subclasses."""
        raise NotImplementedError()

    def _predict(self, X=None):
        """Predict a label for every row of X."""
        return np.array([self._predict_x(sample) for sample in X])

    def _predict_x(self, x):
        """Predict the label of a single instance x."""
        # Pair each training example's distance to x with its target value,
        # then order the pairs by distance (stable sort keeps ties in
        # training order, matching the generator-based original).
        scored = [
            (self.distance_func(x, example), target)
            for example, target in zip(self.X, self.y)
        ]
        scored.sort(key=lambda pair: pair[0])
        # Aggregate the targets of the k nearest neighbors (most common
        # one or average, depending on the subclass).
        nearest_targets = [target for _, target in scored[: self.k]]
        return self.aggregate(nearest_targets)
|
KNNBase
|
python
|
scipy__scipy
|
scipy/stats/_continuous_distns.py
|
{
"start": 176225,
"end": 179505
}
|
class ____(rv_continuous):
    r"""Jones and Faddy skew-t distribution.

    %(before_notes)s

    Notes
    -----
    The probability density function for `jf_skew_t` is:

    .. math::

        f(x; a, b) = C_{a,b}^{-1}
                     \left(1+\frac{x}{\left(a+b+x^2\right)^{1/2}}\right)^{a+1/2}
                     \left(1-\frac{x}{\left(a+b+x^2\right)^{1/2}}\right)^{b+1/2}

    for real numbers :math:`a>0` and :math:`b>0`, where
    :math:`C_{a,b} = 2^{a+b-1}B(a,b)(a+b)^{1/2}`, and :math:`B` denotes the
    beta function (`scipy.special.beta`).

    When :math:`a<b`, the distribution is negatively skewed, and when
    :math:`a>b`, the distribution is positively skewed. If :math:`a=b`, then
    we recover the `t` distribution with :math:`2a` degrees of freedom.

    `jf_skew_t` takes :math:`a` and :math:`b` as shape parameters.

    %(after_notes)s

    References
    ----------
    .. [1] M.C. Jones and M.J. Faddy. "A skew extension of the t distribution,
           with applications" *Journal of the Royal Statistical Society*.
           Series B (Statistical Methodology) 65, no. 1 (2003): 159-174.
           :doi:`10.1111/1467-9868.00378`

    %(example)s

    """
    def _shape_info(self):
        # Both shape parameters are strictly positive reals (open interval).
        ia = _ShapeInfo("a", False, (0, np.inf), (False, False))
        ib = _ShapeInfo("b", False, (0, np.inf), (False, False))
        return [ia, ib]

    def _pdf(self, x, a, b):
        # c is the normalizing constant C_{a,b} = 2**(a+b-1) * B(a,b) * sqrt(a+b)
        # from the class docstring; d1 and d2 are the two power factors.
        c = 2 ** (a + b - 1) * sc.beta(a, b) * np.sqrt(a + b)
        d1 = (1 + x / np.sqrt(a + b + x ** 2)) ** (a + 0.5)
        d2 = (1 - x / np.sqrt(a + b + x ** 2)) ** (b + 0.5)
        return d1 * d2 / c

    def _rvs(self, a, b, size=None, random_state=None):
        # Sample Beta(a, b), then apply the same monotone transform used in
        # _ppf, so the variates follow the skew-t law.
        d1 = random_state.beta(a, b, size)
        d2 = (2 * d1 - 1) * np.sqrt(a + b)
        d3 = 2 * np.sqrt(d1 * (1 - d1))
        return d2 / d3

    def _cdf(self, x, a, b):
        # Map x into (0, 1) via y = (1 + x / sqrt(a+b+x^2)) / 2, then use
        # the regularized incomplete beta function.
        y = (1 + x / np.sqrt(a + b + x ** 2)) * 0.5
        return sc.betainc(a, b, y)

    def _sf(self, x, a, b):
        # Survival function via the complemented incomplete beta for accuracy
        # in the upper tail (avoids computing 1 - cdf directly).
        y = (1 + x / np.sqrt(a + b + x ** 2)) * 0.5
        return sc.betaincc(a, b, y)

    def _ppf(self, q, a, b):
        # Invert _cdf: take the beta quantile, then undo the y(x) transform.
        d1 = beta.ppf(q, a, b)
        d2 = (2 * d1 - 1) * np.sqrt(a + b)
        d3 = 2 * np.sqrt(d1 * (1 - d1))
        return d2 / d3

    def _munp(self, n, a, b):
        """Returns the n-th moment(s) where all the following hold:

        - n >= 0
        - a > n / 2
        - b > n / 2

        The result is np.nan in all other cases.
        """
        def nth_moment(n_k, a_k, b_k):
            """Computes E[T^(n_k)] where T is skew-t distributed with
            parameters a_k and b_k.
            """
            num = (a_k + b_k) ** (0.5 * n_k)
            denom = 2 ** n_k * sc.beta(a_k, b_k)
            indices = np.arange(n_k + 1)
            # Alternating signs for the finite binomial sum of beta terms.
            sgn = np.where(indices % 2 > 0, -1, 1)
            d = sc.beta(a_k + 0.5 * n_k - indices, b_k - 0.5 * n_k + indices)
            sum_terms = sc.comb(n_k, indices) * sgn * d
            return num / denom * sum_terms.sum()

        # Only evaluate where the moment exists; fill invalid entries with NaN.
        nth_moment_valid = (a > 0.5 * n) & (b > 0.5 * n) & (n >= 0)
        return xpx.apply_where(
            nth_moment_valid,
            (n, a, b),
            np.vectorize(nth_moment, otypes=[np.float64]),
            fill_value=np.nan,
        )


jf_skew_t = jf_skew_t_gen(name='jf_skew_t')
|
jf_skew_t_gen
|
python
|
getsentry__sentry
|
src/sentry/integrations/jira_server/integration.py
|
{
"start": 4667,
"end": 4752
}
|
class ____(TypedDict):
    """Shape of a select-style field's option data."""

    # Available options as string pairs — presumably (value, label);
    # TODO(review): confirm ordering against the field consumers.
    choices: list[tuple[str, str]]
    # Placeholder text shown when no option is selected.
    placeholder: str
|
_Choices
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py
|
{
"start": 17335,
"end": 17638
}
|
class ____(graphene.ObjectType):
    """GraphQL object type for a step's handled-output event.

    Exposed in the schema as ``HandledOutputEvent`` and composed of the
    message/step/displayable event interfaces.
    """

    class Meta:
        interfaces = (GrapheneMessageEvent, GrapheneStepEvent, GrapheneDisplayableEvent)
        name = "HandledOutputEvent"

    # Name of the output that was handled.
    output_name = graphene.NonNull(graphene.String)
    # Resource key of the manager that handled the output (presumably the
    # IO manager — confirm against the event producer).
    manager_key = graphene.NonNull(graphene.String)
|
GrapheneHandledOutputEvent
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_unitofwork.py
|
{
"start": 102709,
"end": 106660
}
|
class ____(fixtures.MappedTest):
    """Tests around flushing objects whose primary keys may not be sortable.

    The error message asserted below shows the unit of work sorts persistent
    objects by primary key at flush time; these tests cover a sortable enum
    PK, a non-sortable enum PK (expected failure), and state ordering with
    the default sort key function.
    """

    class SomeEnum:
        # Implements PEP 435 in the minimal fashion needed by SQLAlchemy
        __members__ = OrderedDict()

        def __init__(self, name, value, alias=None):
            self.name = name
            self.value = value
            # Register the member (and optional alias) both in __members__
            # and as a class attribute, mimicking enum.Enum access patterns.
            self.__members__[name] = self
            setattr(self.__class__, name, self)
            if alias:
                self.__members__[alias] = self
                setattr(self.__class__, alias, self)

    class MySortableEnum(SomeEnum):
        __members__ = OrderedDict()

        # Defines "<" so instances can be ordered in Python.
        def __lt__(self, other):
            return self.value < other.value

    class MyNotSortableEnum(SomeEnum):
        # No comparison methods: comparing instances with "<" fails.
        __members__ = OrderedDict()

    one = MySortableEnum("one", 1)
    two = MySortableEnum("two", 2)
    three = MyNotSortableEnum("three", 3)
    four = MyNotSortableEnum("four", 4)
    five = MyNotSortableEnum("five", 5)

    @classmethod
    def define_tables(cls, metadata):
        # t1: sortable enum PK; t2: non-sortable enum PK with the sort key
        # function explicitly disabled; t3: non-sortable enum PK with the
        # Enum type's default sort key behavior.
        Table(
            "t1",
            metadata,
            Column(
                "id",
                Enum(cls.MySortableEnum, create_constraint=False),
                primary_key=True,
            ),
            Column("data", String(10)),
        )
        Table(
            "t2",
            metadata,
            Column(
                "id",
                Enum(
                    cls.MyNotSortableEnum,
                    sort_key_function=None,
                    create_constraint=False,
                ),
                primary_key=True,
            ),
            Column("data", String(10)),
        )
        Table(
            "t3",
            metadata,
            Column(
                "id",
                Enum(cls.MyNotSortableEnum, create_constraint=False),
                primary_key=True,
            ),
            Column("value", Integer),
        )

    @staticmethod
    def sort_enum_key_value(value):
        # Sort key candidate ordering members by their numeric value.
        return value.value

    @classmethod
    def setup_classes(cls):
        class T1(cls.Basic):
            pass

        class T2(cls.Basic):
            pass

        class T3(cls.Basic):
            def __str__(self):
                return f"T3(id={self.id})"

    @classmethod
    def setup_mappers(cls):
        cls.mapper_registry.map_imperatively(cls.classes.T1, cls.tables.t1)
        cls.mapper_registry.map_imperatively(cls.classes.T2, cls.tables.t2)
        cls.mapper_registry.map_imperatively(cls.classes.T3, cls.tables.t3)

    def test_exception_persistent_flush_py3k(self):
        """Flushing dirty rows with non-sortable PKs raises InvalidRequestError."""
        s = fixture_session()
        a, b = self.classes.T2(id=self.three), self.classes.T2(id=self.four)
        s.add_all([a, b])
        s.commit()

        # Dirty both rows so the flush must order them by primary key.
        a.data = "bar"
        b.data = "foo"

        message = (
            r"Could not sort objects by primary key; primary key "
            r"values must be sortable in Python \(was: '<' not "
            r"supported between instances of 'MyNotSortableEnum'"
            r" and 'MyNotSortableEnum'\)"
        )

        assert_raises_message(
            sa.exc.InvalidRequestError,
            message,
            s.flush,
        )

        s.close()

    def test_persistent_flush_sortable(self):
        """Dirty rows with sortable enum PKs flush without error."""
        s = fixture_session()
        a, b = self.classes.T1(id=self.one), self.classes.T1(id=self.two)
        s.add_all([a, b])
        s.commit()

        a.data = "bar"
        b.data = "foo"
        s.commit()

    def test_pep435_custom_sort_key(self):
        """States sort pending-first, then by the enum sort key."""
        s = fixture_session()
        a = self.classes.T3(id=self.three, value=1)
        b = self.classes.T3(id=self.four, value=2)

        s.add_all([a, b])
        s.commit()

        # c remains pending (added but not flushed).
        c = self.classes.T3(id=self.five, value=0)
        s.add(c)

        states = [o._sa_instance_state for o in [b, a, c]]

        eq_(
            _sort_states(inspect(self.classes.T3), states),
            # pending come first, then "four" < "three"
            [o._sa_instance_state for o in [c, b, a]],
        )
|
EnsurePKSortableTest
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.